changeset 39262:5b9f116104f9

merge with stable
author Martin von Zweigbergk <martinvonz@google.com>
date Fri, 24 Aug 2018 12:55:05 -0700
parents c9a3f7f5c023 (diff) bd63ada7e1f8 (current diff)
children eebd591803ab
files hgext/beautifygraph.py mercurial/phases.py
diffstat 231 files changed, 15704 insertions(+), 4044 deletions(-) [+]
line wrap: on
line diff
--- a/Makefile	Sat Aug 18 10:24:57 2018 +0200
+++ b/Makefile	Fri Aug 24 12:55:05 2018 -0700
@@ -9,7 +9,8 @@
 $(eval HGROOT := $(shell pwd))
 HGPYTHONS ?= $(HGROOT)/build/pythons
 PURE=
-PYFILES:=$(shell find mercurial hgext doc -name '*.py')
+PYFILESCMD=find mercurial hgext doc -name '*.py'
+PYFILES:=$(shell $(PYFILESCMD))
 DOCFILES=mercurial/help/*.txt
 export LANGUAGE=C
 export LC_ALL=C
@@ -145,7 +146,7 @@
         # parse them even though they are not marked for translation.
         # Extracting with an explicit encoding of ISO-8859-1 will make
         # xgettext "parse" and ignore them.
-	echo $(PYFILES) | xargs \
+	$(PYFILESCMD) | xargs \
 	  xgettext --package-name "Mercurial" \
 	  --msgid-bugs-address "<mercurial-devel@mercurial-scm.org>" \
 	  --copyright-holder "Matt Mackall <mpm@selenic.com> and others" \
--- a/contrib/byteify-strings.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/byteify-strings.py	Fri Aug 24 12:55:05 2018 -0700
@@ -169,6 +169,11 @@
                 yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                 continue
 
+        # Looks like "if __name__ == '__main__'".
+        if (t.type == token.NAME and t.string == '__name__'
+            and _isop(i + 1, '==')):
+            _ensuresysstr(i + 2)
+
         # Emit unmodified token.
         yield adjusttokenpos(t, coloffset)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/catapipe.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Tool to read primitive events from a pipe to produce a catapult trace.
+
+For now the event stream supports
+
+  START $SESSIONID ...
+
+and
+
+  END $SESSIONID ...
+
+events. Everything after the SESSIONID (which must not contain spaces)
+is used as a label for the event. Events are timestamped as of when
+they arrive in this process and are then used to produce catapult
+traces that can be loaded in Chrome's about:tracing utility. It's
+important that the event stream *into* this process stay simple,
+because we have to emit it from the shell scripts produced by
+run-tests.py.
+
+Typically you'll want to place the path to the named pipe in the
+HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg
+understand.
+"""
+from __future__ import absolute_import, print_function
+
+import argparse
+import datetime
+import json
+import os
+
+_TYPEMAP = {
+    'START': 'B',
+    'END': 'E',
+}
+
+_threadmap = {}
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('pipe', type=str, nargs=1,
+                        help='Path of named pipe to create and listen on.')
+    parser.add_argument('output', default='trace.json', type=str, nargs='?',
+                        help='Path of JSON trace file to write.')
+    parser.add_argument('--debug', default=False, action='store_true',
+                        help='Print useful debug messages')
+    args = parser.parse_args()
+    fn = args.pipe[0]
+    os.mkfifo(fn)
+    try:
+        with open(fn) as f, open(args.output, 'w') as out:
+            out.write('[\n')
+            start = datetime.datetime.now()
+            while True:
+                ev = f.readline().strip()
+                if not ev:
+                    continue
+                now = datetime.datetime.now()
+                if args.debug:
+                    print(ev)
+                verb, session, label = ev.split(' ', 2)
+                if session not in _threadmap:
+                    _threadmap[session] = len(_threadmap)
+                pid = _threadmap[session]
+                ts_micros = (now - start).total_seconds() * 1000000
+                out.write(json.dumps(
+                    {
+                        "name": label,
+                        "cat": "misc",
+                        "ph": _TYPEMAP[verb],
+                        "ts": ts_micros,
+                        "pid": pid,
+                        "tid": 1,
+                        "args": {}
+                    }))
+                out.write(',\n')
+    finally:
+        os.unlink(fn)
+
+if __name__ == '__main__':
+    main()
--- a/contrib/check-code.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/check-code.py	Fri Aug 24 12:55:05 2018 -0700
@@ -30,7 +30,7 @@
     opentext = open
 else:
     def opentext(f):
-        return open(f, encoding='ascii')
+        return open(f, encoding='latin1')
 try:
     xrange
 except NameError:
@@ -511,6 +511,7 @@
     (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
     (r'os\.getenv', "use encoding.environ.get instead"),
     (r'os\.setenv', "modifying the environ dict is not preferred"),
+    (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
   ],
   # warnings
   [],
--- a/contrib/import-checker.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/import-checker.py	Fri Aug 24 12:55:05 2018 -0700
@@ -36,6 +36,7 @@
     'mercurial.pure.parsers',
     # third-party imports should be directly imported
     'mercurial.thirdparty',
+    'mercurial.thirdparty.attr',
     'mercurial.thirdparty.cbor',
     'mercurial.thirdparty.cbor.cbor2',
     'mercurial.thirdparty.zope',
--- a/contrib/perf.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/perf.py	Fri Aug 24 12:55:05 2018 -0700
@@ -663,21 +663,20 @@
     By default, all revisions are added to the changegroup.
     """
     cl = repo.changelog
-    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
+    nodes = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
     bundler = changegroup.getbundler(version, repo)
 
-    def lookup(node):
-        # The real bundler reads the revision in order to access the
-        # manifest node and files list. Do that here.
-        cl.read(node)
-        return node
-
     def d():
-        for chunk in bundler.group(revs, cl, lookup):
+        state, chunks = bundler._generatechangelog(cl, nodes)
+        for chunk in chunks:
             pass
 
     timer, fm = gettimer(ui, opts)
-    timer(d)
+
+    # Terminal printing can interfere with timing. So disable it.
+    with ui.configoverride({('progress', 'disable'): True}):
+        timer(d)
+
     fm.end()
 
 @command('perfdirs', formatteropts)
@@ -848,17 +847,23 @@
     timer(d)
     fm.end()
 
-@command('perfmanifest', [], 'REV')
-def perfmanifest(ui, repo, rev, **opts):
+@command('perfmanifest',[
+            ('m', 'manifest-rev', False, 'Look up a manifest node revision'),
+            ('', 'clear-disk', False, 'clear on-disk caches too'),
+         ], 'REV|NODE')
+def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
     """benchmark the time to read a manifest from disk and return a usable
     dict-like object
 
     Manifest caches are cleared before retrieval."""
     timer, fm = gettimer(ui, opts)
-    ctx = scmutil.revsingle(repo, rev, rev)
-    t = ctx.manifestnode()
+    if not manifest_rev:
+        ctx = scmutil.revsingle(repo, rev, rev)
+        t = ctx.manifestnode()
+    else:
+        t = repo.manifestlog._revlog.lookup(rev)
     def d():
-        repo.manifestlog.clearcaches()
+        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
         repo.manifestlog[t].read()
     timer(d)
     fm.end()
@@ -940,6 +945,38 @@
     timer(lambda: len(repo.lookup(rev)))
     fm.end()
 
+@command('perflinelogedits',
+         [('n', 'edits', 10000, 'number of edits'),
+          ('', 'max-hunk-lines', 10, 'max lines in a hunk'),
+         ], norepo=True)
+def perflinelogedits(ui, **opts):
+    from mercurial import linelog
+
+    edits = opts['edits']
+    maxhunklines = opts['max_hunk_lines']
+
+    maxb1 = 100000
+    random.seed(0)
+    randint = random.randint
+    currentlines = 0
+    arglist = []
+    for rev in xrange(edits):
+        a1 = randint(0, currentlines)
+        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
+        b1 = randint(0, maxb1)
+        b2 = randint(b1, b1 + maxhunklines)
+        currentlines += (b2 - b1) - (a2 - a1)
+        arglist.append((rev, a1, a2, b1, b2))
+
+    def d():
+        ll = linelog.linelog()
+        for args in arglist:
+            ll.replacelines(*args)
+
+    timer, fm = gettimer(ui, opts)
+    timer(d)
+    fm.end()
+
 @command('perfrevrange', formatteropts)
 def perfrevrange(ui, repo, *specs, **opts):
     timer, fm = gettimer(ui, opts)
@@ -980,7 +1017,7 @@
     """
     timer, fm = gettimer(ui, opts)
     def moonwalk():
-        for i in xrange(len(repo), -1, -1):
+        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
             ctx = repo[i]
             ctx.branch() # read changelog data (in addition to the index)
     timer(moonwalk)
@@ -1771,6 +1808,31 @@
         branchcachewrite.restore()
     fm.end()
 
+@command('perfbranchmapload', [
+     ('f', 'filter', '', 'Specify repoview filter'),
+     ('', 'list', False, 'List branchmap filter caches'),
+    ] + formatteropts)
+def perfbranchmapread(ui, repo, filter='', list=False, **opts):
+    """benchmark reading the branchmap"""
+    if list:
+        for name, kind, st in repo.cachevfs.readdir(stat=True):
+            if name.startswith('branch2'):
+                filtername = name.partition('-')[2] or 'unfiltered'
+                ui.status('%s - %s\n'
+                          % (filtername, util.bytecount(st.st_size)))
+        return
+    if filter:
+        repo = repoview.repoview(repo, filter)
+    else:
+        repo = repo.unfiltered()
+    # try once without timer, the filter may not be cached
+    if branchmap.read(repo) is None:
+        raise error.Abort('No branchmap cached for %s repo'
+                          % (filter or 'unfiltered'))
+    timer, fm = gettimer(ui, opts)
+    timer(lambda: branchmap.read(repo) and None)
+    fm.end()
+
 @command('perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo
--- a/contrib/phabricator.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/phabricator.py	Fri Aug 24 12:55:05 2018 -0700
@@ -570,6 +570,7 @@
                 drevid = drevids[i]
                 drev = [d for d in drevs if int(d[r'id']) == drevid][0]
                 newdesc = getdescfromdrev(drev)
+                newdesc = encoding.unitolocal(newdesc)
                 # Make sure commit message contain "Differential Revision"
                 if old.description() != newdesc:
                     parents = [
--- a/contrib/python3-whitelist	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/python3-whitelist	Fri Aug 24 12:55:05 2018 -0700
@@ -1,4 +1,7 @@
 test-abort-checkin.t
+test-absorb-filefixupstate.py
+test-absorb-phase.t
+test-absorb-strip.t
 test-add.t
 test-addremove-similar.t
 test-addremove.t
@@ -48,6 +51,7 @@
 test-cbor.py
 test-censor.t
 test-changelog-exec.t
+test-check-code.t
 test-check-commit.t
 test-check-execute.t
 test-check-interfaces.py
@@ -179,9 +183,12 @@
 test-generaldelta.t
 test-getbundle.t
 test-git-export.t
+test-glog-beautifygraph.t
 test-glog-topological.t
+test-glog.t
 test-gpg.t
 test-graft.t
+test-grep.t
 test-hg-parseurl.py
 test-hghave.t
 test-hgignore.t
@@ -254,6 +261,7 @@
 test-largefiles.t
 test-lfs-largefiles.t
 test-lfs-pointer.py
+test-linelog.py
 test-linerange.py
 test-locate.t
 test-lock-badness.t
@@ -277,6 +285,7 @@
 test-merge-halt.t
 test-merge-internal-tools-pattern.t
 test-merge-local.t
+test-merge-no-file-change.t
 test-merge-remove.t
 test-merge-revert.t
 test-merge-revert2.t
@@ -296,6 +305,7 @@
 test-minifileset.py
 test-minirst.py
 test-mq-git.t
+test-mq-guards.t
 test-mq-header-date.t
 test-mq-header-from.t
 test-mq-merge.t
@@ -308,6 +318,7 @@
 test-mq-qimport-fail-cleanup.t
 test-mq-qnew.t
 test-mq-qpush-exact.t
+test-mq-qpush-fail.t
 test-mq-qqueue.t
 test-mq-qrefresh-interactive.t
 test-mq-qrefresh-replace-log-message.t
@@ -318,6 +329,7 @@
 test-mq-subrepo.t
 test-mq-symlinks.t
 test-mv-cp-st-diff.t
+test-narrow-acl.t
 test-narrow-archive.t
 test-narrow-clone-no-ellipsis.t
 test-narrow-clone-non-narrow-server.t
@@ -358,6 +370,9 @@
 test-parseindex2.py
 test-patch-offset.t
 test-patch.t
+test-patchbomb-bookmark.t
+test-patchbomb-tls.t
+test-patchbomb.t
 test-pathconflicts-merge.t
 test-pathconflicts-update.t
 test-pathencode.py
@@ -405,6 +420,7 @@
 test-pushvars.t
 test-qrecord.t
 test-rebase-abort.t
+test-rebase-backup.t
 test-rebase-base-flag.t
 test-rebase-bookmarks.t
 test-rebase-brute-force.t
@@ -446,6 +462,7 @@
 test-revert-flags.t
 test-revert-interactive.t
 test-revert-unknown.t
+test-revisions.t
 test-revlog-ancestry.py
 test-revlog-group-emptyiter.t
 test-revlog-mmapindex.t
@@ -529,6 +546,7 @@
 test-url-rev.t
 test-url.py
 test-username-newline.t
+test-util.py
 test-verify.t
 test-walk.t
 test-walkrepo.py
--- a/contrib/wix/help.wxs	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/wix/help.wxs	Fri Aug 24 12:55:05 2018 -0700
@@ -46,6 +46,7 @@
             <File Id="internals.censor.txt"       Name="censor.txt" />
             <File Id="internals.changegroups.txt" Name="changegroups.txt" />
             <File Id="internals.config.txt"       Name="config.txt" />
+            <File Id="internals.linelog.txt"      Name="linelog.txt" />
             <File Id="internals.requirements.txt" Name="requirements.txt" />
             <File Id="internals.revlogs.txt"      Name="revlogs.txt" />
             <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" />
--- a/contrib/zsh_completion	Sat Aug 18 10:24:57 2018 +0200
+++ b/contrib/zsh_completion	Fri Aug 24 12:55:05 2018 -0700
@@ -82,7 +82,7 @@
 
   if [[ -z "$cmd" ]]
   then
-    _arguments -s -w : $_hg_global_opts \
+    _arguments -s -S : $_hg_global_opts \
     ':mercurial command:_hg_commands'
     return
   fi
@@ -119,7 +119,7 @@
     _hg_cmd_${cmd}
   else
     # complete unknown commands normally
-    _arguments -s -w : $_hg_global_opts \
+    _arguments -s -S : $_hg_global_opts \
       '*:files:_hg_files'
   fi
 }
@@ -139,7 +139,7 @@
   typeset -gA _hg_alias_list
   local hline cmd cmdalias
 
-  _call_program hg hg debugcomplete -v | while read -A hline
+  _call_program hg HGPLAINEXCEPT=alias hg debugcomplete -v | while read -A hline
   do
     cmd=$hline[1]
     _hg_cmd_list+=($cmd)
@@ -245,10 +245,10 @@
   _wanted files expl 'missing files' _multi_parts / status_files
 }
 
-_hg_modified() {
+_hg_committable() {
   typeset -a status_files
-  _hg_status m
-  _wanted files expl 'modified files' _multi_parts / status_files
+  _hg_status mar
+  _wanted files expl 'modified, added or removed files' _multi_parts / status_files
 }
 
 _hg_resolve() {
@@ -371,22 +371,24 @@
 
 # Common options
 _hg_global_opts=(
-    '(--repository -R)'{-R+,--repository=}'[repository root directory]:repository:_files -/'
-    '--cwd[change working directory]:new working directory:_files -/'
-    '(--noninteractive -y)'{-y,--noninteractive}'[do not prompt, assume yes for any required answers]'
+    '(--repository -R)'{-R+,--repository=}'[repository root directory or name of overlay bundle file]:repository:_files -/'
+    '--cwd=[change working directory]:new working directory:_files -/'
+    '(--noninteractive -y)'{-y,--noninteractive}'[do not prompt, automatically pick the first choice for all prompts]'
     '(--verbose -v)'{-v,--verbose}'[enable additional output]'
     '*--config[set/override config option]:defined config items:_hg_config'
     '(--quiet -q)'{-q,--quiet}'[suppress output]'
     '(--help -h)'{-h,--help}'[display help and exit]'
-    '--debug[debug mode]'
+    '--debug[enable debugging output]'
     '--debugger[start debugger]'
-    '--encoding[set the charset encoding]'
-    '--encodingmode[set the charset encoding mode]'
-    '--lsprof[print improved command execution profile]'
-    '--traceback[print traceback on exception]'
+    '--encoding=[set the charset encoding]:encoding'
+    '--encodingmode=[set the charset encoding mode]:encoding mode'
+    '--traceback[always print a traceback on exception]'
     '--time[time how long the command takes]'
-    '--profile[profile]'
+    '--profile[print command execution profile]'
     '--version[output version information and exit]'
+    '--hidden[consider hidden changesets]'
+    '--color=[when to colorize]:when:(true false yes no always auto never debug)'
+    '--pager=[when to paginate (default: auto)]:when:(true false yes no always auto never)'
 )
 
 _hg_pat_opts=(
@@ -402,8 +404,8 @@
 _hg_date_user_opts=(
   '(--currentdate -D)'{-D,--currentdate}'[record the current date as commit date]'
   '(--currentuser -U)'{-U,--currentuser}'[record the current user as committer]'
-  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date:'
-  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user:')
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date'
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user')
 
 _hg_gitlike_opts=(
   '(--git -g)'{-g,--git}'[use git extended diff format]')
@@ -414,7 +416,8 @@
   '--nodates[omit dates from diff headers]')
 
 _hg_mergetool_opts=(
-  '(--tool -t)'{-t+,--tool=}'[specify merge tool]:tool:')
+  '(--tool -t)'{-t+,--tool=}'[specify merge tool]:merge tool'
+)
 
 _hg_dryrun_opts=(
   '(--dry-run -n)'{-n,--dry-run}'[do not perform actions, just print output]')
@@ -422,28 +425,33 @@
 _hg_ignore_space_opts=(
   '(--ignore-all-space -w)'{-w,--ignore-all-space}'[ignore white space when comparing lines]'
   '(--ignore-space-change -b)'{-b,--ignore-space-change}'[ignore changes in the amount of white space]'
-  '(--ignore-blank-lines -B)'{-B,--ignore-blank-lines}'[ignore changes whose lines are all blank]')
+  '(--ignore-blank-lines -B)'{-B,--ignore-blank-lines}'[ignore changes whose lines are all blank]'
+  '(--ignore-space-at-eol -Z)'{-Z,--ignore-space-at-eol}'[ignore changes in whitespace at EOL]'
+)
 
-_hg_style_opts=(
-  '--style[display using template map file]:'
-  '--template[display with template]:')
+_hg_template_opts=(
+  '--template[display with template]:template'
+)
 
 _hg_log_opts=(
-  $_hg_global_opts $_hg_style_opts $_hg_gitlike_opts
-  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:'
+  $_hg_global_opts $_hg_template_opts $_hg_gitlike_opts
+  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:limit'
   '(--no-merges -M)'{-M,--no-merges}'[do not show merges]'
   '(--patch -p)'{-p,--patch}'[show patch]'
   '--stat[output diffstat-style summary of changes]'
+  '(--graph -G)'{-G,--graph}'[show the revision DAG]'
 )
 
 _hg_commit_opts=(
   '(-m --message -l --logfile --edit -e)'{-e,--edit}'[edit commit message]'
-  '(-e --edit -l --logfile --message -m)'{-m+,--message=}'[use <text> as commit message]:message:'
+  '(-e --edit -l --logfile --message -m)'{-m+,--message=}'[use <text> as commit message]:message'
   '(-e --edit -m --message --logfile -l)'{-l+,--logfile=}'[read the commit message from <file>]:log file:_files')
 
 _hg_remote_opts=(
-  '(--ssh -e)'{-e+,--ssh=}'[specify ssh command to use]:'
-  '--remotecmd[specify hg command to run on the remote side]:')
+  '(--ssh -e)'{-e+,--ssh=}'[specify ssh command to use]:command'
+  '--remotecmd=[specify hg command to run on the remote side]:remote command'
+  '--insecure[do not verify server certificate (ignoring web.cacerts config)]'
+)
 
 _hg_branch_bmark_opts=(
   '(--bookmark -B)'{-B+,--bookmark=}'[specify bookmark(s)]:bookmark:_hg_bookmarks'
@@ -458,50 +466,53 @@
 }
 
 _hg_cmd_add() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
   '*:unknown files:_hg_unknown'
 }
 
 _hg_cmd_addremove() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
-  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:similarity' \
   '*:unknown or missing files:_hg_addremove'
 }
 
 _hg_cmd_annotate() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_ignore_space_opts $_hg_pat_opts \
   '(--rev -r)'{-r+,--rev=}'[annotate the specified revision]:revision:_hg_labels' \
-  '(--follow -f)'{-f,--follow}'[follow file copies and renames]' \
+  "--no-follow[don't follow copies and renames]" \
   '(--text -a)'{-a,--text}'[treat all files as text]' \
-  '(--user -u)'{-u,--user}'[list the author]' \
-  '(--date -d)'{-d,--date}'[list the date]' \
+  '(--user -u)'{-u,--user}'[list the author (long with -v)]' \
+  '(--file -f)'{-f,--file}'[list the filename]' \
+  '(--date -d)'{-d,--date}'[list the date (short with -q)]' \
   '(--number -n)'{-n,--number}'[list the revision number (default)]' \
   '(--changeset -c)'{-c,--changeset}'[list the changeset]' \
+  '(--line-number -l)'{-l,--line-number}'[show line number at the first appearance]' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_archive() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '--no-decode[do not pass files through decoders]' \
-  '(--prefix -p)'{-p+,--prefix=}'[directory prefix for files in archive]:' \
+  '(--prefix -p)'{-p+,--prefix=}'[directory prefix for files in archive]:prefix' \
   '(--rev -r)'{-r+,--rev=}'[revision to distribute]:revision:_hg_labels' \
   '(--type -t)'{-t+,--type=}'[type of distribution to create]:archive type:(files tar tbz2 tgz uzip zip)' \
   '*:destination:_files'
 }
 
 _hg_cmd_backout() {
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
     '--merge[merge with old dirstate parent after backout]' \
-    '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
+    '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
     '--parent[parent to choose when backing out merge]' \
-    '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-    '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-    '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text:' \
-    '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files'
+    '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+    '(--rev -r 1)'{-r+,--rev=}'[revision to backout]:revision:_hg_labels' \
+    '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text' \
+    '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files' \
+    ':revision:_hg_labels'
 }
 
 _hg_cmd_bisect() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(-)'{-r,--reset}'[reset bisect state]' \
   '(--extend -e)'{-e,--extend}'[extend the bisect range]' \
   '(--good -g --bad -b --skip -s --reset -r)'{-g,--good}'[mark changeset good]'::revision:_hg_labels \
@@ -512,7 +523,7 @@
 }
 
 _hg_cmd_bookmarks() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[force]' \
   '(--inactive -i)'{-i,--inactive}'[mark a bookmark inactive]' \
   '(--rev -r --delete -d --rename -m)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
@@ -522,112 +533,123 @@
 }
 
 _hg_cmd_branch() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[set branch name even if it shadows an existing branch]' \
   '(--clean -C)'{-C,--clean}'[reset branch name to parent branch name]'
 }
 
 _hg_cmd_branches() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--active -a)'{-a,--active}'[show only branches that have unmerge heads]' \
+  _arguments -s -S : $_hg_global_opts \
   '(--closed -c)'{-c,--closed}'[show normal and closed branches]'
 }
 
 _hg_cmd_bundle() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
-  '(--force -f)'{-f,--force}'[run even when remote repository is unrelated]' \
-  '(2)*--base[a base changeset to specify instead of a destination]:revision:_hg_labels' \
-  '(--branch -b)'{-b+,--branch=}'[a specific branch to bundle]:' \
-  '(--rev -r)'{-r+,--rev=}'[changeset(s) to bundle]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
+  '(--force -f)'{-f,--force}'[run even when the destination is unrelated]' \
+  '(2)*--base[a base changeset assumed to be available at the destination]:revision:_hg_labels' \
+  '*'{-b+,--branch=}'[a specific branch you would like to bundle]:branch:_hg_branches' \
+  '*'{-r+,--rev=}'[a changeset intended to be added to the destination]:revision:_hg_labels' \
   '--all[bundle all changesets in the repository]' \
+  '--type[bundle compression type to use (default: bzip2)]:bundle type' \
   ':output file:_files' \
   ':destination repository:_files -/'
 }
 
 _hg_cmd_cat() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:filespec:' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
+  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:format string' \
   '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
   '--decode[apply any matching decode filter]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_clone() {
-  _arguments -s -w : $_hg_global_opts $_hg_clone_opts \
-  '(--rev -r)'{-r+,--rev=}'[a changeset you would like to have after cloning]:' \
-  '(--updaterev -u)'{-u+,--updaterev=}'[revision, tag or branch to check out]:' \
-  '(--branch -b)'{-b+,--branch=}'[clone only the specified branch]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_clone_opts \
+  '*'{-r+,--rev=}'[do not clone everything, but include this changeset and its ancestors]:revision' \
+  '(--updaterev -u)'{-u+,--updaterev=}'[revision, tag or branch to check out]:revision' \
+  '*'{-b+,--branch=}"[do not clone everything, but include this branch's changesets and their ancestors]:branch" \
   ':source repository:_hg_remote' \
   ':destination:_hg_clone_dest'
 }
 
 _hg_cmd_commit() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '(--addremove -A)'{-A,--addremove}'[mark new/missing files as added/removed before committing]' \
-  '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text:' \
+  '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text' \
   '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-  '--amend[amend the parent of the working dir]' \
-  '--close-branch[mark a branch as closed]' \
-  '*:file:_hg_files'
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+  '--amend[amend the parent of the working directory]' \
+  '--close-branch[mark a branch head as closed]' \
+  '(--interactive -i)'{-i,--interactive}'[use interactive mode]' \
+  '(--secret -s)'{-s,--secret}'[use the secret phase for committing]' \
+  '*:file:_hg_committable'
 }
 
 _hg_cmd_copy() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--after -A)'{-A,--after}'[record a copy that has already occurred]' \
   '(--force -f)'{-f,--force}'[forcibly copy over an existing managed file]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_diff() {
+  local context state state_descr line ret=1
   typeset -A opt_args
-  _arguments -s -w : $_hg_global_opts $_hg_diff_opts $_hg_ignore_space_opts \
+
+  _arguments -s -S : $_hg_global_opts $_hg_diff_opts $_hg_ignore_space_opts \
                      $_hg_pat_opts $_hg_subrepos_opts \
   '*'{-r+,--rev=}'[revision]:revision:_hg_revrange' \
+  '--noprefix[omit a/ and b/ prefixes from filenames]' \
   '(--show-function -p)'{-p,--show-function}'[show which function each change is in]' \
-  '(--change -c)'{-c+,--change=}'[change made by revision]:' \
+  '(--change -c)'{-c+,--change=}'[change made by revision]:revision:_hg_labels' \
   '(--text -a)'{-a,--text}'[treat all files as text]' \
   '--reverse[produce a diff that undoes the changes]' \
-  '(--unified -U)'{-U+,--unified=}'[number of lines of context to show]:' \
+  '(--unified -U)'{-U+,--unified=}'[number of lines of context to show]:count' \
   '--stat[output diffstat-style summary of changes]' \
-  '*:file:->diff_files'
+  '--root=[produce diffs relative to subdirectory]:directory:_files -/' \
+  '*:file:->diff_files' && ret=0
 
   if [[ $state == 'diff_files' ]]
   then
     if [[ -n $opt_args[-r] ]]
     then
-      _hg_files
+      _hg_files && ret=0
     else
-      _hg_modified
+      _hg_committable && ret=0
     fi
   fi
+
+  return ret
 }
 
 _hg_cmd_export() {
-  _arguments -s -w : $_hg_global_opts $_hg_diff_opts \
-  '(--outout -o)'{-o+,--output=}'[print output to file with formatted name]:filespec:' \
+  _arguments -s -S : $_hg_global_opts $_hg_diff_opts \
+  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:format string' \
   '--switch-parent[diff against the second parent]' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[revisions to export]:revision:_hg_labels' \
   '*:revision:_hg_labels'
 }
 
 _hg_cmd_forget() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  '(--interactive -i)'{-i,--interactive}'[use interactive mode]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_graft() {
-  _arguments -s -w : $_hg_global_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_dryrun_opts \
                      $_hg_date_user_opts $_hg_mergetool_opts \
-  '(--continue -c)'{-c,--continue}'[resume interrupted graft]' \
+  '*'{-r+,--rev=}'[revisions to graft]:revision:_hg_labels' \
+  '(--continue -c --abort -a)'{-c,--continue}'[resume interrupted graft]' \
+  '(--continue -c --abort -a)'{-a,--abort}'[abort interrupted graft]' \
   '(--edit -e)'{-e,--edit}'[invoke editor on commit messages]' \
   '--log[append graft info to log message]' \
   '*:revision:_hg_labels'
 }
 
 _hg_cmd_grep() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL]' \
   '--all[print all revisions with matches]' \
   '(--follow -f)'{-f,--follow}'[follow changeset or file history]' \
@@ -642,93 +664,92 @@
 }
 
 _hg_cmd_heads() {
-  _arguments -s -w : $_hg_global_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_template_opts \
   '(--topo -t)'{-t,--topo}'[show topological heads only]' \
   '(--closed -c)'{-c,--closed}'[show normal and closed branch heads]' \
   '(--rev -r)'{-r+,--rev=}'[show only heads which are descendants of rev]:revision:_hg_labels'
 }
 
 _hg_cmd_help() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--extension -e)'{-e,--extension}'[show only help for extensions]' \
   '(--command -c)'{-c,--command}'[show only help for commands]' \
-  '(--keyword -k)'{-k+,--keyword}'[show topics matching keyword]' \
+  '(--keyword -k)'{-k,--keyword}'[show topics matching keyword]' \
   '*:mercurial help topic:_hg_help_topics'
 }
 
 _hg_cmd_identify() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
   '(--rev -r)'{-r+,--rev=}'[identify the specified rev]:revision:_hg_labels' \
   '(--num -n)'{-n,--num}'[show local revision number]' \
   '(--id -i)'{-i,--id}'[show global revision id]' \
   '(--branch -b)'{-b,--branch}'[show branch]' \
-  '(--bookmark -B)'{-B,--bookmark}'[show bookmarks]' \
+  '(--bookmarks -B)'{-B,--bookmarks}'[show bookmarks]' \
   '(--tags -t)'{-t,--tags}'[show tags]'
 }
 
 _hg_cmd_import() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts \
-  '(--strip -p)'{-p+,--strip=}'[directory strip option for patch (default: 1)]:count:' \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts \
+  '(--strip -p)'{-p+,--strip=}'[directory strip option for patch (default: 1)]:count' \
   '(--force -f)'{-f,--force}'[skip check for outstanding uncommitted changes]' \
   '--bypass[apply patch without touching the working directory]' \
   '--no-commit[do not commit, just update the working directory]' \
   '--partial[commit even if some hunks fail]' \
-  '--exact[apply patch to the nodes from which it was generated]' \
+  '--exact[abort if patch would apply lossily]' \
   '--import-branch[use any branch information in patch (implied by --exact)]' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:' \
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:similarity' \
   '*:patch:_files'
 }
 
 _hg_cmd_incoming() {
-  _arguments -s -w : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
                      $_hg_subrepos_opts \
   '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '(--rev -r)'{-r+,--rev=}'[a specific revision up to which you would like to pull]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[a remote changeset intended to be added]:revision:_hg_labels' \
   '(--newest-first -n)'{-n,--newest-first}'[show newest record first]' \
   '--bundle[file to store the bundles into]:bundle file:_files' \
   ':source:_hg_remote'
 }
 
 _hg_cmd_init() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
   ':dir:_files -/'
 }
 
 _hg_cmd_locate() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
   '(--rev -r)'{-r+,--rev=}'[search repository as it stood at revision]:revision:_hg_labels' \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
-  '(--fullpath -f)'{-f,--fullpath}'[print complete paths]' \
+  '(--fullpath -f)'{-f,--fullpath}'[print complete paths from the filesystem root]' \
   '*:search pattern:_hg_files'
 }
 
 _hg_cmd_log() {
-  _arguments -s -w : $_hg_log_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_log_opts $_hg_pat_opts \
   '(--follow --follow-first -f)'{-f,--follow}'[follow changeset or history]' \
   '(-f --follow)--follow-first[only follow the first parent of merge changesets]' \
   '(--copies -C)'{-C,--copies}'[show copied files]' \
-  '(--keyword -k)'{-k+,--keyword}'[search for a keyword]:' \
+  '*'{-k+,--keyword=}'[search for a keyword]:keyword' \
   '*'{-r+,--rev=}'[show the specified revision or revset]:revision:_hg_revrange' \
   '(--only-merges -m)'{-m,--only-merges}'[show only merges]' \
-  '(--prune -P)'{-P+,--prune=}'[do not display revision or any of its ancestors]:revision:_hg_labels' \
-  '(--graph -G)'{-G,--graph}'[show the revision DAG]' \
-  '(--branch -b)'{-b+,--branch=}'[show changesets within the given named branch]:branch:_hg_branches' \
-  '(--user -u)'{-u+,--user=}'[revisions committed by user]:user:' \
-  '(--date -d)'{-d+,--date=}'[show revisions matching date spec]:date:' \
+  '*'{-P+,--prune=}'[do not display revision or any of its ancestors]:revision:_hg_labels' \
+  '*'{-b+,--branch=}'[show changesets within the given named branch]:branch:_hg_branches' \
+  '*'{-u+,--user=}'[revisions committed by user]:user' \
+  '(--date -d)'{-d+,--date=}'[show revisions matching date spec]:date' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_manifest() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '--all[list files from all revisions]' \
   '(--rev -r)'{-r+,--rev=}'[revision to display]:revision:_hg_labels' \
   ':revision:_hg_labels'
 }
 
 _hg_cmd_merge() {
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts \
   '(--force -f)'{-f,--force}'[force a merge with outstanding changes]' \
   '(--rev -r 1)'{-r+,--rev=}'[revision to merge]:revision:_hg_mergerevs' \
   '(--preview -P)'{-P,--preview}'[review revisions to merge (no merge is performed)]' \
@@ -736,167 +757,176 @@
 }
 
 _hg_cmd_outgoing() {
-  _arguments -s -w : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
                      $_hg_subrepos_opts \
-  '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '*'{-r+,--rev=}'[a specific revision you would like to push]:revision:_hg_revrange' \
+  '(--force -f)'{-f,--force}'[run even when the destination is unrelated]' \
+  '*'{-r+,--rev=}'[a changeset intended to be included in the destination]:revision:_hg_revrange' \
   '(--newest-first -n)'{-n,--newest-first}'[show newest record first]' \
   ':destination:_hg_remote'
 }
 
 _hg_cmd_parents() {
-  _arguments -s -w : $_hg_global_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_template_opts \
   '(--rev -r)'{-r+,--rev=}'[show parents of the specified rev]:revision:_hg_labels' \
   ':last modified file:_hg_files'
 }
 
 _hg_cmd_paths() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':path:_hg_paths'
 }
 
 _hg_cmd_phase() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--public -p)'{-p,--public}'[set changeset phase to public]' \
-  '(--draft -d)'{-d,--draft}'[set changeset phase to draft]' \
-  '(--secret -s)'{-s,--secret}'[set changeset phase to secret]' \
+  _arguments -s -S : $_hg_global_opts \
+  '(--public -p --draft -d --secret -s)'{-p,--public}'[set changeset phase to public]' \
+  '(--public -p --draft -d --secret -s)'{-d,--draft}'[set changeset phase to draft]' \
+  '(--public -p --draft -d --secret -s)'{-s,--secret}'[set changeset phase to secret]' \
   '(--force -f)'{-f,--force}'[allow to move boundary backward]' \
-  '(--rev -r)'{-r+,--rev=}'[target revision]:revision:_hg_labels' \
-  ':revision:_hg_labels'
+  '*'{-r+,--rev=}'[target revision]:revision:_hg_labels' \
+  '*:revision:_hg_labels'
 }
 
 _hg_cmd_pull() {
-  _arguments -s -w : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
   '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '(--update -u)'{-u,--update}'[update to new tip if changesets were pulled]' \
-  '(--rev -r)'{-r+,--rev}'[a specific revision up to which you would like to pull]:revision:' \
+  '(--update -u)'{-u,--update}'[update to new branch head if new descendants were pulled]' \
+  '*'{-r+,--rev=}'[a remote changeset intended to be added]:revision:_hg_labels' \
   ':source:_hg_remote'
 }
 
 _hg_cmd_push() {
-  _arguments -s -w : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
   '(--force -f)'{-f,--force}'[force push]' \
-  '(--rev -r)'{-r+,--rev=}'[a specific revision you would like to push]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[a changeset intended to be included in the destination]:revision:_hg_labels' \
   '--new-branch[allow pushing a new branch]' \
   ':destination:_hg_remote'
 }
 
 _hg_cmd_remove() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--after -A)'{-A,--after}'[record remove that has already occurred]' \
-  '(--force -f)'{-f,--force}'[remove file even if modified]' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  '(--after -A)'{-A,--after}'[record delete for missing files]' \
+  '(--force -f)'{-f,--force}'[forget added files, delete modified files]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_rename() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--after -A)'{-A,--after}'[record a rename that has already occurred]' \
   '(--force -f)'{-f,--force}'[forcibly copy over an existing managed file]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_resolve() {
-  local context state line
+  local context state state_descr line ret=1
   typeset -A opt_args
 
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
   '(--all -a)'{-a,--all}'[select all unresolved files]' \
   '(--no-status -n)'{-n,--no-status}'[hide status prefix]' \
   '(--list -l --mark -m --unmark -u)'{-l,--list}'[list state of files needing merge]:*:merged files:->resolve_files' \
   '(--mark -m --list -l --unmark -u)'{-m,--mark}'[mark files as resolved]:*:unresolved files:_hg_unresolved' \
-  '(--unmark -u --list -l --mark -m)'{-u,--unmark}'[unmark files as resolved]:*:resolved files:_hg_resolved' \
-  '*:file:_hg_unresolved'
+  '(--unmark -u --list -l --mark -m)'{-u,--unmark}'[mark files as unresolved]:*:resolved files:_hg_resolved' \
+  '*:file:_hg_unresolved' && ret=0
 
   if [[ $state == 'resolve_files' ]]
   then
     _alternative 'files:resolved files:_hg_resolved' \
-      'files:unresolved files:_hg_unresolved'
+      'files:unresolved files:_hg_unresolved' && ret=0
   fi
+
+  return ret
 }
 
 _hg_cmd_revert() {
-  local context state line
+  local context state state_descr line ret=1
   typeset -A opt_args
 
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--all -a :)'{-a,--all}'[revert all changes when no arguments given]' \
   '(--rev -r)'{-r+,--rev=}'[revision to revert to]:revision:_hg_labels' \
   '(--no-backup -C)'{-C,--no-backup}'[do not save backup copies of files]' \
-  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date code:' \
-  '*:file:->diff_files'
+  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date' \
+  '(--interactive -i)'{-i,--interactive}'[interactively select the changes]' \
+  '*:file:->revert_files' && ret=0
 
-  if [[ $state == 'diff_files' ]]
+  if [[ $state == 'revert_files' ]]
   then
     if [[ -n $opt_args[-r] ]]
     then
-      _hg_files
+      _hg_files && ret=0
     else
       typeset -a status_files
       _hg_status mard
-      _wanted files expl 'modified, added, removed or deleted file' _multi_parts / status_files
+      _wanted files expl 'modified, added, removed or deleted file' _multi_parts / status_files && ret=0
     fi
   fi
+
+  return ret
 }
 
 _hg_cmd_rollback() {
-  _arguments -s -w : $_hg_global_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_dryrun_opts \
   '(--force -f)'{-f,--force}'[ignore safety measures]' \
 }
 
 _hg_cmd_serve() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--accesslog -A)'{-A+,--accesslog=}'[name of access log file]:log file:_files' \
-  '(--errorlog -E)'{-E+,--errorlog=}'[name of error log file]:log file:_files' \
+  _arguments -s -S : $_hg_global_opts \
+  '(--accesslog -A)'{-A+,--accesslog=}'[name of access log file to write to]:log file:_files' \
+  '(--errorlog -E)'{-E+,--errorlog=}'[name of error log file to write to]:log file:_files' \
   '(--daemon -d)'{-d,--daemon}'[run server in background]' \
-  '(--port -p)'{-p+,--port=}'[listen port]:listen port:' \
-  '(--address -a)'{-a+,--address=}'[interface address]:interface address:' \
-  '--prefix[prefix path to serve from]:directory:_files' \
-  '(--name -n)'{-n+,--name=}'[name to show in web pages]:repository name:' \
-  '--web-conf[name of the hgweb config file]:webconf_file:_files' \
-  '--pid-file[name of file to write process ID to]:pid_file:_files' \
-  '--cmdserver[cmdserver mode]:mode:' \
-  '(--templates -t)'{-t,--templates}'[web template directory]:template dir:_files -/' \
-  '--style[web template style]:style' \
+  '(--port -p)'{-p+,--port=}'[port to listen on (default: 8000)]:listen port' \
+  '(--address -a)'{-a+,--address=}'[address to listen on (default: all interfaces)]:interface address' \
+  '--prefix=[prefix path to serve from (default: server root)]:directory:_files' \
+  '(--name -n)'{-n+,--name=}'[name to show in web pages (default: working directory)]:repository name' \
+  '--web-conf=[name of the hgweb config file]:config file:_files' \
+  '--pid-file=[name of file to write process ID to]:pid file:_files' \
+  '--cmdserver[for remote clients]' \
+  '(--templates -t)'{-t+,--templates=}'[web template directory]:template dir:_files -/' \
+  '--style=[template style to use]:style' \
   '--stdio[for remote clients]' \
-  '--certificate[certificate file]:cert_file:_files' \
-  '(--ipv6 -6)'{-6,--ipv6}'[use IPv6 in addition to IPv4]'
+  '(--ipv6 -6)'{-6,--ipv6}'[use IPv6 in addition to IPv4]' \
+  '--certificate=[SSL certificate file]:certificate file:_files' \
+  '--print-url[start and print only the URL]'
 }
 
 _hg_cmd_showconfig() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--untrusted -u)'{-u,--untrusted}'[show untrusted configuration options]' \
-  ':config item:_hg_config'
+  '(--edit -e)'{-e,--edit}'[edit user config]' \
+  '(--local -l --global -g)'{-l,--local}'[edit repository config]' \
+  '(--local -l --global -g)'{-g,--global}'[edit global config]' \
+  '*:config item:_hg_config'
 }
 
 _hg_cmd_status() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '(--all -A)'{-A,--all}'[show status of all files]' \
   '(--modified -m)'{-m,--modified}'[show only modified files]' \
   '(--added -a)'{-a,--added}'[show only added files]' \
   '(--removed -r)'{-r,--removed}'[show only removed files]' \
   '(--deleted -d)'{-d,--deleted}'[show only deleted (but tracked) files]' \
   '(--clean -c)'{-c,--clean}'[show only files without changes]' \
-  '(--unknown -u)'{-u,--unknown}'[show only unknown files]' \
+  '(--unknown -u)'{-u,--unknown}'[show only unknown (not tracked) files]' \
   '(--ignored -i)'{-i,--ignored}'[show ignored files]' \
   '(--no-status -n)'{-n,--no-status}'[hide status prefix]' \
   '(--copies -C)'{-C,--copies}'[show source of copied files]' \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
-  '--rev[show difference from revision]:revision:_hg_labels' \
-  '--change[list the changed files of a revision]:revision:_hg_labels' \
+  '*--rev=[show difference from revision]:revision:_hg_labels' \
+  '--change=[list the changed files of a revision]:revision:_hg_labels' \
   '*:files:_files'
 }
 
 _hg_cmd_summary() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '--remote[check for push and pull]'
 }
 
 _hg_cmd_tag() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--local -l)'{-l,--local}'[make the tag local]' \
-  '(--message -m)'{-m+,--message=}'[message for tag commit log entry]:message:' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
+  '(--message -m)'{-m+,--message=}'[message for tag commit log entry]:message' \
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
   '(--rev -r)'{-r+,--rev=}'[revision to tag]:revision:_hg_labels' \
   '(--force -f)'{-f,--force}'[force tag]' \
   '--remove[remove a tag]' \
@@ -905,22 +935,23 @@
 }
 
 _hg_cmd_tip() {
-  _arguments -s -w : $_hg_global_opts $_hg_gitlike_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_gitlike_opts $_hg_template_opts \
   '(--patch -p)'{-p,--patch}'[show patch]'
 }
 
 _hg_cmd_unbundle() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--update -u)'{-u,--update}'[update to new tip if changesets were unbundled]' \
-  ':files:_files'
+  '*:files:_files'
 }
 
 _hg_cmd_update() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--clean -C)'{-C,--clean}'[overwrite locally modified files]' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-  '(--check -c)'{-c,--check}'[update across branches if no uncommitted changes]' \
-  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts \
+  '(--clean -C)'{-C,--clean}'[discard uncommitted changes (no backup)]' \
+  '(--check -c)'{-c,--check}'[require clean working directory]' \
+  '(--merge -m)'{-m,--merge}'[merge uncommitted changes]' \
+  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date' \
+  '(--rev -r 1)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
   ':revision:_hg_labels'
 }
 
@@ -928,8 +959,8 @@
 
 # HGK
 _hg_cmd_view() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:' \
+  _arguments -s -S : $_hg_global_opts \
+  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:limit' \
   ':revision range:_hg_labels'
 }
 
@@ -983,54 +1014,54 @@
   '(--summary -s)'{-s,--summary}'[print first line of patch header]')
 
 _hg_cmd_qapplied() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--last -1)'{-1,--last}'[show only the preceding applied patch]' \
   '*:patch:_hg_qapplied'
 }
 
 _hg_cmd_qclone() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts $_hg_clone_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts $_hg_clone_opts \
   '(--patches -p)'{-p+,--patches=}'[location of source patch repository]:' \
   ':source repository:_hg_remote' \
   ':destination:_hg_clone_dest'
 }
 
 _hg_cmd_qdelete() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--keep -k)'{-k,--keep}'[keep patch file]' \
   '*'{-r+,--rev=}'[stop managing a revision]:applied patch:_hg_revrange' \
   '*:unapplied patch:_hg_qdeletable'
 }
 
 _hg_cmd_qdiff() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_diff_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_diff_opts \
                      $_hg_ignore_space_opts \
   '*:pattern:_hg_files'
 }
 
 _hg_cmd_qfinish() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--applied -a)'{-a,--applied}'[finish all applied patches]' \
   '*:patch:_hg_qapplied'
 }
 
 _hg_cmd_qfold() {
-  _arguments -s -w : $_hg_global_opts $_h_commit_opts \
-  '(--keep,-k)'{-k,--keep}'[keep folded patch files]' \
+  _arguments -s -S : $_hg_global_opts $_h_commit_opts \
+  '(--keep -k)'{-k,--keep}'[keep folded patch files]' \
   '(--force -f)'{-f,--force}'[overwrite any local changes]' \
   '--no-backup[do not save backup copies of files]' \
   '*:unapplied patch:_hg_qunapplied'
 }
 
 _hg_cmd_qgoto() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[overwrite any local changes]' \
   '--keep-changes[tolerate non-conflicting local changes]' \
   ':patch:_hg_qseries'
 }
 
 _hg_cmd_qguard() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--list -l)'{-l,--list}'[list all patches and guards]' \
   '(--none -n)'{-n,--none}'[drop all guards]' \
   ':patch:_hg_qseries' \
@@ -1038,14 +1069,14 @@
 }
 
 _hg_cmd_qheader() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':patch:_hg_qseries'
 }
 
 _hg_cmd_qimport() {
-  _arguments -s -w : $_hg_global_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_gitlike_opts \
   '(--existing -e)'{-e,--existing}'[import file in patch dir]' \
-  '(--name -n 2)'{-n+,--name}'[patch file name]:name:' \
+  '(--name -n 2)'{-n+,--name}'[patch file name]:name' \
   '(--force -f)'{-f,--force}'[overwrite existing files]' \
   '*'{-r+,--rev=}'[place existing revisions under mq control]:revision:_hg_revrange' \
   '(--push -P)'{-P,--push}'[qpush after importing]' \
@@ -1053,16 +1084,16 @@
 }
 
 _hg_cmd_qnew() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
   ':patch:'
 }
 
 _hg_cmd_qnext() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_qpop() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--all -a :)'{-a,--all}'[pop all patches]' \
   '(--force -f)'{-f,--force}'[forget any local changes]' \
   '--keep-changes[tolerate non-conflicting local changes]' \
@@ -1071,11 +1102,11 @@
 }
 
 _hg_cmd_qprev() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_qpush() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--all -a :)'{-a,--all}'[apply all patches]' \
   '(--list -l)'{-l,--list}'[list patch name in commit text]' \
   '(--force -f)'{-f,--force}'[apply if the patch has rejects]' \
@@ -1087,19 +1118,19 @@
 }
 
 _hg_cmd_qrefresh() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_gitlike_opts \
   '(--short -s)'{-s,--short}'[short refresh]' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_qrename() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':patch:_hg_qunapplied' \
   ':destination:'
 }
 
 _hg_cmd_qselect() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--none -n :)'{-n,--none}'[disable all guards]' \
   '(--series -s :)'{-s,--series}'[list all guards in series file]' \
   '--pop[pop to before first guarded applied patch]' \
@@ -1108,32 +1139,32 @@
 }
 
 _hg_cmd_qseries() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--missing -m)'{-m,--missing}'[print patches not in series]'
 }
 
 _hg_cmd_qunapplied() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--first -1)'{-1,--first}'[show only the first patch]'
 }
 
 _hg_cmd_qtop() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_strip() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--force -f)'{-f,--force}'[force removal, discard uncommitted changes, no backup]' \
-  '(--no-backup -n)'{-n,--no-backup}'[no backups]' \
-  '(--keep -k)'{-k,--keep}'[do not modify working copy during strip]' \
-  '(--bookmark -B)'{-B+,--bookmark=}'[remove revs only reachable from given bookmark]:bookmark:_hg_bookmarks' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-  ':revision:_hg_labels'
+  _arguments -s -S : $_hg_global_opts \
+  '(--force -f)'{-f,--force}'[force removal of changesets, discard uncommitted changes (no backup)]' \
+  '--no-backup[no backups]' \
+  '(--keep -k)'{-k,--keep}'[do not modify working directory during strip]' \
+  '*'{-B+,--bookmark=}'[remove revs only reachable from given bookmark]:bookmark:_hg_bookmarks' \
+  '*'{-r+,--rev=}'[revision]:revision:_hg_labels' \
+  '*:revision:_hg_labels'
 }
 
 # Patchbomb
 _hg_cmd_email() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts $_hg_gitlike_opts \
   '--plain[omit hg patch header]' \
   '--body[send patches as inline message text (default)]' \
   '(--outgoing -o)'{-o,--outgoing}'[send changes not found in the target repository]' \
@@ -1141,79 +1172,76 @@
   '--bundlename[name of the bundle attachment file (default: bundle)]:' \
   '*'{-r+,--rev=}'[search in given revision range]:revision:_hg_revrange' \
   '--force[run even when remote repository is unrelated (with -b/--bundle)]' \
-  '*--base[a base changeset to specify instead of a destination (with -b/--bundle)]:revision:_hg_labels' \
+  '*--base=[a base changeset to specify instead of a destination (with -b/--bundle)]:revision:_hg_labels' \
   '--intro[send an introduction email for a single patch]' \
   '(--inline -i --attach -a)'{-a,--attach}'[send patches as attachments]' \
   '(--attach -a --inline -i)'{-i,--inline}'[send patches as inline attachments]' \
-  '*--bcc[email addresses of blind carbon copy recipients]:email:' \
-  '*'{-c+,--cc}'[email addresses of copy recipients]:email:' \
+  '*--bcc=[email addresses of blind carbon copy recipients]:email' \
+  '*'{-c+,--cc=}'[email addresses of copy recipients]:email' \
   '(--diffstat -d)'{-d,--diffstat}'[add diffstat output to messages]' \
-  '--date[use the given date as the sending date]:date:' \
-  '--desc[use the given file as the series description]:files:_files' \
-  '(--from -f)'{-f,--from}'[email address of sender]:email:' \
+  '--date=[use the given date as the sending date]:date' \
+  '--desc=[use the given file as the series description]:files:_files' \
+  '(--from -f)'{-f+,--from=}'[email address of sender]:email' \
   '(--test -n)'{-n,--test}'[print messages that would be sent]' \
-  '(--mbox -m)'{-m,--mbox}'[write messages to mbox file instead of sending them]:file:' \
-  '*--reply-to[email addresses replies should be sent to]:email:' \
-  '(--subject -s)'{-s,--subject}'[subject of first message (intro or single patch)]:subject:' \
-  '--in-reply-to[message identifier to reply to]:msgid:' \
-  '*--flag[flags to add in subject prefixes]:flag:' \
-  '*'{-t,--to}'[email addresses of recipients]:email:' \
+  '(--mbox -m)'{-m+,--mbox=}'[write messages to mbox file instead of sending them]:file:_files' \
+  '*--reply-to=[email addresses replies should be sent to]:email' \
+  '(--subject -s)'{-s+,--subject=}'[subject of first message (intro or single patch)]:subject' \
+  '--in-reply-to=[message identifier to reply to]:msgid' \
+  '*--flag=[flags to add in subject prefixes]:flag' \
+  '*'{-t+,--to=}'[email addresses of recipients]:email' \
   ':revision:_hg_revrange'
 }
 
 # Rebase
 _hg_cmd_rebase() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_mergetool_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_mergetool_opts \
   '*'{-r+,--rev=}'[rebase these revisions]:revision:_hg_revrange' \
-  '(--source -s)'{-s+,--source=}'[rebase from the specified changeset]:revision:_hg_labels' \
-  '(--base -b)'{-b+,--base=}'[rebase from the base of the specified changeset]:revision:_hg_labels' \
+  '(--source -s --base -b)'{-s+,--source=}'[rebase the specified changeset and descendants]:revision:_hg_labels' \
+  '(--source -s --base -b)'{-b+,--base=}'[rebase everything from branching point of specified changeset]:revision:_hg_labels' \
   '(--dest -d)'{-d+,--dest=}'[rebase onto the specified changeset]:revision:_hg_labels' \
-  '--collapse[collapse the rebased changeset]' \
-  '--keep[keep original changeset]' \
-  '--keepbranches[keep original branch name]' \
-  '(--continue -c)'{-c,--continue}'[continue an interrupted rebase]' \
-  '(--abort -a)'{-a,--abort}'[abort an interrupted rebase]' \
+  '--collapse[collapse the rebased changesets]' \
+  '(--keep -k)'{-k,--keep}'[keep original changesets]' \
+  '--keepbranches[keep original branch names]' \
+  '(--continue -c --abort -a)'{-c,--continue}'[continue an interrupted rebase]' \
+  '(--continue -c --abort -a)'{-a,--abort}'[abort an interrupted rebase]' \
 }
 
 # Record
 _hg_cmd_record() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_pat_opts \
                      $_hg_ignore_space_opts $_hg_subrepos_opts \
   '(--addremove -A)'{-A,--addremove}'[mark new/missing files as added/removed before committing]' \
   '--close-branch[mark a branch as closed, hiding it from the branch list]' \
   '--amend[amend the parent of the working dir]' \
-  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date:' \
-  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user:'
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user'
 }
 
 _hg_cmd_qrecord() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
                      $_hg_pat_opts $_hg_ignore_space_opts $_hg_subrepos_opts
 }
 
 # Convert
 _hg_cmd_convert() {
-_arguments -s -w : $_hg_global_opts \
-  '(--source-type -s)'{-s,--source-type}'[source repository type]' \
-  '(--dest-type -d)'{-d,--dest-type}'[destination repository type]' \
-  '(--rev -r)'{-r+,--rev=}'[import up to target revision]:revision:' \
+_arguments -s -S : $_hg_global_opts \
+  '(--source-type -s)'{-s+,--source-type=}'[source repository type]:type:(hg cvs darcs git svn mtn gnuarch bzr p4)' \
+  '(--dest-type -d)'{-d+,--dest-type=}'[destination repository type]:type:(hg svn)' \
+  '*'{-r+,--rev=}'[import up to target revision]:revision' \
   '(--authormap -A)'{-A+,--authormap=}'[remap usernames using this file]:file:_files' \
-  '--filemap[remap file names using contents of file]:file:_files' \
-  '--splicemap[splice synthesized history into place]:file:_files' \
-  '--branchmap[change branch names while converting]:file:_files' \
+  '--filemap=[remap file names using contents of file]:file:_files' \
+  '--full[apply filemap changes by converting all files again]' \
+  '--splicemap=[splice synthesized history into place]:file:_files' \
+  '--branchmap=[change branch names while converting]:file:_files' \
   '--branchsort[try to sort changesets by branches]' \
   '--datesort[try to sort changesets by date]' \
-  '--sourcesort[preserve source changesets order]'
-}
-
-# Graphlog
-_hg_cmd_glog() {
-  _hg_cmd_log $@
+  '--sourcesort[preserve source changesets order]' \
+  '--closesort[try to reorder closed revisions]'
 }
 
 # Purge
 _hg_cmd_purge() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '(--abort-on-err -a)'{-a,--abort-on-err}'[abort if an error occurs]' \
   '--all[purge ignored files too]' \
   '(--print -p)'{-p,--print}'[print filenames instead of deleting them]' \
--- a/hgdemandimport/demandimportpy2.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgdemandimport/demandimportpy2.py	Fri Aug 24 12:55:05 2018 -0700
@@ -30,6 +30,8 @@
 import contextlib
 import sys
 
+from . import tracing
+
 contextmanager = contextlib.contextmanager
 
 _origimport = __import__
@@ -86,52 +88,55 @@
 
     def _load(self):
         if not self._module:
-            head, globals, locals, after, level, modrefs = self._data
-            mod = _hgextimport(_origimport, head, globals, locals, None, level)
-            if mod is self:
-                # In this case, _hgextimport() above should imply
-                # _demandimport(). Otherwise, _hgextimport() never
-                # returns _demandmod. This isn't intentional behavior,
-                # in fact. (see also issue5304 for detail)
-                #
-                # If self._module is already bound at this point, self
-                # should be already _load()-ed while _hgextimport().
-                # Otherwise, there is no way to import actual module
-                # as expected, because (re-)invoking _hgextimport()
-                # should cause same result.
-                # This is reason why _load() returns without any more
-                # setup but assumes self to be already bound.
-                mod = self._module
-                assert mod and mod is not self, "%s, %s" % (self, mod)
-                return
+            with tracing.log('demandimport %s', self._data[0]):
+                head, globals, locals, after, level, modrefs = self._data
+                mod = _hgextimport(
+                    _origimport, head, globals, locals, None, level)
+                if mod is self:
+                    # In this case, _hgextimport() above should imply
+                    # _demandimport(). Otherwise, _hgextimport() never
+                    # returns _demandmod. This isn't intentional behavior,
+                    # in fact. (see also issue5304 for detail)
+                    #
+                    # If self._module is already bound at this point, self
+                    # should be already _load()-ed while _hgextimport().
+                    # Otherwise, there is no way to import actual module
+                    # as expected, because (re-)invoking _hgextimport()
+                    # should cause same result.
+                    # This is reason why _load() returns without any more
+                    # setup but assumes self to be already bound.
+                    mod = self._module
+                    assert mod and mod is not self, "%s, %s" % (self, mod)
+                    return
 
-            # load submodules
-            def subload(mod, p):
-                h, t = p, None
-                if '.' in p:
-                    h, t = p.split('.', 1)
-                if getattr(mod, h, nothing) is nothing:
-                    setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__,
-                                               level=1))
-                elif t:
-                    subload(getattr(mod, h), t)
+                # load submodules
+                def subload(mod, p):
+                    h, t = p, None
+                    if '.' in p:
+                        h, t = p.split('.', 1)
+                    if getattr(mod, h, nothing) is nothing:
+                        setattr(mod, h, _demandmod(
+                            p, mod.__dict__, mod.__dict__, level=1))
+                    elif t:
+                        subload(getattr(mod, h), t)
 
-            for x in after:
-                subload(mod, x)
+                for x in after:
+                    subload(mod, x)
 
-            # Replace references to this proxy instance with the actual module.
-            if locals:
-                if locals.get(head) is self:
-                    locals[head] = mod
-                elif locals.get(head + r'mod') is self:
-                    locals[head + r'mod'] = mod
+                # Replace references to this proxy instance with the
+                # actual module.
+                if locals:
+                    if locals.get(head) is self:
+                        locals[head] = mod
+                    elif locals.get(head + r'mod') is self:
+                        locals[head + r'mod'] = mod
 
-            for modname in modrefs:
-                modref = sys.modules.get(modname, None)
-                if modref and getattr(modref, head, None) is self:
-                    setattr(modref, head, mod)
+                for modname in modrefs:
+                    modref = sys.modules.get(modname, None)
+                    if modref and getattr(modref, head, None) is self:
+                        setattr(modref, head, mod)
 
-            object.__setattr__(self, r"_module", mod)
+                object.__setattr__(self, r"_module", mod)
 
     def __repr__(self):
         if self._module:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgdemandimport/tracing.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,34 @@
+# Support code for event tracing in Mercurial. Lives in demandimport
+# so it can also be used in demandimport.
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import contextlib
+import os
+
# Lazily-opened pipe to the catapult trace server; None until the first
# log() call that finds HGCATAPULTSERVERPIPE set.
_pipe = None
# True once we have probed the environment, so later calls can bail out
# quickly when tracing is unavailable.
_checked = False

@contextlib.contextmanager
def log(whencefmt, *whenceargs):
    """Trace the wrapped block by writing START/END events to the pipe.

    A no-op when the HGCATAPULTSERVERPIPE environment variable is unset.
    Otherwise a START line is written on entry and a matching END line on
    exit, even if the body raises.
    """
    global _pipe, _session, _checked
    if _pipe is None:
        if _checked:
            # already probed: tracing is known to be disabled
            yield
            return
        _checked = True
        pipename = os.environ.get('HGCATAPULTSERVERPIPE')
        if pipename is None:
            yield
            return
        # line-buffered (buffering=1) so every event is flushed promptly
        _pipe = open(pipename, 'w', 1)
        _session = os.environ.get('HGCATAPULTSESSION', 'none')
    whence = whencefmt % whenceargs
    try:
        _pipe.write('START %s %s\n' % (_session, whence))
        yield
    finally:
        _pipe.write('END %s %s\n' % (_session, whence))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/absorb.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,977 @@
+# absorb.py
+#
+# Copyright 2016 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""apply working directory changes to changesets (EXPERIMENTAL)
+
+The absorb extension provides a command to use annotate information to
+amend modified chunks into the corresponding non-public changesets.
+
+::
+
+    [absorb]
+    # only check 50 recent non-public changesets at most
+    max-stack-size = 50
+    # whether to add noise to new commits to avoid obsolescence cycle
+    add-noise = 1
+    # make `amend --correlated` a shortcut to the main command
+    amend-flag = correlated
+
+    [color]
+    absorb.node = blue bold
+    absorb.path = bold
+"""
+
+# TODO:
+#  * Rename config items to [commands] namespace
+#  * Converge getdraftstack() with other code in core
+#  * move many attributes on fixupstate to be private
+
+from __future__ import absolute_import
+
+import collections
+
+from mercurial.i18n import _
+from mercurial import (
+    cmdutil,
+    commands,
+    context,
+    crecord,
+    error,
+    linelog,
+    mdiff,
+    node,
+    obsolete,
+    patch,
+    phases,
+    pycompat,
+    registrar,
+    repair,
+    scmutil,
+    util,
+)
+from mercurial.utils import (
+    stringutil,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Command table populated by the @command decorator below.
cmdtable = {}
command = registrar.command(cmdtable)

# Config declarations for the [absorb] section (see module docstring).
configtable = {}
configitem = registrar.configitem(configtable)

configitem('absorb', 'add-noise', default=True)
configitem('absorb', 'amend-flag', default=None)
configitem('absorb', 'max-stack-size', default=50)

# Default color labels used when printing node/path information.
colortable = {
    'absorb.node': 'blue bold',
    'absorb.path': 'bold',
}

# Module-level alias; used for per-line bookkeeping further down.
defaultdict = collections.defaultdict
+
class nullui(object):
    """blank ui object doing nothing

    Stands in for a real ui when callers only need the flag attributes
    and no-op output helpers.
    """
    debugflag = False
    verbose = False
    quiet = True

    # Bug fix: the original signature was __getitem__(name) with no
    # `self`; instance subscription passes (self, key), so any
    # `nullui()[...]` lookup raised TypeError instead of returning the
    # no-op callable.
    def __getitem__(self, name):
        def nullfunc(*args, **kwds):
            # swallow any arguments and do nothing
            return
        return nullfunc
+
class emptyfilecontext(object):
    """A minimal stand-in filecontext describing an empty, absent file."""

    def data(self):
        """Return the file content (always empty)."""
        return ''

    def node(self):
        """Return the null filenode id."""
        return node.nullid
+
def uniq(lst):
    """list -> list. drop duplicated items, keeping first occurrences in order"""
    picked = []
    known = set()
    for item in lst:
        if item in known:
            # already emitted an equal item earlier; skip this one
            continue
        known.add(item)
        picked.append(item)
    return picked
+
def getdraftstack(headctx, limit=None):
    """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.

    changesets are sorted in topo order, oldest first.
    return at most limit items, if limit is a positive number.

    merges are considered as non-draft as well. i.e. every commit
    returned has and only has 1 parent.
    """
    stack = []
    cur = headctx
    # walk first parents downwards until we hit a public changeset,
    # a merge, or the size limit
    while cur.phase() != phases.public:
        if limit and len(stack) >= limit:
            break
        ps = cur.parents()
        if len(ps) != 1:
            # stop at merges; they are treated as non-draft
            break
        stack.append(cur)
        cur = ps[0]
    # collected newest-first; callers expect oldest-first
    return stack[::-1]
+
def getfilestack(stack, path, seenfctxs=None):
    """([ctx], str, set) -> [fctx], {ctx: fctx}

    stack is a list of contexts, from old to new. usually they are what
    "getdraftstack" returns.

    follows renames, but not copies.

    seenfctxs is a set of filecontexts that will be considered "immutable".
    they are usually what this function returned in earlier calls, useful
    to avoid issues that a file was "moved" to multiple places and was then
    modified differently, like: "a" was copied to "b", "a" was also copied to
    "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
    and we enforce only one of them to be able to affect "a"'s content.

    return an empty list and an empty dict, if the specified path does not
    exist in stack[-1] (the top of the stack).

    otherwise, return a list of de-duplicated filecontexts, and the map to
    convert ctx in the stack to fctx, for possible mutable fctxs. the first item
    of the list would be outside the stack and should be considered immutable.
    the remaining items are within the stack.

    for example, given the following changelog and corresponding filelog
    revisions:

      changelog: 3----4----5----6----7
      filelog:   x    0----1----1----2 (x: no such file yet)

    - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
    - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
      dummy empty filecontext.
    - if stack = [2], returns ([], {})
    - if stack = [7], returns ([1, 2], {7: 2})
    - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
      removed, since 1 is immutable.
    """
    if seenfctxs is None:
        seenfctxs = set()
    assert stack

    if path not in stack[-1]:
        return [], {}

    fctxs = []
    fctxmap = {}

    pctx = stack[0].p1() # the public (immutable) ctx we stop at
    # walk newest -> oldest, collecting the fctx for path in each ctx;
    # note that `path` may be rebound below when a rename is followed
    for ctx in reversed(stack):
        if path not in ctx: # the file is added in the next commit
            pctx = ctx
            break
        fctx = ctx[path]
        fctxs.append(fctx)
        if fctx in seenfctxs: # treat fctx as the immutable one
            pctx = None # do not add another immutable fctx
            break
        fctxmap[ctx] = fctx # only for mutable fctxs
        renamed = fctx.renamed()
        if renamed:
            path = renamed[0] # follow rename
            if path in ctx: # but do not follow copy
                pctx = ctx.p1()
                break

    if pctx is not None: # need an extra immutable fctx
        if path in pctx:
            fctxs.append(pctx[path])
        else:
            # the file did not exist before the stack: pad with a dummy
            fctxs.append(emptyfilecontext())

    fctxs.reverse()
    # note: we rely on a property of hg: filerev is not reused for linear
    # history. i.e. it's impossible to have:
    #   changelog:  4----5----6 (linear, no merges)
    #   filelog:    1----2----1
    #                         ^ reuse filerev (impossible)
    # because parents are part of the hash. if that's not true, we need to
    # remove uniq and find a different way to identify fctxs.
    return uniq(fctxs), fctxmap
+
class overlaystore(patch.filestore):
    """read-only, hybrid store based on a dict and ctx.
    memworkingcopy: {path: content}, overrides file contents.
    """
    def __init__(self, basectx, memworkingcopy):
        self.basectx = basectx
        self.memworkingcopy = memworkingcopy

    def getfile(self, path):
        """comply with mercurial.patch.filestore.getfile"""
        ctx = self.basectx
        if path not in ctx:
            # unknown to the base context: report "no such file"
            return None, None, None
        fctx = ctx[path]
        # the in-memory overlay wins over the committed file data
        overlay = self.memworkingcopy
        content = overlay[path] if path in overlay else fctx.data()
        mode = (fctx.islink(), fctx.isexec())
        renamed = fctx.renamed() # False or (path, node)
        return content, mode, (renamed and renamed[0])
+
def overlaycontext(memworkingcopy, ctx, parents=None, extra=None):
    """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
    memworkingcopy overrides file contents.
    """
    if parents is None:
        # default to ctx's own parents; must be a (node1, node2) pair
        parents = ctx.repo().changelog.parents(ctx.node())
    if extra is None:
        extra = ctx.extra()
    # every file ctx touches, plus whatever the overlay overrides
    files = set(ctx.files()).union(memworkingcopy)
    store = overlaystore(ctx, memworkingcopy)
    return context.memctx(
        repo=ctx.repo(),
        parents=parents,
        text=ctx.description(),
        files=files,
        filectxfn=store,
        user=ctx.user(),
        date=ctx.date(),
        branch=None,
        extra=extra)
+
class filefixupstate(object):
    """state needed to apply fixups to a single file

    internally, it keeps file contents of several revisions and a linelog.

    the linelog uses odd revision numbers for original contents (fctxs passed
    to __init__), and even revision numbers for fixups, like:

        linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
        linelog rev 2: fixups made to self.fctxs[0]
        linelog rev 3: self.fctxs[1] (a child of fctxs[0])
        linelog rev 4: fixups made to self.fctxs[1]
        ...

    a typical use is like:

        1. call diffwith, to calculate self.fixups
        2. (optionally), present self.fixups to the user, or change it
        3. call apply, to apply changes
        4. read results from "finalcontents", or call getfinalcontent
    """

    def __init__(self, fctxs, ui=None, opts=None):
        """([fctx], ui or None) -> None

        fctxs should be linear, and sorted by topo order - oldest first.
        fctxs[0] will be considered as "immutable" and will not be changed.
        """
        self.fctxs = fctxs
        self.ui = ui or nullui()
        self.opts = opts or {}

        # following fields are built from fctxs. they exist for perf reason
        self.contents = [f.data() for f in fctxs]
        self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
        self.linelog = self._buildlinelog()
        if self.ui.debugflag:
            # sanity check: checking out the linelog must reproduce contents
            assert self._checkoutlinelog() == self.contents

        # following fields will be filled later
        self.chunkstats = [0, 0] # [adopted, total : int]
        self.targetlines = [] # [str]
        self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
        self.finalcontents = [] # [str]

    def diffwith(self, targetfctx, showchanges=False):
        """calculate fixups needed by examining the differences between
        self.fctxs[-1] and targetfctx, chunk by chunk.

        targetfctx is the target state we move towards. we may or may not be
        able to get there because not all modified chunks can be amended into
        a non-public fctx unambiguously.

        call this only once, before apply().

        update self.fixups, self.chunkstats, and self.targetlines.
        """
        a = self.contents[-1]
        alines = self.contentlines[-1]
        b = targetfctx.data()
        blines = mdiff.splitnewlines(b)
        self.targetlines = blines

        self.linelog.annotate(self.linelog.maxrev)
        annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
        assert len(annotated) == len(alines)
        # add a dummy end line to make insertion at the end easier
        if annotated:
            dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
            annotated.append(dummyendline)

        # analyse diff blocks
        for chunk in self._alldiffchunks(a, b, alines, blines):
            newfixups = self._analysediffchunk(chunk, annotated)
            self.chunkstats[0] += bool(newfixups) # 1 or 0
            self.chunkstats[1] += 1
            self.fixups += newfixups
            if showchanges:
                self._showchanges(alines, blines, chunk, newfixups)

    def apply(self):
        """apply self.fixups. update self.linelog, self.finalcontents.

        call this only once, before getfinalcontent(), after diffwith().
        """
        # the following is unnecessary, as it's done by "diffwith":
        #   self.linelog.annotate(self.linelog.maxrev)
        for rev, a1, a2, b1, b2 in reversed(self.fixups):
            blines = self.targetlines[b1:b2]
            if self.ui.debugflag:
                idx = (max(rev - 1, 0)) // 2
                self.ui.write(_('%s: chunk %d:%d -> %d lines\n')
                              % (node.short(self.fctxs[idx].node()),
                                 a1, a2, len(blines)))
            self.linelog.replacelines(rev, a1, a2, b1, b2)
        if self.opts.get('edit_lines', False):
            self.finalcontents = self._checkoutlinelogwithedits()
        else:
            self.finalcontents = self._checkoutlinelog()

    def getfinalcontent(self, fctx):
        """(fctx) -> str. get modified file content for a given filecontext"""
        idx = self.fctxs.index(fctx)
        return self.finalcontents[idx]

    def _analysediffchunk(self, chunk, annotated):
        """analyse a different chunk and return new fixups found

        return [] if no lines from the chunk can be safely applied.

        the chunk (or lines) cannot be safely applied, if, for example:
          - the modified (deleted) lines belong to a public changeset
            (self.fctxs[0])
          - the chunk is a pure insertion and the adjacent lines (at most 2
            lines) belong to different non-public changesets, or do not belong
            to any non-public changesets.
          - the chunk is modifying lines from different changesets.
            in this case, if the number of lines deleted equals to the number
            of lines added, assume it's a simple 1:1 map (could be wrong).
            otherwise, give up.
          - the chunk is modifying lines from a single non-public changeset,
            but other revisions touch the area as well. i.e. the lines are
            not continuous as seen from the linelog.
        """
        a1, a2, b1, b2 = chunk
        # find involved indexes from annotate result
        involved = annotated[a1:a2]
        if not involved and annotated: # a1 == a2 and a is not empty
            # pure insertion, check nearby lines. ignore lines belong
            # to the public (first) changeset (i.e. annotated[i][0] == 1)
            nearbylinenums = {a2, max(0, a1 - 1)}
            involved = [annotated[i]
                        for i in nearbylinenums if annotated[i][0] != 1]
        involvedrevs = list(set(r for r, l in involved))
        newfixups = []
        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
            # chunk belongs to a single revision
            rev = involvedrevs[0]
            if rev > 1:
                # rev + 1 is the (even) fixup revision paired with rev
                fixuprev = rev + 1
                newfixups.append((fixuprev, a1, a2, b1, b2))
        elif a2 - a1 == b2 - b1 or b1 == b2:
            # 1:1 line mapping, or chunk was deleted
            for i in pycompat.xrange(a1, a2):
                rev, linenum = annotated[i]
                if rev > 1:
                    if b1 == b2: # deletion, simply remove that single line
                        nb1 = nb2 = 0
                    else: # 1:1 line mapping, change the corresponding rev
                        nb1 = b1 + i - a1
                        nb2 = nb1 + 1
                    fixuprev = rev + 1
                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
        return self._optimizefixups(newfixups)

    @staticmethod
    def _alldiffchunks(a, b, alines, blines):
        """like mdiff.allblocks, but only care about differences"""
        blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
        for chunk, btype in blocks:
            if btype != '!':
                continue
            yield chunk

    def _buildlinelog(self):
        """calculate the initial linelog based on self.content{,line}s.
        this is similar to running a partial "annotate".
        """
        llog = linelog.linelog()
        a, alines = '', []
        for i in pycompat.xrange(len(self.contents)):
            b, blines = self.contents[i], self.contentlines[i]
            llrev = i * 2 + 1 # odd revision: original content of fctxs[i]
            chunks = self._alldiffchunks(a, b, alines, blines)
            for a1, a2, b1, b2 in reversed(list(chunks)):
                llog.replacelines(llrev, a1, a2, b1, b2)
            a, alines = b, blines
        return llog

    def _checkoutlinelog(self):
        """() -> [str]. check out file contents from linelog"""
        contents = []
        for i in pycompat.xrange(len(self.contents)):
            rev = (i + 1) * 2 # even revision: fctxs[i] plus its fixups
            self.linelog.annotate(rev)
            content = ''.join(map(self._getline, self.linelog.annotateresult))
            contents.append(content)
        return contents

    def _checkoutlinelogwithedits(self):
        """() -> [str]. prompt all lines for edit"""
        alllines = self.linelog.getalllines()
        # header
        editortext = (_('HG: editing %s\nHG: "y" means the line to the right '
                        'exists in the changeset to the top\nHG:\n')
                      % self.fctxs[-1].path())
        # [(idx, fctx)]. hide the dummy emptyfilecontext
        visiblefctxs = [(i, f)
                        for i, f in enumerate(self.fctxs)
                        if not isinstance(f, emptyfilecontext)]
        for i, (j, f) in enumerate(visiblefctxs):
            editortext += (_('HG: %s/%s %s %s\n') %
                           ('|' * i, '-' * (len(visiblefctxs) - i + 1),
                            node.short(f.node()),
                            f.description().split('\n',1)[0]))
        editortext += _('HG: %s\n') % ('|' * len(visiblefctxs))
        # figure out the lifetime of a line, this is relatively inefficient,
        # but probably fine
        lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
        for i, f in visiblefctxs:
            self.linelog.annotate((i + 1) * 2)
            for l in self.linelog.annotateresult:
                lineset[l].add(i)
        # append lines
        for l in alllines:
            editortext += ('    %s : %s' %
                           (''.join([('y' if i in lineset[l] else ' ')
                                     for i, _f in visiblefctxs]),
                            self._getline(l)))
        # run editor
        editedtext = self.ui.edit(editortext, '', action='absorb')
        if not editedtext:
            raise error.Abort(_('empty editor text'))
        # parse edited result
        contents = ['' for i in self.fctxs]
        leftpadpos = 4
        colonpos = leftpadpos + len(visiblefctxs) + 1
        for l in mdiff.splitnewlines(editedtext):
            if l.startswith('HG:'):
                continue
            if l[colonpos - 1:colonpos + 2] != ' : ':
                raise error.Abort(_('malformed line: %s') % l)
            linecontent = l[colonpos + 2:]
            for i, ch in enumerate(l[leftpadpos:colonpos - 1]):
                if ch == 'y':
                    contents[visiblefctxs[i][0]] += linecontent
        # chunkstats is hard to calculate if anything changes, therefore
        # set them to just a simple value (1, 1).
        if editedtext != editortext:
            self.chunkstats = [1, 1]
        return contents

    def _getline(self, lineinfo):
        """((rev, linenum)) -> str. convert rev+line number to line content"""
        rev, linenum = lineinfo
        if rev & 1: # odd: original line taken from fctxs
            return self.contentlines[rev // 2][linenum]
        else: # even: fixup line from targetfctx
            return self.targetlines[linenum]

    def _iscontinuous(self, a1, a2, closedinterval=False):
        """(a1, a2 : int) -> bool

        check if these lines are continuous. i.e. no other insertions or
        deletions (from other revisions) among these lines.

        closedinterval decides whether a2 should be included or not. i.e. is
        it [a1, a2), or [a1, a2] ?
        """
        if a1 >= a2:
            return True
        llog = self.linelog
        offset1 = llog.getoffset(a1)
        offset2 = llog.getoffset(a2) + int(closedinterval)
        linesinbetween = llog.getalllines(offset1, offset2)
        return len(linesinbetween) == a2 - a1 + int(closedinterval)

    def _optimizefixups(self, fixups):
        """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
        merge adjacent fixups to make them less fragmented.
        """
        result = []
        # single-element list so pushchunk() can mutate the current chunk
        pcurrentchunk = [[-1, -1, -1, -1, -1]]

        def pushchunk():
            # flush the accumulated chunk, if any, to result
            if pcurrentchunk[0][0] != -1:
                result.append(tuple(pcurrentchunk[0]))

        for i, chunk in enumerate(fixups):
            rev, a1, a2, b1, b2 = chunk
            lastrev = pcurrentchunk[0][0]
            lasta2 = pcurrentchunk[0][2]
            lastb2 = pcurrentchunk[0][4]
            if (a1 == lasta2 and b1 == lastb2 and rev == lastrev and
                    self._iscontinuous(max(a1 - 1, 0), a1)):
                # merge into currentchunk
                pcurrentchunk[0][2] = a2
                pcurrentchunk[0][4] = b2
            else:
                pushchunk()
                pcurrentchunk[0] = list(chunk)
        pushchunk()
        return result

    def _showchanges(self, alines, blines, chunk, fixups):
        ui = self.ui

        def label(line, label):
            if line.endswith('\n'):
                line = line[:-1]
            return ui.label(line, label)

        # this is not optimized for perf but _showchanges only gets executed
        # with an extra command-line flag.
        a1, a2, b1, b2 = chunk
        aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
        for idx, fa1, fa2, fb1, fb2 in fixups:
            for i in pycompat.xrange(fa1, fa2):
                aidxs[i - a1] = (max(idx, 1) - 1) // 2
            for i in pycompat.xrange(fb1, fb2):
                bidxs[i - b1] = (max(idx, 1) - 1) // 2

        buf = [] # [(idx, content)]
        buf.append((0, label('@@ -%d,%d +%d,%d @@'
                             % (a1, a2 - a1, b1, b2 - b1), 'diff.hunk')))
        buf += [(aidxs[i - a1], label('-' + alines[i], 'diff.deleted'))
                for i in pycompat.xrange(a1, a2)]
        buf += [(bidxs[i - b1], label('+' + blines[i], 'diff.inserted'))
                for i in pycompat.xrange(b1, b2)]
        for idx, line in buf:
            shortnode = idx and node.short(self.fctxs[idx].node()) or ''
            ui.write(ui.label(shortnode[0:7].ljust(8), 'absorb.node') +
                     line + '\n')
+
+class fixupstate(object):
+    """state needed to run absorb
+
+    internally, it keeps paths and filefixupstates.
+
+    a typical use is like filefixupstates:
+
+        1. call diffwith, to calculate fixups
+        2. (optionally), present fixups to the user, or edit fixups
+        3. call apply, to apply changes to memory
+        4. call commit, to commit changes to hg database
+    """
+
+    def __init__(self, stack, ui=None, opts=None):
+        """([ctx], ui or None) -> None
+
+        stack: should be linear, and sorted by topo order - oldest first.
+        all commits in stack are considered mutable.
+        """
+        assert stack
+        self.ui = ui or nullui()
+        self.opts = opts or {}
+        self.stack = stack
+        self.repo = stack[-1].repo().unfiltered()
+
+        # following fields will be filled later
+        self.paths = [] # [str]
+        self.status = None # ctx.status output
+        self.fctxmap = {} # {path: {ctx: fctx}}
+        self.fixupmap = {} # {path: filefixupstate}
+        self.replacemap = {} # {oldnode: newnode or None}
+        self.finalnode = None # head after all fixups
+
+    def diffwith(self, targetctx, match=None, showchanges=False):
+        """diff and prepare fixups. update self.fixupmap, self.paths"""
+        # only care about modified files
+        self.status = self.stack[-1].status(targetctx, match)
+        self.paths = []
+        # but if --edit-lines is used, the user may want to edit files
+        # even if they are not modified
+        editopt = self.opts.get('edit_lines')
+        if not self.status.modified and editopt and match:
+            interestingpaths = match.files()
+        else:
+            interestingpaths = self.status.modified
+        # prepare the filefixupstate
+        seenfctxs = set()
+        # sorting is necessary to eliminate ambiguity for the "double move"
+        # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
+        for path in sorted(interestingpaths):
+            self.ui.debug('calculating fixups for %s\n' % path)
+            targetfctx = targetctx[path]
+            fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
+            # ignore symbolic links or binary, or unchanged files
+            if any(f.islink() or stringutil.binary(f.data())
+                   for f in [targetfctx] + fctxs
+                   if not isinstance(f, emptyfilecontext)):
+                continue
+            if targetfctx.data() == fctxs[-1].data() and not editopt:
+                continue
+            seenfctxs.update(fctxs[1:])
+            self.fctxmap[path] = ctx2fctx
+            fstate = filefixupstate(fctxs, ui=self.ui, opts=self.opts)
+            if showchanges:
+                colorpath = self.ui.label(path, 'absorb.path')
+                header = 'showing changes for ' + colorpath
+                self.ui.write(header + '\n')
+            fstate.diffwith(targetfctx, showchanges=showchanges)
+            self.fixupmap[path] = fstate
+            self.paths.append(path)
+
+    def apply(self):
+        """apply fixups to individual filefixupstates"""
+        for path, state in self.fixupmap.iteritems():
+            if self.ui.debugflag:
+                self.ui.write(_('applying fixups to %s\n') % path)
+            state.apply()
+
+    @property
+    def chunkstats(self):
+        """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
+        return dict((path, state.chunkstats)
+                    for path, state in self.fixupmap.iteritems())
+
+    def commit(self):
+        """commit changes. update self.finalnode, self.replacemap"""
+        with self.repo.wlock(), self.repo.lock():
+            with self.repo.transaction('absorb') as tr:
+                self._commitstack()
+                self._movebookmarks(tr)
+                if self.repo['.'].node() in self.replacemap:
+                    self._moveworkingdirectoryparent()
+                if self._useobsolete:
+                    self._obsoleteoldcommits()
+            if not self._useobsolete: # strip must be outside transactions
+                self._stripoldcommits()
+        return self.finalnode
+
+    def printchunkstats(self):
+        """print things like '1 of 2 chunk(s) applied'"""
+        ui = self.ui
+        chunkstats = self.chunkstats
+        if ui.verbose:
+            # chunkstats for each file
+            for path, stat in chunkstats.iteritems():
+                if stat[0]:
+                    ui.write(_('%s: %d of %d chunk(s) applied\n')
+                             % (path, stat[0], stat[1]))
+        elif not ui.quiet:
+            # a summary for all files
+            stats = chunkstats.values()
+            applied, total = (sum(s[i] for s in stats) for i in (0, 1))
+            ui.write(_('%d of %d chunk(s) applied\n') % (applied, total))
+
+    def _commitstack(self):
+        """make new commits. update self.finalnode, self.replacemap.
+        it is split from "commit" to avoid too much indentation.
+        """
+        # last node (20-char) committed by us
+        lastcommitted = None
+        # p1 which overrides the parent of the next commit, "None" means use
+        # the original parent unchanged
+        nextp1 = None
+        for ctx in self.stack:
+            memworkingcopy = self._getnewfilecontents(ctx)
+            if not memworkingcopy and not lastcommitted:
+                # nothing changed, nothing committed
+                nextp1 = ctx
+                continue
+            msg = ''
+            if self._willbecomenoop(memworkingcopy, ctx, nextp1):
+                # changeset is no longer necessary
+                self.replacemap[ctx.node()] = None
+                msg = _('became empty and was dropped')
+            else:
+                # changeset needs re-commit
+                nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
+                lastcommitted = self.repo[nodestr]
+                nextp1 = lastcommitted
+                self.replacemap[ctx.node()] = lastcommitted.node()
+                if memworkingcopy:
+                    msg = _('%d file(s) changed, became %s') % (
+                        len(memworkingcopy), self._ctx2str(lastcommitted))
+                else:
+                    msg = _('became %s') % self._ctx2str(lastcommitted)
+            if self.ui.verbose and msg:
+                self.ui.write(_('%s: %s\n') % (self._ctx2str(ctx), msg))
+        self.finalnode = lastcommitted and lastcommitted.node()
+
+    def _ctx2str(self, ctx):
+        if self.ui.debugflag:
+            return ctx.hex()
+        else:
+            return node.short(ctx.node())
+
+    def _getnewfilecontents(self, ctx):
+        """(ctx) -> {path: str}
+
+        fetch file contents from filefixupstates.
+        return the working copy overrides - files different from ctx.
+        """
+        result = {}
+        for path in self.paths:
+            ctx2fctx = self.fctxmap[path] # {ctx: fctx}
+            if ctx not in ctx2fctx:
+                continue
+            fctx = ctx2fctx[ctx]
+            content = fctx.data()
+            newcontent = self.fixupmap[path].getfinalcontent(fctx)
+            if content != newcontent:
+                result[fctx.path()] = newcontent
+        return result
+
+    def _movebookmarks(self, tr):
+        repo = self.repo
+        needupdate = [(name, self.replacemap[hsh])
+                      for name, hsh in repo._bookmarks.iteritems()
+                      if hsh in self.replacemap]
+        changes = []
+        for name, hsh in needupdate:
+            if hsh:
+                changes.append((name, hsh))
+                if self.ui.verbose:
+                    self.ui.write(_('moving bookmark %s to %s\n')
+                                  % (name, node.hex(hsh)))
+            else:
+                changes.append((name, None))
+                if self.ui.verbose:
+                    self.ui.write(_('deleting bookmark %s\n') % name)
+        repo._bookmarks.applychanges(repo, tr, changes)
+
+    def _moveworkingdirectoryparent(self):
+        if not self.finalnode:
+            # Find the latest not-{obsoleted,stripped} parent.
+            revs = self.repo.revs('max(::. - %ln)', self.replacemap.keys())
+            ctx = self.repo[revs.first()]
+            self.finalnode = ctx.node()
+        else:
+            ctx = self.repo[self.finalnode]
+
+        dirstate = self.repo.dirstate
+        # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
+        # be slow. in absorb's case, no need to invalidate fsmonitorstate.
+        noop = lambda: 0
+        restore = noop
+        if util.safehasattr(dirstate, '_fsmonitorstate'):
+            bak = dirstate._fsmonitorstate.invalidate
+            def restore():
+                dirstate._fsmonitorstate.invalidate = bak
+            dirstate._fsmonitorstate.invalidate = noop
+        try:
+            with dirstate.parentchange():
+                dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
+        finally:
+            restore()
+
+    @staticmethod
+    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
+        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop
+
+        if it will become an empty commit (does not change anything, after the
+        memworkingcopy overrides), return True. otherwise return False.
+        """
+        if not pctx:
+            parents = ctx.parents()
+            if len(parents) != 1:
+                return False
+            pctx = parents[0]
+        # ctx changes more files (not a subset of memworkingcopy)
+        if not set(ctx.files()).issubset(set(memworkingcopy)):
+            return False
+        for path, content in memworkingcopy.iteritems():
+            if path not in pctx or path not in ctx:
+                return False
+            fctx = ctx[path]
+            pfctx = pctx[path]
+            if pfctx.flags() != fctx.flags():
+                return False
+            if pfctx.data() != content:
+                return False
+        return True
+
+    def _commitsingle(self, memworkingcopy, ctx, p1=None):
+        """({path: content}, ctx, node) -> node. make a single commit
+
+        the commit is a clone from ctx, with a (optionally) different p1, and
+        different file contents replaced by memworkingcopy.
+        """
+        parents = p1 and (p1, node.nullid)
+        extra = ctx.extra()
+        if self._useobsolete and self.ui.configbool('absorb', 'add-noise'):
+            extra['absorb_source'] = ctx.hex()
+        mctx = overlaycontext(memworkingcopy, ctx, parents, extra=extra)
+        # preserve phase
+        with mctx.repo().ui.configoverride({
+            ('phases', 'new-commit'): ctx.phase()}):
+            return mctx.commit()
+
+    @util.propertycache
+    def _useobsolete(self):
+        """() -> bool"""
+        return obsolete.isenabled(self.repo, obsolete.createmarkersopt)
+
+    def _obsoleteoldcommits(self):
+        relations = [(self.repo[k], v and (self.repo[v],) or ())
+                     for k, v in self.replacemap.iteritems()]
+        if relations:
+            obsolete.createmarkers(self.repo, relations)
+
+    def _stripoldcommits(self):
+        nodelist = self.replacemap.keys()
+        # make sure we don't strip innocent children
+        revs = self.repo.revs('%ln - (::(heads(%ln::)-%ln))', nodelist,
+                              nodelist, nodelist)
+        tonode = self.repo.changelog.node
+        nodelist = [tonode(r) for r in revs]
+        if nodelist:
+            repair.strip(self.repo.ui, self.repo, nodelist)
+
+def _parsechunk(hunk):
+    """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
+    if type(hunk) not in (crecord.uihunk, patch.recordhunk):
+        return None, None
+    path = hunk.header.filename()
+    a1 = hunk.fromline + len(hunk.before) - 1
+    # remove before and after context
+    hunk.before = hunk.after = []
+    buf = util.stringio()
+    hunk.write(buf)
+    patchlines = mdiff.splitnewlines(buf.getvalue())
+    # hunk.prettystr() will update hunk.removed
+    a2 = a1 + hunk.removed
+    blines = [l[1:] for l in patchlines[1:] if l[0] != '-']
+    return path, (a1, a2, blines)
+
+def overlaydiffcontext(ctx, chunks):
+    """(ctx, [crecord.uihunk]) -> memctx
+
+    return a memctx with some [1] patches (chunks) applied to ctx.
+    [1]: modifications are handled. renames, mode changes, etc. are ignored.
+    """
+    # sadly the applying-patch logic is hardly reusable, and messy:
+    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
+    #    needs a file stream of a patch and will re-parse it, while we have
+    #    structured hunk objects at hand.
+    # 2. a lot of different implementations about "chunk" (patch.hunk,
+    #    patch.recordhunk, crecord.uihunk)
+    # as we only care about applying changes to modified files, no mode
+    # change, no binary diff, and no renames, it's probably okay to
+    # re-invent the logic using much simpler code here.
+    memworkingcopy = {} # {path: content}
+    patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]}
+    for path, info in map(_parsechunk, chunks):
+        if not path or not info:
+            continue
+        patchmap[path].append(info)
+    for path, patches in patchmap.iteritems():
+        if path not in ctx or not patches:
+            continue
+        patches.sort(reverse=True)
+        lines = mdiff.splitnewlines(ctx[path].data())
+        for a1, a2, blines in patches:
+            lines[a1:a2] = blines
+        memworkingcopy[path] = ''.join(lines)
+    return overlaycontext(memworkingcopy, ctx)
+
+def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
+    """pick fixup chunks from targetctx, apply them to stack.
+
+    if targetctx is None, the working copy context will be used.
+    if stack is None, the current draft stack will be used.
+    return fixupstate.
+    """
+    if stack is None:
+        limit = ui.configint('absorb', 'max-stack-size')
+        stack = getdraftstack(repo['.'], limit)
+        if limit and len(stack) >= limit:
+            ui.warn(_('absorb: only the recent %d changesets will '
+                      'be analysed\n')
+                    % limit)
+    if not stack:
+        raise error.Abort(_('no changeset to change'))
+    if targetctx is None: # default to working copy
+        targetctx = repo[None]
+    if pats is None:
+        pats = ()
+    if opts is None:
+        opts = {}
+    state = fixupstate(stack, ui=ui, opts=opts)
+    matcher = scmutil.match(targetctx, pats, opts)
+    if opts.get('interactive'):
+        diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
+        origchunks = patch.parsepatch(diff)
+        chunks = cmdutil.recordfilter(ui, origchunks)[0]
+        targetctx = overlaydiffcontext(stack[-1], chunks)
+    state.diffwith(targetctx, matcher, showchanges=opts.get('print_changes'))
+    if not opts.get('dry_run'):
+        state.apply()
+        if state.commit():
+            state.printchunkstats()
+        elif not ui.quiet:
+            ui.write(_('nothing applied\n'))
+    return state
+
+@command('^absorb',
+         [('p', 'print-changes', None,
+           _('print which changesets are modified by which changes')),
+          ('i', 'interactive', None,
+           _('interactively select which chunks to apply (EXPERIMENTAL)')),
+          ('e', 'edit-lines', None,
+           _('edit what lines belong to which changesets before commit '
+             '(EXPERIMENTAL)')),
+         ] + commands.dryrunopts + commands.walkopts,
+         _('hg absorb [OPTION] [FILE]...'))
+def absorbcmd(ui, repo, *pats, **opts):
+    """incorporate corrections into the stack of draft changesets
+
+    absorb analyzes each change in your working directory and attempts to
+    amend the changed lines into the changesets in your stack that first
+    introduced those lines.
+
+    If absorb cannot find an unambiguous changeset to amend for a change,
+    that change will be left in the working directory, untouched. They can be
+    observed by :hg:`status` or :hg:`diff` afterwards. In other words,
+    absorb does not write to the working directory.
+
+    Changesets outside the revset `::. and not public() and not merge()` will
+    not be changed.
+
+    Changesets that become empty after applying the changes will be deleted.
+
+    If in doubt, run :hg:`absorb -pn` to preview what changesets will
+    be amended by what changed lines, without actually changing anything.
+
+    Returns 0 on success, 1 if all chunks were ignored and nothing amended.
+    """
+    state = absorb(ui, repo, pats=pats, opts=opts)
+    if sum(s[0] for s in state.chunkstats.values()) == 0:
+        return 1
--- a/hgext/acl.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/acl.py	Fri Aug 24 12:55:05 2018 -0700
@@ -220,6 +220,7 @@
     error,
     extensions,
     match,
+    pycompat,
     registrar,
     util,
 )
@@ -403,7 +404,7 @@
     allow = buildmatch(ui, repo, user, 'acl.allow')
     deny = buildmatch(ui, repo, user, 'acl.deny')
 
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         ctx = repo[rev]
         branch = ctx.branch()
         if denybranches and denybranches(branch):
--- a/hgext/beautifygraph.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/beautifygraph.py	Fri Aug 24 12:55:05 2018 -0700
@@ -18,6 +18,7 @@
     encoding,
     extensions,
     graphmod,
+    pycompat,
     templatekw,
 )
 
@@ -53,8 +54,10 @@
 def convertedges(line):
     line = ' %s ' % line
     pretty = []
-    for idx in xrange(len(line) - 2):
-        pretty.append(prettyedge(line[idx], line[idx + 1], line[idx + 2]))
+    for idx in pycompat.xrange(len(line) - 2):
+        pretty.append(prettyedge(line[idx:idx + 1],
+                                 line[idx + 1:idx + 2],
+                                 line[idx + 2:idx + 3]))
     return ''.join(pretty)
 
 def getprettygraphnode(orig, *args, **kwargs):
@@ -84,7 +87,7 @@
         ui.warn(_('beautifygraph: unsupported encoding, UTF-8 required\n'))
         return
 
-    if 'A' in encoding._wide:
+    if r'A' in encoding._wide:
         ui.warn(_('beautifygraph: unsupported terminal settings, '
                   'monospace narrow text required\n'))
         return
--- a/hgext/blackbox.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/blackbox.py	Fri Aug 24 12:55:05 2018 -0700
@@ -45,6 +45,7 @@
 
 from mercurial import (
     encoding,
+    pycompat,
     registrar,
     ui as uimod,
     util,
@@ -111,7 +112,7 @@
             if st.st_size >= maxsize:
                 path = vfs.join(name)
                 maxfiles = ui.configint('blackbox', 'maxfiles')
-                for i in xrange(maxfiles - 1, 1, -1):
+                for i in pycompat.xrange(maxfiles - 1, 1, -1):
                     rotate(oldpath='%s.%d' % (path, i - 1),
                            newpath='%s.%d' % (path, i))
                 rotate(oldpath=path,
--- a/hgext/censor.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/censor.py	Fri Aug 24 12:55:05 2018 -0700
@@ -32,6 +32,7 @@
 
 from mercurial import (
     error,
+    pycompat,
     registrar,
     revlog,
     scmutil,
@@ -160,7 +161,7 @@
     offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)
 
     # Rewrite all following filelog revisions fixing up offsets and deltas.
-    for srev in xrange(crev + 1, len(flog)):
+    for srev in pycompat.xrange(crev + 1, len(flog)):
         if crev in flog.parentrevs(srev):
             # Immediate children of censored node must be re-added as fulltext.
             try:
--- a/hgext/convert/cvsps.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/convert/cvsps.py	Fri Aug 24 12:55:05 2018 -0700
@@ -763,7 +763,7 @@
             # branchpoints such that it is the latest possible
             # commit without any intervening, unrelated commits.
 
-            for candidate in xrange(i):
+            for candidate in pycompat.xrange(i):
                 if c.branch not in changesets[candidate].branchpoints:
                     if p is not None:
                         break
--- a/hgext/convert/hg.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/convert/hg.py	Fri Aug 24 12:55:05 2018 -0700
@@ -358,7 +358,7 @@
             p2 = node
 
         if self.filemapmode and nparents == 1:
-            man = self.repo.manifestlog._revlog
+            man = self.repo.manifestlog.getstorage(b'')
             mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
             closed = 'close' in commit.extra
             if not closed and not man.cmp(m1node, man.revision(mnode)):
--- a/hgext/eol.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/eol.py	Fri Aug 24 12:55:05 2018 -0700
@@ -266,7 +266,7 @@
     ensureenabled(ui)
     files = set()
     revs = set()
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         revs.add(rev)
         if headsonly:
             ctx = repo[rev]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/__init__.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,193 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# fastannotate: faster annotate implementation using linelog
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""yet another annotate implementation that might be faster (EXPERIMENTAL)
+
+The fastannotate extension provides a 'fastannotate' command that makes
+use of the linelog data structure as a cache layer and is expected to
+be faster than the vanilla 'annotate' if the cache is present.
+
+In most cases, fastannotate requires a setup that mainbranch is some pointer
+that always moves forward, to be most efficient.
+
+Using fastannotate together with linkrevcache would speed up building the
+annotate cache greatly. Run "debugbuildlinkrevcache" before
+"debugbuildannotatecache".
+
+::
+
+    [fastannotate]
+    # specify the main branch head. the internal linelog will only contain
+    # the linear (ignoring p2) "mainbranch". since linelog cannot move
+    # backwards without a rebuild, this should be something that always moves
+    # forward, usually it is "master" or "@".
+    mainbranch = master
+
+    # fastannotate supports different modes to expose its feature.
+    # a list of combination:
+    # - fastannotate: expose the feature via the "fastannotate" command which
+    #   deals with everything in a most efficient way, and provides extra
+    #   features like --deleted etc.
+    # - fctx: replace fctx.annotate implementation. note:
+    #     a. it is less efficient than the "fastannotate" command
+    #     b. it will make it practically impossible to access the old (disk
+    #        side-effect free) annotate implementation
+    #     c. it implies "hgweb".
+    # - hgweb: replace hgweb's annotate implementation. conflict with "fctx".
+    # (default: fastannotate)
+    modes = fastannotate
+
+    # default format when no format flags are used (default: number)
+    defaultformat = changeset, user, date
+
+    # serve the annotate cache via wire protocol (default: False)
+    # tip: the .hg/fastannotate directory is portable - can be rsynced
+    server = True
+
+    # build annotate cache on demand for every client request (default: True)
+    # disabling it could make server response faster, useful when there is a
+    # cronjob building the cache.
+    serverbuildondemand = True
+
+    # update local annotate cache from remote on demand
+    client = False
+
+    # path to use when connecting to the remote server (default: default)
+    remotepath = default
+
+    # minimal length of the history of a file required to fetch linelog from
+    # the server. (default: 10)
+    clientfetchthreshold = 10
+
+    # use flock instead of the file existence lock
+    # flock may not work well on some network filesystems, but they avoid
+    # creating and deleting files frequently, which is faster when updating
+    # the annotate cache in batch. if you have issues with this option, set it
+    # to False. (default: True if flock is supported, False otherwise)
+    useflock = True
+
+    # for "fctx" mode, always follow renames regardless of command line option.
+    # this is a BC with the original command but will reduce the space needed
+    # for annotate cache, and is useful for client-server setup since the
+    # server will only provide annotate cache with default options (i.e. with
+    # follow). do not affect "fastannotate" mode. (default: True)
+    forcefollow = True
+
+    # for "fctx" mode, always treat file as text files, to skip the "isbinary"
+    # check. this is consistent with the "fastannotate" command and could help
+    # to avoid a file fetch if remotefilelog is used. (default: True)
+    forcetext = True
+
+    # use unfiltered repo for better performance.
+    unfilteredrepo = True
+
+    # sacrifice correctness in some corner cases for performance. it does not
+    # affect the correctness of the annotate cache being built. the option
+    # is experimental and may disappear in the future (default: False)
+    perfhack = True
+"""
+
+# TODO from import:
+# * `branch` is probably the wrong term, throughout the code.
+#
+# * replace the fastannotate `modes` configuration with a collection
+#   of booleans.
+#
+# * Use the templater instead of bespoke formatting
+#
+# * rename the config knob for updating the local cache from a remote server
+#
+# * move `flock` based locking to a common area
+#
+# * revise wireprotocol for sharing annotate files
+#
+# * figure out a sensible default for `mainbranch` (with the caveat
+#   that we probably also want to figure out a better term than
+#   `branch`, see above)
+#
+# * format changes to the revmap file (maybe use length-encoding
+#   instead of null-terminated file paths at least?)
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    configitems,
+    error as hgerror,
+    localrepo,
+    registrar,
+)
+
+from . import (
+    commands,
+    context,
+    protocol,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = commands.cmdtable
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('fastannotate', 'modes', default=['fastannotate'])
+configitem('fastannotate', 'server', default=False)
+configitem('fastannotate', 'useflock', default=configitems.dynamicdefault)
+configitem('fastannotate', 'client', default=False)
+configitem('fastannotate', 'unfilteredrepo', default=True)
+configitem('fastannotate', 'defaultformat', default=['number'])
+configitem('fastannotate', 'perfhack', default=False)
+configitem('fastannotate', 'mainbranch')
+configitem('fastannotate', 'forcetext', default=True)
+configitem('fastannotate', 'forcefollow', default=True)
+configitem('fastannotate', 'clientfetchthreshold', default=10)
+configitem('fastannotate', 'serverbuildondemand', default=True)
+configitem('fastannotate', 'remotepath', default='default')
+
+def _flockavailable():
+    try:
+        import fcntl
+        fcntl.flock
+    except StandardError:
+        return False
+    else:
+        return True
+
+def uisetup(ui):
+    modes = set(ui.configlist('fastannotate', 'modes'))
+    if 'fctx' in modes:
+        modes.discard('hgweb')
+    for name in modes:
+        if name == 'fastannotate':
+            commands.registercommand()
+        elif name == 'hgweb':
+            from . import support
+            support.replacehgwebannotate()
+        elif name == 'fctx':
+            from . import support
+            support.replacefctxannotate()
+            commands.wrapdefault()
+        else:
+            raise hgerror.Abort(_('fastannotate: invalid mode: %s') % name)
+
+    if ui.configbool('fastannotate', 'server'):
+        protocol.serveruisetup(ui)
+
+    if ui.configbool('fastannotate', 'useflock', _flockavailable()):
+        context.pathhelper.lock = context.pathhelper._lockflock
+
+def extsetup(ui):
+    # fastannotate has its own locking, without depending on repo lock
+    # TODO: avoid mutating this unless the specific repo has it enabled
+    localrepo.localrepository._wlockfreeprefix.add('fastannotate/')
+
+def reposetup(ui, repo):
+    if ui.configbool('fastannotate', 'client'):
+        protocol.clientreposetup(ui, repo)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/commands.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,281 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# commands: fastannotate commands
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    commands,
+    error,
+    extensions,
+    patch,
+    pycompat,
+    registrar,
+    scmutil,
+    util,
+)
+
+from . import (
+    context as facontext,
+    error as faerror,
+    formatter as faformatter,
+)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
+    """generate paths matching given patterns"""
+    perfhack = repo.ui.configbool('fastannotate', 'perfhack')
+
+    # disable perfhack if:
+    # a) any walkopt is used
+    # b) if we treat pats as plain file names, some of them do not have
+    #    corresponding linelog files
+    if perfhack:
+        # cwd related to reporoot
+        reporoot = os.path.dirname(repo.path)
+        reldir = os.path.relpath(pycompat.getcwd(), reporoot)
+        if reldir == '.':
+            reldir = ''
+        if any(opts.get(o[1]) for o in commands.walkopts): # a)
+            perfhack = False
+        else: # b)
+            relpats = [os.path.relpath(p, reporoot) if os.path.isabs(p) else p
+                       for p in pats]
+            # disable perfhack on '..' since it allows escaping from the repo
+            if any(('..' in f or
+                    not os.path.isfile(
+                        facontext.pathhelper(repo, f, aopts).linelogpath))
+                   for f in relpats):
+                perfhack = False
+
+    # perfhack: emit paths directly without checking with manifest
+    # this can be incorrect if the rev does not have the file.
+    if perfhack:
+        for p in relpats:
+            yield os.path.join(reldir, p)
+    else:
+        def bad(x, y):
+            raise error.Abort("%s: %s" % (x, y))
+        ctx = scmutil.revsingle(repo, rev)
+        m = scmutil.match(ctx, pats, opts, badfn=bad)
+        for p in ctx.walk(m):
+            yield p
+
+fastannotatecommandargs = {
+    'options': [
+        ('r', 'rev', '.', _('annotate the specified revision'), _('REV')),
+        ('u', 'user', None, _('list the author (long with -v)')),
+        ('f', 'file', None, _('list the filename')),
+        ('d', 'date', None, _('list the date (short with -q)')),
+        ('n', 'number', None, _('list the revision number (default)')),
+        ('c', 'changeset', None, _('list the changeset')),
+        ('l', 'line-number', None, _('show line number at the first '
+                                     'appearance')),
+        ('e', 'deleted', None, _('show deleted lines (slow) (EXPERIMENTAL)')),
+        ('', 'no-content', None, _('do not show file content (EXPERIMENTAL)')),
+        ('', 'no-follow', None, _("don't follow copies and renames")),
+        ('', 'linear', None, _('enforce linear history, ignore second parent '
+                               'of merges (EXPERIMENTAL)')),
+        ('', 'long-hash', None, _('show long changeset hash (EXPERIMENTAL)')),
+        ('', 'rebuild', None, _('rebuild cache even if it exists '
+                                '(EXPERIMENTAL)')),
+    ] + commands.diffwsopts + commands.walkopts + commands.formatteropts,
+    'synopsis': _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
+    'inferrepo': True,
+}
+
+def fastannotate(ui, repo, *pats, **opts):
+    """show changeset information by line for each file
+
+    List changes in files, showing the revision id responsible for each line.
+
+    This command is useful for discovering when a change was made and by whom.
+
+    By default this command prints revision numbers. If you include --file,
+    --user, or --date, the revision number is suppressed unless you also
+    include --number. The default format can also be customized by setting
+    fastannotate.defaultformat.
+
+    Returns 0 on success.
+
+    .. container:: verbose
+
+        This command uses an implementation different from the vanilla annotate
+        command, which may produce slightly different (while still reasonable)
+        outputs for some cases.
+
+        Unlike the vanilla annotate, fastannotate follows rename regardless of
+        the existence of --file.
+
+        For the best performance when running on a full repo, use -c, -l,
+        avoid -u, -d, -n. Use --linear and --no-content to make it even faster.
+
+        For the best performance when running on a shallow (remotefilelog)
+        repo, avoid --linear, --no-follow, or any diff options. As the server
+        won't be able to populate annotate cache when non-default options
+        affecting results are used.
+    """
+    if not pats:
+        raise error.Abort(_('at least one filename or pattern is required'))
+
+    # performance hack: filtered repo can be slow. unfilter by default.
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+
+    rev = opts.get('rev', '.')
+    rebuild = opts.get('rebuild', False)
+
+    diffopts = patch.difffeatureopts(ui, opts, section='annotate',
+                                     whitespace=True)
+    aopts = facontext.annotateopts(
+        diffopts=diffopts,
+        followmerge=not opts.get('linear', False),
+        followrename=not opts.get('no_follow', False))
+
+    if not any(opts.get(s)
+               for s in ['user', 'date', 'file', 'number', 'changeset']):
+        # default 'number' for compatibility. but fastannotate is more
+        # efficient with "changeset", "line-number" and "no-content".
+        for name in ui.configlist('fastannotate', 'defaultformat', ['number']):
+            opts[name] = True
+
+    ui.pager('fastannotate')
+    template = opts.get('template')
+    if template == 'json':
+        formatter = faformatter.jsonformatter(ui, repo, opts)
+    else:
+        formatter = faformatter.defaultformatter(ui, repo, opts)
+    showdeleted = opts.get('deleted', False)
+    showlines = not bool(opts.get('no_content'))
+    showpath = opts.get('file', False)
+
+    # find the head of the main (master) branch
+    master = ui.config('fastannotate', 'mainbranch') or rev
+
+    # paths will be used for prefetching and the real annotating
+    paths = list(_matchpaths(repo, rev, pats, opts, aopts))
+
+    # for client, prefetch from the server
+    if util.safehasattr(repo, 'prefetchfastannotate'):
+        repo.prefetchfastannotate(paths)
+
+    for path in paths:
+        result = lines = existinglines = None
+        while True:
+            try:
+                with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
+                    result = a.annotate(rev, master=master, showpath=showpath,
+                                        showlines=(showlines and
+                                                   not showdeleted))
+                    if showdeleted:
+                        existinglines = set((l[0], l[1]) for l in result)
+                        result = a.annotatealllines(
+                            rev, showpath=showpath, showlines=showlines)
+                break
+            except (faerror.CannotReuseError, faerror.CorruptedFileError):
+                # happens if master moves backwards, or the file was deleted
+                # and readded, or renamed to an existing name, or corrupted.
+                if rebuild: # give up since we have tried rebuild already
+                    raise
+                else: # try a second time rebuilding the cache (slow)
+                    rebuild = True
+                    continue
+
+        if showlines:
+            result, lines = result
+
+        formatter.write(result, lines, existinglines=existinglines)
+    formatter.end()
+
+_newopts = set([])
+_knownopts = set([opt[1].replace('-', '_') for opt in
+                  (fastannotatecommandargs['options'] + commands.globalopts)])
+
+def _annotatewrapper(orig, ui, repo, *pats, **opts):
+    """used by wrapdefault"""
+    # we need this hack until the obsstore has 0.0 seconds perf impact
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+
+    # treat the file as text (skip the isbinary check)
+    if ui.configbool('fastannotate', 'forcetext'):
+        opts['text'] = True
+
+    # check if we need to do prefetch (client-side)
+    rev = opts.get('rev')
+    if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
+        paths = list(_matchpaths(repo, rev, pats, opts))
+        repo.prefetchfastannotate(paths)
+
+    return orig(ui, repo, *pats, **opts)
+
+def registercommand():
+    """register the fastannotate command"""
+    name = '^fastannotate|fastblame|fa'
+    command(name, **fastannotatecommandargs)(fastannotate)
+
+def wrapdefault():
+    """wrap the default annotate command, to be aware of the protocol"""
+    extensions.wrapcommand(commands.table, 'annotate', _annotatewrapper)
+
+@command('debugbuildannotatecache',
+         [('r', 'rev', '', _('build up to the specific revision'), _('REV'))
+         ] + commands.walkopts,
+         _('[-r REV] FILE...'))
+def debugbuildannotatecache(ui, repo, *pats, **opts):
+    """incrementally build fastannotate cache up to REV for specified files
+
+    If REV is not specified, use the config 'fastannotate.mainbranch'.
+
+    If fastannotate.client is True, download the annotate cache from the
+    server. Otherwise, build the annotate cache locally.
+
+    The annotate cache will be built using the default diff and follow
+    options and lives in '.hg/fastannotate/default'.
+    """
+    rev = opts.get('REV') or ui.config('fastannotate', 'mainbranch')
+    if not rev:
+        raise error.Abort(_('you need to provide a revision'),
+                          hint=_('set fastannotate.mainbranch or use --rev'))
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+    ctx = scmutil.revsingle(repo, rev)
+    m = scmutil.match(ctx, pats, opts)
+    paths = list(ctx.walk(m))
+    if util.safehasattr(repo, 'prefetchfastannotate'):
+        # client
+        if opts.get('REV'):
+            raise error.Abort(_('--rev cannot be used for client'))
+        repo.prefetchfastannotate(paths)
+    else:
+        # server, or full repo
+        for i, path in enumerate(paths):
+            ui.progress(_('building'), i, total=len(paths))
+            with facontext.annotatecontext(repo, path) as actx:
+                try:
+                    if actx.isuptodate(rev):
+                        continue
+                    actx.annotate(rev, rev)
+                except (faerror.CannotReuseError, faerror.CorruptedFileError):
+                    # the cache is broken (could happen with renaming so the
+                    # file history gets invalidated). rebuild and try again.
+                    ui.debug('fastannotate: %s: rebuilding broken cache\n'
+                             % path)
+                    actx.rebuild()
+                    try:
+                        actx.annotate(rev, rev)
+                    except Exception as ex:
+                        # possibly a bug, but should not stop us from building
+                        # cache for other files.
+                        ui.warn(_('fastannotate: %s: failed to '
+                                  'build cache: %r\n') % (path, ex))
+        # clear the progress bar
+        ui.write()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/context.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,823 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# context: context needed to annotate a file
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import collections
+import contextlib
+import hashlib
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    linelog as linelogmod,
+    lock as lockmod,
+    mdiff,
+    node,
+    pycompat,
+    scmutil,
+    util,
+)
+
+from . import (
+    error as faerror,
+    revmap as revmapmod,
+)
+
+# given path, get filelog, cached
+@util.lrucachefunc
+def _getflog(repo, path):
+    return repo.file(path)
+
+# extracted from mercurial.context.basefilectx.annotate
+def _parents(f, follow=True):
+    # Cut _descendantrev here to mitigate the penalty of lazy linkrev
+    # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
+    # from the topmost introrev (= srcrev) down to p.linkrev() if it
+    # isn't an ancestor of the srcrev.
+    f._changeid
+    pl = f.parents()
+
+    # Don't return renamed parents if we aren't following.
+    if not follow:
+        pl = [p for p in pl if p.path() == f.path()]
+
+    # renamed filectx won't have a filelog yet, so set it
+    # from the cache to save time
+    for p in pl:
+        if not '_filelog' in p.__dict__:
+            p._filelog = _getflog(f._repo, p.path())
+
+    return pl
+
+# extracted from mercurial.context.basefilectx.annotate. slightly modified
+# so it takes a fctx instead of a pair of text and fctx.
+def _decorate(fctx):
+    text = fctx.data()
+    linecount = text.count('\n')
+    if text and not text.endswith('\n'):
+        linecount += 1
+    return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
+
+# extracted from mercurial.context.basefilectx.annotate. slightly modified
+# so it takes an extra "blocks" parameter calculated elsewhere, instead of
+# calculating diff here.
+def _pair(parent, child, blocks):
+    for (a1, a2, b1, b2), t in blocks:
+        # Changed blocks ('!') or blocks made only of blank lines ('~')
+        # belong to the child.
+        if t == '=':
+            child[0][b1:b2] = parent[0][a1:a2]
+    return child
+
+# like scmutil.revsingle, but with lru cache, so their states (like manifests)
+# could be reused
+_revsingle = util.lrucachefunc(scmutil.revsingle)
+
+def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
+    """(repo, str, str) -> fctx
+
+    get the filectx object from repo, rev, path, in an efficient way.
+
+    if resolverev is True, "rev" is a revision specified by the revset
+    language, otherwise "rev" is a nodeid, or a revision number that can
+    be consumed by repo.__getitem__.
+
+    if adjustctx is not None, the returned fctx will point to a changeset
+    that introduces the change (last modified the file). if adjustctx
+    is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
+    faster for big repos but is incorrect for some cases.
+    """
+    if resolverev and not isinstance(rev, int) and rev is not None:
+        ctx = _revsingle(repo, rev)
+    else:
+        ctx = repo[rev]
+
+    # If we don't need to adjust the linkrev, create the filectx using the
+    # changectx instead of using ctx[path]. This means it already has the
+    # changectx information, so blame -u will be able to look directly at the
+    # commitctx object instead of having to resolve it by going through the
+    # manifest. In a lazy-manifest world this can prevent us from downloading a
+    # lot of data.
+    if adjustctx is None:
+        # ctx.rev() is None means it's the working copy, which is a special
+        # case.
+        if ctx.rev() is None:
+            fctx = ctx[path]
+        else:
+            fctx = repo.filectx(path, changeid=ctx.rev())
+    else:
+        fctx = ctx[path]
+        if adjustctx == 'linkrev':
+            introrev = fctx.linkrev()
+        else:
+            introrev = fctx.introrev()
+        if introrev != ctx.rev():
+            fctx._changeid = introrev
+            fctx._changectx = repo[introrev]
+    return fctx
+
+# like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
+def encodedir(path):
+    return (path
+            .replace('.hg/', '.hg.hg/')
+            .replace('.l/', '.l.hg/')
+            .replace('.m/', '.m.hg/')
+            .replace('.lock/', '.lock.hg/'))
+
+def hashdiffopts(diffopts):
+    diffoptstr = str(sorted(
+        (k, getattr(diffopts, k))
+        for k in mdiff.diffopts.defaults.iterkeys()
+    ))
+    return hashlib.sha1(diffoptstr).hexdigest()[:6]
+
+_defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
+
+class annotateopts(object):
+    """like mercurial.mdiff.diffopts, but is for annotate
+
+    followrename: follow renames, like "hg annotate -f"
+    followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
+    """
+
+    defaults = {
+        'diffopts': None,
+        'followrename': True,
+        'followmerge': True,
+    }
+
+    def __init__(self, **opts):
+        for k, v in self.defaults.iteritems():
+            setattr(self, k, opts.get(k, v))
+
+    @util.propertycache
+    def shortstr(self):
+        """represent opts in a short string, suitable for a directory name"""
+        result = ''
+        if not self.followrename:
+            result += 'r0'
+        if not self.followmerge:
+            result += 'm0'
+        if self.diffopts is not None:
+            assert isinstance(self.diffopts, mdiff.diffopts)
+            diffopthash = hashdiffopts(self.diffopts)
+            if diffopthash != _defaultdiffopthash:
+                result += 'i' + diffopthash
+        return result or 'default'
+
+defaultopts = annotateopts()
+
+class _annotatecontext(object):
+    """do not use this class directly as it does not use lock to protect
+    writes. use "with annotatecontext(...)" instead.
+    """
+
+    def __init__(self, repo, path, linelogpath, revmappath, opts):
+        self.repo = repo
+        self.ui = repo.ui
+        self.path = path
+        self.opts = opts
+        self.linelogpath = linelogpath
+        self.revmappath = revmappath
+        self._linelog = None
+        self._revmap = None
+        self._node2path = {} # {str: str}
+
+    @property
+    def linelog(self):
+        if self._linelog is None:
+            if os.path.exists(self.linelogpath):
+                with open(self.linelogpath, 'rb') as f:
+                    try:
+                        self._linelog = linelogmod.linelog.fromdata(f.read())
+                    except linelogmod.LineLogError:
+                        self._linelog = linelogmod.linelog()
+            else:
+                self._linelog = linelogmod.linelog()
+        return self._linelog
+
+    @property
+    def revmap(self):
+        if self._revmap is None:
+            self._revmap = revmapmod.revmap(self.revmappath)
+        return self._revmap
+
+    def close(self):
+        if self._revmap is not None:
+            self._revmap.flush()
+            self._revmap = None
+        if self._linelog is not None:
+            with open(self.linelogpath, 'wb') as f:
+                f.write(self._linelog.encode())
+            self._linelog = None
+
+    __del__ = close
+
+    def rebuild(self):
+        """delete linelog and revmap, useful for rebuilding"""
+        self.close()
+        self._node2path.clear()
+        _unlinkpaths([self.revmappath, self.linelogpath])
+
+    @property
+    def lastnode(self):
+        """return last node in revmap, or None if revmap is empty"""
+        if self._revmap is None:
+            # fast path, read revmap without loading its full content
+            return revmapmod.getlastnode(self.revmappath)
+        else:
+            return self._revmap.rev2hsh(self._revmap.maxrev)
+
+    def isuptodate(self, master, strict=True):
+        """return True if the revmap / linelog is up-to-date, or the file
+        does not exist in the master revision. False otherwise.
+
+        it tries to be fast and could return false negatives, because of the
+        use of linkrev instead of introrev.
+
+        useful for both server and client to decide whether to update
+        fastannotate cache or not.
+
+        if strict is True, even if fctx exists in the revmap, but is not the
+        last node, isuptodate will return False. it's good for performance - no
+        expensive check was done.
+
+        if strict is False, if fctx exists in the revmap, this function may
+        return True. this is useful for the client to skip downloading the
+        cache if the client's master is behind the server's.
+        """
+        lastnode = self.lastnode
+        try:
+            f = self._resolvefctx(master, resolverev=True)
+            # choose linkrev instead of introrev as the check is meant to be
+            # *fast*.
+            linknode = self.repo.changelog.node(f.linkrev())
+            if not strict and lastnode and linknode != lastnode:
+                # check if f.node() is in the revmap. note: this loads the
+                # revmap and can be slow.
+                return self.revmap.hsh2rev(linknode) is not None
+            # avoid resolving old manifest, or slow adjustlinkrev to be fast,
+            # false negatives are acceptable in this case.
+            return linknode == lastnode
+        except LookupError:
+            # master does not have the file, or the revmap is ahead
+            return True
+
+    def annotate(self, rev, master=None, showpath=False, showlines=False):
+        """incrementally update the cache so it includes revisions in the main
+        branch till 'master'. and run annotate on 'rev', which may or may not be
+        included in the main branch.
+
+        if master is None, do not update linelog.
+
+        the first value returned is the annotate result, it is [(node, linenum)]
+        by default. [(node, linenum, path)] if showpath is True.
+
+        if showlines is True, a second value will be returned, it is a list of
+        corresponding line contents.
+        """
+
+        # the fast path test requires commit hash, convert rev number to hash,
+        # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
+        # command could give us a revision number even if the user passes a
+        # commit hash.
+        if isinstance(rev, int):
+            rev = node.hex(self.repo.changelog.node(rev))
+
+        # fast path: if rev is in the main branch already
+        directly, revfctx = self.canannotatedirectly(rev)
+        if directly:
+            if self.ui.debugflag:
+                self.ui.debug('fastannotate: %s: using fast path '
+                              '(resolved fctx: %s)\n'
+                              % (self.path, util.safehasattr(revfctx, 'node')))
+            return self.annotatedirectly(revfctx, showpath, showlines)
+
+        # resolve master
+        masterfctx = None
+        if master:
+            try:
+                masterfctx = self._resolvefctx(master, resolverev=True,
+                                               adjustctx=True)
+            except LookupError: # master does not have the file
+                pass
+            else:
+                if masterfctx in self.revmap: # no need to update linelog
+                    masterfctx = None
+
+        #                  ... - @ <- rev (can be an arbitrary changeset,
+        #                 /                not necessarily a descendant
+        #      master -> o                 of master)
+        #                |
+        #     a merge -> o         'o': new changesets in the main branch
+        #                |\        '#': revisions in the main branch that
+        #                o *            exist in linelog / revmap
+        #                | .       '*': changesets in side branches, or
+        # last master -> # .            descendants of master
+        #                | .
+        #                # *       joint: '#', and is a parent of a '*'
+        #                |/
+        #     a joint -> # ^^^^ --- side branches
+        #                |
+        #                ^ --- main branch (in linelog)
+
+        # these DFSes are similar to the traditional annotate algorithm.
+        # we cannot really reuse the code for perf reason.
+
+        # 1st DFS calculates merges, joint points, and needed.
+        # "needed" is a simple reference counting dict to free items in
+        # "hist", reducing its memory usage otherwise could be huge.
+        initvisit = [revfctx]
+        if masterfctx:
+            if masterfctx.rev() is None:
+                raise error.Abort(_('cannot update linelog to wdir()'),
+                                  hint=_('set fastannotate.mainbranch'))
+            initvisit.append(masterfctx)
+        visit = initvisit[:]
+        pcache = {}
+        needed = {revfctx: 1}
+        hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
+        while visit:
+            f = visit.pop()
+            if f in pcache or f in hist:
+                continue
+            if f in self.revmap: # in the old main branch, it's a joint
+                llrev = self.revmap.hsh2rev(f.node())
+                self.linelog.annotate(llrev)
+                result = self.linelog.annotateresult
+                hist[f] = (result, f.data())
+                continue
+            pl = self._parentfunc(f)
+            pcache[f] = pl
+            for p in pl:
+                needed[p] = needed.get(p, 0) + 1
+                if p not in pcache:
+                    visit.append(p)
+
+        # 2nd (simple) DFS calculates new changesets in the main branch
+        # ('o' nodes in the above graph), so we know when to update linelog.
+        newmainbranch = set()
+        f = masterfctx
+        while f and f not in self.revmap:
+            newmainbranch.add(f)
+            pl = pcache[f]
+            if pl:
+                f = pl[0]
+            else:
+                f = None
+                break
+
+        # f, if present, is the position where the last build stopped at, and
+        # should be the "master" last time. check to see if we can continue
+        # building the linelog incrementally. (we cannot if diverged)
+        if masterfctx is not None:
+            self._checklastmasterhead(f)
+
+        if self.ui.debugflag:
+            if newmainbranch:
+                self.ui.debug('fastannotate: %s: %d new changesets in the main'
+                              ' branch\n' % (self.path, len(newmainbranch)))
+            elif not hist: # no joints, no updates
+                self.ui.debug('fastannotate: %s: linelog cannot help in '
+                              'annotating this revision\n' % self.path)
+
+        # prepare annotateresult so we can update linelog incrementally
+        self.linelog.annotate(self.linelog.maxrev)
+
+        # 3rd DFS does the actual annotate
+        visit = initvisit[:]
+        progress = 0
+        while visit:
+            f = visit[-1]
+            if f in hist:
+                visit.pop()
+                continue
+
+            ready = True
+            pl = pcache[f]
+            for p in pl:
+                if p not in hist:
+                    ready = False
+                    visit.append(p)
+            if not ready:
+                continue
+
+            visit.pop()
+            blocks = None # mdiff blocks, used for appending linelog
+            ismainbranch = (f in newmainbranch)
+            # curr is the same as the traditional annotate algorithm,
+            # if we only care about linear history (do not follow merge),
+            # then curr is not actually used.
+            assert f not in hist
+            curr = _decorate(f)
+            for i, p in enumerate(pl):
+                bs = list(self._diffblocks(hist[p][1], curr[1]))
+                if i == 0 and ismainbranch:
+                    blocks = bs
+                curr = _pair(hist[p], curr, bs)
+                if needed[p] == 1:
+                    del hist[p]
+                    del needed[p]
+                else:
+                    needed[p] -= 1
+
+            hist[f] = curr
+            del pcache[f]
+
+            if ismainbranch: # need to write to linelog
+                if not self.ui.quiet:
+                    progress += 1
+                    self.ui.progress(_('building cache'), progress,
+                                     total=len(newmainbranch))
+                bannotated = None
+                if len(pl) == 2 and self.opts.followmerge: # merge
+                    bannotated = curr[0]
+                if blocks is None: # no parents, add an empty one
+                    blocks = list(self._diffblocks('', curr[1]))
+                self._appendrev(f, blocks, bannotated)
+            elif showpath: # not append linelog, but we need to record path
+                self._node2path[f.node()] = f.path()
+
+        if progress: # clean progress bar
+            self.ui.write()
+
+        result = [
+            ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
+            for fr, l in hist[revfctx][0]] # [(node, linenumber)]
+        return self._refineannotateresult(result, revfctx, showpath, showlines)
+
+    def canannotatedirectly(self, rev):
+        """(str) -> bool, fctx or node.
+        return (True, f) if we can annotate without updating the linelog, pass
+        f to annotatedirectly.
+        return (False, f) if we need extra calculation. f is the fctx resolved
+        from rev.
+        """
+        result = True
+        f = None
+        if not isinstance(rev, int) and rev is not None:
+            hsh = {20: bytes, 40: node.bin}.get(len(rev), lambda x: None)(rev)
+            if hsh is not None and (hsh, self.path) in self.revmap:
+                f = hsh
+        if f is None:
+            adjustctx = 'linkrev' if self._perfhack else True
+            f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
+            result = f in self.revmap
+            if not result and self._perfhack:
+                # redo the resolution without perfhack - as we are going to
+                # do write operations, we need a correct fctx.
+                f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
+        return result, f
+
+    def annotatealllines(self, rev, showpath=False, showlines=False):
+        """(rev : str) -> [(node : str, linenum : int, path : str)]
+
+        the result has the same format with annotate, but include all (including
+        deleted) lines up to rev. call this after calling annotate(rev, ...) for
+        better performance and accuracy.
+        """
+        revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
+
+        # find a chain from rev to anything in the mainbranch
+        if revfctx not in self.revmap:
+            chain = [revfctx]
+            a = ''
+            while True:
+                f = chain[-1]
+                pl = self._parentfunc(f)
+                if not pl:
+                    break
+                if pl[0] in self.revmap:
+                    a = pl[0].data()
+                    break
+                chain.append(pl[0])
+
+            # both self.linelog and self.revmap is backed by filesystem. now
+            # we want to modify them but do not want to write changes back to
+            # files. so we create in-memory objects and copy them. it's like
+            # a "fork".
+            linelog = linelogmod.linelog()
+            linelog.copyfrom(self.linelog)
+            linelog.annotate(linelog.maxrev)
+            revmap = revmapmod.revmap()
+            revmap.copyfrom(self.revmap)
+
+            for f in reversed(chain):
+                b = f.data()
+                blocks = list(self._diffblocks(a, b))
+                self._doappendrev(linelog, revmap, f, blocks)
+                a = b
+        else:
+            # fastpath: use existing linelog, revmap as we don't write to them
+            linelog = self.linelog
+            revmap = self.revmap
+
+        lines = linelog.getalllines()
+        hsh = revfctx.node()
+        llrev = revmap.hsh2rev(hsh)
+        result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
+        # cannot use _refineannotateresult since we need custom logic for
+        # resolving line contents
+        if showpath:
+            result = self._addpathtoresult(result, revmap)
+        if showlines:
+            linecontents = self._resolvelines(result, revmap, linelog)
+            result = (result, linecontents)
+        return result
+
+    def _resolvelines(self, annotateresult, revmap, linelog):
+        """(annotateresult) -> [line]. designed for annotatealllines.
+        this is probably the most inefficient code in the whole fastannotate
+        directory. but we have made a decision that the linelog does not
+        store line contents. so getting them requires random accesses to
+        the revlog data, since they can be many, it can be very slow.
+        """
+        # [llrev]
+        revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
+        result = [None] * len(annotateresult)
+        # {(rev, linenum): [lineindex]}
+        key2idxs = collections.defaultdict(list)
+        for i in pycompat.xrange(len(result)):
+            key2idxs[(revs[i], annotateresult[i][1])].append(i)
+        while key2idxs:
+            # find an unresolved line and its linelog rev to annotate
+            hsh = None
+            try:
+                for (rev, _linenum), idxs in key2idxs.iteritems():
+                    if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
+                        continue
+                    hsh = annotateresult[idxs[0]][0]
+                    break
+            except StopIteration: # no more unresolved lines
+                return result
+            if hsh is None:
+                # the remaining key2idxs are not in main branch, resolving them
+                # using the hard way...
+                revlines = {}
+                for (rev, linenum), idxs in key2idxs.iteritems():
+                    if rev not in revlines:
+                        hsh = annotateresult[idxs[0]][0]
+                        if self.ui.debugflag:
+                            self.ui.debug('fastannotate: reading %s line #%d '
+                                          'to resolve lines %r\n'
+                                          % (node.short(hsh), linenum, idxs))
+                        fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
+                        lines = mdiff.splitnewlines(fctx.data())
+                        revlines[rev] = lines
+                    for idx in idxs:
+                        result[idx] = revlines[rev][linenum]
+                assert all(x is not None for x in result)
+                return result
+
+            # run the annotate and the lines should match to the file content
+            self.ui.debug('fastannotate: annotate %s to resolve lines\n'
+                          % node.short(hsh))
+            linelog.annotate(rev)
+            fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
+            annotated = linelog.annotateresult
+            lines = mdiff.splitnewlines(fctx.data())
+            if len(lines) != len(annotated):
+                raise faerror.CorruptedFileError('unexpected annotated lines')
+            # resolve lines from the annotate result
+            for i, line in enumerate(lines):
+                k = annotated[i]
+                if k in key2idxs:
+                    for idx in key2idxs[k]:
+                        result[idx] = line
+                    del key2idxs[k]
+        return result
+
+    def annotatedirectly(self, f, showpath, showlines):
+        """like annotate, but when we know that f is in linelog.
+        f can be either a 20-char str (node) or a fctx. this is for perf - in
+        the best case, the user provides a node and we don't need to read the
+        filelog or construct any filecontext.
+        """
+        if isinstance(f, str):
+            hsh = f
+        else:
+            hsh = f.node()
+        llrev = self.revmap.hsh2rev(hsh)
+        if not llrev:
+            raise faerror.CorruptedFileError('%s is not in revmap'
+                                             % node.hex(hsh))
+        if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
+            raise faerror.CorruptedFileError('%s is not in revmap mainbranch'
+                                             % node.hex(hsh))
+        self.linelog.annotate(llrev)
+        result = [(self.revmap.rev2hsh(r), l)
+                  for r, l in self.linelog.annotateresult]
+        return self._refineannotateresult(result, f, showpath, showlines)
+
+    def _refineannotateresult(self, result, f, showpath, showlines):
+        """add the missing path or line contents, they can be expensive.
+        f could be either node or fctx.
+        """
+        if showpath:
+            result = self._addpathtoresult(result)
+        if showlines:
+            if isinstance(f, str): # f: node or fctx
+                llrev = self.revmap.hsh2rev(f)
+                fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
+            else:
+                fctx = f
+            lines = mdiff.splitnewlines(fctx.data())
+            if len(lines) != len(result): # linelog is probably corrupted
+                raise faerror.CorruptedFileError()
+            result = (result, lines)
+        return result
+
+    def _appendrev(self, fctx, blocks, bannotated=None):
+        self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
+
+    def _diffblocks(self, a, b):
+        return mdiff.allblocks(a, b, self.opts.diffopts)
+
+    @staticmethod
+    def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
+        """append a revision to linelog and revmap"""
+
+        def getllrev(f):
+            """(fctx) -> int"""
+            # f should not be a linelog revision
+            if isinstance(f, int):
+                raise error.ProgrammingError('f should not be an int')
+            # f is a fctx, allocate linelog rev on demand
+            hsh = f.node()
+            rev = revmap.hsh2rev(hsh)
+            if rev is None:
+                rev = revmap.append(hsh, sidebranch=True, path=f.path())
+            return rev
+
+        # append sidebranch revisions to revmap
+        siderevs = []
+        siderevmap = {} # node: int
+        if bannotated is not None:
+            for (a1, a2, b1, b2), op in blocks:
+                if op != '=':
+                    # f could be either linelog rev, or fctx.
+                    siderevs += [f for f, l in bannotated[b1:b2]
+                                 if not isinstance(f, int)]
+        siderevs = set(siderevs)
+        if fctx in siderevs: # mainnode must be appended separately
+            siderevs.remove(fctx)
+        for f in siderevs:
+            siderevmap[f] = getllrev(f)
+
+        # the changeset in the main branch, could be a merge
+        llrev = revmap.append(fctx.node(), path=fctx.path())
+        siderevmap[fctx] = llrev
+
+        for (a1, a2, b1, b2), op in reversed(blocks):
+            if op == '=':
+                continue
+            if bannotated is None:
+                linelog.replacelines(llrev, a1, a2, b1, b2)
+            else:
+                blines = [((r if isinstance(r, int) else siderevmap[r]), l)
+                          for r, l in bannotated[b1:b2]]
+                linelog.replacelines_vec(llrev, a1, a2, blines)
+
+    def _addpathtoresult(self, annotateresult, revmap=None):
+        """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
+        if revmap is None:
+            revmap = self.revmap
+
+        def _getpath(nodeid):
+            path = self._node2path.get(nodeid)
+            if path is None:
+                path = revmap.rev2path(revmap.hsh2rev(nodeid))
+                self._node2path[nodeid] = path
+            return path
+
+        return [(n, l, _getpath(n)) for n, l in annotateresult]
+
+    def _checklastmasterhead(self, fctx):
+        """check if fctx is the master's head last time, raise if not"""
+        if fctx is None:
+            llrev = 0
+        else:
+            llrev = self.revmap.hsh2rev(fctx.node())
+            if not llrev:
+                raise faerror.CannotReuseError()
+        if self.linelog.maxrev != llrev:
+            raise faerror.CannotReuseError()
+
+    @util.propertycache
+    def _parentfunc(self):
+        """-> (fctx) -> [fctx]"""
+        followrename = self.opts.followrename
+        followmerge = self.opts.followmerge
+        def parents(f):
+            pl = _parents(f, follow=followrename)
+            if not followmerge:
+                pl = pl[:1]
+            return pl
+        return parents
+
+    @util.propertycache
+    def _perfhack(self):
+        return self.ui.configbool('fastannotate', 'perfhack')
+
+    def _resolvefctx(self, rev, path=None, **kwds):
+        return resolvefctx(self.repo, rev, (path or self.path), **kwds)
+
+def _unlinkpaths(paths):
+    """silent, best-effort unlink"""
+    for path in paths:
+        try:
+            util.unlink(path)
+        except OSError:
+            pass
+
+class pathhelper(object):
+    """helper for getting paths for lockfile, linelog and revmap"""
+
+    def __init__(self, repo, path, opts=defaultopts):
+        # different options use different directories
+        self._vfspath = os.path.join('fastannotate',
+                                     opts.shortstr, encodedir(path))
+        self._repo = repo
+
+    @property
+    def dirname(self):
+        return os.path.dirname(self._repo.vfs.join(self._vfspath))
+
+    @property
+    def linelogpath(self):
+        return self._repo.vfs.join(self._vfspath + '.l')
+
+    def lock(self):
+        return lockmod.lock(self._repo.vfs, self._vfspath + '.lock')
+
+    @contextlib.contextmanager
+    def _lockflock(self):
+        """the same as 'lock' but use flock instead of lockmod.lock, to avoid
+        creating temporary symlinks."""
+        import fcntl
+        lockpath = self.linelogpath
+        util.makedirs(os.path.dirname(lockpath))
+        lockfd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
+        fcntl.flock(lockfd, fcntl.LOCK_EX)
+        try:
+            yield
+        finally:
+            fcntl.flock(lockfd, fcntl.LOCK_UN)
+            os.close(lockfd)
+
+    @property
+    def revmappath(self):
+        return self._repo.vfs.join(self._vfspath + '.m')
+
+@contextlib.contextmanager
+def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
+    """context needed to perform (fast) annotate on a file
+
+    an annotatecontext of a single file consists of two structures: the
+    linelog and the revmap. this function takes care of locking. only 1
+    process is allowed to write that file's linelog and revmap at a time.
+
+    when something goes wrong, this function will assume the linelog and the
+    revmap are in a bad state, and remove them from disk.
+
+    use this function in the following way:
+
+        with annotatecontext(...) as actx:
+            actx. ....
+    """
+    helper = pathhelper(repo, path, opts)
+    util.makedirs(helper.dirname)
+    revmappath = helper.revmappath
+    linelogpath = helper.linelogpath
+    actx = None
+    try:
+        with helper.lock():
+            actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
+            if rebuild:
+                actx.rebuild()
+            yield actx
+    except Exception:
+        if actx is not None:
+            actx.rebuild()
+        repo.ui.debug('fastannotate: %s: cache broken and deleted\n' % path)
+        raise
+    finally:
+        if actx is not None:
+            actx.close()
+
+def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
+    """like annotatecontext but get the context from a fctx. convenient when
+    used in fctx.annotate
+    """
+    repo = fctx._repo
+    path = fctx._path
+    if repo.ui.configbool('fastannotate', 'forcefollow', True):
+        follow = True
+    aopts = annotateopts(diffopts=diffopts, followrename=follow)
+    return annotatecontext(repo, path, aopts, rebuild)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/error.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,13 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# error: errors used in fastannotate
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+class CorruptedFileError(Exception):
+    pass
+
+class CannotReuseError(Exception):
+    """cannot reuse or update the cache incrementally"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/formatter.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,161 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# format: defines the format used to output annotate result
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+from mercurial import (
+    encoding,
+    node,
+    pycompat,
+    templatefilters,
+    util,
+)
+from mercurial.utils import (
+        dateutil,
+)
+
+# imitating mercurial.commands.annotate, not using the vanilla formatter since
+# the data structures are a bit different, and we have some fast paths.
+class defaultformatter(object):
+    """the default formatter that does leftpad and support some common flags"""
+
+    def __init__(self, ui, repo, opts):
+        self.ui = ui
+        self.opts = opts
+
+        if ui.quiet:
+            datefunc = dateutil.shortdate
+        else:
+            datefunc = dateutil.datestr
+        datefunc = util.cachefunc(datefunc)
+        getctx = util.cachefunc(lambda x: repo[x[0]])
+        hexfunc = self._hexfunc
+
+        # special handling for working copy "changeset" and "rev" functions
+        if self.opts.get('rev') == 'wdir()':
+            orig = hexfunc
+            hexfunc = lambda x: None if x is None else orig(x)
+            wnode = hexfunc(repo[None].p1().node()) + '+'
+            wrev = str(repo[None].p1().rev())
+            wrevpad = ''
+            if not opts.get('changeset'): # only show + if changeset is hidden
+                wrev += '+'
+                wrevpad = ' '
+            revenc = lambda x: wrev if x is None else str(x) + wrevpad
+            csetenc = lambda x: wnode if x is None else str(x) + ' '
+        else:
+            revenc = csetenc = str
+
+        # opt name, separator, raw value (for json/plain), encoder (for plain)
+        opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser),
+                 ('number', ' ', lambda x: getctx(x).rev(), revenc),
+                 ('changeset', ' ', lambda x: hexfunc(x[0]), csetenc),
+                 ('date', ' ', lambda x: getctx(x).date(), datefunc),
+                 ('file', ' ', lambda x: x[2], str),
+                 ('line_number', ':', lambda x: x[1] + 1, str)]
+        fieldnamemap = {'number': 'rev', 'changeset': 'node'}
+        funcmap = [(get, sep, fieldnamemap.get(op, op), enc)
+                   for op, sep, get, enc in opmap
+                   if opts.get(op)]
+        # no separator for first column
+        funcmap[0] = list(funcmap[0])
+        funcmap[0][1] = ''
+        self.funcmap = funcmap
+
+    def write(self, annotatedresult, lines=None, existinglines=None):
+        """(annotateresult, [str], set([rev, linenum])) -> None. write output.
+        annotateresult can be [(node, linenum, path)], or [(node, linenum)]
+        """
+        pieces = [] # [[str]]
+        maxwidths = [] # [int]
+
+        # calculate padding
+        for f, sep, name, enc in self.funcmap:
+            l = [enc(f(x)) for x in annotatedresult]
+            pieces.append(l)
+            if name in ['node', 'date']: # node and date have fixed sizes
+                l = l[:1]
+            widths = map(encoding.colwidth, set(l))
+            maxwidth = (max(widths) if widths else 0)
+            maxwidths.append(maxwidth)
+
+        # buffered output
+        result = ''
+        for i in pycompat.xrange(len(annotatedresult)):
+            for j, p in enumerate(pieces):
+                sep = self.funcmap[j][1]
+                padding = ' ' * (maxwidths[j] - len(p[i]))
+                result += sep + padding + p[i]
+            if lines:
+                if existinglines is None:
+                    result += ': ' + lines[i]
+                else: # extra formatting showing whether a line exists
+                    key = (annotatedresult[i][0], annotatedresult[i][1])
+                    if key in existinglines:
+                        result += ':  ' + lines[i]
+                    else:
+                        result += ': ' + self.ui.label('-' + lines[i],
+                                                       'diff.deleted')
+
+            if result[-1] != '\n':
+                result += '\n'
+
+        self.ui.write(result)
+
+    @util.propertycache
+    def _hexfunc(self):
+        if self.ui.debugflag or self.opts.get('long_hash'):
+            return node.hex
+        else:
+            return node.short
+
+    def end(self):
+        pass
+
+class jsonformatter(defaultformatter):
+    def __init__(self, ui, repo, opts):
+        super(jsonformatter, self).__init__(ui, repo, opts)
+        self.ui.write('[')
+        self.needcomma = False
+
+    def write(self, annotatedresult, lines=None, existinglines=None):
+        if annotatedresult:
+            self._writecomma()
+
+        pieces = [(name, map(f, annotatedresult))
+                  for f, sep, name, enc in self.funcmap]
+        if lines is not None:
+            pieces.append(('line', lines))
+        pieces.sort()
+
+        seps = [','] * len(pieces[:-1]) + ['']
+
+        result = ''
+        lasti = len(annotatedresult) - 1
+        for i in pycompat.xrange(len(annotatedresult)):
+            result += '\n {\n'
+            for j, p in enumerate(pieces):
+                k, vs = p
+                result += ('  "%s": %s%s\n'
+                           % (k, templatefilters.json(vs[i], paranoid=False),
+                              seps[j]))
+            result += ' }%s' % ('' if i == lasti else ',')
+        if lasti >= 0:
+            self.needcomma = True
+
+        self.ui.write(result)
+
+    def _writecomma(self):
+        if self.needcomma:
+            self.ui.write(',')
+            self.needcomma = False
+
+    @util.propertycache
+    def _hexfunc(self):
+        return node.hex
+
+    def end(self):
+        self.ui.write('\n]\n')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/protocol.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,229 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# protocol: logic for a server providing fastannotate support
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import contextlib
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    extensions,
+    hg,
+    localrepo,
+    util,
+    wireprotov1peer,
+    wireprotov1server,
+)
+from . import context
+
+# common
+
+def _getmaster(ui):
+    """get the mainbranch, and enforce it is set"""
+    master = ui.config('fastannotate', 'mainbranch')
+    if not master:
+        raise error.Abort(_('fastannotate.mainbranch is required '
+                            'for both the client and the server'))
+    return master
+
+# server-side
+
+def _capabilities(orig, repo, proto):
+    result = orig(repo, proto)
+    result.append('getannotate')
+    return result
+
+def _getannotate(repo, proto, path, lastnode):
+    # output:
+    #   FILE := vfspath + '\0' + str(size) + '\0' + content
+    #   OUTPUT := '' | FILE + OUTPUT
+    result = ''
+    buildondemand = repo.ui.configbool('fastannotate', 'serverbuildondemand',
+                                       True)
+    with context.annotatecontext(repo, path) as actx:
+        if buildondemand:
+            # update before responding to the client
+            master = _getmaster(repo.ui)
+            try:
+                if not actx.isuptodate(master):
+                    actx.annotate(master, master)
+            except Exception:
+                # non-fast-forward move or corrupted. rebuild automatically.
+                actx.rebuild()
+                try:
+                    actx.annotate(master, master)
+                except Exception:
+                    actx.rebuild() # delete files
+            finally:
+                # although the "with" context will also do a close/flush, we
+                # need to do it early so we can send the correct response to
+                # the client.
+                actx.close()
+        # send back the full content of revmap and linelog, in the future we
+        # may want to do some rsync-like fancy updating.
+        # the lastnode check is not necessary if the client and the server
+        # agree where the main branch is.
+        if actx.lastnode != lastnode:
+            for p in [actx.revmappath, actx.linelogpath]:
+                if not os.path.exists(p):
+                    continue
+                content = ''
+                with open(p, 'rb') as f:
+                    content = f.read()
+                vfsbaselen = len(repo.vfs.base + '/')
+                relpath = p[vfsbaselen:]
+                result += '%s\0%s\0%s' % (relpath, len(content), content)
+    return result
+
+def _registerwireprotocommand():
+    if 'getannotate' in wireprotov1server.commands:
+        return
+    wireprotov1server.wireprotocommand(
+        'getannotate', 'path lastnode')(_getannotate)
+
+def serveruisetup(ui):
+    _registerwireprotocommand()
+    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
+
+# client-side
+
+def _parseresponse(payload):
+    result = {}
+    i = 0
+    l = len(payload) - 1
+    state = 0 # 0: vfspath, 1: size
+    vfspath = size = ''
+    while i < l:
+        ch = payload[i]
+        if ch == '\0':
+            if state == 1:
+                result[vfspath] = buffer(payload, i + 1, int(size))
+                i += int(size)
+                state = 0
+                vfspath = size = ''
+            elif state == 0:
+                state = 1
+        else:
+            if state == 1:
+                size += ch
+            elif state == 0:
+                vfspath += ch
+        i += 1
+    return result
+
+def peersetup(ui, peer):
+    class fastannotatepeer(peer.__class__):
+        @wireprotov1peer.batchable
+        def getannotate(self, path, lastnode=None):
+            if not self.capable('getannotate'):
+                ui.warn(_('remote peer cannot provide annotate cache\n'))
+                yield None, None
+            else:
+                args = {'path': path, 'lastnode': lastnode or ''}
+                f = wireprotov1peer.future()
+                yield args, f
+                yield _parseresponse(f.value)
+    peer.__class__ = fastannotatepeer
+
+@contextlib.contextmanager
+def annotatepeer(repo):
+    ui = repo.ui
+
+    remotepath = ui.expandpath(
+        ui.config('fastannotate', 'remotepath', 'default'))
+    peer = hg.peer(ui, {}, remotepath)
+
+    try:
+        yield peer
+    finally:
+        peer.close()
+
+def clientfetch(repo, paths, lastnodemap=None, peer=None):
+    """download annotate cache from the server for paths"""
+    if not paths:
+        return
+
+    if peer is None:
+        with annotatepeer(repo) as peer:
+            return clientfetch(repo, paths, lastnodemap, peer)
+
+    if lastnodemap is None:
+        lastnodemap = {}
+
+    ui = repo.ui
+    results = []
+    with peer.commandexecutor() as batcher:
+        ui.debug('fastannotate: requesting %d files\n' % len(paths))
+        for p in paths:
+            results.append(batcher.callcommand(
+                'getannotate',
+                {'path': p, 'lastnode':lastnodemap.get(p)}))
+
+    ui.debug('fastannotate: server returned\n')
+    for result in results:
+        r = result.result()
+        # TODO: pconvert these paths on the server?
+        r = {util.pconvert(p): v for p, v in r.iteritems()}
+        for path in sorted(r):
+            # ignore malicious paths
+            if not path.startswith('fastannotate/') or '/../' in (path + '/'):
+                ui.debug('fastannotate: ignored malicious path %s\n' % path)
+                continue
+            content = r[path]
+            if ui.debugflag:
+                ui.debug('fastannotate: writing %d bytes to %s\n'
+                         % (len(content), path))
+            repo.vfs.makedirs(os.path.dirname(path))
+            with repo.vfs(path, 'wb') as f:
+                f.write(content)
+
+def _filterfetchpaths(repo, paths):
+    """return a subset of paths whose history is long and need to fetch linelog
+    from the server. works with remotefilelog and non-remotefilelog repos.
+    """
+    threshold = repo.ui.configint('fastannotate', 'clientfetchthreshold', 10)
+    if threshold <= 0:
+        return paths
+
+    result = []
+    for path in paths:
+        try:
+            if len(repo.file(path)) >= threshold:
+                result.append(path)
+        except Exception: # file not found etc.
+            result.append(path)
+
+    return result
+
+def localreposetup(ui, repo):
+    class fastannotaterepo(repo.__class__):
+        def prefetchfastannotate(self, paths, peer=None):
+            master = _getmaster(self.ui)
+            needupdatepaths = []
+            lastnodemap = {}
+            try:
+                for path in _filterfetchpaths(self, paths):
+                    with context.annotatecontext(self, path) as actx:
+                        if not actx.isuptodate(master, strict=False):
+                            needupdatepaths.append(path)
+                            lastnodemap[path] = actx.lastnode
+                if needupdatepaths:
+                    clientfetch(self, needupdatepaths, lastnodemap, peer)
+            except Exception as ex:
+                # could be directory not writable or so, not fatal
+                self.ui.debug('fastannotate: prefetch failed: %r\n' % ex)
+    repo.__class__ = fastannotaterepo
+
+def clientreposetup(ui, repo):
+    _registerwireprotocommand()
+    if isinstance(repo, localrepo.localrepository):
+        localreposetup(ui, repo)
+    # TODO: this mutates global state, but only if at least one repo
+    # has the extension enabled. This is probably bad for hgweb.
+    if peersetup not in hg.wirepeersetupfuncs:
+        hg.wirepeersetupfuncs.append(peersetup)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/revmap.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,254 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# revmap: trivial hg hash - linelog rev bidirectional map
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import bisect
+import os
+import struct
+
+from mercurial.node import hex
+from mercurial import (
+    error as hgerror,
+    pycompat,
+)
+from . import error
+
+# the revmap file format is straightforward:
+#
+#    8 bytes: header
+#    1 byte : flag for linelog revision 1
+#    ? bytes: (optional) '\0'-terminated path string
+#             only exists if (flag & renameflag) != 0
+#   20 bytes: hg hash for linelog revision 1
+#    1 byte : flag for linelog revision 2
+#    ? bytes: (optional) '\0'-terminated path string
+#   20 bytes: hg hash for linelog revision 2
+#   ....
+#
+# the implementation is kinda stupid: __init__ loads the whole revmap.
+# no laziness. benchmark shows loading 10000 revisions is about 0.015
+# seconds, which looks enough for our use-case. if this implementation
+# becomes a bottleneck, we can change it to lazily read the file
+# from the end.
+
+# whether the changeset is in the side branch. i.e. not in the linear main
+# branch but only got referenced by lines in merge changesets.
+sidebranchflag = 1
+
+# whether the changeset changes the file path (ie. is a rename)
+renameflag = 2
+
+# len(mercurial.node.nullid)
+_hshlen = 20
+
+class revmap(object):
+    """trivial hg bin hash - linelog rev bidirectional map
+
+    also stores a flag (uint8) for each revision, and track renames.
+    """
+
+    HEADER = b'REVMAP1\0'
+
+    def __init__(self, path=None):
+        """create or load the revmap, optionally associate to a file
+
+        if path is None, the revmap is entirely in-memory. the caller is
+        responsible for locking. concurrent writes to a same file is unsafe.
+        the caller needs to make sure one file is associated to at most one
+        revmap object at a time."""
+        self.path = path
+        self._rev2hsh = [None]
+        self._rev2flag = [None]
+        self._hsh2rev = {}
+        # since rename does not happen frequently, do not store path for every
+        # revision. self._renamerevs can be used for bisecting.
+        self._renamerevs = [0]
+        self._renamepaths = ['']
+        self._lastmaxrev = -1
+        if path:
+            if os.path.exists(path):
+                self._load()
+            else:
+                # write the header so "append" can do incremental updates
+                self.flush()
+
+    def copyfrom(self, rhs):
+        """copy the map data from another revmap. do not affect self.path"""
+        self._rev2hsh = rhs._rev2hsh[:]
+        self._rev2flag = rhs._rev2flag[:]
+        self._hsh2rev = rhs._hsh2rev.copy()
+        self._renamerevs = rhs._renamerevs[:]
+        self._renamepaths = rhs._renamepaths[:]
+        self._lastmaxrev = -1
+
+    @property
+    def maxrev(self):
+        """return max linelog revision number"""
+        return len(self._rev2hsh) - 1
+
+    def append(self, hsh, sidebranch=False, path=None, flush=False):
+        """add a binary hg hash and return the mapped linelog revision.
+        if flush is True, incrementally update the file.
+        """
+        if hsh in self._hsh2rev:
+            raise error.CorruptedFileError('%r is in revmap already' % hex(hsh))
+        if len(hsh) != _hshlen:
+            raise hgerror.ProgrammingError('hsh must be %d-char long' % _hshlen)
+        idx = len(self._rev2hsh)
+        flag = 0
+        if sidebranch:
+            flag |= sidebranchflag
+        if path is not None and path != self._renamepaths[-1]:
+            flag |= renameflag
+            self._renamerevs.append(idx)
+            self._renamepaths.append(path)
+        self._rev2hsh.append(hsh)
+        self._rev2flag.append(flag)
+        self._hsh2rev[hsh] = idx
+        if flush:
+            self.flush()
+        return idx
+
+    def rev2hsh(self, rev):
+        """convert linelog revision to hg hash. return None if not found."""
+        if rev > self.maxrev or rev < 0:
+            return None
+        return self._rev2hsh[rev]
+
+    def rev2flag(self, rev):
+        """get the flag (uint8) for a given linelog revision.
+        return None if revision does not exist.
+        """
+        if rev > self.maxrev or rev < 0:
+            return None
+        return self._rev2flag[rev]
+
+    def rev2path(self, rev):
+        """get the path for a given linelog revision.
+        return None if revision does not exist.
+        """
+        if rev > self.maxrev or rev < 0:
+            return None
+        idx = bisect.bisect_right(self._renamerevs, rev) - 1
+        return self._renamepaths[idx]
+
+    def hsh2rev(self, hsh):
+        """convert hg hash to linelog revision. return None if not found."""
+        return self._hsh2rev.get(hsh)
+
+    def clear(self, flush=False):
+        """make the map empty. if flush is True, write to disk"""
+        # rev 0 is reserved, real rev starts from 1
+        self._rev2hsh = [None]
+        self._rev2flag = [None]
+        self._hsh2rev = {}
+        self._rev2path = ['']
+        self._lastmaxrev = -1
+        if flush:
+            self.flush()
+
+    def flush(self):
+        """write the state down to the file"""
+        if not self.path:
+            return
+        if self._lastmaxrev == -1: # write the entire file
+            with open(self.path, 'wb') as f:
+                f.write(self.HEADER)
+                for i in pycompat.xrange(1, len(self._rev2hsh)):
+                    self._writerev(i, f)
+        else: # append incrementally
+            with open(self.path, 'ab') as f:
+                for i in pycompat.xrange(self._lastmaxrev + 1,
+                                         len(self._rev2hsh)):
+                    self._writerev(i, f)
+        self._lastmaxrev = self.maxrev
+
+    def _load(self):
+        """load state from file"""
+        if not self.path:
+            return
+        # use local variables in a loop. CPython uses LOAD_FAST for them,
+        # which is faster than both LOAD_CONST and LOAD_GLOBAL.
+        flaglen = 1
+        hshlen = _hshlen
+        with open(self.path, 'rb') as f:
+            if f.read(len(self.HEADER)) != self.HEADER:
+                raise error.CorruptedFileError()
+            self.clear(flush=False)
+            while True:
+                buf = f.read(flaglen)
+                if not buf:
+                    break
+                flag = ord(buf)
+                rev = len(self._rev2hsh)
+                if flag & renameflag:
+                    path = self._readcstr(f)
+                    self._renamerevs.append(rev)
+                    self._renamepaths.append(path)
+                hsh = f.read(hshlen)
+                if len(hsh) != hshlen:
+                    raise error.CorruptedFileError()
+                self._hsh2rev[hsh] = rev
+                self._rev2flag.append(flag)
+                self._rev2hsh.append(hsh)
+        self._lastmaxrev = self.maxrev
+
+    def _writerev(self, rev, f):
+        """append a revision data to file"""
+        flag = self._rev2flag[rev]
+        hsh = self._rev2hsh[rev]
+        f.write(struct.pack('B', flag))
+        if flag & renameflag:
+            path = self.rev2path(rev)
+            if path is None:
+                raise error.CorruptedFileError('cannot find path for %s' % rev)
+            f.write(path + '\0')
+        f.write(hsh)
+
+    @staticmethod
+    def _readcstr(f):
+        """read a C-language-like '\0'-terminated string"""
+        buf = ''
+        while True:
+            ch = f.read(1)
+            if not ch: # unexpected eof
+                raise error.CorruptedFileError()
+            if ch == '\0':
+                break
+            buf += ch
+        return buf
+
+    def __contains__(self, f):
+        """(fctx or (node, path)) -> bool.
+        test if (node, path) is in the map, and is not in a side branch.
+        f can be either a tuple of (node, path), or a fctx.
+        """
+        if isinstance(f, tuple): # f: (node, path)
+            hsh, path = f
+        else: # f: fctx
+            hsh, path = f.node(), f.path()
+        rev = self.hsh2rev(hsh)
+        if rev is None:
+            return False
+        if path is not None and path != self.rev2path(rev):
+            return False
+        return (self.rev2flag(rev) & sidebranchflag) == 0
+
+def getlastnode(path):
+    """return the last hash in a revmap, without loading its full content.
+    this is equivalent to `m = revmap(path); m.rev2hsh(m.maxrev)`, but faster.
+    """
+    hsh = None
+    try:
+        with open(path, 'rb') as f:
+            f.seek(-_hshlen, 2)
+            if f.tell() > len(revmap.HEADER):
+                hsh = f.read(_hshlen)
+    except IOError:
+        pass
+    return hsh
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/support.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,122 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# support: fastannotate support for hgweb, and filectx
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+    context as hgcontext,
+    dagop,
+    extensions,
+    hgweb,
+    patch,
+    util,
+)
+
+from . import (
+    context,
+    revmap,
+)
+
+class _lazyfctx(object):
+    """delegates to fctx but do not construct fctx when unnecessary"""
+
+    def __init__(self, repo, node, path):
+        self._node = node
+        self._path = path
+        self._repo = repo
+
+    def node(self):
+        return self._node
+
+    def path(self):
+        return self._path
+
+    @util.propertycache
+    def _fctx(self):
+        return context.resolvefctx(self._repo, self._node, self._path)
+
+    def __getattr__(self, name):
+        return getattr(self._fctx, name)
+
+def _convertoutputs(repo, annotated, contents):
+    """convert fastannotate outputs to vanilla annotate format"""
+    # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
+    # convert to what fctx.annotate returns: [annotateline]
+    results = []
+    fctxmap = {}
+    annotateline = dagop.annotateline
+    for i, (hsh, linenum, path) in enumerate(annotated):
+        if (hsh, path) not in fctxmap:
+            fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
+        # linenum: the user wants 1-based, we have 0-based.
+        lineno = linenum + 1
+        fctx = fctxmap[(hsh, path)]
+        line = contents[i]
+        results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
+    return results
+
+def _getmaster(fctx):
+    """(fctx) -> str"""
+    return fctx._repo.ui.config('fastannotate', 'mainbranch') or 'default'
+
+def _doannotate(fctx, follow=True, diffopts=None):
+    """like the vanilla fctx.annotate, but do it via fastannotate, and make
+    the output format compatible with the vanilla fctx.annotate.
+    may raise Exception, and always return line numbers.
+    """
+    master = _getmaster(fctx)
+    annotated = contents = None
+
+    with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
+        try:
+            annotated, contents = ac.annotate(fctx.rev(), master=master,
+                                              showpath=True, showlines=True)
+        except Exception:
+            ac.rebuild() # try rebuild once
+            fctx._repo.ui.debug('fastannotate: %s: rebuilding broken cache\n'
+                                % fctx._path)
+            try:
+                annotated, contents = ac.annotate(fctx.rev(), master=master,
+                                                  showpath=True, showlines=True)
+            except Exception:
+                raise
+
+    assert annotated and contents
+    return _convertoutputs(fctx._repo, annotated, contents)
+
+def _hgwebannotate(orig, fctx, ui):
+    diffopts = patch.difffeatureopts(ui, untrusted=True,
+                                     section='annotate', whitespace=True)
+    return _doannotate(fctx, diffopts=diffopts)
+
+def _fctxannotate(orig, self, follow=False, linenumber=False, skiprevs=None,
+                  diffopts=None):
+    if skiprevs:
+        # skiprevs is not supported yet
+        return orig(self, follow, linenumber, skiprevs=skiprevs,
+                    diffopts=diffopts)
+    try:
+        return _doannotate(self, follow, diffopts)
+    except Exception as ex:
+        self._repo.ui.debug('fastannotate: falling back to the vanilla '
+                            'annotate: %r\n' % ex)
+        return orig(self, follow=follow, skiprevs=skiprevs,
+                    diffopts=diffopts)
+
+def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
+    # skipset: a set-like used to test if a fctx needs to be downloaded
+    skipset = None
+    with context.fctxannotatecontext(self, follow, diffopts) as ac:
+        skipset = revmap.revmap(ac.revmappath)
+    return orig(self, follow, skiprevs=skiprevs, diffopts=diffopts,
+                prefetchskip=skipset)
+
+def replacehgwebannotate():
+    extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
+
+def replacefctxannotate():
+    extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
--- a/hgext/fix.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/fix.py	Fri Aug 24 12:55:05 2018 -0700
@@ -96,15 +96,16 @@
 # user.
 configitem('fix', 'maxfilesize', default='2MB')
 
-@command('fix',
-    [('', 'all', False, _('fix all non-public non-obsolete revisions')),
-     ('', 'base', [], _('revisions to diff against (overrides automatic '
-                        'selection, and applies to every revision being '
-                        'fixed)'), _('REV')),
-     ('r', 'rev', [], _('revisions to fix'), _('REV')),
-     ('w', 'working-dir', False, _('fix the working directory')),
-     ('', 'whole', False, _('always fix every line of a file'))],
-    _('[OPTION]... [FILE]...'))
+allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions'))
+baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic '
+                             'selection, and applies to every revision being '
+                             'fixed)'), _('REV'))
+revopt = ('r', 'rev', [], _('revisions to fix'), _('REV'))
+wdiropt = ('w', 'working-dir', False, _('fix the working directory'))
+wholeopt = ('', 'whole', False, _('always fix every line of a file'))
+usage = _('[OPTION]... [FILE]...')
+
+@command('fix', [allopt, baseopt, revopt, wdiropt, wholeopt], usage)
 def fix(ui, repo, *pats, **opts):
     """rewrite file content in changesets or working directory
 
@@ -161,6 +162,7 @@
         # it makes the results more easily reproducible.
         filedata = collections.defaultdict(dict)
         replacements = {}
+        wdirwritten = False
         commitorder = sorted(revstofix, reverse=True)
         with ui.makeprogress(topic=_('fixing'), unit=_('files'),
                              total=sum(numitems.values())) as progress:
@@ -178,12 +180,28 @@
                     ctx = repo[rev]
                     if rev == wdirrev:
                         writeworkingdir(repo, ctx, filedata[rev], replacements)
+                        wdirwritten = bool(filedata[rev])
                     else:
                         replacerev(ui, repo, ctx, filedata[rev], replacements)
                     del filedata[rev]
 
-        replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
-        scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
+        cleanup(repo, replacements, wdirwritten)
+
+def cleanup(repo, replacements, wdirwritten):
+    """Calls scmutil.cleanupnodes() with the given replacements.
+
+    "replacements" is a dict from nodeid to nodeid, with one key and one value
+    for every revision that was affected by fixing. This is slightly different
+    from cleanupnodes().
+
+    "wdirwritten" is a bool which tells whether the working copy was affected by
+    fixing, since it has no entry in "replacements".
+
+    Useful as a hook point for extending "hg fix" with output summarizing the
+    effects of the command, though we choose not to output anything here.
+    """
+    replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
+    scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
 
 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
     """"Constructs the list of files to be fixed at specific revisions
@@ -267,8 +285,8 @@
     """
     files = set()
     for basectx in basectxs:
-        stat = repo.status(
-            basectx, fixctx, match=match, clean=bool(pats), unknown=bool(pats))
+        stat = basectx.status(fixctx, match=match, listclean=bool(pats),
+                              listunknown=bool(pats))
         files.update(
             set(itertools.chain(stat.added, stat.modified, stat.clean,
                                 stat.unknown)))
@@ -417,12 +435,15 @@
     starting with the file's content in the fixctx. Fixers that support line
     ranges will affect lines that have changed relative to any of the basectxs
     (i.e. they will only avoid lines that are common to all basectxs).
+
+    A fixer tool's stdout will become the file's new content if and only if it
+    exits with code zero.
     """
     newdata = fixctx[path].data()
     for fixername, fixer in fixers.iteritems():
         if fixer.affects(opts, fixctx, path):
-            ranges = lineranges(opts, path, basectxs, fixctx, newdata)
-            command = fixer.command(ui, path, ranges)
+            rangesfn = lambda: lineranges(opts, path, basectxs, fixctx, newdata)
+            command = fixer.command(ui, path, rangesfn)
             if command is None:
                 continue
             ui.debug('subprocess: %s\n' % (command,))
@@ -436,8 +457,11 @@
             newerdata, stderr = proc.communicate(newdata)
             if stderr:
                 showstderr(ui, fixctx.rev(), fixername, stderr)
-            else:
+            if proc.returncode == 0:
                 newdata = newerdata
+            elif not stderr:
+                showstderr(ui, fixctx.rev(), fixername,
+                           _('exited with status %d\n') % (proc.returncode,))
     return newdata
 
 def showstderr(ui, rev, fixername, stderr):
@@ -567,7 +591,7 @@
         """Should this fixer run on the file at the given path and context?"""
         return scmutil.match(fixctx, [self._fileset], opts)(path)
 
-    def command(self, ui, path, ranges):
+    def command(self, ui, path, rangesfn):
         """A shell command to use to invoke this fixer on the given file/lines
 
         May return None if there is no appropriate command to run for the given
@@ -577,6 +601,7 @@
         parts = [expand(ui, self._command,
                         {'rootpath': path, 'basename': os.path.basename(path)})]
         if self._linerange:
+            ranges = rangesfn()
             if not ranges:
                 # No line ranges to fix, so don't run the fixer.
                 return None
--- a/hgext/hgk.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/hgk.py	Fri Aug 24 12:55:05 2018 -0700
@@ -227,7 +227,7 @@
             else:
                 i -= chunk
 
-            for x in xrange(chunk):
+            for x in pycompat.xrange(chunk):
                 if i + x >= count:
                     l[chunk - x:] = [0] * (chunk - x)
                     break
@@ -238,7 +238,7 @@
                 else:
                     if (i + x) in repo:
                         l[x] = 1
-            for x in xrange(chunk - 1, -1, -1):
+            for x in pycompat.xrange(chunk - 1, -1, -1):
                 if l[x] != 0:
                     yield (i + x, full is not None and l[x] or None)
             if i == 0:
@@ -249,7 +249,7 @@
         if len(ar) == 0:
             return 1
         mask = 0
-        for i in xrange(len(ar)):
+        for i in pycompat.xrange(len(ar)):
             if sha in reachable[i]:
                 mask |= 1 << i
 
--- a/hgext/histedit.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/histedit.py	Fri Aug 24 12:55:05 2018 -0700
@@ -386,7 +386,7 @@
         rules = []
         rulelen = int(lines[index])
         index += 1
-        for i in xrange(rulelen):
+        for i in pycompat.xrange(rulelen):
             ruleaction = lines[index]
             index += 1
             rule = lines[index]
@@ -397,7 +397,7 @@
         replacements = []
         replacementlen = int(lines[index])
         index += 1
-        for i in xrange(replacementlen):
+        for i in pycompat.xrange(replacementlen):
             replacement = lines[index]
             original = node.bin(replacement[:40])
             succ = [node.bin(replacement[i:i + 40]) for i in
@@ -1084,7 +1084,7 @@
             raise error.Abort(_('only --commands argument allowed with '
                                '--edit-plan'))
     else:
-        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
+        if state.inprogress():
             raise error.Abort(_('history edit already in progress, try '
                                '--continue or --abort'))
         if outg:
@@ -1624,8 +1624,8 @@
 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
     if isinstance(nodelist, str):
         nodelist = [nodelist]
-    if os.path.exists(os.path.join(repo.path, 'histedit-state')):
-        state = histeditstate(repo)
+    state = histeditstate(repo)
+    if state.inprogress():
         state.read()
         histedit_nodes = {action.node for action
                           in state.actions if action.node}
@@ -1638,9 +1638,9 @@
 extensions.wrapfunction(repair, 'strip', stripwrapper)
 
 def summaryhook(ui, repo):
-    if not os.path.exists(repo.vfs.join('histedit-state')):
+    state = histeditstate(repo)
+    if not state.inprogress():
         return
-    state = histeditstate(repo)
     state.read()
     if state.actions:
         # i18n: column positioning for "hg summary"
--- a/hgext/lfs/__init__.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/lfs/__init__.py	Fri Aug 24 12:55:05 2018 -0700
@@ -136,7 +136,7 @@
     exchange,
     extensions,
     filelog,
-    fileset,
+    filesetlang,
     hg,
     localrepo,
     minifileset,
@@ -261,7 +261,7 @@
         # deprecated config: lfs.threshold
         threshold = repo.ui.configbytes('lfs', 'threshold')
         if threshold:
-            fileset.parse(trackspec)  # make sure syntax errors are confined
+            filesetlang.parse(trackspec)  # make sure syntax errors are confined
             trackspec = "(%s) | size('>%d')" % (trackspec, threshold)
 
         return minifileset.compile(trackspec)
@@ -357,11 +357,11 @@
     # when writing a bundle via "hg bundle" command, upload related LFS blobs
     wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
 
-@filesetpredicate('lfs()', callstatus=True)
+@filesetpredicate('lfs()')
 def lfsfileset(mctx, x):
     """File that uses LFS storage."""
     # i18n: "lfs" is a keyword
-    fileset.getargs(x, 0, 0, _("lfs takes no arguments"))
+    filesetlang.getargs(x, 0, 0, _("lfs takes no arguments"))
     ctx = mctx.ctx
     def lfsfilep(f):
         return wrapper.pointerfromctx(ctx, f, removed=True) is not None
--- a/hgext/mq.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/mq.py	Fri Aug 24 12:55:05 2018 -0700
@@ -414,7 +414,7 @@
         the field and a blank line.'''
         if self.message:
             subj = 'subject: ' + self.message[0].lower()
-            for i in xrange(len(self.comments)):
+            for i in pycompat.xrange(len(self.comments)):
                 if subj == self.comments[i].lower():
                     del self.comments[i]
                     self.message = self.message[2:]
@@ -662,13 +662,13 @@
         exactneg = [g for g in patchguards
                     if g.startswith('-') and g[1:] in guards]
         if exactneg:
-            return False, pycompat.byterepr(exactneg[0])
+            return False, stringutil.pprint(exactneg[0])
         pos = [g for g in patchguards if g.startswith('+')]
         exactpos = [g for g in pos if g[1:] in guards]
         if pos:
             if exactpos:
-                return True, pycompat.byterepr(exactpos[0])
-            return False, ' '.join([pycompat.byterepr(p) for p in pos])
+                return True, stringutil.pprint(exactpos[0])
+            return False, ' '.join([stringutil.pprint(p) for p in pos])
         return True, ''
 
     def explainpushable(self, idx, all_patches=False):
@@ -1800,7 +1800,7 @@
                 # if the patch excludes a modified file, mark that
                 # file with mtime=0 so status can see it.
                 mm = []
-                for i in xrange(len(m) - 1, -1, -1):
+                for i in pycompat.xrange(len(m) - 1, -1, -1):
                     if not match1(m[i]):
                         mm.append(m[i])
                         del m[i]
@@ -1908,7 +1908,7 @@
         else:
             start = self.series.index(patch) + 1
         unapplied = []
-        for i in xrange(start, len(self.series)):
+        for i in pycompat.xrange(start, len(self.series)):
             pushable, reason = self.pushable(i)
             if pushable:
                 unapplied.append((i, self.series[i]))
@@ -1946,7 +1946,7 @@
         if not missing:
             if self.ui.verbose:
                 idxwidth = len("%d" % (start + length - 1))
-            for i in xrange(start, start + length):
+            for i in pycompat.xrange(start, start + length):
                 patch = self.series[i]
                 if patch in applied:
                     char, state = 'A', 'applied'
@@ -2091,7 +2091,7 @@
         def nextpatch(start):
             if all_patches or start >= len(self.series):
                 return start
-            for i in xrange(start, len(self.series)):
+            for i in pycompat.xrange(start, len(self.series)):
                 p, reason = self.pushable(i)
                 if p:
                     return i
@@ -2876,7 +2876,7 @@
         if args or opts.get(r'none'):
             raise error.Abort(_('cannot mix -l/--list with options or '
                                'arguments'))
-        for i in xrange(len(q.series)):
+        for i in pycompat.xrange(len(q.series)):
             status(i)
         return
     if not args or args[0][0:1] in '-+':
@@ -3179,14 +3179,16 @@
     pushable = lambda i: q.pushable(q.applied[i].name)[0]
     if args or opts.get('none'):
         old_unapplied = q.unapplied(repo)
-        old_guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+        old_guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
         q.setactive(args)
         q.savedirty()
         if not args:
             ui.status(_('guards deactivated\n'))
         if not opts.get('pop') and not opts.get('reapply'):
             unapplied = q.unapplied(repo)
-            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+            guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
             if len(unapplied) != len(old_unapplied):
                 ui.status(_('number of unguarded, unapplied patches has '
                             'changed from %d to %d\n') %
@@ -3225,7 +3227,7 @@
     reapply = opts.get('reapply') and q.applied and q.applied[-1].name
     popped = False
     if opts.get('pop') or opts.get('reapply'):
-        for i in xrange(len(q.applied)):
+        for i in pycompat.xrange(len(q.applied)):
             if not pushable(i):
                 ui.status(_('popping guarded patches\n'))
                 popped = True
--- a/hgext/narrow/__init__.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/narrow/__init__.py	Fri Aug 24 12:55:05 2018 -0700
@@ -15,17 +15,15 @@
 testedwith = 'ships-with-hg-core'
 
 from mercurial import (
-    changegroup,
     extensions,
-    hg,
     localrepo,
     registrar,
+    repository,
     verify as verifymod,
 )
 
 from . import (
     narrowbundle2,
-    narrowchangegroup,
     narrowcommands,
     narrowcopies,
     narrowpatch,
@@ -55,7 +53,7 @@
 cmdtable = narrowcommands.table
 
 def featuresetup(ui, features):
-    features.add(changegroup.NARROW_REQUIREMENT)
+    features.add(repository.NARROW_REQUIREMENT)
 
 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
@@ -63,7 +61,6 @@
     narrowrevlog.setup()
     narrowbundle2.setup()
     narrowcommands.setup()
-    narrowchangegroup.setup()
     narrowwirepeer.uisetup()
 
 def reposetup(ui, repo):
@@ -71,7 +68,7 @@
     if not repo.local():
         return
 
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowcopies.setup(repo)
         narrowpatch.setup(repo)
@@ -86,8 +83,6 @@
 
 def extsetup(ui):
     extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
-    extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
-    extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
 
 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
--- a/hgext/narrow/narrowbundle2.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/narrow/narrowbundle2.py	Fri Aug 24 12:55:05 2018 -0700
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import collections
 import errno
 import struct
 
@@ -15,17 +14,16 @@
 from mercurial.node import (
     bin,
     nullid,
-    nullrev,
 )
 from mercurial import (
     bundle2,
     changegroup,
-    dagutil,
     error,
     exchange,
     extensions,
     narrowspec,
     repair,
+    repository,
     util,
     wireprototypes,
 )
@@ -52,171 +50,12 @@
     caps[NARROWCAP] = ['v0']
     return caps
 
-def _computeellipsis(repo, common, heads, known, match, depth=None):
-    """Compute the shape of a narrowed DAG.
-
-    Args:
-      repo: The repository we're transferring.
-      common: The roots of the DAG range we're transferring.
-              May be just [nullid], which means all ancestors of heads.
-      heads: The heads of the DAG range we're transferring.
-      match: The narrowmatcher that allows us to identify relevant changes.
-      depth: If not None, only consider nodes to be full nodes if they are at
-             most depth changesets away from one of heads.
-
-    Returns:
-      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
-
-        visitnodes: The list of nodes (either full or ellipsis) which
-                    need to be sent to the client.
-        relevant_nodes: The set of changelog nodes which change a file inside
-                 the narrowspec. The client needs these as non-ellipsis nodes.
-        ellipsisroots: A dict of {rev: parents} that is used in
-                       narrowchangegroup to produce ellipsis nodes with the
-                       correct parents.
-    """
-    cl = repo.changelog
-    mfl = repo.manifestlog
-
-    cldag = dagutil.revlogdag(cl)
-    # dagutil does not like nullid/nullrev
-    commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
-    headsrevs = cldag.internalizeall(heads)
-    if depth:
-        revdepth = {h: 0 for h in headsrevs}
-
-    ellipsisheads = collections.defaultdict(set)
-    ellipsisroots = collections.defaultdict(set)
-
-    def addroot(head, curchange):
-        """Add a root to an ellipsis head, splitting heads with 3 roots."""
-        ellipsisroots[head].add(curchange)
-        # Recursively split ellipsis heads with 3 roots by finding the
-        # roots' youngest common descendant which is an elided merge commit.
-        # That descendant takes 2 of the 3 roots as its own, and becomes a
-        # root of the head.
-        while len(ellipsisroots[head]) > 2:
-            child, roots = splithead(head)
-            splitroots(head, child, roots)
-            head = child  # Recurse in case we just added a 3rd root
-
-    def splitroots(head, child, roots):
-        ellipsisroots[head].difference_update(roots)
-        ellipsisroots[head].add(child)
-        ellipsisroots[child].update(roots)
-        ellipsisroots[child].discard(child)
-
-    def splithead(head):
-        r1, r2, r3 = sorted(ellipsisroots[head])
-        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
-            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
-                            nr1, head, nr2, head)
-            for j in mid:
-                if j == nr2:
-                    return nr2, (nr1, nr2)
-                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
-                    return j, (nr1, nr2)
-        raise error.Abort('Failed to split up ellipsis node! head: %d, '
-                          'roots: %d %d %d' % (head, r1, r2, r3))
-
-    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
-    visit = reversed(missing)
-    relevant_nodes = set()
-    visitnodes = [cl.node(m) for m in missing]
-    required = set(headsrevs) | known
-    for rev in visit:
-        clrev = cl.changelogrevision(rev)
-        ps = cldag.parents(rev)
-        if depth is not None:
-            curdepth = revdepth[rev]
-            for p in ps:
-                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
-        needed = False
-        shallow_enough = depth is None or revdepth[rev] <= depth
-        if shallow_enough:
-            curmf = mfl[clrev.manifest].read()
-            if ps:
-                # We choose to not trust the changed files list in
-                # changesets because it's not always correct. TODO: could
-                # we trust it for the non-merge case?
-                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
-                needed = bool(curmf.diff(p1mf, match))
-                if not needed and len(ps) > 1:
-                    # For merge changes, the list of changed files is not
-                    # helpful, since we need to emit the merge if a file
-                    # in the narrow spec has changed on either side of the
-                    # merge. As a result, we do a manifest diff to check.
-                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
-                    needed = bool(curmf.diff(p2mf, match))
-            else:
-                # For a root node, we need to include the node if any
-                # files in the node match the narrowspec.
-                needed = any(curmf.walk(match))
-
-        if needed:
-            for head in ellipsisheads[rev]:
-                addroot(head, rev)
-            for p in ps:
-                required.add(p)
-            relevant_nodes.add(cl.node(rev))
-        else:
-            if not ps:
-                ps = [nullrev]
-            if rev in required:
-                for head in ellipsisheads[rev]:
-                    addroot(head, rev)
-                for p in ps:
-                    ellipsisheads[p].add(rev)
-            else:
-                for p in ps:
-                    ellipsisheads[p] |= ellipsisheads[rev]
-
-    # add common changesets as roots of their reachable ellipsis heads
-    for c in commonrevs:
-        for head in ellipsisheads[c]:
-            addroot(head, c)
-    return visitnodes, relevant_nodes, ellipsisroots
-
-def _packellipsischangegroup(repo, common, match, relevant_nodes,
-                             ellipsisroots, visitnodes, depth, source, version):
-    if version in ('01', '02'):
-        raise error.Abort(
-            'ellipsis nodes require at least cg3 on client and server, '
-            'but negotiated version %s' % version)
-    # We wrap cg1packer.revchunk, using a side channel to pass
-    # relevant_nodes into that area. Then if linknode isn't in the
-    # set, we know we have an ellipsis node and we should defer
-    # sending that node's data. We override close() to detect
-    # pending ellipsis nodes and flush them.
-    packer = changegroup.getbundler(version, repo)
-    # Let the packer have access to the narrow matcher so it can
-    # omit filelogs and dirlogs as needed
-    packer._narrow_matcher = lambda : match
-    # Give the packer the list of nodes which should not be
-    # ellipsis nodes. We store this rather than the set of nodes
-    # that should be an ellipsis because for very large histories
-    # we expect this to be significantly smaller.
-    packer.full_nodes = relevant_nodes
-    # Maps ellipsis revs to their roots at the changelog level.
-    packer.precomputed_ellipsis = ellipsisroots
-    # Maps CL revs to per-revlog revisions. Cleared in close() at
-    # the end of each group.
-    packer.clrev_to_localrev = {}
-    packer.next_clrev_to_localrev = {}
-    # Maps changelog nodes to changelog revs. Filled in once
-    # during changelog stage and then left unmodified.
-    packer.clnode_to_rev = {}
-    packer.changelog_done = False
-    # If true, informs the packer that it is serving shallow content and might
-    # need to pack file contents not introduced by the changes being packed.
-    packer.is_shallow = depth is not None
-
-    return packer.generate(common, visitnodes, False, source)
-
 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(bundler, repo, source,
                                     bundlecaps=None, b2caps=None, heads=None,
                                     common=None, **kwargs):
+    assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')
+
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
@@ -231,32 +70,6 @@
     include = sorted(filter(bool, kwargs.get(r'includepats', [])))
     exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
-    if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
-        outgoing = exchange._computeoutgoing(repo, heads, common)
-        if not outgoing.missing:
-            return
-        def wrappedgetbundler(orig, *args, **kwargs):
-            bundler = orig(*args, **kwargs)
-            bundler._narrow_matcher = lambda : newmatch
-            return bundler
-        with extensions.wrappedfunction(changegroup, 'getbundler',
-                                        wrappedgetbundler):
-            cg = changegroup.makestream(repo, outgoing, version, source)
-        part = bundler.newpart('changegroup', data=cg)
-        part.addparam('version', version)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
-
-        if include or exclude:
-            narrowspecpart = bundler.newpart(_SPECPART)
-            if include:
-                narrowspecpart.addparam(
-                    _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
-            if exclude:
-                narrowspecpart.addparam(
-                    _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
-
-        return
 
     depth = kwargs.get(r'depth', None)
     if depth is not None:
@@ -300,72 +113,46 @@
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
-        newvisit, newfull, newellipsis = _computeellipsis(
+        newvisit, newfull, newellipsis = exchange._computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
-            cg = _packellipsischangegroup(
-                repo, common, newmatch, newfull, newellipsis,
-                newvisit, depth, source, version)
-            part = bundler.newpart('changegroup', data=cg)
+            packer = changegroup.getbundler(version, repo,
+                                            filematcher=newmatch,
+                                            ellipses=True,
+                                            shallow=depth is not None,
+                                            ellipsisroots=newellipsis,
+                                            fullnodes=newfull)
+            cgdata = packer.generate(common, newvisit, False, source)
+
+            part = bundler.newpart('changegroup', data=cgdata)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')
 
-    visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
+    visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)
 
     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
-        cg = _packellipsischangegroup(
-            repo, common, newmatch, relevant_nodes, ellipsisroots,
-            visitnodes, depth, source, version)
-        part = bundler.newpart('changegroup', data=cg)
+        packer = changegroup.getbundler(version, repo,
+                                        filematcher=newmatch,
+                                        ellipses=True,
+                                        shallow=depth is not None,
+                                        ellipsisroots=ellipsisroots,
+                                        fullnodes=relevant_nodes)
+        cgdata = packer.generate(common, visitnodes, False, source)
+
+        part = bundler.newpart('changegroup', data=cgdata)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')
 
-def applyacl_narrow(repo, kwargs):
-    ui = repo.ui
-    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
-    user_includes = ui.configlist(
-        _NARROWACL_SECTION, username + '.includes',
-        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
-    user_excludes = ui.configlist(
-        _NARROWACL_SECTION, username + '.excludes',
-        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
-    if not user_includes:
-        raise error.Abort(_("{} configuration for user {} is empty")
-                          .format(_NARROWACL_SECTION, username))
-
-    user_includes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_includes]
-    user_excludes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
-
-    req_includes = set(kwargs.get(r'includepats', []))
-    req_excludes = set(kwargs.get(r'excludepats', []))
-
-    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
-        req_includes, req_excludes, user_includes, user_excludes)
-
-    if invalid_includes:
-        raise error.Abort(
-            _("The following includes are not accessible for {}: {}")
-            .format(username, invalid_includes))
-
-    new_args = {}
-    new_args.update(kwargs)
-    new_args['includepats'] = req_includes
-    if req_excludes:
-        new_args['excludepats'] = req_excludes
-    return new_args
-
 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
-    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
-        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+    if not repository.NARROW_REQUIREMENT in op.repo.requirements:
+        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
         op.repo._writerequirements()
     op.repo.setnarrowpats(includepats, excludepats)
 
@@ -479,27 +266,15 @@
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
-            getbundlechangegrouppart_narrow(
-                *args, **applyacl_narrow(repo, kwargs))
-        elif kwargs.get(r'narrow', False):
+            kwargs = exchange.applynarrowacl(repo, kwargs)
+
+        if (kwargs.get(r'narrow', False) and
+            repo.ui.configbool('experimental', 'narrowservebrokenellipses')):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
 
-    # disable rev branch cache exchange when serving a narrow bundle
-    # (currently incompatible with that part)
-    origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']
-    def wrappedcgfn(*args, **kwargs):
-        repo = args[1]
-        if repo.ui.has_section(_NARROWACL_SECTION):
-            return
-        elif kwargs.get(r'narrow', False):
-            return
-        else:
-            origrbcfn(*args, **kwargs)
-    exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn
-
     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
--- a/hgext/narrow/narrowchangegroup.py	Sat Aug 18 10:24:57 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,373 +0,0 @@
-# narrowchangegroup.py - narrow clone changegroup creation and consumption
-#
-# Copyright 2017 Google, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from mercurial.i18n import _
-from mercurial import (
-    changegroup,
-    error,
-    extensions,
-    manifest,
-    match as matchmod,
-    mdiff,
-    node,
-    revlog,
-    util,
-)
-
-def setup():
-
-    def _cgmatcher(cgpacker):
-        localmatcher = cgpacker._repo.narrowmatch()
-        remotematcher = getattr(cgpacker, '_narrow_matcher', lambda: None)()
-        if remotematcher:
-            return matchmod.intersectmatchers(localmatcher, remotematcher)
-        else:
-            return localmatcher
-
-    def prune(orig, self, revlog, missing, commonrevs):
-        if isinstance(revlog, manifest.manifestrevlog):
-            matcher = _cgmatcher(self)
-            if (matcher and
-                not matcher.visitdir(revlog._dir[:-1] or '.')):
-                return []
-        return orig(self, revlog, missing, commonrevs)
-
-    extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
-
-    def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
-                      source):
-        matcher = _cgmatcher(self)
-        if matcher:
-            changedfiles = list(filter(matcher, changedfiles))
-        if getattr(self, 'is_shallow', False):
-            # See comment in generate() for why this sadness is a thing.
-            mfdicts = self._mfdicts
-            del self._mfdicts
-            # In a shallow clone, the linknodes callback needs to also include
-            # those file nodes that are in the manifests we sent but weren't
-            # introduced by those manifests.
-            commonctxs = [self._repo[c] for c in commonrevs]
-            oldlinknodes = linknodes
-            clrev = self._repo.changelog.rev
-            def linknodes(flog, fname):
-                for c in commonctxs:
-                    try:
-                        fnode = c.filenode(fname)
-                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
-                    except error.ManifestLookupError:
-                        pass
-                links = oldlinknodes(flog, fname)
-                if len(links) != len(mfdicts):
-                    for mf, lr in mfdicts:
-                        fnode = mf.get(fname, None)
-                        if fnode in links:
-                            links[fnode] = min(links[fnode], lr, key=clrev)
-                        elif fnode:
-                            links[fnode] = lr
-                return links
-        return orig(self, changedfiles, linknodes, commonrevs, source)
-    extensions.wrapfunction(
-        changegroup.cg1packer, 'generatefiles', generatefiles)
-
-    def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
-        n = revlog_.node(rev)
-        p1n, p2n = revlog_.node(p1), revlog_.node(p2)
-        flags = revlog_.flags(rev)
-        flags |= revlog.REVIDX_ELLIPSIS
-        meta = packer.builddeltaheader(
-            n, p1n, p2n, node.nullid, linknode, flags)
-        # TODO: try and actually send deltas for ellipsis data blocks
-        diffheader = mdiff.trivialdiffheader(len(data))
-        l = len(meta) + len(diffheader) + len(data)
-        return ''.join((changegroup.chunkheader(l),
-                        meta,
-                        diffheader,
-                        data))
-
-    def close(orig, self):
-        getattr(self, 'clrev_to_localrev', {}).clear()
-        if getattr(self, 'next_clrev_to_localrev', {}):
-            self.clrev_to_localrev = self.next_clrev_to_localrev
-            del self.next_clrev_to_localrev
-        self.changelog_done = True
-        return orig(self)
-    extensions.wrapfunction(changegroup.cg1packer, 'close', close)
-
-    # In a perfect world, we'd generate better ellipsis-ified graphs
-    # for non-changelog revlogs. In practice, we haven't started doing
-    # that yet, so the resulting DAGs for the manifestlog and filelogs
-    # are actually full of bogus parentage on all the ellipsis
-    # nodes. This has the side effect that, while the contents are
-    # correct, the individual DAGs might be completely out of whack in
-    # a case like 882681bc3166 and its ancestors (back about 10
-    # revisions or so) in the main hg repo.
-    #
-    # The one invariant we *know* holds is that the new (potentially
-    # bogus) DAG shape will be valid if we order the nodes in the
-    # order that they're introduced in dramatis personae by the
-    # changelog, so what we do is we sort the non-changelog histories
-    # by the order in which they are used by the changelog.
-    def _sortgroup(orig, self, revlog, nodelist, lookup):
-        if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
-            return orig(self, revlog, nodelist, lookup)
-        key = lambda n: self.clnode_to_rev[lookup(n)]
-        return [revlog.rev(n) for n in sorted(nodelist, key=key)]
-
-    extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
-
-    def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
-        '''yield a sequence of changegroup chunks (strings)'''
-        # Note: other than delegating to orig, the only deviation in
-        # logic from normal hg's generate is marked with BEGIN/END
-        # NARROW HACK.
-        if not util.safehasattr(self, 'full_nodes'):
-            # not sending a narrow bundle
-            for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
-                yield x
-            return
-
-        repo = self._repo
-        cl = repo.changelog
-        mfl = repo.manifestlog
-        mfrevlog = mfl._revlog
-
-        clrevorder = {}
-        mfs = {} # needed manifests
-        fnodes = {} # needed file nodes
-        changedfiles = set()
-
-        # Callback for the changelog, used to collect changed files and manifest
-        # nodes.
-        # Returns the linkrev node (identity in the changelog case).
-        def lookupcl(x):
-            c = cl.read(x)
-            clrevorder[x] = len(clrevorder)
-            # BEGIN NARROW HACK
-            #
-            # Only update mfs if x is going to be sent. Otherwise we
-            # end up with bogus linkrevs specified for manifests and
-            # we skip some manifest nodes that we should otherwise
-            # have sent.
-            if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
-                n = c[0]
-                # record the first changeset introducing this manifest version
-                mfs.setdefault(n, x)
-                # Set this narrow-specific dict so we have the lowest manifest
-                # revnum to look up for this cl revnum. (Part of mapping
-                # changelog ellipsis parents to manifest ellipsis parents)
-                self.next_clrev_to_localrev.setdefault(cl.rev(x),
-                                                       mfrevlog.rev(n))
-            # We can't trust the changed files list in the changeset if the
-            # client requested a shallow clone.
-            if self.is_shallow:
-                changedfiles.update(mfl[c[0]].read().keys())
-            else:
-                changedfiles.update(c[3])
-            # END NARROW HACK
-            # Record a complete list of potentially-changed files in
-            # this manifest.
-            return x
-
-        self._verbosenote(_('uncompressed size of bundle content:\n'))
-        size = 0
-        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
-            size += len(chunk)
-            yield chunk
-        self._verbosenote(_('%8.i (changelog)\n') % size)
-
-        # We need to make sure that the linkrev in the changegroup refers to
-        # the first changeset that introduced the manifest or file revision.
-        # The fastpath is usually safer than the slowpath, because the filelogs
-        # are walked in revlog order.
-        #
-        # When taking the slowpath with reorder=None and the manifest revlog
-        # uses generaldelta, the manifest may be walked in the "wrong" order.
-        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
-        # cc0ff93d0c0c).
-        #
-        # When taking the fastpath, we are only vulnerable to reordering
-        # of the changelog itself. The changelog never uses generaldelta, so
-        # it is only reordered when reorder=True. To handle this case, we
-        # simply take the slowpath, which already has the 'clrevorder' logic.
-        # This was also fixed in cc0ff93d0c0c.
-        fastpathlinkrev = fastpathlinkrev and not self._reorder
-        # Treemanifests don't work correctly with fastpathlinkrev
-        # either, because we don't discover which directory nodes to
-        # send along with files. This could probably be fixed.
-        fastpathlinkrev = fastpathlinkrev and (
-            'treemanifest' not in repo.requirements)
-        # Shallow clones also don't work correctly with fastpathlinkrev
-        # because file nodes may need to be sent for a manifest even if they
-        # weren't introduced by that manifest.
-        fastpathlinkrev = fastpathlinkrev and not self.is_shallow
-
-        for chunk in self.generatemanifests(commonrevs, clrevorder,
-                fastpathlinkrev, mfs, fnodes, source):
-            yield chunk
-        # BEGIN NARROW HACK
-        mfdicts = None
-        if self.is_shallow:
-            mfdicts = [(self._repo.manifestlog[n].read(), lr)
-                       for (n, lr) in mfs.iteritems()]
-        # END NARROW HACK
-        mfs.clear()
-        clrevs = set(cl.rev(x) for x in clnodes)
-
-        if not fastpathlinkrev:
-            def linknodes(unused, fname):
-                return fnodes.get(fname, {})
-        else:
-            cln = cl.node
-            def linknodes(filerevlog, fname):
-                llr = filerevlog.linkrev
-                fln = filerevlog.node
-                revs = ((r, llr(r)) for r in filerevlog)
-                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
-
-        # BEGIN NARROW HACK
-        #
-        # We need to pass the mfdicts variable down into
-        # generatefiles(), but more than one command might have
-        # wrapped generatefiles so we can't modify the function
-        # signature. Instead, we pass the data to ourselves using an
-        # instance attribute. I'm sorry.
-        self._mfdicts = mfdicts
-        # END NARROW HACK
-        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
-                                        source):
-            yield chunk
-
-        yield self.close()
-
-        if clnodes:
-            repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
-    extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
-
-    def revchunk(orig, self, revlog, rev, prev, linknode):
-        if not util.safehasattr(self, 'full_nodes'):
-            # not sending a narrow changegroup
-            for x in orig(self, revlog, rev, prev, linknode):
-                yield x
-            return
-        # build up some mapping information that's useful later. See
-        # the local() nested function below.
-        if not self.changelog_done:
-            self.clnode_to_rev[linknode] = rev
-            linkrev = rev
-            self.clrev_to_localrev[linkrev] = rev
-        else:
-            linkrev = self.clnode_to_rev[linknode]
-            self.clrev_to_localrev[linkrev] = rev
-        # This is a node to send in full, because the changeset it
-        # corresponds to was a full changeset.
-        if linknode in self.full_nodes:
-            for x in orig(self, revlog, rev, prev, linknode):
-                yield x
-            return
-        # At this point, a node can either be one we should skip or an
-        # ellipsis. If it's not an ellipsis, bail immediately.
-        if linkrev not in self.precomputed_ellipsis:
-            return
-        linkparents = self.precomputed_ellipsis[linkrev]
-        def local(clrev):
-            """Turn a changelog revnum into a local revnum.
-
-            The ellipsis dag is stored as revnums on the changelog,
-            but when we're producing ellipsis entries for
-            non-changelog revlogs, we need to turn those numbers into
-            something local. This does that for us, and during the
-            changelog sending phase will also expand the stored
-            mappings as needed.
-            """
-            if clrev == node.nullrev:
-                return node.nullrev
-            if not self.changelog_done:
-                # If we're doing the changelog, it's possible that we
-                # have a parent that is already on the client, and we
-                # need to store some extra mapping information so that
-                # our contained ellipsis nodes will be able to resolve
-                # their parents.
-                if clrev not in self.clrev_to_localrev:
-                    clnode = revlog.node(clrev)
-                    self.clnode_to_rev[clnode] = clrev
-                return clrev
-            # Walk the ellipsis-ized changelog breadth-first looking for a
-            # change that has been linked from the current revlog.
-            #
-            # For a flat manifest revlog only a single step should be necessary
-            # as all relevant changelog entries are relevant to the flat
-            # manifest.
-            #
-            # For a filelog or tree manifest dirlog however not every changelog
-            # entry will have been relevant, so we need to skip some changelog
-            # nodes even after ellipsis-izing.
-            walk = [clrev]
-            while walk:
-                p = walk[0]
-                walk = walk[1:]
-                if p in self.clrev_to_localrev:
-                    return self.clrev_to_localrev[p]
-                elif p in self.full_nodes:
-                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
-                                    if pp != node.nullrev])
-                elif p in self.precomputed_ellipsis:
-                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
-                                    if pp != node.nullrev])
-                else:
-                    # In this case, we've got an ellipsis with parents
-                    # outside the current bundle (likely an
-                    # incremental pull). We "know" that we can use the
-                    # value of this same revlog at whatever revision
-                    # is pointed to by linknode. "Know" is in scare
-                    # quotes because I haven't done enough examination
-                    # of edge cases to convince myself this is really
-                    # a fact - it works for all the (admittedly
-                    # thorough) cases in our testsuite, but I would be
-                    # somewhat unsurprised to find a case in the wild
-                    # where this breaks down a bit. That said, I don't
-                    # know if it would hurt anything.
-                    for i in xrange(rev, 0, -1):
-                        if revlog.linkrev(i) == clrev:
-                            return i
-                    # We failed to resolve a parent for this node, so
-                    # we crash the changegroup construction.
-                    raise error.Abort(
-                        'unable to resolve parent while packing %r %r'
-                        ' for changeset %r' % (revlog.indexfile, rev, clrev))
-            return node.nullrev
-
-        if not linkparents or (
-            revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
-            p1, p2 = node.nullrev, node.nullrev
-        elif len(linkparents) == 1:
-            p1, = sorted(local(p) for p in linkparents)
-            p2 = node.nullrev
-        else:
-            p1, p2 = sorted(local(p) for p in linkparents)
-        n = revlog.node(rev)
-        yield ellipsisdata(
-            self, rev, revlog, p1, p2, revlog.revision(n), linknode)
-    extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
-
-    def deltaparent(orig, self, revlog, rev, p1, p2, prev):
-        if util.safehasattr(self, 'full_nodes'):
-            # TODO: send better deltas when in narrow mode.
-            #
-            # changegroup.group() loops over revisions to send,
-            # including revisions we'll skip. What this means is that
-            # `prev` will be a potentially useless delta base for all
-            # ellipsis nodes, as the client likely won't have it. In
-            # the future we should do bookkeeping about which nodes
-            # have been sent to the client, and try to be
-            # significantly smarter about delta bases. This is
-            # slightly tricky because this same code has to work for
-            # all revlogs, and we don't have the linkrev/linknode here.
-            return p1
-        return orig(self, revlog, rev, p1, p2, prev)
-    extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
--- a/hgext/narrow/narrowcommands.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/narrow/narrowcommands.py	Fri Aug 24 12:55:05 2018 -0700
@@ -7,10 +7,10 @@
 from __future__ import absolute_import
 
 import itertools
+import os
 
 from mercurial.i18n import _
 from mercurial import (
-    changegroup,
     cmdutil,
     commands,
     discovery,
@@ -24,7 +24,9 @@
     pycompat,
     registrar,
     repair,
+    repository,
     repoview,
+    sparse,
     util,
 )
 
@@ -43,6 +45,8 @@
                      _("create a narrow clone of select files")))
     entry[1].append(('', 'depth', '',
                      _("limit the history fetched by distance from heads")))
+    entry[1].append(('', 'narrowspec', '',
+                     _("read narrowspecs from file")))
     # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
     if 'sparse' not in extensions.enabled():
         entry[1].append(('', 'include', [],
@@ -73,6 +77,27 @@
     opts = pycompat.byteskwargs(opts)
     wrappedextraprepare = util.nullcontextmanager()
     opts_narrow = opts['narrow']
+    narrowspecfile = opts['narrowspec']
+
+    if narrowspecfile:
+        filepath = os.path.join(pycompat.getcwd(), narrowspecfile)
+        ui.status(_("reading narrowspec from '%s'\n") % filepath)
+        try:
+            fp = open(filepath, 'rb')
+        except IOError:
+            raise error.Abort(_("file '%s' not found") % filepath)
+
+        includes, excludes, profiles = sparse.parseconfig(ui, fp.read(),
+                                                          'narrow')
+        if profiles:
+            raise error.Abort(_("cannot specify other files using '%include' in"
+                                " narrowspec"))
+
+        # narrowspec is passed so we should assume that user wants narrow clone
+        opts_narrow = True
+        opts['include'].extend(includes)
+        opts['exclude'].extend(excludes)
+
     if opts_narrow:
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             # Create narrow spec patterns from clone flags
@@ -101,7 +126,7 @@
 
     def pullnarrow(orig, repo, *args, **kwargs):
         if opts_narrow:
-            repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+            repo.requirements.add(repository.NARROW_REQUIREMENT)
             repo._writerequirements()
 
         return orig(repo, *args, **kwargs)
@@ -114,7 +139,7 @@
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
@@ -128,7 +153,7 @@
 
 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
         includes = set(opts.get(r'include', []))
         excludes = set(opts.get(r'exclude', []))
@@ -142,7 +167,7 @@
 
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
-    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
     if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
@@ -331,7 +356,7 @@
     empty and will not match any files.
     """
     opts = pycompat.byteskwargs(opts)
-    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
         ui.warn(_('The narrow command is only supported on respositories cloned'
                   ' with --narrow.\n'))
         return 1
--- a/hgext/narrow/narrowdirstate.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/narrow/narrowdirstate.py	Fri Aug 24 12:55:05 2018 -0700
@@ -11,8 +11,6 @@
 from mercurial import (
     error,
     match as matchmod,
-    narrowspec,
-    util as hgutil,
 )
 
 def wrapdirstate(repo, dirstate):
@@ -29,10 +27,6 @@
             return fn(self, *args)
         return _wrapper
 
-    def _narrowbackupname(backupname):
-        assert 'dirstate' in backupname
-        return backupname.replace('dirstate', narrowspec.FILENAME)
-
     class narrowdirstate(dirstate.__class__):
         def walk(self, match, subrepos, unknown, ignored, full=True,
                  narrowonly=True):
@@ -78,22 +72,5 @@
                 allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
             super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles)
 
-        def restorebackup(self, tr, backupname):
-            self._opener.rename(_narrowbackupname(backupname),
-                                narrowspec.FILENAME, checkambig=True)
-            super(narrowdirstate, self).restorebackup(tr, backupname)
-
-        def savebackup(self, tr, backupname):
-            super(narrowdirstate, self).savebackup(tr, backupname)
-
-            narrowbackupname = _narrowbackupname(backupname)
-            self._opener.tryunlink(narrowbackupname)
-            hgutil.copyfile(self._opener.join(narrowspec.FILENAME),
-                            self._opener.join(narrowbackupname), hardlink=True)
-
-        def clearbackup(self, tr, backupname):
-            super(narrowdirstate, self).clearbackup(tr, backupname)
-            self._opener.unlink(_narrowbackupname(backupname))
-
     dirstate.__class__ = narrowdirstate
     return dirstate
--- a/hgext/narrow/narrowrepo.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/narrow/narrowrepo.py	Fri Aug 24 12:55:05 2018 -0700
@@ -7,35 +7,11 @@
 
 from __future__ import absolute_import
 
-from mercurial import (
-    changegroup,
-    hg,
-    narrowspec,
-    scmutil,
-)
-
 from . import (
     narrowdirstate,
     narrowrevlog,
 )
 
-def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
-    orig(sourcerepo, destrepo, **kwargs)
-    if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements:
-        with destrepo.wlock():
-            with destrepo.vfs('shared', 'a') as fp:
-                fp.write(narrowspec.FILENAME + '\n')
-
-def unsharenarrowspec(orig, ui, repo, repopath):
-    if (changegroup.NARROW_REQUIREMENT in repo.requirements
-        and repo.path == repopath and repo.shared()):
-        srcrepo = hg.sharedreposource(repo)
-        with srcrepo.vfs(narrowspec.FILENAME) as f:
-            spec = f.read()
-        with repo.vfs(narrowspec.FILENAME, 'w') as f:
-            f.write(spec)
-    return orig(ui, repo, repopath)
-
 def wraprepo(repo):
     """Enables narrow clone functionality on a single local repository."""
 
@@ -46,23 +22,6 @@
             narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
             return fl
 
-        # I'm not sure this is the right place to do this filter.
-        # context._manifestmatches() would probably be better, or perhaps
-        # move it to a later place, in case some of the callers do want to know
-        # which directories changed. This seems to work for now, though.
-        def status(self, *args, **kwargs):
-            s = super(narrowrepository, self).status(*args, **kwargs)
-            narrowmatch = self.narrowmatch()
-            modified = list(filter(narrowmatch, s.modified))
-            added = list(filter(narrowmatch, s.added))
-            removed = list(filter(narrowmatch, s.removed))
-            deleted = list(filter(narrowmatch, s.deleted))
-            unknown = list(filter(narrowmatch, s.unknown))
-            ignored = list(filter(narrowmatch, s.ignored))
-            clean = list(filter(narrowmatch, s.clean))
-            return scmutil.status(modified, added, removed, deleted, unknown,
-                                  ignored, clean)
-
         def _makedirstate(self):
             dirstate = super(narrowrepository, self)._makedirstate()
             return narrowdirstate.wrapdirstate(self, dirstate)
--- a/hgext/narrow/narrowtemplates.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/narrow/narrowtemplates.py	Fri Aug 24 12:55:05 2018 -0700
@@ -42,7 +42,7 @@
             return 'outsidenarrow'
     return ''
 
-@revsetpredicate('ellipsis')
+@revsetpredicate('ellipsis()')
 def ellipsisrevset(repo, subset, x):
     """Changesets that are ellipsis nodes."""
     return subset.filter(lambda r: _isellipsis(repo, r))
--- a/hgext/patchbomb.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/patchbomb.py	Fri Aug 24 12:55:05 2018 -0700
@@ -73,7 +73,7 @@
 '''
 from __future__ import absolute_import
 
-import email as emailmod
+import email.encoders as emailencoders
 import email.generator as emailgen
 import email.mime.base as emimebase
 import email.mime.multipart as emimemultipart
@@ -139,6 +139,11 @@
     default=None,
 )
 
+if pycompat.ispy3:
+    _bytesgenerator = emailgen.BytesGenerator
+else:
+    _bytesgenerator = emailgen.Generator
+
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
@@ -273,10 +278,11 @@
                                                  seqno=idx, total=total)
             else:
                 patchname = cmdutil.makefilename(repo[node], '%b.patch')
-        disposition = 'inline'
+        disposition = r'inline'
         if opts.get('attach'):
-            disposition = 'attachment'
-        p['Content-Disposition'] = disposition + '; filename=' + patchname
+            disposition = r'attachment'
+        p[r'Content-Disposition'] = (
+            disposition + r'; filename=' + encoding.strfromlocal(patchname))
         msg.attach(p)
     else:
         msg = mail.mimetextpatch(body, display=opts.get('test'))
@@ -370,12 +376,12 @@
     msg = emimemultipart.MIMEMultipart()
     if body:
         msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test')))
-    datapart = emimebase.MIMEBase('application', 'x-mercurial-bundle')
+    datapart = emimebase.MIMEBase(r'application', r'x-mercurial-bundle')
     datapart.set_payload(bundle)
     bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle')
-    datapart.add_header('Content-Disposition', 'attachment',
-                        filename=bundlename)
-    emailmod.Encoders.encode_base64(datapart)
+    datapart.add_header(r'Content-Disposition', r'attachment',
+                        filename=encoding.strfromlocal(bundlename))
+    emailencoders.encode_base64(datapart)
     msg.attach(datapart)
     msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
     return [(msg, subj, None)]
@@ -463,6 +469,11 @@
         ui.status(_("no changes found\n"))
     return revs
 
+def _msgid(node, timestamp):
+    hostname = encoding.strtolocal(socket.getfqdn())
+    hostname = encoding.environ.get('HGHOSTNAME', hostname)
+    return '<%s.%d@%s>' % (node, timestamp, hostname)
+
 emailopts = [
     ('', 'body', None, _('send patches as inline message text (default)')),
     ('a', 'attach', None, _('send patches as attachments')),
@@ -671,8 +682,7 @@
         start_time = dateutil.makedate()
 
     def genmsgid(id):
-        return '<%s.%d@%s>' % (id[:20], int(start_time[0]),
-                               encoding.strtolocal(socket.getfqdn()))
+        return _msgid(id[:20], int(start_time[0]))
 
     # deprecated config: patchbomb.from
     sender = (opts.get('from') or ui.config('email', 'from') or
@@ -780,10 +790,27 @@
             m['Bcc'] = ', '.join(bcc)
         if replyto:
             m['Reply-To'] = ', '.join(replyto)
+        # Fix up all headers to be native strings.
+        # TODO(durin42): this should probably be cleaned up above in the future.
+        if pycompat.ispy3:
+            for hdr, val in list(m.items()):
+                change = False
+                if isinstance(hdr, bytes):
+                    del m[hdr]
+                    hdr = pycompat.strurl(hdr)
+                    change = True
+                if isinstance(val, bytes):
+                    val = pycompat.strurl(val)
+                    if not change:
+                        # prevent duplicate headers
+                        del m[hdr]
+                    change = True
+                if change:
+                    m[hdr] = val
         if opts.get('test'):
             ui.status(_('displaying '), subj, ' ...\n')
             ui.pager('email')
-            generator = emailgen.Generator(ui, mangle_from_=False)
+            generator = _bytesgenerator(ui, mangle_from_=False)
             try:
                 generator.flatten(m, 0)
                 ui.write('\n')
@@ -799,8 +826,10 @@
                 # Exim does not remove the Bcc field
                 del m['Bcc']
             fp = stringio()
-            generator = emailgen.Generator(fp, mangle_from_=False)
+            generator = _bytesgenerator(fp, mangle_from_=False)
             generator.flatten(m, 0)
-            sendmail(sender_addr, to + bcc + cc, fp.getvalue())
+            alldests = to + bcc + cc
+            alldests = [encoding.strfromlocal(d) for d in alldests]
+            sendmail(sender_addr, alldests, fp.getvalue())
 
     progress.complete()
--- a/hgext/rebase.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/rebase.py	Fri Aug 24 12:55:05 2018 -0700
@@ -177,6 +177,7 @@
         if e:
             self.extrafns = [e]
 
+        self.backupf = ui.configbool('ui', 'history-editing-backup')
         self.keepf = opts.get('keep', False)
         self.keepbranchesf = opts.get('keepbranches', False)
         self.obsoletenotrebased = {}
@@ -343,7 +344,9 @@
                 msg = _('cannot continue inconsistent rebase')
                 hint = _('use "hg rebase --abort" to clear broken state')
                 raise error.Abort(msg, hint=hint)
+
         if isabort:
+            backup = backup and self.backupf
             return abort(self.repo, self.originalwd, self.destmap, self.state,
                          activebookmark=self.activebookmark, backup=backup,
                          suppwarns=suppwarns)
@@ -632,7 +635,7 @@
         if self.collapsef and not self.keepf:
             collapsedas = newnode
         clearrebased(ui, repo, self.destmap, self.state, self.skipped,
-                     collapsedas, self.keepf, fm=fm)
+                     collapsedas, self.keepf, fm=fm, backup=self.backupf)
 
         clearstatus(repo)
         clearcollapsemsg(repo)
@@ -670,6 +673,7 @@
     ('D', 'detach', False, _('(DEPRECATED)')),
     ('i', 'interactive', False, _('(DEPRECATED)')),
     ('t', 'tool', '', _('specify merge tool')),
+    ('', 'stop', False, _('stop interrupted rebase')),
     ('c', 'continue', False, _('continue an interrupted rebase')),
     ('a', 'abort', False, _('abort an interrupted rebase')),
     ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
@@ -729,7 +733,8 @@
     deleted, there is no hook presently available for this.
 
     If a rebase is interrupted to manually resolve a conflict, it can be
-    continued with --continue/-c or aborted with --abort/-a.
+    continued with --continue/-c, aborted with --abort/-a, or stopped with
+    --stop.
 
     .. container:: verbose
 
@@ -800,22 +805,20 @@
     opts = pycompat.byteskwargs(opts)
     inmemory = ui.configbool('rebase', 'experimental.inmemory')
     dryrun = opts.get('dry_run')
-    if dryrun:
-        if opts.get('abort'):
-            raise error.Abort(_('cannot specify both --dry-run and --abort'))
-        if opts.get('continue'):
-            raise error.Abort(_('cannot specify both --dry-run and --continue'))
-    if opts.get('confirm'):
-        dryrun = True
-        if opts.get('dry_run'):
-            raise error.Abort(_('cannot specify both --confirm and --dry-run'))
-        if opts.get('abort'):
-            raise error.Abort(_('cannot specify both --confirm and --abort'))
-        if opts.get('continue'):
-            raise error.Abort(_('cannot specify both --confirm and --continue'))
+    confirm = opts.get('confirm')
+    selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
+    if len(selactions) > 1:
+        raise error.Abort(_('cannot use --%s with --%s')
+                          % tuple(selactions[:2]))
+    action = selactions[0] if selactions else None
+    if dryrun and action:
+        raise error.Abort(_('cannot specify both --dry-run and --%s') % action)
+    if confirm and action:
+        raise error.Abort(_('cannot specify both --confirm and --%s') % action)
+    if dryrun and confirm:
+        raise error.Abort(_('cannot specify both --confirm and --dry-run'))
 
-    if (opts.get('continue') or opts.get('abort') or
-        repo.currenttransaction() is not None):
+    if action or repo.currenttransaction() is not None:
         # in-memory rebase is not compatible with resuming rebases.
         # (Or if it is run within a transaction, since the restart logic can
         # fail the entire transaction.)
@@ -830,24 +833,43 @@
         opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
         opts['dest'] = '_destautoorphanrebase(SRC)'
 
-    if dryrun:
-        return _dryrunrebase(ui, repo, opts)
+    if dryrun or confirm:
+        return _dryrunrebase(ui, repo, action, opts)
+    elif action == 'stop':
+        rbsrt = rebaseruntime(repo, ui)
+        with repo.wlock(), repo.lock():
+            rbsrt.restorestatus()
+            if rbsrt.collapsef:
+                raise error.Abort(_("cannot stop in --collapse session"))
+            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
+            if not (rbsrt.keepf or allowunstable):
+                raise error.Abort(_("cannot remove original changesets with"
+                                    " unrebased descendants"),
+                    hint=_('either enable obsmarkers to allow unstable '
+                           'revisions or use --keep to keep original '
+                           'changesets'))
+            if needupdate(repo, rbsrt.state):
+                # update to the current working revision
+                # to clear interrupted merge
+                hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
+            rbsrt._finishrebase()
+            return 0
     elif inmemory:
         try:
             # in-memory merge doesn't support conflicts, so if we hit any, abort
             # and re-run as an on-disk merge.
             overrides = {('rebase', 'singletransaction'): True}
             with ui.configoverride(overrides, 'rebase'):
-                return _dorebase(ui, repo, opts, inmemory=inmemory)
+                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
         except error.InMemoryMergeConflictsError:
             ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
                       ' merge\n'))
-            _dorebase(ui, repo, {'abort': True})
-            return _dorebase(ui, repo, opts, inmemory=False)
+            _dorebase(ui, repo, action='abort', opts={})
+            return _dorebase(ui, repo, action, opts, inmemory=False)
     else:
-        return _dorebase(ui, repo, opts)
+        return _dorebase(ui, repo, action, opts)
 
-def _dryrunrebase(ui, repo, opts):
+def _dryrunrebase(ui, repo, action, opts):
     rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
     confirm = opts.get('confirm')
     if confirm:
@@ -860,7 +882,7 @@
         try:
             overrides = {('rebase', 'singletransaction'): True}
             with ui.configoverride(overrides, 'rebase'):
-                _origrebase(ui, repo, opts, rbsrt, inmemory=True,
+                _origrebase(ui, repo, action, opts, rbsrt, inmemory=True,
                             leaveunfinished=True)
         except error.InMemoryMergeConflictsError:
             ui.status(_('hit a merge conflict\n'))
@@ -886,11 +908,13 @@
                 rbsrt._prepareabortorcontinue(isabort=True, backup=False,
                                               suppwarns=True)
 
-def _dorebase(ui, repo, opts, inmemory=False):
+def _dorebase(ui, repo, action, opts, inmemory=False):
     rbsrt = rebaseruntime(repo, ui, inmemory, opts)
-    return _origrebase(ui, repo, opts, rbsrt, inmemory=inmemory)
+    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
 
-def _origrebase(ui, repo, opts, rbsrt, inmemory=False, leaveunfinished=False):
+def _origrebase(ui, repo, action, opts, rbsrt, inmemory=False,
+                leaveunfinished=False):
+    assert action != 'stop'
     with repo.wlock(), repo.lock():
         # Validate input and define rebasing points
         destf = opts.get('dest', None)
@@ -900,8 +924,6 @@
         # search default destination in this space
         # used in the 'hg pull --rebase' case, see issue 5214.
         destspace = opts.get('_destspace')
-        contf = opts.get('continue')
-        abortf = opts.get('abort')
         if opts.get('interactive'):
             try:
                 if extensions.find('histedit'):
@@ -917,22 +939,20 @@
             raise error.Abort(
                 _('message can only be specified with collapse'))
 
-        if contf or abortf:
-            if contf and abortf:
-                raise error.Abort(_('cannot use both abort and continue'))
+        if action:
             if rbsrt.collapsef:
                 raise error.Abort(
                     _('cannot use collapse with continue or abort'))
             if srcf or basef or destf:
                 raise error.Abort(
                     _('abort and continue do not allow specifying revisions'))
-            if abortf and opts.get('tool', False):
+            if action == 'abort' and opts.get('tool', False):
                 ui.warn(_('tool option will be ignored\n'))
-            if contf:
+            if action == 'continue':
                 ms = mergemod.mergestate.read(repo)
                 mergeutil.checkunresolved(ms)
 
-            retcode = rbsrt._prepareabortorcontinue(abortf)
+            retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
             if retcode is not None:
                 return retcode
         else:
@@ -1728,7 +1748,7 @@
     return originalwd, destmap, state
 
 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
-                 keepf=False, fm=None):
+                 keepf=False, fm=None, backup=True):
     """dispose of rebased revision at the end of the rebase
 
     If `collapsedas` is not None, the rebase was a collapse whose result if the
@@ -1736,6 +1756,9 @@
 
     If `keepf` is not True, the rebase has --keep set and no nodes should be
     removed (but bookmarks still need to be moved).
+
+    If `backup` is False, no backup will be stored when stripping rebased
+    revisions.
     """
     tonode = repo.changelog.node
     replacements = {}
@@ -1751,7 +1774,7 @@
                 else:
                     succs = (newnode,)
                 replacements[oldnode] = succs
-    scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
+    scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
     if fm:
         hf = fm.hexfunc
         fl = fm.formatlist
--- a/hgext/shelve.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/shelve.py	Fri Aug 24 12:55:05 2018 -0700
@@ -41,6 +41,7 @@
     lock as lockmod,
     mdiff,
     merge,
+    narrowspec,
     node as nodemod,
     patch,
     phases,
@@ -314,10 +315,13 @@
     '''Abort current transaction for shelve/unshelve, but keep dirstate
     '''
     tr = repo.currenttransaction()
-    backupname = 'dirstate.shelve'
-    repo.dirstate.savebackup(tr, backupname)
+    dirstatebackupname = 'dirstate.shelve'
+    narrowspecbackupname = 'narrowspec.shelve'
+    repo.dirstate.savebackup(tr, dirstatebackupname)
+    narrowspec.savebackup(repo, narrowspecbackupname)
     tr.abort()
-    repo.dirstate.restorebackup(None, backupname)
+    narrowspec.restorebackup(repo, narrowspecbackupname)
+    repo.dirstate.restorebackup(None, dirstatebackupname)
 
 def createcmd(ui, repo, pats, opts):
     """subcommand that creates a new shelve"""
@@ -783,7 +787,7 @@
             tr.close()
 
             nodestoremove = [repo.changelog.node(rev)
-                             for rev in xrange(oldtiprev, len(repo))]
+                             for rev in pycompat.xrange(oldtiprev, len(repo))]
             shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
                               branchtorestore, opts.get('keep'), activebookmark)
             raise error.InterventionRequired(
--- a/hgext/uncommit.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/uncommit.py	Fri Aug 24 12:55:05 2018 -0700
@@ -182,7 +182,7 @@
 
             with repo.dirstate.parentchange():
                 repo.dirstate.setparents(newid, node.nullid)
-                s = repo.status(old.p1(), old, match=match)
+                s = old.p1().status(old, match=match)
                 _fixdirstate(repo, old, repo[newid], s)
 
 def predecessormarkers(ctx):
--- a/hgext/win32text.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/hgext/win32text.py	Fri Aug 24 12:55:05 2018 -0700
@@ -49,6 +49,7 @@
     short,
 )
 from mercurial import (
+    pycompat,
     registrar,
 )
 from mercurial.utils import (
@@ -141,7 +142,8 @@
     # changegroup that contains an unacceptable commit followed later
     # by a commit that fixes the problem.
     tip = repo['tip']
-    for rev in xrange(repo.changelog.tiprev(), repo[node].rev() - 1, -1):
+    for rev in pycompat.xrange(repo.changelog.tiprev(),
+                               repo[node].rev() - 1, -1):
         c = repo[rev]
         for f in c.files():
             if f in seen or f not in tip or f not in c:
--- a/i18n/hggettext	Sat Aug 18 10:24:57 2018 +0200
+++ b/i18n/hggettext	Fri Aug 24 12:55:05 2018 -0700
@@ -63,7 +63,7 @@
 
 doctestre = re.compile(r'^ +>>> ', re.MULTILINE)
 
-def offset(src, doc, name, default):
+def offset(src, doc, name, lineno, default):
     """Compute offset or issue a warning on stdout."""
     # remove doctest part, in order to avoid backslash mismatching
     m = doctestre.search(doc)
@@ -76,8 +76,9 @@
         # This can happen if the docstring contains unnecessary escape
         # sequences such as \" in a triple-quoted string. The problem
         # is that \" is turned into " and so doc wont appear in src.
-        sys.stderr.write("warning: unknown offset in %s, assuming %d lines\n"
-                         % (name, default))
+        sys.stderr.write("%s:%d:warning:"
+                         " unknown docstr offset, assuming %d lines\n"
+                         % (name, lineno, default))
         return default
     else:
         return src.count('\n', 0, end)
@@ -106,7 +107,7 @@
     if not path.startswith('mercurial/') and mod.__doc__:
         with open(path) as fobj:
             src = fobj.read()
-        lineno = 1 + offset(src, mod.__doc__, path, 7)
+        lineno = 1 + offset(src, mod.__doc__, path, 1, 7)
         print(poentry(path, lineno, mod.__doc__))
 
     functions = list(getattr(mod, 'i18nfunctions', []))
@@ -129,7 +130,6 @@
             actualpath = '%s%s.py' % (funcmod.__name__.replace('.', '/'), extra)
 
             src = inspect.getsource(func)
-            name = "%s.%s" % (actualpath, func.__name__)
             lineno = inspect.getsourcelines(func)[1]
             doc = docobj.__doc__
             origdoc = getattr(docobj, '_origdoc', '')
@@ -137,9 +137,9 @@
                 doc = doc.rstrip()
                 origdoc = origdoc.rstrip()
             if origdoc:
-                lineno += offset(src, origdoc, name, 1)
+                lineno += offset(src, origdoc, actualpath, lineno, 1)
             else:
-                lineno += offset(src, doc, name, 1)
+                lineno += offset(src, doc, actualpath, lineno, 1)
             print(poentry(actualpath, lineno, doc))
 
 
--- a/mercurial/__init__.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/__init__.py	Fri Aug 24 12:55:05 2018 -0700
@@ -182,7 +182,7 @@
                     continue
                 r, c = t.start
                 l = (b'; from mercurial.pycompat import '
-                     b'delattr, getattr, hasattr, setattr, xrange, '
+                     b'delattr, getattr, hasattr, setattr, '
                      b'open, unicode\n')
                 for u in tokenize.tokenize(io.BytesIO(l).readline):
                     if u.type in (tokenize.ENCODING, token.ENDMARKER):
@@ -223,7 +223,7 @@
     # ``replacetoken`` or any mechanism that changes semantics of module
     # loading is changed. Otherwise cached bytecode may get loaded without
     # the new transformation mechanisms applied.
-    BYTECODEHEADER = b'HG\x00\x0a'
+    BYTECODEHEADER = b'HG\x00\x0b'
 
     class hgloader(importlib.machinery.SourceFileLoader):
         """Custom module loader that transforms source code.
--- a/mercurial/ancestor.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/ancestor.py	Fri Aug 24 12:55:05 2018 -0700
@@ -11,6 +11,9 @@
 import heapq
 
 from .node import nullrev
+from . import (
+    pycompat,
+)
 
 def commonancestorsheads(pfunc, *nodes):
     """Returns a set with the heads of all common ancestors of all nodes,
@@ -174,7 +177,7 @@
             # no revs to consider
             return
 
-        for curr in xrange(start, min(revs) - 1, -1):
+        for curr in pycompat.xrange(start, min(revs) - 1, -1):
             if curr not in bases:
                 continue
             revs.discard(curr)
@@ -215,7 +218,7 @@
         # exit.
 
         missing = []
-        for curr in xrange(start, nullrev, -1):
+        for curr in pycompat.xrange(start, nullrev, -1):
             if not revsvisit:
                 break
 
--- a/mercurial/branchmap.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/branchmap.py	Fri Aug 24 12:55:05 2018 -0700
@@ -38,15 +38,11 @@
     return filename
 
 def read(repo):
+    f = None
     try:
         f = repo.cachevfs(_filename(repo))
-        lines = f.read().split('\n')
-        f.close()
-    except (IOError, OSError):
-        return None
-
-    try:
-        cachekey = lines.pop(0).split(" ", 2)
+        lineiter = iter(f)
+        cachekey = next(lineiter).rstrip('\n').split(" ", 2)
         last, lrev = cachekey[:2]
         last, lrev = bin(last), int(lrev)
         filteredhash = None
@@ -58,7 +54,8 @@
             # invalidate the cache
             raise ValueError(r'tip differs')
         cl = repo.changelog
-        for l in lines:
+        for l in lineiter:
+            l = l.rstrip('\n')
             if not l:
                 continue
             node, state, label = l.split(" ", 2)
@@ -72,6 +69,10 @@
             partial.setdefault(label, []).append(node)
             if state == 'c':
                 partial._closednodes.add(node)
+
+    except (IOError, OSError):
+        return None
+
     except Exception as inst:
         if repo.ui.debugflag:
             msg = 'invalid branchheads cache'
@@ -80,6 +81,11 @@
             msg += ': %s\n'
             repo.ui.debug(msg % pycompat.bytestr(inst))
         partial = None
+
+    finally:
+        if f:
+            f.close()
+
     return partial
 
 ### Nearest subset relation
--- a/mercurial/bundle2.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/bundle2.py	Fri Aug 24 12:55:05 2018 -0700
@@ -2223,11 +2223,11 @@
         total += header[1] + header[2]
         utf8branch = inpart.read(header[0])
         branch = encoding.tolocal(utf8branch)
-        for x in xrange(header[1]):
+        for x in pycompat.xrange(header[1]):
             node = inpart.read(20)
             rev = cl.rev(node)
             cache.setdata(branch, rev, node, False)
-        for x in xrange(header[2]):
+        for x in pycompat.xrange(header[2]):
             node = inpart.read(20)
             rev = cl.rev(node)
             cache.setdata(branch, rev, node, True)
--- a/mercurial/bundlerepo.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/bundlerepo.py	Fri Aug 24 12:55:05 2018 -0700
@@ -80,7 +80,7 @@
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                  self.rev(p1), self.rev(p2), node)
-            self.index.insert(-1, e)
+            self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
@@ -187,7 +187,7 @@
 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
     def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
                  dir=''):
-        manifest.manifestrevlog.__init__(self, opener, dir=dir)
+        manifest.manifestrevlog.__init__(self, opener, tree=dir)
         bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                               linkmapper)
         if dirlogstarts is None:
--- a/mercurial/cext/parsers.c	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/cext/parsers.c	Fri Aug 24 12:55:05 2018 -0700
@@ -713,7 +713,7 @@
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 5;
+static const int version = 9;
 
 static void module_init(PyObject *mod)
 {
--- a/mercurial/cext/revlog.c	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/cext/revlog.c	Fri Aug 24 12:55:05 2018 -0700
@@ -28,15 +28,27 @@
 #define PyInt_AsLong PyLong_AsLong
 #endif
 
+typedef struct indexObjectStruct indexObject;
+
+typedef struct {
+	int children[16];
+} nodetreenode;
+
 /*
  * A base-16 trie for fast node->rev mapping.
  *
  * Positive value is index of the next node in the trie
- * Negative value is a leaf: -(rev + 1)
+ * Negative value is a leaf: -(rev + 2)
  * Zero is empty
  */
 typedef struct {
-	int children[16];
+	PyObject_HEAD
+	indexObject *index;
+	nodetreenode *nodes;
+	unsigned length;     /* # nodes in use */
+	unsigned capacity;   /* # nodes allocated */
+	int depth;           /* maximum depth of tree */
+	int splits;          /* # splits performed */
 } nodetree;
 
 /*
@@ -51,7 +63,7 @@
  * With string keys, we lazily perform a reverse mapping from node to
  * rev, using a base-16 trie.
  */
-typedef struct {
+struct indexObjectStruct {
 	PyObject_HEAD
 	/* Type-specific fields go here. */
 	PyObject *data;        /* raw bytes of index */
@@ -64,15 +76,11 @@
 	PyObject *headrevs;    /* cache, invalidated on changes */
 	PyObject *filteredrevs;/* filtered revs set */
 	nodetree *nt;          /* base-16 trie */
-	unsigned ntlength;          /* # nodes in use */
-	unsigned ntcapacity;        /* # nodes allocated */
-	int ntdepth;           /* maximum depth of tree */
-	int ntsplits;          /* # splits performed */
 	int ntrev;             /* last rev scanned */
 	int ntlookups;         /* # lookups */
 	int ntmisses;          /* # lookups that miss the cache */
 	int inlined;
-} indexObject;
+};
 
 static Py_ssize_t index_length(const indexObject *self)
 {
@@ -95,6 +103,36 @@
 /* A RevlogNG v1 index entry is 64 bytes long. */
 static const long v1_hdrsize = 64;
 
+static void raise_revlog_error(void)
+{
+	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
+
+	mod = PyImport_ImportModule("mercurial.error");
+	if (mod == NULL) {
+		goto cleanup;
+	}
+
+	dict = PyModule_GetDict(mod);
+	if (dict == NULL) {
+		goto cleanup;
+	}
+	Py_INCREF(dict);
+
+	errclass = PyDict_GetItemString(dict, "RevlogError");
+	if (errclass == NULL) {
+		PyErr_SetString(PyExc_SystemError,
+				"could not find RevlogError");
+		goto cleanup;
+	}
+
+	/* value of exception is ignored by callers */
+	PyErr_SetString(errclass, "RevlogError");
+
+cleanup:
+	Py_XDECREF(dict);
+	Py_XDECREF(mod);
+}
+
 /*
  * Return a pointer to the beginning of a RevlogNG record.
  */
@@ -117,9 +155,8 @@
 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
 				    int *ps, int maxrev)
 {
-	if (rev >= self->length - 1) {
-		PyObject *tuple = PyList_GET_ITEM(self->added,
-						  rev - self->length + 1);
+	if (rev >= self->length) {
+		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
 		ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
 		ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
 	} else {
@@ -158,22 +195,19 @@
 	Py_ssize_t length = index_length(self);
 	PyObject *entry;
 
-	if (pos < 0)
-		pos += length;
+	if (pos == -1) {
+		Py_INCREF(nullentry);
+		return nullentry;
+	}
 
 	if (pos < 0 || pos >= length) {
 		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
 		return NULL;
 	}
 
-	if (pos == length - 1) {
-		Py_INCREF(nullentry);
-		return nullentry;
-	}
-
-	if (pos >= self->length - 1) {
+	if (pos >= self->length) {
 		PyObject *obj;
-		obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
+		obj = PyList_GET_ITEM(self->added, pos - self->length);
 		Py_INCREF(obj);
 		return obj;
 	}
@@ -231,15 +265,15 @@
 	Py_ssize_t length = index_length(self);
 	const char *data;
 
-	if (pos == length - 1 || pos == INT_MAX)
+	if (pos == -1)
 		return nullid;
 
 	if (pos >= length)
 		return NULL;
 
-	if (pos >= self->length - 1) {
+	if (pos >= self->length) {
 		PyObject *tuple, *str;
-		tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
+		tuple = PyList_GET_ITEM(self->added, pos - self->length);
 		str = PyTuple_GetItem(tuple, 7);
 		return str ? PyBytes_AS_STRING(str) : NULL;
 	}
@@ -262,47 +296,34 @@
 	return node;
 }
 
-static int nt_insert(indexObject *self, const char *node, int rev);
+static int nt_insert(nodetree *self, const char *node, int rev);
 
-static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
+static int node_check(PyObject *obj, char **node)
 {
-	if (PyBytes_AsStringAndSize(obj, node, nodelen) == -1)
+	Py_ssize_t nodelen;
+	if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
 		return -1;
-	if (*nodelen == 20)
+	if (nodelen == 20)
 		return 0;
 	PyErr_SetString(PyExc_ValueError, "20-byte hash required");
 	return -1;
 }
 
-static PyObject *index_insert(indexObject *self, PyObject *args)
+static PyObject *index_append(indexObject *self, PyObject *obj)
 {
-	PyObject *obj;
 	char *node;
-	int index;
-	Py_ssize_t len, nodelen;
-
-	if (!PyArg_ParseTuple(args, "iO", &index, &obj))
-		return NULL;
+	Py_ssize_t len;
 
 	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
 		PyErr_SetString(PyExc_TypeError, "8-tuple required");
 		return NULL;
 	}
 
-	if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1)
+	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
 		return NULL;
 
 	len = index_length(self);
 
-	if (index < 0)
-		index += len;
-
-	if (index != len - 1) {
-		PyErr_SetString(PyExc_IndexError,
-				"insert only supported at index -1");
-		return NULL;
-	}
-
 	if (self->added == NULL) {
 		self->added = PyList_New(0);
 		if (self->added == NULL)
@@ -313,41 +334,12 @@
 		return NULL;
 
 	if (self->nt)
-		nt_insert(self, node, index);
+		nt_insert(self->nt, node, (int)len);
 
 	Py_CLEAR(self->headrevs);
 	Py_RETURN_NONE;
 }
 
-static void _index_clearcaches(indexObject *self)
-{
-	if (self->cache) {
-		Py_ssize_t i;
-
-		for (i = 0; i < self->raw_length; i++)
-			Py_CLEAR(self->cache[i]);
-		free(self->cache);
-		self->cache = NULL;
-	}
-	if (self->offsets) {
-		PyMem_Free(self->offsets);
-		self->offsets = NULL;
-	}
-	free(self->nt);
-	self->nt = NULL;
-	Py_CLEAR(self->headrevs);
-}
-
-static PyObject *index_clearcaches(indexObject *self)
-{
-	_index_clearcaches(self);
-	self->ntlength = self->ntcapacity = 0;
-	self->ntdepth = self->ntsplits = 0;
-	self->ntrev = -1;
-	self->ntlookups = self->ntmisses = 0;
-	Py_RETURN_NONE;
-}
-
 static PyObject *index_stats(indexObject *self)
 {
 	PyObject *obj = PyDict_New();
@@ -376,16 +368,18 @@
 		Py_DECREF(t);
 	}
 
-	if (self->raw_length != self->length - 1)
+	if (self->raw_length != self->length)
 		istat(raw_length, "revs on disk");
 	istat(length, "revs in memory");
-	istat(ntcapacity, "node trie capacity");
-	istat(ntdepth, "node trie depth");
-	istat(ntlength, "node trie count");
 	istat(ntlookups, "node trie lookups");
 	istat(ntmisses, "node trie misses");
 	istat(ntrev, "node trie last rev scanned");
-	istat(ntsplits, "node trie splits");
+	if (self->nt) {
+		istat(nt->capacity, "node trie capacity");
+		istat(nt->depth, "node trie depth");
+		istat(nt->length, "node trie count");
+		istat(nt->splits, "node trie splits");
+	}
 
 #undef istat
 
@@ -451,7 +445,7 @@
 {
 	PyObject *iter = NULL;
 	PyObject *iter_item = NULL;
-	Py_ssize_t min_idx = index_length(self) + 1;
+	Py_ssize_t min_idx = index_length(self) + 2;
 	long iter_item_long;
 
 	if (PyList_GET_SIZE(list) != 0) {
@@ -463,7 +457,7 @@
 			Py_DECREF(iter_item);
 			if (iter_item_long < min_idx)
 				min_idx = iter_item_long;
-			phases[iter_item_long] = marker;
+			phases[iter_item_long] = (char)marker;
 		}
 		Py_DECREF(iter);
 	}
@@ -493,7 +487,7 @@
 	PyObject *reachable = NULL;
 
 	PyObject *val;
-	Py_ssize_t len = index_length(self) - 1;
+	Py_ssize_t len = index_length(self);
 	long revnum;
 	Py_ssize_t k;
 	Py_ssize_t i;
@@ -615,7 +609,7 @@
 			      revstates[parents[1] + 1]) & RS_REACHABLE)
 			    && !(revstates[i + 1] & RS_REACHABLE)) {
 				revstates[i + 1] |= RS_REACHABLE;
-				val = PyInt_FromLong(i);
+				val = PyInt_FromSsize_t(i);
 				if (val == NULL)
 					goto bail;
 				r = PyList_Append(reachable, val);
@@ -645,7 +639,7 @@
 	PyObject *phaseset = NULL;
 	PyObject *phasessetlist = NULL;
 	PyObject *rev = NULL;
-	Py_ssize_t len = index_length(self) - 1;
+	Py_ssize_t len = index_length(self);
 	Py_ssize_t numphase = 0;
 	Py_ssize_t minrevallphases = 0;
 	Py_ssize_t minrevphase = 0;
@@ -702,7 +696,7 @@
 		}
 	}
 	/* Transform phase list to a python list */
-	phasessize = PyInt_FromLong(len);
+	phasessize = PyInt_FromSsize_t(len);
 	if (phasessize == NULL)
 		goto release;
 	for (i = 0; i < len; i++) {
@@ -711,7 +705,7 @@
 		 * is computed as a difference */
 		if (phase != 0) {
 			phaseset = PyList_GET_ITEM(phasessetlist, phase);
-			rev = PyInt_FromLong(i);
+			rev = PyInt_FromSsize_t(i);
 			if (rev == NULL)
 				goto release;
 			PySet_Add(phaseset, rev);
@@ -756,7 +750,7 @@
 		}
 	}
 
-	len = index_length(self) - 1;
+	len = index_length(self);
 	heads = PyList_New(0);
 	if (heads == NULL)
 		goto bail;
@@ -838,9 +832,8 @@
 {
 	const char *data;
 
-	if (rev >= self->length - 1) {
-		PyObject *tuple = PyList_GET_ITEM(self->added,
-			rev - self->length + 1);
+	if (rev >= self->length) {
+		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
 		return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
 	}
 	else {
@@ -881,7 +874,7 @@
 		return NULL;
 	}
 
-	if (rev < 0 || rev >= length - 1) {
+	if (rev < 0 || rev >= length) {
 		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
 		return NULL;
 	}
@@ -924,7 +917,7 @@
 			break;
 		}
 
-		if (iterrev >= length - 1) {
+		if (iterrev >= length) {
 			PyErr_SetString(PyExc_IndexError, "revision outside index");
 			return NULL;
 		}
@@ -984,7 +977,7 @@
  *   -2: not found
  * rest: valid rev
  */
-static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
+static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
 		   int hex)
 {
 	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
@@ -993,9 +986,6 @@
 	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
 		return -1;
 
-	if (self->nt == NULL)
-		return -2;
-
 	if (hex)
 		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
 	else
@@ -1003,15 +993,15 @@
 
 	for (level = off = 0; level < maxlevel; level++) {
 		int k = getnybble(node, level);
-		nodetree *n = &self->nt[off];
+		nodetreenode *n = &self->nodes[off];
 		int v = n->children[k];
 
 		if (v < 0) {
 			const char *n;
 			Py_ssize_t i;
 
-			v = -(v + 1);
-			n = index_node(self, v);
+			v = -(v + 2);
+			n = index_node(self->index, v);
 			if (n == NULL)
 				return -2;
 			for (i = level; i < maxlevel; i++)
@@ -1027,65 +1017,67 @@
 	return -4;
 }
 
-static int nt_new(indexObject *self)
+static int nt_new(nodetree *self)
 {
-	if (self->ntlength == self->ntcapacity) {
-		if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
-			PyErr_SetString(PyExc_MemoryError,
-					"overflow in nt_new");
+	if (self->length == self->capacity) {
+		unsigned newcapacity;
+		nodetreenode *newnodes;
+		newcapacity = self->capacity * 2;
+		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
+			PyErr_SetString(PyExc_MemoryError, "overflow in nt_new");
 			return -1;
 		}
-		self->ntcapacity *= 2;
-		self->nt = realloc(self->nt,
-				   self->ntcapacity * sizeof(nodetree));
-		if (self->nt == NULL) {
+		newnodes = realloc(self->nodes, newcapacity * sizeof(nodetreenode));
+		if (newnodes == NULL) {
 			PyErr_SetString(PyExc_MemoryError, "out of memory");
 			return -1;
 		}
-		memset(&self->nt[self->ntlength], 0,
-		       sizeof(nodetree) * (self->ntcapacity - self->ntlength));
+		self->capacity = newcapacity;
+		self->nodes = newnodes;
+		memset(&self->nodes[self->length], 0,
+		       sizeof(nodetreenode) * (self->capacity - self->length));
 	}
-	return self->ntlength++;
+	return self->length++;
 }
 
-static int nt_insert(indexObject *self, const char *node, int rev)
+static int nt_insert(nodetree *self, const char *node, int rev)
 {
 	int level = 0;
 	int off = 0;
 
 	while (level < 40) {
 		int k = nt_level(node, level);
-		nodetree *n;
+		nodetreenode *n;
 		int v;
 
-		n = &self->nt[off];
+		n = &self->nodes[off];
 		v = n->children[k];
 
 		if (v == 0) {
-			n->children[k] = -rev - 1;
+			n->children[k] = -rev - 2;
 			return 0;
 		}
 		if (v < 0) {
-			const char *oldnode = index_node_existing(self, -(v + 1));
+			const char *oldnode = index_node_existing(self->index, -(v + 2));
 			int noff;
 
 			if (oldnode == NULL)
 				return -1;
 			if (!memcmp(oldnode, node, 20)) {
-				n->children[k] = -rev - 1;
+				n->children[k] = -rev - 2;
 				return 0;
 			}
 			noff = nt_new(self);
 			if (noff == -1)
 				return -1;
-			/* self->nt may have been changed by realloc */
-			self->nt[off].children[k] = noff;
+			/* self->nodes may have been changed by realloc */
+			self->nodes[off].children[k] = noff;
 			off = noff;
-			n = &self->nt[off];
+			n = &self->nodes[off];
 			n->children[nt_level(oldnode, ++level)] = v;
-			if (level > self->ntdepth)
-				self->ntdepth = level;
-			self->ntsplits += 1;
+			if (level > self->depth)
+				self->depth = level;
+			self->splits += 1;
 		} else {
 			level += 1;
 			off = v;
@@ -1095,167 +1087,69 @@
 	return -1;
 }
 
-static int nt_init(indexObject *self)
+static PyObject *nt_insert_py(nodetree *self, PyObject *args)
 {
-	if (self->nt == NULL) {
-		if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
-			PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
-			return -1;
-		}
-		self->ntcapacity = self->raw_length < 4
-			? 4 : (int)self->raw_length / 2;
+	Py_ssize_t rev;
+	const char *node;
+	Py_ssize_t length;
+	if (!PyArg_ParseTuple(args, "n", &rev))
+		return NULL;
+	length = index_length(self->index);
+	if (rev < 0 || rev >= length) {
+		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
+		return NULL;
+	}
+	node = index_node_existing(self->index, rev);
+	if (nt_insert(self, node, (int)rev) == -1)
+		return NULL;
+	Py_RETURN_NONE;
+}
 
-		self->nt = calloc(self->ntcapacity, sizeof(nodetree));
-		if (self->nt == NULL) {
-			PyErr_NoMemory();
-			return -1;
-		}
-		self->ntlength = 1;
-		self->ntrev = (int)index_length(self) - 1;
-		self->ntlookups = 1;
-		self->ntmisses = 0;
-		if (nt_insert(self, nullid, INT_MAX) == -1)
-			return -1;
+static int nt_delete_node(nodetree *self, const char *node)
+{
+	/* rev==-2 happens to get encoded as 0, which is interpreted as not set */
+	return nt_insert(self, node, -2);
+}
+
+static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
+{
+	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
+	self->nodes = NULL;
+
+	self->index = index;
+	Py_INCREF(index);
+	/* The input capacity is in terms of revisions, while the field is in
+	 * terms of nodetree nodes. */
+	self->capacity = (capacity < 4 ? 4 : capacity / 2);
+	self->depth = 0;
+	self->splits = 0;
+	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
+		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
+		return -1;
 	}
+	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
+	if (self->nodes == NULL) {
+		PyErr_NoMemory();
+		return -1;
+	}
+	self->length = 1;
 	return 0;
 }
 
-/*
- * Return values:
- *
- *   -3: error (exception set)
- *   -2: not found (no exception set)
- * rest: valid rev
- */
-static int index_find_node(indexObject *self,
-			   const char *node, Py_ssize_t nodelen)
-{
-	int rev;
-
-	self->ntlookups++;
-	rev = nt_find(self, node, nodelen, 0);
-	if (rev >= -1)
-		return rev;
-
-	if (nt_init(self) == -1)
-		return -3;
+static PyTypeObject indexType;
 
-	/*
-	 * For the first handful of lookups, we scan the entire index,
-	 * and cache only the matching nodes. This optimizes for cases
-	 * like "hg tip", where only a few nodes are accessed.
-	 *
-	 * After that, we cache every node we visit, using a single
-	 * scan amortized over multiple lookups.  This gives the best
-	 * bulk performance, e.g. for "hg log".
-	 */
-	if (self->ntmisses++ < 4) {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -3;
-			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
-				if (nt_insert(self, n, rev) == -1)
-					return -3;
-				break;
-			}
-		}
-	} else {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -3;
-			if (nt_insert(self, n, rev) == -1) {
-				self->ntrev = rev + 1;
-				return -3;
-			}
-			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
-				break;
-			}
-		}
-		self->ntrev = rev;
-	}
-
-	if (rev >= 0)
-		return rev;
-	return -2;
+static int nt_init_py(nodetree *self, PyObject *args)
+{
+	PyObject *index;
+	unsigned capacity;
+	if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
+		return -1;
+	return nt_init(self, (indexObject*)index, capacity);
 }
 
-static void raise_revlog_error(void)
-{
-	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
-
-	mod = PyImport_ImportModule("mercurial.error");
-	if (mod == NULL) {
-		goto cleanup;
-	}
-
-	dict = PyModule_GetDict(mod);
-	if (dict == NULL) {
-		goto cleanup;
-	}
-	Py_INCREF(dict);
-
-	errclass = PyDict_GetItemString(dict, "RevlogError");
-	if (errclass == NULL) {
-		PyErr_SetString(PyExc_SystemError,
-				"could not find RevlogError");
-		goto cleanup;
-	}
-
-	/* value of exception is ignored by callers */
-	PyErr_SetString(errclass, "RevlogError");
-
-cleanup:
-	Py_XDECREF(dict);
-	Py_XDECREF(mod);
-}
-
-static PyObject *index_getitem(indexObject *self, PyObject *value)
-{
-	char *node;
-	Py_ssize_t nodelen;
-	int rev;
-
-	if (PyInt_Check(value))
-		return index_get(self, PyInt_AS_LONG(value));
-
-	if (node_check(value, &node, &nodelen) == -1)
-		return NULL;
-	rev = index_find_node(self, node, nodelen);
-	if (rev >= -1)
-		return PyInt_FromLong(rev);
-	if (rev == -2)
-		raise_revlog_error();
-	return NULL;
-}
-
-/*
- * Fully populate the radix tree.
- */
-static int nt_populate(indexObject *self) {
-	int rev;
-	if (self->ntrev > 0) {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -1;
-			if (nt_insert(self, n, rev) == -1)
-				return -1;
-		}
-		self->ntrev = -1;
-	}
-	return 0;
-}
-
-static int nt_partialmatch(indexObject *self, const char *node,
+static int nt_partialmatch(nodetree *self, const char *node,
 			   Py_ssize_t nodelen)
 {
-	if (nt_init(self) == -1)
-		return -3;
-	if (nt_populate(self) == -1)
-		return -3;
-
 	return nt_find(self, node, nodelen, 1);
 }
 
@@ -1268,24 +1162,19 @@
  *   -2: not found (no exception set)
  * rest: length of shortest prefix
  */
-static int nt_shortest(indexObject *self, const char *node)
+static int nt_shortest(nodetree *self, const char *node)
 {
 	int level, off;
 
-	if (nt_init(self) == -1)
-		return -3;
-	if (nt_populate(self) == -1)
-		return -3;
-
 	for (level = off = 0; level < 40; level++) {
 		int k, v;
-		nodetree *n = &self->nt[off];
+		nodetreenode *n = &self->nodes[off];
 		k = nt_level(node, level);
 		v = n->children[k];
 		if (v < 0) {
 			const char *n;
-			v = -(v + 1);
-			n = index_node_existing(self, v);
+			v = -(v + 2);
+			n = index_node_existing(self->index, v);
 			if (n == NULL)
 				return -3;
 			if (memcmp(node, n, 20) != 0)
@@ -1310,6 +1199,204 @@
 	return -3;
 }
 
+static PyObject *nt_shortest_py(nodetree *self, PyObject *args)
+{
+	PyObject *val;
+	char *node;
+	int length;
+
+	if (!PyArg_ParseTuple(args, "O", &val))
+		return NULL;
+	if (node_check(val, &node) == -1)
+		return NULL;
+
+	length = nt_shortest(self, node);
+	if (length == -3)
+		return NULL;
+	if (length == -2) {
+		raise_revlog_error();
+		return NULL;
+	}
+	return PyInt_FromLong(length);
+}
+
+static void nt_dealloc(nodetree *self)
+{
+	Py_XDECREF(self->index);
+	free(self->nodes);
+	self->nodes = NULL;
+	PyObject_Del(self);
+}
+
+static PyMethodDef nt_methods[] = {
+	{"insert", (PyCFunction)nt_insert_py, METH_VARARGS,
+	 "insert an index entry"},
+	{"shortest", (PyCFunction)nt_shortest_py, METH_VARARGS,
+	 "find length of shortest hex nodeid of a binary ID"},
+	{NULL} /* Sentinel */
+};
+
+static PyTypeObject nodetreeType = {
+	PyVarObject_HEAD_INIT(NULL, 0) /* header */
+	"parsers.nodetree",        /* tp_name */
+	sizeof(nodetree) ,         /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)nt_dealloc,    /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	0,                         /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	"nodetree",                /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	nt_methods,                /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	(initproc)nt_init_py,      /* tp_init */
+	0,                         /* tp_alloc */
+};
+
+static int index_init_nt(indexObject *self)
+{
+	if (self->nt == NULL) {
+		self->nt = PyObject_New(nodetree, &nodetreeType);
+		if (self->nt == NULL) {
+			return -1;
+		}
+		if (nt_init(self->nt, self, (int)self->raw_length) == -1) {
+			nt_dealloc(self->nt);
+			self->nt = NULL;
+			return -1;
+		}
+		if (nt_insert(self->nt, nullid, -1) == -1) {
+			nt_dealloc(self->nt);
+			self->nt = NULL;
+			return -1;
+		}
+		self->ntrev = (int)index_length(self);
+		self->ntlookups = 1;
+		self->ntmisses = 0;
+	}
+	return 0;
+}
+
+/*
+ * Return values:
+ *
+ *   -3: error (exception set)
+ *   -2: not found (no exception set)
+ * rest: valid rev
+ */
+static int index_find_node(indexObject *self,
+			   const char *node, Py_ssize_t nodelen)
+{
+	int rev;
+
+	if (index_init_nt(self) == -1)
+		return -3;
+
+	self->ntlookups++;
+	rev = nt_find(self->nt, node, nodelen, 0);
+	if (rev >= -1)
+		return rev;
+
+	/*
+	 * For the first handful of lookups, we scan the entire index,
+	 * and cache only the matching nodes. This optimizes for cases
+	 * like "hg tip", where only a few nodes are accessed.
+	 *
+	 * After that, we cache every node we visit, using a single
+	 * scan amortized over multiple lookups.  This gives the best
+	 * bulk performance, e.g. for "hg log".
+	 */
+	if (self->ntmisses++ < 4) {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -3;
+			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
+				if (nt_insert(self->nt, n, rev) == -1)
+					return -3;
+				break;
+			}
+		}
+	} else {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -3;
+			if (nt_insert(self->nt, n, rev) == -1) {
+				self->ntrev = rev + 1;
+				return -3;
+			}
+			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
+				break;
+			}
+		}
+		self->ntrev = rev;
+	}
+
+	if (rev >= 0)
+		return rev;
+	return -2;
+}
+
+static PyObject *index_getitem(indexObject *self, PyObject *value)
+{
+	char *node;
+	int rev;
+
+	if (PyInt_Check(value))
+		return index_get(self, PyInt_AS_LONG(value));
+
+	if (node_check(value, &node) == -1)
+		return NULL;
+	rev = index_find_node(self, node, 20);
+	if (rev >= -1)
+		return PyInt_FromLong(rev);
+	if (rev == -2)
+		raise_revlog_error();
+	return NULL;
+}
+
+/*
+ * Fully populate the radix tree.
+ */
+static int index_populate_nt(indexObject *self) {
+	int rev;
+	if (self->ntrev > 0) {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -1;
+			if (nt_insert(self->nt, n, rev) == -1)
+				return -1;
+		}
+		self->ntrev = -1;
+	}
+	return 0;
+}
+
 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
 {
 	const char *fullnode;
@@ -1338,12 +1425,15 @@
 		Py_RETURN_NONE;
 	}
 
-	rev = nt_partialmatch(self, node, nodelen);
+	if (index_init_nt(self) == -1)
+		return NULL;
+	if (index_populate_nt(self) == -1)
+		return NULL;
+	rev = nt_partialmatch(self->nt, node, nodelen);
 
 	switch (rev) {
 	case -4:
 		raise_revlog_error();
-	case -3:
 		return NULL;
 	case -2:
 		Py_RETURN_NONE;
@@ -1360,18 +1450,21 @@
 
 static PyObject *index_shortest(indexObject *self, PyObject *args)
 {
-	Py_ssize_t nodelen;
 	PyObject *val;
 	char *node;
 	int length;
 
 	if (!PyArg_ParseTuple(args, "O", &val))
 		return NULL;
-	if (node_check(val, &node, &nodelen) == -1)
+	if (node_check(val, &node) == -1)
 		return NULL;
 
 	self->ntlookups++;
-	length = nt_shortest(self, node);
+	if (index_init_nt(self) == -1)
+		return NULL;
+	if (index_populate_nt(self) == -1)
+		return NULL;
+	length = nt_shortest(self->nt, node);
 	if (length == -3)
 		return NULL;
 	if (length == -2) {
@@ -1383,16 +1476,15 @@
 
 static PyObject *index_m_get(indexObject *self, PyObject *args)
 {
-	Py_ssize_t nodelen;
 	PyObject *val;
 	char *node;
 	int rev;
 
 	if (!PyArg_ParseTuple(args, "O", &val))
 		return NULL;
-	if (node_check(val, &node, &nodelen) == -1)
+	if (node_check(val, &node) == -1)
 		return NULL;
-	rev = index_find_node(self, node, nodelen);
+	rev = index_find_node(self, node, 20);
 	if (rev == -3)
 		return NULL;
 	if (rev == -2)
@@ -1403,17 +1495,16 @@
 static int index_contains(indexObject *self, PyObject *value)
 {
 	char *node;
-	Py_ssize_t nodelen;
 
 	if (PyInt_Check(value)) {
 		long rev = PyInt_AS_LONG(value);
 		return rev >= -1 && rev < index_length(self);
 	}
 
-	if (node_check(value, &node, &nodelen) == -1)
+	if (node_check(value, &node) == -1)
 		return -1;
 
-	switch (index_find_node(self, node, nodelen)) {
+	switch (index_find_node(self, node, 20)) {
 	case -3:
 		return -1;
 	case -2:
@@ -1554,7 +1645,7 @@
 		goto bail;
 	}
 
-	interesting = calloc(sizeof(*interesting), 1 << revcount);
+	interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
 	if (interesting == NULL) {
 		PyErr_NoMemory();
 		goto bail;
@@ -1687,7 +1778,7 @@
 	revs = PyMem_Malloc(argcount * sizeof(*revs));
 	if (argcount > 0 && revs == NULL)
 		return PyErr_NoMemory();
-	len = index_length(self) - 1;
+	len = index_length(self);
 
 	for (i = 0; i < argcount; i++) {
 		static const int capacity = 24;
@@ -1787,7 +1878,7 @@
 /*
  * Invalidate any trie entries introduced by added revs.
  */
-static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
+static void index_invalidate_added(indexObject *self, Py_ssize_t start)
 {
 	Py_ssize_t i, len = PyList_GET_SIZE(self->added);
 
@@ -1795,7 +1886,7 @@
 		PyObject *tuple = PyList_GET_ITEM(self->added, i);
 		PyObject *node = PyTuple_GET_ITEM(tuple, 7);
 
-		nt_insert(self, PyBytes_AS_STRING(node), -1);
+		nt_delete_node(self->nt, PyBytes_AS_STRING(node));
 	}
 
 	if (start == 0)
@@ -1809,7 +1900,7 @@
 static int index_slice_del(indexObject *self, PyObject *item)
 {
 	Py_ssize_t start, stop, step, slicelength;
-	Py_ssize_t length = index_length(self);
+	Py_ssize_t length = index_length(self) + 1;
 	int ret = 0;
 
 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
@@ -1845,23 +1936,23 @@
 		return -1;
 	}
 
-	if (start < self->length - 1) {
+	if (start < self->length) {
 		if (self->nt) {
 			Py_ssize_t i;
 
-			for (i = start + 1; i < self->length - 1; i++) {
+			for (i = start + 1; i < self->length; i++) {
 				const char *node = index_node_existing(self, i);
 				if (node == NULL)
 					return -1;
 
-				nt_insert(self, node, -1);
+				nt_delete_node(self->nt, node);
 			}
 			if (self->added)
-				nt_invalidate_added(self, 0);
+				index_invalidate_added(self, 0);
 			if (self->ntrev > start)
 				self->ntrev = (int)start;
 		}
-		self->length = start + 1;
+		self->length = start;
 		if (start < self->raw_length) {
 			if (self->cache) {
 				Py_ssize_t i;
@@ -1874,12 +1965,12 @@
 	}
 
 	if (self->nt) {
-		nt_invalidate_added(self, start - self->length + 1);
+		index_invalidate_added(self, start - self->length);
 		if (self->ntrev > start)
 			self->ntrev = (int)start;
 	}
 	if (self->added)
-		ret = PyList_SetSlice(self->added, start - self->length + 1,
+		ret = PyList_SetSlice(self->added, start - self->length,
 				      PyList_GET_SIZE(self->added), NULL);
 done:
 	Py_CLEAR(self->headrevs);
@@ -1897,17 +1988,16 @@
 				  PyObject *value)
 {
 	char *node;
-	Py_ssize_t nodelen;
 	long rev;
 
 	if (PySlice_Check(item) && value == NULL)
 		return index_slice_del(self, item);
 
-	if (node_check(item, &node, &nodelen) == -1)
+	if (node_check(item, &node) == -1)
 		return -1;
 
 	if (value == NULL)
-		return self->nt ? nt_insert(self, node, -1) : 0;
+		return self->nt ? nt_delete_node(self->nt, node) : 0;
 	rev = PyInt_AsLong(value);
 	if (rev > INT_MAX || rev < 0) {
 		if (!PyErr_Occurred())
@@ -1915,9 +2005,9 @@
 		return -1;
 	}
 
-	if (nt_init(self) == -1)
+	if (index_init_nt(self) == -1)
 		return -1;
-	return nt_insert(self, node, (int)rev);
+	return nt_insert(self->nt, node, (int)rev);
 }
 
 /*
@@ -1984,8 +2074,6 @@
 	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
 	self->data = data_obj;
 
-	self->ntlength = self->ntcapacity = 0;
-	self->ntdepth = self->ntsplits = 0;
 	self->ntlookups = self->ntmisses = 0;
 	self->ntrev = -1;
 	Py_INCREF(self->data);
@@ -1995,14 +2083,14 @@
 		if (len == -1)
 			goto bail;
 		self->raw_length = len;
-		self->length = len + 1;
+		self->length = len;
 	} else {
 		if (size % v1_hdrsize) {
 			PyErr_SetString(PyExc_ValueError, "corrupt index file");
 			goto bail;
 		}
 		self->raw_length = size / v1_hdrsize;
-		self->length = self->raw_length + 1;
+		self->length = self->raw_length;
 	}
 
 	return 0;
@@ -2016,6 +2104,35 @@
 	return (PyObject *)self;
 }
 
+static void _index_clearcaches(indexObject *self)
+{
+	if (self->cache) {
+		Py_ssize_t i;
+
+		for (i = 0; i < self->raw_length; i++)
+			Py_CLEAR(self->cache[i]);
+		free(self->cache);
+		self->cache = NULL;
+	}
+	if (self->offsets) {
+		PyMem_Free((void *)self->offsets);
+		self->offsets = NULL;
+	}
+	if (self->nt != NULL) {
+		nt_dealloc(self->nt);
+	}
+	self->nt = NULL;
+	Py_CLEAR(self->headrevs);
+}
+
+static PyObject *index_clearcaches(indexObject *self)
+{
+	_index_clearcaches(self);
+	self->ntrev = -1;
+	self->ntlookups = self->ntmisses = 0;
+	Py_RETURN_NONE;
+}
+
 static void index_dealloc(indexObject *self)
 {
 	_index_clearcaches(self);
@@ -2026,6 +2143,7 @@
 	}
 	Py_XDECREF(self->data);
 	Py_XDECREF(self->added);
+	Py_XDECREF(self->nt);
 	PyObject_Del(self);
 }
 
@@ -2066,8 +2184,8 @@
 	 "get filtered head revisions"}, /* Can always do filtering */
 	{"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
 	 "determine revisions with deltas to reconstruct fulltext"},
-	{"insert", (PyCFunction)index_insert, METH_VARARGS,
-	 "insert an index entry"},
+	{"append", (PyCFunction)index_append, METH_O,
+	 "append an index entry"},
 	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
 	 "match a potentially ambiguous node ID"},
 	{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
@@ -2175,6 +2293,12 @@
 	Py_INCREF(&indexType);
 	PyModule_AddObject(mod, "index", (PyObject *)&indexType);
 
+	nodetreeType.tp_new = PyType_GenericNew;
+	if (PyType_Ready(&nodetreeType) < 0)
+		return;
+	Py_INCREF(&nodetreeType);
+	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
+
 	nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
 				  -1, -1, -1, -1, nullid, 20);
 	if (nullentry)
--- a/mercurial/changegroup.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/changegroup.py	Fri Aug 24 12:55:05 2018 -0700
@@ -14,33 +14,37 @@
 from .i18n import _
 from .node import (
     hex,
+    nullid,
     nullrev,
     short,
 )
 
+from .thirdparty import (
+    attr,
+)
+
 from . import (
-    dagutil,
+    dagop,
     error,
+    match as matchmod,
     mdiff,
     phases,
     pycompat,
+    repository,
     util,
 )
 
 from .utils import (
+    interfaceutil,
     stringutil,
 )
 
-_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
-_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
-_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
+_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
+_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
+_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
 
 LFS_REQUIREMENT = 'lfs'
 
-# When narrowing is finalized and no longer subject to format changes,
-# we should move this to just "narrow" or similar.
-NARROW_REQUIREMENT = 'narrowhg-experimental'
-
 readexactly = util.readexactly
 
 def getchunk(stream):
@@ -61,6 +65,10 @@
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)
 
+def _fileheader(path):
+    """Obtain a changegroup chunk header for a named path."""
+    return chunkheader(len(path)) + path
+
 def writechunks(ui, chunks, filename, vfs=None):
     """Write chunks to a file and return its filename.
 
@@ -114,7 +122,7 @@
     bundlerepo and some debug commands - their use is discouraged.
     """
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '01'
     _grouplistcount = 1 # One list of files after the manifests
 
@@ -187,7 +195,7 @@
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
-        header = struct.unpack(self.deltaheader, headerdata)
+        header = self.deltaheader.unpack(headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
         return (node, p1, p2, cs, deltabase, delta, flags)
@@ -245,7 +253,7 @@
         # be empty during the pull
         self.manifestheader()
         deltas = self.deltaiter()
-        repo.manifestlog.addgroup(deltas, revmap, trp)
+        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
         prog.complete()
         self.callback = None
 
@@ -325,7 +333,7 @@
                 cl = repo.changelog
                 ml = repo.manifestlog
                 # validate incoming csets have their manifests
-                for cset in xrange(clstart, clend):
+                for cset in pycompat.xrange(clstart, clend):
                     mfnode = cl.changelogrevision(cset).manifest
                     mfest = ml[mfnode].readdelta()
                     # store file cgnodes we must see
@@ -367,7 +375,7 @@
                 repo.hook('pretxnchangegroup',
                           throw=True, **pycompat.strkwargs(hookargs))
 
-            added = [cl.node(r) for r in xrange(clstart, clend)]
+            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
             phaseall = None
             if srctype in ('push', 'serve'):
                 # Old servers can not push the boundary themselves.
@@ -446,7 +454,7 @@
     remain the same.
     """
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '02'
 
     def _deltaheader(self, headertuple, prevnode):
@@ -462,7 +470,7 @@
     separating manifests and files.
     """
     deltaheader = _CHANGEGROUPV3_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '03'
     _grouplistcount = 2 # One list of manifests and one list of files
 
@@ -476,9 +484,8 @@
             # If we get here, there are directory manifests in the changegroup
             d = chunkdata["filename"]
             repo.ui.debug("adding %s revisions\n" % d)
-            dirlog = repo.manifestlog._revlog.dirlog(d)
             deltas = self.deltaiter()
-            if not dirlog.addgroup(deltas, revmap, trp):
+            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                 raise error.Abort(_("received dir revlog group is empty"))
 
 class headerlessfixup(object):
@@ -493,139 +500,358 @@
             return d
         return readexactly(self._fh, n)
 
-class cg1packer(object):
-    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
-    version = '01'
-    def __init__(self, repo, bundlecaps=None):
+@interfaceutil.implementer(repository.irevisiondeltarequest)
+@attr.s(slots=True, frozen=True)
+class revisiondeltarequest(object):
+    node = attr.ib()
+    linknode = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    ellipsis = attr.ib(default=False)
+
+def _revisiondeltatochunks(delta, headerfn):
+    """Serialize a revisiondelta to changegroup chunks."""
+
+    # The captured revision delta may be encoded as a delta against
+    # a base revision or as a full revision. The changegroup format
+    # requires that everything on the wire be deltas. So for full
+    # revisions, we need to invent a header that says to rewrite
+    # data.
+
+    if delta.delta is not None:
+        prefix, data = b'', delta.delta
+    elif delta.basenode == nullid:
+        data = delta.revision
+        prefix = mdiff.trivialdiffheader(len(data))
+    else:
+        data = delta.revision
+        prefix = mdiff.replacediffheader(delta.baserevisionsize,
+                                         len(data))
+
+    meta = headerfn(delta)
+
+    yield chunkheader(len(meta) + len(prefix) + len(data))
+    yield meta
+    if prefix:
+        yield prefix
+    yield data
+
+def _sortnodesnormal(store, nodes, reorder):
+    """Sort nodes for changegroup generation and turn into revnums."""
+    # for generaldelta revlogs, we linearize the revs; this will both be
+    # much quicker and generate a much smaller bundle
+    if (store._generaldelta and reorder is None) or reorder:
+        revs = set(store.rev(n) for n in nodes)
+        return dagop.linearize(revs, store.parentrevs)
+    else:
+        return sorted([store.rev(n) for n in nodes])
+
+def _sortnodesellipsis(store, nodes, cl, lookup):
+    """Sort nodes for changegroup generation and turn into revnums."""
+    # Ellipses serving mode.
+    #
+    # In a perfect world, we'd generate better ellipsis-ified graphs
+    # for non-changelog revlogs. In practice, we haven't started doing
+    # that yet, so the resulting DAGs for the manifestlog and filelogs
+    # are actually full of bogus parentage on all the ellipsis
+    # nodes. This has the side effect that, while the contents are
+    # correct, the individual DAGs might be completely out of whack in
+    # a case like 882681bc3166 and its ancestors (back about 10
+    # revisions or so) in the main hg repo.
+    #
+    # The one invariant we *know* holds is that the new (potentially
+    # bogus) DAG shape will be valid if we order the nodes in the
+    # order that they're introduced in dramatis personae by the
+    # changelog, so what we do is we sort the non-changelog histories
+    # by the order in which they are used by the changelog.
+    key = lambda n: cl.rev(lookup(n))
+    return [store.rev(n) for n in sorted(nodes, key=key)]
+
+def _makenarrowdeltarequest(cl, store, ischangelog, rev, node, linkrev,
+                            linknode, clrevtolocalrev, fullclnodes,
+                            precomputedellipsis):
+    linkparents = precomputedellipsis[linkrev]
+    def local(clrev):
+        """Turn a changelog revnum into a local revnum.
+
+        The ellipsis dag is stored as revnums on the changelog,
+        but when we're producing ellipsis entries for
+        non-changelog revlogs, we need to turn those numbers into
+        something local. This does that for us, and during the
+        changelog sending phase will also expand the stored
+        mappings as needed.
+        """
+        if clrev == nullrev:
+            return nullrev
+
+        if ischangelog:
+            return clrev
+
+        # Walk the ellipsis-ized changelog breadth-first looking for a
+        # change that has been linked from the current revlog.
+        #
+        # For a flat manifest revlog only a single step should be necessary
+        # as all relevant changelog entries are relevant to the flat
+        # manifest.
+        #
+        # For a filelog or tree manifest dirlog however not every changelog
+        # entry will have been relevant, so we need to skip some changelog
+        # nodes even after ellipsis-izing.
+        walk = [clrev]
+        while walk:
+            p = walk[0]
+            walk = walk[1:]
+            if p in clrevtolocalrev:
+                return clrevtolocalrev[p]
+            elif p in fullclnodes:
+                walk.extend([pp for pp in cl.parentrevs(p)
+                                if pp != nullrev])
+            elif p in precomputedellipsis:
+                walk.extend([pp for pp in precomputedellipsis[p]
+                                if pp != nullrev])
+            else:
+                # In this case, we've got an ellipsis with parents
+                # outside the current bundle (likely an
+                # incremental pull). We "know" that we can use the
+                # value of this same revlog at whatever revision
+                # is pointed to by linknode. "Know" is in scare
+                # quotes because I haven't done enough examination
+                # of edge cases to convince myself this is really
+                # a fact - it works for all the (admittedly
+                # thorough) cases in our testsuite, but I would be
+                # somewhat unsurprised to find a case in the wild
+                # where this breaks down a bit. That said, I don't
+                # know if it would hurt anything.
+                for i in pycompat.xrange(rev, 0, -1):
+                    if store.linkrev(i) == clrev:
+                        return i
+                # We failed to resolve a parent for this node, so
+                # we crash the changegroup construction.
+                raise error.Abort(
+                    'unable to resolve parent while packing %r %r'
+                    ' for changeset %r' % (store.indexfile, rev, clrev))
+
+        return nullrev
+
+    if not linkparents or (
+        store.parentrevs(rev) == (nullrev, nullrev)):
+        p1, p2 = nullrev, nullrev
+    elif len(linkparents) == 1:
+        p1, = sorted(local(p) for p in linkparents)
+        p2 = nullrev
+    else:
+        p1, p2 = sorted(local(p) for p in linkparents)
+
+    p1node, p2node = store.node(p1), store.node(p2)
+
+    # TODO: try and actually send deltas for ellipsis data blocks
+    return revisiondeltarequest(
+        node=node,
+        p1node=p1node,
+        p2node=p2node,
+        linknode=linknode,
+        basenode=nullid,
+        ellipsis=True,
+    )
+
+def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
+               allowreorder,
+               topic=None,
+               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
+               precomputedellipsis=None):
+    """Calculate deltas for a set of revisions.
+
+    Is a generator of ``revisiondelta`` instances.
+
+    If topic is not None, progress detail will be generated using this
+    topic name (e.g. changesets, manifests, etc).
+    """
+    if not nodes:
+        return
+
+    # We perform two passes over the revisions whose data we will emit.
+    #
+    # In the first pass, we obtain information about the deltas that will
+    # be generated. This involves computing linknodes and adjusting the
+    # request to take shallow fetching into account. The end result of
+    # this pass is a list of "request" objects stating which deltas
+    # to obtain.
+    #
+    # The second pass is simply resolving the requested deltas.
+
+    cl = repo.changelog
+
+    if ischangelog:
+        # Changelog doesn't benefit from reordering revisions. So send
+        # out revisions in store order.
+        # TODO the API would be cleaner if this were controlled by the
+        # store producing the deltas.
+        revs = sorted(cl.rev(n) for n in nodes)
+    elif ellipses:
+        revs = _sortnodesellipsis(store, nodes, cl, lookup)
+    else:
+        revs = _sortnodesnormal(store, nodes, allowreorder)
+
+    # In the first pass, collect info about the deltas we'll be
+    # generating.
+    requests = []
+
+    # Add the parent of the first rev.
+    revs.insert(0, store.parentrevs(revs[0])[0])
+
+    for i in pycompat.xrange(len(revs) - 1):
+        prev = revs[i]
+        curr = revs[i + 1]
+
+        node = store.node(curr)
+        linknode = lookup(node)
+        p1node, p2node = store.parents(node)
+
+        if ellipses:
+            linkrev = cl.rev(linknode)
+            clrevtolocalrev[linkrev] = curr
+
+            # This is a node to send in full, because the changeset it
+            # corresponds to was a full changeset.
+            if linknode in fullclnodes:
+                requests.append(revisiondeltarequest(
+                    node=node,
+                    p1node=p1node,
+                    p2node=p2node,
+                    linknode=linknode,
+                    basenode=None,
+                ))
+
+            elif linkrev not in precomputedellipsis:
+                pass
+            else:
+                requests.append(_makenarrowdeltarequest(
+                    cl, store, ischangelog, curr, node, linkrev, linknode,
+                    clrevtolocalrev, fullclnodes,
+                    precomputedellipsis))
+        else:
+            requests.append(revisiondeltarequest(
+                node=node,
+                p1node=p1node,
+                p2node=p2node,
+                linknode=linknode,
+                basenode=store.node(prev) if forcedeltaparentprev else None,
+            ))
+
+    # We expect the first pass to be fast, so we only engage the progress
+    # meter for constructing the revision deltas.
+    progress = None
+    if topic is not None:
+        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
+                                        total=len(requests))
+
+    for i, delta in enumerate(store.emitrevisiondeltas(requests)):
+        if progress:
+            progress.update(i + 1)
+
+        yield delta
+
+    if progress:
+        progress.complete()
+
+class cgpacker(object):
+    def __init__(self, repo, filematcher, version, allowreorder,
+                 builddeltaheader, manifestsend,
+                 forcedeltaparentprev=False,
+                 bundlecaps=None, ellipses=False,
+                 shallow=False, ellipsisroots=None, fullnodes=None):
         """Given a source repo, construct a bundler.
 
+        filematcher is a matcher that matches on files to include in the
+        changegroup. Used to facilitate sparse changegroups.
+
+        allowreorder controls whether reordering of revisions is allowed.
+        This value is used when ``bundle.reorder`` is ``auto`` or isn't
+        set.
+
+        forcedeltaparentprev indicates whether delta parents must be against
+        the previous revision in a delta group. This should only be used for
+        compatibility with changegroup version 1.
+
+        builddeltaheader is a callable that constructs the header for a group
+        delta.
+
+        manifestsend is a chunk to send after manifests have been fully emitted.
+
+        ellipses indicates whether ellipsis serving mode is enabled.
+
         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle. While bundlecaps is
         unused in core Mercurial, extensions rely on this feature to communicate
         capabilities to customize the changegroup packer.
+
+        shallow indicates whether shallow data might be sent. The packer may
+        need to pack file contents not introduced by the changes being packed.
+
+        fullnodes is the set of changelog nodes which should not be ellipsis
+        nodes. We store this rather than the set of nodes that should be
+        ellipsis because for very large histories we expect this to be
+        significantly smaller.
         """
+        assert filematcher
+        self._filematcher = filematcher
+
+        self.version = version
+        self._forcedeltaparentprev = forcedeltaparentprev
+        self._builddeltaheader = builddeltaheader
+        self._manifestsend = manifestsend
+        self._ellipses = ellipses
+
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
+        self._isshallow = shallow
+        self._fullclnodes = fullnodes
+
+        # Maps ellipsis revs to their roots at the changelog level.
+        self._precomputedellipsis = ellipsisroots
+
         # experimental config: bundle.reorder
         reorder = repo.ui.config('bundle', 'reorder')
         if reorder == 'auto':
-            reorder = None
+            self._reorder = allowreorder
         else:
-            reorder = stringutil.parsebool(reorder)
+            self._reorder = stringutil.parsebool(reorder)
+
         self._repo = repo
-        self._reorder = reorder
+
         if self._repo.ui.verbose and not self._repo.ui.debugflag:
             self._verbosenote = self._repo.ui.note
         else:
             self._verbosenote = lambda s: None
 
-    def close(self):
-        return closechunk()
-
-    def fileheader(self, fname):
-        return chunkheader(len(fname)) + fname
-
-    # Extracted both for clarity and for overriding in extensions.
-    def _sortgroup(self, revlog, nodelist, lookup):
-        """Sort nodes for change group and turn them into revnums."""
-        # for generaldelta revlogs, we linearize the revs; this will both be
-        # much quicker and generate a much smaller bundle
-        if (revlog._generaldelta and self._reorder is None) or self._reorder:
-            dag = dagutil.revlogdag(revlog)
-            return dag.linearize(set(revlog.rev(n) for n in nodelist))
-        else:
-            return sorted([revlog.rev(n) for n in nodelist])
-
-    def group(self, nodelist, revlog, lookup, units=None):
-        """Calculate a delta group, yielding a sequence of changegroup chunks
-        (strings).
-
-        Given a list of changeset revs, return a set of deltas and
-        metadata corresponding to nodes. The first delta is
-        first parent(nodelist[0]) -> nodelist[0], the receiver is
-        guaranteed to have this parent as it has all history before
-        these changesets. In the case firstparent is nullrev the
-        changegroup starts with a full revision.
-
-        If units is not None, progress detail will be generated, units specifies
-        the type of revlog that is touched (changelog, manifest, etc.).
-        """
-        # if we don't have any revisions touched by these changesets, bail
-        if len(nodelist) == 0:
-            yield self.close()
-            return
-
-        revs = self._sortgroup(revlog, nodelist, lookup)
+    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
+        """Yield a sequence of changegroup byte chunks."""
 
-        # add the parent of the first rev
-        p = revlog.parentrevs(revs[0])[0]
-        revs.insert(0, p)
-
-        # build deltas
-        progress = None
-        if units is not None:
-            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
-                                                  total=(len(revs) - 1))
-        for r in xrange(len(revs) - 1):
-            if progress:
-                progress.update(r + 1)
-            prev, curr = revs[r], revs[r + 1]
-            linknode = lookup(revlog.node(curr))
-            for c in self.revchunk(revlog, curr, prev, linknode):
-                yield c
-
-        if progress:
-            progress.complete()
-        yield self.close()
-
-    # filter any nodes that claim to be part of the known set
-    def prune(self, revlog, missing, commonrevs):
-        rr, rl = revlog.rev, revlog.linkrev
-        return [n for n in missing if rl(rr(n)) not in commonrevs]
-
-    def _packmanifests(self, dir, mfnodes, lookuplinknode):
-        """Pack flat manifests into a changegroup stream."""
-        assert not dir
-        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
-                                lookuplinknode, units=_('manifests')):
-            yield chunk
-
-    def _manifestsdone(self):
-        return ''
-
-    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
-        '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = repo.changelog
 
-        clrevorder = {}
-        mfs = {} # needed manifests
-        fnodes = {} # needed file nodes
-        changedfiles = set()
-
-        # Callback for the changelog, used to collect changed files and manifest
-        # nodes.
-        # Returns the linkrev node (identity in the changelog case).
-        def lookupcl(x):
-            c = cl.read(x)
-            clrevorder[x] = len(clrevorder)
-            n = c[0]
-            # record the first changeset introducing this manifest version
-            mfs.setdefault(n, x)
-            # Record a complete list of potentially-changed files in
-            # this manifest.
-            changedfiles.update(c[3])
-            return x
-
         self._verbosenote(_('uncompressed size of bundle content:\n'))
         size = 0
-        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
-            size += len(chunk)
-            yield chunk
+
+        clstate, deltas = self._generatechangelog(cl, clnodes)
+        for delta in deltas:
+            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
+                size += len(chunk)
+                yield chunk
+
+        close = closechunk()
+        size += len(close)
+        yield close
+
         self._verbosenote(_('%8.i (changelog)\n') % size)
 
+        clrevorder = clstate['clrevorder']
+        manifests = clstate['manifests']
+        changedfiles = clstate['changedfiles']
+
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
         # The fastpath is usually safer than the slowpath, because the filelogs
@@ -648,34 +874,142 @@
         fastpathlinkrev = fastpathlinkrev and (
             'treemanifest' not in repo.requirements)
 
-        for chunk in self.generatemanifests(commonrevs, clrevorder,
-                fastpathlinkrev, mfs, fnodes, source):
-            yield chunk
-        mfs.clear()
+        fnodes = {}  # needed file nodes
+
+        size = 0
+        it = self.generatemanifests(
+            commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
+            clstate['clrevtomanifestrev'])
+
+        for tree, deltas in it:
+            if tree:
+                assert self.version == b'03'
+                chunk = _fileheader(tree)
+                size += len(chunk)
+                yield chunk
+
+            for delta in deltas:
+                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                for chunk in chunks:
+                    size += len(chunk)
+                    yield chunk
+
+            close = closechunk()
+            size += len(close)
+            yield close
+
+        self._verbosenote(_('%8.i (manifests)\n') % size)
+        yield self._manifestsend
+
+        mfdicts = None
+        if self._ellipses and self._isshallow:
+            mfdicts = [(self._repo.manifestlog[n].read(), lr)
+                       for (n, lr) in manifests.iteritems()]
+
+        manifests.clear()
         clrevs = set(cl.rev(x) for x in clnodes)
 
-        if not fastpathlinkrev:
-            def linknodes(unused, fname):
-                return fnodes.get(fname, {})
-        else:
-            cln = cl.node
-            def linknodes(filerevlog, fname):
-                llr = filerevlog.linkrev
-                fln = filerevlog.node
-                revs = ((r, llr(r)) for r in filerevlog)
-                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
+        it = self.generatefiles(changedfiles, commonrevs,
+                                source, mfdicts, fastpathlinkrev,
+                                fnodes, clrevs)
+
+        for path, deltas in it:
+            h = _fileheader(path)
+            size = len(h)
+            yield h
 
-        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
-                                        source):
-            yield chunk
+            for delta in deltas:
+                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                for chunk in chunks:
+                    size += len(chunk)
+                    yield chunk
 
-        yield self.close()
+            close = closechunk()
+            size += len(close)
+            yield close
+
+            self._verbosenote(_('%8.i  %s\n') % (size, path))
+
+        yield closechunk()
 
         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)
 
-    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
-                          fnodes, source):
+    def _generatechangelog(self, cl, nodes):
+        """Generate data for changelog chunks.
+
+        Returns a 2-tuple of a dict containing state and an iterable of
+        byte chunks. The state will not be fully populated until the
+        chunk stream has been fully consumed.
+        """
+        clrevorder = {}
+        manifests = {}
+        mfl = self._repo.manifestlog
+        changedfiles = set()
+        clrevtomanifestrev = {}
+
+        # Callback for the changelog, used to collect changed files and
+        # manifest nodes.
+        # Returns the linkrev node (identity in the changelog case).
+        def lookupcl(x):
+            c = cl.changelogrevision(x)
+            clrevorder[x] = len(clrevorder)
+
+            if self._ellipses:
+                # Only update manifests if x is going to be sent. Otherwise we
+                # end up with bogus linkrevs specified for manifests and
+                # we skip some manifest nodes that we should otherwise
+                # have sent.
+                if (x in self._fullclnodes
+                    or cl.rev(x) in self._precomputedellipsis):
+
+                    manifestnode = c.manifest
+                    # Record the first changeset introducing this manifest
+                    # version.
+                    manifests.setdefault(manifestnode, x)
+                    # Set this narrow-specific dict so we have the lowest
+                    # manifest revnum to look up for this cl revnum. (Part of
+                    # mapping changelog ellipsis parents to manifest ellipsis
+                    # parents)
+                    clrevtomanifestrev.setdefault(
+                        cl.rev(x), mfl.rev(manifestnode))
+                # We can't trust the changed files list in the changeset if the
+                # client requested a shallow clone.
+                if self._isshallow:
+                    changedfiles.update(mfl[c.manifest].read().keys())
+                else:
+                    changedfiles.update(c.files)
+            else:
+                # record the first changeset introducing this manifest version
+                manifests.setdefault(c.manifest, x)
+                # Record a complete list of potentially-changed files in
+                # this manifest.
+                changedfiles.update(c.files)
+
+            return x
+
+        state = {
+            'clrevorder': clrevorder,
+            'manifests': manifests,
+            'changedfiles': changedfiles,
+            'clrevtomanifestrev': clrevtomanifestrev,
+        }
+
+        gen = deltagroup(
+            self._repo, cl, nodes, True, lookupcl,
+            self._forcedeltaparentprev,
+            # Reorder settings are currently ignored for changelog.
+            True,
+            ellipses=self._ellipses,
+            topic=_('changesets'),
+            clrevtolocalrev={},
+            fullclnodes=self._fullclnodes,
+            precomputedellipsis=self._precomputedellipsis)
+
+        return state, gen
+
+    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
+                          manifests, fnodes, source, clrevtolocalrev):
         """Returns an iterator of changegroup chunks containing manifests.
 
         `source` is unused here, but is used by extensions like remotefilelog to
@@ -683,16 +1017,15 @@
         """
         repo = self._repo
         mfl = repo.manifestlog
-        dirlog = mfl._revlog.dirlog
-        tmfnodes = {'': mfs}
+        tmfnodes = {'': manifests}
 
         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
-        def makelookupmflinknode(dir, nodes):
+        def makelookupmflinknode(tree, nodes):
             if fastpathlinkrev:
-                assert not dir
-                return mfs.__getitem__
+                assert not tree
+                return manifests.__getitem__
 
             def lookupmflinknode(x):
                 """Callback for looking up the linknode for manifests.
@@ -711,16 +1044,16 @@
                 treemanifests to send.
                 """
                 clnode = nodes[x]
-                mdata = mfl.get(dir, x).readfast(shallow=True)
+                mdata = mfl.get(tree, x).readfast(shallow=True)
                 for p, n, fl in mdata.iterentries():
                     if fl == 't': # subdirectory manifest
-                        subdir = dir + p + '/'
-                        tmfclnodes = tmfnodes.setdefault(subdir, {})
+                        subtree = tree + p + '/'
+                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                         tmfclnode = tmfclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[tmfclnode]:
                             tmfclnodes[n] = clnode
                     else:
-                        f = dir + p
+                        f = tree + p
                         fclnodes = fnodes.setdefault(f, {})
                         fclnode = fclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[fclnode]:
@@ -728,22 +1061,84 @@
                 return clnode
             return lookupmflinknode
 
-        size = 0
         while tmfnodes:
-            dir, nodes = tmfnodes.popitem()
-            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
-            if not dir or prunednodes:
-                for x in self._packmanifests(dir, prunednodes,
-                                             makelookupmflinknode(dir, nodes)):
-                    size += len(x)
-                    yield x
-        self._verbosenote(_('%8.i (manifests)\n') % size)
-        yield self._manifestsdone()
+            tree, nodes = tmfnodes.popitem()
+            store = mfl.getstorage(tree)
+
+            if not self._filematcher.visitdir(store._tree[:-1] or '.'):
+                prunednodes = []
+            else:
+                frev, flr = store.rev, store.linkrev
+                prunednodes = [n for n in nodes
+                               if flr(frev(n)) not in commonrevs]
+
+            if tree and not prunednodes:
+                continue
+
+            lookupfn = makelookupmflinknode(tree, nodes)
+
+            deltas = deltagroup(
+                self._repo, store, prunednodes, False, lookupfn,
+                self._forcedeltaparentprev, self._reorder,
+                ellipses=self._ellipses,
+                topic=_('manifests'),
+                clrevtolocalrev=clrevtolocalrev,
+                fullclnodes=self._fullclnodes,
+                precomputedellipsis=self._precomputedellipsis)
+
+            yield tree, deltas
 
     # The 'source' parameter is useful for extensions
-    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
+    def generatefiles(self, changedfiles, commonrevs, source,
+                      mfdicts, fastpathlinkrev, fnodes, clrevs):
+        changedfiles = list(filter(self._filematcher, changedfiles))
+
+        if not fastpathlinkrev:
+            def normallinknodes(unused, fname):
+                return fnodes.get(fname, {})
+        else:
+            cln = self._repo.changelog.node
+
+            def normallinknodes(store, fname):
+                flinkrev = store.linkrev
+                fnode = store.node
+                revs = ((r, flinkrev(r)) for r in store)
+                return dict((fnode(r), cln(lr))
+                            for r, lr in revs if lr in clrevs)
+
+        clrevtolocalrev = {}
+
+        if self._isshallow:
+            # In a shallow clone, the linknodes callback needs to also include
+            # those file nodes that are in the manifests we sent but weren't
+            # introduced by those manifests.
+            commonctxs = [self._repo[c] for c in commonrevs]
+            clrev = self._repo.changelog.rev
+
+            # Defining this function has a side-effect of overriding the
+            # function of the same name that was passed in as an argument.
+            # TODO have caller pass in appropriate function.
+            def linknodes(flog, fname):
+                for c in commonctxs:
+                    try:
+                        fnode = c.filenode(fname)
+                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
+                    except error.ManifestLookupError:
+                        pass
+                links = normallinknodes(flog, fname)
+                if len(links) != len(mfdicts):
+                    for mf, lr in mfdicts:
+                        fnode = mf.get(fname, None)
+                        if fnode in links:
+                            links[fnode] = min(links[fnode], lr, key=clrev)
+                        elif fnode:
+                            links[fnode] = lr
+                return links
+        else:
+            linknodes = normallinknodes
+
         repo = self._repo
-        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
+        progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                         total=len(changedfiles))
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
@@ -751,129 +1146,89 @@
                 raise error.Abort(_("empty or missing file data for %s") %
                                   fname)
 
+            clrevtolocalrev.clear()
+
             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]
 
-            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
-            if filenodes:
-                progress.update(i + 1, item=fname)
-                h = self.fileheader(fname)
-                size = len(h)
-                yield h
-                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
-                    size += len(chunk)
-                    yield chunk
-                self._verbosenote(_('%8.i  %s\n') % (size, fname))
+            frev, flr = filerevlog.rev, filerevlog.linkrev
+            filenodes = [n for n in linkrevnodes
+                         if flr(frev(n)) not in commonrevs]
+
+            if not filenodes:
+                continue
+
+            progress.update(i + 1, item=fname)
+
+            deltas = deltagroup(
+                self._repo, filerevlog, filenodes, False, lookupfilelog,
+                self._forcedeltaparentprev, self._reorder,
+                ellipses=self._ellipses,
+                clrevtolocalrev=clrevtolocalrev,
+                fullclnodes=self._fullclnodes,
+                precomputedellipsis=self._precomputedellipsis)
+
+            yield fname, deltas
+
         progress.complete()
 
-    def deltaparent(self, revlog, rev, p1, p2, prev):
-        if not revlog.candelta(prev, rev):
-            raise error.ProgrammingError('cg1 should not be used in this case')
-        return prev
-
-    def revchunk(self, revlog, rev, prev, linknode):
-        node = revlog.node(rev)
-        p1, p2 = revlog.parentrevs(rev)
-        base = self.deltaparent(revlog, rev, p1, p2, prev)
+def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
+                   shallow=False, ellipsisroots=None, fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.linknode)
 
-        prefix = ''
-        if revlog.iscensored(base) or revlog.iscensored(rev):
-            try:
-                delta = revlog.revision(node, raw=True)
-            except error.CensoredNodeError as e:
-                delta = e.tombstone
-            if base == nullrev:
-                prefix = mdiff.trivialdiffheader(len(delta))
-            else:
-                baselen = revlog.rawsize(base)
-                prefix = mdiff.replacediffheader(baselen, len(delta))
-        elif base == nullrev:
-            delta = revlog.revision(node, raw=True)
-            prefix = mdiff.trivialdiffheader(len(delta))
-        else:
-            delta = revlog.revdiff(base, rev)
-        p1n, p2n = revlog.parents(node)
-        basenode = revlog.node(base)
-        flags = revlog.flags(rev)
-        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
-        meta += prefix
-        l = len(meta) + len(delta)
-        yield chunkheader(l)
-        yield meta
-        yield delta
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # do nothing with basenode, it is implicitly the previous one in HG10
-        # do nothing with flags, it is implicitly 0 for cg1 and cg2
-        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
+    return cgpacker(repo, filematcher, b'01',
+                    allowreorder=None,
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=b'',
+                    forcedeltaparentprev=True,
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-class cg2packer(cg1packer):
-    version = '02'
-    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
-
-    def __init__(self, repo, bundlecaps=None):
-        super(cg2packer, self).__init__(repo, bundlecaps)
-        if self._reorder is None:
-            # Since generaldelta is directly supported by cg2, reordering
-            # generally doesn't help, so we disable it by default (treating
-            # bundle.reorder=auto just like bundle.reorder=False).
-            self._reorder = False
+def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
+                   shallow=False, ellipsisroots=None, fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode)
 
-    def deltaparent(self, revlog, rev, p1, p2, prev):
-        dp = revlog.deltaparent(rev)
-        if dp == nullrev and revlog.storedeltachains:
-            # Avoid sending full revisions when delta parent is null. Pick prev
-            # in that case. It's tempting to pick p1 in this case, as p1 will
-            # be smaller in the common case. However, computing a delta against
-            # p1 may require resolving the raw text of p1, which could be
-            # expensive. The revlog caches should have prev cached, meaning
-            # less CPU for changegroup generation. There is likely room to add
-            # a flag and/or config option to control this behavior.
-            base = prev
-        elif dp == nullrev:
-            # revlog is configured to use full snapshot for a reason,
-            # stick to full snapshot.
-            base = nullrev
-        elif dp not in (p1, p2, prev):
-            # Pick prev when we can't be sure remote has the base revision.
-            return prev
-        else:
-            base = dp
-        if base != nullrev and not revlog.candelta(base, rev):
-            base = nullrev
-        return base
+    # Since generaldelta is directly supported by cg2, reordering
+    # generally doesn't help, so we disable it by default (treating
+    # bundle.reorder=auto just like bundle.reorder=False).
+    return cgpacker(repo, filematcher, b'02',
+                    allowreorder=False,
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=b'',
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
-        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
-
-class cg3packer(cg2packer):
-    version = '03'
-    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
-
-    def _packmanifests(self, dir, mfnodes, lookuplinknode):
-        if dir:
-            yield self.fileheader(dir)
+def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
+                   shallow=False, ellipsisroots=None, fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
 
-        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
-        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
-                                units=_('manifests')):
-            yield chunk
-
-    def _manifestsdone(self):
-        return self.close()
+    return cgpacker(repo, filematcher, b'03',
+                    allowreorder=False,
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=closechunk(),
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        return struct.pack(
-            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
-
-_packermap = {'01': (cg1packer, cg1unpacker),
+_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
-             '02': (cg2packer, cg2unpacker),
+             '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
-             '03': (cg3packer, cg3unpacker),
+             '03': (_makecg3packer, cg3unpacker),
 }
 
 def allsupportedversions(repo):
@@ -899,7 +1254,7 @@
         # support versions 01 and 02.
         versions.discard('01')
         versions.discard('02')
-    if NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # support that for stripping and unbundling to work.
         versions.discard('01')
@@ -927,9 +1282,32 @@
     assert versions
     return min(versions)
 
-def getbundler(version, repo, bundlecaps=None):
+def getbundler(version, repo, bundlecaps=None, filematcher=None,
+               ellipses=False, shallow=False, ellipsisroots=None,
+               fullnodes=None):
     assert version in supportedoutgoingversions(repo)
-    return _packermap[version][0](repo, bundlecaps)
+
+    if filematcher is None:
+        filematcher = matchmod.alwaysmatcher(repo.root, '')
+
+    if version == '01' and not filematcher.always():
+        raise error.ProgrammingError('version 01 changegroups do not support '
+                                     'sparse file matchers')
+
+    if ellipses and version in (b'01', b'02'):
+        raise error.Abort(
+            _('ellipsis nodes require at least cg3 on client and server, '
+              'but negotiated version %s') % version)
+
+    # Requested files could include files not in the local store. So
+    # filter those out.
+    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
+                                             filematcher)
+
+    fn = _packermap[version][0]
+    return fn(repo, filematcher, bundlecaps, ellipses=ellipses,
+              shallow=shallow, ellipsisroots=ellipsisroots,
+              fullnodes=fullnodes)
 
 def getunbundler(version, fh, alg, extras=None):
     return _packermap[version][1](fh, alg, extras=extras)
@@ -950,8 +1328,9 @@
                         {'clcount': len(outgoing.missing) })
 
 def makestream(repo, outgoing, version, source, fastpath=False,
-               bundlecaps=None):
-    bundler = getbundler(version, repo, bundlecaps=bundlecaps)
+               bundlecaps=None, filematcher=None):
+    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
+                         filematcher=filematcher)
 
     repo = repo.unfiltered()
     commonrevs = outgoing.common
@@ -989,7 +1368,7 @@
         revisions += len(fl) - o
         if f in needfiles:
             needs = needfiles[f]
-            for new in xrange(o, len(fl)):
+            for new in pycompat.xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
--- a/mercurial/changelog.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/changelog.py	Fri Aug 24 12:55:05 2018 -0700
@@ -22,7 +22,6 @@
     error,
     pycompat,
     revlog,
-    util,
 )
 from .utils import (
     dateutil,
@@ -304,7 +303,7 @@
         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each. So disable delta
         # chains.
-        self.storedeltachains = False
+        self._storedeltachains = False
 
         self._realopener = opener
         self._delayed = False
@@ -313,7 +312,7 @@
         self.filteredrevs = frozenset()
 
     def tiprev(self):
-        for i in xrange(len(self) -1, -2, -1):
+        for i in pycompat.xrange(len(self) -1, -2, -1):
             if i not in self.filteredrevs:
                 return i
 
@@ -332,7 +331,7 @@
             return revlog.revlog.__iter__(self)
 
         def filterediter():
-            for i in xrange(len(self)):
+            for i in pycompat.xrange(len(self)):
                 if i not in self.filteredrevs:
                     yield i
 
@@ -344,12 +343,6 @@
             if i not in self.filteredrevs:
                 yield i
 
-    @util.propertycache
-    def nodemap(self):
-        # XXX need filtering too
-        self.rev(self.node(0))
-        return self._nodecache
-
     def reachableroots(self, minroot, heads, roots, includepath=False):
         return self.index.reachableroots2(minroot, heads, roots, includepath)
 
@@ -563,8 +556,8 @@
         if revs is not None:
             if revs:
                 assert revs[-1] + 1 == rev
-                revs = xrange(revs[0], rev + 1)
+                revs = pycompat.membershiprange(revs[0], rev + 1)
             else:
-                revs = xrange(rev, rev + 1)
+                revs = pycompat.membershiprange(rev, rev + 1)
             transaction.changes['revs'] = revs
         return node
--- a/mercurial/cmdutil.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/cmdutil.py	Fri Aug 24 12:55:05 2018 -0700
@@ -607,17 +607,13 @@
 def _unshelvemsg():
     return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
 
-def _updatecleanmsg(dest=None):
-    warning = _('warning: this will discard uncommitted changes')
-    return 'hg update --clean %s (%s)' % (dest or '.', warning)
-
 def _graftmsg():
     # tweakdefaults requires `update` to have a rev hence the `.`
-    return _helpmessage('hg graft --continue', _updatecleanmsg())
+    return _helpmessage('hg graft --continue', 'hg graft --abort')
 
 def _mergemsg():
     # tweakdefaults requires `update` to have a rev hence the `.`
-     return _helpmessage('hg commit', _updatecleanmsg())
+    return _helpmessage('hg commit', 'hg merge --abort')
 
 def _bisectmsg():
     msg = _('To mark the changeset good:    hg bisect --good\n'
@@ -1087,11 +1083,11 @@
                                    "treemanifest enabled"))
             if not dir.endswith('/'):
                 dir = dir + '/'
-            dirlog = repo.manifestlog._revlog.dirlog(dir)
+            dirlog = repo.manifestlog.getstorage(dir)
             if len(dirlog):
                 r = dirlog
         elif mf:
-            r = repo.manifestlog._revlog
+            r = repo.manifestlog.getstorage(b'')
         elif file_:
             filelog = repo.file(file_)
             if len(filelog):
@@ -1755,7 +1751,7 @@
         """
         cl_count = len(repo)
         revs = []
-        for j in xrange(0, last + 1):
+        for j in pycompat.xrange(0, last + 1):
             linkrev = filelog.linkrev(j)
             if linkrev < minrev:
                 continue
@@ -1889,9 +1885,6 @@
     revs = _walkrevs(repo, opts)
     if not revs:
         return []
-    if allfiles and len(revs) > 1:
-        raise error.Abort(_("multiple revisions not supported with "
-                            "--all-files"))
     wanted = set()
     slowpath = match.anypats() or (not match.always() and opts.get('removed'))
     fncache = {}
@@ -1902,7 +1895,7 @@
     # wanted: a cache of filenames that were changed (ctx.files()) and that
     # match the file filtering conditions.
 
-    if match.always():
+    if match.always() or allfiles:
         # No files, no patterns.  Display all revs.
         wanted = revs
     elif not slowpath:
@@ -1966,7 +1959,7 @@
         rev = repo[rev].rev()
         ff = _followfilter(repo)
         stop = min(revs[0], revs[-1])
-        for x in xrange(rev, stop - 1, -1):
+        for x in pycompat.xrange(rev, stop - 1, -1):
             if ff.match(x):
                 wanted = wanted - [x]
 
@@ -1985,7 +1978,7 @@
         stopiteration = False
         for windowsize in increasingwindows():
             nrevs = []
-            for i in xrange(windowsize):
+            for i in pycompat.xrange(windowsize):
                 rev = next(it, None)
                 if rev is None:
                     stopiteration = True
@@ -2038,7 +2031,8 @@
                 cca(f)
             names.append(f)
             if ui.verbose or not exact:
-                ui.status(_('adding %s\n') % match.rel(f))
+                ui.status(_('adding %s\n') % match.rel(f),
+                          label='addremove.added')
 
     for subpath in sorted(wctx.substate):
         sub = wctx.sub(subpath)
@@ -2136,7 +2130,8 @@
 
     for f in forget:
         if ui.verbose or not match.exact(f) or interactive:
-            ui.status(_('removing %s\n') % match.rel(f))
+            ui.status(_('removing %s\n') % match.rel(f),
+                      label='addremove.removed')
 
     if not dryrun:
         rejected = wctx.forget(forget, prefix)
@@ -2269,7 +2264,8 @@
     for f in list:
         if ui.verbose or not m.exact(f):
             progress.increment()
-            ui.status(_('removing %s\n') % m.rel(f))
+            ui.status(_('removing %s\n') % m.rel(f),
+                      label='addremove.removed')
     progress.complete()
 
     if not dryrun:
@@ -2428,7 +2424,7 @@
         if len(old.parents()) > 1:
             # ctx.files() isn't reliable for merges, so fall back to the
             # slower repo.status() method
-            files = set([fn for st in repo.status(base, old)[:3]
+            files = set([fn for st in base.status(old)[:3]
                          for fn in st])
         else:
             files = set(old.files())
@@ -2556,8 +2552,10 @@
         obsmetadata = None
         if opts.get('note'):
             obsmetadata = {'note': encoding.fromlocal(opts['note'])}
+        backup = ui.configbool('ui', 'history-editing-backup')
         scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
-                             fixphase=True, targetphase=commitphase)
+                             fixphase=True, targetphase=commitphase,
+                             backup=backup)
 
         # Fixing the dirstate because localrepo.commitctx does not update
         # it. This is rather convenient because we did not need to update
--- a/mercurial/color.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/color.py	Fri Aug 24 12:55:05 2018 -0700
@@ -83,6 +83,8 @@
     'grep.filename': 'magenta',
     'grep.user': 'magenta',
     'grep.date': 'magenta',
+    'addremove.added': 'green',
+    'addremove.removed': 'red',
     'bookmarks.active': 'green',
     'branches.active': 'none',
     'branches.closed': 'black bold',
@@ -117,6 +119,7 @@
     'formatvariant.config.default': 'green',
     'formatvariant.default': '',
     'histedit.remaining': 'red bold',
+    'ui.error': 'red',
     'ui.prompt': 'yellow',
     'log.changeset': 'yellow',
     'patchbomb.finalsummary': '',
--- a/mercurial/commands.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/commands.py	Fri Aug 24 12:55:05 2018 -0700
@@ -35,6 +35,7 @@
     error,
     exchange,
     extensions,
+    filemerge,
     formatter,
     graphmod,
     hbisect,
@@ -2532,6 +2533,7 @@
     """
     opts = pycompat.byteskwargs(opts)
     diff = opts.get('all') or opts.get('diff')
+    all_files = opts.get('all_files')
     if diff and opts.get('all_files'):
         raise error.Abort(_('--diff and --all-files are mutually exclusive'))
     # TODO: remove "not opts.get('rev')" if --all-files -rMULTIREV gets working
@@ -2606,16 +2608,16 @@
     def difflinestates(a, b):
         sm = difflib.SequenceMatcher(None, a, b)
         for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag == 'insert':
-                for i in xrange(blo, bhi):
+            if tag == r'insert':
+                for i in pycompat.xrange(blo, bhi):
                     yield ('+', b[i])
-            elif tag == 'delete':
-                for i in xrange(alo, ahi):
+            elif tag == r'delete':
+                for i in pycompat.xrange(alo, ahi):
                     yield ('-', a[i])
-            elif tag == 'replace':
-                for i in xrange(alo, ahi):
+            elif tag == r'replace':
+                for i in pycompat.xrange(alo, ahi):
                     yield ('-', a[i])
-                for i in xrange(blo, bhi):
+                for i in pycompat.xrange(blo, bhi):
                     yield ('+', b[i])
 
     def display(fm, fn, ctx, pstates, states):
@@ -2623,7 +2625,7 @@
         if fm.isplain():
             formatuser = ui.shortuser
         else:
-            formatuser = str
+            formatuser = pycompat.bytestr
         if ui.quiet:
             datefmt = '%Y-%m-%d'
         else:
@@ -2648,20 +2650,22 @@
             fm.data(node=fm.hexfunc(scmutil.binnode(ctx)))
 
             cols = [
-                ('filename', fn, True),
-                ('rev', rev, not plaingrep),
-                ('linenumber', l.linenum, opts.get('line_number')),
+                ('filename', '%s', fn, True),
+                ('rev', '%d', rev, not plaingrep),
+                ('linenumber', '%d', l.linenum, opts.get('line_number')),
             ]
             if diff:
-                cols.append(('change', change, True))
+                cols.append(('change', '%s', change, True))
             cols.extend([
-                ('user', formatuser(ctx.user()), opts.get('user')),
-                ('date', fm.formatdate(ctx.date(), datefmt), opts.get('date')),
+                ('user', '%s', formatuser(ctx.user()), opts.get('user')),
+                ('date', '%s', fm.formatdate(ctx.date(), datefmt),
+                 opts.get('date')),
             ])
-            lastcol = next(name for name, data, cond in reversed(cols) if cond)
-            for name, data, cond in cols:
+            lastcol = next(
+                name for name, fmt, data, cond in reversed(cols) if cond)
+            for name, fmt, data, cond in cols:
                 field = fieldnamemap.get(name, name)
-                fm.condwrite(cond, field, '%s', data, label='grep.%s' % name)
+                fm.condwrite(cond, field, fmt, data, label='grep.%s' % name)
                 if cond and name != lastcol:
                     fm.plain(sep, label='grep.sep')
             if not opts.get('files_with_matches'):
@@ -2756,7 +2760,7 @@
             if pstates or states:
                 r = display(fm, fn, ctx, pstates, states)
                 found = found or r
-                if r and not diff:
+                if r and not diff and not all_files:
                     skip[fn] = True
                     if copy:
                         skip[copy] = True
@@ -4528,6 +4532,7 @@
     """
 
     opts = pycompat.byteskwargs(opts)
+    confirm = ui.configbool('commands', 'resolve.confirm')
     flaglist = 'all mark unmark list no_status'.split()
     all, mark, unmark, show, nostatus = \
         [opts.get(o) for o in flaglist]
@@ -4540,6 +4545,20 @@
         raise error.Abort(_('no files or directories specified'),
                          hint=('use --all to re-merge all unresolved files'))
 
+    if confirm:
+        if all:
+            if ui.promptchoice(_(b're-merge all unresolved files (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+        if mark and not pats:
+            if ui.promptchoice(_(b'mark all unresolved files as resolved (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+        if unmark and not pats:
+            if ui.promptchoice(_(b'mark all resolved files as unresolved (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+
     if show:
         ui.pager('resolve')
         fm = ui.formatter('resolve', opts)
@@ -4594,6 +4613,12 @@
         runconclude = False
 
         tocomplete = []
+        hasconflictmarkers = []
+        if mark:
+            markcheck = ui.config('commands', 'resolve.mark-check')
+            if markcheck not in ['warn', 'abort']:
+                # Treat all invalid / unrecognized values as 'none'.
+                markcheck = False
         for f in ms:
             if not m(f):
                 continue
@@ -4629,6 +4654,12 @@
                 continue
 
             if mark:
+                if markcheck:
+                    with repo.wvfs(f) as fobj:
+                        fdata = fobj.read()
+                    if filemerge.hasconflictmarkers(fdata) and \
+                        ms[f] != mergemod.MERGE_RECORD_RESOLVED:
+                        hasconflictmarkers.append(f)
                 ms.mark(f, mergemod.MERGE_RECORD_RESOLVED)
             elif unmark:
                 ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED)
@@ -4663,6 +4694,13 @@
                         if inst.errno != errno.ENOENT:
                             raise
 
+        if hasconflictmarkers:
+            ui.warn(_('warning: the following files still have conflict '
+                      'markers:\n  ') + '\n  '.join(hasconflictmarkers) + '\n')
+            if markcheck == 'abort' and not all:
+                raise error.Abort(_('conflict markers detected'),
+                                  hint=_('use --all to mark anyway'))
+
         for f in tocomplete:
             try:
                 # resolve file
--- a/mercurial/commandserver.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/commandserver.py	Fri Aug 24 12:55:05 2018 -0700
@@ -353,7 +353,7 @@
         # handle exceptions that may be raised by command server. most of
         # known exceptions are caught by dispatch.
         except error.Abort as inst:
-            ui.warn(_('abort: %s\n') % inst)
+            ui.error(_('abort: %s\n') % inst)
         except IOError as inst:
             if inst.errno != errno.EPIPE:
                 raise
--- a/mercurial/configitems.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/configitems.py	Fri Aug 24 12:55:05 2018 -0700
@@ -190,6 +190,12 @@
 coreconfigitem('commands', 'grep.all-files',
     default=False,
 )
+coreconfigitem('commands', 'resolve.confirm',
+    default=False,
+)
+coreconfigitem('commands', 'resolve.mark-check',
+    default='none',
+)
 coreconfigitem('commands', 'show.aliasprefix',
     default=list,
 )
@@ -584,9 +590,15 @@
 coreconfigitem('experimental', 'removeemptydirs',
     default=True,
 )
+coreconfigitem('experimental', 'revisions.prefixhexnode',
+    default=False,
+)
 coreconfigitem('experimental', 'revlogv2',
     default=None,
 )
+coreconfigitem('experimental', 'revisions.disambiguatewithin',
+    default=None,
+)
 coreconfigitem('experimental', 'single-head-per-branch',
     default=False,
 )
@@ -759,6 +771,9 @@
 coreconfigitem('merge', 'preferancestor',
         default=lambda: ['*'],
 )
+coreconfigitem('merge', 'strict-capability-check',
+    default=False,
+)
 coreconfigitem('merge-tools', '.*',
     default=None,
     generic=True,
--- a/mercurial/context.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/context.py	Fri Aug 24 12:55:05 2018 -0700
@@ -372,6 +372,10 @@
                 for rfiles, sfiles in zip(r, s):
                     rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
 
+        narrowmatch = self._repo.narrowmatch()
+        if not narrowmatch.always():
+            for l in r:
+                l[:] = list(filter(narrowmatch, l))
         for l in r:
             l.sort()
 
@@ -438,7 +442,6 @@
                         "unsupported changeid '%s' of type %s" %
                         (changeid, type(changeid)))
 
-            # lookup failed
         except (error.FilteredIndexError, error.FilteredLookupError):
             raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                 % pycompat.bytestr(changeid))
@@ -590,12 +593,6 @@
                             short(n) for n in sorted(cahs) if n != anc))
         return changectx(self._repo, anc)
 
-    def descendant(self, other):
-        msg = (b'ctx.descendant(other) is deprecated, '
-               b'use ctx.isancestorof(other)')
-        self._repo.ui.deprecwarn(msg, b'4.7')
-        return self.isancestorof(other)
-
     def isancestorof(self, other):
         """True if this changeset is an ancestor of other"""
         return self._repo.changelog.isancestorrev(self._rev, other._rev)
@@ -1903,9 +1900,9 @@
         # Test that each new directory to be created to write this path from p2
         # is not a file in p1.
         components = path.split('/')
-        for i in xrange(len(components)):
+        for i in pycompat.xrange(len(components)):
             component = "/".join(components[0:i])
-            if component in self.p1():
+            if component in self.p1() and self._cache[component]['exists']:
                 fail(path, component)
 
         # Test the other direction -- that this path from p2 isn't a directory
@@ -1929,8 +1926,13 @@
                         flags=flags)
 
     def setflags(self, path, l, x):
+        flag = ''
+        if l:
+            flag = 'l'
+        elif x:
+            flag = 'x'
         self._markdirty(path, exists=True, date=dateutil.makedate(),
-                        flags=(l and 'l' or '') + (x and 'x' or ''))
+                        flags=flag)
 
     def remove(self, path):
         self._markdirty(path, exists=False)
@@ -2037,6 +2039,13 @@
         return keys
 
     def _markdirty(self, path, exists, data=None, date=None, flags=''):
+        # data not provided, let's see if we already have some; if not, let's
+        # grab it from our underlying context, so that we always have data if
+        # the file is marked as existing.
+        if exists and data is None:
+            oldentry = self._cache.get(path) or {}
+            data = oldentry.get('data') or self._wrappedctx[path].data()
+
         self._cache[path] = {
             'exists': exists,
             'data': data,
--- a/mercurial/dagop.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/dagop.py	Fri Aug 24 12:55:05 2018 -0700
@@ -195,7 +195,7 @@
     """Build map of 'rev -> child revs', offset from startrev"""
     cl = repo.changelog
     nullrev = node.nullrev
-    descmap = [[] for _rev in xrange(startrev, len(cl))]
+    descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
     for currev in cl.revs(startrev + 1):
         p1rev, p2rev = cl.parentrevs(currev)
         if p1rev >= startrev:
@@ -435,7 +435,7 @@
         for idx, (parent, blocks) in enumerate(pblocks):
             for (a1, a2, b1, b2), _t in blocks:
                 if a2 - a1 >= b2 - b1:
-                    for bk in xrange(b1, b2):
+                    for bk in pycompat.xrange(b1, b2):
                         if child.fctxs[bk] == childfctx:
                             ak = min(a1 + (bk - b1), a2 - 1)
                             child.fctxs[bk] = parent.fctxs[ak]
@@ -448,7 +448,7 @@
         # line.
         for parent, blocks in remaining:
             for a1, a2, b1, b2 in blocks:
-                for bk in xrange(b1, b2):
+                for bk in pycompat.xrange(b1, b2):
                     if child.fctxs[bk] == childfctx:
                         ak = min(a1 + (bk - b1), a2 - 1)
                         child.fctxs[bk] = parent.fctxs[ak]
@@ -715,3 +715,63 @@
     for g in groups:
         for r in g[0]:
             yield r
+
+def headrevs(revs, parentsfn):
+    """Resolve the set of heads from a set of revisions.
+
+    Receives an iterable of revision numbers and a callable that receives a
+    revision number and returns an iterable of parent revision numbers, possibly
+    including nullrev.
+
+    Returns a set of revision numbers that are DAG heads within the passed
+    subset.
+
+    ``nullrev`` is never included in the returned set, even if it is provided in
+    the input set.
+    """
+    headrevs = set(revs)
+
+    for rev in revs:
+        for prev in parentsfn(rev):
+            headrevs.discard(prev)
+
+    headrevs.discard(node.nullrev)
+
+    return headrevs
+
+def linearize(revs, parentsfn):
+    """Linearize and topologically sort a list of revisions.
+
+    The linearization process tries to create long runs of revs where a child
+    rev comes immediately after its first parent. This is done by visiting the
+    heads of the revs in inverse topological order, and for each visited rev,
+    visiting its second parent, then its first parent, then adding the rev
+    itself to the output list.
+
+    Returns a list of revision numbers.
+    """
+    visit = list(sorted(headrevs(revs, parentsfn), reverse=True))
+    finished = set()
+    result = []
+
+    while visit:
+        rev = visit.pop()
+        if rev < 0:
+            rev = -rev - 1
+
+            if rev not in finished:
+                result.append(rev)
+                finished.add(rev)
+
+        else:
+            visit.append(-rev - 1)
+
+            for prev in parentsfn(rev):
+                if prev == node.nullrev or prev not in revs or prev in finished:
+                    continue
+
+                visit.append(prev)
+
+    assert len(result) == len(revs)
+
+    return result
--- a/mercurial/dagparser.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/dagparser.py	Fri Aug 24 12:55:05 2018 -0700
@@ -222,7 +222,7 @@
         elif c == '+':
             c, digs = nextrun(nextch(), pycompat.bytestr(string.digits))
             n = int(digs)
-            for i in xrange(0, n):
+            for i in pycompat.xrange(0, n):
                 yield 'n', (r, [p1])
                 p1 = r
                 r += 1
--- a/mercurial/dagutil.py	Sat Aug 18 10:24:57 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,287 +0,0 @@
-# dagutil.py - dag utilities for mercurial
-#
-# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
-# and Peter Arrenbrecht <peter@arrenbrecht.ch>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from .i18n import _
-from .node import nullrev
-
-class basedag(object):
-    '''generic interface for DAGs
-
-    terms:
-    "ix" (short for index) identifies a nodes internally,
-    "id" identifies one externally.
-
-    All params are ixs unless explicitly suffixed otherwise.
-    Pluralized params are lists or sets.
-    '''
-
-    def __init__(self):
-        self._inverse = None
-
-    def nodeset(self):
-        '''set of all node ixs'''
-        raise NotImplementedError
-
-    def heads(self):
-        '''list of head ixs'''
-        raise NotImplementedError
-
-    def parents(self, ix):
-        '''list of parents ixs of ix'''
-        raise NotImplementedError
-
-    def inverse(self):
-        '''inverse DAG, where parents becomes children, etc.'''
-        raise NotImplementedError
-
-    def ancestorset(self, starts, stops=None):
-        '''
-        set of all ancestors of starts (incl), but stop walk at stops (excl)
-        '''
-        raise NotImplementedError
-
-    def descendantset(self, starts, stops=None):
-        '''
-        set of all descendants of starts (incl), but stop walk at stops (excl)
-        '''
-        return self.inverse().ancestorset(starts, stops)
-
-    def headsetofconnecteds(self, ixs):
-        '''
-        subset of connected list of ixs so that no node has a descendant in it
-
-        By "connected list" we mean that if an ancestor and a descendant are in
-        the list, then so is at least one path connecting them.
-        '''
-        raise NotImplementedError
-
-    def externalize(self, ix):
-        '''return a node id'''
-        return self._externalize(ix)
-
-    def externalizeall(self, ixs):
-        '''return a list of (or set if given a set) of node ids'''
-        ids = self._externalizeall(ixs)
-        if isinstance(ixs, set):
-            return set(ids)
-        return list(ids)
-
-    def internalize(self, id):
-        '''return a node ix'''
-        return self._internalize(id)
-
-    def internalizeall(self, ids, filterunknown=False):
-        '''return a list of (or set if given a set) of node ixs'''
-        ixs = self._internalizeall(ids, filterunknown)
-        if isinstance(ids, set):
-            return set(ixs)
-        return list(ixs)
-
-
-class genericdag(basedag):
-    '''generic implementations for DAGs'''
-
-    def ancestorset(self, starts, stops=None):
-        if stops:
-            stops = set(stops)
-        else:
-            stops = set()
-        seen = set()
-        pending = list(starts)
-        while pending:
-            n = pending.pop()
-            if n not in seen and n not in stops:
-                seen.add(n)
-                pending.extend(self.parents(n))
-        return seen
-
-    def headsetofconnecteds(self, ixs):
-        hds = set(ixs)
-        if not hds:
-            return hds
-        for n in ixs:
-            for p in self.parents(n):
-                hds.discard(p)
-        assert hds
-        return hds
-
-
-class revlogbaseddag(basedag):
-    '''generic dag interface to a revlog'''
-
-    def __init__(self, revlog, nodeset):
-        basedag.__init__(self)
-        self._revlog = revlog
-        self._heads = None
-        self._nodeset = nodeset
-
-    def nodeset(self):
-        return self._nodeset
-
-    def heads(self):
-        if self._heads is None:
-            self._heads = self._getheads()
-        return self._heads
-
-    def _externalize(self, ix):
-        return self._revlog.index[ix][7]
-    def _externalizeall(self, ixs):
-        idx = self._revlog.index
-        return [idx[i][7] for i in ixs]
-
-    def _internalize(self, id):
-        ix = self._revlog.rev(id)
-        if ix == nullrev:
-            raise LookupError(id, self._revlog.indexfile, _('nullid'))
-        return ix
-    def _internalizeall(self, ids, filterunknown):
-        rl = self._revlog
-        if filterunknown:
-            return [r for r in map(rl.nodemap.get, ids)
-                    if (r is not None
-                        and r != nullrev
-                        and r not in rl.filteredrevs)]
-        return [self._internalize(i) for i in ids]
-
-
-class revlogdag(revlogbaseddag):
-    '''dag interface to a revlog'''
-
-    def __init__(self, revlog, localsubset=None):
-        revlogbaseddag.__init__(self, revlog, set(revlog))
-        self._heads = localsubset
-
-    def _getheads(self):
-        return [r for r in self._revlog.headrevs() if r != nullrev]
-
-    def parents(self, ix):
-        rlog = self._revlog
-        idx = rlog.index
-        revdata = idx[ix]
-        prev = revdata[5]
-        if prev != nullrev:
-            prev2 = revdata[6]
-            if prev2 == nullrev:
-                return [prev]
-            return [prev, prev2]
-        prev2 = revdata[6]
-        if prev2 != nullrev:
-            return [prev2]
-        return []
-
-    def inverse(self):
-        if self._inverse is None:
-            self._inverse = inverserevlogdag(self)
-        return self._inverse
-
-    def ancestorset(self, starts, stops=None):
-        rlog = self._revlog
-        idx = rlog.index
-        if stops:
-            stops = set(stops)
-        else:
-            stops = set()
-        seen = set()
-        pending = list(starts)
-        while pending:
-            rev = pending.pop()
-            if rev not in seen and rev not in stops:
-                seen.add(rev)
-                revdata = idx[rev]
-                for i in [5, 6]:
-                    prev = revdata[i]
-                    if prev != nullrev:
-                        pending.append(prev)
-        return seen
-
-    def headsetofconnecteds(self, ixs):
-        if not ixs:
-            return set()
-        rlog = self._revlog
-        idx = rlog.index
-        headrevs = set(ixs)
-        for rev in ixs:
-            revdata = idx[rev]
-            for i in [5, 6]:
-                prev = revdata[i]
-                if prev != nullrev:
-                    headrevs.discard(prev)
-        assert headrevs
-        return headrevs
-
-    def linearize(self, ixs):
-        '''linearize and topologically sort a list of revisions
-
-        The linearization process tries to create long runs of revs where
-        a child rev comes immediately after its first parent. This is done by
-        visiting the heads of the given revs in inverse topological order,
-        and for each visited rev, visiting its second parent, then its first
-        parent, then adding the rev itself to the output list.
-        '''
-        sorted = []
-        visit = list(self.headsetofconnecteds(ixs))
-        visit.sort(reverse=True)
-        finished = set()
-
-        while visit:
-            cur = visit.pop()
-            if cur < 0:
-                cur = -cur - 1
-                if cur not in finished:
-                    sorted.append(cur)
-                    finished.add(cur)
-            else:
-                visit.append(-cur - 1)
-                visit += [p for p in self.parents(cur)
-                          if p in ixs and p not in finished]
-        assert len(sorted) == len(ixs)
-        return sorted
-
-
-class inverserevlogdag(revlogbaseddag, genericdag):
-    '''inverse of an existing revlog dag; see revlogdag.inverse()'''
-
-    def __init__(self, orig):
-        revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
-        self._orig = orig
-        self._children = {}
-        self._roots = []
-        self._walkfrom = len(self._revlog) - 1
-
-    def _walkto(self, walkto):
-        rev = self._walkfrom
-        cs = self._children
-        roots = self._roots
-        idx = self._revlog.index
-        while rev >= walkto:
-            data = idx[rev]
-            isroot = True
-            for prev in [data[5], data[6]]: # parent revs
-                if prev != nullrev:
-                    cs.setdefault(prev, []).append(rev)
-                    isroot = False
-            if isroot:
-                roots.append(rev)
-            rev -= 1
-        self._walkfrom = rev
-
-    def _getheads(self):
-        self._walkto(nullrev)
-        return self._roots
-
-    def parents(self, ix):
-        if ix is None:
-            return []
-        if ix <= self._walkfrom:
-            self._walkto(ix)
-        return self._children.get(ix, [])
-
-    def inverse(self):
-        return self._orig
--- a/mercurial/debugcommands.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/debugcommands.py	Fri Aug 24 12:55:05 2018 -0700
@@ -42,13 +42,12 @@
     color,
     context,
     dagparser,
-    dagutil,
     encoding,
     error,
     exchange,
     extensions,
     filemerge,
-    fileset,
+    filesetlang,
     formatter,
     hg,
     httppeer,
@@ -177,7 +176,8 @@
     if mergeable_file:
         linesperrev = 2
         # make a file with k lines per rev
-        initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
+        initialmergedlines = ['%d' % i
+                              for i in pycompat.xrange(0, total * linesperrev)]
         initialmergedlines.append("")
 
     tags = []
@@ -790,9 +790,10 @@
             if not opts.get('nonheads'):
                 ui.write(("unpruned common: %s\n") %
                          " ".join(sorted(short(n) for n in common)))
-                dag = dagutil.revlogdag(repo.changelog)
-                all = dag.ancestorset(dag.internalizeall(common))
-                common = dag.externalizeall(dag.headsetofconnecteds(all))
+
+                clnode = repo.changelog.node
+                common = repo.revs('heads(::%ln)', common)
+                common = {clnode(r) for r in common}
         else:
             nodes = None
             if pushedrevs:
@@ -887,15 +888,45 @@
 @command('debugfileset',
     [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
      ('', 'all-files', False,
-      _('test files from all revisions and working directory'))],
-    _('[-r REV] [--all-files] FILESPEC'))
+      _('test files from all revisions and working directory')),
+     ('s', 'show-matcher', None,
+      _('print internal representation of matcher')),
+     ('p', 'show-stage', [],
+      _('print parsed tree at the given stage'), _('NAME'))],
+    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
 def debugfileset(ui, repo, expr, **opts):
     '''parse and apply a fileset specification'''
+    from . import fileset
+    fileset.symbols # force import of fileset so we have predicates to optimize
     opts = pycompat.byteskwargs(opts)
     ctx = scmutil.revsingle(repo, opts.get('rev'), None)
-    if ui.verbose:
-        tree = fileset.parse(expr)
-        ui.note(fileset.prettyformat(tree), "\n")
+
+    stages = [
+        ('parsed', pycompat.identity),
+        ('analyzed', filesetlang.analyze),
+        ('optimized', filesetlang.optimize),
+    ]
+    stagenames = set(n for n, f in stages)
+
+    showalways = set()
+    if ui.verbose and not opts['show_stage']:
+        # show parsed tree by --verbose (deprecated)
+        showalways.add('parsed')
+    if opts['show_stage'] == ['all']:
+        showalways.update(stagenames)
+    else:
+        for n in opts['show_stage']:
+            if n not in stagenames:
+                raise error.Abort(_('invalid stage name: %s') % n)
+        showalways.update(opts['show_stage'])
+
+    tree = filesetlang.parse(expr)
+    for n, f in stages:
+        tree = f(tree)
+        if n in showalways:
+            if opts['show_stage'] or n != 'parsed':
+                ui.write(("* %s:\n") % n)
+            ui.write(filesetlang.prettyformat(tree), "\n")
 
     files = set()
     if opts['all_files']:
@@ -914,14 +945,15 @@
         files.update(ctx.substate)
 
     m = ctx.matchfileset(expr)
+    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
+        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
     for f in sorted(files):
         if not m(f):
             continue
         ui.write("%s\n" % f)
 
 @command('debugformat',
-         [] + cmdutil.formatteropts,
-        _(''))
+         [] + cmdutil.formatteropts)
 def debugformat(ui, repo, **opts):
     """display format information about the current repository
 
@@ -1446,6 +1478,53 @@
 
     return held
 
+@command('debugmanifestfulltextcache', [
+        ('', 'clear', False, _('clear the cache')),
+        ('a', 'add', '', _('add the given manifest node to the cache'),
+         _('NODE'))
+    ], '')
+def debugmanifestfulltextcache(ui, repo, add=None, **opts):
+    """show, clear or amend the contents of the manifest fulltext cache"""
+    with repo.lock():
+        r = repo.manifestlog.getstorage(b'')
+        try:
+            cache = r._fulltextcache
+        except AttributeError:
+            ui.warn(_(
+                "Current revlog implementation doesn't appear to have a "
+                'manifest fulltext cache\n'))
+            return
+
+        if opts.get(r'clear'):
+            cache.clear()
+
+        if add:
+            try:
+                manifest = repo.manifestlog[r.lookup(add)]
+            except error.LookupError as e:
+                raise error.Abort(e, hint="Check your manifest node id")
+            manifest.read()  # stores revisision in cache too
+
+        if not len(cache):
+            ui.write(_('Cache empty'))
+        else:
+            ui.write(
+                _('Cache contains %d manifest entries, in order of most to '
+                  'least recent:\n') % (len(cache),))
+            totalsize = 0
+            for nodeid in cache:
+                # Use cache.get to not update the LRU order
+                data = cache.get(nodeid)
+                size = len(data)
+                totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
+                ui.write(_('id: %s, size %s\n') % (
+                    hex(nodeid), util.bytecount(size)))
+            ondisk = cache._opener.stat('manifestfulltextcache').st_size
+            ui.write(
+                _('Total cache data size %s, on-disk %s\n') % (
+                    util.bytecount(totalsize), util.bytecount(ondisk))
+            )
+
 @command('debugmergestate', [], '')
 def debugmergestate(ui, repo, *args):
     """print merge state
@@ -1971,7 +2050,7 @@
         ts = 0
         heads = set()
 
-        for rev in xrange(numrevs):
+        for rev in pycompat.xrange(numrevs):
             dbase = r.deltaparent(rev)
             if dbase == -1:
                 dbase = rev
@@ -2006,20 +2085,43 @@
     if not flags:
         flags = ['(none)']
 
+    ### tracks merge vs single parent
     nummerges = 0
+
+    ### tracks ways the "delta" are build
+    # nodelta
+    numempty = 0
+    numemptytext = 0
+    numemptydelta = 0
+    # full file content
     numfull = 0
+    # intermediate snapshot against a prior snapshot
+    numsemi = 0
+    # snapshot count per depth
+    numsnapdepth = collections.defaultdict(lambda: 0)
+    # delta against previous revision
     numprev = 0
+    # delta against first or second parent (not prev)
     nump1 = 0
     nump2 = 0
+    # delta against neither prev nor parents
     numother = 0
+    # delta against prev that are also first or second parent
+    # (details of `numprev`)
     nump1prev = 0
     nump2prev = 0
+
+    # data about delta chain of each revs
     chainlengths = []
     chainbases = []
     chainspans = []
 
+    # data about each revision
     datasize = [None, 0, 0]
     fullsize = [None, 0, 0]
+    semisize = [None, 0, 0]
+    # snapshot count per depth
+    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
     deltasize = [None, 0, 0]
     chunktypecounts = {}
     chunktypesizes = {}
@@ -2032,7 +2134,7 @@
         l[2] += size
 
     numrevs = len(r)
-    for rev in xrange(numrevs):
+    for rev in pycompat.xrange(numrevs):
         p1, p2 = r.parentrevs(rev)
         delta = r.deltaparent(rev)
         if format > 0:
@@ -2044,30 +2146,49 @@
             chainlengths.append(0)
             chainbases.append(r.start(rev))
             chainspans.append(size)
-            numfull += 1
-            addsize(size, fullsize)
+            if size == 0:
+                numempty += 1
+                numemptytext += 1
+            else:
+                numfull += 1
+                numsnapdepth[0] += 1
+                addsize(size, fullsize)
+                addsize(size, snapsizedepth[0])
         else:
             chainlengths.append(chainlengths[delta] + 1)
             baseaddr = chainbases[delta]
             revaddr = r.start(rev)
             chainbases.append(baseaddr)
             chainspans.append((revaddr - baseaddr) + size)
-            addsize(size, deltasize)
-            if delta == rev - 1:
-                numprev += 1
-                if delta == p1:
-                    nump1prev += 1
+            if size == 0:
+                numempty += 1
+                numemptydelta += 1
+            elif r.issnapshot(rev):
+                addsize(size, semisize)
+                numsemi += 1
+                depth = r.snapshotdepth(rev)
+                numsnapdepth[depth] += 1
+                addsize(size, snapsizedepth[depth])
+            else:
+                addsize(size, deltasize)
+                if delta == rev - 1:
+                    numprev += 1
+                    if delta == p1:
+                        nump1prev += 1
+                    elif delta == p2:
+                        nump2prev += 1
+                elif delta == p1:
+                    nump1 += 1
                 elif delta == p2:
-                    nump2prev += 1
-            elif delta == p1:
-                nump1 += 1
-            elif delta == p2:
-                nump2 += 1
-            elif delta != nullrev:
-                numother += 1
+                    nump2 += 1
+                elif delta != nullrev:
+                    numother += 1
 
         # Obtain data on the raw chunks in the revlog.
-        segment = r._getsegmentforrevs(rev, rev)[1]
+        if util.safehasattr(r, '_getsegmentforrevs'):
+            segment = r._getsegmentforrevs(rev, rev)[1]
+        else:
+            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
         if segment:
             chunktype = bytes(segment[0:1])
         else:
@@ -2081,20 +2202,28 @@
         chunktypesizes[chunktype] += size
 
     # Adjust size min value for empty cases
-    for size in (datasize, fullsize, deltasize):
+    for size in (datasize, fullsize, semisize, deltasize):
         if size[0] is None:
             size[0] = 0
 
-    numdeltas = numrevs - numfull
+    numdeltas = numrevs - numfull - numempty - numsemi
     numoprev = numprev - nump1prev - nump2prev
     totalrawsize = datasize[2]
     datasize[2] /= numrevs
     fulltotal = fullsize[2]
     fullsize[2] /= numfull
+    semitotal = semisize[2]
+    snaptotal = {}
+    if 0 < numsemi:
+        semisize[2] /= numsemi
+    for depth in snapsizedepth:
+        snaptotal[depth] = snapsizedepth[depth][2]
+        snapsizedepth[depth][2] /= numsnapdepth[depth]
+
     deltatotal = deltasize[2]
-    if numrevs - numfull > 0:
-        deltasize[2] /= numrevs - numfull
-    totalsize = fulltotal + deltatotal
+    if numdeltas > 0:
+        deltasize[2] /= numdeltas
+    totalsize = fulltotal + semitotal + deltatotal
     avgchainlen = sum(chainlengths) / numrevs
     maxchainlen = max(chainlengths)
     maxchainspan = max(chainspans)
@@ -2126,10 +2255,22 @@
     ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
     ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
     ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
+    ui.write(('    empty     : ') + fmt % pcfmt(numempty, numrevs))
+    ui.write(('                   text  : ')
+             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
+    ui.write(('                   delta : ')
+             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
+    ui.write(('    snapshot  : ') + fmt % pcfmt(numfull + numsemi, numrevs))
+    for depth in sorted(numsnapdepth):
+        ui.write(('      lvl-%-3d :       ' % depth)
+                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
     ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
     ui.write(('revision size : ') + fmt2 % totalsize)
-    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
+    ui.write(('    snapshot  : ')
+             + fmt % pcfmt(fulltotal + semitotal, totalsize))
+    for depth in sorted(numsnapdepth):
+        ui.write(('      lvl-%-3d :       ' % depth)
+                 + fmt % pcfmt(snaptotal[depth], totalsize))
     ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
 
     def fmtchunktype(chunktype):
@@ -2163,6 +2304,13 @@
                  % tuple(datasize))
     ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
              % tuple(fullsize))
+    ui.write(('inter-snapshot size (min/max/avg)    : %d / %d / %d\n')
+             % tuple(semisize))
+    for depth in sorted(snapsizedepth):
+        if depth == 0:
+            continue
+        ui.write(('    level-%-3d (min/max/avg)          : %d / %d / %d\n')
+                 % ((depth,) + tuple(snapsizedepth[depth])))
     ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
              % tuple(deltasize))
 
@@ -2642,7 +2790,7 @@
         if line.startswith(b'#'):
             continue
 
-        if not line.startswith(' '):
+        if not line.startswith(b' '):
             # New block. Flush previous one.
             if activeaction:
                 yield activeaction, blocklines
@@ -3122,13 +3270,14 @@
             # urllib.Request insists on using has_data() as a proxy for
             # determining the request method. Override that to use our
             # explicitly requested method.
-            req.get_method = lambda: method
+            req.get_method = lambda: pycompat.sysstr(method)
 
             try:
                 res = opener.open(req)
                 body = res.read()
             except util.urlerr.urlerror as e:
-                e.read()
+                # read() method must be called, but only exists in Python 2
+                getattr(e, 'read', lambda: None)()
                 continue
 
             if res.headers.get('Content-Type') == 'application/mercurial-cbor':
--- a/mercurial/diffhelper.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/diffhelper.py	Fri Aug 24 12:55:05 2018 -0700
@@ -11,6 +11,7 @@
 
 from . import (
     error,
+    pycompat,
 )
 
 def addlines(fp, hunk, lena, lenb, a, b):
@@ -26,7 +27,7 @@
         num = max(todoa, todob)
         if num == 0:
             break
-        for i in xrange(num):
+        for i in pycompat.xrange(num):
             s = fp.readline()
             if not s:
                 raise error.ParseError(_('incomplete hunk'))
@@ -71,7 +72,7 @@
     blen = len(b)
     if alen > blen - bstart or bstart < 0:
         return False
-    for i in xrange(alen):
+    for i in pycompat.xrange(alen):
         if a[i][1:] != b[i + bstart]:
             return False
     return True
--- a/mercurial/dirstate.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/dirstate.py	Fri Aug 24 12:55:05 2018 -0700
@@ -893,8 +893,11 @@
             wadd = work.append
             while work:
                 nd = work.pop()
-                if not match.visitdir(nd):
+                visitentries = match.visitchildrenset(nd)
+                if not visitentries:
                     continue
+                if visitentries == 'this' or visitentries == 'all':
+                    visitentries = None
                 skip = None
                 if nd == '.':
                     nd = ''
@@ -909,6 +912,16 @@
                         continue
                     raise
                 for f, kind, st in entries:
+                    # Some matchers may return files in the visitentries set,
+                    # instead of 'this', if the matcher explicitly mentions them
+                    # and is not an exactmatcher. This is acceptable; we do not
+                    # make any hard assumptions about file-or-directory below
+                    # based on the presence of `f` in visitentries. If
+                    # visitchildrenset returned a set, we can always skip the
+                    # entries *not* in the set it provided regardless of whether
+                    # they're actually a file or a directory.
+                    if visitentries and f not in visitentries:
+                        continue
                     if normalizefile:
                         # even though f might be a directory, we're only
                         # interested in comparing it to files currently in the
--- a/mercurial/dirstateguard.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/dirstateguard.py	Fri Aug 24 12:55:05 2018 -0700
@@ -11,6 +11,7 @@
 
 from . import (
     error,
+    narrowspec,
     util,
 )
 
@@ -33,7 +34,10 @@
         self._active = False
         self._closed = False
         self._backupname = 'dirstate.backup.%s.%d' % (name, id(self))
+        self._narrowspecbackupname = ('narrowspec.backup.%s.%d' %
+                                      (name, id(self)))
         repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
+        narrowspec.savebackup(repo, self._narrowspecbackupname)
         self._active = True
 
     def __del__(self):
@@ -52,10 +56,12 @@
 
         self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
                                          self._backupname)
+        narrowspec.clearbackup(self._repo, self._narrowspecbackupname)
         self._active = False
         self._closed = True
 
     def _abort(self):
+        narrowspec.restorebackup(self._repo, self._narrowspecbackupname)
         self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                            self._backupname)
         self._active = False
--- a/mercurial/dispatch.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/dispatch.py	Fri Aug 24 12:55:05 2018 -0700
@@ -21,6 +21,8 @@
 
 from .i18n import _
 
+from hgdemandimport import tracing
+
 from . import (
     cmdutil,
     color,
@@ -84,7 +86,8 @@
 def run():
     "run the command in sys.argv"
     initstdio()
-    req = request(pycompat.sysargv[1:])
+    with tracing.log('parse args into request'):
+        req = request(pycompat.sysargv[1:])
     err = None
     try:
         status = dispatch(req)
@@ -176,182 +179,184 @@
 
 def dispatch(req):
     """run the command specified in req.args; returns an integer status code"""
-    if req.ferr:
-        ferr = req.ferr
-    elif req.ui:
-        ferr = req.ui.ferr
-    else:
-        ferr = procutil.stderr
+    with tracing.log('dispatch.dispatch'):
+        if req.ferr:
+            ferr = req.ferr
+        elif req.ui:
+            ferr = req.ui.ferr
+        else:
+            ferr = procutil.stderr
 
-    try:
-        if not req.ui:
-            req.ui = uimod.ui.load()
-        req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
-        if req.earlyoptions['traceback']:
-            req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
+        try:
+            if not req.ui:
+                req.ui = uimod.ui.load()
+            req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
+            if req.earlyoptions['traceback']:
+                req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
 
-        # set ui streams from the request
-        if req.fin:
-            req.ui.fin = req.fin
-        if req.fout:
-            req.ui.fout = req.fout
-        if req.ferr:
-            req.ui.ferr = req.ferr
-    except error.Abort as inst:
-        ferr.write(_("abort: %s\n") % inst)
-        if inst.hint:
-            ferr.write(_("(%s)\n") % inst.hint)
-        return -1
-    except error.ParseError as inst:
-        _formatparse(ferr.write, inst)
-        return -1
+            # set ui streams from the request
+            if req.fin:
+                req.ui.fin = req.fin
+            if req.fout:
+                req.ui.fout = req.fout
+            if req.ferr:
+                req.ui.ferr = req.ferr
+        except error.Abort as inst:
+            ferr.write(_("abort: %s\n") % inst)
+            if inst.hint:
+                ferr.write(_("(%s)\n") % inst.hint)
+            return -1
+        except error.ParseError as inst:
+            _formatparse(ferr.write, inst)
+            return -1
 
-    msg = _formatargs(req.args)
-    starttime = util.timer()
-    ret = 1  # default of Python exit code on unhandled exception
-    try:
-        ret = _runcatch(req) or 0
-    except error.ProgrammingError as inst:
-        req.ui.warn(_('** ProgrammingError: %s\n') % inst)
-        if inst.hint:
-            req.ui.warn(_('** (%s)\n') % inst.hint)
-        raise
-    except KeyboardInterrupt as inst:
+        msg = _formatargs(req.args)
+        starttime = util.timer()
+        ret = 1  # default of Python exit code on unhandled exception
         try:
-            if isinstance(inst, error.SignalInterrupt):
-                msg = _("killed!\n")
-            else:
-                msg = _("interrupted!\n")
-            req.ui.warn(msg)
-        except error.SignalInterrupt:
-            # maybe pager would quit without consuming all the output, and
-            # SIGPIPE was raised. we cannot print anything in this case.
-            pass
-        except IOError as inst:
-            if inst.errno != errno.EPIPE:
-                raise
-        ret = -1
-    finally:
-        duration = util.timer() - starttime
-        req.ui.flush()
-        if req.ui.logblockedtimes:
-            req.ui._blockedtimes['command_duration'] = duration * 1000
-            req.ui.log('uiblocked', 'ui blocked ms',
-                       **pycompat.strkwargs(req.ui._blockedtimes))
-        req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
-                   msg, ret & 255, duration)
-        try:
-            req._runexithandlers()
-        except: # exiting, so no re-raises
-            ret = ret or -1
-    return ret
+            ret = _runcatch(req) or 0
+        except error.ProgrammingError as inst:
+            req.ui.error(_('** ProgrammingError: %s\n') % inst)
+            if inst.hint:
+                req.ui.error(_('** (%s)\n') % inst.hint)
+            raise
+        except KeyboardInterrupt as inst:
+            try:
+                if isinstance(inst, error.SignalInterrupt):
+                    msg = _("killed!\n")
+                else:
+                    msg = _("interrupted!\n")
+                req.ui.error(msg)
+            except error.SignalInterrupt:
+                # maybe pager would quit without consuming all the output, and
+                # SIGPIPE was raised. we cannot print anything in this case.
+                pass
+            except IOError as inst:
+                if inst.errno != errno.EPIPE:
+                    raise
+            ret = -1
+        finally:
+            duration = util.timer() - starttime
+            req.ui.flush()
+            if req.ui.logblockedtimes:
+                req.ui._blockedtimes['command_duration'] = duration * 1000
+                req.ui.log('uiblocked', 'ui blocked ms',
+                           **pycompat.strkwargs(req.ui._blockedtimes))
+            req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
+                       msg, ret & 255, duration)
+            try:
+                req._runexithandlers()
+            except: # exiting, so no re-raises
+                ret = ret or -1
+        return ret
 
 def _runcatch(req):
-    def catchterm(*args):
-        raise error.SignalInterrupt
+    with tracing.log('dispatch._runcatch'):
+        def catchterm(*args):
+            raise error.SignalInterrupt
 
-    ui = req.ui
-    try:
-        for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
-            num = getattr(signal, name, None)
-            if num:
-                signal.signal(num, catchterm)
-    except ValueError:
-        pass # happens if called in a thread
-
-    def _runcatchfunc():
-        realcmd = None
+        ui = req.ui
         try:
-            cmdargs = fancyopts.fancyopts(req.args[:], commands.globalopts, {})
-            cmd = cmdargs[0]
-            aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
-            realcmd = aliases[0]
-        except (error.UnknownCommand, error.AmbiguousCommand,
-                IndexError, getopt.GetoptError):
-            # Don't handle this here. We know the command is
-            # invalid, but all we're worried about for now is that
-            # it's not a command that server operators expect to
-            # be safe to offer to users in a sandbox.
-            pass
-        if realcmd == 'serve' and '--stdio' in cmdargs:
-            # We want to constrain 'hg serve --stdio' instances pretty
-            # closely, as many shared-ssh access tools want to grant
-            # access to run *only* 'hg -R $repo serve --stdio'. We
-            # restrict to exactly that set of arguments, and prohibit
-            # any repo name that starts with '--' to prevent
-            # shenanigans wherein a user does something like pass
-            # --debugger or --config=ui.debugger=1 as a repo
-            # name. This used to actually run the debugger.
-            if (len(req.args) != 4 or
-                req.args[0] != '-R' or
-                req.args[1].startswith('--') or
-                req.args[2] != 'serve' or
-                req.args[3] != '--stdio'):
-                raise error.Abort(
-                    _('potentially unsafe serve --stdio invocation: %s') %
-                    (stringutil.pprint(req.args),))
+            for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
+                num = getattr(signal, name, None)
+                if num:
+                    signal.signal(num, catchterm)
+        except ValueError:
+            pass # happens if called in a thread
 
-        try:
-            debugger = 'pdb'
-            debugtrace = {
-                'pdb': pdb.set_trace
-            }
-            debugmortem = {
-                'pdb': pdb.post_mortem
-            }
+        def _runcatchfunc():
+            realcmd = None
+            try:
+                cmdargs = fancyopts.fancyopts(
+                    req.args[:], commands.globalopts, {})
+                cmd = cmdargs[0]
+                aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
+                realcmd = aliases[0]
+            except (error.UnknownCommand, error.AmbiguousCommand,
+                    IndexError, getopt.GetoptError):
+                # Don't handle this here. We know the command is
+                # invalid, but all we're worried about for now is that
+                # it's not a command that server operators expect to
+                # be safe to offer to users in a sandbox.
+                pass
+            if realcmd == 'serve' and '--stdio' in cmdargs:
+                # We want to constrain 'hg serve --stdio' instances pretty
+                # closely, as many shared-ssh access tools want to grant
+                # access to run *only* 'hg -R $repo serve --stdio'. We
+                # restrict to exactly that set of arguments, and prohibit
+                # any repo name that starts with '--' to prevent
+                # shenanigans wherein a user does something like pass
+                # --debugger or --config=ui.debugger=1 as a repo
+                # name. This used to actually run the debugger.
+                if (len(req.args) != 4 or
+                    req.args[0] != '-R' or
+                    req.args[1].startswith('--') or
+                    req.args[2] != 'serve' or
+                    req.args[3] != '--stdio'):
+                    raise error.Abort(
+                        _('potentially unsafe serve --stdio invocation: %s') %
+                        (stringutil.pprint(req.args),))
 
-            # read --config before doing anything else
-            # (e.g. to change trust settings for reading .hg/hgrc)
-            cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
-
-            if req.repo:
-                # copy configs that were passed on the cmdline (--config) to
-                # the repo ui
-                for sec, name, val in cfgs:
-                    req.repo.ui.setconfig(sec, name, val, source='--config')
+            try:
+                debugger = 'pdb'
+                debugtrace = {
+                    'pdb': pdb.set_trace
+                }
+                debugmortem = {
+                    'pdb': pdb.post_mortem
+                }
 
-            # developer config: ui.debugger
-            debugger = ui.config("ui", "debugger")
-            debugmod = pdb
-            if not debugger or ui.plain():
-                # if we are in HGPLAIN mode, then disable custom debugging
-                debugger = 'pdb'
-            elif req.earlyoptions['debugger']:
-                # This import can be slow for fancy debuggers, so only
-                # do it when absolutely necessary, i.e. when actual
-                # debugging has been requested
-                with demandimport.deactivated():
-                    try:
-                        debugmod = __import__(debugger)
-                    except ImportError:
-                        pass # Leave debugmod = pdb
+                # read --config before doing anything else
+                # (e.g. to change trust settings for reading .hg/hgrc)
+                cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
+
+                if req.repo:
+                    # copy configs that were passed on the cmdline (--config) to
+                    # the repo ui
+                    for sec, name, val in cfgs:
+                        req.repo.ui.setconfig(sec, name, val, source='--config')
 
-            debugtrace[debugger] = debugmod.set_trace
-            debugmortem[debugger] = debugmod.post_mortem
+                # developer config: ui.debugger
+                debugger = ui.config("ui", "debugger")
+                debugmod = pdb
+                if not debugger or ui.plain():
+                    # if we are in HGPLAIN mode, then disable custom debugging
+                    debugger = 'pdb'
+                elif req.earlyoptions['debugger']:
+                    # This import can be slow for fancy debuggers, so only
+                    # do it when absolutely necessary, i.e. when actual
+                    # debugging has been requested
+                    with demandimport.deactivated():
+                        try:
+                            debugmod = __import__(debugger)
+                        except ImportError:
+                            pass # Leave debugmod = pdb
 
-            # enter the debugger before command execution
-            if req.earlyoptions['debugger']:
-                ui.warn(_("entering debugger - "
-                        "type c to continue starting hg or h for help\n"))
+                debugtrace[debugger] = debugmod.set_trace
+                debugmortem[debugger] = debugmod.post_mortem
 
-                if (debugger != 'pdb' and
-                    debugtrace[debugger] == debugtrace['pdb']):
-                    ui.warn(_("%s debugger specified "
-                              "but its module was not found\n") % debugger)
-                with demandimport.deactivated():
-                    debugtrace[debugger]()
-            try:
-                return _dispatch(req)
-            finally:
-                ui.flush()
-        except: # re-raises
-            # enter the debugger when we hit an exception
-            if req.earlyoptions['debugger']:
-                traceback.print_exc()
-                debugmortem[debugger](sys.exc_info()[2])
-            raise
+                # enter the debugger before command execution
+                if req.earlyoptions['debugger']:
+                    ui.warn(_("entering debugger - "
+                            "type c to continue starting hg or h for help\n"))
 
-    return _callcatch(ui, _runcatchfunc)
+                    if (debugger != 'pdb' and
+                        debugtrace[debugger] == debugtrace['pdb']):
+                        ui.warn(_("%s debugger specified "
+                                  "but its module was not found\n") % debugger)
+                    with demandimport.deactivated():
+                        debugtrace[debugger]()
+                try:
+                    return _dispatch(req)
+                finally:
+                    ui.flush()
+            except: # re-raises
+                # enter the debugger when we hit an exception
+                if req.earlyoptions['debugger']:
+                    traceback.print_exc()
+                    debugmortem[debugger](sys.exc_info()[2])
+                raise
+        return _callcatch(ui, _runcatchfunc)
 
 def _callcatch(ui, func):
     """like scmutil.callcatch but handles more high-level exceptions about
@@ -370,9 +375,8 @@
             ui.warn(_("hg %s: %s\n") % (inst.args[0], msgbytes))
             commands.help_(ui, inst.args[0], full=False, command=True)
         else:
-            ui.pager('help')
             ui.warn(_("hg: %s\n") % inst.args[1])
-            commands.help_(ui, 'shortlist')
+            ui.warn(_("(use 'hg help -v' for a list of global options)\n"))
     except error.ParseError as inst:
         _formatparse(ui.warn, inst)
         return -1
@@ -394,9 +398,8 @@
                     _reportsimilar(ui.warn, sim)
                     suggested = True
             if not suggested:
-                ui.pager('help')
                 ui.warn(nocmdmsg)
-                commands.help_(ui, 'shortlist')
+                ui.warn(_("(use 'hg help' for a list of commands)\n"))
     except IOError:
         raise
     except KeyboardInterrupt:
--- a/mercurial/encoding.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/encoding.py	Fri Aug 24 12:55:05 2018 -0700
@@ -251,7 +251,7 @@
 def getcols(s, start, c):
     '''Use colwidth to find a c-column substring of s starting at byte
     index start'''
-    for x in xrange(start + c, len(s)):
+    for x in pycompat.xrange(start + c, len(s)):
         t = s[start:x]
         if colwidth(t) == c:
             return t
@@ -346,7 +346,7 @@
     else:
         uslice = lambda i: u[:-i]
         concat = lambda s: s + ellipsis
-    for i in xrange(1, len(u)):
+    for i in pycompat.xrange(1, len(u)):
         usub = uslice(i)
         if ucolwidth(usub) <= width:
             return concat(usub.encode(_sysstr(encoding)))
--- a/mercurial/error.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/error.py	Fri Aug 24 12:55:05 2018 -0700
@@ -58,6 +58,9 @@
     def __str__(self):
         return RevlogError.__str__(self)
 
+class AmbiguousPrefixLookupError(LookupError):
+    pass
+
 class FilteredLookupError(LookupError):
     pass
 
--- a/mercurial/exchange.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/exchange.py	Fri Aug 24 12:55:05 2018 -0700
@@ -15,6 +15,7 @@
     bin,
     hex,
     nullid,
+    nullrev,
 )
 from .thirdparty import (
     attr,
@@ -27,10 +28,12 @@
     error,
     lock as lockmod,
     logexchange,
+    narrowspec,
     obsolete,
     phases,
     pushkey,
     pycompat,
+    repository,
     scmutil,
     sslutil,
     streamclone,
@@ -44,6 +47,8 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+_NARROWACL_SECTION = 'narrowhgacl'
+
 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {'v1': '01',
                          'v2': '02',
@@ -1427,7 +1432,7 @@
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
-        if changegroup.NARROW_REQUIREMENT in repo.requirements:
+        if repository.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
@@ -1830,6 +1835,176 @@
             pullop.repo.invalidatevolatilesets()
     return tr
 
+def applynarrowacl(repo, kwargs):
+    """Apply narrow fetch access control.
+
+    This massages the named arguments for getbundle wire protocol commands
+    so requested data is filtered through access control rules.
+    """
+    ui = repo.ui
+    # TODO this assumes existence of HTTP and is a layering violation.
+    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
+    user_includes = ui.configlist(
+        _NARROWACL_SECTION, username + '.includes',
+        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
+    user_excludes = ui.configlist(
+        _NARROWACL_SECTION, username + '.excludes',
+        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
+    if not user_includes:
+        raise error.Abort(_("{} configuration for user {} is empty")
+                          .format(_NARROWACL_SECTION, username))
+
+    user_includes = [
+        'path:.' if p == '*' else 'path:' + p for p in user_includes]
+    user_excludes = [
+        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
+
+    req_includes = set(kwargs.get(r'includepats', []))
+    req_excludes = set(kwargs.get(r'excludepats', []))
+
+    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
+        req_includes, req_excludes, user_includes, user_excludes)
+
+    if invalid_includes:
+        raise error.Abort(
+            _("The following includes are not accessible for {}: {}")
+            .format(username, invalid_includes))
+
+    new_args = {}
+    new_args.update(kwargs)
+    new_args[r'narrow'] = True
+    new_args[r'includepats'] = req_includes
+    if req_excludes:
+        new_args[r'excludepats'] = req_excludes
+
+    return new_args
+
+def _computeellipsis(repo, common, heads, known, match, depth=None):
+    """Compute the shape of a narrowed DAG.
+
+    Args:
+      repo: The repository we're transferring.
+      common: The roots of the DAG range we're transferring.
+              May be just [nullid], which means all ancestors of heads.
+      heads: The heads of the DAG range we're transferring.
+      match: The narrowmatcher that allows us to identify relevant changes.
+      depth: If not None, only consider nodes to be full nodes if they are at
+             most depth changesets away from one of heads.
+
+    Returns:
+      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
+
+        visitnodes: The list of nodes (either full or ellipsis) which
+                    need to be sent to the client.
+        relevant_nodes: The set of changelog nodes which change a file inside
+                 the narrowspec. The client needs these as non-ellipsis nodes.
+        ellipsisroots: A dict of {rev: parents} that is used in
+                       narrowchangegroup to produce ellipsis nodes with the
+                       correct parents.
+    """
+    cl = repo.changelog
+    mfl = repo.manifestlog
+
+    clrev = cl.rev
+
+    commonrevs = {clrev(n) for n in common} | {nullrev}
+    headsrevs = {clrev(n) for n in heads}
+
+    if depth:
+        revdepth = {h: 0 for h in headsrevs}
+
+    ellipsisheads = collections.defaultdict(set)
+    ellipsisroots = collections.defaultdict(set)
+
+    def addroot(head, curchange):
+        """Add a root to an ellipsis head, splitting heads with 3 roots."""
+        ellipsisroots[head].add(curchange)
+        # Recursively split ellipsis heads with 3 roots by finding the
+        # roots' youngest common descendant which is an elided merge commit.
+        # That descendant takes 2 of the 3 roots as its own, and becomes a
+        # root of the head.
+        while len(ellipsisroots[head]) > 2:
+            child, roots = splithead(head)
+            splitroots(head, child, roots)
+            head = child  # Recurse in case we just added a 3rd root
+
+    def splitroots(head, child, roots):
+        ellipsisroots[head].difference_update(roots)
+        ellipsisroots[head].add(child)
+        ellipsisroots[child].update(roots)
+        ellipsisroots[child].discard(child)
+
+    def splithead(head):
+        r1, r2, r3 = sorted(ellipsisroots[head])
+        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
+            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
+                            nr1, head, nr2, head)
+            for j in mid:
+                if j == nr2:
+                    return nr2, (nr1, nr2)
+                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
+                    return j, (nr1, nr2)
+        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
+                            'roots: %d %d %d') % (head, r1, r2, r3))
+
+    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
+    visit = reversed(missing)
+    relevant_nodes = set()
+    visitnodes = [cl.node(m) for m in missing]
+    required = set(headsrevs) | known
+    for rev in visit:
+        clrev = cl.changelogrevision(rev)
+        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
+        if depth is not None:
+            curdepth = revdepth[rev]
+            for p in ps:
+                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
+        needed = False
+        shallow_enough = depth is None or revdepth[rev] <= depth
+        if shallow_enough:
+            curmf = mfl[clrev.manifest].read()
+            if ps:
+                # We choose to not trust the changed files list in
+                # changesets because it's not always correct. TODO: could
+                # we trust it for the non-merge case?
+                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
+                needed = bool(curmf.diff(p1mf, match))
+                if not needed and len(ps) > 1:
+                    # For merge changes, the list of changed files is not
+                    # helpful, since we need to emit the merge if a file
+                    # in the narrow spec has changed on either side of the
+                    # merge. As a result, we do a manifest diff to check.
+                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
+                    needed = bool(curmf.diff(p2mf, match))
+            else:
+                # For a root node, we need to include the node if any
+                # files in the node match the narrowspec.
+                needed = any(curmf.walk(match))
+
+        if needed:
+            for head in ellipsisheads[rev]:
+                addroot(head, rev)
+            for p in ps:
+                required.add(p)
+            relevant_nodes.add(cl.node(rev))
+        else:
+            if not ps:
+                ps = [nullrev]
+            if rev in required:
+                for head in ellipsisheads[rev]:
+                    addroot(head, rev)
+                for p in ps:
+                    ellipsisheads[p].add(rev)
+            else:
+                for p in ps:
+                    ellipsisheads[p] |= ellipsisheads[rev]
+
+    # add common changesets as roots of their reachable ellipsis heads
+    for c in commonrevs:
+        for head in ellipsisheads[c]:
+            addroot(head, c)
+    return visitnodes, relevant_nodes, ellipsisroots
+
 def caps20to10(repo, role):
     """return a set with appropriate options to use bundle20 during getbundle"""
     caps = {'HG20'}
@@ -1924,30 +2099,52 @@
 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, heads=None, common=None, **kwargs):
     """add a changegroup part to the requested bundle"""
-    cgstream = None
-    if kwargs.get(r'cg', True):
-        # build changegroup bundle here.
-        version = '01'
-        cgversions = b2caps.get('changegroup')
-        if cgversions:  # 3.1 and 3.2 ship with an empty value
-            cgversions = [v for v in cgversions
-                          if v in changegroup.supportedoutgoingversions(repo)]
-            if not cgversions:
-                raise ValueError(_('no common changegroup version'))
-            version = max(cgversions)
-        outgoing = _computeoutgoing(repo, heads, common)
-        if outgoing.missing:
-            cgstream = changegroup.makestream(repo, outgoing, version, source,
-                                              bundlecaps=bundlecaps)
+    if not kwargs.get(r'cg', True):
+        return
+
+    version = '01'
+    cgversions = b2caps.get('changegroup')
+    if cgversions:  # 3.1 and 3.2 ship with an empty value
+        cgversions = [v for v in cgversions
+                      if v in changegroup.supportedoutgoingversions(repo)]
+        if not cgversions:
+            raise ValueError(_('no common changegroup version'))
+        version = max(cgversions)
+
+    outgoing = _computeoutgoing(repo, heads, common)
+    if not outgoing.missing:
+        return
 
-    if cgstream:
-        part = bundler.newpart('changegroup', data=cgstream)
-        if cgversions:
-            part.addparam('version', version)
-        part.addparam('nbchanges', '%d' % len(outgoing.missing),
-                      mandatory=False)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
+    if kwargs.get(r'narrow', False):
+        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
+        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+        filematcher = narrowspec.match(repo.root, include=include,
+                                       exclude=exclude)
+    else:
+        filematcher = None
+
+    cgstream = changegroup.makestream(repo, outgoing, version, source,
+                                      bundlecaps=bundlecaps,
+                                      filematcher=filematcher)
+
+    part = bundler.newpart('changegroup', data=cgstream)
+    if cgversions:
+        part.addparam('version', version)
+
+    part.addparam('nbchanges', '%d' % len(outgoing.missing),
+                  mandatory=False)
+
+    if 'treemanifest' in repo.requirements:
+        part.addparam('treemanifest', '1')
+
+    if kwargs.get(r'narrow', False) and (include or exclude):
+        narrowspecpart = bundler.newpart('narrow:spec')
+        if include:
+            narrowspecpart.addparam(
+                'include', '\n'.join(include), mandatory=True)
+        if exclude:
+            narrowspecpart.addparam(
+                'exclude', '\n'.join(exclude), mandatory=True)
 
 @getbundle2partsgenerator('bookmarks')
 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
@@ -2069,8 +2266,13 @@
     # Don't send unless:
     # - changeset are being exchanged,
     # - the client supports it.
-    if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
+    # - narrow bundle isn't in play (not currently compatible).
+    if (not kwargs.get(r'cg', True)
+        or 'rev-branch-cache' not in b2caps
+        or kwargs.get(r'narrow', False)
+        or repo.ui.has_section(_NARROWACL_SECTION)):
         return
+
     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addpartrevbranchcache(repo, bundler, outgoing)
 
--- a/mercurial/extensions.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/extensions.py	Fri Aug 24 12:55:05 2018 -0700
@@ -124,7 +124,7 @@
     # note: this ui.debug happens before --debug is processed,
     #       Use --config ui.debug=1 to see them.
     if ui.configbool('devel', 'debug.extensions'):
-        ui.debug('could not import %s (%s): trying %s\n'
+        ui.debug('debug.extensions:     - could not import %s (%s): trying %s\n'
                  % (failed, stringutil.forcebytestr(err), next))
         if ui.debugflag:
             ui.traceback()
@@ -166,7 +166,7 @@
             _rejectunicode(t, o._table)
     _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
 
-def load(ui, name, path):
+def load(ui, name, path, log=lambda *a: None):
     if name.startswith('hgext.') or name.startswith('hgext/'):
         shortname = name[6:]
     else:
@@ -175,8 +175,11 @@
         return None
     if shortname in _extensions:
         return _extensions[shortname]
+    log('  - loading extension: %r\n', shortname)
     _extensions[shortname] = None
-    mod = _importext(name, path, bind(_reportimporterror, ui))
+    with util.timedcm('load extension %r', shortname) as stats:
+        mod = _importext(name, path, bind(_reportimporterror, ui))
+    log('  > %r extension loaded in %s\n', shortname, stats)
 
     # Before we do anything with the extension, check against minimum stated
     # compatibility. This gives extension authors a mechanism to have their
@@ -187,12 +190,16 @@
         ui.warn(_('(third party extension %s requires version %s or newer '
                   'of Mercurial; disabling)\n') % (shortname, minver))
         return
+    log('    - validating extension tables: %r\n', shortname)
     _validatetables(ui, mod)
 
     _extensions[shortname] = mod
     _order.append(shortname)
-    for fn in _aftercallbacks.get(shortname, []):
-        fn(loaded=True)
+    log('    - invoking registered callbacks: %r\n', shortname)
+    with util.timedcm('callbacks extension %r', shortname) as stats:
+        for fn in _aftercallbacks.get(shortname, []):
+            fn(loaded=True)
+    log('    > callbacks completed in %s\n', stats)
     return mod
 
 def _runuisetup(name, ui):
@@ -225,28 +232,41 @@
     return True
 
 def loadall(ui, whitelist=None):
+    if ui.configbool('devel', 'debug.extensions'):
+        log = lambda msg, *values: ui.debug('debug.extensions: ',
+            msg % values, label='debug.extensions')
+    else:
+        log = lambda *a, **kw: None
     result = ui.configitems("extensions")
     if whitelist is not None:
         result = [(k, v) for (k, v) in result if k in whitelist]
     newindex = len(_order)
-    for (name, path) in result:
-        if path:
-            if path[0:1] == '!':
-                _disabledextensions[name] = path[1:]
-                continue
-        try:
-            load(ui, name, path)
-        except Exception as inst:
-            msg = stringutil.forcebytestr(inst)
+    log('loading %sextensions\n', 'additional ' if newindex else '')
+    log('- processing %d entries\n', len(result))
+    with util.timedcm('load all extensions') as stats:
+        for (name, path) in result:
             if path:
-                ui.warn(_("*** failed to import extension %s from %s: %s\n")
-                        % (name, path, msg))
-            else:
-                ui.warn(_("*** failed to import extension %s: %s\n")
-                        % (name, msg))
-            if isinstance(inst, error.Hint) and inst.hint:
-                ui.warn(_("*** (%s)\n") % inst.hint)
-            ui.traceback()
+                if path[0:1] == '!':
+                    if name not in _disabledextensions:
+                        log('  - skipping disabled extension: %r\n', name)
+                    _disabledextensions[name] = path[1:]
+                    continue
+            try:
+                load(ui, name, path, log)
+            except Exception as inst:
+                msg = stringutil.forcebytestr(inst)
+                if path:
+                    ui.warn(_("*** failed to import extension %s from %s: %s\n")
+                            % (name, path, msg))
+                else:
+                    ui.warn(_("*** failed to import extension %s: %s\n")
+                            % (name, msg))
+                if isinstance(inst, error.Hint) and inst.hint:
+                    ui.warn(_("*** (%s)\n") % inst.hint)
+                ui.traceback()
+
+    log('> loaded %d extensions, total time %s\n',
+        len(_order) - newindex, stats)
     # list of (objname, loadermod, loadername) tuple:
     # - objname is the name of an object in extension module,
     #   from which extra information is loaded
@@ -258,29 +278,47 @@
     earlyextraloaders = [
         ('configtable', configitems, 'loadconfigtable'),
     ]
+
+    log('- loading configtable attributes\n')
     _loadextra(ui, newindex, earlyextraloaders)
 
     broken = set()
+    log('- executing uisetup hooks\n')
     for name in _order[newindex:]:
-        if not _runuisetup(name, ui):
-            broken.add(name)
+        log('  - running uisetup for %r\n', name)
+        with util.timedcm('uisetup %r', name) as stats:
+            if not _runuisetup(name, ui):
+                log('    - the %r extension uisetup failed\n', name)
+                broken.add(name)
+        log('  > uisetup for %r took %s\n', name, stats)
 
+    log('- executing extsetup hooks\n')
     for name in _order[newindex:]:
         if name in broken:
             continue
-        if not _runextsetup(name, ui):
-            broken.add(name)
+        log('  - running extsetup for %r\n', name)
+        with util.timedcm('extsetup %r', name) as stats:
+            if not _runextsetup(name, ui):
+                log('    - the %r extension extsetup failed\n', name)
+                broken.add(name)
+        log('  > extsetup for %r took %s\n', name, stats)
 
     for name in broken:
+        log('    - disabling broken %r extension\n', name)
         _extensions[name] = None
 
     # Call aftercallbacks that were never met.
-    for shortname in _aftercallbacks:
-        if shortname in _extensions:
-            continue
+    log('- executing remaining aftercallbacks\n')
+    with util.timedcm('aftercallbacks') as stats:
+        for shortname in _aftercallbacks:
+            if shortname in _extensions:
+                continue
 
-        for fn in _aftercallbacks[shortname]:
-            fn(loaded=False)
+            for fn in _aftercallbacks[shortname]:
+                log('  - extension %r not loaded, notify callbacks\n',
+                    shortname)
+                fn(loaded=False)
+    log('> remaining aftercallbacks completed in %s\n', stats)
 
     # loadall() is called multiple times and lingering _aftercallbacks
     # entries could result in double execution. See issue4646.
@@ -304,6 +342,7 @@
     # - loadermod is the module where loader is placed
     # - loadername is the name of the function,
     #   which takes (ui, extensionname, extraobj) arguments
+    log('- loading extension registration objects\n')
     extraloaders = [
         ('cmdtable', commands, 'loadcmdtable'),
         ('colortable', color, 'loadcolortable'),
@@ -314,7 +353,10 @@
         ('templatefunc', templatefuncs, 'loadfunction'),
         ('templatekeyword', templatekw, 'loadkeyword'),
     ]
-    _loadextra(ui, newindex, extraloaders)
+    with util.timedcm('load registration objects') as stats:
+        _loadextra(ui, newindex, extraloaders)
+    log('> extension registration object loading took %s\n', stats)
+    log('extension loading complete\n')
 
 def _loadextra(ui, newindex, extraloaders):
     for name in _order[newindex:]:
--- a/mercurial/filelog.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/filelog.py	Fri Aug 24 12:55:05 2018 -0700
@@ -26,7 +26,6 @@
         self.filename = path
         self.index = self._revlog.index
         self.version = self._revlog.version
-        self.storedeltachains = self._revlog.storedeltachains
         self._generaldelta = self._revlog._generaldelta
 
     def __len__(self):
@@ -77,9 +76,6 @@
     def deltaparent(self, rev):
         return self._revlog.deltaparent(rev)
 
-    def candelta(self, baserev, rev):
-        return self._revlog.candelta(baserev, rev)
-
     def iscensored(self, rev):
         return self._revlog.iscensored(rev)
 
@@ -95,6 +91,9 @@
     def revdiff(self, rev1, rev2):
         return self._revlog.revdiff(rev1, rev2)
 
+    def emitrevisiondeltas(self, requests):
+        return self._revlog.emitrevisiondeltas(requests)
+
     def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                     node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                     cachedelta=None):
--- a/mercurial/filemerge.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/filemerge.py	Fri Aug 24 12:55:05 2018 -0700
@@ -137,6 +137,14 @@
     return procutil.findexe(util.expandpath(exe))
 
 def _picktool(repo, ui, path, binary, symlink, changedelete):
+    strictcheck = ui.configbool('merge', 'strict-capability-check')
+
+    def hascapability(tool, capability, strict=False):
+        if strict and tool in internals:
+            if internals[tool].capabilities.get(capability):
+                return True
+        return _toolbool(ui, tool, capability)
+
     def supportscd(tool):
         return tool in internals and internals[tool].mergetype == nomerge
 
@@ -149,9 +157,9 @@
                 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
             else: # configured but non-existing tools are more silent
                 ui.note(_("couldn't find merge tool %s\n") % tmsg)
-        elif symlink and not _toolbool(ui, tool, "symlink"):
+        elif symlink and not hascapability(tool, "symlink", strictcheck):
             ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
-        elif binary and not _toolbool(ui, tool, "binary"):
+        elif binary and not hascapability(tool, "binary", strictcheck):
             ui.warn(_("tool %s can't handle binary\n") % tmsg)
         elif changedelete and not supportscd(tool):
             # the nomerge tools are the only tools that support change/delete
@@ -186,9 +194,19 @@
             return (hgmerge, hgmerge)
 
     # then patterns
+
+    # whether binary capability should be checked strictly
+    binarycap = binary and strictcheck
+
     for pat, tool in ui.configitems("merge-patterns"):
         mf = match.match(repo.root, '', [pat])
-        if mf(path) and check(tool, pat, symlink, False, changedelete):
+        if mf(path) and check(tool, pat, symlink, binarycap, changedelete):
+            if binary and not hascapability(tool, "binary", strict=True):
+                ui.warn(_("warning: check merge-patterns configurations,"
+                          " if %r for binary file %r is unintentional\n"
+                          "(see 'hg help merge-tools'"
+                          " for binary files capability)\n")
+                        % (pycompat.bytestr(tool), pycompat.bytestr(path)))
             toolpath = _findtool(ui, tool)
             return (tool, _quotetoolpath(toolpath))
 
@@ -208,9 +226,10 @@
     if uimerge:
         # external tools defined in uimerge won't be able to handle
         # change/delete conflicts
-        if uimerge not in names and not changedelete:
-            return (uimerge, uimerge)
-        tools.insert(0, (None, uimerge)) # highest priority
+        if check(uimerge, path, symlink, binary, changedelete):
+            if uimerge not in names and not changedelete:
+                return (uimerge, uimerge)
+            tools.insert(0, (None, uimerge)) # highest priority
     tools.append((None, "hgmerge")) # the old default, if found
     for p, t in tools:
         if check(t, None, symlink, binary, changedelete):
@@ -469,7 +488,7 @@
     success, status = tagmerge.merge(repo, fcd, fco, fca)
     return success, status, False
 
-@internaltool('dump', fullmerge)
+@internaltool('dump', fullmerge, binary=True, symlink=True)
 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Creates three versions of the files to merge, containing the
@@ -495,7 +514,7 @@
     repo.wwrite(fd + ".base", fca.data(), fca.flags())
     return False, 1, False
 
-@internaltool('forcedump', mergeonly)
+@internaltool('forcedump', mergeonly, binary=True, symlink=True)
 def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
                 labels=None):
     """
@@ -916,14 +935,17 @@
         _haltmerge()
     # default action is 'continue', in which case we neither prompt nor halt
 
+def hasconflictmarkers(data):
+    return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data,
+                          re.MULTILINE))
+
 def _check(repo, r, ui, tool, fcd, files):
     fd = fcd.path()
     unused, unused, unused, back = files
 
     if not r and (_toolbool(ui, tool, "checkconflicts") or
                   'conflicts' in _toollist(ui, tool, "check")):
-        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
-                     re.MULTILINE):
+        if hasconflictmarkers(fcd.data()):
             r = 1
 
     checked = False
@@ -967,6 +989,12 @@
         internals['internal:' + name] = func
         internalsdoc[fullname] = func
 
+        capabilities = sorted([k for k, v in func.capabilities.items() if v])
+        if capabilities:
+            capdesc = _("(actual capabilities: %s)") % ', '.join(capabilities)
+            func.__doc__ = (func.__doc__ +
+                            pycompat.sysstr("\n\n    %s" % capdesc))
+
 # load built-in merge tools explicitly to setup internalsdoc
 loadinternalmerge(None, None, internaltool)
 
--- a/mercurial/fileset.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/fileset.py	Fri Aug 24 12:55:05 2018 -0700
@@ -13,9 +13,9 @@
 from .i18n import _
 from . import (
     error,
+    filesetlang,
     match as matchmod,
     merge,
-    parser,
     pycompat,
     registrar,
     scmutil,
@@ -25,126 +25,28 @@
     stringutil,
 )
 
-elements = {
-    # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    ":": (15, None, None, ("kindpat", 15), None),
-    "-": (5, None, ("negate", 19), ("minus", 5), None),
-    "not": (10, None, ("not", 10), None, None),
-    "!": (10, None, ("not", 10), None, None),
-    "and": (5, None, None, ("and", 5), None),
-    "&": (5, None, None, ("and", 5), None),
-    "or": (4, None, None, ("or", 4), None),
-    "|": (4, None, None, ("or", 4), None),
-    "+": (4, None, None, ("or", 4), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "end": (0, None, None, None, None),
-}
-
-keywords = {'and', 'or', 'not'}
-
-globchars = ".*{}[]?/\\_"
+# common weight constants
+_WEIGHT_CHECK_FILENAME = filesetlang.WEIGHT_CHECK_FILENAME
+_WEIGHT_READ_CONTENTS = filesetlang.WEIGHT_READ_CONTENTS
+_WEIGHT_STATUS = filesetlang.WEIGHT_STATUS
+_WEIGHT_STATUS_THOROUGH = filesetlang.WEIGHT_STATUS_THOROUGH
 
-def tokenize(program):
-    pos, l = 0, len(program)
-    program = pycompat.bytestr(program)
-    while pos < l:
-        c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
-            pass
-        elif c in "(),-:|&+!": # handle simple operators
-            yield (c, None, pos)
-        elif (c in '"\'' or c == 'r' and
-              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
-            if c == 'r':
-                pos += 1
-                c = program[pos]
-                decode = lambda x: x
-            else:
-                decode = parser.unescapestr
-            pos += 1
-            s = pos
-            while pos < l: # find closing quote
-                d = program[pos]
-                if d == '\\': # skip over escaped characters
-                    pos += 2
-                    continue
-                if d == c:
-                    yield ('string', decode(program[s:pos]), s)
-                    break
-                pos += 1
-            else:
-                raise error.ParseError(_("unterminated string"), s)
-        elif c.isalnum() or c in globchars or ord(c) > 127:
-            # gather up a symbol/keyword
-            s = pos
-            pos += 1
-            while pos < l: # find end of symbol
-                d = program[pos]
-                if not (d.isalnum() or d in globchars or ord(d) > 127):
-                    break
-                pos += 1
-            sym = program[s:pos]
-            if sym in keywords: # operator keywords
-                yield (sym, None, s)
-            else:
-                yield ('symbol', sym, s)
-            pos -= 1
-        else:
-            raise error.ParseError(_("syntax error"), pos)
-        pos += 1
-    yield ('end', None, pos)
-
-def parse(expr):
-    p = parser.parser(elements)
-    tree, pos = p.parse(tokenize(expr))
-    if pos != len(expr):
-        raise error.ParseError(_("invalid token"), pos)
-    return tree
-
-def getsymbol(x):
-    if x and x[0] == 'symbol':
-        return x[1]
-    raise error.ParseError(_('not a symbol'))
-
-def getstring(x, err):
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
-        return x[1]
-    raise error.ParseError(err)
-
-def _getkindpat(x, y, allkinds, err):
-    kind = getsymbol(x)
-    pat = getstring(y, err)
-    if kind not in allkinds:
-        raise error.ParseError(_("invalid pattern kind: %s") % kind)
-    return '%s:%s' % (kind, pat)
-
-def getpattern(x, allkinds, err):
-    if x and x[0] == 'kindpat':
-        return _getkindpat(x[1], x[2], allkinds, err)
-    return getstring(x, err)
-
-def getlist(x):
-    if not x:
-        return []
-    if x[0] == 'list':
-        return getlist(x[1]) + [x[2]]
-    return [x]
-
-def getargs(x, min, max, err):
-    l = getlist(x)
-    if len(l) < min or len(l) > max:
-        raise error.ParseError(err)
-    return l
+# helpers for processing parsed tree
+getsymbol = filesetlang.getsymbol
+getstring = filesetlang.getstring
+_getkindpat = filesetlang.getkindpat
+getpattern = filesetlang.getpattern
+getargs = filesetlang.getargs
 
 def getmatch(mctx, x):
     if not x:
         raise error.ParseError(_("missing argument"))
     return methods[x[0]](mctx, *x[1:])
 
+def getmatchwithstatus(mctx, x, hint):
+    keys = set(getstring(hint, 'status hint must be a string').split())
+    return getmatch(mctx.withstatus(keys), x)
+
 def stringmatch(mctx, x):
     return mctx.matcher([x])
 
@@ -152,15 +54,20 @@
     return stringmatch(mctx, _getkindpat(x, y, matchmod.allpatternkinds,
                                          _("pattern must be a string")))
 
+def patternsmatch(mctx, *xs):
+    allkinds = matchmod.allpatternkinds
+    patterns = [getpattern(x, allkinds, _("pattern must be a string"))
+                for x in xs]
+    return mctx.matcher(patterns)
+
 def andmatch(mctx, x, y):
     xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
+    ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.intersectmatchers(xm, ym)
 
-def ormatch(mctx, x, y):
-    xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
-    return matchmod.unionmatcher([xm, ym])
+def ormatch(mctx, *xs):
+    ms = [getmatch(mctx, x) for x in xs]
+    return matchmod.unionmatcher(ms)
 
 def notmatch(mctx, x):
     m = getmatch(mctx, x)
@@ -168,15 +75,12 @@
 
 def minusmatch(mctx, x, y):
     xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
+    ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.differencematcher(xm, ym)
 
-def negatematch(mctx, x):
-    raise error.ParseError(_("can't use negate operator in this context"))
-
-def listmatch(mctx, x, y):
+def listmatch(mctx, *xs):
     raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see hg help "filesets.x or y"'))
+                           hint=_('see \'hg help "filesets.x or y"\''))
 
 def func(mctx, a, b):
     funcname = getsymbol(a)
@@ -193,14 +97,11 @@
 # with:
 #  mctx - current matchctx instance
 #  x - argument in tree form
-symbols = {}
+symbols = filesetlang.symbols
 
-# filesets using matchctx.status()
-_statuscallers = set()
+predicate = registrar.filesetpredicate(symbols)
 
-predicate = registrar.filesetpredicate()
-
-@predicate('modified()', callstatus=True)
+@predicate('modified()', callstatus=True, weight=_WEIGHT_STATUS)
 def modified(mctx, x):
     """File that is modified according to :hg:`status`.
     """
@@ -209,7 +110,7 @@
     s = set(mctx.status().modified)
     return mctx.predicate(s.__contains__, predrepr='modified')
 
-@predicate('added()', callstatus=True)
+@predicate('added()', callstatus=True, weight=_WEIGHT_STATUS)
 def added(mctx, x):
     """File that is added according to :hg:`status`.
     """
@@ -218,7 +119,7 @@
     s = set(mctx.status().added)
     return mctx.predicate(s.__contains__, predrepr='added')
 
-@predicate('removed()', callstatus=True)
+@predicate('removed()', callstatus=True, weight=_WEIGHT_STATUS)
 def removed(mctx, x):
     """File that is removed according to :hg:`status`.
     """
@@ -227,7 +128,7 @@
     s = set(mctx.status().removed)
     return mctx.predicate(s.__contains__, predrepr='removed')
 
-@predicate('deleted()', callstatus=True)
+@predicate('deleted()', callstatus=True, weight=_WEIGHT_STATUS)
 def deleted(mctx, x):
     """Alias for ``missing()``.
     """
@@ -236,7 +137,7 @@
     s = set(mctx.status().deleted)
     return mctx.predicate(s.__contains__, predrepr='deleted')
 
-@predicate('missing()', callstatus=True)
+@predicate('missing()', callstatus=True, weight=_WEIGHT_STATUS)
 def missing(mctx, x):
     """File that is missing according to :hg:`status`.
     """
@@ -245,7 +146,7 @@
     s = set(mctx.status().deleted)
     return mctx.predicate(s.__contains__, predrepr='deleted')
 
-@predicate('unknown()', callstatus=True)
+@predicate('unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def unknown(mctx, x):
     """File that is unknown according to :hg:`status`."""
     # i18n: "unknown" is a keyword
@@ -253,7 +154,7 @@
     s = set(mctx.status().unknown)
     return mctx.predicate(s.__contains__, predrepr='unknown')
 
-@predicate('ignored()', callstatus=True)
+@predicate('ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def ignored(mctx, x):
     """File that is ignored according to :hg:`status`."""
     # i18n: "ignored" is a keyword
@@ -261,7 +162,7 @@
     s = set(mctx.status().ignored)
     return mctx.predicate(s.__contains__, predrepr='ignored')
 
-@predicate('clean()', callstatus=True)
+@predicate('clean()', callstatus=True, weight=_WEIGHT_STATUS)
 def clean(mctx, x):
     """File that is clean according to :hg:`status`.
     """
@@ -277,7 +178,7 @@
     getargs(x, 0, 0, _("tracked takes no arguments"))
     return mctx.predicate(mctx.ctx.__contains__, predrepr='tracked')
 
-@predicate('binary()')
+@predicate('binary()', weight=_WEIGHT_READ_CONTENTS)
 def binary(mctx, x):
     """File that appears to be binary (contains NUL bytes).
     """
@@ -304,7 +205,7 @@
     ctx = mctx.ctx
     return mctx.predicate(lambda f: ctx.flags(f) == 'l', predrepr='symlink')
 
-@predicate('resolved()')
+@predicate('resolved()', weight=_WEIGHT_STATUS)
 def resolved(mctx, x):
     """File that is marked resolved according to :hg:`resolve -l`.
     """
@@ -316,7 +217,7 @@
     return mctx.predicate(lambda f: f in ms and ms[f] == 'r',
                           predrepr='resolved')
 
-@predicate('unresolved()')
+@predicate('unresolved()', weight=_WEIGHT_STATUS)
 def unresolved(mctx, x):
     """File that is marked unresolved according to :hg:`resolve -l`.
     """
@@ -328,7 +229,7 @@
     return mctx.predicate(lambda f: f in ms and ms[f] == 'u',
                           predrepr='unresolved')
 
-@predicate('hgignore()')
+@predicate('hgignore()', weight=_WEIGHT_STATUS)
 def hgignore(mctx, x):
     """File that matches the active .hgignore pattern.
     """
@@ -336,7 +237,7 @@
     getargs(x, 0, 0, _("hgignore takes no arguments"))
     return mctx.ctx.repo().dirstate._ignore
 
-@predicate('portable()')
+@predicate('portable()', weight=_WEIGHT_CHECK_FILENAME)
 def portable(mctx, x):
     """File that has a portable name. (This doesn't include filenames with case
     collisions.)
@@ -346,7 +247,7 @@
     return mctx.predicate(lambda f: util.checkwinfilename(f) is None,
                           predrepr='portable')
 
-@predicate('grep(regex)')
+@predicate('grep(regex)', weight=_WEIGHT_READ_CONTENTS)
 def grep(mctx, x):
     """File contains the given regular expression.
     """
@@ -400,7 +301,7 @@
         b = _sizetomax(expr)
         return lambda x: x >= a and x <= b
 
-@predicate('size(expression)')
+@predicate('size(expression)', weight=_WEIGHT_STATUS)
 def size(mctx, x):
     """File size matches the given expression. Examples:
 
@@ -415,7 +316,7 @@
     return mctx.fpredicate(lambda fctx: m(fctx.size()),
                            predrepr=('size(%r)', expr), cache=True)
 
-@predicate('encoding(name)')
+@predicate('encoding(name)', weight=_WEIGHT_READ_CONTENTS)
 def encoding(mctx, x):
     """File can be successfully decoded with the given character
     encoding. May not be useful for encodings other than ASCII and
@@ -437,7 +338,7 @@
 
     return mctx.fpredicate(encp, predrepr=('encoding(%r)', enc), cache=True)
 
-@predicate('eol(style)')
+@predicate('eol(style)', weight=_WEIGHT_READ_CONTENTS)
 def eol(mctx, x):
     """File contains newlines of the given style (dos, unix, mac). Binary
     files are excluded, files with mixed line endings match multiple
@@ -471,7 +372,7 @@
         return p and p[0].path() != fctx.path()
     return mctx.fpredicate(copiedp, predrepr='copied', cache=True)
 
-@predicate('revs(revs, pattern)')
+@predicate('revs(revs, pattern)', weight=_WEIGHT_STATUS)
 def revs(mctx, x):
     """Evaluate set in the specified revisions. If the revset match multiple
     revs, this will return file matching pattern in any of the revision.
@@ -486,14 +387,15 @@
     matchers = []
     for r in revs:
         ctx = repo[r]
-        matchers.append(getmatch(mctx.switch(ctx, _buildstatus(ctx, x)), x))
+        mc = mctx.switch(ctx.p1(), ctx)
+        matchers.append(getmatch(mc, x))
     if not matchers:
         return mctx.never()
     if len(matchers) == 1:
         return matchers[0]
     return matchmod.unionmatcher(matchers)
 
-@predicate('status(base, rev, pattern)')
+@predicate('status(base, rev, pattern)', weight=_WEIGHT_STATUS)
 def status(mctx, x):
     """Evaluate predicate using status change between ``base`` and
     ``rev``. Examples:
@@ -513,7 +415,8 @@
     if not revspec:
         raise error.ParseError(reverr)
     basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec])
-    return getmatch(mctx.switch(ctx, _buildstatus(ctx, x, basectx=basectx)), x)
+    mc = mctx.switch(basectx, ctx)
+    return getmatch(mc, x)
 
 @predicate('subrepo([pattern])')
 def subrepo(mctx, x):
@@ -539,24 +442,52 @@
         return mctx.predicate(sstate.__contains__, predrepr='subrepo')
 
 methods = {
+    'withstatus': getmatchwithstatus,
     'string': stringmatch,
     'symbol': stringmatch,
     'kindpat': kindpatmatch,
+    'patterns': patternsmatch,
     'and': andmatch,
     'or': ormatch,
     'minus': minusmatch,
-    'negate': negatematch,
     'list': listmatch,
-    'group': getmatch,
     'not': notmatch,
     'func': func,
 }
 
 class matchctx(object):
-    def __init__(self, ctx, status=None, badfn=None):
+    def __init__(self, basectx, ctx, badfn=None):
+        self._basectx = basectx
         self.ctx = ctx
-        self._status = status
         self._badfn = badfn
+        self._match = None
+        self._status = None
+
+    def narrowed(self, match):
+        """Create matchctx for a sub-tree narrowed by the given matcher"""
+        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx._match = match
+        # leave the wider status, which we don't have to care about
+        mctx._status = self._status
+        return mctx
+
+    def switch(self, basectx, ctx):
+        mctx = matchctx(basectx, ctx, self._badfn)
+        mctx._match = self._match
+        return mctx
+
+    def withstatus(self, keys):
+        """Create matchctx which has precomputed status specified by the keys"""
+        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx._match = self._match
+        mctx._buildstatus(keys)
+        return mctx
+
+    def _buildstatus(self, keys):
+        self._status = self._basectx.status(self.ctx, self._match,
+                                            listignored='ignored' in keys,
+                                            listclean='clean' in keys,
+                                            listunknown='unknown' in keys)
 
     def status(self):
         return self._status
@@ -612,62 +543,20 @@
         return matchmod.nevermatcher(repo.root, repo.getcwd(),
                                      badfn=self._badfn)
 
-    def switch(self, ctx, status=None):
-        return matchctx(ctx, status, self._badfn)
-
-# filesets using matchctx.switch()
-_switchcallers = [
-    'revs',
-    'status',
-]
-
-def _intree(funcs, tree):
-    if isinstance(tree, tuple):
-        if tree[0] == 'func' and tree[1][0] == 'symbol':
-            if tree[1][1] in funcs:
-                return True
-            if tree[1][1] in _switchcallers:
-                # arguments won't be evaluated in the current context
-                return False
-        for s in tree[1:]:
-            if _intree(funcs, s):
-                return True
-    return False
-
 def match(ctx, expr, badfn=None):
     """Create a matcher for a single fileset expression"""
-    tree = parse(expr)
-    mctx = matchctx(ctx, _buildstatus(ctx, tree), badfn=badfn)
+    tree = filesetlang.parse(expr)
+    tree = filesetlang.analyze(tree)
+    tree = filesetlang.optimize(tree)
+    mctx = matchctx(ctx.p1(), ctx, badfn=badfn)
     return getmatch(mctx, tree)
 
-def _buildstatus(ctx, tree, basectx=None):
-    # do we need status info?
-
-    if _intree(_statuscallers, tree):
-        unknown = _intree(['unknown'], tree)
-        ignored = _intree(['ignored'], tree)
-
-        r = ctx.repo()
-        if basectx is None:
-            basectx = ctx.p1()
-        return r.status(basectx, ctx,
-                        unknown=unknown, ignored=ignored, clean=True)
-    else:
-        return None
-
-def prettyformat(tree):
-    return parser.prettyformat(tree, ('string', 'symbol'))
 
 def loadpredicate(ui, extname, registrarobj):
     """Load fileset predicates from specified registrarobj
     """
     for name, func in registrarobj._table.iteritems():
         symbols[name] = func
-        if func._callstatus:
-            _statuscallers.add(name)
-
-# load built-in predicates explicitly to setup _statuscallers
-loadpredicate(None, None, predicate)
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = symbols.values()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/filesetlang.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,330 @@
+# filesetlang.py - parser, tokenizer and utility for file set language
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .i18n import _
+from . import (
+    error,
+    parser,
+    pycompat,
+)
+
+# common weight constants for static optimization
+# (see registrar.filesetpredicate for details)
+WEIGHT_CHECK_FILENAME = 0.5
+WEIGHT_READ_CONTENTS = 30
+WEIGHT_STATUS = 10
+WEIGHT_STATUS_THOROUGH = 50
+
+elements = {
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    ":": (15, None, None, ("kindpat", 15), None),
+    "-": (5, None, ("negate", 19), ("minus", 5), None),
+    "not": (10, None, ("not", 10), None, None),
+    "!": (10, None, ("not", 10), None, None),
+    "and": (5, None, None, ("and", 5), None),
+    "&": (5, None, None, ("and", 5), None),
+    "or": (4, None, None, ("or", 4), None),
+    "|": (4, None, None, ("or", 4), None),
+    "+": (4, None, None, ("or", 4), None),
+    ",": (2, None, None, ("list", 2), None),
+    ")": (0, None, None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "end": (0, None, None, None, None),
+}
+
+keywords = {'and', 'or', 'not'}
+
+symbols = {}
+
+globchars = ".*{}[]?/\\_"
+
+def tokenize(program):
+    pos, l = 0, len(program)
+    program = pycompat.bytestr(program)
+    while pos < l:
+        c = program[pos]
+        if c.isspace(): # skip inter-token whitespace
+            pass
+        elif c in "(),-:|&+!": # handle simple operators
+            yield (c, None, pos)
+        elif (c in '"\'' or c == 'r' and
+              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
+            if c == 'r':
+                pos += 1
+                c = program[pos]
+                decode = lambda x: x
+            else:
+                decode = parser.unescapestr
+            pos += 1
+            s = pos
+            while pos < l: # find closing quote
+                d = program[pos]
+                if d == '\\': # skip over escaped characters
+                    pos += 2
+                    continue
+                if d == c:
+                    yield ('string', decode(program[s:pos]), s)
+                    break
+                pos += 1
+            else:
+                raise error.ParseError(_("unterminated string"), s)
+        elif c.isalnum() or c in globchars or ord(c) > 127:
+            # gather up a symbol/keyword
+            s = pos
+            pos += 1
+            while pos < l: # find end of symbol
+                d = program[pos]
+                if not (d.isalnum() or d in globchars or ord(d) > 127):
+                    break
+                pos += 1
+            sym = program[s:pos]
+            if sym in keywords: # operator keywords
+                yield (sym, None, s)
+            else:
+                yield ('symbol', sym, s)
+            pos -= 1
+        else:
+            raise error.ParseError(_("syntax error"), pos)
+        pos += 1
+    yield ('end', None, pos)
+
+def parse(expr):
+    p = parser.parser(elements)
+    tree, pos = p.parse(tokenize(expr))
+    if pos != len(expr):
+        raise error.ParseError(_("invalid token"), pos)
+    return parser.simplifyinfixops(tree, {'list', 'or'})
+
+def getsymbol(x):
+    if x and x[0] == 'symbol':
+        return x[1]
+    raise error.ParseError(_('not a symbol'))
+
+def getstring(x, err):
+    if x and (x[0] == 'string' or x[0] == 'symbol'):
+        return x[1]
+    raise error.ParseError(err)
+
+def getkindpat(x, y, allkinds, err):
+    kind = getsymbol(x)
+    pat = getstring(y, err)
+    if kind not in allkinds:
+        raise error.ParseError(_("invalid pattern kind: %s") % kind)
+    return '%s:%s' % (kind, pat)
+
+def getpattern(x, allkinds, err):
+    if x and x[0] == 'kindpat':
+        return getkindpat(x[1], x[2], allkinds, err)
+    return getstring(x, err)
+
+def getlist(x):
+    if not x:
+        return []
+    if x[0] == 'list':
+        return list(x[1:])
+    return [x]
+
+def getargs(x, min, max, err):
+    l = getlist(x)
+    if len(l) < min or len(l) > max:
+        raise error.ParseError(err)
+    return l
+
+def _analyze(x):
+    if x is None:
+        return x
+
+    op = x[0]
+    if op in {'string', 'symbol'}:
+        return x
+    if op == 'kindpat':
+        getsymbol(x[1])  # kind must be a symbol
+        t = _analyze(x[2])
+        return (op, x[1], t)
+    if op == 'group':
+        return _analyze(x[1])
+    if op == 'negate':
+        raise error.ParseError(_("can't use negate operator in this context"))
+    if op == 'not':
+        t = _analyze(x[1])
+        return (op, t)
+    if op == 'and':
+        ta = _analyze(x[1])
+        tb = _analyze(x[2])
+        return (op, ta, tb)
+    if op == 'minus':
+        return _analyze(('and', x[1], ('not', x[2])))
+    if op in {'list', 'or'}:
+        ts = tuple(_analyze(y) for y in x[1:])
+        return (op,) + ts
+    if op == 'func':
+        getsymbol(x[1])  # function name must be a symbol
+        ta = _analyze(x[2])
+        return (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def _insertstatushints(x):
+    """Insert hint nodes where status should be calculated (first path)
+
+    This works in bottom-up way, summing up status names and inserting hint
+    nodes at 'and' and 'or' as needed. Thus redundant hint nodes may be left.
+
+    Returns (status-names, new-tree) at the given subtree, where status-names
+    is a sum of status names referenced in the given subtree.
+    """
+    if x is None:
+        return (), x
+
+    op = x[0]
+    if op in {'string', 'symbol', 'kindpat'}:
+        return (), x
+    if op == 'not':
+        h, t = _insertstatushints(x[1])
+        return h, (op, t)
+    if op == 'and':
+        ha, ta = _insertstatushints(x[1])
+        hb, tb = _insertstatushints(x[2])
+        hr = ha + hb
+        if ha and hb:
+            return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr)))
+        return hr, (op, ta, tb)
+    if op == 'or':
+        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
+        hr = sum(hs, ())
+        if sum(bool(h) for h in hs) > 1:
+            return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr)))
+        return hr, (op,) + ts
+    if op == 'list':
+        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
+        return sum(hs, ()), (op,) + ts
+    if op == 'func':
+        f = getsymbol(x[1])
+        # don't propagate 'ha' crossing a function boundary
+        ha, ta = _insertstatushints(x[2])
+        if getattr(symbols.get(f), '_callstatus', False):
+            return (f,), ('withstatus', (op, x[1], ta), ('string', f))
+        return (), (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def _mergestatushints(x, instatus):
+    """Remove redundant status hint nodes (second path)
+
+    This is the top-down path to eliminate inner hint nodes.
+    """
+    if x is None:
+        return x
+
+    op = x[0]
+    if op == 'withstatus':
+        if instatus:
+            # drop redundant hint node
+            return _mergestatushints(x[1], instatus)
+        t = _mergestatushints(x[1], instatus=True)
+        return (op, t, x[2])
+    if op in {'string', 'symbol', 'kindpat'}:
+        return x
+    if op == 'not':
+        t = _mergestatushints(x[1], instatus)
+        return (op, t)
+    if op == 'and':
+        ta = _mergestatushints(x[1], instatus)
+        tb = _mergestatushints(x[2], instatus)
+        return (op, ta, tb)
+    if op in {'list', 'or'}:
+        ts = tuple(_mergestatushints(y, instatus) for y in x[1:])
+        return (op,) + ts
+    if op == 'func':
+        # don't propagate 'instatus' crossing a function boundary
+        ta = _mergestatushints(x[2], instatus=False)
+        return (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def analyze(x):
+    """Transform raw parsed tree to evaluatable tree which can be fed to
+    optimize() or getmatch()
+
+    All pseudo operations should be mapped to real operations or functions
+    defined in methods or symbols table respectively.
+    """
+    t = _analyze(x)
+    _h, t = _insertstatushints(t)
+    return _mergestatushints(t, instatus=False)
+
+def _optimizeandops(op, ta, tb):
+    if tb is not None and tb[0] == 'not':
+        return ('minus', ta, tb[1])
+    return (op, ta, tb)
+
+def _optimizeunion(xs):
+    # collect string patterns so they can be compiled into a single regexp
+    ws, ts, ss = [], [], []
+    for x in xs:
+        w, t = _optimize(x)
+        if t is not None and t[0] in {'string', 'symbol', 'kindpat'}:
+            ss.append(t)
+            continue
+        ws.append(w)
+        ts.append(t)
+    if ss:
+        ws.append(WEIGHT_CHECK_FILENAME)
+        ts.append(('patterns',) + tuple(ss))
+    return ws, ts
+
+def _optimize(x):
+    if x is None:
+        return 0, x
+
+    op = x[0]
+    if op == 'withstatus':
+        w, t = _optimize(x[1])
+        return w, (op, t, x[2])
+    if op in {'string', 'symbol'}:
+        return WEIGHT_CHECK_FILENAME, x
+    if op == 'kindpat':
+        w, t = _optimize(x[2])
+        return w, (op, x[1], t)
+    if op == 'not':
+        w, t = _optimize(x[1])
+        return w, (op, t)
+    if op == 'and':
+        wa, ta = _optimize(x[1])
+        wb, tb = _optimize(x[2])
+        if wa <= wb:
+            return wa, _optimizeandops(op, ta, tb)
+        else:
+            return wb, _optimizeandops(op, tb, ta)
+    if op == 'or':
+        ws, ts = _optimizeunion(x[1:])
+        if len(ts) == 1:
+            return ws[0], ts[0] # 'or' operation is fully optimized out
+        ts = tuple(it[1] for it in sorted(enumerate(ts),
+                                          key=lambda it: ws[it[0]]))
+        return max(ws), (op,) + ts
+    if op == 'list':
+        ws, ts = zip(*(_optimize(y) for y in x[1:]))
+        return sum(ws), (op,) + ts
+    if op == 'func':
+        f = getsymbol(x[1])
+        w = getattr(symbols.get(f), '_weight', 1)
+        wa, ta = _optimize(x[2])
+        return w + wa, (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def optimize(x):
+    """Reorder/rewrite evaluatable tree for optimization
+
+    All pseudo operations should be transformed beforehand.
+    """
+    _w, t = _optimize(x)
+    return t
+
+def prettyformat(tree):
+    return parser.prettyformat(tree, ('string', 'symbol'))
--- a/mercurial/graphmod.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/graphmod.py	Fri Aug 24 12:55:05 2018 -0700
@@ -22,6 +22,7 @@
 from .node import nullrev
 from . import (
     dagop,
+    pycompat,
     smartset,
     util,
 )
@@ -426,16 +427,16 @@
     # shift_interline is the line containing the non-vertical
     # edges between this entry and the next
     shift_interline = echars[:idx * 2]
-    for i in xrange(2 + coldiff):
+    for i in pycompat.xrange(2 + coldiff):
         shift_interline.append(' ')
     count = ncols - idx - 1
     if coldiff == -1:
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             shift_interline.extend(['/', ' '])
     elif coldiff == 0:
         shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
     else:
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             shift_interline.extend(['\\', ' '])
 
     # draw edges from the current node to its parents
--- a/mercurial/help/config.txt	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/help/config.txt	Fri Aug 24 12:55:05 2018 -0700
@@ -438,6 +438,20 @@
 ``commands``
 ------------
 
+``resolve.confirm``
+    Confirm before performing action if no filename is passed.
+    (default: False)
+
+``resolve.mark-check``
+    Determines what level of checking :hg:`resolve --mark` will perform before
+    marking files as resolved. Valid values are ``none``, ``warn``, and
+    ``abort``. ``warn`` will output a warning listing the file(s) that still
+    have conflict markers in them, but will still mark everything resolved.
+    ``abort`` will output the same warning but will not mark things as resolved.
+    If --all is passed and this is set to ``abort``, only a warning will be
+    shown (an error will not be raised).
+    (default: ``none``)
+
 ``status.relative``
     Make paths in :hg:`status` output relative to the current directory.
     (default: False)
@@ -1333,6 +1347,11 @@
    halted, the repository is left in a normal ``unresolved`` merge state.
    (default: ``continue``)
 
+``strict-capability-check``
+   Whether capabilities of internal merge tools are checked strictly
+   or not, while examining rules to decide merge tool to be used.
+   (default: False)
+
 ``merge-patterns``
 ------------------
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/linelog.txt	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,302 @@
+linelog is a storage format inspired by the "Interleaved deltas" idea. See
+https://en.wikipedia.org/wiki/Interleaved_deltas for its introduction.
+
+0. SCCS Weave
+
+  To understand what linelog is, first we have a quick look at a simplified
+  (with header removed) SCCS weave format, which is an implementation of the
+  "Interleaved deltas" idea.
+
+0.1 Basic SCCS Weave File Format
+
+  A SCCS weave file consists of plain text lines. Each line is either a
+  special instruction starting with "^A" or part of the content of the real
+  file the weave tracks. There are 3 important operations, where REV denotes
+  the revision number:
+
+    ^AI REV, marking the beginning of an insertion block introduced by REV
+    ^AD REV, marking the beginning of a deletion block introduced by REV
+    ^AE REV, marking the end of the block started by "^AI REV" or "^AD REV"
+
+  Note on revision numbers: For any two different revision numbers, one must
+  be an ancestor of the other to make them comparable. This enforces linear
+  history. Besides, the comparison functions (">=", "<") should be efficient.
+  This means, if revisions are strings like git or hg, an external map is
+  required to convert them into integers.
+
+  For example, to represent the following changes:
+
+    REV 1 | REV 2 | REV 3
+    ------+-------+-------
+    a     | a     | a
+    b     | b     | 2
+    c     | 1     | c
+          | 2     |
+          | c     |
+
+  A possible weave file looks like:
+
+    ^AI 1
+    a
+    ^AD 3
+    b
+    ^AI 2
+    1
+    ^AE 3
+    2
+    ^AE 2
+    c
+    ^AE 1
+
+  An "^AE" does not always match its nearest operation ("^AI" or "^AD"). In
+  the above example, "^AE 3" does not match the nearest "^AI 2" but "^AD 3".
+  Therefore we need some extra information for "^AE". The SCCS weave uses a
+  revision number. It could also be a boolean value about whether it is an
+  insertion or a deletion (see section 0.4).
+
+0.2 Checkout
+
+  The "checkout" operation is to retrieve file content at a given revision,
+  say X. It's doable by going through the file line by line and:
+
+    - If we meet ^AI rev, and rev > X, find the corresponding ^AE and jump there
+    - If we meet ^AD rev, and rev <= X, find the corresponding ^AE and jump there
+    - Ignore ^AE
+    - For normal lines, just output them
+
+0.3 Annotate
+
+  The "annotate" operation is to show extra metadata like the revision number
+  and the original line number a line comes from.
+
+  It's basically just a "Checkout". For the extra metadata, they can be stored
+  side by side with the line contents. Alternatively, we can infer the
+  revision number from "^AI"s.
+
+  Some SCM tools have to calculate diffs on the fly and thus are much slower
+  on this operation.
+
+0.4 Tree Structure
+
+  The word "interleaved" is used because "^AI" .. "^AE" and "^AD" .. "^AE"
+  blocks can be interleaved.
+
+  If we consider insertions and deletions separately, they can form tree
+  structures, respectively.
+
+    +--- ^AI 1        +--- ^AD 3
+    | +- ^AI 2        | +- ^AD 2
+    | |               | |
+    | +- ^AE 2        | +- ^AE 2
+    |                 |
+    +--- ^AE 1        +--- ^AE 3
+
+  More specifically, it's possible to build a tree for all insertions, where
+  the tree node has the structure "(rev, startline, endline)". "startline" is
+  the line number of "^AI" and "endline" is the line number of the matched
+  "^AE".  The tree will have these properties:
+
+    1. child.rev > parent.rev
+    2. child.startline > parent.startline
+    3. child.endline < parent.endline
+
+  A similar tree for all deletions can also be built with the first property
+  changed to:
+
+    1. child.rev < parent.rev
+
+0.5 Malformed Cases
+
+  The following cases are considered malformed in our implementation:
+
+    1. Interleaved insertions, or interleaved deletions.
+       It can be rewritten to a non-interleaved tree structure.
+
+       Take insertions as example, deletions are similar:
+
+       ^AI x         ^AI x
+       a             a
+       ^AI x + 1  -> ^AI x + 1
+       b             b
+       ^AE x         ^AE x + 1
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    2. Nested insertions, where the inner one has a smaller revision number.
+       Or nested deletions, where the inner one has a larger revision number.
+       It can be rewritten to a non-nested form.
+
+       Take insertions as example, deletions are similar:
+
+       ^AI x + 1     ^AI x + 1
+       a             a
+       ^AI x      -> ^AE x + 1
+       b             ^AI x
+       ^AE x         b
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    3. Insertion inside deletion with a smaller revision number.
+
+       Rewrite by duplicating the content inserted:
+
+       ^AD x          ^AD x
+       a              a
+       ^AI x + 1  ->  b
+       b              c
+       ^AE x + 1      ^AE x
+       c              ^AI x + 1
+       ^AE x          b
+                      ^AE x + 1
+
+       Note: If "annotate" purely depends on "^AI" information, then the
+       duplication content will lose track of where "b" is originally from.
+
+  Some of them may be valid in other implementations for special purposes. For
+  example, to "revive" a previously deleted block in a newer revision.
+
+0.6 Cases Can Be Optimized
+
+  It's always better to get things nested. For example, the left is more
+  efficient than the right while they represent the same content:
+
+    +--- ^AD 2          +- ^AD 1
+    | +- ^AD 1          |   LINE A
+    | |   LINE A        +- ^AE 1
+    | +- ^AE 1          +- ^AD 2
+    |     LINE B        |   LINE B
+    +--- ^AE 2          +- ^AE 2
+
+  Our implementation sometimes generates the less efficient data. To always
+  get the optimal form, it requires extra code complexity that seems unworthy.
+
+0.7 Inefficiency
+
+  The file format can be slow because:
+
+  - Inserting a new line at position P requires rewriting all data after P.
+  - Finding "^AE" requires walking through the content (O(N), where N is the
+    number of lines between "^AI/D" and "^AE").
+
+1. Linelog
+
+  The linelog is a binary format dedicated to speeding up mercurial (or
+  git)'s "annotate" operation. It's designed to avoid the issues mentioned
+  in section 0.7.
+
+1.1 Content Stored
+
+  Linelog is not another storage for file contents. It only stores line
+  numbers and corresponding revision numbers, instead of actual line content.
+  This is okay for the "annotate" operation because usually the external
+  source is fast to checkout the content of a file at a specific revision.
+
+  A typical SCCS weave is also fast on the "grep" operation, which needs
+  random accesses to line contents from different revisions of a file. This
+  can be slow with linelog's no-line-content design. However we could use
+  an extra map ((rev, line num) -> line content) to speed it up.
+
+  Note the revision numbers in linelog should be independent from mercurial
+  integer revision numbers. There should be some mapping between linelog rev
+  and hg hash stored side by side, to make the files reusable after being
+  copied to another machine.
+
+1.2 Basic Format
+
+  A linelog file consists of "instruction"s. An "instruction" can be either:
+
+    - JGE  REV ADDR     # jump to ADDR if rev >= REV
+    - JL   REV ADDR     # jump to ADDR if rev < REV
+    - LINE REV LINENUM  # append the (LINENUM+1)-th line in revision REV
+
+  For example, here is the example linelog representing the same file with
+  3 revisions mentioned in section 0.1:
+
+    SCCS  |    Linelog
+    Weave | Addr : Instruction
+    ------+------+-------------
+    ^AI 1 |    0 : JL   1 8
+    a     |    1 : LINE 1 0
+    ^AD 3 |    2 : JGE  3 6
+    b     |    3 : LINE 1 1
+    ^AI 2 |    4 : JL   2 7
+    1     |    5 : LINE 2 2
+    ^AE 3 |
+    2     |    6 : LINE 2 3
+    ^AE 2 |
+    c     |    7 : LINE 1 2
+    ^AE 1 |
+          |    8 : END
+
+  This way, "find ^AE" is O(1) because we just jump there. And we can insert
+  new lines without rewriting most part of the file by appending new lines and
+  changing a single instruction to jump to them.
+
+  The current implementation uses 64 bits for an instruction: The opcode (JGE,
+  JL or LINE) takes 2 bits, REV takes 30 bits and ADDR or LINENUM takes 32
+  bits. It also stores the max revision number and buffer size at the first
+  64 bits for quick access to these values.
+
+1.3 Comparing with Mercurial's revlog format
+
+  Apparently, linelog is very different from revlog: linelog stores rev and
+  line numbers, while revlog has line contents and other metadata (like
+  parents, flags). However, the revlog format could also be used to store rev
+  and line numbers. For example, to speed up the annotate operation, we could
+  also pre-calculate annotate results and just store them using the revlog
+  format.
+
+  Therefore, linelog is actually somewhat similar to revlog, with the important
+  trade-off that it only supports linear history (mentioned in section 0.1).
+  Essentially, the differences are:
+
+    a) Linelog is full of deltas, while revlog could contain full file
+       contents sometimes. So linelog is smaller. Revlog could trade
+       reconstruction speed for file size - best case, revlog is as small as
+       linelog.
+    b) The interleaved delta structure allows skipping large portion of
+       uninteresting deltas so linelog's content reconstruction is faster than
+       the delta-only version of revlog (however it's possible to construct
+       a case where interleaved deltas degrade to plain deltas, so linelog
+       worst case would be delta-only revlog). Revlog could trade file size
+       for reconstruction speed.
+    c) Linelog implicitly maintains the order of all lines it stores. So it
+       could dump all the lines from all revisions, with a reasonable order.
+       While revlog could also dump all line additions, it requires extra
+       computation to figure out the order putting those lines - that's some
+       kind of "merge".
+
+  "c" makes "hg absorb" easier to implement and makes it possible to do
+  "annotate --deleted".
+
+1.4 Malformed Cases Handling
+
+  The following "case 1", "case 2", and "case 3" refer to cases mentioned
+  in section 0.5.
+
+  Using the exposed API (replacelines), case 1 is impossible to generate,
+  although it's possible to generate it by constructing rawdata and load that
+  via linelog.fromdata.
+
+  Doing annotate(maxrev) before replacelines (aka. a1, a2 passed to
+  replacelines are related to the latest revision) eliminates the possibility
+  of case 3. That makes sense since usually you'd like to make edits on top of
+  the latest revision. Practically, both absorb and fastannotate do this.
+
+  Doing annotate(maxrev), plus replacelines(rev, ...) where rev >= maxrev
+  eliminates the possibility of case 2. That makes sense since usually the
+  edits belong to "new revisions", not "old revisions". Practically,
+  fastannotate does this. Absorb calls replacelines with rev < maxrev to edit
+  past revisions. So it needs some extra care to not generate case 2.
+
+  If case 1 occurs, that probably means linelog file corruption (assuming
+  linelog is edited via public APIs). The checkout or annotate result could
+  be less meaningful or even error out, but linelog wouldn't enter an infinite
+  loop.
+
+  If either case 2 or 3 occurs, linelog works as if the inner "^AI/D" and "^AE"
+  operations on the left side are silently ignored.
--- a/mercurial/help/merge-tools.txt	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/help/merge-tools.txt	Fri Aug 24 12:55:05 2018 -0700
@@ -36,8 +36,9 @@
 
 .. internaltoolsmarker
 
-Internal tools are always available and do not require a GUI but will by default
-not handle symlinks or binary files.
+Internal tools are always available and do not require a GUI but will
+by default not handle symlinks or binary files. See the next section
+for details about the "actual capabilities" described above.
 
 Choosing a merge tool
 =====================
@@ -54,8 +55,7 @@
 
 3. If the filename of the file to be merged matches any of the patterns in the
    merge-patterns configuration section, the first usable merge tool
-   corresponding to a matching pattern is used. Here, binary capabilities of the
-   merge tool are not considered.
+   corresponding to a matching pattern is used.
 
 4. If ui.merge is set it will be considered next. If the value is not the name
    of a configured tool, the specified value is used and must be executable by
@@ -72,6 +72,23 @@
 
 8. Otherwise, ``:prompt`` is used.
 
+For historical reasons, Mercurial assumes the capabilities of internal
+merge tools as below while examining the rules above, regardless of
+their actual capabilities.
+
+==== =============== ====== =======
+step specified via   binary symlink
+==== =============== ====== =======
+1.   --tool          o      o
+2.   HGMERGE         o      o
+3.   merge-patterns  o (*)  x (*)
+4.   ui.merge        x (*)  x (*)
+==== =============== ====== =======
+
+If ``merge.strict-capability-check`` configuration is true, Mercurial
+checks capabilities of internal merge tools strictly in (*) cases
+above. It is false by default for backward compatibility.
+
 .. note::
 
    After selecting a merge program, Mercurial will by default attempt
--- a/mercurial/hg.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/hg.py	Fri Aug 24 12:55:05 2018 -0700
@@ -9,6 +9,7 @@
 from __future__ import absolute_import
 
 import errno
+import functools
 import hashlib
 import os
 import shutil
@@ -162,9 +163,16 @@
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create, intents=intents)
     ui = getattr(obj, "ui", ui)
+    if ui.configbool('devel', 'debug.extensions'):
+        log = functools.partial(
+            ui.debug, 'debug.extensions: ', label='debug.extensions')
+    else:
+        log = lambda *a, **kw: None
     for f in presetupfuncs or []:
         f(ui, obj)
+    log('- executing reposetup hooks\n')
     for name, module in extensions.extensions(ui):
+        log('  - running reposetup for %s\n' % (name,))
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, obj)
@@ -258,7 +266,7 @@
         raise error.Abort(_('destination already exists'))
 
     if not destwvfs.isdir():
-        destwvfs.mkdir()
+        destwvfs.makedirs()
     destvfs.makedir()
 
     requirements = ''
@@ -626,7 +634,7 @@
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
-                os.mkdir(dest)
+                util.makedirs(dest)
             else:
                 # only clean up directories we create ourselves
                 cleandir = hgdir
--- a/mercurial/hgweb/hgweb_mod.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/hgweb/hgweb_mod.py	Fri Aug 24 12:55:05 2018 -0700
@@ -140,11 +140,6 @@
         if not staticurl.endswith('/'):
             staticurl += '/'
 
-        # some functions for the templater
-
-        def motd(**map):
-            yield self.config('web', 'motd')
-
         # figure out which style to use
 
         vars = {}
@@ -177,12 +172,16 @@
             'urlbase': req.advertisedbaseurl,
             'repo': self.reponame,
             'encoding': encoding.encoding,
-            'motd': motd,
             'sessionvars': sessionvars,
             'pathdef': makebreadcrumb(req.apppath),
             'style': style,
             'nonce': self.nonce,
         }
+        templatekeyword = registrar.templatekeyword(defaults)
+        @templatekeyword('motd', requires=())
+        def motd(context, mapping):
+            yield self.config('web', 'motd')
+
         tres = formatter.templateresources(self.repo.ui, self.repo)
         tmpl = templater.templater.frommapfile(mapfile,
                                                filters=filters,
--- a/mercurial/hgweb/hgwebdir_mod.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/hgweb/hgwebdir_mod.py	Fri Aug 24 12:55:05 2018 -0700
@@ -33,6 +33,7 @@
     hg,
     profiling,
     pycompat,
+    registrar,
     scmutil,
     templater,
     templateutil,
@@ -495,12 +496,6 @@
 
     def templater(self, req, nonce):
 
-        def motd(**map):
-            if self.motd is not None:
-                yield self.motd
-            else:
-                yield config('web', 'motd')
-
         def config(section, name, default=uimod._unset, untrusted=True):
             return self.ui.config(section, name, default, untrusted)
 
@@ -520,7 +515,6 @@
 
         defaults = {
             "encoding": encoding.encoding,
-            "motd": motd,
             "url": req.apppath + '/',
             "logourl": logourl,
             "logoimg": logoimg,
@@ -529,5 +523,13 @@
             "style": style,
             "nonce": nonce,
         }
+        templatekeyword = registrar.templatekeyword(defaults)
+        @templatekeyword('motd', requires=())
+        def motd(context, mapping):
+            if self.motd is not None:
+                yield self.motd
+            else:
+                yield config('web', 'motd')
+
         tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
         return tmpl
--- a/mercurial/hgweb/webcommands.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/hgweb/webcommands.py	Fri Aug 24 12:55:05 2018 -0700
@@ -215,7 +215,7 @@
 
         def revgen():
             cl = web.repo.changelog
-            for i in xrange(len(web.repo) - 1, 0, -100):
+            for i in pycompat.xrange(len(web.repo) - 1, 0, -100):
                 l = []
                 for j in cl.revs(max(0, i - 99), i):
                     ctx = web.repo[j]
--- a/mercurial/hgweb/webutil.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/hgweb/webutil.py	Fri Aug 24 12:55:05 2018 -0700
@@ -408,6 +408,12 @@
 
 whyunstable._requires = {'repo', 'ctx'}
 
+# helper to mark a function as a new-style template keyword; can be removed
+# once old-style function gets unsupported and new-style becomes the default
+def _kwfunc(f):
+    f._requires = ()
+    return f
+
 def commonentry(repo, ctx):
     node = ctx.node()
     return {
@@ -432,8 +438,8 @@
         'branches': nodebranchdict(repo, ctx),
         'tags': nodetagsdict(repo, node),
         'bookmarks': nodebookmarksdict(repo, node),
-        'parent': lambda **x: parents(ctx),
-        'child': lambda **x: children(ctx),
+        'parent': _kwfunc(lambda context, mapping: parents(ctx)),
+        'child': _kwfunc(lambda context, mapping: children(ctx)),
     }
 
 def changelistentry(web, ctx):
@@ -450,9 +456,9 @@
 
     entry = commonentry(repo, ctx)
     entry.update(
-        allparents=lambda **x: parents(ctx),
-        parent=lambda **x: parents(ctx, rev - 1),
-        child=lambda **x: children(ctx, rev + 1),
+        allparents=_kwfunc(lambda context, mapping: parents(ctx)),
+        parent=_kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
+        child=_kwfunc(lambda context, mapping: children(ctx, rev + 1)),
         changelogtag=showtags,
         files=files,
     )
@@ -521,7 +527,7 @@
         changesetbranch=showbranch,
         files=templateutil.mappedgenerator(_listfilesgen,
                                            args=(ctx, web.stripecount)),
-        diffsummary=lambda **x: diffsummary(diffstatsgen),
+        diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)),
         diffstat=diffstats,
         archives=web.archivelist(ctx.hex()),
         **pycompat.strkwargs(commonentry(web.repo, ctx)))
@@ -613,21 +619,21 @@
         len1 = lhi - llo
         len2 = rhi - rlo
         count = min(len1, len2)
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             yield _compline(type=type,
                             leftlineno=llo + i + 1,
                             leftline=leftlines[llo + i],
                             rightlineno=rlo + i + 1,
                             rightline=rightlines[rlo + i])
         if len1 > len2:
-            for i in xrange(llo + count, lhi):
+            for i in pycompat.xrange(llo + count, lhi):
                 yield _compline(type=type,
                                 leftlineno=i + 1,
                                 leftline=leftlines[i],
                                 rightlineno=None,
                                 rightline=None)
         elif len2 > len1:
-            for i in xrange(rlo + count, rhi):
+            for i in pycompat.xrange(rlo + count, rhi):
                 yield _compline(type=type,
                                 leftlineno=None,
                                 leftline=None,
--- a/mercurial/httppeer.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/httppeer.py	Fri Aug 24 12:55:05 2018 -0700
@@ -64,7 +64,7 @@
     result = []
 
     n = 0
-    for i in xrange(0, len(value), valuelen):
+    for i in pycompat.xrange(0, len(value), valuelen):
         n += 1
         result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/linelog.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,436 @@
+# linelog - efficient cache for annotate data
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""linelog is an efficient cache for annotate data inspired by SCCS Weaves.
+
+SCCS Weaves are an implementation of
+https://en.wikipedia.org/wiki/Interleaved_deltas. See
+mercurial/help/internals/linelog.txt for an exploration of SCCS weaves
+and how linelog works in detail.
+
+Here's a hacker's summary: a linelog is a program which is executed in
+the context of a revision. Executing the program emits information
+about lines, including the revision that introduced them and the line
+number in the file at the introducing revision. When an insertion or
+deletion is performed on the file, a jump instruction is used to patch
+in a new body of annotate information.
+"""
+from __future__ import absolute_import, print_function
+
+import abc
+import struct
+
+from .thirdparty import (
+    attr,
+)
+from . import (
+    pycompat,
+)
+
+_llentry = struct.Struct('>II')
+
+class LineLogError(Exception):
+    """Error raised when something bad happens internally in linelog."""
+
+@attr.s
+class lineinfo(object):
+    # Introducing revision of this line.
+    rev = attr.ib()
+    # Line number for this line in its introducing revision.
+    linenum = attr.ib()
+    # Private. Offset in the linelog program of this line. Used internally.
+    _offset = attr.ib()
+
+@attr.s
+class annotateresult(object):
+    rev = attr.ib()
+    lines = attr.ib()
+    _eof = attr.ib()
+
+    def __iter__(self):
+        return iter(self.lines)
+
+class _llinstruction(object):
+
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def __init__(self, op1, op2):
+        pass
+
+    @abc.abstractmethod
+    def __str__(self):
+        pass
+
+    def __repr__(self):
+        return str(self)
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        pass
+
+    @abc.abstractmethod
+    def encode(self):
+        """Encode this instruction to the binary linelog format."""
+
+    @abc.abstractmethod
+    def execute(self, rev, pc, emit):
+        """Execute this instruction.
+
+        Args:
+          rev: The revision we're annotating.
+          pc: The current offset in the linelog program.
+          emit: A function that accepts a single lineinfo object.
+
+        Returns:
+          The new value of pc. Returns None if execution should stop
+          (that is, we've found the end of the file.)
+        """
+
+class _jge(_llinstruction):
+    """If the current rev is greater than or equal to op1, jump to op2."""
+
+    def __init__(self, op1, op2):
+        self._cmprev = op1
+        self._target = op2
+
+    def __str__(self):
+        return r'JGE %d %d' % (self._cmprev, self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._cmprev == other._cmprev
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(self._cmprev << 2, self._target)
+
+    def execute(self, rev, pc, emit):
+        if rev >= self._cmprev:
+            return self._target
+        return pc + 1
+
+class _jump(_llinstruction):
+    """Unconditional jumps are expressed as a JGE with op1 set to 0."""
+
+    def __init__(self, op1, op2):
+        if op1 != 0:
+            raise LineLogError("malformed JUMP, op1 must be 0, got %d" % op1)
+        self._target = op2
+
+    def __str__(self):
+        return r'JUMP %d' % (self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(0, self._target)
+
+    def execute(self, rev, pc, emit):
+        return self._target
+
+class _eof(_llinstruction):
+    """EOF is expressed as a JGE that always jumps to 0."""
+
+    def __init__(self, op1, op2):
+        if op1 != 0:
+            raise LineLogError("malformed EOF, op1 must be 0, got %d" % op1)
+        if op2 != 0:
+            raise LineLogError("malformed EOF, op2 must be 0, got %d" % op2)
+
+    def __str__(self):
+        return r'EOF'
+
+    def __eq__(self, other):
+        return type(self) == type(other)
+
+    def encode(self):
+        return _llentry.pack(0, 0)
+
+    def execute(self, rev, pc, emit):
+        return None
+
+class _jl(_llinstruction):
+    """If the current rev is less than op1, jump to op2."""
+
+    def __init__(self, op1, op2):
+        self._cmprev = op1
+        self._target = op2
+
+    def __str__(self):
+        return r'JL %d %d' % (self._cmprev, self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._cmprev == other._cmprev
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(1 | (self._cmprev << 2), self._target)
+
+    def execute(self, rev, pc, emit):
+        if rev < self._cmprev:
+            return self._target
+        return pc + 1
+
+class _line(_llinstruction):
+    """Emit a line."""
+
+    def __init__(self, op1, op2):
+        # This line was introduced by this revision number.
+        self._rev = op1
+        # This line had the specified line number in the introducing revision.
+        self._origlineno = op2
+
+    def __str__(self):
+        return r'LINE %d %d' % (self._rev, self._origlineno)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._rev == other._rev
+                and self._origlineno == other._origlineno)
+
+    def encode(self):
+        return _llentry.pack(2 | (self._rev << 2), self._origlineno)
+
+    def execute(self, rev, pc, emit):
+        emit(lineinfo(self._rev, self._origlineno, pc))
+        return pc + 1
+
+def _decodeone(data, offset):
+    """Decode a single linelog instruction from an offset in a buffer."""
+    try:
+        op1, op2 = _llentry.unpack_from(data, offset)
+    except struct.error as e:
+        raise LineLogError('reading an instruction failed: %r' % e)
+    opcode = op1 & 0b11
+    op1 = op1 >> 2
+    if opcode == 0:
+        if op1 == 0:
+            if op2 == 0:
+                return _eof(op1, op2)
+            return _jump(op1, op2)
+        return _jge(op1, op2)
+    elif opcode == 1:
+        return _jl(op1, op2)
+    elif opcode == 2:
+        return _line(op1, op2)
+    raise NotImplementedError('Unimplemented opcode %r' % opcode)
+
+class linelog(object):
+    """Efficient cache for per-line history information."""
+
+    def __init__(self, program=None, maxrev=0):
+        if program is None:
+            # We pad the program with an extra leading EOF so that our
+            # offsets will match the C code exactly. This means we can
+            # interoperate with the C code.
+            program = [_eof(0, 0), _eof(0, 0)]
+        self._program = program
+        self._lastannotate = None
+        self._maxrev = maxrev
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._program == other._program
+                and self._maxrev == other._maxrev)
+
+    def __repr__(self):
+        return '<linelog at %s: maxrev=%d size=%d>' % (
+            hex(id(self)), self._maxrev, len(self._program))
+
+    def debugstr(self):
+        fmt = r'%%%dd %%s' % len(str(len(self._program)))
+        return pycompat.sysstr('\n').join(
+            fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1))
+
+    @classmethod
+    def fromdata(cls, buf):
+        if len(buf) % _llentry.size != 0:
+            raise LineLogError(
+                "invalid linelog buffer size %d (must be a multiple of %d)" % (
+                    len(buf), _llentry.size))
+        expected = len(buf) / _llentry.size
+        fakejge = _decodeone(buf, 0)
+        if isinstance(fakejge, _jump):
+            maxrev = 0
+        else:
+            maxrev = fakejge._cmprev
+        numentries = fakejge._target
+        if expected != numentries:
+            raise LineLogError("corrupt linelog data: claimed"
+                               " %d entries but given data for %d entries" % (
+                                   expected, numentries))
+        instructions = [_eof(0, 0)]
+        for offset in pycompat.xrange(1, numentries):
+            instructions.append(_decodeone(buf, offset * _llentry.size))
+        return cls(instructions, maxrev=maxrev)
+
+    def encode(self):
+        hdr = _jge(self._maxrev, len(self._program)).encode()
+        return hdr + ''.join(i.encode() for i in self._program[1:])
+
+    def clear(self):
+        self._program = []
+        self._maxrev = 0
+        self._lastannotate = None
+
+    def replacelines_vec(self, rev, a1, a2, blines):
+        return self.replacelines(rev, a1, a2, 0, len(blines),
+                                 _internal_blines=blines)
+
+    def replacelines(self, rev, a1, a2, b1, b2, _internal_blines=None):
+        """Replace lines [a1, a2) with lines [b1, b2)."""
+        if self._lastannotate:
+            # TODO(augie): make replacelines() accept a revision at
+            # which we're editing as well as a revision to mark
+            # responsible for the edits. In hg-experimental it's
+            # stateful like this, so we're doing the same thing to
+            # retain compatibility with absorb until that's imported.
+            ar = self._lastannotate
+        else:
+            ar = self.annotate(rev)
+            #        ar = self.annotate(self._maxrev)
+        if a1 > len(ar.lines):
+            raise LineLogError(
+                '%d contains %d lines, tried to access line %d' % (
+                    rev, len(ar.lines), a1))
+        elif a1 == len(ar.lines):
+            # Simulated EOF instruction since we're at EOF, which
+            # doesn't have a "real" line.
+            a1inst = _eof(0, 0)
+            a1info = lineinfo(0, 0, ar._eof)
+        else:
+            a1info = ar.lines[a1]
+            a1inst = self._program[a1info._offset]
+        programlen = self._program.__len__
+        oldproglen = programlen()
+        appendinst = self._program.append
+
+        # insert
+        blineinfos = []
+        bappend = blineinfos.append
+        if b1 < b2:
+            # Determine the jump target for the JGE at the start of
+            # the new block.
+            tgt = oldproglen + (b2 - b1 + 1)
+            # Jump to skip the insert if we're at an older revision.
+            appendinst(_jl(rev, tgt))
+            for linenum in pycompat.xrange(b1, b2):
+                if _internal_blines is None:
+                    bappend(lineinfo(rev, linenum, programlen()))
+                    appendinst(_line(rev, linenum))
+                else:
+                    newrev, newlinenum = _internal_blines[linenum]
+                    bappend(lineinfo(newrev, newlinenum, programlen()))
+                    appendinst(_line(newrev, newlinenum))
+        # delete
+        if a1 < a2:
+            if a2 > len(ar.lines):
+                raise LineLogError(
+                    '%d contains %d lines, tried to access line %d' % (
+                        rev, len(ar.lines), a2))
+            elif a2 == len(ar.lines):
+                endaddr = ar._eof
+            else:
+                endaddr = ar.lines[a2]._offset
+            if a2 > 0 and rev < self._maxrev:
+                # If we're here, we're deleting a chunk of an old
+                # commit, so we need to be careful and not touch
+                # invisible lines between a2-1 and a2 (IOW, lines that
+                # are added later).
+                endaddr = ar.lines[a2 - 1]._offset + 1
+            appendinst(_jge(rev, endaddr))
+        # copy instruction from a1
+        a1instpc = programlen()
+        appendinst(a1inst)
+        # if a1inst isn't a jump or EOF, then we need to add an unconditional
+        # jump back into the program here.
+        if not isinstance(a1inst, (_jump, _eof)):
+            appendinst(_jump(0, a1info._offset + 1))
+        # Patch instruction at a1, which makes our patch live.
+        self._program[a1info._offset] = _jump(0, oldproglen)
+
+        # Update self._lastannotate in place. This serves as a cache to avoid
+        # expensive "self.annotate" in this function, when "replacelines" is
+        # used continuously.
+        if len(self._lastannotate.lines) > a1:
+            self._lastannotate.lines[a1]._offset = a1instpc
+        else:
+            assert isinstance(a1inst, _eof)
+            self._lastannotate._eof = a1instpc
+        self._lastannotate.lines[a1:a2] = blineinfos
+        self._lastannotate.rev = max(self._lastannotate.rev, rev)
+
+        if rev > self._maxrev:
+            self._maxrev = rev
+
+    def annotate(self, rev):
+        pc = 1
+        lines = []
+        executed = 0
+        # Sanity check: if instructions executed exceeds len(program), we
+        # hit an infinite loop in the linelog program somehow and we
+        # should stop.
+        while pc is not None and executed < len(self._program):
+            inst = self._program[pc]
+            lastpc = pc
+            pc = inst.execute(rev, pc, lines.append)
+            executed += 1
+        if pc is not None:
+            raise LineLogError(
+                r'Probably hit an infinite loop in linelog. Program:\n' +
+                self.debugstr())
+        ar = annotateresult(rev, lines, lastpc)
+        self._lastannotate = ar
+        return ar
+
+    @property
+    def maxrev(self):
+        return self._maxrev
+
+    # Stateful methods which depend on the value of the last
+    # annotation run. This API is for compatibility with the original
+    # linelog, and we should probably consider refactoring it.
+    @property
+    def annotateresult(self):
+        """Return the last annotation result. C linelog code exposed this."""
+        return [(l.rev, l.linenum) for l in self._lastannotate.lines]
+
+    def getoffset(self, line):
+        return self._lastannotate.lines[line]._offset
+
+    def getalllines(self, start=0, end=0):
+        """Get all lines that ever occurred in [start, end).
+
+        Passing start == end == 0 means "all lines ever".
+
+        This works in terms of *internal* program offsets, not line numbers.
+        """
+        pc = start or 1
+        lines = []
+        # only take as many steps as there are instructions in the
+        # program - if we don't find an EOF or our stop-line before
+        # then, something is badly broken.
+        for step in pycompat.xrange(len(self._program)):
+            inst = self._program[pc]
+            nextpc = pc + 1
+            if isinstance(inst, _jump):
+                nextpc = inst._target
+            elif isinstance(inst, _eof):
+                return lines
+            elif isinstance(inst, (_jl, _jge)):
+                pass
+            elif isinstance(inst, _line):
+                lines.append((inst._rev, inst._origlineno))
+            else:
+                raise LineLogError("Illegal instruction %r" % inst)
+            if nextpc == end:
+                return lines
+            pc = nextpc
+        raise LineLogError("Failed to perform getalllines")
--- a/mercurial/localrepo.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/localrepo.py	Fri Aug 24 12:55:05 2018 -0700
@@ -495,6 +495,11 @@
                         ' dummy changelog to prevent using the old repo layout'
                     )
             else:
+                try:
+                    self.vfs.stat()
+                except OSError as inst:
+                    if inst.errno != errno.ENOENT:
+                        raise
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
@@ -811,7 +816,7 @@
                                " working parent %s!\n") % short(node))
             return nullid
 
-    @repofilecache(narrowspec.FILENAME)
+    @storecache(narrowspec.FILENAME)
     def narrowpats(self):
         """matcher patterns for this repository's narrowspec
 
@@ -823,9 +828,9 @@
             source = hg.sharedreposource(self)
         return narrowspec.load(source)
 
-    @repofilecache(narrowspec.FILENAME)
+    @storecache(narrowspec.FILENAME)
     def _narrowmatch(self):
-        if changegroup.NARROW_REQUIREMENT not in self.requirements:
+        if repository.NARROW_REQUIREMENT not in self.requirements:
             return matchmod.always(self.root, '')
         include, exclude = self.narrowpats
         return narrowspec.match(self.root, include=include, exclude=exclude)
@@ -850,7 +855,7 @@
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
             return [context.changectx(self, i)
-                    for i in xrange(*changeid.indices(len(self)))
+                    for i in pycompat.xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         try:
             return context.changectx(self, changeid)
@@ -860,7 +865,8 @@
     def __contains__(self, changeid):
         """True if the given changeid exists
 
-        error.LookupError is raised if an ambiguous node specified.
+        error.AmbiguousPrefixLookupError is raised if an ambiguous node
+        specified.
         """
         try:
             self[changeid]
@@ -1372,6 +1378,7 @@
             else:
                 # discard all changes (including ones already written
                 # out) in this transaction
+                narrowspec.restorebackup(self, 'journal.narrowspec')
                 repo.dirstate.restorebackup(None, 'journal.dirstate')
 
                 repo.invalidate(clearfilecache=True)
@@ -1385,7 +1392,7 @@
                                      releasefn=releasefn,
                                      checkambigfiles=_cachedfiles,
                                      name=desc)
-        tr.changes['revs'] = xrange(0, 0)
+        tr.changes['revs'] = pycompat.xrange(0, 0)
         tr.changes['obsmarkers'] = set()
         tr.changes['phases'] = {}
         tr.changes['bookmarks'] = {}
@@ -1460,6 +1467,7 @@
     @unfilteredmethod
     def _writejournal(self, desc):
         self.dirstate.savebackup(None, 'journal.dirstate')
+        narrowspec.savebackup(self, 'journal.narrowspec')
         self.vfs.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.vfs.write("journal.desc",
@@ -1547,6 +1555,7 @@
             # prevent dirstateguard from overwriting already restored one
             dsguard.close()
 
+            narrowspec.restorebackup(self, 'undo.narrowspec')
             self.dirstate.restorebackup(None, 'undo.dirstate')
             try:
                 branch = self.vfs.read('undo.branch')
@@ -1612,6 +1621,10 @@
                 rbc.branchinfo(r)
             rbc.write()
 
+            # ensure the working copy parents are in the manifestfulltextcache
+            for ctx in self['.'].parents():
+                ctx.manifest()  # accessing the manifest is enough
+
     def invalidatecaches(self):
 
         if '_tagscache' in vars(self):
@@ -2026,6 +2039,11 @@
     def commitctx(self, ctx, error=False):
         """Add a new revision to current repository.
         Revision information is passed via the context argument.
+
+        ctx.files() should list all files involved in this commit, i.e.
+        modified/added/removed files. On merge, it may be wider than the
+        ctx.files() to be committed, since any file nodes derived directly
+        from p1 or p2 are excluded from the committed ctx.files().
         """
 
         tr = None
@@ -2039,6 +2057,7 @@
 
             if ctx.manifestnode():
                 # reuse an existing manifest revision
+                self.ui.debug('reusing known manifest\n')
                 mn = ctx.manifestnode()
                 files = ctx.files()
             elif ctx.files():
@@ -2077,16 +2096,31 @@
                         raise
 
                 # update manifest
-                self.ui.note(_("committing manifest\n"))
                 removed = [f for f in sorted(removed) if f in m1 or f in m2]
                 drop = [f for f in removed if f in m]
                 for f in drop:
                     del m[f]
-                mn = mctx.write(trp, linkrev,
-                                p1.manifestnode(), p2.manifestnode(),
-                                added, drop)
                 files = changed + removed
+                md = None
+                if not files:
+                    # if no "files" actually changed in terms of the changelog,
+                    # try hard to detect unmodified manifest entry so that the
+                    # exact same commit can be reproduced later on convert.
+                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
+                if not files and md:
+                    self.ui.debug('not reusing manifest (no file change in '
+                                  'changelog, but manifest differs)\n')
+                if files or md:
+                    self.ui.note(_("committing manifest\n"))
+                    mn = mctx.write(trp, linkrev,
+                                    p1.manifestnode(), p2.manifestnode(),
+                                    added, drop)
+                else:
+                    self.ui.debug('reusing manifest form p1 (listed files '
+                                  'actually unchanged)\n')
+                    mn = p1.manifestnode()
             else:
+                self.ui.debug('reusing manifest from p1 (no file change)\n')
                 mn = p1.manifestnode()
                 files = []
 
--- a/mercurial/mail.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/mail.py	Fri Aug 24 12:55:05 2018 -0700
@@ -73,15 +73,24 @@
 
     def _get_socket(self, host, port, timeout):
         if self.debuglevel > 0:
-            self._ui.debug('connect: %r\n' % (host, port))
+            self._ui.debug('connect: %r\n' % ((host, port),))
         new_socket = socket.create_connection((host, port), timeout)
         new_socket = sslutil.wrapsocket(new_socket,
                                         self.keyfile, self.certfile,
                                         ui=self._ui,
                                         serverhostname=self._host)
-        self.file = smtplib.SSLFakeFile(new_socket)
+        self.file = new_socket.makefile(r'rb')
         return new_socket
 
+def _pyhastls():
+    """Returns true iff Python has TLS support, false otherwise."""
+    try:
+        import ssl
+        getattr(ssl, 'HAS_TLS', False)
+        return True
+    except ImportError:
+        return False
+
 def _smtp(ui):
     '''build an smtp connection and return a function to send mail'''
     local_hostname = ui.config('smtp', 'local_hostname')
@@ -89,7 +98,7 @@
     # backward compatible: when tls = true, we use starttls.
     starttls = tls == 'starttls' or stringutil.parsebool(tls)
     smtps = tls == 'smtps'
-    if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
+    if (starttls or smtps) and not _pyhastls():
         raise error.Abort(_("can't use TLS: Python SSL support not installed"))
     mailhost = ui.config('smtp', 'host')
     if not mailhost:
@@ -143,8 +152,9 @@
 def _sendmail(ui, sender, recipients, msg):
     '''send mail using sendmail.'''
     program = ui.config('email', 'method')
-    cmdline = '%s -f %s %s' % (program, stringutil.email(sender),
-                               ' '.join(map(stringutil.email, recipients)))
+    stremail = lambda x: stringutil.email(encoding.strtolocal(x))
+    cmdline = '%s -f %s %s' % (program, stremail(sender),
+                               ' '.join(map(stremail, recipients)))
     ui.note(_('sending mail: %s\n') % cmdline)
     fp = procutil.popen(cmdline, 'wb')
     fp.write(util.tonativeeol(msg))
@@ -160,7 +170,8 @@
     # Should be time.asctime(), but Windows prints 2-characters day
     # of month instead of one. Make them print the same thing.
     date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime())
-    fp.write('From %s %s\n' % (sender, date))
+    fp.write('From %s %s\n' % (encoding.strtolocal(sender),
+                               encoding.strtolocal(date)))
     fp.write(msg)
     fp.write('\n\n')
     fp.close()
@@ -209,7 +220,7 @@
 
     cs = ['us-ascii', 'utf-8', encoding.encoding, encoding.fallbackencoding]
     if display:
-        return mimetextqp(s, subtype, 'us-ascii')
+        cs = ['us-ascii']
     for charset in cs:
         try:
             s.decode(pycompat.sysstr(charset))
@@ -252,10 +263,27 @@
     order. Tries both encoding and fallbackencoding for input. Only as
     last resort send as is in fake ascii.
     Caveat: Do not use for mail parts containing patches!'''
+    sendcharsets = charsets or _charsets(ui)
+    if not isinstance(s, bytes):
+        # We have unicode data, which we need to try and encode to
+        # some reasonable-ish encoding. Try the encodings the user
+        # wants, and fall back to garbage-in-ascii.
+        for ocs in sendcharsets:
+            try:
+                return s.encode(pycompat.sysstr(ocs)), ocs
+            except UnicodeEncodeError:
+                pass
+            except LookupError:
+                ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
+        else:
+            # Everything failed, ascii-armor what we've got and send it.
+            return s.encode('ascii', 'backslashreplace')
+    # We have a bytes of unknown encoding. We'll try and guess a valid
+    # encoding, falling back to pretending we had ascii even though we
+    # know that's wrong.
     try:
         s.decode('ascii')
     except UnicodeDecodeError:
-        sendcharsets = charsets or _charsets(ui)
         for ics in (encoding.encoding, encoding.fallbackencoding):
             try:
                 u = s.decode(ics)
@@ -263,7 +291,7 @@
                 continue
             for ocs in sendcharsets:
                 try:
-                    return u.encode(ocs), ocs
+                    return u.encode(pycompat.sysstr(ocs)), ocs
                 except UnicodeEncodeError:
                     pass
                 except LookupError:
@@ -280,40 +308,46 @@
     return s
 
 def _addressencode(ui, name, addr, charsets=None):
+    assert isinstance(addr, bytes)
     name = headencode(ui, name, charsets)
     try:
         acc, dom = addr.split('@')
-        acc = acc.encode('ascii')
-        dom = dom.decode(encoding.encoding).encode('idna')
+        acc.decode('ascii')
+        dom = dom.decode(pycompat.sysstr(encoding.encoding)).encode('idna')
         addr = '%s@%s' % (acc, dom)
     except UnicodeDecodeError:
         raise error.Abort(_('invalid email address: %s') % addr)
     except ValueError:
         try:
             # too strict?
-            addr = addr.encode('ascii')
+            addr.decode('ascii')
         except UnicodeDecodeError:
             raise error.Abort(_('invalid local address: %s') % addr)
-    return email.utils.formataddr((name, addr))
+    return pycompat.bytesurl(
+        email.utils.formataddr((name, encoding.strfromlocal(addr))))
 
 def addressencode(ui, address, charsets=None, display=False):
     '''Turns address into RFC-2047 compliant header.'''
     if display or not address:
         return address or ''
-    name, addr = email.utils.parseaddr(address)
-    return _addressencode(ui, name, addr, charsets)
+    name, addr = email.utils.parseaddr(encoding.strfromlocal(address))
+    return _addressencode(ui, name, encoding.strtolocal(addr), charsets)
 
 def addrlistencode(ui, addrs, charsets=None, display=False):
     '''Turns a list of addresses into a list of RFC-2047 compliant headers.
     A single element of input list may contain multiple addresses, but output
     always has one address per item'''
+    for a in addrs:
+        assert isinstance(a, bytes), (r'%r unexpectedly not a bytestr' % a)
     if display:
         return [a.strip() for a in addrs if a.strip()]
 
     result = []
-    for name, addr in email.utils.getaddresses(addrs):
+    for name, addr in email.utils.getaddresses(
+            [encoding.strfromlocal(a) for a in addrs]):
         if name or addr:
-            result.append(_addressencode(ui, name, addr, charsets))
+            r = _addressencode(ui, name, encoding.strtolocal(addr), charsets)
+            result.append(r)
     return result
 
 def mimeencode(ui, s, charsets=None, display=False):
--- a/mercurial/manifest.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/manifest.py	Fri Aug 24 12:55:05 2018 -0700
@@ -10,6 +10,7 @@
 import heapq
 import itertools
 import struct
+import weakref
 
 from .i18n import _
 from .node import (
@@ -1136,11 +1137,120 @@
             for subtree in subm.walksubtrees(matcher=matcher):
                 yield subtree
 
+class manifestfulltextcache(util.lrucachedict):
+    """File-backed LRU cache for the manifest cache
+
+    File consists of entries, up to EOF:
+
+    - 20 bytes node, 4 bytes length, <length> manifest data
+
+    These are written in reverse cache order (oldest to newest).
+
+    """
+    def __init__(self, max):
+        super(manifestfulltextcache, self).__init__(max)
+        self._dirty = False
+        self._read = False
+        self._opener = None
+
+    def read(self):
+        if self._read or self._opener is None:
+            return
+
+        try:
+            with self._opener('manifestfulltextcache') as fp:
+                set = super(manifestfulltextcache, self).__setitem__
+                # ignore trailing data, this is a cache, corruption is skipped
+                while True:
+                    node = fp.read(20)
+                    if len(node) < 20:
+                        break
+                    try:
+                        size = struct.unpack('>L', fp.read(4))[0]
+                    except struct.error:
+                        break
+                    value = bytearray(fp.read(size))
+                    if len(value) != size:
+                        break
+                    set(node, value)
+        except IOError:
+            # the file is allowed to be missing
+            pass
+
+        self._read = True
+        self._dirty = False
+
+    def write(self):
+        if not self._dirty or self._opener is None:
+            return
+        # rotate backwards to the first used node
+        with self._opener(
+                'manifestfulltextcache', 'w', atomictemp=True, checkambig=True
+            ) as fp:
+            node = self._head.prev
+            while True:
+                if node.key in self._cache:
+                    fp.write(node.key)
+                    fp.write(struct.pack('>L', len(node.value)))
+                    fp.write(node.value)
+                if node is self._head:
+                    break
+                node = node.prev
+
+    def __len__(self):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__len__()
+
+    def __contains__(self, k):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__contains__(k)
+
+    def __iter__(self):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__iter__()
+
+    def __getitem__(self, k):
+        if not self._read:
+            self.read()
+        # the cache lru order can change on read
+        setdirty = self._cache.get(k) is not self._head
+        value = super(manifestfulltextcache, self).__getitem__(k)
+        if setdirty:
+            self._dirty = True
+        return value
+
+    def __setitem__(self, k, v):
+        if not self._read:
+            self.read()
+        super(manifestfulltextcache, self).__setitem__(k, v)
+        self._dirty = True
+
+    def __delitem__(self, k):
+        if not self._read:
+            self.read()
+        super(manifestfulltextcache, self).__delitem__(k)
+        self._dirty = True
+
+    def get(self, k, default=None):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).get(k, default=default)
+
+    def clear(self, clear_persisted_data=False):
+        super(manifestfulltextcache, self).clear()
+        if clear_persisted_data:
+            self._dirty = True
+            self.write()
+        self._read = False
+
 class manifestrevlog(revlog.revlog):
     '''A revlog that stores manifest texts. This is responsible for caching the
     full-text manifest contents.
     '''
-    def __init__(self, opener, dir='', dirlogcache=None, indexfile=None,
+    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                  treemanifest=False):
         """Constructs a new manifest revlog
 
@@ -1164,36 +1274,58 @@
 
         self._treeondisk = optiontreemanifest or treemanifest
 
-        self._fulltextcache = util.lrucachedict(cachesize)
+        self._fulltextcache = manifestfulltextcache(cachesize)
 
-        if dir:
+        if tree:
             assert self._treeondisk, 'opts is %r' % opts
 
         if indexfile is None:
             indexfile = '00manifest.i'
-            if dir:
-                indexfile = "meta/" + dir + indexfile
+            if tree:
+                indexfile = "meta/" + tree + indexfile
 
-        self._dir = dir
+        self._tree = tree
         # The dirlogcache is kept on the root manifest log
-        if dir:
+        if tree:
             self._dirlogcache = dirlogcache
         else:
             self._dirlogcache = {'': self}
 
         super(manifestrevlog, self).__init__(opener, indexfile,
                                              # only root indexfile is cached
-                                             checkambig=not bool(dir),
+                                             checkambig=not bool(tree),
                                              mmaplargeindex=True)
 
+    def _setupmanifestcachehooks(self, repo):
+        """Persist the manifestfulltextcache on lock release"""
+        if not util.safehasattr(repo, '_lockref'):
+            return
+
+        self._fulltextcache._opener = repo.cachevfs
+        reporef = weakref.ref(repo)
+        manifestrevlogref = weakref.ref(self)
+
+        def persistmanifestcache():
+            repo = reporef()
+            self = manifestrevlogref()
+            if repo is None or self is None:
+                return
+            if repo.manifestlog._revlog is not self:
+                # there's a different manifest in play now, abort
+                return
+            self._fulltextcache.write()
+
+        if repo._currentlock(repo._lockref) is not None:
+            repo._afterlock(persistmanifestcache)
+
     @property
     def fulltextcache(self):
         return self._fulltextcache
 
-    def clearcaches(self):
+    def clearcaches(self, clear_persisted_data=False):
         super(manifestrevlog, self).clearcaches()
-        self._fulltextcache.clear()
-        self._dirlogcache = {'': self}
+        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
+        self._dirlogcache = {self._tree: self}
 
     def dirlog(self, d):
         if d:
@@ -1228,8 +1360,8 @@
             # process.
             if self._treeondisk:
                 assert readtree, "readtree must be set for treemanifest writes"
-                m1 = readtree(self._dir, p1)
-                m2 = readtree(self._dir, p2)
+                m1 = readtree(self._tree, p1)
+                m2 = readtree(self._tree, p2)
                 n = self._addtree(m, transaction, link, m1, m2, readtree)
                 arraytext = None
             else:
@@ -1245,7 +1377,8 @@
     def _addtree(self, m, transaction, link, m1, m2, readtree):
         # If the manifest is unchanged compared to one parent,
         # don't write a new revision
-        if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)):
+        if self._tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
+            m2)):
             return m.node()
         def writesubtree(subm, subp1, subp2):
             sublog = self.dirlog(subm.dir())
@@ -1254,7 +1387,7 @@
         m.writesubtrees(m1, m2, writesubtree)
         text = m.dirtext()
         n = None
-        if self._dir != '':
+        if self._tree != '':
             # Double-check whether contents are unchanged to one parent
             if text == m1.dirtext():
                 n = m1.node()
@@ -1285,9 +1418,11 @@
         if opts is not None:
             usetreemanifest = opts.get('treemanifest', usetreemanifest)
             cachesize = opts.get('manifestcachesize', cachesize)
-        self._treeinmem = usetreemanifest
+
+        self._treemanifests = usetreemanifest
 
         self._revlog = repo._constructmanifest()
+        self._revlog._setupmanifestcachehooks(repo)
         self._narrowmatch = repo.narrowmatch()
 
         # A cache of the manifestctx or treemanifestctx for each directory
@@ -1302,59 +1437,59 @@
         """
         return self.get('', node)
 
-    def get(self, dir, node, verify=True):
+    def get(self, tree, node, verify=True):
         """Retrieves the manifest instance for the given node. Throws a
         LookupError if not found.
 
         `verify` - if True an exception will be thrown if the node is not in
                    the revlog
         """
-        if node in self._dirmancache.get(dir, ()):
-            return self._dirmancache[dir][node]
+        if node in self._dirmancache.get(tree, ()):
+            return self._dirmancache[tree][node]
 
         if not self._narrowmatch.always():
-            if not self._narrowmatch.visitdir(dir[:-1] or '.'):
-                return excludeddirmanifestctx(dir, node)
-        if dir:
+            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
+                return excludeddirmanifestctx(tree, node)
+        if tree:
             if self._revlog._treeondisk:
                 if verify:
-                    dirlog = self._revlog.dirlog(dir)
-                    if node not in dirlog.nodemap:
-                        raise LookupError(node, dirlog.indexfile,
-                                          _('no node'))
-                m = treemanifestctx(self, dir, node)
+                    # Side-effect is LookupError is raised if node doesn't
+                    # exist.
+                    self.getstorage(tree).rev(node)
+
+                m = treemanifestctx(self, tree, node)
             else:
                 raise error.Abort(
                         _("cannot ask for manifest directory '%s' in a flat "
-                          "manifest") % dir)
+                          "manifest") % tree)
         else:
             if verify:
-                if node not in self._revlog.nodemap:
-                    raise LookupError(node, self._revlog.indexfile,
-                                      _('no node'))
-            if self._treeinmem:
+                # Side-effect is LookupError is raised if node doesn't exist.
+                self._revlog.rev(node)
+
+            if self._treemanifests:
                 m = treemanifestctx(self, '', node)
             else:
                 m = manifestctx(self, node)
 
         if node != revlog.nullid:
-            mancache = self._dirmancache.get(dir)
+            mancache = self._dirmancache.get(tree)
             if not mancache:
                 mancache = util.lrucachedict(self._cachesize)
-                self._dirmancache[dir] = mancache
+                self._dirmancache[tree] = mancache
             mancache[node] = m
         return m
 
-    def clearcaches(self):
+    def getstorage(self, tree):
+        return self._revlog.dirlog(tree)
+
+    def clearcaches(self, clear_persisted_data=False):
         self._dirmancache.clear()
-        self._revlog.clearcaches()
+        self._revlog.clearcaches(clear_persisted_data=clear_persisted_data)
 
     def rev(self, node):
         return self._revlog.rev(node)
 
-    def addgroup(self, deltas, linkmapper, transaction):
-        return self._revlog.addgroup(deltas, linkmapper, transaction)
-
 @interfaceutil.implementer(repository.imanifestrevisionwritable)
 class memmanifestctx(object):
     def __init__(self, manifestlog):
@@ -1421,9 +1556,12 @@
                 self._data = manifestdict()
             else:
                 rl = self._revlog()
-                text = rl.revision(self._node)
-                arraytext = bytearray(text)
-                rl._fulltextcache[self._node] = arraytext
+                if self._node in rl._fulltextcache:
+                    text = pycompat.bytestr(rl._fulltextcache[self._node])
+                else:
+                    text = rl.revision(self._node)
+                    arraytext = bytearray(text)
+                    rl._fulltextcache[self._node] = arraytext
                 self._data = manifestdict(text)
         return self._data
 
@@ -1504,7 +1642,7 @@
         if not narrowmatch.always():
             if not narrowmatch.visitdir(self._dir[:-1] or '.'):
                 return excludedmanifestrevlog(self._dir)
-        return self._manifestlog._revlog.dirlog(self._dir)
+        return self._manifestlog.getstorage(self._dir)
 
     def read(self):
         if self._data is None:
@@ -1523,9 +1661,12 @@
                 m.setnode(self._node)
                 self._data = m
             else:
-                text = rl.revision(self._node)
-                arraytext = bytearray(text)
-                rl.fulltextcache[self._node] = arraytext
+                if self._node in rl.fulltextcache:
+                    text = pycompat.bytestr(rl.fulltextcache[self._node])
+                else:
+                    text = rl.revision(self._node)
+                    arraytext = bytearray(text)
+                    rl.fulltextcache[self._node] = arraytext
                 self._data = treemanifest(dir=self._dir, text=text)
 
         return self._data
--- a/mercurial/match.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/match.py	Fri Aug 24 12:55:05 2018 -0700
@@ -8,6 +8,7 @@
 from __future__ import absolute_import, print_function
 
 import copy
+import itertools
 import os
 import re
 
@@ -331,6 +332,49 @@
         '''
         return True
 
+    def visitchildrenset(self, dir):
+        '''Decides whether a directory should be visited based on whether it
+        has potential matches in it or one of its subdirectories, and
+        potentially lists which subdirectories of that directory should be
+        visited. This is based on the match's primary, included, and excluded
+        patterns.
+
+        This function is very similar to 'visitdir', and the following mapping
+        can be applied:
+
+             visitdir | visitchildrenlist
+            ----------+-------------------
+             False    | set()
+             'all'    | 'all'
+             True     | 'this' OR non-empty set of subdirs -or files- to visit
+
+        Example:
+          Assume matchers ['path:foo/bar', 'rootfilesin:qux'], we would return
+          the following values (assuming the implementation of visitchildrenset
+          is capable of recognizing this; some implementations are not).
+
+          '.' -> {'foo', 'qux'}
+          'baz' -> set()
+          'foo' -> {'bar'}
+          # Ideally this would be 'all', but since the prefix nature of matchers
+          # is applied to the entire matcher, we have to downgrade this to
+          # 'this' due to the non-prefix 'rootfilesin'-kind matcher being mixed
+          # in.
+          'foo/bar' -> 'this'
+          'qux' -> 'this'
+
+        Important:
+          Most matchers do not know if they're representing files or
+          directories. They see ['path:dir/f'] and don't know whether 'f' is a
+          file or a directory, so visitchildrenset('dir') for most matchers will
+          return {'f'}, but if the matcher knows it's a file (like exactmatcher
+          does), it may return 'this'. Do not rely on the return being a set
+          indicating that there are no files in this dir to investigate (or
+          equivalently that if there are files to investigate in 'dir' that it
+          will always return 'this').
+        '''
+        return 'this'
+
     def always(self):
         '''Matcher will match everything and .files() will be empty --
         optimization might be possible.'''
@@ -367,6 +411,9 @@
     def visitdir(self, dir):
         return 'all'
 
+    def visitchildrenset(self, dir):
+        return 'all'
+
     def __repr__(self):
         return r'<alwaysmatcher>'
 
@@ -390,6 +437,9 @@
     def visitdir(self, dir):
         return False
 
+    def visitchildrenset(self, dir):
+        return set()
+
     def __repr__(self):
         return r'<nevermatcher>'
 
@@ -430,6 +480,15 @@
                 any(parentdir in self._fileset
                     for parentdir in util.finddirs(dir)))
 
+    def visitchildrenset(self, dir):
+        ret = self.visitdir(dir)
+        if ret is True:
+            return 'this'
+        elif not ret:
+            return set()
+        assert ret == 'all'
+        return 'all'
+
     def prefix(self):
         return self._prefix
 
@@ -445,11 +504,14 @@
         self._pats, self.matchfn = _buildmatch(kindpats, '(?:/|$)',
                                                listsubrepos, root)
         self._prefix = _prefix(kindpats)
-        roots, dirs = _rootsanddirs(kindpats)
+        roots, dirs, parents = _rootsdirsandparents(kindpats)
         # roots are directories which are recursively included.
         self._roots = set(roots)
         # dirs are directories which are non-recursively included.
         self._dirs = set(dirs)
+        # parents are directories which are non-recursively included because
+        # they are needed to get to items in _dirs or _roots.
+        self._parents = set(parents)
 
     def visitdir(self, dir):
         if self._prefix and dir in self._roots:
@@ -457,9 +519,47 @@
         return ('.' in self._roots or
                 dir in self._roots or
                 dir in self._dirs or
+                dir in self._parents or
                 any(parentdir in self._roots
                     for parentdir in util.finddirs(dir)))
 
+    def visitchildrenset(self, dir):
+        if self._prefix and dir in self._roots:
+            return 'all'
+        # Note: this does *not* include the 'dir in self._parents' case from
+        # visitdir, that's handled below.
+        if ('.' in self._roots or
+            dir in self._roots or
+            dir in self._dirs or
+            any(parentdir in self._roots
+                for parentdir in util.finddirs(dir))):
+            return 'this'
+
+        ret = set()
+        if dir in self._parents:
+            # We add a '/' on to `dir` so that we don't return items that are
+            # prefixed by `dir` but are actually siblings of `dir`.
+            suffixeddir = dir + '/' if dir != '.' else ''
+            # Look in all _roots, _dirs, and _parents for things that start with
+            # 'suffixeddir'.
+            for d in [q for q in
+                      itertools.chain(self._roots, self._dirs, self._parents) if
+                      q.startswith(suffixeddir)]:
+                # Don't emit '.' in the response for the root directory
+                if not suffixeddir and d == '.':
+                    continue
+
+                # We return the item name without the `suffixeddir` prefix or a
+                # slash suffix
+                d = d[len(suffixeddir):]
+                if '/' in d:
+                    # This is a subdirectory-of-a-subdirectory, i.e.
+                    # suffixeddir='foo/', d was 'foo/bar/baz' before removing
+                    # 'foo/'.
+                    d = d[:d.index('/')]
+                ret.add(d)
+        return ret
+
     @encoding.strmethod
     def __repr__(self):
         return ('<includematcher includes=%r>' % pycompat.bytestr(self._pats))
@@ -486,6 +586,26 @@
     def visitdir(self, dir):
         return dir in self._dirs
 
+    def visitchildrenset(self, dir):
+        if not self._fileset or dir not in self._dirs:
+            return set()
+
+        candidates = self._fileset | self._dirs - {'.'}
+        if dir != '.':
+            d = dir + '/'
+            candidates = set(c[len(d):] for c in candidates if
+                             c.startswith(d))
+        # self._dirs includes all of the directories, recursively, so if
+        # we're attempting to match foo/bar/baz.txt, it'll have '.', 'foo',
+        # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
+        # '/' in it, indicating a it's for a subdir-of-a-subdir; the
+        # immediate subdir will be in there without a slash.
+        ret = {c for c in candidates if '/' not in c}
+        # We really do not expect ret to be empty, since that would imply that
+        # there's something in _dirs that didn't have a file in _fileset.
+        assert ret
+        return ret
+
     def isexact(self):
         return True
 
@@ -527,6 +647,31 @@
             return False
         return bool(self._m1.visitdir(dir))
 
+    def visitchildrenset(self, dir):
+        m2_set = self._m2.visitchildrenset(dir)
+        if m2_set == 'all':
+            return set()
+        m1_set = self._m1.visitchildrenset(dir)
+        # Possible values for m1: 'all', 'this', set(...), set()
+        # Possible values for m2:        'this', set(...), set()
+        # If m2 has nothing under here that we care about, return m1, even if
+        # it's 'all'. This is a change in behavior from visitdir, which would
+        # return True, not 'all', for some reason.
+        if not m2_set:
+            return m1_set
+        if m1_set in ['all', 'this']:
+            # Never return 'all' here if m2_set is any kind of non-empty (either
+            # 'this' or set(foo)), since m2 might return set() for a
+            # subdirectory.
+            return 'this'
+        # Possible values for m1:         set(...), set()
+        # Possible values for m2: 'this', set(...)
+        # We ignore m2's set results. They're possibly incorrect:
+        #  m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset('.'):
+        #    m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
+        #    return set(), which is *not* correct, we still need to visit 'dir'!
+        return m1_set
+
     def isexact(self):
         return self._m1.isexact()
 
@@ -591,6 +736,25 @@
         # bool() because visit1=True + visit2='all' should not be 'all'
         return bool(visit1 and self._m2.visitdir(dir))
 
+    def visitchildrenset(self, dir):
+        m1_set = self._m1.visitchildrenset(dir)
+        if not m1_set:
+            return set()
+        m2_set = self._m2.visitchildrenset(dir)
+        if not m2_set:
+            return set()
+
+        if m1_set == 'all':
+            return m2_set
+        elif m2_set == 'all':
+            return m1_set
+
+        if m1_set == 'this' or m2_set == 'this':
+            return 'this'
+
+        assert isinstance(m1_set, set) and isinstance(m2_set, set)
+        return m1_set.intersection(m2_set)
+
     def always(self):
         return self._m1.always() and self._m2.always()
 
@@ -672,6 +836,13 @@
             dir = self._path + "/" + dir
         return self._matcher.visitdir(dir)
 
+    def visitchildrenset(self, dir):
+        if dir == '.':
+            dir = self._path
+        else:
+            dir = self._path + "/" + dir
+        return self._matcher.visitchildrenset(dir)
+
     def always(self):
         return self._always
 
@@ -744,6 +915,15 @@
             return self._matcher.visitdir(dir[len(self._pathprefix):])
         return dir in self._pathdirs
 
+    def visitchildrenset(self, dir):
+        if dir == self._path:
+            return self._matcher.visitchildrenset('.')
+        if dir.startswith(self._pathprefix):
+            return self._matcher.visitchildrenset(dir[len(self._pathprefix):])
+        if dir in self._pathdirs:
+            return 'this'
+        return set()
+
     def isexact(self):
         return self._matcher.isexact()
 
@@ -784,6 +964,25 @@
             r |= v
         return r
 
+    def visitchildrenset(self, dir):
+        r = set()
+        this = False
+        for m in self._matchers:
+            v = m.visitchildrenset(dir)
+            if not v:
+                continue
+            if v == 'all':
+                return v
+            if this or v == 'this':
+                this = True
+                # don't break, we might have an 'all' in here.
+                continue
+            assert isinstance(v, set)
+            r = r.union(v)
+        if this:
+            return 'this'
+        return r
+
     @encoding.strmethod
     def __repr__(self):
         return ('<unionmatcher matchers=%r>' % self._matchers)
@@ -1004,40 +1203,42 @@
     roots, dirs = _patternrootsanddirs(kindpats)
     return roots
 
-def _rootsanddirs(kindpats):
+def _rootsdirsandparents(kindpats):
     '''Returns roots and exact directories from patterns.
 
-    roots are directories to match recursively, whereas exact directories should
-    be matched non-recursively. The returned (roots, dirs) tuple will also
-    include directories that need to be implicitly considered as either, such as
-    parent directories.
+    `roots` are directories to match recursively, `dirs` should
+    be matched non-recursively, and `parents` are the implicitly required
+    directories to walk to items in either roots or dirs.
 
-    >>> _rootsanddirs(
+    Returns a tuple of (roots, dirs, parents).
+
+    >>> _rootsdirsandparents(
     ...     [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
     ...      (b'glob', b'g*', b'')])
-    (['g/h', 'g/h', '.'], ['g', '.'])
-    >>> _rootsanddirs(
+    (['g/h', 'g/h', '.'], [], ['g', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
-    ([], ['g/h', '.', 'g', '.'])
-    >>> _rootsanddirs(
+    ([], ['g/h', '.'], ['g', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
     ...      (b'path', b'', b'')])
-    (['r', 'p/p', '.'], ['p', '.'])
-    >>> _rootsanddirs(
+    (['r', 'p/p', '.'], [], ['p', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'relglob', b'rg*', b''), (b're', b're/', b''),
     ...      (b'relre', b'rr', b'')])
-    (['.', '.', '.'], ['.'])
+    (['.', '.', '.'], [], ['.'])
     '''
     r, d = _patternrootsanddirs(kindpats)
 
+    p = []
     # Append the parents as non-recursive/exact directories, since they must be
     # scanned to get to either the roots or the other exact directories.
-    d.extend(util.dirs(d))
-    d.extend(util.dirs(r))
+    p.extend(util.dirs(d))
+    p.extend(util.dirs(r))
     # util.dirs() does not include the root directory, so add it manually
-    d.append('.')
+    p.append('.')
 
-    return r, d
+    return r, d, p
 
 def _explicitfiles(kindpats):
     '''Returns the potential explicit filenames from the patterns.
--- a/mercurial/mdiff.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/mdiff.py	Fri Aug 24 12:55:05 2018 -0700
@@ -357,7 +357,7 @@
             # walk backwards from the start of the context up to the start of
             # the previous hunk context until we find a line starting with an
             # alphanumeric char.
-            for i in xrange(astart - 1, lastpos - 1, -1):
+            for i in pycompat.xrange(astart - 1, lastpos - 1, -1):
                 if l1[i][0:1].isalnum():
                     func = b' ' + l1[i].rstrip()
                     # split long function name if ASCII. otherwise we have no
@@ -381,7 +381,7 @@
         hunklines = (
             ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
             + delta
-            + [' ' + l1[x] for x in xrange(a2, aend)]
+            + [' ' + l1[x] for x in pycompat.xrange(a2, aend)]
         )
         # If either file ends without a newline and the last line of
         # that file is part of a hunk, a marker is printed. If the
@@ -390,7 +390,7 @@
         # which the hunk can end in a shared line without a newline.
         skip = False
         if not t1.endswith('\n') and astart + alen == len(l1) + 1:
-            for i in xrange(len(hunklines) - 1, -1, -1):
+            for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
                 if hunklines[i].startswith(('-', ' ')):
                     if hunklines[i].startswith(' '):
                         skip = True
@@ -398,7 +398,7 @@
                     hunklines.insert(i + 1, _missing_newline_marker)
                     break
         if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1:
-            for i in xrange(len(hunklines) - 1, -1, -1):
+            for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
                 if hunklines[i].startswith('+'):
                     hunklines[i] += '\n'
                     hunklines.insert(i + 1, _missing_newline_marker)
--- a/mercurial/minifileset.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/minifileset.py	Fri Aug 24 12:55:05 2018 -0700
@@ -11,20 +11,23 @@
 from . import (
     error,
     fileset,
+    filesetlang,
     pycompat,
 )
 
 def _sizep(x):
     # i18n: "size" is a keyword
-    expr = fileset.getstring(x, _("size requires an expression"))
+    expr = filesetlang.getstring(x, _("size requires an expression"))
     return fileset.sizematcher(expr)
 
 def _compile(tree):
     if not tree:
         raise error.ParseError(_("missing argument"))
     op = tree[0]
-    if op in {'symbol', 'string', 'kindpat'}:
-        name = fileset.getpattern(tree, {'path'}, _('invalid file pattern'))
+    if op == 'withstatus':
+        return _compile(tree[1])
+    elif op in {'symbol', 'string', 'kindpat'}:
+        name = filesetlang.getpattern(tree, {'path'}, _('invalid file pattern'))
         if name.startswith('**'): # file extension test, ex. "**.tar.gz"
             ext = name[2:]
             for c in pycompat.bytestr(ext):
@@ -39,18 +42,15 @@
             return f
         raise error.ParseError(_("unsupported file pattern: %s") % name,
                                hint=_('paths must be prefixed with "path:"'))
-    elif op == 'or':
-        func1 = _compile(tree[1])
-        func2 = _compile(tree[2])
-        return lambda n, s: func1(n, s) or func2(n, s)
+    elif op in {'or', 'patterns'}:
+        funcs = [_compile(x) for x in tree[1:]]
+        return lambda n, s: any(f(n, s) for f in funcs)
     elif op == 'and':
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and func2(n, s)
     elif op == 'not':
         return lambda n, s: not _compile(tree[1])(n, s)
-    elif op == 'group':
-        return _compile(tree[1])
     elif op == 'func':
         symbols = {
             'all': lambda n, s: True,
@@ -58,7 +58,7 @@
             'size': lambda n, s: _sizep(tree[2])(s),
         }
 
-        name = fileset.getsymbol(tree[1])
+        name = filesetlang.getsymbol(tree[1])
         if name in symbols:
             return symbols[name]
 
@@ -67,11 +67,9 @@
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and not func2(n, s)
-    elif op == 'negate':
-        raise error.ParseError(_("can't use negate operator in this context"))
     elif op == 'list':
         raise error.ParseError(_("can't use a list in this context"),
-                               hint=_('see hg help "filesets.x or y"'))
+                               hint=_('see \'hg help "filesets.x or y"\''))
     raise error.ProgrammingError('illegal tree: %r' % (tree,))
 
 def compile(text):
@@ -88,5 +86,7 @@
     files whose name ends with ".zip", and all files under "bin" in the repo
     root except for "bin/README".
     """
-    tree = fileset.parse(text)
+    tree = filesetlang.parse(text)
+    tree = filesetlang.analyze(tree)
+    tree = filesetlang.optimize(tree)
     return _compile(tree)
--- a/mercurial/minirst.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/minirst.py	Fri Aug 24 12:55:05 2018 -0700
@@ -316,7 +316,7 @@
 
             # column markers are ASCII so we can calculate column
             # position in bytes
-            columns = [x for x in xrange(len(div))
+            columns = [x for x in pycompat.xrange(len(div))
                        if div[x:x + 1] == '=' and (x == 0 or
                                                    div[x - 1:x] == ' ')]
             rows = []
@@ -685,7 +685,7 @@
                     if llen and llen != plen:
                         collapse = False
                     s = []
-                    for j in xrange(3, plen - 1):
+                    for j in pycompat.xrange(3, plen - 1):
                         parent = parents[j]
                         if (j >= llen or
                             lastparents[j] != parent):
--- a/mercurial/narrowspec.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/narrowspec.py	Fri Aug 24 12:55:05 2018 -0700
@@ -13,34 +13,13 @@
 from . import (
     error,
     match as matchmod,
+    repository,
+    sparse,
     util,
 )
 
 FILENAME = 'narrowspec'
 
-def _parsestoredpatterns(text):
-    """Parses the narrowspec format that's stored on disk."""
-    patlist = None
-    includepats = []
-    excludepats = []
-    for l in text.splitlines():
-        if l == '[includes]':
-            if patlist is None:
-                patlist = includepats
-            else:
-                raise error.Abort(_('narrowspec includes section must appear '
-                                    'at most once, before excludes'))
-        elif l == '[excludes]':
-            if patlist is not excludepats:
-                patlist = excludepats
-            else:
-                raise error.Abort(_('narrowspec excludes section must appear '
-                                    'at most once'))
-        else:
-            patlist.append(l)
-
-    return set(includepats), set(excludepats)
-
 def parseserverpatterns(text):
     """Parses the narrowspec format that's returned by the server."""
     includepats = set()
@@ -107,10 +86,10 @@
     return set(normalizepattern(p) for p in pats)
 
 def format(includes, excludes):
-    output = '[includes]\n'
+    output = '[include]\n'
     for i in sorted(includes - excludes):
         output += i + '\n'
-    output += '[excludes]\n'
+    output += '[exclude]\n'
     for e in sorted(excludes):
         output += e + '\n'
     return output
@@ -129,21 +108,41 @@
 
 def load(repo):
     try:
-        spec = repo.vfs.read(FILENAME)
+        spec = repo.svfs.read(FILENAME)
     except IOError as e:
         # Treat "narrowspec does not exist" the same as "narrowspec file exists
         # and is empty".
         if e.errno == errno.ENOENT:
-            # Without this the next call to load will use the cached
-            # non-existence of the file, which can cause some odd issues.
-            repo.invalidate(clearfilecache=True)
             return set(), set()
         raise
-    return _parsestoredpatterns(spec)
+    # maybe we should care about the profiles returned too
+    includepats, excludepats, profiles = sparse.parseconfig(repo.ui, spec,
+                                                            'narrow')
+    if profiles:
+        raise error.Abort(_("including other spec files using '%include' is not"
+                            " suported in narrowspec"))
+    return includepats, excludepats
 
 def save(repo, includepats, excludepats):
     spec = format(includepats, excludepats)
-    repo.vfs.write(FILENAME, spec)
+    repo.svfs.write(FILENAME, spec)
+
+def savebackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    vfs = repo.vfs
+    vfs.tryunlink(backupname)
+    util.copyfile(repo.svfs.join(FILENAME), vfs.join(backupname), hardlink=True)
+
+def restorebackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    util.rename(repo.vfs.join(backupname), repo.svfs.join(FILENAME))
+
+def clearbackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    repo.vfs.unlink(backupname)
 
 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
--- a/mercurial/node.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/node.py	Fri Aug 24 12:55:05 2018 -0700
@@ -21,20 +21,25 @@
         raise TypeError(e)
 
 nullrev = -1
+# In hex, this is '0000000000000000000000000000000000000000'
 nullid = b"\0" * 20
 nullhex = hex(nullid)
 
 # Phony node value to stand-in for new files in some uses of
 # manifests.
-newnodeid = '!' * 20
-addednodeid = ('0' * 15) + 'added'
-modifiednodeid = ('0' * 12) + 'modified'
+# In hex, this is '2121212121212121212121212121212121212121'
+newnodeid = '!!!!!!!!!!!!!!!!!!!!'
+# In hex, this is '3030303030303030303030303030306164646564'
+addednodeid = '000000000000000added'
+# In hex, this is '3030303030303030303030306d6f646966696564'
+modifiednodeid = '000000000000modified'
 
 wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
 
 # pseudo identifiers for working directory
 # (they are experimental, so don't add too many dependencies on them)
 wdirrev = 0x7fffffff
+# In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
 wdirid = b"\xff" * 20
 wdirhex = hex(wdirid)
 
--- a/mercurial/obsolete.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/obsolete.py	Fri Aug 24 12:55:05 2018 -0700
@@ -394,7 +394,7 @@
         off = o3 + metasize * nummeta
         metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
         metadata = []
-        for idx in xrange(0, len(metapairsize), 2):
+        for idx in pycompat.xrange(0, len(metapairsize), 2):
             o1 = off + metapairsize[idx]
             o2 = o1 + metapairsize[idx + 1]
             metadata.append((data[off:o1], data[o1:o2]))
--- a/mercurial/parser.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/parser.py	Fri Aug 24 12:55:05 2018 -0700
@@ -20,7 +20,6 @@
 
 from .i18n import _
 from . import (
-    encoding,
     error,
     pycompat,
     util,
@@ -198,16 +197,11 @@
         # mangle Python's exception into our format
         raise error.ParseError(pycompat.bytestr(e).lower())
 
-def _brepr(obj):
-    if isinstance(obj, bytes):
-        return b"'%s'" % stringutil.escapestr(obj)
-    return encoding.strtolocal(repr(obj))
-
 def _prettyformat(tree, leafnodes, level, lines):
     if not isinstance(tree, tuple):
-        lines.append((level, _brepr(tree)))
+        lines.append((level, stringutil.pprint(tree)))
     elif tree[0] in leafnodes:
-        rs = map(_brepr, tree[1:])
+        rs = map(stringutil.pprint, tree[1:])
         lines.append((level, '(%s %s)' % (tree[0], ' '.join(rs))))
     else:
         lines.append((level, '(%s' % tree[0]))
--- a/mercurial/patch.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/patch.py	Fri Aug 24 12:55:05 2018 -0700
@@ -815,7 +815,7 @@
         for x, s in enumerate(self.lines):
             self.hash.setdefault(s, []).append(x)
 
-        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
+        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
             for toponly in [True, False]:
                 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                 oldstart = oldstart + self.offset + self.skew
@@ -1286,7 +1286,7 @@
         self.lena = int(aend) - self.starta
         if self.starta:
             self.lena += 1
-        for x in xrange(self.lena):
+        for x in pycompat.xrange(self.lena):
             l = lr.readline()
             if l.startswith('---'):
                 # lines addition, old block is empty
@@ -1320,7 +1320,7 @@
         if self.startb:
             self.lenb += 1
         hunki = 1
-        for x in xrange(self.lenb):
+        for x in pycompat.xrange(self.lenb):
             l = lr.readline()
             if l.startswith('\ '):
                 # XXX: the only way to hit this is with an invalid line range.
@@ -1396,14 +1396,14 @@
             top = 0
             bot = 0
             hlen = len(self.hunk)
-            for x in xrange(hlen - 1):
+            for x in pycompat.xrange(hlen - 1):
                 # the hunk starts with the @@ line, so use x+1
                 if self.hunk[x + 1].startswith(' '):
                     top += 1
                 else:
                     break
             if not toponly:
-                for x in xrange(hlen - 1):
+                for x in pycompat.xrange(hlen - 1):
                     if self.hunk[hlen - bot - 1].startswith(' '):
                         bot += 1
                     else:
@@ -2326,7 +2326,7 @@
         relfiltered = True
 
     if not changes:
-        changes = repo.status(ctx1, ctx2, match=match)
+        changes = ctx1.status(ctx2, match=match)
     modified, added, removed = changes[:3]
 
     if not modified and not added and not removed:
--- a/mercurial/phases.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/phases.py	Fri Aug 24 12:55:05 2018 -0700
@@ -374,7 +374,7 @@
 
         changes = set() # set of revisions to be changed
         delroots = [] # set of root deleted by this path
-        for phase in xrange(targetphase + 1, len(allphases)):
+        for phase in pycompat.xrange(targetphase + 1, len(allphases)):
             # filter nodes that are not in a compatible phase already
             nodes = [n for n in nodes
                      if self.phase(repo, repo[n].rev()) >= phase]
@@ -420,7 +420,7 @@
             affected = set(repo.revs('(%ln::) - (%ln::)', new, old))
 
             # find the phase of the affected revision
-            for phase in xrange(targetphase, -1, -1):
+            for phase in pycompat.xrange(targetphase, -1, -1):
                 if phase:
                     roots = oldroots[phase]
                     revs = set(repo.revs('%ln::%ld', roots, affected))
--- a/mercurial/policy.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/policy.py	Fri Aug 24 12:55:05 2018 -0700
@@ -69,7 +69,7 @@
     (r'cext', r'bdiff'): 3,
     (r'cext', r'mpatch'): 1,
     (r'cext', r'osutil'): 4,
-    (r'cext', r'parsers'): 5,
+    (r'cext', r'parsers'): 9,
 }
 
 # map import request to other package or module
--- a/mercurial/pure/osutil.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/pure/osutil.py	Fri Aug 24 12:55:05 2018 -0700
@@ -150,7 +150,7 @@
         rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
         rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) /
                      ctypes.sizeof(ctypes.c_int))
-        return [rfds[i] for i in xrange(rfdscount)]
+        return [rfds[i] for i in pycompat.xrange(rfdscount)]
 
 else:
     import msvcrt
--- a/mercurial/pure/parsers.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/pure/parsers.py	Fri Aug 24 12:55:05 2018 -0700
@@ -39,25 +39,21 @@
 
 class BaseIndexObject(object):
     def __len__(self):
-        return self._lgt + len(self._extra) + 1
+        return self._lgt + len(self._extra)
 
-    def insert(self, i, tup):
-        assert i == -1
+    def append(self, tup):
         self._extra.append(tup)
 
-    def _fix_index(self, i):
+    def _check_index(self, i):
         if not isinstance(i, int):
             raise TypeError("expecting int indexes")
-        if i < 0:
-            i = len(self) + i
         if i < 0 or i >= len(self):
             raise IndexError
-        return i
 
     def __getitem__(self, i):
-        i = self._fix_index(i)
-        if i == len(self) - 1:
+        if i == -1:
             return (0, 0, 0, -1, -1, -1, -1, nullid)
+        self._check_index(i)
         if i >= self._lgt:
             return self._extra[i - self._lgt]
         index = self._calculate_index(i)
@@ -82,7 +78,8 @@
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError("deleting slices only supports a:-1 with step 1")
-        i = self._fix_index(i.start)
+        i = i.start
+        self._check_index(i)
         if i < self._lgt:
             self._data = self._data[:i * indexsize]
             self._lgt = i
@@ -116,7 +113,8 @@
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError("deleting slices only supports a:-1 with step 1")
-        i = self._fix_index(i.start)
+        i = i.start
+        self._check_index(i)
         if i < self._lgt:
             self._offsets = self._offsets[:i]
             self._lgt = i
--- a/mercurial/pvec.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/pvec.py	Fri Aug 24 12:55:05 2018 -0700
@@ -52,6 +52,7 @@
 
 from .node import nullrev
 from . import (
+    pycompat,
     util,
 )
 
@@ -72,7 +73,7 @@
 
 def _str(v, l):
     bs = ""
-    for p in xrange(l):
+    for p in pycompat.xrange(l):
         bs = chr(v & 255) + bs
         v >>= 8
     return bs
@@ -91,7 +92,7 @@
             c += 1
         x >>= 1
     return c
-_htab = [_hweight(x) for x in xrange(256)]
+_htab = [_hweight(x) for x in pycompat.xrange(256)]
 
 def _hamming(a, b):
     '''find the hamming distance between two longs'''
@@ -152,7 +153,7 @@
     pvc = r._pveccache
     if ctx.rev() not in pvc:
         cl = r.changelog
-        for n in xrange(ctx.rev() + 1):
+        for n in pycompat.xrange(ctx.rev() + 1):
             if n not in pvc:
                 node = cl.node(n)
                 p1, p2 = cl.parentrevs(n)
--- a/mercurial/pycompat.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/pycompat.py	Fri Aug 24 12:55:05 2018 -0700
@@ -278,6 +278,7 @@
     hasattr = _wrapattrfunc(builtins.hasattr)
     setattr = _wrapattrfunc(builtins.setattr)
     xrange = builtins.range
+    membershiprange = builtins.range
     unicode = str
 
     def open(name, mode='r', buffering=-1, encoding=None):
@@ -331,6 +332,7 @@
 else:
     import cStringIO
 
+    xrange = xrange
     unicode = unicode
     bytechr = chr
     byterepr = repr
@@ -342,6 +344,25 @@
     strurl = identity
     bytesurl = identity
 
+    class membershiprange(object):
+        "Like xrange(a,b) but with constant-time membership test"
+        def __init__(self, a, b):
+            self._range = xrange(a, b)
+        def __getitem__(self, n):
+            return self._range[n]
+        def __hash__(self):
+            return hash(self._range)
+        def __iter__(self):
+            return iter(self._range)
+        def __len__(self):
+            return len(self._range)
+        def __reversed__(self):
+            return reversed(self._range)
+        def __contains__(self, n):
+            if not self._range:
+                return False
+            return n >= self._range[0] and n <= self._range[-1]
+
     # this can't be parsed on Python 3
     exec('def raisewithtb(exc, tb):\n'
          '    raise exc, None, tb\n')
--- a/mercurial/registrar.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/registrar.py	Fri Aug 24 12:55:05 2018 -0700
@@ -247,6 +247,19 @@
      implies 'matchctx.status()' at runtime or not (False, by
      default).
 
+    Optional argument 'weight' indicates the estimated run-time cost, useful
+    for static optimization, default is 1. Higher weight means more expensive.
+    There are predefined weights in the 'filesetlang' module.
+
+    ====== =============================================================
+    Weight Description and examples
+    ====== =============================================================
+    0.5    basic match patterns (e.g. a symbol)
+    10     computing status (e.g. added()) or accessing a few files
+    30     reading file content for each (e.g. grep())
+    50     scanning working directory (ignored())
+    ====== =============================================================
+
     'filesetpredicate' instance in example above can be used to
     decorate multiple functions.
 
@@ -259,8 +272,9 @@
     _getname = _funcregistrarbase._parsefuncdecl
     _docformat = "``%s``\n    %s"
 
-    def _extrasetup(self, name, func, callstatus=False):
+    def _extrasetup(self, name, func, callstatus=False, weight=1):
         func._callstatus = callstatus
+        func._weight = weight
 
 class _templateregistrarbase(_funcregistrarbase):
     """Base of decorator to register functions as template specific one
@@ -281,7 +295,7 @@
             '''
             pass
 
-        # old API
+        # old API (DEPRECATED)
         @templatekeyword('mykeyword')
         def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
             '''Explanation of this template keyword ....
@@ -385,7 +399,8 @@
         internalmerge = registrar.internalmerge()
 
         @internalmerge('mymerge', internalmerge.mergeonly,
-                       onfailure=None, precheck=None):
+                       onfailure=None, precheck=None,
+                       binary=False, symlink=False):
         def mymergefunc(repo, mynode, orig, fcd, fco, fca,
                         toolconf, files, labels=None):
             '''Explanation of this internal merge tool ....
@@ -416,6 +431,12 @@
     'files' and 'labels'. If it returns false value, merging is aborted
     immediately (and file is marked as "unresolved").
 
+    Optional argument 'binary' is a binary files capability of internal
+    merge tool. 'nomerge' merge type implies binary=True.
+
+    Optional argument 'symlink' is a symlinks capability of internal
+    merge function. 'nomerge' merge type implies symlink=True.
+
     'internalmerge' instance in example above can be used to
     decorate multiple functions.
 
@@ -433,7 +454,14 @@
     fullmerge = 'fullmerge'  # both premerge and merge
 
     def _extrasetup(self, name, func, mergetype,
-                    onfailure=None, precheck=None):
+                    onfailure=None, precheck=None,
+                    binary=False, symlink=False):
         func.mergetype = mergetype
         func.onfailure = onfailure
         func.precheck = precheck
+
+        binarycap = binary or mergetype == self.nomerge
+        symlinkcap = symlink or mergetype == self.nomerge
+
+        # actual capabilities, which this internal merge tool has
+        func.capabilities = {"binary": binarycap, "symlink": symlinkcap}
--- a/mercurial/repair.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/repair.py	Fri Aug 24 12:55:05 2018 -0700
@@ -24,6 +24,7 @@
     exchange,
     obsolete,
     obsutil,
+    pycompat,
     util,
 )
 from .utils import (
@@ -70,7 +71,7 @@
     """find out the filelogs affected by the strip"""
     files = set()
 
-    for x in xrange(striprev, len(repo)):
+    for x in pycompat.xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
@@ -80,7 +81,7 @@
     return [revlog.linkrev(r) for r in brokenset]
 
 def _collectmanifest(repo, striprev):
-    return _collectrevlog(repo.manifestlog._revlog, striprev)
+    return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
@@ -199,7 +200,7 @@
                     repo.file(fn).strip(striprev, tr)
                 tr.endgroup()
 
-                for i in xrange(offset, len(tr.entries)):
+                for i in pycompat.xrange(offset, len(tr.entries)):
                     file, troffset, ignore = tr.entries[i]
                     with repo.svfs(file, 'a', checkambig=True) as fp:
                         fp.truncate(troffset)
@@ -297,31 +298,31 @@
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)
 
-def delayedstrip(ui, repo, nodelist, topic=None):
+def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside transaction and won't strip irreverent revs
 
     nodelist must explicitly contain all descendants. Otherwise a warning will
     be printed that some nodes are not stripped.
 
-    Always do a backup. The last non-None "topic" will be used as the backup
-    topic name. The default backup topic name is "backup".
+    Will do a backup if `backup` is True. The last non-None "topic" will be
+    used as the backup topic name. The default backup topic name is "backup".
     """
     tr = repo.currenttransaction()
     if not tr:
         nodes = safestriproots(ui, repo, nodelist)
-        return strip(ui, repo, nodes, True, topic)
+        return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabet order.
     # use '\xff' as prefix so we are likely to be called last.
     callback = tr.getpostclose('\xffstrip')
     if callback is None:
-        callback = stripcallback(ui, repo, True, topic)
+        callback = stripcallback(ui, repo, backup=backup, topic=topic)
         tr.addpostclose('\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)
 
 def stripmanifest(repo, striprev, tr, files):
-    revlog = repo.manifestlog._revlog
+    revlog = repo.manifestlog.getstorage(b'')
     revlog.strip(striprev, tr)
     striptrees(repo, tr, striprev, files)
 
@@ -332,7 +333,7 @@
             if (unencoded.startswith('meta/') and
                 unencoded.endswith('00manifest.i')):
                 dir = unencoded[5:-12]
-                repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
+                repo.manifestlog.getstorage(dir).strip(striprev, tr)
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
--- a/mercurial/repository.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/repository.py	Fri Aug 24 12:55:05 2018 -0700
@@ -15,6 +15,10 @@
     interfaceutil,
 )
 
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+NARROW_REQUIREMENT = 'narrowhg-experimental'
+
 class ipeerconnection(interfaceutil.Interface):
     """Represents a "connection" to a repository.
 
@@ -314,6 +318,87 @@
             _('cannot %s; remote repository does not support the %r '
               'capability') % (purpose, name))
 
+class irevisiondelta(interfaceutil.Interface):
+    """Represents a delta between one revision and another.
+
+    Instances convey enough information to allow a revision to be exchanged
+    with another repository.
+
+    Instances represent the fulltext revision data or a delta against
+    another revision. Therefore the ``revision`` and ``delta`` attributes
+    are mutually exclusive.
+
+    Typically used for changegroup generation.
+    """
+
+    node = interfaceutil.Attribute(
+        """20 byte node of this revision.""")
+
+    p1node = interfaceutil.Attribute(
+        """20 byte node of 1st parent of this revision.""")
+
+    p2node = interfaceutil.Attribute(
+        """20 byte node of 2nd parent of this revision.""")
+
+    linknode = interfaceutil.Attribute(
+        """20 byte node of the changelog revision this node is linked to.""")
+
+    flags = interfaceutil.Attribute(
+        """2 bytes of integer flags that apply to this revision.""")
+
+    basenode = interfaceutil.Attribute(
+        """20 byte node of the revision this data is a delta against.
+
+        ``nullid`` indicates that the revision is a full revision and not
+        a delta.
+        """)
+
+    baserevisionsize = interfaceutil.Attribute(
+        """Size of base revision this delta is against.
+
+        May be ``None`` if ``basenode`` is ``nullid``.
+        """)
+
+    revision = interfaceutil.Attribute(
+        """Raw fulltext of revision data for this node.""")
+
+    delta = interfaceutil.Attribute(
+        """Delta between ``basenode`` and ``node``.
+
+        Stored in the bdiff delta format.
+        """)
+
+class irevisiondeltarequest(interfaceutil.Interface):
+    """Represents a request to generate an ``irevisiondelta``."""
+
+    node = interfaceutil.Attribute(
+        """20 byte node of revision being requested.""")
+
+    p1node = interfaceutil.Attribute(
+        """20 byte node of 1st parent of revision.""")
+
+    p2node = interfaceutil.Attribute(
+        """20 byte node of 2nd parent of revision.""")
+
+    linknode = interfaceutil.Attribute(
+        """20 byte node to store in ``linknode`` attribute.""")
+
+    basenode = interfaceutil.Attribute(
+        """Base revision that delta should be generated against.
+
+        If ``nullid``, the derived ``irevisiondelta`` should have its
+        ``revision`` field populated and no delta should be generated.
+
+        If ``None``, the delta may be generated against any revision that
+        is an ancestor of this revision. Or a full revision may be used.
+
+        If any other value, the delta should be produced against that
+        revision.
+        """)
+
+    ellipsis = interfaceutil.Attribute(
+        """Boolean on whether the ellipsis flag should be set.""")
+
 class ifilerevisionssequence(interfaceutil.Interface):
     """Contains index data for all revisions of a file.
 
@@ -467,9 +552,6 @@
     def deltaparent(rev):
         """"Return the revision that is a suitable parent to delta against."""
 
-    def candelta(baserev, rev):
-        """"Whether a delta can be generated between two revisions."""
-
 class ifiledata(interfaceutil.Interface):
     """Storage interface for data storage of a specific file.
 
@@ -536,6 +618,30 @@
         revision data.
         """
 
+    def emitrevisiondeltas(requests):
+        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
+
+        Given an iterable of objects conforming to the ``irevisiondeltarequest``
+        interface, emits objects conforming to the ``irevisiondelta``
+        interface.
+
+        This method is a generator.
+
+        ``irevisiondelta`` instances should be emitted in the same order as
+        the ``irevisiondeltarequest`` instances that were passed in.
+
+        The emitted objects MUST conform to the corresponding
+        ``irevisiondeltarequest``. Namely, they must respect any requests
+        for building a delta from a specific ``basenode`` if defined.
+
+        When sending deltas, implementations must take into account whether
+        the client has the base delta before encoding a delta against that
+        revision. A revision encountered previously in ``requests`` is
+        always a suitable base revision. An example of a bad delta is a delta
+        against a non-ancestor revision. Another example of a bad delta is a
+        delta against a censored revision.
+        """
+
 class ifilemutation(interfaceutil.Interface):
     """Storage interface for mutation events of a tracked file."""
 
@@ -614,14 +720,6 @@
         TODO this feels revlog centric and could likely be removed.
         """)
 
-    storedeltachains = interfaceutil.Attribute(
-        """Whether the store stores deltas.
-
-        TODO deltachains are revlog centric. This can probably removed
-        once there are better abstractions for obtaining/writing
-        data.
-        """)
-
     _generaldelta = interfaceutil.Attribute(
         """Whether deltas can be against any parent revision.
 
@@ -891,7 +989,13 @@
         """
 
 class imanifestlog(interfaceutil.Interface):
-    """Interface representing a collection of manifest snapshots."""
+    """Interface representing a collection of manifest snapshots.
+
+    Represents the root manifest in a repository.
+
+    Also serves as a means to access nested tree manifests and to cache
+    tree manifests.
+    """
 
     def __getitem__(node):
         """Obtain a manifest instance for a given binary node.
@@ -902,15 +1006,15 @@
         interface.
         """
 
-    def get(dir, node, verify=True):
+    def get(tree, node, verify=True):
         """Retrieve the manifest instance for a given directory and binary node.
 
         ``node`` always refers to the node of the root manifest (which will be
         the only manifest if flat manifests are being used).
 
-        If ``dir`` is the empty string, the root manifest is returned. Otherwise
-        the manifest for the specified directory will be returned (requires
-        tree manifests).
+        If ``tree`` is the empty string, the root manifest is returned.
+        Otherwise the manifest for the specified directory will be returned
+        (requires tree manifests).
 
         If ``verify`` is True, ``LookupError`` is raised if the node is not
         known.
@@ -919,6 +1023,15 @@
         interface.
         """
 
+    def getstorage(tree):
+        """Retrieve an interface to storage for a particular tree.
+
+        If ``tree`` is the empty bytestring, storage for the root manifest will
+        be returned. Otherwise storage for a tree manifest is returned.
+
+        TODO formalize interface for returned object.
+        """
+
     def clearcaches():
         """Clear caches associated with this collection."""
 
@@ -928,22 +1041,6 @@
         Raises ``error.LookupError`` if the node is not known.
         """
 
-    def addgroup(deltas, linkmapper, transaction):
-        """Process a series of deltas for storage.
-
-        ``deltas`` is an iterable of 7-tuples of
-        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
-        to add.
-
-        The ``delta`` field contains ``mpatch`` data to apply to a base
-        revision, identified by ``deltabase``. The base node can be
-        ``nullid``, in which case the header from the delta can be ignored
-        and the delta used as the fulltext.
-
-        Returns a list of nodes that were processed. A node will be in the list
-        even if it existed in the store previously.
-        """
-
 class completelocalrepository(interfaceutil.Interface):
     """Monolithic interface for local repositories.
 
--- a/mercurial/repoview.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/repoview.py	Fri Aug 24 12:55:05 2018 -0700
@@ -128,7 +128,7 @@
             firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
     # protect from nullrev root
     firstmutable = max(0, firstmutable)
-    return frozenset(xrange(firstmutable, len(cl)))
+    return frozenset(pycompat.xrange(firstmutable, len(cl)))
 
 # function to compute filtered set
 #
@@ -210,7 +210,7 @@
         unfichangelog = unfi.changelog
         # bypass call to changelog.method
         unfiindex = unfichangelog.index
-        unfilen = len(unfiindex) - 1
+        unfilen = len(unfiindex)
         unfinode = unfiindex[unfilen - 1][7]
 
         revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
--- a/mercurial/revlog.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/revlog.py	Fri Aug 24 12:55:05 2018 -0700
@@ -27,6 +27,7 @@
 from .node import (
     bin,
     hex,
+    nullhex,
     nullid,
     nullrev,
     wdirfilenodeids,
@@ -44,10 +45,12 @@
     mdiff,
     policy,
     pycompat,
+    repository,
     templatefilters,
     util,
 )
 from .utils import (
+    interfaceutil,
     stringutil,
 )
 
@@ -91,6 +94,7 @@
 
 RevlogError = error.RevlogError
 LookupError = error.LookupError
+AmbiguousPrefixLookupError = error.AmbiguousPrefixLookupError
 CensoredNodeError = error.CensoredNodeError
 ProgrammingError = error.ProgrammingError
 
@@ -605,6 +609,7 @@
     chainbase = attr.ib()
     chainlen = attr.ib()
     compresseddeltalen = attr.ib()
+    snapshotdepth = attr.ib()
 
 class _deltacomputer(object):
     def __init__(self, revlog):
@@ -622,7 +627,7 @@
         p1r, p2r = revlog.rev(p1), revlog.rev(p2)
 
         # should we try to build a delta?
-        if prev != nullrev and revlog.storedeltachains:
+        if prev != nullrev and revlog._storedeltachains:
             tested = set()
             # This condition is true most of the time when processing
             # changegroup data into a generaldelta repo. The only time it
@@ -735,8 +740,21 @@
         chainlen, compresseddeltalen = revlog._chaininfo(base)
         chainlen += 1
         compresseddeltalen += deltalen
+
+        revlog = self.revlog
+        snapshotdepth = None
+        if deltabase == nullrev:
+            snapshotdepth = 0
+        elif revlog._sparserevlog and revlog.issnapshot(deltabase):
+            # A delta chain should always be one full snapshot,
+            # zero or more semi-snapshots, and zero or more deltas
+            p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
+            if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
+                snapshotdepth = len(revlog._deltachain(deltabase)[0])
+
         return _deltainfo(dist, deltalen, (header, data), deltabase,
-                         chainbase, chainlen, compresseddeltalen)
+                          chainbase, chainlen, compresseddeltalen,
+                          snapshotdepth)
 
     def finddeltainfo(self, revinfo, fh):
         """Find an acceptable delta against a candidate revision
@@ -748,15 +766,32 @@
         Returns the first acceptable candidate revision, as ordered by
         _getcandidaterevs
         """
+        if not revinfo.textlen:
+            return None # empty file do not need delta
+
         cachedelta = revinfo.cachedelta
         p1 = revinfo.p1
         p2 = revinfo.p2
         revlog = self.revlog
 
+        deltalength = self.revlog.length
+        deltaparent = self.revlog.deltaparent
+
         deltainfo = None
+        deltas_limit = revinfo.textlen * LIMIT_DELTA2TEXT
         for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
+            # filter out delta base that will never produce good delta
+            candidaterevs = [r for r in candidaterevs
+                             if self.revlog.length(r) <= deltas_limit]
             nominateddeltas = []
             for candidaterev in candidaterevs:
+                # skip over empty deltas (no need to include them in a chain)
+                while candidaterev != nullrev and not deltalength(candidaterev):
+                    candidaterev = deltaparent(candidaterev)
+                # no need to try a delta against nullid, this will be handled
+                # by fulltext later.
+                if candidaterev == nullrev:
+                    continue
                 # no delta for rawtext-changing revs (see "candelta" for why)
                 if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
                     continue
@@ -788,6 +823,19 @@
     cachedelta = attr.ib()
     flags = attr.ib()
 
+@interfaceutil.implementer(repository.irevisiondelta)
+@attr.s(slots=True, frozen=True)
+class revlogrevisiondelta(object):
+    node = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    linknode = attr.ib()
+    flags = attr.ib()
+    baserevisionsize = attr.ib()
+    revision = attr.ib()
+    delta = attr.ib()
+
 # index v0:
 #  4 bytes: offset
 #  4 bytes: compressed length
@@ -800,6 +848,15 @@
 indexformatv0_pack = indexformatv0.pack
 indexformatv0_unpack = indexformatv0.unpack
 
+class revlogoldindex(list):
+    def __getitem__(self, i):
+        if i == -1:
+            return (0, 0, 0, -1, -1, -1, -1, nullid)
+        return list.__getitem__(self, i)
+
+# maximum <delta-chain-data>/<revision-text-length> ratio
+LIMIT_DELTA2TEXT = 2
+
 class revlogoldio(object):
     def __init__(self):
         self.size = indexformatv0.size
@@ -821,10 +878,7 @@
             nodemap[e[6]] = n
             n += 1
 
-        # add the magic null revision at -1
-        index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
-        return index, nodemap, None
+        return revlogoldindex(index), nodemap, None
 
     def packentry(self, entry, node, version, rev):
         if gettype(entry[0]):
@@ -1021,7 +1075,7 @@
             raise RevlogError(_('unknown version (%d) in revlog %s') %
                               (fmt, self.indexfile))
 
-        self.storedeltachains = True
+        self._storedeltachains = True
 
         self._io = revlogio()
         if self.version == REVLOGV0:
@@ -1071,27 +1125,33 @@
                 yield fp
 
     def tip(self):
-        return self.node(len(self.index) - 2)
+        return self.node(len(self.index) - 1)
     def __contains__(self, rev):
         return 0 <= rev < len(self)
     def __len__(self):
-        return len(self.index) - 1
+        return len(self.index)
     def __iter__(self):
-        return iter(xrange(len(self)))
+        return iter(pycompat.xrange(len(self)))
     def revs(self, start=0, stop=None):
         """iterate over all rev in this revlog (from start to stop)"""
         step = 1
+        length = len(self)
         if stop is not None:
             if start > stop:
                 step = -1
             stop += step
+            if stop > length:
+                stop = length
         else:
-            stop = len(self)
-        return xrange(start, stop, step)
+            stop = length
+        return pycompat.xrange(start, stop, step)
 
     @util.propertycache
     def nodemap(self):
-        self.rev(self.node(0))
+        if self.index:
+            # populate mapping down to the initial node
+            node0 = self.index[0][7]  # get around changelog filtering
+            self.rev(node0)
         return self._nodecache
 
     def hasnode(self, node):
@@ -1141,10 +1201,10 @@
             i = self.index
             p = self._nodepos
             if p is None:
-                p = len(i) - 2
+                p = len(i) - 1
             else:
                 assert p < len(i)
-            for r in xrange(p, -1, -1):
+            for r in pycompat.xrange(p, -1, -1):
                 v = i[r][7]
                 n[v] = r
                 if v == node:
@@ -1711,11 +1771,6 @@
         a, b = self.rev(a), self.rev(b)
         return self.isancestorrev(a, b)
 
-    def descendant(self, a, b):
-        msg = 'revlog.descendant is deprecated, use revlog.isancestorrev'
-        util.nouideprecwarn(msg, '4.7')
-        return self.isancestorrev(a, b)
-
     def isancestorrev(self, a, b):
         """return True if revision a is an ancestor of revision b
 
@@ -1796,8 +1851,8 @@
             # parsers.c radix tree lookup gave multiple matches
             # fast path: for unfiltered changelog, radix tree is accurate
             if not getattr(self, 'filteredrevs', None):
-                raise LookupError(id, self.indexfile,
-                                  _('ambiguous identifier'))
+                raise AmbiguousPrefixLookupError(id, self.indexfile,
+                                                 _('ambiguous identifier'))
             # fall through to slow path that filters hidden revisions
         except (AttributeError, ValueError):
             # we are pure python, or key was too short to search radix tree
@@ -1814,12 +1869,14 @@
                 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                 nl = [n for n in nl if hex(n).startswith(id) and
                       self.hasnode(n)]
+                if nullhex.startswith(id):
+                    nl.append(nullid)
                 if len(nl) > 0:
                     if len(nl) == 1 and not maybewdir:
                         self._pcache[id] = nl[0]
                         return nl[0]
-                    raise LookupError(id, self.indexfile,
-                                      _('ambiguous identifier'))
+                    raise AmbiguousPrefixLookupError(id, self.indexfile,
+                                                     _('ambiguous identifier'))
                 if maybewdir:
                     raise error.WdirUnsupported
                 return None
@@ -2070,6 +2127,25 @@
         else:
             return rev - 1
 
+    def issnapshot(self, rev):
+        """tells whether rev is a snapshot
+        """
+        if rev == nullrev:
+            return True
+        deltap = self.deltaparent(rev)
+        if deltap == nullrev:
+            return True
+        p1, p2 = self.parentrevs(rev)
+        if deltap in (p1, p2):
+            return False
+        return self.issnapshot(deltap)
+
+    def snapshotdepth(self, rev):
+        """number of snapshots in the chain before this one"""
+        if not self.issnapshot(rev):
+            raise ProgrammingError('revision %d not a snapshot')
+        return len(self._deltachain(rev)[0]) - 1
+
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions
 
@@ -2254,7 +2330,9 @@
         revlog has grown too large to be an inline revlog, it will convert it
         to use multiple index and data files.
         """
-        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
+        tiprev = len(self) - 1
+        if (not self._inline or
+            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
             return
 
         trinfo = tr.find(self.indexfile)
@@ -2268,7 +2346,7 @@
         else:
             # revlog was stripped at start of transaction, use all leftover data
             trindex = len(self) - 1
-            dataoff = self.end(-2)
+            dataoff = self.end(tiprev)
 
         tr.add(self.datafile, dataoff)
 
@@ -2454,6 +2532,11 @@
             else:
                 deltachain = []
 
+            # search for the first non-snapshot revision
+            for idx, r in enumerate(deltachain):
+                if not self.issnapshot(r):
+                    break
+            deltachain = deltachain[idx:]
             chunks = _slicechunk(self, deltachain, deltainfo)
             all_span = [_segmentspan(self, revs, deltainfo) for revs in chunks]
             distance = max(all_span)
@@ -2471,9 +2554,45 @@
             # certain size. Be also apply this tradeoff here and relax span
             # constraint for small enought content.
             maxdist = self._srmingapsize
-        if (distance > maxdist or deltainfo.deltalen > textlen or
-            deltainfo.compresseddeltalen > textlen * 2 or
-            (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
+
+        # Bad delta from read span:
+        #
+        #   If the span of data read is larger than the maximum allowed.
+        if maxdist < distance:
+            return False
+
+        # Bad delta from new delta size:
+        #
+        #   If the delta size is larger than the target text, storing the
+        #   delta will be inefficient.
+        if textlen < deltainfo.deltalen:
+            return False
+
+        # Bad delta from cumulated payload size:
+        #
+        #   If the sum of the deltas gets larger than K * the target text length.
+        if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
+            return False
+
+        # Bad delta from chain length:
+        #
+        #   If the number of delta in the chain gets too high.
+        if self._maxchainlen and  self._maxchainlen < deltainfo.chainlen:
+            return False
+
+        # bad delta from intermediate snapshot size limit
+        #
+        #   If an intermediate snapshot size is higher than the limit.  The
+        #   limit exists to prevent endless chains of intermediate deltas
+        #   from being created.
+        if (deltainfo.snapshotdepth is not None and
+                (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen):
+            return False
+
+        # bad delta if new intermediate snapshot is larger than the previous
+        # snapshot
+        if (deltainfo.snapshotdepth
+                and self.length(deltainfo.base) < deltainfo.deltalen):
             return False
 
         return True
@@ -2550,14 +2669,14 @@
 
         e = (offset_type(offset, flags), l, textlen,
              base, link, p1r, p2r, node)
-        self.index.insert(-1, e)
+        self.index.append(e)
         self.nodemap[node] = curr
 
         entry = self._io.packentry(e, self.node, self.version, curr)
         self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
 
         if alwayscache and rawtext is None:
-            rawtext = deltacomputer._buildtext(revinfo, fh)
+            rawtext = deltacomputer.buildtext(revinfo, fh)
 
         if type(rawtext) == bytes: # only accept immutable objects
             self._cache = (node, curr, rawtext)
@@ -2798,7 +2917,7 @@
         self._cache = None
         self._chaininfocache = {}
         self._chunkclear()
-        for x in xrange(rev, len(self)):
+        for x in pycompat.xrange(rev, len(self)):
             del self.nodemap[self.node(x)]
 
         del self.index[rev:-1]
@@ -2846,6 +2965,87 @@
             res.append(self.datafile)
         return res
 
+    def emitrevisiondeltas(self, requests):
+        frev = self.rev
+
+        prevrev = None
+        for request in requests:
+            node = request.node
+            rev = frev(node)
+
+            if prevrev is None:
+                prevrev = self.index[rev][5]
+
+            # Requesting a full revision.
+            if request.basenode == nullid:
+                baserev = nullrev
+            # Requesting an explicit revision.
+            elif request.basenode is not None:
+                baserev = frev(request.basenode)
+            # Allowing us to choose.
+            else:
+                p1rev, p2rev = self.parentrevs(rev)
+                deltaparentrev = self.deltaparent(rev)
+
+                # Avoid sending full revisions when delta parent is null. Pick
+                # prev in that case. It's tempting to pick p1 in this case, as
+                # p1 will be smaller in the common case. However, computing a
+                # delta against p1 may require resolving the raw text of p1,
+                # which could be expensive. The revlog caches should have prev
+                # cached, meaning less CPU for delta generation. There is
+                # likely room to add a flag and/or config option to control this
+                # behavior.
+                if deltaparentrev == nullrev and self._storedeltachains:
+                    baserev = prevrev
+
+                # Revlog is configured to use full snapshot for a reason.
+                # Stick to full snapshot.
+                elif deltaparentrev == nullrev:
+                    baserev = nullrev
+
+                # Pick previous when we can't be sure the base is available
+                # on consumer.
+                elif deltaparentrev not in (p1rev, p2rev, prevrev):
+                    baserev = prevrev
+                else:
+                    baserev = deltaparentrev
+
+                if baserev != nullrev and not self.candelta(baserev, rev):
+                    baserev = nullrev
+
+            revision = None
+            delta = None
+            baserevisionsize = None
+
+            if self.iscensored(baserev) or self.iscensored(rev):
+                try:
+                    revision = self.revision(node, raw=True)
+                except error.CensoredNodeError as e:
+                    revision = e.tombstone
+
+                if baserev != nullrev:
+                    baserevisionsize = self.rawsize(baserev)
+
+            elif baserev == nullrev:
+                revision = self.revision(node, raw=True)
+            else:
+                delta = self.revdiff(baserev, rev)
+
+            extraflags = REVIDX_ELLIPSIS if request.ellipsis else 0
+
+            yield revlogrevisiondelta(
+                node=node,
+                p1node=request.p1node,
+                p2node=request.p2node,
+                linknode=request.linknode,
+                basenode=self.node(baserev),
+                flags=self.flags(rev) | extraflags,
+                baserevisionsize=baserevisionsize,
+                revision=revision,
+                delta=delta)
+
+            prevrev = rev
+
     DELTAREUSEALWAYS = 'always'
     DELTAREUSESAMEREVS = 'samerevs'
     DELTAREUSENEVER = 'never'
--- a/mercurial/revset.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/revset.py	Fri Aug 24 12:55:05 2018 -0700
@@ -242,7 +242,7 @@
 
 def listset(repo, subset, *xs, **opts):
     raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see hg help "revsets.x or y"'))
+                           hint=_('see \'hg help "revsets.x or y"\''))
 
 def keyvaluepair(repo, subset, k, v, order):
     raise error.ParseError(_("can't use a key-value pair in this context"))
--- a/mercurial/revsetlang.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/revsetlang.py	Fri Aug 24 12:55:05 2018 -0700
@@ -63,7 +63,7 @@
 _syminitletters = set(pycompat.iterbytestr(
     string.ascii_letters.encode('ascii') +
     string.digits.encode('ascii') +
-    '._@')) | set(map(pycompat.bytechr, xrange(128, 256)))
+    '._@')) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))
 
 # default set of valid characters for non-initial letters of symbols
 _symletters = _syminitletters | set(pycompat.iterbytestr('-/'))
--- a/mercurial/scmutil.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/scmutil.py	Fri Aug 24 12:55:05 2018 -0700
@@ -34,6 +34,7 @@
     obsutil,
     pathutil,
     phases,
+    policy,
     pycompat,
     revsetlang,
     similar,
@@ -52,6 +53,8 @@
 else:
     from . import scmposix as scmplatform
 
+parsers = policy.importmod(r'parsers')
+
 termsize = scmplatform.termsize
 
 class status(tuple):
@@ -169,64 +172,64 @@
             reason = _('timed out waiting for lock held by %r') % inst.locker
         else:
             reason = _('lock held by %r') % inst.locker
-        ui.warn(_("abort: %s: %s\n")
-                % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
+        ui.error(_("abort: %s: %s\n") % (
+            inst.desc or stringutil.forcebytestr(inst.filename), reason))
         if not inst.locker:
-            ui.warn(_("(lock might be very busy)\n"))
+            ui.error(_("(lock might be very busy)\n"))
     except error.LockUnavailable as inst:
-        ui.warn(_("abort: could not lock %s: %s\n") %
-                (inst.desc or stringutil.forcebytestr(inst.filename),
-                 encoding.strtolocal(inst.strerror)))
+        ui.error(_("abort: could not lock %s: %s\n") %
+                 (inst.desc or stringutil.forcebytestr(inst.filename),
+                  encoding.strtolocal(inst.strerror)))
     except error.OutOfBandError as inst:
         if inst.args:
             msg = _("abort: remote error:\n")
         else:
             msg = _("abort: remote error\n")
-        ui.warn(msg)
+        ui.error(msg)
         if inst.args:
-            ui.warn(''.join(inst.args))
+            ui.error(''.join(inst.args))
         if inst.hint:
-            ui.warn('(%s)\n' % inst.hint)
+            ui.error('(%s)\n' % inst.hint)
     except error.RepoError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
+        ui.error(_("abort: %s!\n") % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
     except error.ResponseError as inst:
-        ui.warn(_("abort: %s") % inst.args[0])
+        ui.error(_("abort: %s") % inst.args[0])
         msg = inst.args[1]
         if isinstance(msg, type(u'')):
             msg = pycompat.sysbytes(msg)
         if not isinstance(msg, bytes):
-            ui.warn(" %r\n" % (msg,))
+            ui.error(" %r\n" % (msg,))
         elif not msg:
-            ui.warn(_(" empty string\n"))
+            ui.error(_(" empty string\n"))
         else:
-            ui.warn("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
+            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
     except error.CensoredNodeError as inst:
-        ui.warn(_("abort: file censored %s!\n") % inst)
+        ui.error(_("abort: file censored %s!\n") % inst)
     except error.RevlogError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
+        ui.error(_("abort: %s!\n") % inst)
     except error.InterventionRequired as inst:
-        ui.warn("%s\n" % inst)
+        ui.error("%s\n" % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
         return 1
     except error.WdirUnsupported:
-        ui.warn(_("abort: working directory revision cannot be specified\n"))
+        ui.error(_("abort: working directory revision cannot be specified\n"))
     except error.Abort as inst:
-        ui.warn(_("abort: %s\n") % inst)
+        ui.error(_("abort: %s\n") % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
     except ImportError as inst:
-        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
+        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
         m = stringutil.forcebytestr(inst).split()[-1]
         if m in "mpatch bdiff".split():
-            ui.warn(_("(did you forget to compile extensions?)\n"))
+            ui.error(_("(did you forget to compile extensions?)\n"))
         elif m in "zlib".split():
-            ui.warn(_("(is your Python install correct?)\n"))
+            ui.error(_("(is your Python install correct?)\n"))
     except IOError as inst:
         if util.safehasattr(inst, "code"):
-            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
+            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
         elif util.safehasattr(inst, "reason"):
             try: # usually it is in the form (errno, strerror)
                 reason = inst.reason.args[1]
@@ -236,34 +239,34 @@
             if isinstance(reason, pycompat.unicode):
                 # SSLError of Python 2.7.9 contains a unicode
                 reason = encoding.unitolocal(reason)
-            ui.warn(_("abort: error: %s\n") % reason)
+            ui.error(_("abort: error: %s\n") % reason)
         elif (util.safehasattr(inst, "args")
               and inst.args and inst.args[0] == errno.EPIPE):
             pass
         elif getattr(inst, "strerror", None):
             if getattr(inst, "filename", None):
-                ui.warn(_("abort: %s: %s\n") % (
+                ui.error(_("abort: %s: %s\n") % (
                     encoding.strtolocal(inst.strerror),
                     stringutil.forcebytestr(inst.filename)))
             else:
-                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
+                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
         else:
             raise
     except OSError as inst:
         if getattr(inst, "filename", None) is not None:
-            ui.warn(_("abort: %s: '%s'\n") % (
+            ui.error(_("abort: %s: '%s'\n") % (
                 encoding.strtolocal(inst.strerror),
                 stringutil.forcebytestr(inst.filename)))
         else:
-            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
+            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
     except MemoryError:
-        ui.warn(_("abort: out of memory\n"))
+        ui.error(_("abort: out of memory\n"))
     except SystemExit as inst:
         # Commands shouldn't sys.exit directly, but give a return code.
         # Just in case catch this and and pass exit code to caller.
         return inst.code
     except socket.error as inst:
-        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
+        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
 
     return -1
 
@@ -437,41 +440,111 @@
     return '%d:%s' % (rev, hexfunc(node))
 
 def resolvehexnodeidprefix(repo, prefix):
-    # Uses unfiltered repo because it's faster when prefix is ambiguous/
-    # This matches the shortesthexnodeidprefix() function below.
-    node = repo.unfiltered().changelog._partialmatch(prefix)
+    if (prefix.startswith('x') and
+        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
+        prefix = prefix[1:]
+    try:
+        # Uses unfiltered repo because it's faster when prefix is ambiguous.
+        # This matches the shortesthexnodeidprefix() function below.
+        node = repo.unfiltered().changelog._partialmatch(prefix)
+    except error.AmbiguousPrefixLookupError:
+        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+        if revset:
+            # Clear config to avoid infinite recursion
+            configoverrides = {('experimental',
+                                'revisions.disambiguatewithin'): None}
+            with repo.ui.configoverride(configoverrides):
+                revs = repo.anyrevs([revset], user=True)
+                matches = []
+                for rev in revs:
+                    node = repo.changelog.node(rev)
+                    if hex(node).startswith(prefix):
+                        matches.append(node)
+                if len(matches) == 1:
+                    return matches[0]
+        raise
     if node is None:
         return
     repo.changelog.rev(node)  # make sure node isn't filtered
     return node
 
-def shortesthexnodeidprefix(repo, node, minlength=1):
-    """Find the shortest unambiguous prefix that matches hexnode."""
+def mayberevnum(repo, prefix):
+    """Checks if the given prefix may be mistaken for a revision number"""
+    try:
+        i = int(prefix)
+        # if we are a pure int, then starting with zero will not be
+        # confused as a rev; or, obviously, if the int is larger
+        # than the value of the tip rev
+        if prefix[0:1] == b'0' or i > len(repo):
+            return False
+        return True
+    except ValueError:
+        return False
+
+def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
+    """Find the shortest unambiguous prefix that matches hexnode.
+
+    If "cache" is not None, it must be a dictionary that can be used for
+    caching between calls to this method.
+    """
     # _partialmatch() of filtered changelog could take O(len(repo)) time,
     # which would be unacceptably slow. so we look for hash collision in
     # unfiltered space, which means some hashes may be slightly longer.
-    cl = repo.unfiltered().changelog
-
-    def isrev(prefix):
-        try:
-            i = int(prefix)
-            # if we are a pure int, then starting with zero will not be
-            # confused as a rev; or, obviously, if the int is larger
-            # than the value of the tip rev
-            if prefix[0:1] == b'0' or i > len(cl):
-                return False
-            return True
-        except ValueError:
-            return False
 
     def disambiguate(prefix):
         """Disambiguate against revnums."""
+        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
+            if mayberevnum(repo, prefix):
+                return 'x' + prefix
+            else:
+                return prefix
+
         hexnode = hex(node)
         for length in range(len(prefix), len(hexnode) + 1):
             prefix = hexnode[:length]
-            if not isrev(prefix):
+            if not mayberevnum(repo, prefix):
                 return prefix
 
+    cl = repo.unfiltered().changelog
+    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+    if revset:
+        revs = None
+        if cache is not None:
+            revs = cache.get('disambiguationrevset')
+        if revs is None:
+            revs = repo.anyrevs([revset], user=True)
+            if cache is not None:
+                cache['disambiguationrevset'] = revs
+        if cl.rev(node) in revs:
+            hexnode = hex(node)
+            nodetree = None
+            if cache is not None:
+                nodetree = cache.get('disambiguationnodetree')
+            if not nodetree:
+                try:
+                    nodetree = parsers.nodetree(cl.index, len(revs))
+                except AttributeError:
+                    # no native nodetree
+                    pass
+                else:
+                    for r in revs:
+                        nodetree.insert(r)
+                    if cache is not None:
+                        cache['disambiguationnodetree'] = nodetree
+            if nodetree is not None:
+                length = max(nodetree.shortest(node), minlength)
+                prefix = hexnode[:length]
+                return disambiguate(prefix)
+            for length in range(minlength, len(hexnode) + 1):
+                matches = []
+                prefix = hexnode[:length]
+                for rev in revs:
+                    otherhexnode = repo[rev].hex()
+                    if prefix == otherhexnode[:length]:
+                        matches.append(otherhexnode)
+                if len(matches) == 1:
+                    return disambiguate(prefix)
+
     try:
         return disambiguate(cl.shortest(node, minlength))
     except error.LookupError:
@@ -480,8 +553,8 @@
 def isrevsymbol(repo, symbol):
     """Checks if a symbol exists in the repo.
 
-    See revsymbol() for details. Raises error.LookupError if the symbol is an
-    ambiguous nodeid prefix.
+    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
+    symbol is an ambiguous nodeid prefix.
     """
     try:
         revsymbol(repo, symbol)
@@ -780,7 +853,7 @@
         return self._revcontains(self._torev(node))
 
 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
-                 fixphase=False, targetphase=None):
+                 fixphase=False, targetphase=None, backup=True):
     """do common cleanups when old nodes are replaced by new nodes
 
     That includes writing obsmarkers or stripping nodes, and moving bookmarks.
@@ -905,7 +978,8 @@
             from . import repair # avoid import cycle
             tostrip = list(replacements)
             if tostrip:
-                repair.delayedstrip(repo.ui, repo, tostrip, operation)
+                repair.delayedstrip(repo.ui, repo, tostrip, operation,
+                                    backup=backup)
 
 def addremove(repo, matcher, prefix, opts=None):
     if opts is None:
@@ -952,9 +1026,11 @@
         if repo.ui.verbose or not m.exact(abs):
             if abs in unknownset:
                 status = _('adding %s\n') % m.uipath(abs)
+                label = 'addremove.added'
             else:
                 status = _('removing %s\n') % m.uipath(abs)
-            repo.ui.status(status)
+                label = 'addremove.removed'
+            repo.ui.status(status, label=label)
 
     renames = _findrenames(repo, m, added + unknown, removed + deleted,
                            similarity)
@@ -1542,7 +1618,7 @@
         @reportsummary
         def reportnewcs(repo, tr):
             """Report the range of new revisions pulled/unbundled."""
-            newrevs = tr.changes.get('revs', xrange(0, 0))
+            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
             if not newrevs:
                 return
 
@@ -1565,7 +1641,7 @@
             """Report statistics of phase changes for changesets pre-existing
             pull/unbundle.
             """
-            newrevs = tr.changes.get('revs', xrange(0, 0))
+            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
             phasetracking = tr.changes.get('phases', {})
             if not phasetracking:
                 return
--- a/mercurial/server.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/server.py	Fri Aug 24 12:55:05 2018 -0700
@@ -79,7 +79,7 @@
             runargs.append('--daemon-postexec=unlink:%s' % lockpath)
             # Don't pass --cwd to the child process, because we've already
             # changed directory.
-            for i in xrange(1, len(runargs)):
+            for i in pycompat.xrange(1, len(runargs)):
                 if runargs[i].startswith('--cwd='):
                     del runargs[i]
                     break
--- a/mercurial/setdiscovery.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/setdiscovery.py	Fri Aug 24 12:55:05 2018 -0700
@@ -51,30 +51,25 @@
     nullrev,
 )
 from . import (
-    dagutil,
     error,
     util,
 )
 
-def _updatesample(dag, nodes, sample, quicksamplesize=0):
+def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
     """update an existing sample to match the expected size
 
-    The sample is updated with nodes exponentially distant from each head of the
-    <nodes> set. (H~1, H~2, H~4, H~8, etc).
+    The sample is updated with revs exponentially distant from each head of the
+    <revs> set. (H~1, H~2, H~4, H~8, etc).
 
     If a target size is specified, the sampling will stop once this size is
-    reached. Otherwise sampling will happen until roots of the <nodes> set are
+    reached. Otherwise sampling will happen until roots of the <revs> set are
     reached.
 
-    :dag: a dag object from dagutil
-    :nodes:  set of nodes we want to discover (if None, assume the whole dag)
+    :revs:  set of revs we want to discover (if None, assume the whole dag)
+    :heads: set of DAG head revs
     :sample: a sample to update
+    :parentfn: a callable to resolve parents for a revision
     :quicksamplesize: optional target size of the sample"""
-    # if nodes is empty we scan the entire graph
-    if nodes:
-        heads = dag.headsetofconnecteds(nodes)
-    else:
-        heads = dag.heads()
     dist = {}
     visit = collections.deque(heads)
     seen = set()
@@ -91,37 +86,69 @@
             if quicksamplesize and (len(sample) >= quicksamplesize):
                 return
         seen.add(curr)
-        for p in dag.parents(curr):
-            if not nodes or p in nodes:
+
+        for p in parentfn(curr):
+            if p != nullrev and (not revs or p in revs):
                 dist.setdefault(p, d + 1)
                 visit.append(p)
 
-def _takequicksample(dag, nodes, size):
+def _takequicksample(repo, headrevs, revs, size):
     """takes a quick sample of size <size>
 
     It is meant for initial sampling and focuses on querying heads and close
     ancestors of heads.
 
     :dag: a dag object
-    :nodes: set of nodes to discover
+    :headrevs: set of head revisions in local DAG to consider
+    :revs: set of revs to discover
     :size: the maximum size of the sample"""
-    sample = dag.headsetofconnecteds(nodes)
+    sample = set(repo.revs('heads(%ld)', revs))
+
     if len(sample) >= size:
         return _limitsample(sample, size)
-    _updatesample(dag, None, sample, quicksamplesize=size)
+
+    _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
+                  quicksamplesize=size)
     return sample
 
-def _takefullsample(dag, nodes, size):
-    sample = dag.headsetofconnecteds(nodes)
+def _takefullsample(repo, headrevs, revs, size):
+    sample = set(repo.revs('heads(%ld)', revs))
+
     # update from heads
-    _updatesample(dag, nodes, sample)
+    revsheads = set(repo.revs('heads(%ld)', revs))
+    _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)
+
     # update from roots
-    _updatesample(dag.inverse(), nodes, sample)
+    revsroots = set(repo.revs('roots(%ld)', revs))
+
+    # _updatesample() essentially does iteration over revisions to look up
+    # their children. This lookup is expensive and doing it in a loop is
+    # quadratic. We precompute the children for all relevant revisions and
+    # make the lookup in _updatesample() a simple dict lookup.
+    #
+    # Because this function can be called multiple times during discovery, we
+    # may still perform redundant work and there is room to optimize this by
+    # keeping a persistent cache of children across invocations.
+    children = {}
+
+    parentrevs = repo.changelog.parentrevs
+    for rev in repo.changelog.revs(start=min(revsroots)):
+        # Always ensure revision has an entry so we don't need to worry about
+        # missing keys.
+        children.setdefault(rev, [])
+
+        for prev in parentrevs(rev):
+            if prev == nullrev:
+                continue
+
+            children.setdefault(prev, []).append(rev)
+
+    _updatesample(revs, revsroots, sample, children.__getitem__)
     assert sample
     sample = _limitsample(sample, size)
     if len(sample) < size:
         more = size - len(sample)
-        sample.update(random.sample(list(nodes - sample), more))
+        sample.update(random.sample(list(revs - sample), more))
     return sample
 
 def _limitsample(sample, desiredlen):
@@ -142,16 +169,17 @@
 
     roundtrips = 0
     cl = local.changelog
-    localsubset = None
+    clnode = cl.node
+    clrev = cl.rev
+
     if ancestorsof is not None:
-        rev = local.changelog.rev
-        localsubset = [rev(n) for n in ancestorsof]
-    dag = dagutil.revlogdag(cl, localsubset=localsubset)
+        ownheads = [clrev(n) for n in ancestorsof]
+    else:
+        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
 
     # early exit if we know all the specified remote heads already
     ui.debug("query 1; heads\n")
     roundtrips += 1
-    ownheads = dag.heads()
     sample = _limitsample(ownheads, initialsamplesize)
     # indices between sample and externalized version must match
     sample = list(sample)
@@ -159,7 +187,7 @@
     with remote.commandexecutor() as e:
         fheads = e.callcommand('heads', {})
         fknown = e.callcommand('known', {
-            'nodes': dag.externalizeall(sample),
+            'nodes': [clnode(r) for r in sample],
         })
 
     srvheadhashes, yesno = fheads.result(), fknown.result()
@@ -173,15 +201,25 @@
     # compatibility reasons)
     ui.status(_("searching for changes\n"))
 
-    srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
+    srvheads = []
+    for node in srvheadhashes:
+        if node == nullid:
+            continue
+
+        try:
+            srvheads.append(clrev(node))
+        # Catches unknown and filtered nodes.
+        except error.LookupError:
+            continue
+
     if len(srvheads) == len(srvheadhashes):
         ui.debug("all remote heads known locally\n")
-        return (srvheadhashes, False, srvheadhashes,)
+        return srvheadhashes, False, srvheadhashes
 
     if len(sample) == len(ownheads) and all(yesno):
         ui.note(_("all local heads known remotely\n"))
-        ownheadhashes = dag.externalizeall(ownheads)
-        return (ownheadhashes, True, srvheadhashes,)
+        ownheadhashes = [clnode(r) for r in ownheads]
+        return ownheadhashes, True, srvheadhashes
 
     # full blown discovery
 
@@ -202,7 +240,12 @@
 
         if sample:
             missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
-            missing.update(dag.descendantset(missinginsample, missing))
+
+            if missing:
+                missing.update(local.revs('descendants(%ld) - descendants(%ld)',
+                                          missinginsample, missing))
+            else:
+                missing.update(local.revs('descendants(%ld)', missinginsample))
 
             undecided.difference_update(missing)
 
@@ -224,7 +267,7 @@
         if len(undecided) < targetsize:
             sample = list(undecided)
         else:
-            sample = samplefunc(dag, undecided, targetsize)
+            sample = samplefunc(local, ownheads, undecided, targetsize)
 
         roundtrips += 1
         progress.update(roundtrips)
@@ -235,7 +278,7 @@
 
         with remote.commandexecutor() as e:
             yesno = e.callcommand('known', {
-                'nodes': dag.externalizeall(sample),
+                'nodes': [clnode(r) for r in sample],
             }).result()
 
         full = True
@@ -247,10 +290,8 @@
 
     # heads(common) == heads(common.bases) since common represents common.bases
     # and all its ancestors
-    result = dag.headsetofconnecteds(common.bases)
-    # common.bases can include nullrev, but our contract requires us to not
-    # return any heads in that case, so discard that
-    result.discard(nullrev)
+    # The presence of nullrev will confuse heads(). So filter it out.
+    result = set(local.revs('heads(%ld)', common.bases - {nullrev}))
     elapsed = util.timer() - start
     progress.complete()
     ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
@@ -268,4 +309,5 @@
         return ({nullid}, True, srvheadhashes,)
 
     anyincoming = (srvheadhashes != [nullid])
-    return dag.externalizeall(result), anyincoming, srvheadhashes
+    result = {clnode(r) for r in result}
+    return result, anyincoming, srvheadhashes
--- a/mercurial/simplemerge.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/simplemerge.py	Fri Aug 24 12:55:05 2018 -0700
@@ -58,7 +58,8 @@
     """
     if (aend - astart) != (bend - bstart):
         return False
-    for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
+    for ia, ib in zip(pycompat.xrange(astart, aend),
+                      pycompat.xrange(bstart, bend)):
         if a[ia] != b[ib]:
             return False
     else:
--- a/mercurial/smartset.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/smartset.py	Fri Aug 24 12:55:05 2018 -0700
@@ -152,11 +152,11 @@
         # but start > stop is allowed, which should be an empty set.
         ys = []
         it = iter(self)
-        for x in xrange(start):
+        for x in pycompat.xrange(start):
             y = next(it, None)
             if y is None:
                 break
-        for x in xrange(stop - start):
+        for x in pycompat.xrange(stop - start):
             y = next(it, None)
             if y is None:
                 break
@@ -1005,13 +1005,13 @@
             return self.fastdesc()
 
     def fastasc(self):
-        iterrange = xrange(self._start, self._end)
+        iterrange = pycompat.xrange(self._start, self._end)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)
 
     def fastdesc(self):
-        iterrange = xrange(self._end - 1, self._start - 1, -1)
+        iterrange = pycompat.xrange(self._end - 1, self._start - 1, -1)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)
--- a/mercurial/sparse.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/sparse.py	Fri Aug 24 12:55:05 2018 -0700
@@ -31,9 +31,11 @@
 # a per-repo option, possibly a repo requirement.
 enabled = False
 
-def parseconfig(ui, raw):
+def parseconfig(ui, raw, action):
     """Parse sparse config file content.
 
+    action is the command which is triggering this read, can be narrow, sparse
+
     Returns a tuple of includes, excludes, and profiles.
     """
     includes = set()
@@ -54,8 +56,8 @@
         elif line == '[include]':
             if havesection and current != includes:
                 # TODO pass filename into this API so we can report it.
-                raise error.Abort(_('sparse config cannot have includes ' +
-                                    'after excludes'))
+                raise error.Abort(_('%(action)s config cannot have includes '
+                                    'after excludes') % {'action': action})
             havesection = True
             current = includes
             continue
@@ -64,14 +66,16 @@
             current = excludes
         elif line:
             if current is None:
-                raise error.Abort(_('sparse config entry outside of '
-                                    'section: %s') % line,
+                raise error.Abort(_('%(action)s config entry outside of '
+                                    'section: %(line)s')
+                                  % {'action': action, 'line': line},
                                   hint=_('add an [include] or [exclude] line '
                                          'to declare the entry type'))
 
             if line.strip().startswith('/'):
-                ui.warn(_('warning: sparse profile cannot use' +
-                          ' paths starting with /, ignoring %s\n') % line)
+                ui.warn(_('warning: %(action)s profile cannot use'
+                          ' paths starting with /, ignoring %(line)s\n')
+                        % {'action': action, 'line': line})
                 continue
             current.add(line)
 
@@ -102,7 +106,7 @@
         raise error.Abort(_('cannot parse sparse patterns from working '
                             'directory'))
 
-    includes, excludes, profiles = parseconfig(repo.ui, raw)
+    includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
     ctx = repo[rev]
 
     if profiles:
@@ -128,7 +132,7 @@
                     repo.ui.debug(msg)
                 continue
 
-            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw)
+            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw, 'sparse')
             includes.update(pincludes)
             excludes.update(pexcludes)
             profiles.update(subprofs)
@@ -516,7 +520,7 @@
                                 force=False, removing=False):
     """Update the sparse config and working directory state."""
     raw = repo.vfs.tryread('sparse')
-    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw)
+    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, 'sparse')
 
     oldstatus = repo.status()
     oldmatch = matcher(repo)
@@ -556,7 +560,7 @@
     """
     with repo.wlock():
         raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw)
+        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
 
         if not includes and not excludes:
             return
@@ -572,7 +576,7 @@
     with repo.wlock():
         # read current configuration
         raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw)
+        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
         aincludes, aexcludes, aprofiles = activeconfig(repo)
 
         # Import rules on top; only take in rules that are not yet
@@ -582,7 +586,8 @@
             with util.posixfile(util.expandpath(p), mode='rb') as fh:
                 raw = fh.read()
 
-            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw)
+            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw,
+                                                          'sparse')
             oldsize = len(includes) + len(excludes) + len(profiles)
             includes.update(iincludes - aincludes)
             excludes.update(iexcludes - aexcludes)
@@ -615,7 +620,8 @@
     """
     with repo.wlock():
         raw = repo.vfs.tryread('sparse')
-        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw)
+        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw,
+                                                          'sparse')
 
         if reset:
             newinclude = set()
--- a/mercurial/statprof.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/statprof.py	Fri Aug 24 12:55:05 2018 -0700
@@ -356,7 +356,7 @@
             stack = sample.stack
             sites = ['\1'.join([s.path, str(s.lineno), s.function])
                      for s in stack]
-            file.write(time + '\0' + '\0'.join(sites) + '\n')
+            file.write("%s\0%s\n" % (time, '\0'.join(sites)))
 
 def load_data(path):
     lines = open(path, 'r').read().splitlines()
--- a/mercurial/store.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/store.py	Fri Aug 24 12:55:05 2018 -0700
@@ -118,7 +118,7 @@
     def decode(s):
         i = 0
         while i < len(s):
-            for l in xrange(1, 4):
+            for l in pycompat.xrange(1, 4):
                 try:
                     yield dmap[s[i:i + l]]
                     i += l
@@ -127,7 +127,8 @@
                     pass
             else:
                 raise KeyError
-    return (lambda s: ''.join([cmap[s[c:c + 1]] for c in xrange(len(s))]),
+    return (lambda s: ''.join([cmap[s[c:c + 1]]
+                               for c in pycompat.xrange(len(s))]),
             lambda s: ''.join(list(decode(s))))
 
 _encodefname, _decodefname = _buildencodefun()
@@ -159,7 +160,7 @@
     'the~07quick~adshot'
     '''
     xchr = pycompat.bytechr
-    cmap = dict([(xchr(x), xchr(x)) for x in xrange(127)])
+    cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
     for x in _reserved():
         cmap[xchr(x)] = "~%02x" % x
     for x in range(ord("A"), ord("Z") + 1):
@@ -316,8 +317,8 @@
         mode = None
     return mode
 
-_data = ('data meta 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
-         ' phaseroots obsstore')
+_data = ('narrowspec data meta 00manifest.d 00manifest.i'
+         ' 00changelog.d 00changelog.i phaseroots obsstore')
 
 def isrevlog(f, kind, st):
     return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
@@ -545,7 +546,7 @@
                     raise
 
     def copylist(self):
-        d = ('data meta dh fncache phaseroots obsstore'
+        d = ('narrowspec data meta dh fncache phaseroots obsstore'
              ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
         return (['requires', '00changelog.i'] +
                 ['store/' + f for f in d.split()])
--- a/mercurial/streamclone.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/streamclone.py	Fri Aug 24 12:55:05 2018 -0700
@@ -358,7 +358,7 @@
 
         with repo.transaction('clone'):
             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
-                for i in xrange(filecount):
+                for i in pycompat.xrange(filecount):
                     # XXX doesn't support '\n' or '\r' in filenames
                     l = fp.readline()
                     try:
--- a/mercurial/templatefilters.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templatefilters.py	Fri Aug 24 12:55:05 2018 -0700
@@ -119,7 +119,7 @@
             b = b[:len(a)]
         if a == b:
             return a
-        for i in xrange(len(a)):
+        for i in pycompat.xrange(len(a)):
             if a[i] != b[i]:
                 return a[:i]
         return a
@@ -266,7 +266,7 @@
     num_lines = len(lines)
     endswithnewline = text[-1:] == '\n'
     def indenter():
-        for i in xrange(num_lines):
+        for i in pycompat.xrange(num_lines):
             l = lines[i]
             if i and l.strip():
                 yield prefix
--- a/mercurial/templatefuncs.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templatefuncs.py	Fri Aug 24 12:55:05 2018 -0700
@@ -596,7 +596,7 @@
             yield sep
         yield argstr
 
-@templatefunc('shortest(node, minlength=4)', requires={'repo'})
+@templatefunc('shortest(node, minlength=4)', requires={'repo', 'cache'})
 def shortest(context, mapping, args):
     """Obtain the shortest representation of
     a node."""
@@ -629,8 +629,9 @@
             return hexnode
         if not node:
             return hexnode
+    cache = context.resource(mapping, 'cache')
     try:
-        return scmutil.shortesthexnodeidprefix(repo, node, minlength)
+        return scmutil.shortesthexnodeidprefix(repo, node, minlength, cache)
     except error.RepoLookupError:
         return hexnode
 
--- a/mercurial/templatekw.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templatekw.py	Fri Aug 24 12:55:05 2018 -0700
@@ -168,9 +168,8 @@
 
 @templatekeyword('author', requires={'ctx'})
 def showauthor(context, mapping):
-    """String. The unmodified author of the changeset."""
-    ctx = context.resource(mapping, 'ctx')
-    return ctx.user()
+    """Alias for ``{user}``"""
+    return showuser(context, mapping)
 
 @templatekeyword('bisect', requires={'repo', 'ctx'})
 def showbisect(context, mapping):
@@ -293,15 +292,14 @@
                    lambda k: '%s=%s' % (k, stringutil.escapestr(extras[k])))
 
 def _showfilesbystat(context, mapping, name, index):
-    repo = context.resource(mapping, 'repo')
     ctx = context.resource(mapping, 'ctx')
     revcache = context.resource(mapping, 'revcache')
     if 'files' not in revcache:
-        revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
+        revcache['files'] = ctx.p1().status(ctx)[:3]
     files = revcache['files'][index]
     return compatlist(context, mapping, name, files, element='file')
 
-@templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_adds', requires={'ctx', 'revcache'})
 def showfileadds(context, mapping):
     """List of strings. Files added by this changeset."""
     return _showfilesbystat(context, mapping, 'file_add', 1)
@@ -345,12 +343,12 @@
                       key='name', value='source', fmt='%s (%s)',
                       plural='file_copies')
 
-@templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_dels', requires={'ctx', 'revcache'})
 def showfiledels(context, mapping):
     """List of strings. Files removed by this changeset."""
     return _showfilesbystat(context, mapping, 'file_del', 2)
 
-@templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_mods', requires={'ctx', 'revcache'})
 def showfilemods(context, mapping):
     """List of strings. Files modified by this changeset."""
     return _showfilesbystat(context, mapping, 'file_mod', 0)
@@ -758,6 +756,12 @@
     ui = context.resource(mapping, 'ui')
     return ui.termwidth()
 
+@templatekeyword('user', requires={'ctx'})
+def showuser(context, mapping):
+    """String. The unmodified author of the changeset."""
+    ctx = context.resource(mapping, 'ctx')
+    return ctx.user()
+
 @templatekeyword('instabilities', requires={'ctx'})
 def showinstabilities(context, mapping):
     """List of strings. Evolution instabilities affecting the changeset.
--- a/mercurial/templates/map-cmdline.bisect	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templates/map-cmdline.bisect	Fri Aug 24 12:55:05 2018 -0700
@@ -1,10 +1,10 @@
 %include map-cmdline.default
 
 [templates]
-changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}\n'
+changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}\n'
 changeset_quiet = '{lshortbisect} {rev}:{node|short}\n'
-changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
-changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
+changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
 
 # We take the zeroth word in order to omit "(implicit)" in the label
 bisectlabel = ' bisect.{word('0', bisect)}'
--- a/mercurial/templates/map-cmdline.default	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templates/map-cmdline.default	Fri Aug 24 12:55:05 2018 -0700
@@ -2,10 +2,10 @@
 # to replace some keywords with 'lkeyword', for 'labelled keyword'
 
 [templates]
-changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{summary}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{summary}\n'
 changeset_quiet = '{lnode}'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n'
-changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
 
 # File templates
 lfiles = '{if(files,
@@ -54,8 +54,8 @@
 bookmark = '{label("log.bookmark",
                    "bookmark:    {bookmark}")}\n'
 
-user = '{label("log.user",
-               "user:        {author}")}\n'
+luser = '{label("log.user",
+                "user:        {author}")}\n'
 
 summary = '{if(desc|strip, "{label('log.summary',
                                    'summary:     {desc|firstline}')}\n")}'
--- a/mercurial/templates/map-cmdline.phases	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templates/map-cmdline.phases	Fri Aug 24 12:55:05 2018 -0700
@@ -1,5 +1,5 @@
 %include map-cmdline.default
 
 [templates]
-changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{summary}\n'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{summary}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
--- a/mercurial/templates/map-cmdline.status	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templates/map-cmdline.status	Fri Aug 24 12:55:05 2018 -0700
@@ -2,9 +2,9 @@
 
 [templates]
 # Override base templates
-changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}{lfiles}\n'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{description}{lfiles}\n'
-changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{extras}{description}{lfiles}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}{lfiles}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{description}{lfiles}\n'
+changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{extras}{description}{lfiles}\n'
 
 # Override the file templates
 lfiles = '{if(files,
--- a/mercurial/templateutil.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/templateutil.py	Fri Aug 24 12:55:05 2018 -0700
@@ -810,8 +810,9 @@
     return data
 
 def _recursivesymbolblocker(key):
-    def showrecursion(**args):
+    def showrecursion(context, mapping):
         raise error.Abort(_("recursive reference '%s' in template") % key)
+    showrecursion._requires = ()  # mark as new-style templatekw
     return showrecursion
 
 def runsymbol(context, mapping, key, default=''):
@@ -827,12 +828,16 @@
             v = default
     if callable(v) and getattr(v, '_requires', None) is None:
         # old templatekw: expand all keywords and resources
-        # (TODO: deprecate this after porting web template keywords to new API)
+        # (TODO: drop support for old-style functions. 'f._requires = ()'
+        #  can be removed.)
         props = {k: context._resources.lookup(context, mapping, k)
                  for k in context._resources.knownkeys()}
         # pass context to _showcompatlist() through templatekw._showlist()
         props['templ'] = context
         props.update(mapping)
+        ui = props.get('ui')
+        if ui:
+            ui.deprecwarn("old-style template keyword '%s'" % key, '4.8')
         return v(**pycompat.strkwargs(props))
     if callable(v):
         # new templatekw
--- a/mercurial/treediscovery.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/treediscovery.py	Fri Aug 24 12:55:05 2018 -0700
@@ -16,6 +16,7 @@
 )
 from . import (
     error,
+    pycompat,
 )
 
 def findcommonincoming(repo, remote, heads=None, force=False):
@@ -111,7 +112,7 @@
             progress.increment()
             repo.ui.debug("request %d: %s\n" %
                         (reqcnt, " ".join(map(short, r))))
-            for p in xrange(0, len(r), 10):
+            for p in pycompat.xrange(0, len(r), 10):
                 with remote.commandexecutor() as e:
                     branches = e.callcommand('branches', {
                         'nodes': r[p:p + 10],
--- a/mercurial/ui.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/ui.py	Fri Aug 24 12:55:05 2018 -0700
@@ -67,6 +67,9 @@
 update.check = noconflict
 # Show conflicts information in `hg status`
 status.verbose = True
+# Refuse to perform `hg resolve --mark` on files that still have conflict
+# markers
+resolve.mark-check = abort
 
 [diff]
 git = 1
@@ -392,7 +395,7 @@
     def readconfig(self, filename, root=None, trust=False,
                    sections=None, remap=None):
         try:
-            fp = open(filename, u'rb')
+            fp = open(filename, r'rb')
         except IOError:
             if not sections: # ignore unless we were looking for something
                 return
@@ -1420,6 +1423,7 @@
                     return getpass.getpass('')
         except EOFError:
             raise error.ResponseExpected()
+
     def status(self, *msg, **opts):
         '''write status message to output (if ui.quiet is False)
 
@@ -1428,6 +1432,7 @@
         if not self.quiet:
             opts[r'label'] = opts.get(r'label', '') + ' ui.status'
             self.write(*msg, **opts)
+
     def warn(self, *msg, **opts):
         '''write warning message to output (stderr)
 
@@ -1435,6 +1440,15 @@
         '''
         opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
         self.write_err(*msg, **opts)
+
+    def error(self, *msg, **opts):
+        '''write error message to output (stderr)
+
+        This adds an output label of "ui.error".
+        '''
+        opts[r'label'] = opts.get(r'label', '') + ' ui.error'
+        self.write_err(*msg, **opts)
+
     def note(self, *msg, **opts):
         '''write note to output (if ui.verbose is True)
 
@@ -1443,6 +1457,7 @@
         if self.verbose:
             opts[r'label'] = opts.get(r'label', '') + ' ui.note'
             self.write(*msg, **opts)
+
     def debug(self, *msg, **opts):
         '''write debug message to output (if ui.debugflag is True)
 
--- a/mercurial/unionrepo.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/unionrepo.py	Fri Aug 24 12:55:05 2018 -0700
@@ -73,7 +73,7 @@
             # I have no idea if csize is valid in the base revlog context.
             e = (flags, None, rsize, base,
                  link, self.rev(p1node), self.rev(p2node), node)
-            self.index.insert(-1, e)
+            self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
--- a/mercurial/upgrade.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/upgrade.py	Fri Aug 24 12:55:05 2018 -0700
@@ -450,7 +450,7 @@
         return changelog.changelog(repo.svfs)
     elif path.endswith('00manifest.i'):
         mandir = path[:-len('00manifest.i')]
-        return manifest.manifestrevlog(repo.svfs, dir=mandir)
+        return manifest.manifestrevlog(repo.svfs, tree=mandir)
     else:
         #reverse of "/".join(("data", path + ".i"))
         return filelog.filelog(repo.svfs, path[5:-2])
--- a/mercurial/util.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/util.py	Fri Aug 24 12:55:05 2018 -0700
@@ -36,6 +36,10 @@
 import warnings
 import zlib
 
+from .thirdparty import (
+    attr,
+)
+from hgdemandimport import tracing
 from . import (
     encoding,
     error,
@@ -945,12 +949,12 @@
 
         self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
 
-    def setsockopt(self, level, optname, value):
+    def setsockopt(self, res, level, optname, value):
         if not self.states:
             return
 
         self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
-            self.name, level, optname, value))
+            self.name, level, optname, value, res))
 
 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                       logdata=False, logdataapis=True):
@@ -2874,7 +2878,44 @@
     (1, 0.000000001, _('%.3f ns')),
     )
 
-_timenesting = [0]
+@attr.s
+class timedcmstats(object):
+    """Stats information produced by the timedcm context manager on entering."""
+
+    # the starting value of the timer as a float (meaning and resolution are
+    # platform dependent, see util.timer)
+    start = attr.ib(default=attr.Factory(lambda: timer()))
+    # the number of seconds as a floating point value; starts at 0, updated when
+    # the context is exited.
+    elapsed = attr.ib(default=0)
+    # the number of nested timedcm context managers.
+    level = attr.ib(default=1)
+
+    def __bytes__(self):
+        return timecount(self.elapsed) if self.elapsed else '<unknown>'
+
+    __str__ = encoding.strmethod(__bytes__)
+
+@contextlib.contextmanager
+def timedcm(whencefmt, *whenceargs):
+    """A context manager that produces timing information for a given context.
+
+    On entering, a timedcmstats instance is produced.
+
+    This context manager is reentrant.
+
+    """
+    # track nested context managers
+    timedcm._nested += 1
+    timing_stats = timedcmstats(level=timedcm._nested)
+    try:
+        with tracing.log(whencefmt, *whenceargs):
+            yield timing_stats
+    finally:
+        timing_stats.elapsed = timer() - timing_stats.start
+        timedcm._nested -= 1
+
+timedcm._nested = 0
 
 def timed(func):
     '''Report the execution time of a function call to stderr.
@@ -2888,18 +2929,13 @@
     '''
 
     def wrapper(*args, **kwargs):
-        start = timer()
-        indent = 2
-        _timenesting[0] += indent
-        try:
-            return func(*args, **kwargs)
-        finally:
-            elapsed = timer() - start
-            _timenesting[0] -= indent
-            stderr = procutil.stderr
-            stderr.write('%s%s: %s\n' %
-                         (' ' * _timenesting[0], func.__name__,
-                          timecount(elapsed)))
+        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
+            result = func(*args, **kwargs)
+        stderr = procutil.stderr
+        stderr.write('%s%s: %s\n' % (
+            ' ' * time_stats.level * 2, pycompat.bytestr(func.__name__),
+            time_stats))
+        return result
     return wrapper
 
 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
@@ -3343,6 +3379,9 @@
                 return ''.join(buf)
             chunk = self._reader(65536)
             self._decompress(chunk)
+            if not chunk and not self._pending and not self._eof:
+                # No progress and no new data, bail out
+                return ''.join(buf)
 
 class _GzipCompressedStreamReader(_CompressedStreamReader):
     def __init__(self, fh):
--- a/mercurial/utils/stringutil.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/utils/stringutil.py	Fri Aug 24 12:55:05 2018 -0700
@@ -59,6 +59,9 @@
             '%s: %s' % (pprint(k, bprefix=bprefix),
                         pprint(v, bprefix=bprefix))
             for k, v in sorted(o.items())))
+    elif isinstance(o, set):
+        return 'set([%s])' % (b', '.join(
+            pprint(k, bprefix=bprefix) for k in sorted(o)))
     elif isinstance(o, tuple):
         return '(%s)' % (b', '.join(pprint(a, bprefix=bprefix) for a in o))
     else:
@@ -111,7 +114,7 @@
     elif callable(r):
         return r()
     else:
-        return pycompat.byterepr(r)
+        return pprint(r)
 
 def binary(s):
     """return true if a string is binary data"""
@@ -424,6 +427,8 @@
     return encoding.trim(text, maxlength, ellipsis='...')
 
 def escapestr(s):
+    if isinstance(s, memoryview):
+        s = bytes(s)
     # call underlying function of s.encode('string_escape') directly for
     # Python 3 compatibility
     return codecs.escape_encode(s)[0]
@@ -464,7 +469,7 @@
         def _cutdown(self, ucstr, space_left):
             l = 0
             colwidth = encoding.ucolwidth
-            for i in xrange(len(ucstr)):
+            for i in pycompat.xrange(len(ucstr)):
                 l += colwidth(ucstr[i])
                 if space_left < l:
                     return (ucstr[:i], ucstr[i:])
--- a/mercurial/verify.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/verify.py	Fri Aug 24 12:55:05 2018 -0700
@@ -45,7 +45,7 @@
         self.errors = 0
         self.warnings = 0
         self.havecl = len(repo.changelog) > 0
-        self.havemf = len(repo.manifestlog._revlog) > 0
+        self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
         self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
         self.lrugetctx = util.lrucachefunc(repo.__getitem__)
         self.refersmf = False
@@ -205,7 +205,7 @@
         ui = self.ui
         match = self.match
         mfl = self.repo.manifestlog
-        mf = mfl._revlog.dirlog(dir)
+        mf = mfl.getstorage(dir)
 
         if not dir:
             self.ui.status(_("checking manifests\n"))
--- a/mercurial/win32.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/win32.py	Fri Aug 24 12:55:05 2018 -0700
@@ -615,7 +615,7 @@
     # callers to recreate f immediately while having other readers do their
     # implicit zombie filename blocking on a temporary name.
 
-    for tries in xrange(10):
+    for tries in pycompat.xrange(10):
         temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
         try:
             os.rename(f, temp)  # raises OSError EEXIST if temp exists
--- a/mercurial/wireprotoserver.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/wireprotoserver.py	Fri Aug 24 12:55:05 2018 -0700
@@ -502,14 +502,14 @@
     def getargs(self, args):
         data = {}
         keys = args.split()
-        for n in xrange(len(keys)):
+        for n in pycompat.xrange(len(keys)):
             argline = self._fin.readline()[:-1]
             arg, l = argline.split()
             if arg not in keys:
                 raise error.Abort(_("unexpected parameter %r") % arg)
             if arg == '*':
                 star = {}
-                for k in xrange(int(l)):
+                for k in pycompat.xrange(int(l)):
                     argline = self._fin.readline()[:-1]
                     arg, l = argline.split()
                     val = self._fin.read(int(l))
--- a/mercurial/wireprotov1peer.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/mercurial/wireprotov1peer.py	Fri Aug 24 12:55:05 2018 -0700
@@ -497,7 +497,7 @@
     def between(self, pairs):
         batch = 8 # avoid giant requests
         r = []
-        for i in xrange(0, len(pairs), batch):
+        for i in pycompat.xrange(0, len(pairs), batch):
             n = " ".join([wireprototypes.encodelist(p, '-')
                           for p in pairs[i:i + batch]])
             d = self._call("between", pairs=n)
--- a/setup.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/setup.py	Fri Aug 24 12:55:05 2018 -0700
@@ -818,6 +818,7 @@
             'mercurial.thirdparty.zope.interface',
             'mercurial.utils',
             'hgext', 'hgext.convert', 'hgext.fsmonitor',
+            'hgext.fastannotate',
             'hgext.fsmonitor.pywatchman',
             'hgext.infinitepush',
             'hgext.highlight',
--- a/tests/bzr-definitions	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/bzr-definitions	Fri Aug 24 12:55:05 2018 -0700
@@ -6,7 +6,7 @@
 
 glog()
 {
-    hg log -G --template '{rev}@{branch} "{desc|firstline}" files: {files}\n' "$@"
+    hg log -G --template '{rev}@{branch} "{desc|firstline}" files+: [{file_adds}], files-: [{file_dels}], files: [{file_mods}]\n' "$@"
 }
 
 manifest()
--- a/tests/dummysmtpd.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/dummysmtpd.py	Fri Aug 24 12:55:05 2018 -0700
@@ -26,7 +26,7 @@
     def __init__(self, localaddr):
         smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None)
 
-    def process_message(self, peer, mailfrom, rcpttos, data):
+    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
         log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos)))
 
     def handle_error(self):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/printrevset.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,41 @@
+from __future__ import absolute_import
+from mercurial import (
+  cmdutil,
+  commands,
+  extensions,
+  logcmdutil,
+  revsetlang,
+  smartset,
+)
+
+from mercurial.utils import (
+  stringutil,
+)
+
+def logrevset(repo, pats, opts):
+    revs = logcmdutil._initialrevs(repo, opts)
+    if not revs:
+        return None
+    match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
+    return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
+
+def uisetup(ui):
+    def printrevset(orig, repo, pats, opts):
+        revs, filematcher = orig(repo, pats, opts)
+        if opts.get(b'print_revset'):
+            expr = logrevset(repo, pats, opts)
+            if expr:
+                tree = revsetlang.parse(expr)
+                tree = revsetlang.analyze(tree)
+            else:
+                tree = []
+            ui = repo.ui
+            ui.write(b'%s\n' % stringutil.pprint(opts.get(b'rev', [])))
+            ui.write(revsetlang.prettyformat(tree) + b'\n')
+            ui.write(stringutil.prettyrepr(revs) + b'\n')
+            revs = smartset.baseset()  # display no revisions
+        return revs, filematcher
+    extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
+    aliases, entry = cmdutil.findcmd(b'log', commands.table)
+    entry[1].append((b'', b'print-revset', False,
+                     b'print generated revset and exit (DEPRECATED)'))
--- a/tests/run-tests.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/run-tests.py	Fri Aug 24 12:55:05 2018 -0700
@@ -64,6 +64,7 @@
 import threading
 import time
 import unittest
+import uuid
 import xml.dom.minidom as minidom
 
 try:
@@ -285,12 +286,12 @@
 
     If path does not exist, return an empty set.
     """
-    cases = set()
+    cases = []
     try:
         with open(path, 'rb') as f:
             for l in f:
                 if l.startswith(b'#testcases '):
-                    cases.update(l[11:].split())
+                    cases.append(sorted(l[11:].split()))
     except IOError as ex:
         if ex.errno != errno.ENOENT:
             raise
@@ -1068,7 +1069,10 @@
         env["HGUSER"]   = "test"
         env["HGENCODING"] = "ascii"
         env["HGENCODINGMODE"] = "strict"
+        env["HGHOSTNAME"] = "test-hostname"
         env['HGIPV6'] = str(int(self._useipv6))
+        if 'HGCATAPULTSERVERPIPE' not in env:
+            env['HGCATAPULTSERVERPIPE'] = '/dev/null'
 
         extraextensions = []
         for opt in self._extraconfigopts:
@@ -1242,14 +1246,15 @@
 
     def __init__(self, path, *args, **kwds):
         # accept an extra "case" parameter
-        case = kwds.pop('case', None)
+        case = kwds.pop('case', [])
         self._case = case
-        self._allcases = parsettestcases(path)
+        self._allcases = {x for y in parsettestcases(path) for x in y}
         super(TTest, self).__init__(path, *args, **kwds)
         if case:
-            self.name = '%s#%s' % (self.name, _strpath(case))
-            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
-            self._tmpname += b'-%s' % case
+            casepath = b'#'.join(case)
+            self.name = '%s#%s' % (self.name, _strpath(casepath))
+            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
+            self._tmpname += b'-%s' % casepath
         self._have = {}
 
     @property
@@ -1323,10 +1328,10 @@
         reqs = []
         for arg in args:
             if arg.startswith(b'no-') and arg[3:] in self._allcases:
-                if arg[3:] == self._case:
+                if arg[3:] in self._case:
                     return False
             elif arg in self._allcases:
-                if arg != self._case:
+                if arg not in self._case:
                     return False
             else:
                 reqs.append(arg)
@@ -1342,6 +1347,20 @@
                 script.append(b'%s %d 0\n' % (salt, line))
             else:
                 script.append(b'echo %s %d $?\n' % (salt, line))
+        active = []
+        session = str(uuid.uuid4())
+        if PYTHON3:
+            session = session.encode('ascii')
+        def toggletrace(cmd):
+            quoted = shellquote(cmd.strip()).replace(b'\\', b'\\\\')
+            if active:
+                script.append(
+                    b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
+                        session, active[0]))
+            script.append(
+                b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
+                    session, quoted))
+            active[0:] = [quoted]
 
         script = []
 
@@ -1369,11 +1388,35 @@
             script.append(b'alias hg="%s"\n' % self._hgcommand)
         if os.getenv('MSYSTEM'):
             script.append(b'alias pwd="pwd -W"\n')
+
+        if os.getenv('HGCATAPULTSERVERPIPE'):
+            # Kludge: use a while loop to keep the pipe from getting
+            # closed by our echo commands. The still-running file gets
+            # reaped at the end of the script, which causes the while
+            # loop to exit and closes the pipe. Sigh.
+            script.append(
+                b'rtendtracing() {\n'
+                b'  echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+                b'  rm -f "$TESTTMP/.still-running"\n'
+                b'}\n'
+                b'trap "rtendtracing" 0\n'
+                b'touch "$TESTTMP/.still-running"\n'
+                b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
+                b'> $HGCATAPULTSERVERPIPE &\n'
+                b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
+                b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+                % {
+                    'name': self.name,
+                    'session': session,
+                }
+            )
+
         if self._case:
+            casestr = b'#'.join(self._case)
             if isinstance(self._case, str):
-                quoted = shellquote(self._case)
+                quoted = shellquote(casestr)
             else:
-                quoted = shellquote(self._case.decode('utf8')).encode('utf8')
+                quoted = shellquote(casestr.decode('utf8')).encode('utf8')
             script.append(b'TESTCASE=%s\n' % quoted)
             script.append(b'export TESTCASE\n')
 
@@ -1433,10 +1476,12 @@
                 prepos = pos
                 pos = n
                 addsalt(n, False)
-                cmd = l[4:].split()
+                rawcmd = l[4:]
+                cmd = rawcmd.split()
+                toggletrace(rawcmd)
                 if len(cmd) == 2 and cmd[0] == b'cd':
                     l = b'  $ cd %s || exit 1\n' % cmd[1]
-                script.append(l[4:])
+                script.append(rawcmd)
             elif l.startswith(b'  > '): # continuations
                 after.setdefault(prepos, []).append(l)
                 script.append(l[4:])
@@ -1455,7 +1500,6 @@
         if skipping is not None:
             after.setdefault(pos, []).append('  !!! missing #endif\n')
         addsalt(n + 1, False)
-
         return salt, script, after, expected
 
     def _processoutput(self, exitcode, output, salt, after, expected):
@@ -2666,31 +2710,42 @@
                 expanded_args.append(arg)
         args = expanded_args
 
-        testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.]+))')
+        testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
         tests = []
         for t in args:
-            case = None
+            case = []
 
             if not (os.path.basename(t).startswith(b'test-')
                     and (t.endswith(b'.py') or t.endswith(b'.t'))):
 
                 m = testcasepattern.match(t)
                 if m is not None:
-                    t, _, case = m.groups()
+                    t, _, casestr = m.groups()
+                    if casestr:
+                        case = casestr.split(b'#')
                 else:
                     continue
 
             if t.endswith(b'.t'):
                 # .t file may contain multiple test cases
-                cases = sorted(parsettestcases(t))
-                if cases:
-                    if case is not None and case in cases:
-                        tests += [{'path': t, 'case': case}]
-                    elif case is not None and case not in cases:
+                casedimensions = parsettestcases(t)
+                if casedimensions:
+                    cases = []
+                    def addcases(case, casedimensions):
+                        if not casedimensions:
+                            cases.append(case)
+                        else:
+                            for c in casedimensions[0]:
+                                addcases(case + [c], casedimensions[1:])
+                    addcases([], casedimensions)
+                    if case and case in cases:
+                        cases = [case]
+                    elif case:
                         # Ignore invalid cases
-                        pass
+                        cases = []
                     else:
-                        tests += [{'path': t, 'case': c} for c in sorted(cases)]
+                        pass
+                    tests += [{'path': t, 'case': c} for c in sorted(cases)]
                 else:
                     tests.append({'path': t})
             else:
@@ -2701,7 +2756,7 @@
         def _reloadtest(test, i):
             # convert a test back to its description dict
             desc = {'path': test.path}
-            case = getattr(test, '_case', None)
+            case = getattr(test, '_case', [])
             if case:
                 desc['case'] = case
             return self._gettest(desc, i)
@@ -2713,7 +2768,8 @@
                     desc = testdescs[0]
                     # desc['path'] is a relative path
                     if 'case' in desc:
-                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
+                        casestr = b'#'.join(desc['case'])
+                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                     else:
                         errpath = b'%s.err' % desc['path']
                     errpath = os.path.join(self._outputdir, errpath)
--- a/tests/simplestorerepo.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/simplestorerepo.py	Fri Aug 24 12:55:05 2018 -0700
@@ -22,6 +22,7 @@
     nullrev,
 )
 from mercurial.thirdparty import (
+    attr,
     cbor,
 )
 from mercurial import (
@@ -60,6 +61,19 @@
     if not isinstance(rev, int):
         raise ValueError('expected int')
 
+@interfaceutil.implementer(repository.irevisiondelta)
+@attr.s(slots=True, frozen=True)
+class simplestorerevisiondelta(object):
+    node = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    linknode = attr.ib()
+    flags = attr.ib()
+    baserevisionsize = attr.ib()
+    revision = attr.ib()
+    delta = attr.ib()
+
 @interfaceutil.implementer(repository.ifilestorage)
 class filestorage(object):
     """Implements storage for a tracked path.
@@ -91,7 +105,6 @@
 
         # This is used by changegroup code :/
         self._generaldelta = True
-        self.storedeltachains = False
 
         self.version = 1
 
@@ -228,7 +241,7 @@
         p1node = self.parents(self.node(rev))[0]
         return self.rev(p1node)
 
-    def candelta(self, baserev, rev):
+    def _candelta(self, baserev, rev):
         validaterev(baserev)
         validaterev(rev)
 
@@ -500,6 +513,54 @@
         return mdiff.textdiff(self.revision(node1, raw=True),
                               self.revision(node2, raw=True))
 
+    def emitrevisiondeltas(self, requests):
+        for request in requests:
+            node = request.node
+            rev = self.rev(node)
+
+            if request.basenode == nullid:
+                baserev = nullrev
+            elif request.basenode is not None:
+                baserev = self.rev(request.basenode)
+            else:
+                # This is a test extension and we can do simple things
+                # for choosing a delta parent.
+                baserev = self.deltaparent(rev)
+
+                if baserev != nullrev and not self._candelta(baserev, rev):
+                    baserev = nullrev
+
+            revision = None
+            delta = None
+            baserevisionsize = None
+
+            if self.iscensored(baserev) or self.iscensored(rev):
+                try:
+                    revision = self.revision(node, raw=True)
+                except error.CensoredNodeError as e:
+                    revision = e.tombstone
+
+                if baserev != nullrev:
+                    baserevisionsize = self.rawsize(baserev)
+
+            elif baserev == nullrev:
+                revision = self.revision(node, raw=True)
+            else:
+                delta = self.revdiff(baserev, rev)
+
+            extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
+
+            yield simplestorerevisiondelta(
+                node=node,
+                p1node=request.p1node,
+                p2node=request.p2node,
+                linknode=request.linknode,
+                basenode=self.node(baserev),
+                flags=self.flags(rev) | extraflags,
+                baserevisionsize=baserevisionsize,
+                revision=revision,
+                delta=delta)
+
     def headrevs(self):
         # Assume all revisions are heads by default.
         revishead = {rev: True for rev in self._indexbyrev}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-edit-lines.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,61 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ hg init repo1
+  $ cd repo1
+
+Make some commits:
+
+  $ for i in 1 2 3; do
+  >   echo $i >> a
+  >   hg commit -A a -m "commit $i" -q
+  > done
+
+absorb --edit-lines will run the editor if filename is provided:
+
+  $ hg absorb --edit-lines
+  nothing applied
+  [1]
+  $ HGEDITOR=cat hg absorb --edit-lines a
+  HG: editing a
+  HG: "y" means the line to the right exists in the changeset to the top
+  HG:
+  HG: /---- 4ec16f85269a commit 1
+  HG: |/--- 5c5f95224a50 commit 2
+  HG: ||/-- 43f0a75bede7 commit 3
+  HG: |||
+      yyy : 1
+       yy : 2
+        y : 3
+  nothing applied
+  [1]
+
+Edit the file using --edit-lines:
+
+  $ cat > editortext << EOF
+  >       y : a
+  >      yy :  b
+  >      y  : c
+  >     yy  : d  
+  >     y y : e
+  >     y   : f
+  >     yyy : g
+  > EOF
+  $ HGEDITOR='cat editortext >' hg absorb -q --edit-lines a
+  $ hg cat -r 0 a
+  d  
+  e
+  f
+  g
+  $ hg cat -r 1 a
+   b
+  c
+  d  
+  g
+  $ hg cat -r 2 a
+  a
+   b
+  e
+  g
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-filefixupstate.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,207 @@
+from __future__ import absolute_import, print_function
+
+import itertools
+from mercurial import pycompat
+from hgext import absorb
+
+class simplefctx(object):
+    def __init__(self, content):
+        self.content = content
+
+    def data(self):
+        return self.content
+
+def insertreturns(x):
+    # insert "\n"s after each single char
+    if isinstance(x, bytes):
+        return b''.join(ch + b'\n' for ch in pycompat.bytestr(x))
+    else:
+        return pycompat.maplist(insertreturns, x)
+
+def removereturns(x):
+    # the revert of "insertreturns"
+    if isinstance(x, bytes):
+        return x.replace(b'\n', b'')
+    else:
+        return pycompat.maplist(removereturns, x)
+
+def assertlistequal(lhs, rhs, decorator=lambda x: x):
+    if lhs != rhs:
+        raise RuntimeError('mismatch:\n actual:   %r\n expected: %r'
+                           % tuple(map(decorator, [lhs, rhs])))
+
+def testfilefixup(oldcontents, workingcopy, expectedcontents, fixups=None):
+    """([str], str, [str], [(rev, a1, a2, b1, b2)]?) -> None
+
+    workingcopy is a string, of which every character denotes a single line.
+
+    oldcontents, expectedcontents are lists of strings, every character of
+    every string denotes a single line.
+
+    if fixups is not None, it's the expected fixups list and will be checked.
+    """
+    expectedcontents = insertreturns(expectedcontents)
+    oldcontents = insertreturns(oldcontents)
+    workingcopy = insertreturns(workingcopy)
+    state = absorb.filefixupstate(pycompat.maplist(simplefctx, oldcontents))
+    state.diffwith(simplefctx(workingcopy))
+    if fixups is not None:
+        assertlistequal(state.fixups, fixups)
+    state.apply()
+    assertlistequal(state.finalcontents, expectedcontents, removereturns)
+
+def buildcontents(linesrevs):
+    # linesrevs: [(linecontent : str, revs : [int])]
+    revs = set(itertools.chain(*[revs for line, revs in linesrevs]))
+    return [b''] + [
+        b''.join([l for l, rs in linesrevs if r in rs])
+        for r in sorted(revs)
+    ]
+
+# input case 0: one single commit
+case0 = [b'', b'11']
+
+# replace a single chunk
+testfilefixup(case0, b'', [b'', b''])
+testfilefixup(case0, b'2', [b'', b'2'])
+testfilefixup(case0, b'22', [b'', b'22'])
+testfilefixup(case0, b'222', [b'', b'222'])
+
+# input case 1: 3 lines, each commit adds one line
+case1 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2, 3]),
+    (b'3', [      3]),
+])
+
+# 1:1 line mapping
+testfilefixup(case1, b'123', case1)
+testfilefixup(case1, b'12c', [b'', b'1', b'12', b'12c'])
+testfilefixup(case1, b'1b3', [b'', b'1', b'1b', b'1b3'])
+testfilefixup(case1, b'1bc', [b'', b'1', b'1b', b'1bc'])
+testfilefixup(case1, b'a23', [b'', b'a', b'a2', b'a23'])
+testfilefixup(case1, b'a2c', [b'', b'a', b'a2', b'a2c'])
+testfilefixup(case1, b'ab3', [b'', b'a', b'ab', b'ab3'])
+testfilefixup(case1, b'abc', [b'', b'a', b'ab', b'abc'])
+
+# non 1:1 edits
+testfilefixup(case1, b'abcd', case1)
+testfilefixup(case1, b'ab', case1)
+
+# deletion
+testfilefixup(case1, b'',   [b'', b'', b'', b''])
+testfilefixup(case1, b'1',  [b'', b'1', b'1', b'1'])
+testfilefixup(case1, b'2',  [b'', b'', b'2', b'2'])
+testfilefixup(case1, b'3',  [b'', b'', b'', b'3'])
+testfilefixup(case1, b'13', [b'', b'1', b'1', b'13'])
+
+# replaces
+testfilefixup(case1, b'1bb3', [b'', b'1', b'1bb', b'1bb3'])
+
+# (confusing) replaces
+testfilefixup(case1, b'1bbb', case1)
+testfilefixup(case1, b'bbbb', case1)
+testfilefixup(case1, b'bbb3', case1)
+testfilefixup(case1, b'1b', case1)
+testfilefixup(case1, b'bb', case1)
+testfilefixup(case1, b'b3', case1)
+
+# insertions at the beginning and the end
+testfilefixup(case1, b'123c', [b'', b'1', b'12', b'123c'])
+testfilefixup(case1, b'a123', [b'', b'a1', b'a12', b'a123'])
+
+# (confusing) insertions
+testfilefixup(case1, b'1a23', case1)
+testfilefixup(case1, b'12b3', case1)
+
+# input case 2: delete in the middle
+case2 = buildcontents([
+    (b'11', [1, 2]),
+    (b'22', [1   ]),
+    (b'33', [1, 2]),
+])
+
+# deletion (optimize code should make it 2 chunks)
+testfilefixup(case2, b'', [b'', b'22', b''],
+              fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)])
+
+# 1:1 line mapping
+testfilefixup(case2, b'aaaa', [b'', b'aa22aa', b'aaaa'])
+
+# non 1:1 edits
+# note: unlike case0, the chunk is not "continuous" and no edit allowed
+testfilefixup(case2, b'aaa', case2)
+
+# input case 3: rev 3 reverts rev 2
+case3 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2   ]),
+    (b'3', [1, 2, 3]),
+])
+
+# 1:1 line mapping
+testfilefixup(case3, b'13', case3)
+testfilefixup(case3, b'1b', [b'', b'1b', b'12b', b'1b'])
+testfilefixup(case3, b'a3', [b'', b'a3', b'a23', b'a3'])
+testfilefixup(case3, b'ab', [b'', b'ab', b'a2b', b'ab'])
+
+# non 1:1 edits
+testfilefixup(case3, b'a', case3)
+testfilefixup(case3, b'abc', case3)
+
+# deletion
+testfilefixup(case3, b'', [b'', b'', b'2', b''])
+
+# insertion
+testfilefixup(case3, b'a13c', [b'', b'a13c', b'a123c', b'a13c'])
+
+# input case 4: a slightly complex case
+case4 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2, 3]),
+    (b'3', [1, 2,  ]),
+    (b'4', [1,    3]),
+    (b'5', [      3]),
+    (b'6', [   2, 3]),
+    (b'7', [   2   ]),
+    (b'8', [   2, 3]),
+    (b'9', [      3]),
+])
+
+testfilefixup(case4, b'1245689', case4)
+testfilefixup(case4, b'1a2456bbb', case4)
+testfilefixup(case4, b'1abc5689', case4)
+testfilefixup(case4, b'1ab5689', [b'', b'134', b'1a3678', b'1ab5689'])
+testfilefixup(case4, b'aa2bcd8ee', [b'', b'aa34', b'aa23d78', b'aa2bcd8ee'])
+testfilefixup(case4, b'aa2bcdd8ee',[b'', b'aa34', b'aa23678', b'aa24568ee'])
+testfilefixup(case4, b'aaaaaa', case4)
+testfilefixup(case4, b'aa258b', [b'', b'aa34', b'aa2378', b'aa258b'])
+testfilefixup(case4, b'25bb', [b'', b'34', b'23678', b'25689'])
+testfilefixup(case4, b'27', [b'', b'34', b'23678', b'245689'])
+testfilefixup(case4, b'28', [b'', b'34', b'2378', b'28'])
+testfilefixup(case4, b'', [b'', b'34', b'37', b''])
+
+# input case 5: replace a small chunk which is near a deleted line
+case5 = buildcontents([
+    (b'12', [1, 2]),
+    (b'3',  [1]),
+    (b'4',  [1, 2]),
+])
+
+testfilefixup(case5, b'1cd4', [b'', b'1cd34', b'1cd4'])
+
+# input case 6: base "changeset" is immutable
+case6 = [b'1357', b'0125678']
+
+testfilefixup(case6, b'0125678', case6)
+testfilefixup(case6, b'0a25678', case6)
+testfilefixup(case6, b'0a256b8', case6)
+testfilefixup(case6, b'abcdefg', [b'1357', b'a1c5e7g'])
+testfilefixup(case6, b'abcdef', case6)
+testfilefixup(case6, b'', [b'1357', b'157'])
+testfilefixup(case6, b'0123456789', [b'1357', b'0123456789'])
+
+# input case 7: change an empty file
+case7 = [b'']
+
+testfilefixup(case7, b'1', case7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-phase.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,30 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > drawdag=$RUNTESTDIR/drawdag.py
+  > EOF
+
+  $ hg init
+  $ hg debugdrawdag <<'EOS'
+  > C
+  > |
+  > B
+  > |
+  > A
+  > EOS
+
+  $ hg phase -r A --public -q
+  $ hg phase -r C --secret --force -q
+
+  $ hg update C -q
+  $ printf B1 > B
+
+  $ hg absorb -q
+
+  $ hg log -G -T '{desc} {phase}'
+  @  C secret
+  |
+  o  B draft
+  |
+  o  A public
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-rename.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,359 @@
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=1
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ sedi() { # workaround check-code
+  > pattern="$1"
+  > shift
+  > for i in "$@"; do
+  >     sed "$pattern" "$i" > "$i".tmp
+  >     mv "$i".tmp "$i"
+  > done
+  > }
+
+rename a to b, then b to a
+
+  $ hg init repo1
+  $ cd repo1
+
+  $ echo 1 > a
+  $ hg ci -A a -m 1
+  $ hg mv a b
+  $ echo 2 >> b
+  $ hg ci -m 2
+  $ hg mv b a
+  $ echo 3 >> a
+  $ hg ci -m 3
+
+  $ hg annotate -ncf a
+  0 eff892de26ec a: 1
+  1 bf56e1f4f857 b: 2
+  2 0b888b00216c a: 3
+
+  $ sedi 's/$/a/' a
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,3 +0,3 @@
+  eff892d -1
+  bf56e1f -2
+  0b888b0 -3
+  eff892d +1a
+  bf56e1f +2a
+  0b888b0 +3a
+
+  $ hg status
+
+  $ hg annotate -ncf a
+  0 5d1c5620e6f2 a: 1a
+  1 9a14ffe67ae9 b: 2a
+  2 9191d121a268 a: 3a
+
+when the first changeset is public
+
+  $ hg phase --public -r 0
+
+  $ sedi 's/a/A/' a
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,3 +0,3 @@
+          -1a
+  9a14ffe -2a
+  9191d12 -3a
+          +1A
+  9a14ffe +2A
+  9191d12 +3A
+
+  $ hg diff
+  diff --git a/a b/a
+  --- a/a
+  +++ b/a
+  @@ -1,3 +1,3 @@
+  -1a
+  +1A
+   2A
+   3A
+
+copy a to b
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ echo 1 > a
+  $ hg ci -A a -m 1
+  $ hg cp a b
+  $ echo 2 >> b
+  $ hg ci -m 2
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:17b72129ab68 2
+  0:eff892de26ec 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/b/' b
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,1 +0,1 @@
+  eff892d -1
+  eff892d +1a
+  showing changes for b
+          @@ -0,2 +0,2 @@
+          -1
+  17b7212 -2
+          +1b
+  17b7212 +2b
+
+  $ hg diff
+  diff --git a/b b/b
+  --- a/b
+  +++ b/b
+  @@ -1,2 +1,2 @@
+  -1
+  +1b
+   2b
+
+copy b to a
+
+  $ cd ..
+  $ hg init repo3
+  $ cd repo3
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ echo 2 >> a
+  $ hg ci -m 2
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:e62c256d8b24 2
+  0:55105f940d5c 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/a/' b
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,2 +0,2 @@
+          -1
+  e62c256 -2
+          +1a
+  e62c256 +2a
+  showing changes for b
+          @@ -0,1 +0,1 @@
+  55105f9 -1
+  55105f9 +1a
+
+  $ hg diff
+  diff --git a/a b/a
+  --- a/a
+  +++ b/a
+  @@ -1,2 +1,2 @@
+  -1
+  +1a
+   2a
+
+"move" b to both a and c, follow a - sorted alphabetically
+
+  $ cd ..
+  $ hg init repo4
+  $ cd repo4
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ hg cp b c
+  $ hg rm b
+  $ echo 2 >> a
+  $ echo 3 >> c
+  $ hg commit -m cp
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:366daad8e679 cp
+  0:55105f940d5c 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/c/' c
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,2 +0,2 @@
+  55105f9 -1
+  366daad -2
+  55105f9 +1a
+  366daad +2a
+  showing changes for c
+          @@ -0,2 +0,2 @@
+          -1
+  366daad -3
+          +1c
+  366daad +3c
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  1:70606019f91b cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |   1a
+  |  +2a
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1a
+  |  +1
+  |  +3c
+  |
+  o  0:bfb67c3539c1 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1a
+  
+run absorb again would apply the change to c
+
+  $ hg absorb -pq
+  showing changes for c
+          @@ -0,1 +0,1 @@
+  7060601 -1
+  7060601 +1c
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  1:8bd536cce368 cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |   1a
+  |  +2a
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1a
+  |  +1c
+  |  +3c
+  |
+  o  0:bfb67c3539c1 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1a
+  
+"move" b to a, c and d, follow d if a gets renamed to e, and c is deleted
+
+  $ cd ..
+  $ hg init repo5
+  $ cd repo5
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ hg cp b c
+  $ hg cp b d
+  $ hg rm b
+  $ echo 2 >> a
+  $ echo 3 >> c
+  $ echo 4 >> d
+  $ hg commit -m cp
+  $ hg mv a e
+  $ hg rm c
+  $ hg commit -m mv
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  2:49911557c471 mv
+  1:7bc3d43ede83 cp
+  0:55105f940d5c 1
+
+  $ sedi 's/$/e/' e
+  $ sedi 's/$/d/' d
+
+  $ hg absorb -pq
+  showing changes for d
+          @@ -0,2 +0,2 @@
+  55105f9 -1
+  7bc3d43 -4
+  55105f9 +1d
+  7bc3d43 +4d
+  showing changes for e
+          @@ -0,2 +0,2 @@
+          -1
+  7bc3d43 -2
+          +1e
+  7bc3d43 +2e
+
+  $ hg diff
+  diff --git a/e b/e
+  --- a/e
+  +++ b/e
+  @@ -1,2 +1,2 @@
+  -1
+  +1e
+   2e
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  2:34be9b0c786e mv
+  |  diff --git a/c b/c
+  |  deleted file mode 100644
+  |  --- a/c
+  |  +++ /dev/null
+  |  @@ -1,2 +0,0 @@
+  |  -1
+  |  -3
+  |  diff --git a/a b/e
+  |  rename from a
+  |  rename to e
+  |
+  o  1:13e56db5948d cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |  -1d
+  |  +1
+  |  +2e
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1d
+  |  +1
+  |  +3
+  |  diff --git a/b b/d
+  |  copy from b
+  |  copy to d
+  |  --- a/b
+  |  +++ b/d
+  |  @@ -1,1 +1,2 @@
+  |   1d
+  |  +4d
+  |
+  o  0:0037613a5dc6 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1d
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-strip.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,45 @@
+Do not strip innocent children. See https://bitbucket.org/facebook/hg-experimental/issues/6/hg-absorb-merges-diverged-commits
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > drawdag=$RUNTESTDIR/drawdag.py
+  > EOF
+
+  $ hg init
+  $ hg debugdrawdag << EOF
+  > E
+  > |
+  > D F
+  > |/
+  > C
+  > |
+  > B
+  > |
+  > A
+  > EOF
+
+  $ hg up E -q
+  $ echo 1 >> B
+  $ echo 2 >> D
+  $ hg absorb
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+
+  $ hg log -G -T '{desc}'
+  @  E
+  |
+  o  D
+  |
+  o  C
+  |
+  o  B
+  |
+  | o  F
+  | |
+  | o  C
+  | |
+  | o  B
+  |/
+  o  A
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,451 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ sedi() { # workaround check-code
+  > pattern="$1"
+  > shift
+  > for i in "$@"; do
+  >     sed "$pattern" "$i" > "$i".tmp
+  >     mv "$i".tmp "$i"
+  > done
+  > }
+
+  $ hg init repo1
+  $ cd repo1
+
+Do not crash with empty repo:
+
+  $ hg absorb
+  abort: no changeset to change
+  [255]
+
+Make some commits:
+
+  $ for i in 1 2 3 4 5; do
+  >   echo $i >> a
+  >   hg commit -A a -m "commit $i" -q
+  > done
+
+  $ hg annotate a
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+Change a few lines:
+
+  $ cat > a <<EOF
+  > 1a
+  > 2b
+  > 3
+  > 4d
+  > 5e
+  > EOF
+
+Preview absorb changes:
+
+  $ hg absorb --print-changes --dry-run
+  showing changes for a
+          @@ -0,2 +0,2 @@
+  4ec16f8 -1
+  5c5f952 -2
+  4ec16f8 +1a
+  5c5f952 +2b
+          @@ -3,2 +3,2 @@
+  ad8b8b7 -4
+  4f55fa6 -5
+  ad8b8b7 +4d
+  4f55fa6 +5e
+
+Run absorb:
+
+  $ hg absorb
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+  $ hg annotate a
+  0: 1a
+  1: 2b
+  2: 3
+  3: 4d
+  4: 5e
+
+Delete a few lines and related commits will be removed if they will be empty:
+
+  $ cat > a <<EOF
+  > 2b
+  > 4d
+  > EOF
+  $ hg absorb
+  saved backup bundle to * (glob)
+  3 of 3 chunk(s) applied
+  $ hg annotate a
+  1: 2b
+  2: 4d
+  $ hg log -T '{rev} {desc}\n' -Gp
+  @  2 commit 4
+  |  diff -r 1cae118c7ed8 -r 58a62bade1c6 a
+  |  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  |  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  |  @@ -1,1 +1,2 @@
+  |   2b
+  |  +4d
+  |
+  o  1 commit 2
+  |  diff -r 84add69aeac0 -r 1cae118c7ed8 a
+  |  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  |  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  |  @@ -0,0 +1,1 @@
+  |  +2b
+  |
+  o  0 commit 1
+  
+
+Non 1:1 map changes will be ignored:
+
+  $ echo 1 > a
+  $ hg absorb
+  nothing applied
+  [1]
+
+Insertions:
+
+  $ cat > a << EOF
+  > insert before 2b
+  > 2b
+  > 4d
+  > insert aftert 4d
+  > EOF
+  $ hg absorb -q
+  $ hg status
+  $ hg annotate a
+  1: insert before 2b
+  1: 2b
+  2: 4d
+  2: insert aftert 4d
+
+Bookmarks are moved:
+
+  $ hg bookmark -r 1 b1
+  $ hg bookmark -r 2 b2
+  $ hg bookmark ba
+  $ hg bookmarks
+     b1                        1:b35060a57a50
+     b2                        2:946e4bc87915
+   * ba                        2:946e4bc87915
+  $ sedi 's/insert/INSERT/' a
+  $ hg absorb -q
+  $ hg status
+  $ hg bookmarks
+     b1                        1:a4183e9b3d31
+     b2                        2:c9b20c925790
+   * ba                        2:c9b20c925790
+
+Non-modified files are ignored:
+
+  $ touch b
+  $ hg commit -A b -m b
+  $ touch c
+  $ hg add c
+  $ hg rm b
+  $ hg absorb
+  nothing applied
+  [1]
+  $ sedi 's/INSERT/Insert/' a
+  $ hg absorb
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+  $ hg status
+  A c
+  R b
+
+Public commits will not be changed:
+
+  $ hg phase -p 1
+  $ sedi 's/Insert/insert/' a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -0,1 +0,1 @@
+          -Insert before 2b
+          +insert before 2b
+          @@ -3,1 +3,1 @@
+  85b4e0e -Insert aftert 4d
+  85b4e0e +insert aftert 4d
+  $ hg absorb
+  saved backup bundle to * (glob)
+  1 of 2 chunk(s) applied
+  $ hg diff -U 0
+  diff -r 1c8eadede62a a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	* (glob)
+  @@ -1,1 +1,1 @@
+  -Insert before 2b
+  +insert before 2b
+  $ hg annotate a
+  1: Insert before 2b
+  1: 2b
+  2: 4d
+  2: insert aftert 4d
+
+Make working copy clean:
+
+  $ hg revert -q -C a b
+  $ hg forget c
+  $ rm c
+  $ hg status
+
+Merge commit will not be changed:
+
+  $ echo 1 > m1
+  $ hg commit -A m1 -m m1
+  $ hg bookmark -q -i m1
+  $ hg update -q '.^'
+  $ echo 2 > m2
+  $ hg commit -q -A m2 -m m2
+  $ hg merge -q m1
+  $ hg commit -m merge
+  $ hg bookmark -d m1
+  $ hg log -G -T '{rev} {desc} {phase}\n'
+  @    6 merge draft
+  |\
+  | o  5 m2 draft
+  | |
+  o |  4 m1 draft
+  |/
+  o  3 b draft
+  |
+  o  2 commit 4 draft
+  |
+  o  1 commit 2 public
+  |
+  o  0 commit 1 public
+  
+  $ echo 2 >> m1
+  $ echo 2 >> m2
+  $ hg absorb
+  abort: no changeset to change
+  [255]
+  $ hg revert -q -C m1 m2
+
+Use a new repo:
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+Make some commits to multiple files:
+
+  $ for f in a b; do
+  >   for i in 1 2; do
+  >     echo $f line $i >> $f
+  >     hg commit -A $f -m "commit $f $i" -q
+  >   done
+  > done
+
+Use pattern to select files to be fixed up:
+
+  $ sedi 's/line/Line/' a b
+  $ hg status
+  M a
+  M b
+  $ hg absorb a
+  saved backup bundle to * (glob)
+  1 of 1 chunk(s) applied
+  $ hg status
+  M b
+  $ hg absorb --exclude b
+  nothing applied
+  [1]
+  $ hg absorb b
+  saved backup bundle to * (glob)
+  1 of 1 chunk(s) applied
+  $ hg status
+  $ cat a b
+  a Line 1
+  a Line 2
+  b Line 1
+  b Line 2
+
+Test config option absorb.max-stack-size:
+
+  $ sedi 's/Line/line/' a b
+  $ hg log -T '{rev}:{node} {desc}\n'
+  3:712d16a8f445834e36145408eabc1d29df05ec09 commit b 2
+  2:74cfa6294160149d60adbf7582b99ce37a4597ec commit b 1
+  1:28f10dcf96158f84985358a2e5d5b3505ca69c22 commit a 2
+  0:f9a81da8dc53380ed91902e5b82c1b36255a4bd0 commit a 1
+  $ hg --config absorb.max-stack-size=1 absorb -pn
+  absorb: only the recent 1 changesets will be analysed
+  showing changes for a
+          @@ -0,2 +0,2 @@
+          -a Line 1
+          -a Line 2
+          +a line 1
+          +a line 2
+  showing changes for b
+          @@ -0,2 +0,2 @@
+          -b Line 1
+  712d16a -b Line 2
+          +b line 1
+  712d16a +b line 2
+
+Test obsolete markers creation:
+
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution=createmarkers
+  > [absorb]
+  > add-noise=1
+  > EOF
+
+  $ hg --config absorb.max-stack-size=3 absorb
+  absorb: only the recent 3 changesets will be analysed
+  2 of 2 chunk(s) applied
+  $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n'
+  6:3dfde4199b46 commit b 2 712d16a8f445834e36145408eabc1d29df05ec09
+  5:99cfab7da5ff commit b 1 74cfa6294160149d60adbf7582b99ce37a4597ec
+  4:fec2b3bd9e08 commit a 2 28f10dcf96158f84985358a2e5d5b3505ca69c22
+  0:f9a81da8dc53 commit a 1 
+  $ hg absorb
+  1 of 1 chunk(s) applied
+  $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n'
+  10:e1c8c1e030a4 commit b 2 3dfde4199b4610ea6e3c6fa9f5bdad8939d69524
+  9:816c30955758 commit b 1 99cfab7da5ffdaf3b9fc6643b14333e194d87f46
+  8:5867d584106b commit a 2 fec2b3bd9e0834b7cb6a564348a0058171aed811
+  7:8c76602baf10 commit a 1 f9a81da8dc53380ed91902e5b82c1b36255a4bd0
+
+Executable files:
+
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=True
+  > EOF
+  $ cd ..
+  $ hg init repo3
+  $ cd repo3
+
+#if execbit
+  $ echo > foo.py
+  $ chmod +x foo.py
+  $ hg add foo.py
+  $ hg commit -mfoo
+#else
+  $ hg import -q --bypass - <<EOF
+  > # HG changeset patch
+  > foo
+  > 
+  > diff --git a/foo.py b/foo.py
+  > new file mode 100755
+  > --- /dev/null
+  > +++ b/foo.py
+  > @@ -0,0 +1,1 @@
+  > +
+  > EOF
+  $ hg up -q
+#endif
+
+  $ echo bla > foo.py
+  $ hg absorb --dry-run --print-changes
+  showing changes for foo.py
+          @@ -0,1 +0,1 @@
+  99b4ae7 -
+  99b4ae7 +bla
+  $ hg absorb
+  1 of 1 chunk(s) applied
+  $ hg diff -c .
+  diff --git a/foo.py b/foo.py
+  new file mode 100755
+  --- /dev/null
+  +++ b/foo.py
+  @@ -0,0 +1,1 @@
+  +bla
+  $ hg diff
+
+Remove lines may delete changesets:
+
+  $ cd ..
+  $ hg init repo4
+  $ cd repo4
+  $ cat > a <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m a12 -A a
+  $ cat > b <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m b12 -A b
+  $ echo 3 >> b
+  $ hg commit -m b3
+  $ echo 4 >> b
+  $ hg commit -m b4
+  $ echo 1 > b
+  $ echo 3 >> a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -2,0 +2,1 @@
+  bfafb49 +3
+  showing changes for b
+          @@ -1,3 +1,0 @@
+  1154859 -2
+  30970db -3
+  a393a58 -4
+  $ hg absorb -v | grep became
+  bfafb49242db: 1 file(s) changed, became 1a2de97fc652
+  115485984805: 2 file(s) changed, became 0c930dfab74c
+  30970dbf7b40: became empty and was dropped
+  a393a58b9a85: became empty and was dropped
+  $ hg log -T '{rev} {desc}\n' -Gp
+  @  5 b12
+  |  diff --git a/b b/b
+  |  new file mode 100644
+  |  --- /dev/null
+  |  +++ b/b
+  |  @@ -0,0 +1,1 @@
+  |  +1
+  |
+  o  4 a12
+     diff --git a/a b/a
+     new file mode 100644
+     --- /dev/null
+     +++ b/a
+     @@ -0,0 +1,3 @@
+     +1
+     +2
+     +3
+  
+
+Use revert to make the current change and its parent disappear.
+This should move us to the non-obsolete ancestor.
+
+  $ cd ..
+  $ hg init repo5
+  $ cd repo5
+  $ cat > a <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m a12 -A a
+  $ hg id
+  bfafb49242db tip
+  $ echo 3 >> a
+  $ hg commit -m a123 a
+  $ echo 4 >> a
+  $ hg commit -m a1234 a
+  $ hg id
+  82dbe7fd19f0 tip
+  $ hg revert -r 0 a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -2,2 +2,0 @@
+  f1c23dd -3
+  82dbe7f -4
+  $ hg absorb --verbose
+  f1c23dd5d08d: became empty and was dropped
+  82dbe7fd19f0: became empty and was dropped
+  a: 1 of 1 chunk(s) applied
+  $ hg id
+  bfafb49242db tip
--- a/tests/test-add.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-add.t	Fri Aug 24 12:55:05 2018 -0700
@@ -12,6 +12,9 @@
   $ hg forget a
   $ hg add
   adding a
+  $ hg forget a
+  $ hg add --color debug
+  [addremove.added ui.status|adding a]
   $ hg st
   A a
   $ mkdir dir
--- a/tests/test-addremove.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-addremove.t	Fri Aug 24 12:55:05 2018 -0700
@@ -69,6 +69,12 @@
   removing c
   adding d
   recording removal of a as rename to b (100% similar)
+  $ hg addremove -ns 50 --color debug
+  [addremove.removed ui.status|removing a]
+  [addremove.added ui.status|adding b]
+  [addremove.removed ui.status|removing c]
+  [addremove.added ui.status|adding d]
+  [ ui.status|recording removal of a as rename to b (100% similar)]
   $ hg addremove -s 50
   removing a
   adding b
--- a/tests/test-alias.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-alias.t	Fri Aug 24 12:55:05 2018 -0700
@@ -651,81 +651,15 @@
 
   $ hg --invalid root
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
   $ hg --invalid mylog
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
   $ hg --invalid blank
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
 
 environment variable changes in alias commands
--- a/tests/test-amend.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-amend.t	Fri Aug 24 12:55:05 2018 -0700
@@ -331,3 +331,37 @@
   ? missing_content2_content2-untracked
   ? missing_content2_content3-untracked
   ? missing_missing_content3-untracked
+
+==========================================
+Test history-editing-backup config option|
+==========================================
+  $ hg init $TESTTMP/repo4
+  $ cd $TESTTMP/repo4
+  $ echo a>a
+  $ hg ci -Aqma
+  $ echo oops>b
+  $ hg ci -Aqm "b"
+  $ echo partiallyfixed > b
+
+#if obsstore-off
+  $ hg amend
+  saved backup bundle to $TESTTMP/repo4/.hg/strip-backup/95e899acf2ce-f11cb050-amend.hg
+When history-editing-backup config option is set:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ echo fixed > b
+  $ hg amend
+
+#else
+  $ hg amend
+When history-editing-backup config option is set:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ echo fixed > b
+  $ hg amend
+
+#endif
--- a/tests/test-bad-extension.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-bad-extension.t	Fri Aug 24 12:55:05 2018 -0700
@@ -72,23 +72,56 @@
   $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS'
   BADEXTS: badext badext2
 
+#if no-extraextensions
 show traceback for ImportError of hgext.name if devel.debug.extensions is set
 
   $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
   > | grep -v '^ ' \
   > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import'
+  debug.extensions: loading extensions
+  debug.extensions: - processing 5 entries
+  debug.extensions:   - loading extension: 'gpg'
+  debug.extensions:   > 'gpg' extension loaded in * (glob)
+  debug.extensions:     - validating extension tables: 'gpg'
+  debug.extensions:     - invoking registered callbacks: 'gpg'
+  debug.extensions:     > callbacks completed in * (glob)
+  debug.extensions:   - loading extension: 'badext'
   *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
   Traceback (most recent call last):
   Exception: bit bucket overflow
-  could not import hgext.badext2 (No module named *badext2): trying hgext3rd.badext2 (glob)
+  debug.extensions:   - loading extension: 'baddocext'
+  debug.extensions:   > 'baddocext' extension loaded in * (glob)
+  debug.extensions:     - validating extension tables: 'baddocext'
+  debug.extensions:     - invoking registered callbacks: 'baddocext'
+  debug.extensions:     > callbacks completed in * (glob)
+  debug.extensions:   - loading extension: 'badext2'
+  debug.extensions:     - could not import hgext.badext2 (No module named badext2): trying hgext3rd.badext2
   Traceback (most recent call last):
   ImportError: No module named *badext2 (glob)
-  could not import hgext3rd.badext2 (No module named *badext2): trying badext2 (glob)
+  debug.extensions:     - could not import hgext3rd.badext2 (No module named badext2): trying badext2
   Traceback (most recent call last):
   ImportError: No module named *badext2 (glob)
   *** failed to import extension badext2: No module named badext2
   Traceback (most recent call last):
   ImportError: No module named badext2
+  debug.extensions: > loaded 2 extensions, total time * (glob)
+  debug.extensions: - loading configtable attributes
+  debug.extensions: - executing uisetup hooks
+  debug.extensions:   - running uisetup for 'gpg'
+  debug.extensions:   > uisetup for 'gpg' took * (glob)
+  debug.extensions:   - running uisetup for 'baddocext'
+  debug.extensions:   > uisetup for 'baddocext' took * (glob)
+  debug.extensions: - executing extsetup hooks
+  debug.extensions:   - running extsetup for 'gpg'
+  debug.extensions:   > extsetup for 'gpg' took * (glob)
+  debug.extensions:   - running extsetup for 'baddocext'
+  debug.extensions:   > extsetup for 'baddocext' took * (glob)
+  debug.extensions: - executing remaining aftercallbacks
+  debug.extensions: > remaining aftercallbacks completed in * (glob)
+  debug.extensions: - loading extension registration objects
+  debug.extensions: > extension registration object loading took * (glob)
+  debug.extensions: extension loading complete
+#endif
 
 confirm that there's no crash when an extension's documentation is bad
 
--- a/tests/test-bundle.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-bundle.t	Fri Aug 24 12:55:05 2018 -0700
@@ -796,13 +796,13 @@
   057f4db07f61970e1c11e83be79e9d08adc4dc31
   bundle2-output-bundle: "HG20", (1 params) 2 parts total
   bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
-  bundling: 1/2 changesets (50.00%)
-  bundling: 2/2 changesets (100.00%)
-  bundling: 1/2 manifests (50.00%)
-  bundling: 2/2 manifests (100.00%)
-  bundling: b 1/3 files (33.33%)
-  bundling: b1 2/3 files (66.67%)
-  bundling: x 3/3 files (100.00%)
+  changesets: 1/2 chunks (50.00%)
+  changesets: 2/2 chunks (100.00%)
+  manifests: 1/2 chunks (50.00%)
+  manifests: 2/2 chunks (100.00%)
+  files: b 1/3 files (33.33%)
+  files: b1 2/3 files (66.67%)
+  files: x 3/3 files (100.00%)
   bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
 
 #if repobundlerepo
--- a/tests/test-bundle2-format.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-bundle2-format.t	Fri Aug 24 12:55:05 2018 -0700
@@ -873,17 +873,17 @@
   bundle2-output-part: "changegroup" (advisory) streamed payload
   bundle2-output: part 0: "changegroup"
   bundle2-output: header chunk size: 18
-  bundling: 1/4 changesets (25.00%)
-  bundling: 2/4 changesets (50.00%)
-  bundling: 3/4 changesets (75.00%)
-  bundling: 4/4 changesets (100.00%)
-  bundling: 1/4 manifests (25.00%)
-  bundling: 2/4 manifests (50.00%)
-  bundling: 3/4 manifests (75.00%)
-  bundling: 4/4 manifests (100.00%)
-  bundling: D 1/3 files (33.33%)
-  bundling: E 2/3 files (66.67%)
-  bundling: H 3/3 files (100.00%)
+  changesets: 1/4 chunks (25.00%)
+  changesets: 2/4 chunks (50.00%)
+  changesets: 3/4 chunks (75.00%)
+  changesets: 4/4 chunks (100.00%)
+  manifests: 1/4 chunks (25.00%)
+  manifests: 2/4 chunks (50.00%)
+  manifests: 3/4 chunks (75.00%)
+  manifests: 4/4 chunks (100.00%)
+  files: D 1/3 files (33.33%)
+  files: E 2/3 files (66.67%)
+  files: H 3/3 files (100.00%)
   bundle2-output: payload chunk size: 1555
   bundle2-output: closing payload chunk
   bundle2-output: end of bundle
--- a/tests/test-check-code.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-check-code.t	Fri Aug 24 12:55:05 2018 -0700
@@ -22,7 +22,7 @@
   >>> commands = []
   >>> with open('mercurial/debugcommands.py', 'rb') as fh:
   ...     for line in fh:
-  ...         m = re.match("^@command\('([a-z]+)", line)
+  ...         m = re.match(b"^@command\('([a-z]+)", line)
   ...         if m:
   ...             commands.append(m.group(1))
   >>> scommands = list(sorted(commands))
--- a/tests/test-check-interfaces.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-check-interfaces.py	Fri Aug 24 12:55:05 2018 -0700
@@ -21,6 +21,7 @@
     verify as ziverify,
 )
 from mercurial import (
+    changegroup,
     bundlerepo,
     filelog,
     httppeer,
@@ -28,6 +29,7 @@
     manifest,
     pycompat,
     repository,
+    revlog,
     sshpeer,
     statichttprepo,
     ui as uimod,
@@ -196,4 +198,29 @@
     # Conforms to imanifestdict.
     checkzobject(mctx.read())
 
+    ziverify.verifyClass(repository.irevisiondelta,
+                         revlog.revlogrevisiondelta)
+    ziverify.verifyClass(repository.irevisiondeltarequest,
+                         changegroup.revisiondeltarequest)
+
+    rd = revlog.revlogrevisiondelta(
+        node=b'',
+        p1node=b'',
+        p2node=b'',
+        basenode=b'',
+        linknode=b'',
+        flags=b'',
+        baserevisionsize=None,
+        revision=b'',
+        delta=None)
+    checkzobject(rd)
+
+    rdr = changegroup.revisiondeltarequest(
+        node=b'',
+        linknode=b'',
+        p1node=b'',
+        p2node=b'',
+        basenode=b'')
+    checkzobject(rdr)
+
 main()
--- a/tests/test-check-py3-compat.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-check-py3-compat.t	Fri Aug 24 12:55:05 2018 -0700
@@ -26,17 +26,23 @@
   $ testrepohg files 'set:(**.py) - grep(pygments)' \
   > -X hgdemandimport/demandimportpy2.py \
   > -X hgext/fsmonitor/pywatchman \
+  > -X mercurial/thirdparty/cbor \
   > | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
-  hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob)
-  mercurial/cffi/bdiff.py: error importing: <ImportError> cannot import name '_bdiff' (error at bdiff.py:*)
-  mercurial/cffi/bdiffbuild.py: error importing: <ImportError> No module named 'cffi' (error at bdiffbuild.py:*)
-  mercurial/cffi/mpatch.py: error importing: <ImportError> cannot import name '_mpatch' (error at mpatch.py:*)
-  mercurial/cffi/mpatchbuild.py: error importing: <ImportError> No module named 'cffi' (error at mpatchbuild.py:*)
-  mercurial/cffi/osutilbuild.py: error importing: <ImportError> No module named 'cffi' (error at osutilbuild.py:*)
-  mercurial/scmwindows.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
-  mercurial/win32.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
-  mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob)
+  contrib/python-zstandard/setup.py not using absolute_import
+  contrib/python-zstandard/setup_zstd.py not using absolute_import
+  contrib/python-zstandard/tests/common.py not using absolute_import
+  contrib/python-zstandard/tests/test_buffer_util.py not using absolute_import
+  contrib/python-zstandard/tests/test_compressor.py not using absolute_import
+  contrib/python-zstandard/tests/test_compressor_fuzzing.py not using absolute_import
+  contrib/python-zstandard/tests/test_data_structures.py not using absolute_import
+  contrib/python-zstandard/tests/test_data_structures_fuzzing.py not using absolute_import
+  contrib/python-zstandard/tests/test_decompressor.py not using absolute_import
+  contrib/python-zstandard/tests/test_decompressor_fuzzing.py not using absolute_import
+  contrib/python-zstandard/tests/test_estimate_sizes.py not using absolute_import
+  contrib/python-zstandard/tests/test_module_attributes.py not using absolute_import
+  contrib/python-zstandard/tests/test_train_dictionary.py not using absolute_import
+  setup.py not using absolute_import
 
 #endif
 
--- a/tests/test-clone.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-clone.t	Fri Aug 24 12:55:05 2018 -0700
@@ -47,6 +47,7 @@
   checklink (symlink !)
   checklink-target (symlink !)
   checknoexec (execbit !)
+  manifestfulltextcache
   rbc-names-v1
   rbc-revs-v1
 
@@ -641,7 +642,7 @@
   $ mkdir a
   $ chmod 000 a
   $ hg clone a b
-  abort: repository a not found!
+  abort: Permission denied: '$TESTTMP/fail/a/.hg'
   [255]
 
 Inaccessible destination
@@ -664,7 +665,7 @@
 
   $ mkfifo a
   $ hg clone a b
-  abort: repository a not found!
+  abort: $ENOTDIR$: '$TESTTMP/fail/a/.hg'
   [255]
   $ rm a
 
--- a/tests/test-completion.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-completion.t	Fri Aug 24 12:55:05 2018 -0700
@@ -98,6 +98,7 @@
   debugknown
   debuglabelcomplete
   debuglocks
+  debugmanifestfulltextcache
   debugmergestate
   debugnamecomplete
   debugobsolete
@@ -273,7 +274,7 @@
   debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure
   debugdownload: output
   debugextensions: template
-  debugfileset: rev, all-files
+  debugfileset: rev, all-files, show-matcher, show-stage
   debugformat: template
   debugfsinfo: 
   debuggetbundle: head, common, type
@@ -284,6 +285,7 @@
   debugknown: 
   debuglabelcomplete: 
   debuglocks: force-lock, force-wlock, set-lock, set-wlock
+  debugmanifestfulltextcache: clear, add
   debugmergestate: 
   debugnamecomplete: 
   debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
--- a/tests/test-conflict.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-conflict.t	Fri Aug 24 12:55:05 2018 -0700
@@ -58,7 +58,7 @@
   # To mark files as resolved:  hg resolve --mark FILE
   
   # To continue:    hg commit
-  # To abort:       hg update --clean . (warning: this will discard uncommitted changes)
+  # To abort:       hg merge --abort
   
 
   $ cat a
--- a/tests/test-contrib-perf.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-contrib-perf.t	Fri Aug 24 12:55:05 2018 -0700
@@ -55,6 +55,8 @@
                  benchmark parsing bookmarks from disk to memory
    perfbranchmap
                  benchmark the update of a branchmap
+   perfbranchmapload
+                 benchmark reading the branchmap
    perfbundleread
                  Benchmark reading of bundle files.
    perfcca       (no help text available)
@@ -82,6 +84,8 @@
                  (no help text available)
    perfheads     (no help text available)
    perfindex     (no help text available)
+   perflinelogedits
+                 (no help text available)
    perfloadmarkers
                  benchmark the time to parse the on-disk markers for a repo
    perflog       (no help text available)
@@ -156,6 +160,7 @@
 #endif
   $ hg perfheads
   $ hg perfindex
+  $ hg perflinelogedits -n 1
   $ hg perfloadmarkers
   $ hg perflog
   $ hg perflookup 2
--- a/tests/test-convert-bzr-ghosts.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-convert-bzr-ghosts.t	Fri Aug 24 12:55:05 2018 -0700
@@ -31,9 +31,9 @@
   1 Initial layout setup
   0 Commit with ghost revision
   $ glog -R source-hg
-  o  1@source "Commit with ghost revision" files: somefile
+  o  1@source "Commit with ghost revision" files+: [], files-: [], files: [somefile]
   |
-  o  0@source "Initial layout setup" files: somefile
+  o  0@source "Initial layout setup" files+: [somefile], files-: [], files: []
   
 
   $ cd ..
--- a/tests/test-convert-bzr-merges.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-convert-bzr-merges.t	Fri Aug 24 12:55:05 2018 -0700
@@ -13,7 +13,8 @@
   $ bzr init -q source
   $ cd source
   $ echo content > file
-  $ bzr add -q file
+  $ echo text > rename_me
+  $ bzr add -q file rename_me
   $ bzr commit -q -m 'Initial add' '--commit-time=2009-10-10 08:00:00 +0100'
   $ cd ..
   $ bzr branch -q source source-branch1
@@ -32,6 +33,8 @@
   $ cd source-branch2
   $ echo somecontent > file-branch2
   $ bzr add -q file-branch2
+  $ bzr mv -q rename_me renamed
+  $ echo change > renamed
   $ bzr commit -q -m 'Added brach2 file' '--commit-time=2009-10-10 08:00:03 +0100'
   $ sleep 1
   $ cd ../source
@@ -39,6 +42,9 @@
   $ bzr merge -q --force ../source-branch2
   $ bzr commit -q -m 'Merged branches' '--commit-time=2009-10-10 08:00:04 +0100'
   $ cd ..
+
+BUG: file-branch2 should not be added in rev 4, and the rename_me -> renamed
+move should be recorded in the fixup merge.
   $ hg convert --datesort --config convert.bzr.saverev=False source source-hg
   initializing destination source-hg repository
   scanning source...
@@ -49,18 +55,19 @@
   2 Added parent file
   1 Added brach2 file
   0 Merged branches
+  warning: can't find ancestor for 'renamed' copied from 'rename_me'!
   $ glog -R source-hg
-  o    5@source "(octopus merge fixup)" files:
+  o    5@source "(octopus merge fixup)" files+: [], files-: [], files: [renamed]
   |\
-  | o    4@source "Merged branches" files: file-branch2
+  | o    4@source "Merged branches" files+: [file-branch1 file-branch2 renamed], files-: [rename_me], files: [file]
   | |\
-  o---+  3@source-branch2 "Added brach2 file" files: file-branch2
+  o---+  3@source-branch2 "Added brach2 file" files+: [file-branch2 renamed], files-: [rename_me], files: []
    / /
-  | o  2@source "Added parent file" files: file-parent
+  | o  2@source "Added parent file" files+: [file-parent], files-: [], files: []
   | |
-  o |  1@source-branch1 "Added branch1 file" files: file file-branch1
+  o |  1@source-branch1 "Added branch1 file" files+: [file-branch1], files-: [], files: [file]
   |/
-  o  0@source "Initial add" files: file
+  o  0@source "Initial add" files+: [file rename_me], files-: [], files: []
   
   $ manifest source-hg tip
   % manifest of tip
@@ -68,6 +75,7 @@
   644   file-branch1
   644   file-branch2
   644   file-parent
+  644   renamed
 
   $ hg convert source-hg hg2hg
   initializing destination hg2hg repository
@@ -80,38 +88,107 @@
   2 Added brach2 file
   1 Merged branches
   0 (octopus merge fixup)
+
+BUG: The manifest entries should be the same for matching revisions, and
+nothing should be outgoing
+
+  $ hg -R source-hg manifest --debug -r tip | grep renamed
+  67109fdebf6c556eb0a9d5696dd98c8420520405 644   renamed
+  $ hg -R hg2hg manifest --debug -r tip | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+  $ hg -R source-hg manifest --debug -r 'tip^' | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+  $ hg -R hg2hg manifest --debug -r 'tip^' | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+
+BUG: The revisions found should be the same in both repos
+
+  $ hg --cwd source-hg log -r 'file("renamed")' -G -Tcompact
+  o    5[tip]:4,3   6652429c300a   2009-10-10 08:00 +0100   foo
+  |\     (octopus merge fixup)
+  | |
+  | o    4:2,1   e0ae8af3503a   2009-10-10 08:00 +0100   foo
+  | |\     Merged branches
+  | ~ ~
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  |    Added brach2 file
+  ~
+  $ hg --cwd hg2hg log -r 'file("renamed")' -G -Tcompact
+  o    4:2,1   e0ae8af3503a   2009-10-10 08:00 +0100   foo
+  |\     Merged branches
+  ~ ~
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  |    Added brach2 file
+  ~
+
+BUG(?): The move seems to be recorded in rev 4, so it should probably show up
+there.  It's not recorded as a move in rev 5, even in source-hg.
+
+  $ hg -R source-hg up -q tip
+  $ hg -R hg2hg up -q tip
+  $ hg --cwd source-hg log -r 'follow("renamed")' -G -Tcompact
+  @    5[tip]:4,3   6652429c300a   2009-10-10 08:00 +0100   foo
+  |\     (octopus merge fixup)
+  | :
+  o :  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  :/     Added brach2 file
+  :
+  o  0   18b86f5df51b   2009-10-10 08:00 +0100   foo
+       Initial add
+  
+  $ hg --cwd hg2hg log -r 'follow("renamed")' -G -Tcompact
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  :    Added brach2 file
+  :
+  o  0   18b86f5df51b   2009-10-10 08:00 +0100   foo
+       Initial add
+  
+
   $ hg -R hg2hg out source-hg -T compact
   comparing with source-hg
   searching for changes
-  5[tip]:4,3   6bd55e826939   2009-10-10 08:00 +0100   foo
+  5[tip]:4,3   3be2299ccd31   2009-10-10 08:00 +0100   foo
     (octopus merge fixup)
   
-XXX: The manifest lines should probably agree, to avoid changing the hash when
-converting hg -> hg
+
+  $ glog -R hg2hg
+  @    5@source "(octopus merge fixup)" files+: [], files-: [], files: []
+  |\
+  | o    4@source "Merged branches" files+: [file-branch1 file-branch2 renamed], files-: [rename_me], files: [file]
+  | |\
+  o---+  3@source-branch2 "Added brach2 file" files+: [file-branch2 renamed], files-: [rename_me], files: []
+   / /
+  | o  2@source "Added parent file" files+: [file-parent], files-: [], files: []
+  | |
+  o |  1@source-branch1 "Added branch1 file" files+: [file-branch1], files-: [], files: [file]
+  |/
+  o  0@source "Initial add" files+: [file rename_me], files-: [], files: []
+  
 
   $ hg -R source-hg log --debug -r tip
-  changeset:   5:b209510f11b2c987f920749cd8e352aa4b3230f2
+  changeset:   5:6652429c300ab66fdeaf2e730945676a00b53231
   branch:      source
   tag:         tip
   phase:       draft
-  parent:      4:1dc38c377bb35eeea4fa955056fbe4440d54a743
-  parent:      3:4aaba1bfb426b8941bbf63f9dd52301152695164
-  manifest:    5:1109e42bdcbd1f51baa69bc91079011d77057dbb
+  parent:      4:e0ae8af3503af9bbffb0b29268a02744cc61a561
+  parent:      3:138bed2e14be415a2692b02e41405b2864f758b4
+  manifest:    5:1eabd5f5d4b985784cf2c45c717ff053eca14b0d
   user:        Foo Bar <foo.bar@example.com>
   date:        Sat Oct 10 08:00:04 2009 +0100
+  files:       renamed
   extra:       branch=source
   description:
   (octopus merge fixup)
   
   
   $ hg -R hg2hg log --debug -r tip
-  changeset:   5:6bd55e8269392769783345686faf7ff7b3b0215d
+  changeset:   5:3be2299ccd315ff9aab2b49bdb0d14e3244435e8
   branch:      source
   tag:         tip
   phase:       draft
-  parent:      4:1dc38c377bb35eeea4fa955056fbe4440d54a743
-  parent:      3:4aaba1bfb426b8941bbf63f9dd52301152695164
-  manifest:    4:daa315d56a98ba20811fdd0d9d575861f65cfa8c
+  parent:      4:e0ae8af3503af9bbffb0b29268a02744cc61a561
+  parent:      3:138bed2e14be415a2692b02e41405b2864f758b4
+  manifest:    4:3ece3c7f2cc6df15b3cbbf3273c69869fc7c3ab0
   user:        Foo Bar <foo.bar@example.com>
   date:        Sat Oct 10 08:00:04 2009 +0100
   extra:       branch=source
@@ -124,21 +201,25 @@
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  67109fdebf6c556eb0a9d5696dd98c8420520405 644   renamed
   $ hg -R source-hg manifest --debug -r 'tip^'
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
 
   $ hg -R hg2hg manifest --debug -r tip
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
   $ hg -R hg2hg manifest --debug -r 'tip^'
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
 
   $ cd ..
--- a/tests/test-convert-bzr.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-convert-bzr.t	Fri Aug 24 12:55:05 2018 -0700
@@ -42,9 +42,9 @@
   1 Initial add: a, c, e
   0 rename a into b, create a, rename c into d
   $ glog -R source-hg
-  o  1@source "rename a into b, create a, rename c into d" files: a b c d e f
+  o  1@source "rename a into b, create a, rename c into d" files+: [b d f], files-: [c e], files: [a]
   |
-  o  0@source "Initial add: a, c, e" files: a c e
+  o  0@source "Initial add: a, c, e" files+: [a c e], files-: [], files: []
   
 
 manifest
@@ -64,7 +64,7 @@
   converting...
   0 Initial add: a, c, e
   $ glog -R source-1-hg
-  o  0@source "Initial add: a, c, e" files: a c e
+  o  0@source "Initial add: a, c, e" files+: [a c e], files-: [], files: []
   
 
 test with filemap
@@ -147,13 +147,13 @@
   1 Editing b
   0 Merged improve branch
   $ glog -R source-hg
-  o    3@source "Merged improve branch" files:
+  o    3@source "Merged improve branch" files+: [], files-: [], files: [b]
   |\
-  | o  2@source-improve "Editing b" files: b
+  | o  2@source-improve "Editing b" files+: [], files-: [], files: [b]
   | |
-  o |  1@source "Editing a" files: a
+  o |  1@source "Editing a" files+: [], files-: [], files: [a]
   |/
-  o  0@source "Initial add" files: a b
+  o  0@source "Initial add" files+: [a b], files-: [], files: []
   
   $ cd ..
 
@@ -250,13 +250,13 @@
   0 changea
   updating tags
   $ (cd repo-bzr; glog)
-  o  3@default "update tags" files: .hgtags
+  o  3@default "update tags" files+: [.hgtags], files-: [], files: []
   |
-  o  2@default "changea" files: a
+  o  2@default "changea" files+: [], files-: [], files: [a]
   |
-  | o  1@branch "addb" files: b
+  | o  1@branch "addb" files+: [b], files-: [], files: []
   |/
-  o  0@default "adda" files: a
+  o  0@default "adda" files+: [a], files-: [], files: []
   
 
 Test tags (converted identifiers are not stable because bzr ones are
--- a/tests/test-convert-filemap.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-convert-filemap.t	Fri Aug 24 12:55:05 2018 -0700
@@ -780,7 +780,7 @@
   converting...
   0 3
   $ hg -R .-hg log -G -T '{shortest(node)} {desc}\n{files % "- {file}\n"}\n'
-  o    e9ed 3
+  o    bbfe 3
   |\
   | o  33a0 2
   | |  - f
--- a/tests/test-convert-svn-branches.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-convert-svn-branches.t	Fri Aug 24 12:55:05 2018 -0700
@@ -85,8 +85,8 @@
   $ hg branches
   newbranch                     11:a6d7cc050ad1
   default                       10:6e2b33404495
-  old                            9:93c4b0f99529
-  old2                           8:b52884d7bead (inactive)
+  old                            9:1b494af68c0b
+  old2                           8:5be40b8dcbf6 (inactive)
   $ hg tags -q
   tip
   $ cd ..
--- a/tests/test-convert-svn-encoding.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-convert-svn-encoding.t	Fri Aug 24 12:55:05 2018 -0700
@@ -52,6 +52,7 @@
   5 init projA
   source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@1
   converting: 0/6 revisions (0.00%)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   4 hello
@@ -118,6 +119,7 @@
   converting: 4/6 revisions (66.67%)
   reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
   scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   0 branch to branch?e
@@ -125,6 +127,7 @@
   converting: 5/6 revisions (83.33%)
   reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
   scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   reparent to file:/*/$TESTTMP/svn-repo (glob)
--- a/tests/test-convert.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-convert.t	Fri Aug 24 12:55:05 2018 -0700
@@ -533,9 +533,11 @@
 
 test bogus URL
 
+#if no-msys
   $ hg convert -q bzr+ssh://foobar@selenic.com/baz baz
   abort: bzr+ssh://foobar@selenic.com/baz: missing or unsupported repository
   [255]
+#endif
 
 test revset converted() lookup
 
--- a/tests/test-debugcommands.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-debugcommands.t	Fri Aug 24 12:55:05 2018 -0700
@@ -15,6 +15,39 @@
   adding a
   $ hg ci -Am make-it-full
 #if reporevlogstore
+  $ hg debugrevlog -c
+  format : 1
+  flags  : inline
+  
+  revisions     :   3
+      merges    :   0 ( 0.00%)
+      normal    :   3 (100.00%)
+  revisions     :   3
+      empty     :   0 ( 0.00%)
+                     text  :   0 (100.00%)
+                     delta :   0 (100.00%)
+      snapshot  :   3 (100.00%)
+        lvl-0   :         3 (100.00%)
+      deltas    :   0 ( 0.00%)
+  revision size : 191
+      snapshot  : 191 (100.00%)
+        lvl-0   :       191 (100.00%)
+      deltas    :   0 ( 0.00%)
+  
+  chunks        :   3
+      0x75 (u)  :   3 (100.00%)
+  chunks size   : 191
+      0x75 (u)  : 191 (100.00%)
+  
+  avg chain length  :  0
+  max chain length  :  0
+  max chain reach   : 67
+  compression ratio :  0
+  
+  uncompressed data size (min/max/avg) : 57 / 66 / 62
+  full revision size (min/max/avg)     : 58 / 67 / 63
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
+  delta size (min/max/avg)             : 0 / 0 / 0
   $ hg debugrevlog -m
   format : 1
   flags  : inline, generaldelta
@@ -23,10 +56,15 @@
       merges    :  0 ( 0.00%)
       normal    :  3 (100.00%)
   revisions     :  3
-      full      :  3 (100.00%)
+      empty     :  1 (33.33%)
+                     text  :  1 (100.00%)
+                     delta :  0 ( 0.00%)
+      snapshot  :  2 (66.67%)
+        lvl-0   :        2 (66.67%)
       deltas    :  0 ( 0.00%)
   revision size : 88
-      full      : 88 (100.00%)
+      snapshot  : 88 (100.00%)
+        lvl-0   :       88 (100.00%)
       deltas    :  0 ( 0.00%)
   
   chunks        :  3
@@ -42,7 +80,41 @@
   compression ratio :  0
   
   uncompressed data size (min/max/avg) : 0 / 43 / 28
-  full revision size (min/max/avg)     : 0 / 44 / 29
+  full revision size (min/max/avg)     : 44 / 44 / 44
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
+  delta size (min/max/avg)             : 0 / 0 / 0
+  $ hg debugrevlog a
+  format : 1
+  flags  : inline, generaldelta
+  
+  revisions     : 1
+      merges    : 0 ( 0.00%)
+      normal    : 1 (100.00%)
+  revisions     : 1
+      empty     : 0 ( 0.00%)
+                     text  : 0 (100.00%)
+                     delta : 0 (100.00%)
+      snapshot  : 1 (100.00%)
+        lvl-0   :       1 (100.00%)
+      deltas    : 0 ( 0.00%)
+  revision size : 3
+      snapshot  : 3 (100.00%)
+        lvl-0   :       3 (100.00%)
+      deltas    : 0 ( 0.00%)
+  
+  chunks        : 1
+      0x75 (u)  : 1 (100.00%)
+  chunks size   : 3
+      0x75 (u)  : 3 (100.00%)
+  
+  avg chain length  : 0
+  max chain length  : 0
+  max chain reach   : 3
+  compression ratio : 0
+  
+  uncompressed data size (min/max/avg) : 2 / 2 / 2
+  full revision size (min/max/avg)     : 3 / 3 / 3
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
   delta size (min/max/avg)             : 0 / 0 / 0
 #endif
 
@@ -411,6 +483,7 @@
   $ ls -r .hg/cache/*
   .hg/cache/rbc-revs-v1
   .hg/cache/rbc-names-v1
+  .hg/cache/manifestfulltextcache
   .hg/cache/branch2-served
 
 Test debugcolor
--- a/tests/test-diff-color.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-diff-color.t	Fri Aug 24 12:55:05 2018 -0700
@@ -22,7 +22,7 @@
   > c
   > EOF
   $ hg ci -Am adda
-  adding a
+  \x1b[0;32madding a\x1b[0m (esc)
   $ cat > a <<EOF
   > c
   > c
@@ -218,7 +218,7 @@
   $ hg init sub
   $ echo b > sub/b
   $ hg -R sub commit -Am 'create sub'
-  adding b
+  \x1b[0;32madding b\x1b[0m (esc)
   $ echo 'sub = sub' > .hgsub
   $ hg add .hgsub
   $ hg commit -m 'add subrepo sub'
--- a/tests/test-extension.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-extension.t	Fri Aug 24 12:55:05 2018 -0700
@@ -1254,7 +1254,7 @@
   > def g():
   >     pass
   > EOF
-  $ hg --config extensions.path=./path.py help foo > /dev/null
+  $ hg --config extensions.path=./path.py help foo
   abort: no such help topic: foo
   (try 'hg help --keyword foo')
   [255]
@@ -1540,6 +1540,7 @@
   reposetup() for $TESTTMP/reposetup-test/src
   reposetup() for $TESTTMP/reposetup-test/src (chg !)
 
+#if no-extraextensions
   $ hg --cwd src debugextensions
   reposetup() for $TESTTMP/reposetup-test/src
   dodo (untested!)
@@ -1547,6 +1548,7 @@
   mq
   reposetuptest (untested!)
   strip
+#endif
 
   $ hg clone -U src clone-dst1
   reposetup() for $TESTTMP/reposetup-test/src
@@ -1683,6 +1685,7 @@
   *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo
   *** (use @command decorator to register 'deprecatedcmd')
   hg: unknown command 'deprecatedcmd'
+  (use 'hg help' for a list of commands)
   [255]
 
  the extension shouldn't be loaded at all so the mq works:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-corrupt.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,83 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+  $ for i in 0 1 2 3 4; do
+  >   echo $i >> a
+  >   echo $i >> b
+  >   hg commit -A -m $i a b
+  > done
+
+use the "debugbuildannotatecache" command to build annotate cache at rev 0
+
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=0
+  fastannotate: a: 1 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+"debugbuildannotatecache" should work with broken cache (and other files would
+be built without being affected). note: linelog being broken is only noticed
+when we try to append to it.
+
+  $ echo 'CORRUPT!' >> .hg/fastannotate/default/a.m
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=1
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 2 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ echo 'CANNOT REUSE!' > .hg/fastannotate/default/a.l
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=2
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 3 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ rm .hg/fastannotate/default/a.m
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=3
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 4 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ rm .hg/fastannotate/default/a.l
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=3
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=4
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 5 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+"fastannotate" should deal with file corruption as well
+
+  $ rm -rf .hg/fastannotate
+  $ hg fastannotate --debug -r 0 a
+  fastannotate: a: 1 new changesets in the main branch
+  0: 0
+
+  $ echo 'CORRUPT!' >> .hg/fastannotate/default/a.m
+  $ hg fastannotate --debug -r 0 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 1 new changesets in the main branch
+  0: 0
+
+  $ echo 'CORRUPT!' > .hg/fastannotate/default/a.l
+  $ hg fastannotate --debug -r 1 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 2 new changesets in the main branch
+  0: 0
+  1: 1
+
+  $ rm .hg/fastannotate/default/a.l
+  $ hg fastannotate --debug -r 1 a
+  fastannotate: a: using fast path (resolved fctx: True)
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 2 new changesets in the main branch
+  0: 0
+  1: 1
+
+  $ rm .hg/fastannotate/default/a.m
+  $ hg fastannotate --debug -r 2 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 0
+  1: 1
+  2: 2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-diffopts.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,33 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+changes to whitespaces
+
+  $ cat >> a << EOF
+  > 1
+  > 
+  >  
+  >  2
+  > EOF
+  $ hg commit -qAm '1'
+  $ cat > a << EOF
+  >  1
+  > 
+  > 2
+  > 
+  > 
+  > 3
+  > EOF
+  $ hg commit -m 2
+  $ hg fastannotate -wB a
+  0:  1
+  0: 
+  1: 2
+  0: 
+  1: 
+  1: 3
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-hg.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,767 @@
+(this file is backported from core hg tests/test-annotate.t)
+
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=1
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > modes=fctx
+  > forcefollow=False
+  > mainbranch=.
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+init
+
+  $ hg init repo
+  $ cd repo
+
+commit
+
+  $ echo 'a' > a
+  $ hg ci -A -m test -u nobody -d '1 0'
+  adding a
+
+annotate -c
+
+  $ hg annotate -c a
+  8435f90966e4: a
+
+annotate -cl
+
+  $ hg annotate -cl a
+  8435f90966e4:1: a
+
+annotate -d
+
+  $ hg annotate -d a
+  Thu Jan 01 00:00:01 1970 +0000: a
+
+annotate -n
+
+  $ hg annotate -n a
+  0: a
+
+annotate -nl
+
+  $ hg annotate -nl a
+  0:1: a
+
+annotate -u
+
+  $ hg annotate -u a
+  nobody: a
+
+annotate -cdnu
+
+  $ hg annotate -cdnu a
+  nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
+
+annotate -cdnul
+
+  $ hg annotate -cdnul a
+  nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
+
+annotate (JSON)
+
+  $ hg annotate -Tjson a
+  [
+   {
+    "abspath": "a",
+    "lines": [{"line": "a\n", "rev": 0}],
+    "path": "a"
+   }
+  ]
+
+  $ hg annotate -Tjson -cdfnul a
+  [
+   {
+    "abspath": "a",
+    "lines": [{"date": [1.0, 0], "file": "a", "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "rev": 0, "user": "nobody"}],
+    "path": "a"
+   }
+  ]
+
+  $ cat <<EOF >>a
+  > a
+  > a
+  > EOF
+  $ hg ci -ma1 -d '1 0'
+  $ hg cp a b
+  $ hg ci -mb -d '1 0'
+  $ cat <<EOF >> b
+  > b4
+  > b5
+  > b6
+  > EOF
+  $ hg ci -mb2 -d '2 0'
+
+annotate -n b
+
+  $ hg annotate -n b
+  0: a
+  1: a
+  1: a
+  3: b4
+  3: b5
+  3: b6
+
+annotate --no-follow b
+
+  $ hg annotate --no-follow b
+  2: a
+  2: a
+  2: a
+  3: b4
+  3: b5
+  3: b6
+
+annotate -nl b
+
+  $ hg annotate -nl b
+  0:1: a
+  1:2: a
+  1:3: a
+  3:4: b4
+  3:5: b5
+  3:6: b6
+
+annotate -nf b
+
+  $ hg annotate -nf b
+  0 a: a
+  1 a: a
+  1 a: a
+  3 b: b4
+  3 b: b5
+  3 b: b6
+
+annotate -nlf b
+
+  $ hg annotate -nlf b
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  3 b:4: b4
+  3 b:5: b5
+  3 b:6: b6
+
+  $ hg up -C 2
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat <<EOF >> b
+  > b4
+  > c
+  > b5
+  > EOF
+  $ hg ci -mb2.1 -d '2 0'
+  created new head
+  $ hg merge
+  merging b
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -mmergeb -d '3 0'
+
+annotate after merge
+(note: the first one falls back to the vanilla annotate which does not use linelog)
+
+  $ hg annotate -nf b --debug
+  fastannotate: b: rebuilding broken cache
+  fastannotate: b: 5 new changesets in the main branch
+  0 a: a
+  1 a: a
+  1 a: a
+  3 b: b4
+  4 b: c
+  3 b: b5
+
+(difference explained below)
+
+  $ hg annotate -nf b --debug
+  fastannotate: b: using fast path (resolved fctx: False)
+  0 a: a
+  1 a: a
+  1 a: a
+  4 b: b4
+  4 b: c
+  4 b: b5
+
+annotate after merge with -l
+(fastannotate differs from annotate)
+
+  $ hg log -Gp -T '{rev}:{node}' -r '2..5'
+  @    5:64afcdf8e29e063c635be123d8d2fb160af00f7e
+  |\
+  | o  4:5fbdc1152d97597717021ad9e063061b200f146bdiff --git a/b b/b
+  | |  --- a/b
+  | |  +++ b/b
+  | |  @@ -1,3 +1,6 @@
+  | |   a
+  | |   a
+  | |   a
+  | |  +b4
+  | |  +c
+  | |  +b5
+  | |
+  o |  3:37ec9f5c3d1f99572d7075971cb4876e2139b52fdiff --git a/b b/b
+  |/   --- a/b
+  |    +++ b/b
+  |    @@ -1,3 +1,6 @@
+  |     a
+  |     a
+  |     a
+  |    +b4
+  |    +b5
+  |    +b6
+  |
+  o  2:3086dbafde1ce745abfc8d2d367847280aabae9ddiff --git a/a b/b
+  |  copy from a
+  ~  copy to b
+  
+
+(in this case, "b4", "b5" could be considered introduced by either rev 3, or rev 4.
+ and that causes the rev number difference)
+
+  $ hg annotate -nlf b --config fastannotate.modes=
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+
+  $ hg annotate -nlf b
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  4 b:4: b4
+  4 b:5: c
+  4 b:6: b5
+
+  $ hg up -C 1
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg cp a b
+  $ cat <<EOF > b
+  > a
+  > z
+  > a
+  > EOF
+  $ hg ci -mc -d '3 0'
+  created new head
+  $ hg merge
+  merging b
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ cat <<EOF >> b
+  > b4
+  > c
+  > b5
+  > EOF
+  $ echo d >> b
+  $ hg ci -mmerge2 -d '4 0'
+
+annotate after rename merge
+
+  $ hg annotate -nf b
+  0 a: a
+  6 b: z
+  1 a: a
+  3 b: b4
+  4 b: c
+  3 b: b5
+  7 b: d
+
+annotate after rename merge with -l
+(fastannotate differs from annotate)
+
+  $ hg log -Gp -T '{rev}:{node}' -r '0+1+6+7'
+  @    7:6284bb6c38fef984a929862a53bbc71ce9eafa81diff --git a/b b/b
+  |\   --- a/b
+  | :  +++ b/b
+  | :  @@ -1,3 +1,7 @@
+  | :   a
+  | :   z
+  | :   a
+  | :  +b4
+  | :  +c
+  | :  +b5
+  | :  +d
+  | :
+  o :  6:b80e3e32f75a6a67cd4ac85496a11511e9112816diff --git a/a b/b
+  :/   copy from a
+  :    copy to b
+  :    --- a/a
+  :    +++ b/b
+  :    @@ -1,3 +1,3 @@
+  :    -a (?)
+  :     a
+  :    +z
+  :     a
+  :    -a (?)
+  :
+  o  1:762f04898e6684ff713415f7b8a8d53d33f96c92diff --git a/a b/a
+  |  --- a/a
+  |  +++ b/a
+  |  @@ -1,1 +1,3 @@
+  |   a
+  |  +a
+  |  +a
+  |
+  o  0:8435f90966e442695d2ded29fdade2bac5ad8065diff --git a/a b/a
+     new file mode 100644
+     --- /dev/null
+     +++ b/a
+     @@ -0,0 +1,1 @@
+     +a
+  
+
+(note on question marks:
+ the upstream bdiff change (96f2f50d923f+3633403888ae+8c0c75aa3ff4+5c4e2636c1a9
+ +38ed54888617) alters the output so deletion is not always at the end of the
+ output. for example:
+ | a | b | old | new | # old: e1d6aa0e4c3a, new: 8836f13e3c5b
+ |-------------------|
+ | a | a |  a  | -a  |
+ | a | z | +z  |  a  |
+ | a | a |  a  | +z  |
+ |   |   | -a  |  a  |
+ |-------------------|
+ | a | a |     a     |
+ | a | a |     a     |
+ | a |   |    -a     |
+ this leads to more question marks below)
+
+(rev 1 adds two "a"s and rev 6 deletes one "a".
+ the "a" that rev 6 deletes could be either the first or the second "a" of those two "a"s added by rev 1.
+ and that causes the line number difference)
+
+  $ hg annotate -nlf b --config fastannotate.modes=
+  0 a:1: a
+  6 b:2: z
+  1 a:3: a
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+  7 b:7: d
+
+  $ hg annotate -nlf b
+  0 a:1: a (?)
+  1 a:2: a (?)
+  6 b:2: z
+  1 a:2: a (?)
+  1 a:3: a (?)
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+  7 b:7: d
+
+Issue2807: alignment of line numbers with -l
+(fastannotate differs from annotate, same reason as above)
+
+  $ echo more >> b
+  $ hg ci -mmore -d '5 0'
+  $ echo more >> b
+  $ hg ci -mmore -d '6 0'
+  $ echo more >> b
+  $ hg ci -mmore -d '7 0'
+  $ hg annotate -nlf b
+   0 a: 1: a (?)
+   1 a: 2: a (?)
+   6 b: 2: z
+   1 a: 2: a (?)
+   1 a: 3: a (?)
+   3 b: 4: b4
+   4 b: 5: c
+   3 b: 5: b5
+   7 b: 7: d
+   8 b: 8: more
+   9 b: 9: more
+  10 b:10: more
+
+linkrev vs rev
+
+  $ hg annotate -r tip -n a
+  0: a
+  1: a
+  1: a
+
+linkrev vs rev with -l
+
+  $ hg annotate -r tip -nl a
+  0:1: a
+  1:2: a
+  1:3: a
+
+Issue589: "undelete" sequence leads to crash
+
+annotate was crashing when trying to --follow something
+
+like A -> B -> A
+
+generate ABA rename configuration
+
+  $ echo foo > foo
+  $ hg add foo
+  $ hg ci -m addfoo
+  $ hg rename foo bar
+  $ hg ci -m renamefoo
+  $ hg rename bar foo
+  $ hg ci -m renamebar
+
+annotate after ABA with follow
+
+  $ hg annotate --follow foo
+  foo: foo
+
+missing file
+
+  $ hg ann nosuchfile
+  abort: nosuchfile: no such file in rev e9e6b4fa872f
+  [255]
+
+annotate file without '\n' on last line
+
+  $ printf "" > c
+  $ hg ci -A -m test -u nobody -d '1 0'
+  adding c
+  $ hg annotate c
+  $ printf "a\nb" > c
+  $ hg ci -m test
+  $ hg annotate c
+  [0-9]+: a (re)
+  [0-9]+: b (re)
+
+Issue3841: check annotation of the file of which filelog includes
+merging between the revision and its ancestor
+
+to reproduce the situation with recent Mercurial, this script uses (1)
+"hg debugsetparents" to merge without ancestor check by "hg merge",
+and (2) the extension to allow filelog merging between the revision
+and its ancestor by overriding "repo._filecommit".
+
+  $ cat > ../legacyrepo.py <<EOF
+  > from mercurial import node, error
+  > def reposetup(ui, repo):
+  >     class legacyrepo(repo.__class__):
+  >         def _filecommit(self, fctx, manifest1, manifest2,
+  >                         linkrev, tr, changelist):
+  >             fname = fctx.path()
+  >             text = fctx.data()
+  >             flog = self.file(fname)
+  >             fparent1 = manifest1.get(fname, node.nullid)
+  >             fparent2 = manifest2.get(fname, node.nullid)
+  >             meta = {}
+  >             copy = fctx.renamed()
+  >             if copy and copy[0] != fname:
+  >                 raise error.Abort('copying is not supported')
+  >             if fparent2 != node.nullid:
+  >                 changelist.append(fname)
+  >                 return flog.add(text, meta, tr, linkrev,
+  >                                 fparent1, fparent2)
+  >             raise error.Abort('only merging is supported')
+  >     repo.__class__ = legacyrepo
+  > EOF
+
+  $ cat > baz <<EOF
+  > 1
+  > 2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg add baz
+  $ hg commit -m "baz:0"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg commit -m "baz:1"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg debugsetparents 17 17
+  $ hg --config extensions.legacyrepo=../legacyrepo.py  commit -m "baz:2"
+  $ hg debugindexdot .hg/store/data/baz.i
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	1 -> 2
+  	1 -> 2
+  }
+  $ hg annotate baz
+  17: 1 baz:1
+  18: 2 baz:2
+  16: 3
+  16: 4
+  16: 5
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3 baz:3
+  > 4
+  > 5
+  > EOF
+  $ hg commit -m "baz:3"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3 baz:3
+  > 4 baz:4
+  > 5
+  > EOF
+  $ hg debugsetparents 19 18
+  $ hg --config extensions.legacyrepo=../legacyrepo.py  commit -m "baz:4"
+  $ hg debugindexdot .hg/store/data/baz.i
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	1 -> 2
+  	1 -> 2
+  	2 -> 3
+  	3 -> 4
+  	2 -> 4
+  }
+  $ hg annotate baz
+  17: 1 baz:1
+  18: 2 baz:2
+  19: 3 baz:3
+  20: 4 baz:4
+  16: 5
+
+annotate clean file
+
+  $ hg annotate -ncr "wdir()" foo
+  11 472b18db256d : foo
+
+annotate modified file
+
+  $ echo foofoo >> foo
+  $ hg annotate -r "wdir()" foo
+  11 : foo
+  20+: foofoo
+
+  $ hg annotate -cr "wdir()" foo
+  472b18db256d : foo
+  b6bedd5477e7+: foofoo
+
+  $ hg annotate -ncr "wdir()" foo
+  11 472b18db256d : foo
+  20 b6bedd5477e7+: foofoo
+
+  $ hg annotate --debug -ncr "wdir()" foo
+  11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
+  20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
+
+  $ hg annotate -udr "wdir()" foo
+  test Thu Jan 01 00:00:00 1970 +0000: foo
+  test [A-Za-z0-9:+ ]+: foofoo (re)
+
+  $ hg annotate -ncr "wdir()" -Tjson foo
+  [
+   {
+    "abspath": "foo",
+    "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
+    "path": "foo"
+   }
+  ]
+
+annotate added file
+
+  $ echo bar > bar
+  $ hg add bar
+  $ hg annotate -ncr "wdir()" bar
+  20 b6bedd5477e7+: bar
+
+annotate renamed file
+
+  $ hg rename foo renamefoo2
+  $ hg annotate -ncr "wdir()" renamefoo2
+  11 472b18db256d : foo
+  20 b6bedd5477e7+: foofoo
+
+annotate missing file
+
+  $ rm baz
+  $ hg annotate -ncr "wdir()" baz
+  abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
+  abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
+  [255]
+
+annotate removed file
+
+  $ hg rm baz
+  $ hg annotate -ncr "wdir()" baz
+  abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
+  abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
+  [255]
+
+Test annotate with whitespace options
+
+  $ cd ..
+  $ hg init repo-ws
+  $ cd repo-ws
+  $ cat > a <<EOF
+  > aa
+  > 
+  > b b
+  > EOF
+  $ hg ci -Am "adda"
+  adding a
+  $ sed 's/EOL$//g' > a <<EOF
+  > a  a
+  > 
+  >  EOL
+  > b  b
+  > EOF
+  $ hg ci -m "changea"
+
+Annotate with no option
+
+  $ hg annotate a
+  1: a  a
+  0: 
+  1:  
+  1: b  b
+
+Annotate with --ignore-space-change
+
+  $ hg annotate --ignore-space-change a
+  1: a  a
+  1: 
+  0:  
+  0: b  b
+
+Annotate with --ignore-all-space
+
+  $ hg annotate --ignore-all-space a
+  0: a  a
+  0: 
+  1:  
+  0: b  b
+
+Annotate with --ignore-blank-lines (similar to no options case)
+
+  $ hg annotate --ignore-blank-lines a
+  1: a  a
+  0: 
+  1:  
+  1: b  b
+
+  $ cd ..
+
+Annotate with linkrev pointing to another branch
+------------------------------------------------
+
+create history with a filerev whose linkrev points to another branch
+
+  $ hg init branchedlinkrev
+  $ cd branchedlinkrev
+  $ echo A > a
+  $ hg commit -Am 'contentA'
+  adding a
+  $ echo B >> a
+  $ hg commit -m 'contentB'
+  $ hg up --rev 'desc(contentA)'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo unrelated > unrelated
+  $ hg commit -Am 'unrelated'
+  adding unrelated
+  created new head
+  $ hg graft -r 'desc(contentB)'
+  grafting 1:fd27c222e3e6 "contentB"
+  $ echo C >> a
+  $ hg commit -m 'contentC'
+  $ echo W >> a
+  $ hg log -G
+  @  changeset:   4:072f1e8df249
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     contentC
+  |
+  o  changeset:   3:ff38df03cc4b
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     contentB
+  |
+  o  changeset:   2:62aaf3f6fc06
+  |  parent:      0:f0932f74827e
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     unrelated
+  |
+  | o  changeset:   1:fd27c222e3e6
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     contentB
+  |
+  o  changeset:   0:f0932f74827e
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     contentA
+  
+
+Annotate should list ancestor of starting revision only
+
+  $ hg annotate a
+  0: A
+  3: B
+  4: C
+
+  $ hg annotate a -r 'wdir()'
+  0 : A
+  3 : B
+  4 : C
+  4+: W
+
+Even when the starting revision is the linkrev-shadowed one:
+
+  $ hg annotate a -r 3
+  0: A
+  3: B
+
+  $ cd ..
+
+Issue5360: Deleted chunk in p1 of a merge changeset
+
+  $ hg init repo-5360
+  $ cd repo-5360
+  $ echo 1 > a
+  $ hg commit -A a -m 1
+  $ echo 2 >> a
+  $ hg commit -m 2
+  $ echo a > a
+  $ hg commit -m a
+  $ hg update '.^' -q
+  $ echo 3 >> a
+  $ hg commit -m 3 -q
+  $ hg merge 2 -q
+  $ cat > a << EOF
+  > b
+  > 1
+  > 2
+  > 3
+  > a
+  > EOF
+  $ hg resolve --mark -q
+  $ hg commit -m m
+  $ hg annotate a
+  4: b
+  0: 1
+  1: 2
+  3: 3
+  2: a
+
+  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-perfhack.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,182 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > perfhack=1
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+  $ hg init repo
+  $ cd repo
+
+a simple merge case
+
+  $ echo 1 > a
+  $ hg commit -qAm 'append 1'
+  $ echo 2 >> a
+  $ hg commit -m 'append 2'
+  $ echo 3 >> a
+  $ hg commit -m 'append 3'
+  $ hg up 1 -q
+  $ cat > a << EOF
+  > 0
+  > 1
+  > 2
+  > EOF
+  $ hg commit -qm 'insert 0'
+  $ hg merge 2 -q
+  $ echo 4 >> a
+  $ hg commit -m merge
+  $ hg log -G -T '{rev}: {desc}'
+  @    4: merge
+  |\
+  | o  3: insert 0
+  | |
+  o |  2: append 3
+  |/
+  o  1: append 2
+  |
+  o  0: append 1
+  
+  $ hg fastannotate a
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 0 a
+  0: 1
+  $ hg fastannotate -r 1 a
+  0: 1
+  1: 2
+  $ hg fastannotate -udnclf a
+  test 3 d641cb51f61e Thu Jan 01 00:00:00 1970 +0000 a:1: 0
+  test 0 4994017376d3 Thu Jan 01 00:00:00 1970 +0000 a:1: 1
+  test 1 e940cb6d9a06 Thu Jan 01 00:00:00 1970 +0000 a:2: 2
+  test 2 26162a884ba6 Thu Jan 01 00:00:00 1970 +0000 a:3: 3
+  test 4 3ad7bcd2815f Thu Jan 01 00:00:00 1970 +0000 a:5: 4
+  $ hg fastannotate --linear a
+  3: 0
+  0: 1
+  1: 2
+  4: 3
+  4: 4
+
+incrementally updating
+
+  $ hg fastannotate -r 0 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  $ hg fastannotate -r 0 a --debug --rebuild
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 3 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+
+rebuild happens automatically if unable to update
+
+  $ hg fastannotate -r 2 a --debug
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+
+config option "fastannotate.mainbranch"
+
+  $ hg fastannotate -r 1 --rebuild --config fastannotate.mainbranch=tip a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+
+rename
+
+  $ hg mv a b
+  $ cat > b << EOF
+  > 0
+  > 11
+  > 3
+  > 44
+  > EOF
+  $ hg commit -m b -q
+  $ hg fastannotate -ncf --long-hash b
+  3 d641cb51f61e331c44654104301f8154d7865c89 a: 0
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 11
+  2 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a: 3
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 44
+  $ hg fastannotate -r 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a
+  0: 1
+  1: 2
+  2: 3
+
+fastannotate --deleted
+
+  $ hg fastannotate --deleted -nf b
+  3 a:  0
+  5 b:  11
+  0 a: -1
+  1 a: -2
+  2 a:  3
+  5 b:  44
+  4 a: -4
+  $ hg fastannotate --deleted -r 3 -nf a
+  3 a:  0
+  0 a:  1
+  1 a:  2
+
+file and directories with ".l", ".m" suffixes
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ mkdir a.l b.m c.lock a.l.hg b.hg
+  $ for i in a b c d d.l d.m a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a; do
+  >   echo $i > $i
+  > done
+  $ hg add . -q
+  $ hg commit -m init
+  $ hg fastannotate a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a d.l d.m a b c d
+  0: a
+  0: a.l.hg/a
+  0: a.l/a
+  0: b
+  0: b.hg/a
+  0: b.m/a
+  0: c
+  0: c.lock/a
+  0: d
+  0: d.l
+  0: d.m
+
+empty file
+
+  $ touch empty
+  $ hg commit -A empty -m empty
+  $ hg fastannotate empty
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-protocol.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,215 @@
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > ssh = $PYTHON "$TESTDIR/dummyssh"
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > mainbranch=@
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+setup the server repo
+
+  $ hg init repo-server
+  $ cd repo-server
+  $ cat >> .hg/hgrc << EOF
+  > [fastannotate]
+  > server=1
+  > EOF
+  $ for i in 1 2 3 4; do
+  >   echo $i >> a
+  >   hg commit -A -m $i a
+  > done
+  $ [ -d .hg/fastannotate ]
+  [1]
+  $ hg bookmark @
+  $ cd ..
+
+setup the local repo
+
+  $ hg clone 'ssh://user@dummy/repo-server' repo-local -q
+  $ cd repo-local
+  $ cat >> .hg/hgrc << EOF
+  > [fastannotate]
+  > client=1
+  > clientfetchthreshold=0
+  > EOF
+  $ [ -d .hg/fastannotate ]
+  [1]
+  $ hg fastannotate a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: server returned
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+
+the cache could be reused and no download is necessary
+
+  $ hg fastannotate a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+
+if the client agrees where the head of the master branch is, no re-download
+happens even if the client has more commits
+
+  $ echo 5 >> a
+  $ hg commit -m 5
+  $ hg bookmark -r 3 @ -f
+  $ hg fastannotate a --debug
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the client has a different "@" (head of the master branch) and "@" is ahead
+of the server, the server can detect things are unchanged and does not return
+full contents (note that there is no "writing ... to fastannotate"), but the
+client can also build things up on its own (causing diverge)
+
+  $ hg bookmark -r 4 @ -f
+  $ hg fastannotate a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: server returned
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the client has a different "@" which is behind the server, no download is
+necessary
+
+  $ hg fastannotate a --debug --config fastannotate.mainbranch=2
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+define fastannotate on-disk paths
+
+  $ p1=.hg/fastannotate/default
+  $ p2=../repo-server/.hg/fastannotate/default
+
+revert bookmark change so the client is behind the server
+
+  $ hg bookmark -r 2 @ -f
+
+in the "fctx" mode with the "annotate" command, the client also downloads the
+cache. but not in the (default) "fastannotate" mode.
+
+  $ rm $p1/a.l $p1/a.m
+  $ hg annotate a --debug | grep 'fastannotate: writing'
+  [1]
+  $ hg annotate a --config fastannotate.modes=fctx --debug | grep 'fastannotate: writing' | sort
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+
+the fastannotate cache (built server-side, downloaded client-side) in two repos
+have the same content (because the client downloads from the server)
+
+  $ diff $p1/a.l $p2/a.l
+  $ diff $p1/a.m $p2/a.m
+
+in the "fctx" mode, the client could also build the cache locally
+
+  $ hg annotate a --config fastannotate.modes=fctx --debug --config fastannotate.mainbranch=4 | grep fastannotate
+  fastannotate: requesting 1 files
+  fastannotate: server returned
+  fastannotate: a: 1 new changesets in the main branch
+
+the server would rebuild broken cache automatically
+
+  $ cp $p2/a.m $p2/a.m.bak
+  $ echo BROKEN1 > $p1/a.m
+  $ echo BROKEN2 > $p2/a.m
+  $ hg fastannotate a --debug | grep 'fastannotate: writing' | sort
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+  $ diff $p1/a.m $p2/a.m
+  $ diff $p2/a.m $p2/a.m.bak
+
+use the "debugbuildannotatecache" command to build annotate cache
+
+  $ rm -rf $p1 $p2
+  $ hg --cwd ../repo-server debugbuildannotatecache a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  $ hg --cwd ../repo-local debugbuildannotatecache a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: server returned
+  fastannotate: writing * (glob)
+  fastannotate: writing * (glob)
+  $ diff $p1/a.l $p2/a.l
+  $ diff $p1/a.m $p2/a.m
+
+with the clientfetchthreshold config option, the client can build up the cache
+without downloading from the server
+
+  $ rm -rf $p1
+  $ hg fastannotate a --debug --config fastannotate.clientfetchthreshold=10
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the fastannotate directory is not writable, the fctx mode still works
+
+  $ rm -rf $p1
+  $ touch $p1
+  $ hg annotate a --debug --traceback --config fastannotate.modes=fctx
+  fastannotate: a: cache broken and deleted
+  fastannotate: prefetch failed: * (glob)
+  fastannotate: a: cache broken and deleted
+  fastannotate: falling back to the vanilla annotate: * (glob)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+with serverbuildondemand=False, the server will not build anything
+
+  $ cat >> ../repo-server/.hg/hgrc <<EOF
+  > [fastannotate]
+  > serverbuildondemand=False
+  > EOF
+  $ rm -rf $p1 $p2
+  $ hg fastannotate a --debug | grep 'fastannotate: writing'
+  [1]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-renames.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,168 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > mainbranch=main
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+add or rename files on top of the master branch
+
+  $ echo a1 > a
+  $ echo b1 > b
+  $ hg commit -qAm 1
+  $ hg bookmark -i main
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: 1 new changesets in the main branch
+  0 b: b1
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: 1 new changesets in the main branch
+  0 a: a1
+  $ echo a2 >> a
+  $ cat > b << EOF
+  > b0
+  > b1
+  > EOF
+  $ hg mv a t
+  $ hg mv b a
+  $ hg mv t b
+  $ hg commit -m 'swap names'
+
+existing linelogs are not helpful with such renames in side branches
+
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: linelog cannot help in annotating this revision
+  0 a: a1
+  1 b: a2
+
+move main branch forward, rebuild should happen
+
+  $ hg bookmark -i main -r . -q
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: cache broken and deleted
+  fastannotate: b: 2 new changesets in the main branch
+  0 a: a1
+  1 b: a2
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+
+for rev 0, the existing linelog is still useful for a, but not for b
+
+  $ hg fastannotate --debug -nf a -r 0
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  $ hg fastannotate --debug -nf b -r 0
+  fastannotate: b: linelog cannot help in annotating this revision
+  0 b: b1
+
+a rebuild can also be triggered if "the main branch last time" mismatches
+
+  $ echo a3 >> a
+  $ hg commit -m a3
+  $ cat >> b << EOF
+  > b3
+  > b4
+  > EOF
+  $ hg commit -m b4
+  $ hg bookmark -i main -q
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: using fast path (resolved fctx: True)
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+linelog can be updated without being helpful
+
+  $ hg mv a t
+  $ hg mv b a
+  $ hg mv t b
+  $ hg commit -m 'swap names again'
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: 1 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+move main branch forward again, rebuilds are one-time
+
+  $ hg bookmark -i main -q
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 4 new changesets in the main branch
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: cache broken and deleted
+  fastannotate: b: 4 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: using fast path (resolved fctx: True)
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+list changeset hashes to improve readability
+
+  $ hg log -T '{rev}:{node}\n'
+  4:980e1ab8c516350172928fba95b49ede3b643dca
+  3:14e123fedad9f491f5dde0beca2a767625a0a93a
+  2:96495c41e4c12218766f78cdf244e768d7718b0f
+  1:35c2b781234c994896aba36bd3245d3104e023df
+  0:653e95416ebb5dbcc25bbc7f75568c9e01f7bd2f
+
+annotate a revision not in the linelog. linelog cannot be used, but does not get rebuilt either
+
+  $ hg fastannotate --debug -nf a -r 96495c41e4c12218766f78cdf244e768d7718b0f
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a -r 2
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a -r .
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+
+annotate an ancient revision where the path matches. linelog can be used
+
+  $ hg fastannotate --debug -nf a -r 0
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  $ hg fastannotate --debug -nf a -r 653e95416ebb5dbcc25bbc7f75568c9e01f7bd2f
+  fastannotate: a: using fast path (resolved fctx: False)
+  0 a: a1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-revmap.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,191 @@
+from __future__ import absolute_import, print_function
+
+import os
+import tempfile
+
+from mercurial import util
+from hgext.fastannotate import error, revmap
+
+def genhsh(i):
+    return chr(i) + b'\0' * 19
+
+def gettemppath():
+    fd, path = tempfile.mkstemp()
+    os.close(fd)
+    os.unlink(path)
+    return path
+
+def ensure(condition):
+    if not condition:
+        raise RuntimeError('Unexpected')
+
+def testbasicreadwrite():
+    path = gettemppath()
+
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 0)
+    for i in xrange(5):
+        ensure(rm.rev2hsh(i) is None)
+    ensure(rm.hsh2rev(b'\0' * 20) is None)
+
+    paths = ['', 'a', None, 'b', 'b', 'c', 'c', None, 'a', 'b', 'a', 'a']
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i]) == i)
+
+    ensure(rm.maxrev == 4)
+    for i in xrange(1, 5):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+
+    # re-load and verify
+    rm.flush()
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 4)
+    for i in xrange(1, 5):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+        ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
+
+    # append without calling save() explicitly
+    for i in xrange(5, 12):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i],
+                         flush=True) == i)
+
+    # re-load and verify
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 11)
+    for i in xrange(1, 12):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+        ensure(rm.rev2path(i) == paths[i] or paths[i - 1])
+        ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
+
+    os.unlink(path)
+
+    # missing keys
+    ensure(rm.rev2hsh(12) is None)
+    ensure(rm.rev2hsh(0) is None)
+    ensure(rm.rev2hsh(-1) is None)
+    ensure(rm.rev2flag(12) is None)
+    ensure(rm.rev2path(12) is None)
+    ensure(rm.hsh2rev(b'\1' * 20) is None)
+
+    # illformed hash (not 20 bytes)
+    try:
+        rm.append(b'\0')
+        ensure(False)
+    except Exception:
+        pass
+
+def testcorruptformat():
+    path = gettemppath()
+
+    # incorrect header
+    with open(path, 'w') as f:
+        f.write(b'NOT A VALID HEADER')
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    # rewrite the file
+    os.unlink(path)
+    rm = revmap.revmap(path)
+    rm.append(genhsh(0), flush=True)
+
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 1)
+
+    # corrupt the file by appending a byte
+    size = os.stat(path).st_size
+    with open(path, 'a') as f:
+        f.write('\xff')
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    # corrupt the file by removing the last byte
+    ensure(size > 0)
+    with open(path, 'w') as f:
+        f.truncate(size - 1)
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    os.unlink(path)
+
+def testcopyfrom():
+    path = gettemppath()
+    rm = revmap.revmap(path)
+    for i in xrange(1, 10):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=str(i // 3)) == i)
+    rm.flush()
+
+    # copy rm to rm2
+    rm2 = revmap.revmap()
+    rm2.copyfrom(rm)
+    path2 = gettemppath()
+    rm2.path = path2
+    rm2.flush()
+
+    # two files should be the same
+    ensure(len(set(util.readfile(p) for p in [path, path2])) == 1)
+
+    os.unlink(path)
+    os.unlink(path2)
+
+class fakefctx(object):
+    def __init__(self, node, path=None):
+        self._node = node
+        self._path = path
+
+    def node(self):
+        return self._node
+
+    def path(self):
+        return self._path
+
+def testcontains():
+    path = gettemppath()
+
+    rm = revmap.revmap(path)
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1)) == i)
+
+    for i in xrange(1, 5):
+        ensure(((genhsh(i), None) in rm) == ((i & 1) == 0))
+        ensure((fakefctx(genhsh(i)) in rm) == ((i & 1) == 0))
+    for i in xrange(5, 10):
+        ensure(fakefctx(genhsh(i)) not in rm)
+        ensure((genhsh(i), None) not in rm)
+
+    # "contains" checks paths
+    rm = revmap.revmap()
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), path=str(i // 2)) == i)
+    for i in xrange(1, 5):
+        ensure(fakefctx(genhsh(i), path=str(i // 2)) in rm)
+        ensure(fakefctx(genhsh(i), path='a') not in rm)
+
+def testlastnode():
+    path = gettemppath()
+    ensure(revmap.getlastnode(path) is None)
+    rm = revmap.revmap(path)
+    ensure(revmap.getlastnode(path) is None)
+    for i in xrange(1, 10):
+        hsh = genhsh(i)
+        rm.append(hsh, path=str(i // 2), flush=True)
+        ensure(revmap.getlastnode(path) == hsh)
+        rm2 = revmap.revmap(path)
+        ensure(rm2.rev2hsh(rm2.maxrev) == hsh)
+
+testbasicreadwrite()
+testcorruptformat()
+testcopyfrom()
+testcontains()
+testlastnode()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,263 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+  $ hg init repo
+  $ cd repo
+
+a simple merge case
+
+  $ echo 1 > a
+  $ hg commit -qAm 'append 1'
+  $ echo 2 >> a
+  $ hg commit -m 'append 2'
+  $ echo 3 >> a
+  $ hg commit -m 'append 3'
+  $ hg up 1 -q
+  $ cat > a << EOF
+  > 0
+  > 1
+  > 2
+  > EOF
+  $ hg commit -qm 'insert 0'
+  $ hg merge 2 -q
+  $ echo 4 >> a
+  $ hg commit -m merge
+  $ hg log -G -T '{rev}: {desc}'
+  @    4: merge
+  |\
+  | o  3: insert 0
+  | |
+  o |  2: append 3
+  |/
+  o  1: append 2
+  |
+  o  0: append 1
+  
+  $ hg fastannotate a
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 0 a
+  0: 1
+  $ hg fastannotate -r 1 a
+  0: 1
+  1: 2
+  $ hg fastannotate -udnclf a
+  test 3 d641cb51f61e Thu Jan 01 00:00:00 1970 +0000 a:1: 0
+  test 0 4994017376d3 Thu Jan 01 00:00:00 1970 +0000 a:1: 1
+  test 1 e940cb6d9a06 Thu Jan 01 00:00:00 1970 +0000 a:2: 2
+  test 2 26162a884ba6 Thu Jan 01 00:00:00 1970 +0000 a:3: 3
+  test 4 3ad7bcd2815f Thu Jan 01 00:00:00 1970 +0000 a:5: 4
+  $ hg fastannotate --linear a
+  3: 0
+  0: 1
+  1: 2
+  4: 3
+  4: 4
+
+incrementally updating
+
+  $ hg fastannotate -r 0 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  $ hg fastannotate -r 0 a --debug --rebuild
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 3 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+
+rebuild happens automatically if unable to update
+
+  $ hg fastannotate -r 2 a --debug
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+
+config option "fastannotate.mainbranch"
+
+  $ hg fastannotate -r 1 --rebuild --config fastannotate.mainbranch=tip a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+
+config option "fastannotate.modes"
+
+  $ hg annotate -r 1 --debug a
+  0: 1
+  1: 2
+  $ hg annotate --config fastannotate.modes=fctx -r 1 --debug a
+  fastannotate: a: using fast path (resolved fctx: False)
+  0: 1
+  1: 2
+  $ hg fastannotate --config fastannotate.modes=fctx -h -q
+  hg: unknown command 'fastannotate'
+  (did you mean *) (glob)
+  [255]
+
+rename
+
+  $ hg mv a b
+  $ cat > b << EOF
+  > 0
+  > 11
+  > 3
+  > 44
+  > EOF
+  $ hg commit -m b -q
+  $ hg fastannotate -ncf --long-hash b
+  3 d641cb51f61e331c44654104301f8154d7865c89 a: 0
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 11
+  2 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a: 3
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 44
+  $ hg fastannotate -r 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a
+  0: 1
+  1: 2
+  2: 3
+
+fastannotate --deleted
+
+  $ hg fastannotate --deleted -nf b
+  3 a:  0
+  5 b:  11
+  0 a: -1
+  1 a: -2
+  2 a:  3
+  5 b:  44
+  4 a: -4
+  $ hg fastannotate --deleted -r 3 -nf a
+  3 a:  0
+  0 a:  1
+  1 a:  2
+
+files and directories with ".l", ".m" suffixes
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ mkdir a.l b.m c.lock a.l.hg b.hg
+  $ for i in a b c d d.l d.m a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a; do
+  >   echo $i > $i
+  > done
+  $ hg add . -q
+  $ hg commit -m init
+  $ hg fastannotate a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a d.l d.m a b c d
+  0: a
+  0: a.l.hg/a
+  0: a.l/a
+  0: b
+  0: b.hg/a
+  0: b.m/a
+  0: c
+  0: c.lock/a
+  0: d
+  0: d.l
+  0: d.m
+
+empty file
+
+  $ touch empty
+  $ hg commit -A empty -m empty
+  $ hg fastannotate empty
+
+json format
+
+  $ hg fastannotate -Tjson -cludn b a empty
+  [
+   {
+    "date": [0.0, 0],
+    "line": "a\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   },
+   {
+    "date": [0.0, 0],
+    "line": "b\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   }
+  ]
+
+  $ hg fastannotate -Tjson -cludn empty
+  [
+  ]
+  $ hg fastannotate -Tjson --no-content -n a
+  [
+   {
+    "rev": 0
+   }
+  ]
+
+working copy
+
+  $ echo a >> a
+  $ hg fastannotate -r 'wdir()' a
+  abort: cannot update linelog to wdir()
+  (set fastannotate.mainbranch)
+  [255]
+  $ cat >> $HGRCPATH << EOF
+  > [fastannotate]
+  > mainbranch = .
+  > EOF
+  $ hg fastannotate -r 'wdir()' a
+  0 : a
+  1+: a
+  $ hg fastannotate -cludn -r 'wdir()' a
+  test 0 1fd620b16252  Thu Jan 01 00:00:00 1970 +0000:1: a
+  test 1 720582f5bdb6+ *:2: a (glob)
+  $ hg fastannotate -cludn -r 'wdir()' -Tjson a
+  [
+   {
+    "date": [0.0, 0],
+    "line": "a\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   },
+   {
+    "date": [*, 0], (glob)
+    "line": "a\n",
+    "line_number": 2,
+    "node": null,
+    "rev": null,
+    "user": "test"
+   }
+  ]
--- a/tests/test-fileset.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-fileset.t	Fri Aug 24 12:55:05 2018 -0700
@@ -18,13 +18,19 @@
 
   $ fileset -v a1
   (symbol 'a1')
+  * matcher:
+  <patternmatcher patterns='(?:a1$)'>
   a1
   $ fileset -v 'a*'
   (symbol 'a*')
+  * matcher:
+  <patternmatcher patterns='(?:a[^/]*$)'>
   a1
   a2
   $ fileset -v '"re:a\d"'
   (string 're:a\\d')
+  * matcher:
+  <patternmatcher patterns='(?:a\\d)'>
   a1
   a2
   $ fileset -v '!re:"a\d"'
@@ -32,6 +38,10 @@
     (kindpat
       (symbol 're')
       (string 'a\\d')))
+  * matcher:
+  <predicatenmatcher
+    pred=<not
+      <patternmatcher patterns='(?:a\\d)'>>>
   b1
   b2
   $ fileset -v 'path:a1 or glob:b?'
@@ -42,10 +52,12 @@
     (kindpat
       (symbol 'glob')
       (symbol 'b?')))
+  * matcher:
+  <patternmatcher patterns='(?:a1(?:/|$)|b.$)'>
   a1
   b1
   b2
-  $ fileset -v 'a1 or a2'
+  $ fileset -v --no-show-matcher 'a1 or a2'
   (or
     (symbol 'a1')
     (symbol 'a2'))
@@ -97,6 +109,15 @@
       None))
   hg: parse error: can't use negate operator in this context
   [255]
+  $ fileset -p parsed 'a, b, c'
+  * parsed:
+  (list
+    (symbol 'a')
+    (symbol 'b')
+    (symbol 'c'))
+  hg: parse error: can't use a list in this context
+  (see 'hg help "filesets.x or y"')
+  [255]
 
   $ fileset '"path":.'
   hg: parse error: not a symbol
@@ -114,6 +135,183 @@
   hg: parse error: invalid pattern kind: foo
   [255]
 
+Show parsed tree at stages:
+
+  $ fileset -p unknown a
+  abort: invalid stage name: unknown
+  [255]
+
+  $ fileset -p parsed 'path:a1 or glob:b?'
+  * parsed:
+  (or
+    (kindpat
+      (symbol 'path')
+      (symbol 'a1'))
+    (kindpat
+      (symbol 'glob')
+      (symbol 'b?')))
+  a1
+  b1
+  b2
+
+  $ fileset -p all -s 'a1 or a2 or (grep("b") & clean())'
+  * parsed:
+  (or
+    (symbol 'a1')
+    (symbol 'a2')
+    (group
+      (and
+        (func
+          (symbol 'grep')
+          (string 'b'))
+        (func
+          (symbol 'clean')
+          None))))
+  * analyzed:
+  (or
+    (symbol 'a1')
+    (symbol 'a2')
+    (and
+      (func
+        (symbol 'grep')
+        (string 'b'))
+      (withstatus
+        (func
+          (symbol 'clean')
+          None)
+        (string 'clean'))))
+  * optimized:
+  (or
+    (patterns
+      (symbol 'a1')
+      (symbol 'a2'))
+    (and
+      (withstatus
+        (func
+          (symbol 'clean')
+          None)
+        (string 'clean'))
+      (func
+        (symbol 'grep')
+        (string 'b'))))
+  * matcher:
+  <unionmatcher matchers=[
+    <patternmatcher patterns='(?:a1$|a2$)'>,
+    <intersectionmatcher
+      m1=<predicatenmatcher pred=clean>,
+      m2=<predicatenmatcher pred=grep('b')>>]>
+  a1
+  a2
+  b1
+  b2
+
+Union of basic patterns:
+
+  $ fileset -p optimized -s -r. 'a1 or a2 or path:b1'
+  * optimized:
+  (patterns
+    (symbol 'a1')
+    (symbol 'a2')
+    (kindpat
+      (symbol 'path')
+      (symbol 'b1')))
+  * matcher:
+  <patternmatcher patterns='(?:a1$|a2$|b1(?:/|$))'>
+  a1
+  a2
+  b1
+
+OR expression should be reordered by weight:
+
+  $ fileset -p optimized -s -r. 'grep("a") or a1 or grep("b") or b2'
+  * optimized:
+  (or
+    (patterns
+      (symbol 'a1')
+      (symbol 'b2'))
+    (func
+      (symbol 'grep')
+      (string 'a'))
+    (func
+      (symbol 'grep')
+      (string 'b')))
+  * matcher:
+  <unionmatcher matchers=[
+    <patternmatcher patterns='(?:a1$|b2$)'>,
+    <predicatenmatcher pred=grep('a')>,
+    <predicatenmatcher pred=grep('b')>]>
+  a1
+  a2
+  b1
+  b2
+
+Use differencematcher for 'x and not y':
+
+  $ fileset -p optimized -s 'a* and not a1'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (symbol 'a1'))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<patternmatcher patterns='(?:a1$)'>>
+  a2
+
+  $ fileset -p optimized -s '!binary() and a*'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<predicatenmatcher pred=binary>>
+  a1
+  a2
+
+'x - y' is rewritten to 'x and not y' first so the operands can be reordered:
+
+  $ fileset -p analyzed -p optimized -s 'a* - a1'
+  * analyzed:
+  (and
+    (symbol 'a*')
+    (not
+      (symbol 'a1')))
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (symbol 'a1'))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<patternmatcher patterns='(?:a1$)'>>
+  a2
+
+  $ fileset -p analyzed -p optimized -s 'binary() - a*'
+  * analyzed:
+  (and
+    (func
+      (symbol 'binary')
+      None)
+    (not
+      (symbol 'a*')))
+  * optimized:
+  (and
+    (not
+      (symbol 'a*'))
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <intersectionmatcher
+    m1=<predicatenmatcher
+      pred=<not
+        <patternmatcher patterns='(?:a[^/]*$)'>>>,
+    m2=<predicatenmatcher pred=binary>>
+
 Test files status
 
   $ rm a1
@@ -180,6 +378,156 @@
   b2
   c1
 
+Test insertion of status hints
+
+  $ fileset -p optimized 'added()'
+  * optimized:
+  (withstatus
+    (func
+      (symbol 'added')
+      None)
+    (string 'added'))
+  c1
+
+  $ fileset -p optimized 'a* & removed()'
+  * optimized:
+  (and
+    (symbol 'a*')
+    (withstatus
+      (func
+        (symbol 'removed')
+        None)
+      (string 'removed')))
+  a2
+
+  $ fileset -p optimized 'a* - removed()'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (withstatus
+      (func
+        (symbol 'removed')
+        None)
+      (string 'removed')))
+  a1
+
+  $ fileset -p analyzed -p optimized '(added() + removed()) - a*'
+  * analyzed:
+  (and
+    (withstatus
+      (or
+        (func
+          (symbol 'added')
+          None)
+        (func
+          (symbol 'removed')
+          None))
+      (string 'added removed'))
+    (not
+      (symbol 'a*')))
+  * optimized:
+  (and
+    (not
+      (symbol 'a*'))
+    (withstatus
+      (or
+        (func
+          (symbol 'added')
+          None)
+        (func
+          (symbol 'removed')
+          None))
+      (string 'added removed')))
+  c1
+
+  $ fileset -p optimized 'a* + b* + added() + unknown()'
+  * optimized:
+  (withstatus
+    (or
+      (patterns
+        (symbol 'a*')
+        (symbol 'b*'))
+      (func
+        (symbol 'added')
+        None)
+      (func
+        (symbol 'unknown')
+        None))
+    (string 'added unknown'))
+  a1
+  a2
+  b1
+  b2
+  c1
+  c3
+
+  $ fileset -p analyzed -p optimized 'removed() & missing() & a*'
+  * analyzed:
+  (and
+    (withstatus
+      (and
+        (func
+          (symbol 'removed')
+          None)
+        (func
+          (symbol 'missing')
+          None))
+      (string 'removed missing'))
+    (symbol 'a*'))
+  * optimized:
+  (and
+    (symbol 'a*')
+    (withstatus
+      (and
+        (func
+          (symbol 'removed')
+          None)
+        (func
+          (symbol 'missing')
+          None))
+      (string 'removed missing')))
+
+  $ fileset -p optimized 'clean() & revs(0, added())'
+  * optimized:
+  (and
+    (withstatus
+      (func
+        (symbol 'clean')
+        None)
+      (string 'clean'))
+    (func
+      (symbol 'revs')
+      (list
+        (symbol '0')
+        (withstatus
+          (func
+            (symbol 'added')
+            None)
+          (string 'added')))))
+  b1
+
+  $ fileset -p optimized 'clean() & status(null, 0, b* & added())'
+  * optimized:
+  (and
+    (withstatus
+      (func
+        (symbol 'clean')
+        None)
+      (string 'clean'))
+    (func
+      (symbol 'status')
+      (list
+        (symbol 'null')
+        (symbol '0')
+        (and
+          (symbol 'b*')
+          (withstatus
+            (func
+              (symbol 'added')
+              None)
+            (string 'added'))))))
+  b1
+
 Test files properties
 
   >>> open('bin', 'wb').write(b'\0a') and None
@@ -194,6 +542,19 @@
   $ fileset 'binary()'
   bin
 
+  $ fileset -p optimized -s 'binary() and b*'
+  * optimized:
+  (and
+    (symbol 'b*')
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <intersectionmatcher
+    m1=<patternmatcher patterns='(?:b[^/]*$)'>,
+    m2=<predicatenmatcher pred=binary>>
+  bin
+
   $ fileset 'grep("b{1}")'
   .hgignore
   b1
@@ -231,7 +592,7 @@
   [255]
   $ fileset '(1k, 2k)'
   hg: parse error: can't use a list in this context
-  (see hg help "filesets.x or y")
+  (see 'hg help "filesets.x or y"')
   [255]
   $ fileset 'size(1k)'
   1k
--- a/tests/test-fix.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-fix.t	Fri Aug 24 12:55:05 2018 -0700
@@ -502,12 +502,13 @@
 
   $ cd ..
 
-When a fixer prints to stderr, we assume that it has failed. We should show the
-error messages to the user, and we should not let the failing fixer affect the
-file it was fixing (many code formatters might emit error messages on stderr
-and nothing on stdout, which would cause us the clear the file). We show the
-user which fixer failed and which revision, but we assume that the fixer will
-print the filename if it is relevant.
+When a fixer prints to stderr, we don't assume that it has failed. We show the
+error messages to the user, and we still let the fixer affect the file it was
+fixing if its exit code is zero. Some code formatters might emit error messages
+on stderr and nothing on stdout, which would cause us to clear the file,
+except that they also exit with a non-zero code. We show the user which fixer
+emitted the stderr, and which revision, but we assume that the fixer will print
+the filename if it is relevant (since the issue may be non-specific).
 
   $ hg init showstderr
   $ cd showstderr
@@ -515,17 +516,37 @@
   $ printf "hello\n" > hello.txt
   $ hg add
   adding hello.txt
-  $ cat >> $TESTTMP/cmd.sh <<'EOF'
+  $ cat > $TESTTMP/fail.sh <<'EOF'
   > printf 'HELLO\n'
   > printf "$@: some\nerror" >&2
+  > exit 0 # success despite the stderr output
   > EOF
-  $ hg --config "fix.fail:command=sh $TESTTMP/cmd.sh {rootpath}" \
+  $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
   >    --config "fix.fail:fileset=hello.txt" \
   >    fix --working-dir
   [wdir] fail: hello.txt: some
   [wdir] fail: error
   $ cat hello.txt
-  hello
+  HELLO
+
+  $ printf "goodbye\n" > hello.txt
+  $ cat > $TESTTMP/work.sh <<'EOF'
+  > printf 'GOODBYE\n'
+  > printf "$@: some\nerror\n" >&2
+  > exit 42 # failure despite the stdout output
+  > EOF
+  $ hg --config "fix.fail:command=sh $TESTTMP/work.sh {rootpath}" \
+  >    --config "fix.fail:fileset=hello.txt" \
+  >    fix --working-dir
+  [wdir] fail: hello.txt: some
+  [wdir] fail: error
+  $ cat hello.txt
+  goodbye
+
+  $ hg --config "fix.fail:command=exit 42" \
+  >    --config "fix.fail:fileset=hello.txt" \
+  >    fix --working-dir
+  [wdir] fail: exited with status 42
 
   $ cd ..
 
@@ -830,9 +851,9 @@
   
   $ hg fix -r 0:2
   $ hg log --graph --template '{node|shortest} {files}'
-  o  3801 bar.whole
+  o  b4e2 bar.whole
   |
-  o  38cc
+  o  59f4
   |
   | @  bc05 bar.whole
   | |
--- a/tests/test-fncache.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-fncache.t	Fri Aug 24 12:55:05 2018 -0700
@@ -88,6 +88,7 @@
   .hg/00manifest.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/data
@@ -121,6 +122,7 @@
   .hg/00changelog.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/dirstate
--- a/tests/test-generaldelta.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-generaldelta.t	Fri Aug 24 12:55:05 2018 -0700
@@ -267,7 +267,7 @@
        51       4        3       50    prev        356        594        611   1.02862       611         0    0.00000
        52       4        4       51      p1         58        640        669   1.04531       669         0    0.00000
        53       5        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       5        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       6        1       -1    base        369        640        369   0.57656       369         0    0.00000
   $ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes
   requesting all changes
   adding changesets
@@ -333,7 +333,7 @@
        51       2       13       17      p1         58        594        739   1.24411      2781      2042    2.76319
        52       5        1       -1    base        369        640        369   0.57656       369         0    0.00000
        53       6        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       6        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       7        1       -1    base        369        640        369   0.57656       369         0    0.00000
   $ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.generaldelta=yes
   requesting all changes
   adding changesets
@@ -399,4 +399,4 @@
        51       2       13       17      p1         58        594        739   1.24411      2642      1903    2.57510
        52       2       14       51      p1         58        640        797   1.24531      2700      1903    2.38770
        53       4        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       4        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       5        1       -1    base        369        640        369   0.57656       369         0    0.00000
--- a/tests/test-glog-beautifygraph.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-glog-beautifygraph.t	Fri Aug 24 12:55:05 2018 -0700
@@ -80,52 +80,8 @@
   >   hg commit -Aqd "$rev 0" -m "($rev) $msg"
   > }
 
-  $ cat > printrevset.py <<EOF
-  > from __future__ import absolute_import
-  > from mercurial import (
-  >   cmdutil,
-  >   commands,
-  >   extensions,
-  >   logcmdutil,
-  >   revsetlang,
-  >   smartset,
-  > )
-  > 
-  > from mercurial.utils import (
-  >   stringutil,
-  > )
-  > 
-  > def logrevset(repo, pats, opts):
-  >     revs = logcmdutil._initialrevs(repo, opts)
-  >     if not revs:
-  >         return None
-  >     match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
-  >     return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
-  > 
-  > def uisetup(ui):
-  >     def printrevset(orig, repo, pats, opts):
-  >         revs, filematcher = orig(repo, pats, opts)
-  >         if opts.get(b'print_revset'):
-  >             expr = logrevset(repo, pats, opts)
-  >             if expr:
-  >                 tree = revsetlang.parse(expr)
-  >                 tree = revsetlang.analyze(tree)
-  >             else:
-  >                 tree = []
-  >             ui = repo.ui
-  >             ui.write(b'%r\n' % (opts.get(b'rev', []),))
-  >             ui.write(revsetlang.prettyformat(tree) + b'\n')
-  >             ui.write(stringutil.prettyrepr(revs) + b'\n')
-  >             revs = smartset.baseset()  # display no revisions
-  >         return revs, filematcher
-  >     extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
-  >     aliases, entry = cmdutil.findcmd(b'log', commands.table)
-  >     entry[1].append((b'', b'print-revset', False,
-  >                      b'print generated revset and exit (DEPRECATED)'))
-  > EOF
-
   $ echo "[extensions]" >> $HGRCPATH
-  $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH
+  $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH
   $ echo "beautifygraph=" >> $HGRCPATH
 
 Set a default of narrow-text UTF-8.
@@ -2043,7 +1999,7 @@
     <spanset- 0:7>,
     <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>>
   $ testlog -r "sort(file('set:copied()'), -rev)"
-  ["sort(file('set:copied()'), -rev)"]
+  ['sort(file(\'set:copied()\'), -rev)']
   []
   <filteredset
     <fullreposet- 0:7>,
--- a/tests/test-glog.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-glog.t	Fri Aug 24 12:55:05 2018 -0700
@@ -81,49 +81,8 @@
   >   hg commit -Aqd "$rev 0" -m "($rev) $msg"
   > }
 
-  $ cat > printrevset.py <<EOF
-  > from __future__ import absolute_import
-  > from mercurial import (
-  >   cmdutil,
-  >   commands,
-  >   extensions,
-  >   logcmdutil,
-  >   revsetlang,
-  >   smartset,
-  > )
-  > from mercurial.utils import stringutil
-  > 
-  > def logrevset(repo, pats, opts):
-  >     revs = logcmdutil._initialrevs(repo, opts)
-  >     if not revs:
-  >         return None
-  >     match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
-  >     return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
-  > 
-  > def uisetup(ui):
-  >     def printrevset(orig, repo, pats, opts):
-  >         revs, filematcher = orig(repo, pats, opts)
-  >         if opts.get(b'print_revset'):
-  >             expr = logrevset(repo, pats, opts)
-  >             if expr:
-  >                 tree = revsetlang.parse(expr)
-  >                 tree = revsetlang.analyze(tree)
-  >             else:
-  >                 tree = []
-  >             ui = repo.ui
-  >             ui.write(b'%r\n' % (opts.get(b'rev', []),))
-  >             ui.write(revsetlang.prettyformat(tree) + b'\n')
-  >             ui.write(stringutil.prettyrepr(revs) + b'\n')
-  >             revs = smartset.baseset()  # display no revisions
-  >         return revs, filematcher
-  >     extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
-  >     aliases, entry = cmdutil.findcmd(b'log', commands.table)
-  >     entry[1].append((b'', b'print-revset', False,
-  >                      b'print generated revset and exit (DEPRECATED)'))
-  > EOF
-
   $ echo "[extensions]" >> $HGRCPATH
-  $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH
+  $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH
 
   $ hg init repo
   $ cd repo
@@ -1890,7 +1849,7 @@
     <spanset- 0:7>,
     <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>>
   $ testlog -r "sort(file('set:copied()'), -rev)"
-  ["sort(file('set:copied()'), -rev)"]
+  ['sort(file(\'set:copied()\'), -rev)']
   []
   <filteredset
     <fullreposet- 0:7>,
--- a/tests/test-graft.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-graft.t	Fri Aug 24 12:55:05 2018 -0700
@@ -237,7 +237,7 @@
   # To mark files as resolved:  hg resolve --mark FILE
   
   # To continue:    hg graft --continue
-  # To abort:       hg update --clean . (warning: this will discard uncommitted changes)
+  # To abort:       hg graft --abort
   
 
 Commit while interrupted should fail:
@@ -699,8 +699,24 @@
   summary:     2
   
 ... grafts of grafts unfortunately can't
-  $ hg graft -q 13
+  $ hg graft -q 13 --debug
+  scanning for duplicate grafts
+  grafting 13:7a4785234d87 "2"
+    searching for copies back to rev 12
+    unmatched files in other (from topological common ancestor):
+     g
+    unmatched files new in both:
+     b
+  resolving manifests
+   branchmerge: True, force: True, partial: False
+   ancestor: b592ea63bb0c, local: 7e61b508e709+, remote: 7a4785234d87
+  starting 4 threads for background file closing (?)
+  committing files:
+  b
   warning: can't find ancestor for 'b' copied from 'a'!
+  reusing manifest form p1 (listed files actually unchanged)
+  committing changelog
+  updating the branch cache
   $ hg log -r 'destination(13)'
 All copies of a cset
   $ hg log -r 'origin(13) or destination(origin(13))'
@@ -731,7 +747,7 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     2
   
-  changeset:   22:d1cb6591fa4b
+  changeset:   22:3a4e92d81b97
   branch:      dev
   tag:         tip
   user:        foo
@@ -743,8 +759,8 @@
 
   $ hg graft 'origin(13) or destination(origin(13))'
   skipping ancestor revision 21:7e61b508e709
-  skipping ancestor revision 22:d1cb6591fa4b
-  skipping revision 2:5c095ad7e90f (already grafted to 22:d1cb6591fa4b)
+  skipping ancestor revision 22:3a4e92d81b97
+  skipping revision 2:5c095ad7e90f (already grafted to 22:3a4e92d81b97)
   grafting 7:ef0ef43d49e7 "2"
   warning: can't find ancestor for 'b' copied from 'a'!
   grafting 13:7a4785234d87 "2"
@@ -758,7 +774,7 @@
   $ hg graft 19 0 6
   skipping ungraftable merge revision 6
   skipping ancestor revision 0:68795b066622
-  skipping already grafted revision 19:9627f653b421 (22:d1cb6591fa4b also has origin 2:5c095ad7e90f)
+  skipping already grafted revision 19:9627f653b421 (22:3a4e92d81b97 also has origin 2:5c095ad7e90f)
   [255]
   $ hg graft 19 0 6 --force
   skipping ungraftable merge revision 6
@@ -773,12 +789,12 @@
   $ hg ci -m 28
   $ hg backout 28
   reverting a
-  changeset 29:53177ba928f6 backs out changeset 28:50a516bb8b57
+  changeset 29:9d95e865b00c backs out changeset 28:cc20d29aec8d
   $ hg graft 28
-  skipping ancestor revision 28:50a516bb8b57
+  skipping ancestor revision 28:cc20d29aec8d
   [255]
   $ hg graft 28 --force
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   merging a
   $ cat a
   abc
@@ -788,7 +804,7 @@
   $ echo def > a
   $ hg ci -m 31
   $ hg graft 28 --force --tool internal:fail
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   abort: unresolved conflicts, can't continue
   (use 'hg resolve' and 'hg graft --continue')
   [255]
@@ -801,7 +817,7 @@
   (no more unresolved files)
   continue: hg graft --continue
   $ hg graft -c
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   $ cat a
   abc
 
@@ -822,8 +838,8 @@
   $ hg tag -f something
   $ hg graft -qr 27
   $ hg graft -f 27
-  grafting 27:ed6c7e54e319 "28"
-  note: graft of 27:ed6c7e54e319 created no changes to commit
+  grafting 27:17d42b8f5d50 "28"
+  note: graft of 27:17d42b8f5d50 created no changes to commit
 
   $ cd ..
 
--- a/tests/test-grep.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-grep.t	Fri Aug 24 12:55:05 2018 -0700
@@ -18,7 +18,7 @@
 pattern error
 
   $ hg grep '**test**'
-  grep: invalid match pattern: nothing to repeat
+  grep: invalid match pattern: nothing to repeat* (glob)
   [1]
 
 simple
@@ -491,3 +491,17 @@
   ]
 
   $ cd ..
+
+test -rMULTIREV with --all-files
+
+  $ cd sng
+  $ hg rm um
+  $ hg commit -m "deletes um"
+  $ hg grep -r "0:2" "unmod" --all-files
+  um:0:unmod
+  um:1:unmod
+  $ hg grep -r "0:2" "unmod" --all-files um
+  um:0:unmod
+  um:1:unmod
+  $ cd ..
+
--- a/tests/test-hardlinks.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-hardlinks.t	Fri Aug 24 12:55:05 2018 -0700
@@ -241,6 +241,7 @@
   2 r4/.hg/cache/checkisexec (execbit !)
   ? r4/.hg/cache/checklink-target (glob) (symlink !)
   2 r4/.hg/cache/checknoexec (execbit !)
+  2 r4/.hg/cache/manifestfulltextcache
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/dirstate
@@ -291,6 +292,7 @@
   2 r4/.hg/cache/checkisexec (execbit !)
   2 r4/.hg/cache/checklink-target (symlink !)
   2 r4/.hg/cache/checknoexec (execbit !)
+  2 r4/.hg/cache/manifestfulltextcache
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   1 r4/.hg/dirstate
--- a/tests/test-help.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-help.t	Fri Aug 24 12:55:05 2018 -0700
@@ -652,29 +652,7 @@
 
   $ hg skjdfks
   hg: unknown command 'skjdfks'
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help' for a list of commands)
   [255]
 
 Typoed command gives suggestion
@@ -966,6 +944,9 @@
    debuginstall  test Mercurial installation
    debugknown    test whether node ids are known to a repo
    debuglocks    show or modify state of locks
+   debugmanifestfulltextcache
+                 show, clear or amend the contents of the manifest fulltext
+                 cache
    debugmergestate
                  print merge state
    debugnamecomplete
@@ -1499,7 +1480,7 @@
   Commands:
   $ hg help -c commit > /dev/null
   $ hg help -e -c commit > /dev/null
-  $ hg help -e commit > /dev/null
+  $ hg help -e commit
   abort: no such help topic: commit
   (try 'hg help --keyword commit')
   [255]
@@ -1848,18 +1829,26 @@
         This implies premerge. Therefore, files aren't dumped, if premerge runs
         successfully. Use :forcedump to forcibly write files out.
   
+        (actual capabilities: binary, symlink)
+  
       ":fail"
         Rather than attempting to merge files that were modified on both
         branches, it marks them as unresolved. The resolve command must be used
         to resolve these conflicts.
   
+        (actual capabilities: binary, symlink)
+  
       ":forcedump"
         Creates three versions of the files as same as :dump, but omits
         premerge.
   
+        (actual capabilities: binary, symlink)
+  
       ":local"
         Uses the local 'p1()' version of files as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":merge"
         Uses the internal non-interactive simple merge algorithm for merging
         files. It will fail if there are any conflicts and leave markers in the
@@ -1883,10 +1872,14 @@
       ":other"
         Uses the other 'p2()' version of files as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":prompt"
         Asks the user which of the local 'p1()' or the other 'p2()' version to
         keep as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":tagmerge"
         Uses the internal tag merge algorithm (experimental).
   
@@ -1896,7 +1889,8 @@
         markers are inserted.
   
       Internal tools are always available and do not require a GUI but will by
-      default not handle symlinks or binary files.
+      default not handle symlinks or binary files. See next section for detail
+      about "actual capabilities" described above.
   
       Choosing a merge tool
       =====================
@@ -1911,8 +1905,7 @@
          must be executable by the shell.
       3. If the filename of the file to be merged matches any of the patterns in
          the merge-patterns configuration section, the first usable merge tool
-         corresponding to a matching pattern is used. Here, binary capabilities
-         of the merge tool are not considered.
+         corresponding to a matching pattern is used.
       4. If ui.merge is set it will be considered next. If the value is not the
          name of a configured tool, the specified value is used and must be
          executable by the shell. Otherwise the named tool is used if it is
@@ -1925,6 +1918,21 @@
          internal ":merge" is used.
       8. Otherwise, ":prompt" is used.
   
+      For historical reason, Mercurial assumes capabilities of internal merge
+      tools as below while examining rules above, regardless of actual
+      capabilities of them.
+  
+      step specified via  binary symlink
+      ----------------------------------
+      1.   --tool         o      o
+      2.   HGMERGE        o      o
+      3.   merge-patterns o (*)  x (*)
+      4.   ui.merge       x (*)  x (*)
+  
+      If "merge.strict-capability-check" configuration is true, Mercurial checks
+      capabilities of internal merge tools strictly in (*) cases above. It is
+      false by default for backward compatibility.
+  
       Note:
          After selecting a merge program, Mercurial will by default attempt to
          merge the files using a simple merge algorithm first. Only if it
--- a/tests/test-http.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-http.t	Fri Aug 24 12:55:05 2018 -0700
@@ -476,7 +476,7 @@
 #endif
 
 ... and also keep partial clones and pulls working
-  $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
+  $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone
   adding changesets
   adding manifests
   adding file changes
@@ -484,7 +484,7 @@
   new changesets 8b6053c928fe
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg pull -R test-partial-clone
+  $ hg pull -R test/partial/clone
   pulling from http://localhost:$HGPORT1/
   searching for changes
   adding changesets
@@ -494,6 +494,13 @@
   new changesets 5fed3813f7f5:56f9bc90cce6
   (run 'hg update' to get a working copy)
 
+  $ hg clone -U -r 0 test/partial/clone test/another/clone
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 4 changes to 4 files
+  new changesets 8b6053c928fe
+
 corrupt cookies file should yield a warning
 
   $ cat > $TESTTMP/cookies.txt << EOF
--- a/tests/test-inherit-mode.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-inherit-mode.t	Fri Aug 24 12:55:05 2018 -0700
@@ -69,6 +69,7 @@
   00600 ./.hg/00changelog.i
   00770 ./.hg/cache/
   00660 ./.hg/cache/branch2-served
+  00660 ./.hg/cache/manifestfulltextcache
   00660 ./.hg/cache/rbc-names-v1
   00660 ./.hg/cache/rbc-revs-v1
   00660 ./.hg/dirstate
--- a/tests/test-lfconvert.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-lfconvert.t	Fri Aug 24 12:55:05 2018 -0700
@@ -101,6 +101,7 @@
   largefiles
   revlogv1
   store
+  testonly-simplestore (reposimplestore !)
 
 "lfconvert" includes a newline at the end of the standin files.
   $ cat .hglf/large .hglf/sub/maybelarge.dat
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-linelog.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,193 @@
+from __future__ import absolute_import, print_function
+
+import difflib
+import random
+import unittest
+
+from mercurial import linelog
+
+vecratio = 3 # number of replacelines / number of replacelines_vec
+maxlinenum = 0xffffff
+maxb1 = 0xffffff
+maxdeltaa = 10
+maxdeltab = 10
+
+def _genedits(seed, endrev):
+    lines = []
+    random.seed(seed)
+    rev = 0
+    for rev in range(0, endrev):
+        n = len(lines)
+        a1 = random.randint(0, n)
+        a2 = random.randint(a1, min(n, a1 + maxdeltaa))
+        b1 = random.randint(0, maxb1)
+        b2 = random.randint(b1, b1 + maxdeltab)
+        usevec = not bool(random.randint(0, vecratio))
+        if usevec:
+            blines = [(random.randint(0, rev), random.randint(0, maxlinenum))
+                      for _ in range(b1, b2)]
+        else:
+            blines = [(rev, bidx) for bidx in range(b1, b2)]
+        lines[a1:a2] = blines
+        yield lines, rev, a1, a2, b1, b2, blines, usevec
+
+class linelogtests(unittest.TestCase):
+    def testlinelogencodedecode(self):
+        program = [linelog._eof(0, 0),
+                   linelog._jge(41, 42),
+                   linelog._jump(0, 43),
+                   linelog._eof(0, 0),
+                   linelog._jl(44, 45),
+                   linelog._line(46, 47),
+                   ]
+        ll = linelog.linelog(program, maxrev=100)
+        enc = ll.encode()
+        # round-trips okay
+        self.assertEqual(linelog.linelog.fromdata(enc)._program, ll._program)
+        self.assertEqual(linelog.linelog.fromdata(enc), ll)
+        # This encoding matches the encoding used by hg-experimental's
+        # linelog file, or is supposed to if it doesn't.
+        self.assertEqual(enc, (b'\x00\x00\x01\x90\x00\x00\x00\x06'
+                               b'\x00\x00\x00\xa4\x00\x00\x00*'
+                               b'\x00\x00\x00\x00\x00\x00\x00+'
+                               b'\x00\x00\x00\x00\x00\x00\x00\x00'
+                               b'\x00\x00\x00\xb1\x00\x00\x00-'
+                               b'\x00\x00\x00\xba\x00\x00\x00/'))
+
+    def testsimpleedits(self):
+        ll = linelog.linelog()
+        # Initial revision: add lines 0, 1, and 2
+        ll.replacelines(1, 0, 0, 0, 3)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        # Replace line 1 with a new line
+        ll.replacelines(2, 1, 2, 1, 2)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
+                         [(1, 0),
+                          (2, 1),
+                          (1, 2),
+                         ])
+        # delete a line out of 2
+        ll.replacelines(3, 1, 2, 0, 0)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
+                         [(1, 0),
+                          (1, 2),
+                         ])
+        # annotation of 1 is unchanged
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        ll.annotate(3) # set internal state to revision 3
+        start = ll.getoffset(0)
+        end = ll.getoffset(1)
+        self.assertEqual(ll.getalllines(start, end), [
+            (1, 0),
+            (2, 1),
+            (1, 1),
+        ])
+        self.assertEqual(ll.getalllines(), [
+            (1, 0),
+            (2, 1),
+            (1, 1),
+            (1, 2),
+        ])
+
+    def testparseclinelogfile(self):
+        # This data is what the replacements in testsimpleedits
+        # produce when fed to the original linelog.c implementation.
+        data = (b'\x00\x00\x00\x0c\x00\x00\x00\x0f'
+                b'\x00\x00\x00\x00\x00\x00\x00\x02'
+                b'\x00\x00\x00\x05\x00\x00\x00\x06'
+                b'\x00\x00\x00\x06\x00\x00\x00\x00'
+                b'\x00\x00\x00\x00\x00\x00\x00\x07'
+                b'\x00\x00\x00\x06\x00\x00\x00\x02'
+                b'\x00\x00\x00\x00\x00\x00\x00\x00'
+                b'\x00\x00\x00\t\x00\x00\x00\t'
+                b'\x00\x00\x00\x00\x00\x00\x00\x0c'
+                b'\x00\x00\x00\x08\x00\x00\x00\x05'
+                b'\x00\x00\x00\x06\x00\x00\x00\x01'
+                b'\x00\x00\x00\x00\x00\x00\x00\x05'
+                b'\x00\x00\x00\x0c\x00\x00\x00\x05'
+                b'\x00\x00\x00\n\x00\x00\x00\x01'
+                b'\x00\x00\x00\x00\x00\x00\x00\t')
+        llc = linelog.linelog.fromdata(data)
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(2)],
+                         [(1, 0),
+                          (2, 1),
+                          (1, 2),
+                         ])
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(3)],
+                         [(1, 0),
+                          (1, 2),
+                         ])
+        # Check we emit the same bytecode.
+        ll = linelog.linelog()
+        # Initial revision: add lines 0, 1, and 2
+        ll.replacelines(1, 0, 0, 0, 3)
+        # Replace line 1 with a new line
+        ll.replacelines(2, 1, 2, 1, 2)
+        # delete a line out of 2
+        ll.replacelines(3, 1, 2, 0, 0)
+        diff = '\n   ' + '\n   '.join(difflib.unified_diff(
+            ll.debugstr().splitlines(), llc.debugstr().splitlines(),
+            'python', 'c', lineterm=''))
+        self.assertEqual(ll._program, llc._program, 'Program mismatch: ' + diff)
+        # Done as a secondary step so we get a better result if the
+        # program is where the mismatch is.
+        self.assertEqual(ll, llc)
+        self.assertEqual(ll.encode(), data)
+
+    def testanothersimplecase(self):
+        ll = linelog.linelog()
+        ll.replacelines(3, 0, 0, 0, 2)
+        ll.replacelines(4, 0, 2, 0, 0)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(4)],
+                         [])
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
+                         [(3, 0), (3, 1)])
+        # rev 2 is empty because contents were only ever introduced in rev 3
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
+                         [])
+
+    def testrandomedits(self):
+        # Inspired by original linelog tests.
+        seed = random.random()
+        numrevs = 2000
+        ll = linelog.linelog()
+        # Populate linelog
+        for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
+                seed, numrevs):
+            if usevec:
+                ll.replacelines_vec(rev, a1, a2, blines)
+            else:
+                ll.replacelines(rev, a1, a2, b1, b2)
+            ar = ll.annotate(rev)
+            self.assertEqual(ll.annotateresult, lines)
+        # Verify we can get back these states by annotating each rev
+        for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
+                seed, numrevs):
+            ar = ll.annotate(rev)
+            self.assertEqual([(l.rev, l.linenum) for l in ar], lines)
+
+    def testinfinitebadprogram(self):
+        ll = linelog.linelog.fromdata(
+            b'\x00\x00\x00\x00\x00\x00\x00\x02'  # header
+            b'\x00\x00\x00\x00\x00\x00\x00\x01'  # JUMP to self
+        )
+        with self.assertRaises(linelog.LineLogError):
+            # should not be an infinite loop and raise
+            ll.annotate(1)
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/test-match.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-match.py	Fri Aug 24 12:55:05 2018 -0700
@@ -6,14 +6,832 @@
 
 from mercurial import (
     match as matchmod,
+    util,
 )
 
+class BaseMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.basematcher(b'', b'')
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.basematcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+
+class AlwaysMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.alwaysmatcher(b'', b'')
+        self.assertEqual(m.visitdir(b'.'), b'all')
+        self.assertEqual(m.visitdir(b'dir'), b'all')
+
+    def testVisitchildrenset(self):
+        m = matchmod.alwaysmatcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), b'all')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'all')
+
 class NeverMatcherTests(unittest.TestCase):
 
     def testVisitdir(self):
-        m = matchmod.nevermatcher('', '')
-        self.assertFalse(m.visitdir('.'))
-        self.assertFalse(m.visitdir('dir'))
+        m = matchmod.nevermatcher(b'', b'')
+        self.assertFalse(m.visitdir(b'.'))
+        self.assertFalse(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.nevermatcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), set())
+
+class PredicateMatcherTests(unittest.TestCase):
+    # predicatematcher does not currently define either of these methods, so
+    # this is equivalent to BaseMatcherTests.
+
+    def testVisitdir(self):
+        m = matchmod.predicatematcher(b'', b'', lambda *a: False)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.predicatematcher(b'', b'', lambda *a: False)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+
+class PatternMatcherTests(unittest.TestCase):
+
+    def testVisitdirPrefix(self):
+        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertEqual(m.visitdir(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetPrefix(self):
+        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirRootfilesin(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # FIXME: These should probably be True.
+        self.assertFalse(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'dir/subdir'))
+
+    def testVisitchildrensetRootfilesin(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        # FIXME: These should probably be {'subdir'} and 'this', respectively,
+        # or at least 'this' and 'this'.
+        self.assertEqual(m.visitchildrenset(b'dir'), set())
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), set())
+
+    def testVisitdirGlob(self):
+        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # OPT: these should probably be False.
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetGlob(self):
+        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        # OPT: these should probably be set().
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class IncludeMatcherTests(unittest.TestCase):
+
+    def testVisitdirPrefix(self):
+        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertEqual(m.visitdir(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetPrefix(self):
+        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirRootfilesin(self):
+        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetRootfilesin(self):
+        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirGlob(self):
+        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # OPT: these should probably be False.
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetGlob(self):
+        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        # OPT: these should probably be set().
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class ExactMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertFalse(m.visitdir(b'dir/subdir/foo.txt'))
+        self.assertFalse(m.visitdir(b'dir/foo'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), {b'foo.txt'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/foo.txt'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitchildrensetFilesAndDirs(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfile.txt',
+                                                b'a/file1.txt',
+                                                b'a/b/file2.txt',
+                                                # no file in a/b/c
+                                                b'a/b/c/d/file4.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'a', b'rootfile.txt'})
+        self.assertEqual(m.visitchildrenset(b'a'), {b'b', b'file1.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b'), {b'c', b'file2.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c'), {b'd'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c/d'), {b'file4.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c/d/e'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+class DifferenceMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a nevermatcher.
+        self.assertFalse(dm.visitdir(b'.'))
+        self.assertFalse(dm.visitdir(b'dir'))
+        self.assertFalse(dm.visitdir(b'dir/subdir'))
+        self.assertFalse(dm.visitdir(b'dir/subdir/z'))
+        self.assertFalse(dm.visitdir(b'dir/foo'))
+        self.assertFalse(dm.visitdir(b'dir/subdir/x'))
+        self.assertFalse(dm.visitdir(b'folder'))
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a nevermatcher.
+        self.assertEqual(dm.visitchildrenset(b'.'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(dm.visitchildrenset(b'folder'), set())
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a alwaysmatcher. OPT: if m2 is a
+        # nevermatcher, we could return 'all' for these.
+        #
+        # We're testing Equal-to-True instead of just 'assertTrue' since
+        # assertTrue does NOT verify that it's a bool, just that it's truthy.
+        # While we may want to eventually make these return 'all', they should
+        # not currently do so.
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/foo'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+        self.assertEqual(dm.visitdir(b'folder'), True)
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a alwaysmatcher.
+        self.assertEqual(dm.visitchildrenset(b'.'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertFalse(dm.visitdir(b'dir/subdir'))
+        # OPT: We should probably return False for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+        # OPT: We could return 'all' for these.
+        self.assertEqual(dm.visitdir(b'dir/foo'), True)
+        self.assertEqual(dm.visitdir(b'folder'), True)
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitchildrenset(b'.'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'folder'), b'all')
+        # OPT: We should probably return set() for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just 'this'.
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir'), True)
+        self.assertFalse(dm.visitdir(b'dir/foo'))
+        self.assertFalse(dm.visitdir(b'folder'))
+        # OPT: We should probably return False for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(dm.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(dm.visitchildrenset(b'folder'), set())
+        # OPT: We should probably return set() for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just 'this'.
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class IntersectionMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a alwaysmatcher.
+        self.assertEqual(im.visitdir(b'.'), b'all')
+        self.assertEqual(im.visitdir(b'dir'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(im.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(im.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a alwaysmatcher.
+        self.assertEqual(im.visitchildrenset(b'.'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(im.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a nevermatcher.
+        self.assertFalse(im.visitdir(b'.'))
+        self.assertFalse(im.visitdir(b'dir'))
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+        self.assertFalse(im.visitdir(b'folder'))
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a nevermatcher.
+        self.assertEqual(im.visitchildrenset(b'.'), set())
+        self.assertEqual(im.visitchildrenset(b'dir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        # OPT: We should probably return 'all' for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(im.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        # OPT: We should probably return 'all' for these
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # FIXME: is True correct here?
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertFalse(im.visitdir(b'dir'))
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # FIXME: is set() correct here?
+        self.assertEqual(im.visitchildrenset(b'.'), set())
+        self.assertEqual(im.visitchildrenset(b'dir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), True)
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        # OPT: this should probably be 'all' not True.
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), {b'x'})
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        # OPT: this should probably be 'all' not 'this'.
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # OPT: these next three could probably be False as well.
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), True)
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # OPT: these next two could probably be set() as well.
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+class UnionMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to a alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to a alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM1never(self):
+        m1 = matchmod.nevermatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to a alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM1never(self):
+        m1 = matchmod.nevermatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to a alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to a alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to a alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        # OPT: These two should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        # OPT: These next two could be 'all' instead of 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+        # OPT: These should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'folder', b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+        # OPT: These next two could be 'all' instead of 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        # OPT: this should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+
+    def testVisitchildrensetIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        # OPT: this should probably be 'all' not 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        um = matchmod.unionmatcher([m1, m2])
+        # OPT: these next three could probably be False as well.
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), True)
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+
+    def testVisitchildrensetIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), {b'x', b'z'})
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+
+class SubdirMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        sm = matchmod.subdirmatcher(b'dir', m)
+
+        self.assertEqual(sm.visitdir(b'.'), True)
+        self.assertEqual(sm.visitdir(b'subdir'), b'all')
+        # OPT: These next two should probably be 'all' not True.
+        self.assertEqual(sm.visitdir(b'subdir/x'), True)
+        self.assertEqual(sm.visitdir(b'subdir/z'), True)
+        self.assertFalse(sm.visitdir(b'foo'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        sm = matchmod.subdirmatcher(b'dir', m)
+
+        self.assertEqual(sm.visitchildrenset(b'.'), {b'subdir'})
+        self.assertEqual(sm.visitchildrenset(b'subdir'), b'all')
+        # OPT: These next two should probably be 'all' not 'this'.
+        self.assertEqual(sm.visitchildrenset(b'subdir/x'), b'this')
+        self.assertEqual(sm.visitchildrenset(b'subdir/z'), b'this')
+        self.assertEqual(sm.visitchildrenset(b'foo'), set())
+
+class PrefixdirMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
+                [b'../a.txt', b'b.txt'])
+        pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m)
+
+        # `m` elides 'd' because it's part of the root, and the rest of the
+        # patterns are relative.
+        self.assertEqual(bool(m(b'a.txt')), False)
+        self.assertEqual(bool(m(b'b.txt')), False)
+        self.assertEqual(bool(m(b'e/a.txt')), True)
+        self.assertEqual(bool(m(b'e/b.txt')), False)
+        self.assertEqual(bool(m(b'e/f/b.txt')), True)
+
+        # The prefix matcher re-adds 'd' to the paths, so they need to be
+        # specified when using the prefixdirmatcher.
+        self.assertEqual(bool(pm(b'a.txt')), False)
+        self.assertEqual(bool(pm(b'b.txt')), False)
+        self.assertEqual(bool(pm(b'd/e/a.txt')), True)
+        self.assertEqual(bool(pm(b'd/e/b.txt')), False)
+        self.assertEqual(bool(pm(b'd/e/f/b.txt')), True)
+
+        self.assertEqual(m.visitdir(b'.'), True)
+        self.assertEqual(m.visitdir(b'e'), True)
+        self.assertEqual(m.visitdir(b'e/f'), True)
+        self.assertEqual(m.visitdir(b'e/f/g'), False)
+
+        self.assertEqual(pm.visitdir(b'.'), True)
+        self.assertEqual(pm.visitdir(b'd'), True)
+        self.assertEqual(pm.visitdir(b'd/e'), True)
+        self.assertEqual(pm.visitdir(b'd/e/f'), True)
+        self.assertEqual(pm.visitdir(b'd/e/f/g'), False)
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
+                [b'../a.txt', b'b.txt'])
+        pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m)
+
+        # OPT: visitchildrenset could possibly return {'e'} and {'f'} for these
+        # next two, respectively; patternmatcher does not have this
+        # optimization.
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e/f'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e/f/g'), set())
+
+        # OPT: visitchildrenset could possibly return {'d'}, {'e'}, and {'f'}
+        # for these next three, respectively; patternmatcher does not have this
+        # optimization.
+        self.assertEqual(pm.visitchildrenset(b'.'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e/f'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e/f/g'), set())
 
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-merge-no-file-change.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,379 @@
+  $ cat <<'EOF' >> "$HGRCPATH"
+  > [extensions]
+  > convert =
+  > [templates]
+  > l = '{rev}:{node|short} p={p1rev},{p2rev} m={manifest} f={files|json}'
+  > EOF
+
+  $ check_convert_identity () {
+  >     hg convert -q "$1" "$1.converted"
+  >     hg outgoing -q -R "$1.converted" "$1"
+  >     if [ "$?" != 1 ]; then
+  >         echo '*** BUG: hash changes on convert ***'
+  >         hg log -R "$1.converted" -GTl
+  >     fi
+  > }
+
+Files added at both parents:
+
+  $ hg init added-both
+  $ cd added-both
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg ci -qAm2 c
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:7aa8a293f5d97377037afc21e871e036e718d659
+  $ hg log -GTl
+  @    3:7aa8a293f5d9 p=2,1 m=3:8667461869a1 f=[]
+  |\
+  | o  2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-both
+
+Files added at both parents, but the other removed at the merge:
+(In this case, ctx.files() after the commit contains the removed file "b", but
+its manifest does not differ from p1.)
+
+  $ hg init added-both-removed-at-merge
+  $ cd added-both-removed-at-merge
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg ci -qAm2 c
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg rm -f b
+  $ hg ci --debug -m merge
+  committing files:
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:915745f3ca3d9d699925269474c2d0a9526e8dfa
+  $ hg log -GTl
+  @    3:915745f3ca3d p=2,1 m=3:8e9cf3456921 f=["b"]
+  |\
+  | o  2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-both-removed-at-merge
+
+An identical file added at both parents:
+
+  $ hg init added-identical
+  $ cd added-identical
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:de26182cd210f0c3fb175ca7616704ab963d3024
+  $ hg log -GTl
+  @    3:de26182cd210 p=2,1 m=1:686dbf0aeca4 f=[]
+  |\
+  | o  2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-identical
+
+#if execbit
+
+An identical file added at both parents, but the flag differs. Take local:
+
+  $ hg init flag-change-take-p1
+  $ cd flag-change-take-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ chmod +x b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  warning: cannot merge flags for b without common ancestor - keeping local flags
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ chmod +x b
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  reusing manifest form p1 (listed files actually unchanged)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:c8d50407916ef8a5a97cb6e36ca9bc844a6ee13e
+  $ hg log -GTl
+  @    3:c8d50407916e p=2,1 m=2:36b69ba4b24b f=[]
+  |\
+  | o  2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+  $ hg files -vr3
+           0   a
+           0 x b
+
+  $ cd ..
+  $ check_convert_identity flag-change-take-p1
+
+An identical file added at both parents, but the flag differs. Take other:
+
+  $ hg init flag-change-take-p2
+  $ cd flag-change-take-p2
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ chmod +x b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  warning: cannot merge flags for b without common ancestor - keeping local flags
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ chmod -x b
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:06a62a687d87c7d8944743dee1ee9d8c66b3f6e3
+  $ hg log -GTl
+  @    3:06a62a687d87 p=2,1 m=3:2a315ba1aa45 f=["b"]
+  |\
+  | o  2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+  $ hg files -vr3
+           0   a
+           0   b
+
+  $ cd ..
+  $ check_convert_identity flag-change-take-p2
+
+#endif
+
+An identical file added at both parents, one more file added at p2:
+
+  $ hg init added-some-p2
+  $ cd added-some-p2
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg ci -qAm2 c
+  $ hg up -q 0
+  $ touch b
+  $ hg ci -qAm3 b
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  c
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 4:f7fbc4e4d9a8fde03ba475adad675578c8bf472d
+  $ hg log -GTl
+  @    4:f7fbc4e4d9a8 p=3,2 m=3:92acd5bfd716 f=[]
+  |\
+  | o  3:e9d9f3cc981f p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  2:93c5529a4ec7 p=1,-1 m=2:ae25a31b30b3 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-some-p2
+
+An identical file added at both parents, one more file added at p1:
+(In this case, p1 manifest is reused at the merge commit, which means the
+manifest DAG does not have the same shape as the changelog.)
+
+  $ hg init added-some-p1
+  $ cd added-some-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b c
+  $ hg ci -qAm2 b
+  $ hg ci -qAm3 c
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 4:a9f0f589a913f5a149dc10dfbd5af726977c36c4
+  $ hg log -GTl
+  @    4:a9f0f589a913 p=3,1 m=2:ae25a31b30b3 f=[]
+  |\
+  | o  3:b8dc385241b5 p=2,-1 m=2:ae25a31b30b3 f=["c"]
+  | |
+  | o  2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-some-p1
+
+A file added at p2, a named branch created at p1:
+
+  $ hg init named-branch-p1
+  $ cd named-branch-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg branch -q foo
+  $ hg ci -m2
+
+  $ hg merge default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:fb97d83b02fd072295cfc2171f21b7d38509bfd7
+  $ hg log -GT'{l} branch={branch}'
+  @    3:fb97d83b02fd p=2,1 m=2:9091c64f4ea1 f=[] branch=foo
+  |\
+  | o  2:a3a9fa6587e5 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity named-branch-p1
+
+A file added at p1, a named branch created at p2:
+(In this case, p1 manifest is reused at the merge commit, which means the
+manifest DAG does not have the same shape as the changelog.)
+
+  $ hg init named-branch-p2
+  $ cd named-branch-p2
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg branch -q foo
+  $ hg ci -m1
+  $ hg up -q 0
+  $ hg ci -qAm1 b
+
+  $ hg merge foo
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:036823e24692218324d4af43b07ff89f8a000096
+  $ hg log -GT'{l} branch={branch}'
+  @    3:036823e24692 p=2,1 m=1:686dbf0aeca4 f=[] branch=default
+  |\
+  | o  2:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default
+  | |
+  o |  1:da38c8e00727 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity named-branch-p2
+
+A file changed once at both parents, but amended to have identical content:
+
+  $ hg init amend-p1
+  $ cd amend-p1
+  $ touch a
+  $ hg ci -qAm0 a
+  $ echo foo > a
+  $ hg ci -m1
+  $ hg up -q 0
+  $ echo bar > a
+  $ hg ci -qm2
+  $ echo foo > a
+  $ hg ci -qm3 --amend
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:314e5bc5adf5c58ea571efabe33eedba20a201aa
+  $ hg log -GT'{l} branch={branch}'
+  @    3:314e5bc5adf5 p=2,1 m=1:d33ea248bd73 f=[] branch=default
+  |\
+  | o  2:de9c64f226a3 p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default
+  | |
+  o |  1:6a74aec01b3c p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity amend-p1
--- a/tests/test-merge-tools.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-merge-tools.t	Fri Aug 24 12:55:05 2018 -0700
@@ -1701,6 +1701,35 @@
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg update -C 1 > /dev/null
+
+#else
+
+Match the non-portable filename commits above for test stability
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > revision 5
+  > 
+  > diff --git a/"; exit 1; echo " b/"; exit 1; echo "
+  > new file mode 100644
+  > --- /dev/null
+  > +++ b/"; exit 1; echo "
+  > @@ -0,0 +1,1 @@
+  > +revision 5
+  > EOF
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > revision 6
+  > 
+  > diff --git a/"; exit 1; echo " b/"; exit 1; echo "
+  > new file mode 100644
+  > --- /dev/null
+  > +++ b/"; exit 1; echo "
+  > @@ -0,0 +1,1 @@
+  > +revision 6
+  > EOF
+
 #endif
 
 Merge post-processing
@@ -1737,14 +1766,64 @@
   # hg resolve --list
   U f
 
-#if symlink
+missingbinary is a merge-tool that doesn't exist:
+
+  $ echo "missingbinary.executable=doesnotexist" >> .hg/hgrc
+  $ beforemerge
+  [merge-tools]
+  false.whatever=
+  true.priority=1
+  true.executable=cat
+  missingbinary.executable=doesnotexist
+  # hg update -C 1
+  $ hg merge -y -r 2 --config ui.merge=missingbinary
+  couldn't find merge tool missingbinary (for pattern f)
+  merging f
+  couldn't find merge tool missingbinary (for pattern f)
+  revision 1
+  space
+  revision 0
+  space
+  revision 2
+  space
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ hg update -q -C 1
+  $ rm f
 
 internal merge cannot handle symlinks and shouldn't try:
 
-  $ hg update -q -C 1
-  $ rm f
+#if symlink
+
   $ ln -s symlink f
   $ hg commit -qm 'f is symlink'
+
+#else
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > f is symlink
+  > 
+  > diff --git a/f b/f
+  > old mode 100644
+  > new mode 120000
+  > --- a/f
+  > +++ b/f
+  > @@ -1,2 +1,1 @@
+  > -revision 1
+  > -space
+  > +symlink
+  > \ No newline at end of file
+  > EOF
+
+Resolve 'other [destination] changed f which local [working copy] deleted' prompt
+  $ hg up -q -C --config ui.interactive=True << EOF
+  > c
+  > EOF
+
+#endif
+
   $ hg merge -r 2 --tool internal:merge
   merging f
   warning: internal :merge cannot merge symlinks for f
@@ -1753,8 +1832,6 @@
   use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
   [1]
 
-#endif
-
 Verify naming of temporary files and that extension is preserved:
 
   $ hg update -q -C 1
@@ -1782,6 +1859,86 @@
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
 
+Binary files capability checking
+
+  $ hg update -q -C 0
+  $ python <<EOF
+  > with open('b', 'wb') as fp:
+  >     fp.write(b'\x00\x01\x02\x03')
+  > EOF
+  $ hg add b
+  $ hg commit -qm "add binary file (#1)"
+
+  $ hg update -q -C 0
+  $ python <<EOF
+  > with open('b', 'wb') as fp:
+  >     fp.write(b'\x03\x02\x01\x00')
+  > EOF
+  $ hg add b
+  $ hg commit -qm "add binary file (#2)"
+
+By default, binary files capability of internal merge tools is not
+checked strictly.
+
+(for merge-patterns, chosen unintentionally)
+
+  $ hg merge 9 \
+  > --config merge-patterns.b=:merge-other \
+  > --config merge-patterns.re:[a-z]=:other
+  warning: check merge-patterns configurations, if ':merge-other' for binary file 'b' is unintentional
+  (see 'hg help merge-tools' for binary files capability)
+  merging b
+  warning: b looks like a binary file.
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+(for ui.merge, ignored unintentionally)
+
+  $ hg merge 9 \
+  > --config ui.merge=:other
+  tool :other (for pattern b) can't handle binary
+  tool true can't handle binary
+  tool false can't handle binary
+  no tool found to merge b
+  keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for b? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+With merge.strict-capability-check=true, binary files capability of
+internal merge tools is checked strictly.
+
+  $ f --hexdump b
+  b:
+  0000: 03 02 01 00                                     |....|
+
+(for merge-patterns)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config merge-patterns.b=:merge-other \
+  > --config merge-patterns.re:[a-z]=:other
+  tool :merge-other (for pattern b) can't handle binary
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03                                     |....|
+  $ hg merge --abort -q
+
+(for ui.merge)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config ui.merge=:other
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03                                     |....|
+  $ hg merge --abort -q
+
 Check that debugpicktool examines which merge tool is chosen for
 specified file as expected
 
@@ -1790,6 +1947,7 @@
   false.whatever=
   true.priority=1
   true.executable=cat
+  missingbinary.executable=doesnotexist
   # hg update -C 1
 
 (default behavior: checking files in the working parent context)
@@ -1812,9 +1970,9 @@
 
 (-r REV causes checking files in specified revision)
 
-  $ hg manifest -r tip
+  $ hg manifest -r 8
   f.txt
-  $ hg debugpickmergetool -r tip
+  $ hg debugpickmergetool -r 8
   f.txt = true
 
 #if symlink
@@ -1824,6 +1982,36 @@
   $ hg debugpickmergetool -r 6d00b3726f6e
   f = :prompt
 
+(by default, it is assumed that no internal merge tool has symlink
+capability)
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :prompt
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config ui.merge=:other
+  f = :prompt
+
+(with strict-capability-check=true, actual symlink capabilities are
+checked strictly)
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :other
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config ui.merge=:other
+  f = :other
+
 #endif
 
 (--verbose shows some configurations)
--- a/tests/test-narrow-clone-no-ellipsis.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-narrow-clone-no-ellipsis.t	Fri Aug 24 12:55:05 2018 -0700
@@ -30,10 +30,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg update
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -55,11 +53,9 @@
   added 40 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg update
   19 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -97,11 +93,9 @@
   added 40 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg update
   20 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -129,3 +123,39 @@
   dir/src/f9
 
   $ cd ..
+
+Testing the --narrowspec flag to clone
+
+  $ cat >> narrowspecs <<EOF
+  > %include foo
+  > [include]
+  > path:dir/tests/
+  > file:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  abort: cannot specify other files using '%include' in narrowspec
+  [255]
+
+  $ cat > narrowspecs <<EOF
+  > [include]
+  > path:dir/tests/
+  > file:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 40 changesets with 20 changes to 20 files
+  new changesets 681085829a73:26ce255d5b5d
+  updating to branch default
+  20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd specfile
+  $ hg tracked
+  I path:dir/tests
+  I path:file:dir/src/f12
+  $ cd ..
--- a/tests/test-narrow-clone.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-narrow-clone.t	Fri Aug 24 12:55:05 2018 -0700
@@ -34,10 +34,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg tracked
   I path:dir/src/f10
   $ hg update
@@ -69,11 +67,9 @@
   added 21 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg tracked
   I path:dir/tests
   X path:dir/tests/t19
@@ -114,11 +110,9 @@
   added 21 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg tracked
   I path:.
   X path:dir/tests
@@ -224,3 +218,39 @@
   dir/tests/t9
 
   $ cd ..
+
+Testing the --narrowspec flag to clone
+
+  $ cat >> narrowspecs <<EOF
+  > %include foo
+  > [include]
+  > path:dir/tests/
+  > dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  abort: cannot specify other files using '%include' in narrowspec
+  [255]
+
+  $ cat > narrowspecs <<EOF
+  > [include]
+  > path:dir/tests/
+  > file:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 21 changesets with 20 changes to 20 files
+  new changesets f93383bb3e99:26ce255d5b5d
+  updating to branch default
+  20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd specfile
+  $ hg tracked
+  I path:dir/tests
+  I path:file:dir/src/f12
+  $ cd ..
--- a/tests/test-narrow-debugcommands.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-narrow-debugcommands.t	Fri Aug 24 12:55:05 2018 -0700
@@ -1,10 +1,10 @@
   $ . "$TESTDIR/narrow-library.sh"
   $ hg init repo
   $ cd repo
-  $ cat << EOF > .hg/narrowspec
-  > [includes]
+  $ cat << EOF > .hg/store/narrowspec
+  > [include]
   > path:foo
-  > [excludes]
+  > [exclude]
   > EOF
   $ echo treemanifest >> .hg/requires
   $ echo narrowhg-experimental >> .hg/requires
--- a/tests/test-narrow-expanddirstate.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-narrow-expanddirstate.t	Fri Aug 24 12:55:05 2018 -0700
@@ -27,16 +27,16 @@
 
   $ mkdir outside
   $ echo other_contents > outside/f2
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
   $ hg status
 
 `hg status` did not add outside.
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
 
 Unfortunately this is not really a candidate for adding to narrowhg proper,
@@ -115,12 +115,12 @@
 `hg status` will now add outside, but not patchdir.
   $ DIRSTATEINCLUDES=path:outside hg status
   M outside/f2
-  $ grep outside .hg/narrowspec
-  path:outside
-  $ grep outside .hg/dirstate > /dev/null
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep outside
+  I path:outside
+  $ hg files | grep outside > /dev/null
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate
+  $ hg files | grep patchdir
   [1]
 
 Get rid of the modification to outside/f2.
@@ -142,9 +142,9 @@
   1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej
   abort: patch failed to apply
   [255]
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg files | grep patchdir > /dev/null
   [1]
 
 Let's make it apply cleanly and see that it *did* expand properly
@@ -159,6 +159,6 @@
   applying $TESTTMP/foo.patch
   $ cat patchdir/f3
   patched_this
-  $ grep patchdir .hg/narrowspec
-  path:patchdir
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg tracked | grep patchdir
+  I path:patchdir
+  $ hg files | grep patchdir > /dev/null
--- a/tests/test-narrow-patterns.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-narrow-patterns.t	Fri Aug 24 12:55:05 2018 -0700
@@ -88,15 +88,13 @@
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ cd narrow
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ hg manifest -r tip
   dir1/bar
   dir1/dirA/bar
@@ -144,14 +142,12 @@
   adding file changes
   added 9 changesets with 6 changes to 6 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -206,14 +202,12 @@
   adding file changes
   added 11 changesets with 7 changes to 7 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
-  path:dir2/dirA
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
+  X path:dir2/dirA
   $ find * | sort
   dir1
   dir1/bar
@@ -266,14 +260,12 @@
   adding file changes
   added 13 changesets with 8 changes to 8 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -327,13 +319,11 @@
   adding file changes
   added 13 changesets with 9 changes to 9 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
--- a/tests/test-narrow-pull.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-narrow-pull.t	Fri Aug 24 12:55:05 2018 -0700
@@ -166,7 +166,6 @@
 
 We should also be able to unshare without breaking everything:
   $ hg unshare
-  devel-warn: write with no wlock: "narrowspec" at: */hgext/narrow/narrowrepo.py:* (unsharenarrowspec) (glob)
   $ hg verify
   checking changesets
   checking manifests
--- a/tests/test-parseindex2.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-parseindex2.py	Fri Aug 24 12:55:05 2018 -0700
@@ -8,12 +8,14 @@
 import struct
 import subprocess
 import sys
+import unittest
 
 from mercurial.node import (
     nullid,
     nullrev,
 )
 from mercurial import (
+    node as nodemod,
     policy,
     pycompat,
 )
@@ -61,9 +63,6 @@
     e[0] = offset_type(0, type)
     index[0] = tuple(e)
 
-    # add the magic null revision at -1
-    index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
     return index, cache
 
 data_inlined = (
@@ -132,88 +131,92 @@
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     return p.communicate()  # returns stdout, stderr
 
-def printhexfail(testnumber, hexversion, stdout, expected):
+def hexfailmsg(testnumber, hexversion, stdout, expected):
     try:
         hexstring = hex(hexversion)
     except TypeError:
         hexstring = None
-    print("FAILED: version test #%s with Python %s and patched "
-          "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
-          (testnumber, sys.version_info, hexversion, hexstring, expected,
-           stdout))
-
-def testversionokay(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    if stdout:
-        printhexfail(testnumber, hexversion, stdout, expected="no stdout")
-
-def testversionfail(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    # We include versionerrortext to distinguish from other ImportErrors.
-    errtext = b"ImportError: %s" % pycompat.sysbytes(parsers.versionerrortext)
-    if errtext not in stdout:
-        printhexfail(testnumber, hexversion, stdout,
-                     expected="stdout to contain %r" % errtext)
+    return ("FAILED: version test #%s with Python %s and patched "
+            "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
+            (testnumber, sys.version_info, hexversion, hexstring, expected,
+             stdout))
 
 def makehex(major, minor, micro):
     return int("%x%02x%02x00" % (major, minor, micro), 16)
 
-def runversiontests():
-    """Check the version-detection logic when importing parsers."""
-    info = sys.version_info
-    major, minor, micro = info[0], info[1], info[2]
-    # Test same major-minor versions.
-    testversionokay(1, makehex(major, minor, micro))
-    testversionokay(2, makehex(major, minor, micro + 1))
-    # Test different major-minor versions.
-    testversionfail(3, makehex(major + 1, minor, micro))
-    testversionfail(4, makehex(major, minor + 1, micro))
-    testversionfail(5, "'foo'")
+class parseindex2tests(unittest.TestCase):
+
+    def assertversionokay(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        self.assertFalse(
+            stdout, hexfailmsg(testnumber, hexversion, stdout, 'no stdout'))
+
+    def assertversionfail(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        # We include versionerrortext to distinguish from other ImportErrors.
+        errtext = b"ImportError: %s" % pycompat.sysbytes(
+            parsers.versionerrortext)
+        self.assertIn(errtext, stdout,
+                      hexfailmsg(testnumber, hexversion, stdout,
+                                 expected="stdout to contain %r" % errtext))
 
-def runtest() :
-    # Only test the version-detection logic if it is present.
-    try:
-        parsers.versionerrortext
-    except AttributeError:
-        pass
-    else:
-        runversiontests()
+    def testversiondetection(self):
+        """Check the version-detection logic when importing parsers."""
+        # Only test the version-detection logic if it is present.
+        try:
+            parsers.versionerrortext
+        except AttributeError:
+            return
+        info = sys.version_info
+        major, minor, micro = info[0], info[1], info[2]
+        # Test same major-minor versions.
+        self.assertversionokay(1, makehex(major, minor, micro))
+        self.assertversionokay(2, makehex(major, minor, micro + 1))
+        # Test different major-minor versions.
+        self.assertversionfail(3, makehex(major + 1, minor, micro))
+        self.assertversionfail(4, makehex(major, minor + 1, micro))
+        self.assertversionfail(5, "'foo'")
 
-    # Check that parse_index2() raises TypeError on bad arguments.
-    try:
-        parse_index2(0, True)
-    except TypeError:
-        pass
-    else:
-        print("Expected to get TypeError.")
+    def testbadargs(self):
+        # Check that parse_index2() raises TypeError on bad arguments.
+        with self.assertRaises(TypeError):
+            parse_index2(0, True)
 
-   # Check parsers.parse_index2() on an index file against the original
-   # Python implementation of parseindex, both with and without inlined data.
-
-    py_res_1 = py_parseindex(data_inlined, True)
-    c_res_1 = parse_index2(data_inlined, True)
+    def testparseindexfile(self):
+        # Check parsers.parse_index2() on an index file against the
+        # original Python implementation of parseindex, both with and
+        # without inlined data.
 
-    py_res_2 = py_parseindex(data_non_inlined, False)
-    c_res_2 = parse_index2(data_non_inlined, False)
+        want = py_parseindex(data_inlined, True)
+        got = parse_index2(data_inlined, True)
+        self.assertEqual(want, got) # inline data
 
-    if py_res_1 != c_res_1:
-        print("Parse index result (with inlined data) differs!")
-
-    if py_res_2 != c_res_2:
-        print("Parse index result (no inlined data) differs!")
+        want = py_parseindex(data_non_inlined, False)
+        got = parse_index2(data_non_inlined, False)
+        self.assertEqual(want, got) # no inline data
 
-    ix = parsers.parse_index2(data_inlined, True)[0]
-    for i, r in enumerate(ix):
-        if r[7] == nullid:
-            i = -1
-        try:
-            if ix[r[7]] != i:
-                print('Reverse lookup inconsistent for %r'
-                    % r[7].encode('hex'))
-        except TypeError:
-            # pure version doesn't support this
-            break
+        ix = parsers.parse_index2(data_inlined, True)[0]
+        for i, r in enumerate(ix):
+            if r[7] == nullid:
+                i = -1
+            try:
+                self.assertEqual(
+                    ix[r[7]], i,
+                    'Reverse lookup inconsistent for %r' % nodemod.hex(r[7]))
+            except TypeError:
+                # pure version doesn't support this
+                break
 
-    print("done")
+    def testminusone(self):
+        want = (0, 0, 0, -1, -1, -1, -1, nullid)
+        index, junk = parsers.parse_index2(data_inlined, True)
+        got = index[-1]
+        self.assertEqual(want, got) # inline data
 
-runtest()
+        index, junk = parsers.parse_index2(data_non_inlined, False)
+        got = index[-1]
+        self.assertEqual(want, got) # no inline data
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/test-parseindex2.py.out	Sat Aug 18 10:24:57 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-done
--- a/tests/test-patchbomb-bookmark.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-patchbomb-bookmark.t	Fri Aug 24 12:55:05 2018 -0700
@@ -35,7 +35,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] bookmark
-  Message-Id: <patchbomb.347155260@*> (glob)
+  Message-Id: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:00 +0000
   From: test
@@ -50,10 +50,10 @@
   X-Mercurial-Node: accde9b8b6dce861c185d0825c1affc09a79cb26
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  In-Reply-To: <patchbomb.347155260@*> (glob)
-  References: <patchbomb.347155260@*> (glob)
+  Message-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  In-Reply-To: <patchbomb.347155260@test-hostname>
+  References: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:01 +0000
   From: test
@@ -81,10 +81,10 @@
   X-Mercurial-Node: 417defd1559c396ba06a44dce8dc1c2d2d653f3f
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <417defd1559c396ba06a.347155262@*> (glob)
-  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  In-Reply-To: <patchbomb.347155260@*> (glob)
-  References: <patchbomb.347155260@*> (glob)
+  Message-Id: <417defd1559c396ba06a.347155262@test-hostname>
+  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  In-Reply-To: <patchbomb.347155260@test-hostname>
+  References: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:02 +0000
   From: test
@@ -145,8 +145,8 @@
   X-Mercurial-Node: 8dab2639fd35f1e337ad866c372a5c44f1064e3c
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8dab2639fd35f1e337ad.378691260@*> (glob)
-  X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@*> (glob)
+  Message-Id: <8dab2639fd35f1e337ad.378691260@test-hostname>
+  X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Fri, 01 Jan 1982 00:01:00 +0000
   From: test
--- a/tests/test-patchbomb.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-patchbomb.t	Fri Aug 24 12:55:05 2018 -0700
@@ -2,7 +2,6 @@
 wildcards in test expectations due to how many things like hostnames
 tend to make it into outputs. As a result, you may need to perform the
 following regular expression substitutions:
-@$HOSTNAME> -> @*> (glob)
 Mercurial-patchbomb/.* -> Mercurial-patchbomb/* (glob)
 /mixed; boundary="===+[0-9]+==" -> /mixed; boundary="===*== (glob)"
 --===+[0-9]+=+--$ -> --===*=-- (glob)
@@ -45,8 +44,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -84,8 +83,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <*@*> (glob)
-  X-Mercurial-Series-Id: <*@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -159,8 +158,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -197,8 +196,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -236,7 +235,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.120@*> (glob)
+  Message-Id: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:00 +0000
   From: quux
@@ -252,10 +251,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.121@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob)
-  In-Reply-To: <patchbomb.120@*> (glob)
-  References: <patchbomb.120@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.121@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname>
+  In-Reply-To: <patchbomb.120@test-hostname>
+  References: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:01 +0000
   From: quux
@@ -284,10 +283,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.122@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob)
-  In-Reply-To: <patchbomb.120@*> (glob)
-  References: <patchbomb.120@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.122@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname>
+  In-Reply-To: <patchbomb.120@test-hostname>
+  References: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:02 +0000
   From: quux
@@ -366,7 +365,7 @@
   Content-Type: multipart/mixed; boundary="===*==" (glob)
   MIME-Version: 1.0
   Subject: test
-  Message-Id: <patchbomb.180@*> (glob)
+  Message-Id: <patchbomb.180@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:03:00 +0000
   From: quux
@@ -412,7 +411,7 @@
   Content-Type: multipart/mixed; boundary="===*==" (glob)
   MIME-Version: 1.0
   Subject: test
-  Message-Id: <patchbomb.180@*> (glob)
+  Message-Id: <patchbomb.180@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:03:00 +0000
   From: quux
@@ -439,10 +438,11 @@
   CgZcySARUyA2A2LGZKiZ3Y+Lu786z4z4MWXmsrAZCsqrl1az5y21PMcjpbThzWeXGT+/nutbmvvz
   zXYS3BoGxdrJDIYmlimJJiZpRokmqYYmaSYWFknmSSkmhqbmliamiZYWxuYmBhbJBgZcUBNZQe5K
   Epm7xF/LT+RLx/a9juFTomaYO/Rgsx4rwBN+IMCUDLOKAQBrsmti
+   (?)
   --===============*==-- (glob)
 
 utf-8 patch:
-  $ $PYTHON -c 'fp = open("utf", "wb"); fp.write("h\xC3\xB6mma!\n"); fp.close();'
+  $ $PYTHON -c 'fp = open("utf", "wb"); fp.write(b"h\xC3\xB6mma!\n"); fp.close();'
   $ hg commit -A -d '4 0' -m 'utf-8 content'
   adding description
   adding utf
@@ -454,14 +454,14 @@
   
   displaying [PATCH] utf-8 content ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH] utf-8 content
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <909a00e13e9d78b575ae.240@*> (glob)
-  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.240@test-hostname>
+  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -487,7 +487,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/utf	Thu Jan 01 00:00:04 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xc3\xb6mma! (esc)
+  +h=C3=B6mma!
   
 
 mime encoded mbox (base64):
@@ -506,8 +506,8 @@
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <909a00e13e9d78b575ae.240@*> (glob)
-  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.240@test-hostname>
+  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: Q <quux>
@@ -526,7 +526,14 @@
   QEAgLTAsMCArMSwxIEBACitow7ZtbWEhCg==
   
   
-  $ $PYTHON -c 'print open("mbox").read().split("\n\n")[1].decode("base64")'
+  >>> import base64
+  >>> patch = base64.b64decode(open("mbox").read().split("\n\n")[1])
+  >>> if not isinstance(patch, str):
+  ...     import sys
+  ...     sys.stdout.flush()
+  ...     junk = sys.stdout.buffer.write(patch + b"\n")
+  ... else:
+  ...     print(patch)
   # HG changeset patch
   # User test
   # Date 4 0
@@ -551,7 +558,7 @@
   $ rm mbox
 
 mime encoded mbox (quoted-printable):
-  $ $PYTHON -c 'fp = open("long", "wb"); fp.write("%s\nfoo\n\nbar\n" % ("x" * 1024)); fp.close();'
+  $ $PYTHON -c 'fp = open("long", "wb"); fp.write(b"%s\nfoo\n\nbar\n" % (b"x" * 1024)); fp.close();'
   $ hg commit -A -d '4 0' -m 'long line'
   adding long
 
@@ -568,8 +575,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -622,8 +629,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -665,7 +672,7 @@
   $ rm mbox
 
 iso-8859-1 patch:
-  $ $PYTHON -c 'fp = open("isolatin", "wb"); fp.write("h\xF6mma!\n"); fp.close();'
+  $ $PYTHON -c 'fp = open("isolatin", "wb"); fp.write(b"h\xF6mma!\n"); fp.close();'
   $ hg commit -A -d '5 0' -m 'isolatin 8-bit encoding'
   adding isolatin
 
@@ -684,8 +691,8 @@
   X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <240fb913fc1b7ff15ddb.300@*> (glob)
-  X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@*> (glob)
+  Message-Id: <240fb913fc1b7ff15ddb.300@test-hostname>
+  X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:05:00 +0000
   From: quux
@@ -732,8 +739,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -791,7 +798,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -811,10 +818,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -847,10 +854,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -888,8 +895,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -931,8 +938,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -991,7 +998,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 3] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1006,10 +1013,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 3
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1044,10 +1051,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 3
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1082,10 +1089,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 3
-  Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:03 +0000
   From: quux
@@ -1142,8 +1149,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1193,8 +1200,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1260,8 +1267,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1323,7 +1330,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 3] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1338,10 +1345,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 3
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1385,10 +1392,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 3
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1432,10 +1439,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 3
-  Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:03 +0000
   From: quux
@@ -1503,7 +1510,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1519,10 +1526,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1556,7 +1563,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1573,10 +1580,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1612,7 +1619,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1628,10 +1635,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1660,10 +1667,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1699,8 +1706,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1737,8 +1744,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1779,8 +1786,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1823,7 +1830,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1838,10 +1845,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1876,10 +1883,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1923,8 +1930,8 @@
   X-Mercurial-Node: 7aead2484924c445ad8ce2613df91f52f9e502ed
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <7aead2484924c445ad8c.60@*> (glob)
-  X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@*> (glob)
+  Message-Id: <7aead2484924c445ad8c.60@test-hostname>
+  X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -1966,8 +1973,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -1998,8 +2005,8 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -2038,7 +2045,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -2056,10 +2063,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2088,10 +2095,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2129,8 +2136,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2167,7 +2174,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 fooFlag] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2183,10 +2190,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2215,10 +2222,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2256,8 +2263,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2293,7 +2300,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 fooFlag barFlag] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2309,10 +2316,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2341,10 +2348,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2383,8 +2390,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.315532860@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.315532860@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: quux
@@ -2422,7 +2429,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 R1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2438,10 +2445,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2469,10 +2476,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2508,8 +2515,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2531,10 +2538,11 @@
   
 
 test multi-byte domain parsing:
-  $ UUML=`$PYTHON -c 'import sys; sys.stdout.write("\374")'`
+  >>> with open('toaddress.txt', 'wb') as f:
+  ...  f.write(b'bar@\xfcnicode.com') and None
   $ HGENCODING=iso-8859-1
   $ export HGENCODING
-  $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "bar@${UUML}nicode.com" -s test -r 0
+  $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "`cat toaddress.txt`" -s test -r 0
   this patch series consists of 1 patches.
   
   Cc: 
@@ -2550,8 +2558,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.315532860@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.315532860@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: quux
@@ -2625,7 +2633,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 6] test
-  Message-Id: <patchbomb.315532860@*> (glob)
+  Message-Id: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: test
@@ -2640,10 +2648,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 6
-  Message-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:01 +0000
   From: test
@@ -2665,16 +2673,16 @@
   
   displaying [PATCH 2 of 6] utf-8 content ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH 2 of 6] utf-8 content
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 6
-  Message-Id: <909a00e13e9d78b575ae.315532862@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.315532862@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:02 +0000
   From: test
@@ -2699,7 +2707,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/utf	Thu Jan 01 00:00:04 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xc3\xb6mma! (esc)
+  +h=C3=B6mma!
   
   displaying [PATCH 3 of 6] long line ...
   MIME-Version: 1.0
@@ -2709,10 +2717,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 6
-  Message-Id: <a2ea8fc83dd8b93cfd86.315532863@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.315532863@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:03 +0000
   From: test
@@ -2750,16 +2758,16 @@
   
   displaying [PATCH 4 of 6] isolatin 8-bit encoding ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH 4 of 6] isolatin 8-bit encoding
   X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720
   X-Mercurial-Series-Index: 4
   X-Mercurial-Series-Total: 6
-  Message-Id: <240fb913fc1b7ff15ddb.315532864@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <240fb913fc1b7ff15ddb.315532864@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:04 +0000
   From: test
@@ -2777,7 +2785,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/isolatin	Thu Jan 01 00:00:05 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xf6mma! (esc)
+  +h=F6mma!
   
   displaying [PATCH 5 of 6] Added tag zero, zero.foo for changeset 8580ff50825a ...
   MIME-Version: 1.0
@@ -2787,10 +2795,10 @@
   X-Mercurial-Node: 5d5ef15dfe5e7bd3a4ee154b5fff76c7945ec433
   X-Mercurial-Series-Index: 5
   X-Mercurial-Series-Total: 6
-  Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:05 +0000
   From: test
@@ -2819,10 +2827,10 @@
   X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
   X-Mercurial-Series-Index: 6
   X-Mercurial-Series-Total: 6
-  Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:06 +0000
   From: test
@@ -2864,8 +2872,8 @@
   X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob)
-  X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob)
+  Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname>
+  X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: test
--- a/tests/test-py3-commands.t	Sat Aug 18 10:24:57 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-#require py3exe
-
-This test helps in keeping a track on which commands we can run on
-Python 3 and see what kind of errors are coming up.
-The full traceback is hidden to have a stable output.
-  $ HGBIN=`which hg`
-
-  $ for cmd in version debuginstall ; do
-  >   echo $cmd
-  >   $PYTHON3 $HGBIN $cmd 2>&1 2>&1 | tail -1
-  > done
-  version
-  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-  debuginstall
-  no problems detected
-
-#if test-repo
-Make a clone so that any features in the developer's .hg/hgrc that
-might confuse Python 3 don't break this test. When we can do commit in
-Python 3, we'll stop doing this. We use e76ed1e480ef for the clone
-because it has different files than 273ce12ad8f1, so we can test both
-`files` from dirstate and `files` loaded from a specific revision.
-
-  $ hg clone -r e76ed1e480ef "`dirname "$TESTDIR"`" testrepo 2>&1 | tail -1
-  15 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Test using -R, which exercises some URL code:
-  $ $PYTHON3 $HGBIN -R testrepo files -r 273ce12ad8f1 | tail -1
-  testrepo/tkmerge
-
-Now prove `hg files` is reading the whole manifest. We have to grep
-out some potential warnings that come from hgrc as yet.
-  $ cd testrepo
-  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1
-  .hgignore
-  PKG-INFO
-  README
-  hg
-  mercurial/__init__.py
-  mercurial/byterange.py
-  mercurial/fancyopts.py
-  mercurial/hg.py
-  mercurial/mdiff.py
-  mercurial/revlog.py
-  mercurial/transaction.py
-  notes.txt
-  setup.py
-  tkmerge
-
-  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1 | wc -l
-  \s*14 (re)
-  $ $PYTHON3 $HGBIN files | wc -l
-  \s*15 (re)
-
-Test if log-like commands work:
-
-  $ $PYTHON3 $HGBIN tip
-  changeset:   10:e76ed1e480ef
-  tag:         tip
-  user:        oxymoron@cinder.waste.org
-  date:        Tue May 03 23:37:43 2005 -0800
-  summary:     Fix linking of changeset revs when merging
-  
-
-  $ $PYTHON3 $HGBIN log -r0
-  changeset:   0:9117c6561b0b
-  user:        mpm@selenic.com
-  date:        Tue May 03 13:16:10 2005 -0800
-  summary:     Add back links from file revisions to changeset revisions
-  
-
-  $ cd ..
-#endif
-
-Test if `hg config` works:
-
-  $ $PYTHON3 $HGBIN config
-  devel.all-warnings=true
-  devel.default-date=0 0
-  largefiles.usercache=$TESTTMP/.cache/largefiles
-  ui.slash=True
-  ui.interactive=False
-  ui.mergemarkers=detailed
-  ui.promptecho=True
-  web.address=localhost
-  web.ipv6=False
-
-  $ cat > included-hgrc <<EOF
-  > [extensions]
-  > babar = imaginary_elephant
-  > EOF
-  $ cat >> $HGRCPATH <<EOF
-  > %include $TESTTMP/included-hgrc
-  > EOF
-  $ $PYTHON3 $HGBIN version | tail -1
-  *** failed to import extension babar from imaginary_elephant: *: 'imaginary_elephant' (glob)
-  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-  $ rm included-hgrc
-  $ touch included-hgrc
-
-Test bytes-ness of policy.policy with HGMODULEPOLICY
-
-  $ HGMODULEPOLICY=py
-  $ export HGMODULEPOLICY
-  $ $PYTHON3 `which hg` debuginstall 2>&1 2>&1 | tail -1
-  no problems detected
-
-`hg init` can create empty repos
-`hg status works fine`
-`hg summary` also works!
-
-  $ $PYTHON3 `which hg` init py3repo
-  $ cd py3repo
-  $ echo "This is the file 'iota'." > iota
-  $ $PYTHON3 $HGBIN status
-  ? iota
-  $ $PYTHON3 $HGBIN add iota
-  $ $PYTHON3 $HGBIN status
-  A iota
-  $ hg diff --nodates --git
-  diff --git a/iota b/iota
-  new file mode 100644
-  --- /dev/null
-  +++ b/iota
-  @@ -0,0 +1,1 @@
-  +This is the file 'iota'.
-  $ $PYTHON3 $HGBIN commit --message 'commit performed in Python 3'
-  $ $PYTHON3 $HGBIN status
-
-  $ mkdir A
-  $ echo "This is the file 'mu'." > A/mu
-  $ $PYTHON3 $HGBIN addremove
-  adding A/mu
-  $ $PYTHON3 $HGBIN status
-  A A/mu
-  $ HGEDITOR='echo message > ' $PYTHON3 $HGBIN commit
-  $ $PYTHON3 $HGBIN status
-  $ $PYHON3 $HGBIN summary
-  parent: 1:e1e9167203d4 tip
-   message
-  branch: default
-  commit: (clean)
-  update: (current)
-  phases: 2 draft
-
-Test weird unicode-vs-bytes stuff
-
-  $ $PYTHON3 $HGBIN help | egrep -v '^ |^$'
-  Mercurial Distributed SCM
-  list of commands:
-  additional help topics:
-  (use 'hg help -v' to show built-in aliases and global options)
-
-  $ $PYTHON3 $HGBIN help help | egrep -v '^ |^$'
-  hg help [-ecks] [TOPIC]
-  show help for a given topic or a help overview
-  options ([+] can be repeated):
-  (some details hidden, use --verbose to show complete help)
-
-  $ $PYTHON3 $HGBIN help -k notopic
-  abort: no matches
-  (try 'hg help' for a list of topics)
-  [255]
-
-Prove the repo is valid using the Python 2 `hg`:
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  2 files, 2 changesets, 2 total revisions
-  $ hg log
-  changeset:   1:e1e9167203d4
-  tag:         tip
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     message
-  
-  changeset:   0:71c96e924262
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     commit performed in Python 3
-  
-
-  $ $PYTHON3 $HGBIN log -G
-  @  changeset:   1:e1e9167203d4
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     message
-  |
-  o  changeset:   0:71c96e924262
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     commit performed in Python 3
-  
-  $ $PYTHON3 $HGBIN log -Tjson
-  [
-   {
-    "bookmarks": [],
-    "branch": "default",
-    "date": [0, 0],
-    "desc": "message",
-    "node": "e1e9167203d450ca2f558af628955b5f5afd4489",
-    "parents": ["71c96e924262969ff0d8d3d695b0f75412ccc3d8"],
-    "phase": "draft",
-    "rev": 1,
-    "tags": ["tip"],
-    "user": "test"
-   },
-   {
-    "bookmarks": [],
-    "branch": "default",
-    "date": [0, 0],
-    "desc": "commit performed in Python 3",
-    "node": "71c96e924262969ff0d8d3d695b0f75412ccc3d8",
-    "parents": ["0000000000000000000000000000000000000000"],
-    "phase": "draft",
-    "rev": 0,
-    "tags": [],
-    "user": "test"
-   }
-  ]
-
-Show that update works now!
-
-  $ $PYTHON3 $HGBIN up 0
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ $PYTHON3 $HGBIN identify
-  71c96e924262
-
-branches and bookmarks also works!
-
-  $ $PYTHON3 $HGBIN branches
-  default                        1:e1e9167203d4
-  $ $PYTHON3 $HGBIN bookmark book
-  $ $PYTHON3 $HGBIN bookmarks
-   * book                      0:71c96e924262
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-backup.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,150 @@
+  $ cat << EOF >> $HGRCPATH
+  > [extensions]
+  > rebase=
+  > EOF
+
+==========================================
+Test history-editing-backup config option |
+==========================================
+Test with Pre-obsmarker rebase:
+1) When config option is not set:
+  $ hg init repo1
+  $ cd repo1
+  $ echo a>a
+  $ hg ci -qAma
+  $ echo b>b
+  $ hg ci -qAmb
+  $ echo c>c
+  $ hg ci -qAmc
+  $ hg up 0 -q
+  $ echo d>d
+  $ hg ci -qAmd
+  $ echo e>e
+  $ hg ci -qAme
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  4: e
+  |
+  o  3: d
+  |
+  | o  2: c
+  | |
+  | o  1: b
+  |/
+  o  0: a
+  
+  $ hg rebase -s 1 -d .
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/d2ae7f538514-c7ed7a78-rebase.hg
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  o  4: c
+  |
+  o  3: b
+  |
+  @  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+
+2) When config option is set:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+
+  $ echo f>f
+  $ hg ci -Aqmf
+  $ echo g>g
+  $ hg ci -Aqmg
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  6: g
+  |
+  o  5: f
+  |
+  | o  4: c
+  | |
+  | o  3: b
+  |/
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+  $ hg rebase -s 3 -d .
+  rebasing 3:05bff2a95b12 "b"
+  rebasing 4:1762bde4404d "c"
+
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  o  6: c
+  |
+  o  5: b
+  |
+  @  4: g
+  |
+  o  3: f
+  |
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+Test when rebased revisions are stripped during abort:
+======================================================
+
+  $ echo conflict > c
+  $ hg ci -Am "conflict with c"
+  adding c
+  created new head
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  7: conflict with c
+  |
+  | o  6: c
+  | |
+  | o  5: b
+  |/
+  o  4: g
+  |
+  o  3: f
+  |
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+When history-editing-backup = True:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = True
+  > EOF
+  $ hg rebase -s 5 -d .
+  rebasing 5:1f8148a544ee "b"
+  rebasing 6:f8bc7d28e573 "c"
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --abort
+  saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/818c1a43c916-2b644d96-backup.hg
+  rebase aborted
+
+When history-editing-backup = False:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ hg rebase -s 5 -d .
+  rebasing 5:1f8148a544ee "b"
+  rebasing 6:f8bc7d28e573 "c"
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --abort
+  rebase aborted
+  $ cd ..
+
--- a/tests/test-rebase-inmemory.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-rebase-inmemory.t	Fri Aug 24 12:55:05 2018 -0700
@@ -156,7 +156,33 @@
   |/
   o  0: b173517d0057 'a'
   
+
+Test reporting of path conflicts
+
+  $ hg rm a
+  $ mkdir a
+  $ touch a/a
+  $ hg ci -Am "a/a"
+  adding a/a
+  $ hg tglog
+  @  4: daf7dfc139cb 'a/a'
+  |
+  o  3: 844a7de3e617 'c'
+  |
+  | o  2: 09c044d2cb43 'd'
+  | |
+  | o  1: fc055c3b4d33 'b'
+  |/
+  o  0: b173517d0057 'a'
+  
+  $ hg rebase -r . -d 2
+  rebasing 4:daf7dfc139cb "a/a" (tip)
+  saved backup bundle to $TESTTMP/repo1/repo2/.hg/strip-backup/daf7dfc139cb-fdbfcf4f-rebase.hg
+
+  $ cd ..
+
 Test dry-run rebasing
+
   $ hg init repo3
   $ cd repo3
   $ echo a>a
@@ -325,6 +351,25 @@
   hit a merge conflict
   [1]
 
+In-memory rebase that fails due to merge conflicts
+
+  $ hg rebase -s 2 -d 7
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  rebasing 4:e860deea161a "e"
+  merging e
+  transaction abort!
+  rollback completed
+  hit merge conflicts; re-running rebase without in-memory merge
+  rebase aborted
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  rebasing 4:e860deea161a "e"
+  merging e
+  warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+
 ==========================
 Test for --confirm option|
 ==========================
@@ -509,3 +554,31 @@
   o  0:cb9a9f314b8b test
      a
   
+#if execbit
+
+Test a metadata-only in-memory merge
+  $ cd $TESTTMP
+  $ hg init no_exception
+  $ cd no_exception
+# Produce the following graph:
+#   o  'add +x to foo.txt'
+#   | o  r1  (adds bar.txt, just for something to rebase to)
+#   |/
+#   o  r0   (adds foo.txt, no +x)
+  $ echo hi > foo.txt
+  $ hg ci -qAm r0
+  $ echo hi > bar.txt
+  $ hg ci -qAm r1
+  $ hg co -qr ".^"
+  $ chmod +x foo.txt
+  $ hg ci -qAm 'add +x to foo.txt'
+issue5960: this was raising an AttributeError exception
+  $ hg rebase -r . -d 1
+  rebasing 2:539b93e77479 "add +x to foo.txt" (tip)
+  saved backup bundle to $TESTTMP/no_exception/.hg/strip-backup/*.hg (glob)
+  $ hg diff -c tip
+  diff --git a/foo.txt b/foo.txt
+  old mode 100644
+  new mode 100755
+
+#endif
--- a/tests/test-rebase-obsolete.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-rebase-obsolete.t	Fri Aug 24 12:55:05 2018 -0700
@@ -15,6 +15,7 @@
   > [extensions]
   > rebase=
   > drawdag=$TESTDIR/drawdag.py
+  > strip=
   > EOF
 
 Setup rebase canonical repo
@@ -1788,3 +1789,312 @@
   |
   o  0:426bada5c675 A
   
+====================
+Test --stop option |
+====================
+  $ cd ..
+  $ hg init rbstop
+  $ cd rbstop
+  $ echo a>a
+  $ hg ci -Aqma
+  $ echo b>b
+  $ hg ci -Aqmb
+  $ echo c>c
+  $ hg ci -Aqmc
+  $ echo d>d
+  $ hg ci -Aqmd
+  $ hg up 0 -q
+  $ echo f>f
+  $ hg ci -Aqmf
+  $ echo D>d
+  $ hg ci -Aqm "conflict with d"
+  $ hg up 3 -q
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  1 new orphan changesets
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  7:7fffad344617 test
+  |  c
+  |
+  o  6:b15528633407 test
+  |  b
+  |
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | x  2:177f92b77385 test
+  | |  c
+  | |
+  | x  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test it aborts if unstable csets is not allowed:
+===============================================
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution.allowunstable=False
+  > EOF
+
+  $ hg strip 6 --no-backup -q
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  abort: cannot remove original changesets with unrebased descendants
+  (either enable obsmarkers to allow unstable revisions or use --keep to keep original changesets)
+  [255]
+  $ hg rebase --abort
+  saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+  rebase aborted
+
+Test --stop when --keep is passed:
+==================================
+  $ hg rebase -s 1 -d 5 --keep
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  7:7fffad344617 test
+  |  c
+  |
+  o  6:b15528633407 test
+  |  b
+  |
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test --stop aborts when --collapse was passed:
+=============================================
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution.allowunstable=True
+  > EOF
+
+  $ hg strip 6
+  saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5 --collapse -m "collapsed b c d"
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  abort: cannot stop in --collapse session
+  [255]
+  $ hg rebase --abort
+  rebase aborted
+  $ hg diff
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test --stop raise errors with conflicting options:
+=================================================
+  $ hg rebase -s 3 -d 5
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop --dry-run
+  abort: cannot specify both --dry-run and --stop
+  [255]
+
+  $ hg rebase -s 3 -d 5
+  abort: rebase in progress
+  (use 'hg rebase --continue' or 'hg rebase --abort')
+  [255]
+  $ hg rebase --stop --continue
+  abort: cannot use --stop with --continue
+  [255]
+
+Test --stop moves bookmarks of original revisions to new rebased nodes:
+======================================================================
+  $ cd ..
+  $ hg init repo
+  $ cd repo
+
+  $ echo a > a
+  $ hg ci -Am A
+  adding a
+
+  $ echo b > b
+  $ hg ci -Am B
+  adding b
+  $ hg book X
+  $ hg book Y
+
+  $ echo c > c
+  $ hg ci -Am C
+  adding c
+  $ hg book Z
+
+  $ echo d > d
+  $ hg ci -Am D
+  adding d
+
+  $ hg up 0 -q
+  $ echo e > e
+  $ hg ci -Am E
+  adding e
+  created new head
+
+  $ echo doubt > d
+  $ hg ci -Am "conflict with d"
+  adding d
+
+  $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+  @  5: 39adf30bc1be 'conflict with d' bookmarks:
+  |
+  o  4: 9c1e55f411b6 'E' bookmarks:
+  |
+  | o  3: 67a385d4e6f2 'D' bookmarks: Z
+  | |
+  | o  2: 49cb3485fa0c 'C' bookmarks: Y
+  | |
+  | o  1: 6c81ed0049f8 'B' bookmarks: X
+  |/
+  o  0: 1994f17a630e 'A' bookmarks:
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:6c81ed0049f8 "B" (X)
+  rebasing 2:49cb3485fa0c "C" (Y)
+  rebasing 3:67a385d4e6f2 "D" (Z)
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  1 new orphan changesets
+  $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+  o  7: 9c86c650b686 'C' bookmarks: Y
+  |
+  o  6: 9b87b54e5fd8 'B' bookmarks: X
+  |
+  @  5: 39adf30bc1be 'conflict with d' bookmarks:
+  |
+  o  4: 9c1e55f411b6 'E' bookmarks:
+  |
+  | *  3: 67a385d4e6f2 'D' bookmarks: Z
+  | |
+  | x  2: 49cb3485fa0c 'C' bookmarks:
+  | |
+  | x  1: 6c81ed0049f8 'B' bookmarks:
+  |/
+  o  0: 1994f17a630e 'A' bookmarks:
+  
--- a/tests/test-rebase-parameters.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-rebase-parameters.t	Fri Aug 24 12:55:05 2018 -0700
@@ -61,7 +61,7 @@
   [1]
 
   $ hg rebase --continue --abort
-  abort: cannot use both abort and continue
+  abort: cannot use --abort with --continue
   [255]
 
   $ hg rebase --continue --collapse
--- a/tests/test-remove.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-remove.t	Fri Aug 24 12:55:05 2018 -0700
@@ -520,6 +520,14 @@
   deleting [===========================================>] 1/1\r (no-eol) (esc)
                                                               \r (no-eol) (esc)
   removing a
+  $ hg remove a -nv --color debug
+  \r (no-eol) (esc)
+  deleting [===========================================>] 1/1\r (no-eol) (esc)
+                                                              \r (no-eol) (esc)
+  \r (no-eol) (esc)
+  deleting [===========================================>] 1/1\r (no-eol) (esc)
+                                                              \r (no-eol) (esc)
+  [addremove.removed ui.status|removing a]
   $ hg diff
 
   $ cat >> .hg/hgrc <<EOF
--- a/tests/test-resolve.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-resolve.t	Fri Aug 24 12:55:05 2018 -0700
@@ -373,4 +373,219 @@
 
   $ hg resolve -l
 
+resolve -m can be configured to look for remaining conflict markers
+  $ hg up -qC 2
+  $ hg merge -q --tool=internal:merge 1
+  warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
+  [1]
+  $ hg resolve -l
+  U file1
+  U file2
+  $ echo 'remove markers' > file1
+  $ hg --config commands.resolve.mark-check=abort resolve -m
+  warning: the following files still have conflict markers:
+    file2
+  abort: conflict markers detected
+  (use --all to mark anyway)
+  [255]
+  $ hg resolve -l
+  U file1
+  U file2
+Try with --all from the hint
+  $ hg --config commands.resolve.mark-check=abort resolve -m --all
+  warning: the following files still have conflict markers:
+    file2
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+Test option value 'warn'
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=warn resolve -m
+  warning: the following files still have conflict markers:
+    file2
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+If the file is already marked as resolved, we don't warn about it
+  $ hg resolve --unmark file1
+  $ hg resolve -l
+  U file1
+  R file2
+  $ hg --config commands.resolve.mark-check=warn resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+If the user passes an invalid value, we treat it as 'none'.
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=nope resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+Test explicitly setting the option to 'none'
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=none resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+
   $ cd ..
+
+======================================================
+Test 'hg resolve' confirm config option functionality |
+======================================================
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > rebase=
+  > EOF
+
+  $ hg init repo2
+  $ cd repo2
+
+  $ echo boss > boss
+  $ hg ci -Am "add boss"
+  adding boss
+
+  $ for emp in emp1 emp2 emp3; do echo work > $emp; done;
+  $ hg ci -Aqm "added emp1 emp2 emp3"
+
+  $ hg up 0
+  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+
+  $ for emp in emp1 emp2 emp3; do echo nowork > $emp; done;
+  $ hg ci -Aqm "added lazy emp1 emp2 emp3"
+
+  $ hg log -GT "{rev} {node|short} {firstline(desc)}\n"
+  @  2 0acfd4a49af0 added lazy emp1 emp2 emp3
+  |
+  | o  1 f30f98a8181f added emp1 emp2 emp3
+  |/
+  o  0 88660038d466 add boss
+  
+  $ hg rebase -s 1 -d 2
+  rebasing 1:f30f98a8181f "added emp1 emp2 emp3"
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+
+Test when commands.resolve.confirm config option is not set:
+===========================================================
+  $ hg resolve --all
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  [1]
+
+Test when config option is set:
+==============================
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > interactive = True
+  > [commands]
+  > resolve.confirm = True
+  > EOF
+
+  $ hg resolve
+  abort: no files or directories specified
+  (use --all to re-merge all unresolved files)
+  [255]
+  $ hg resolve --all << EOF
+  > n
+  > EOF
+  re-merge all unresolved files (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve --all << EOF
+  > y
+  > EOF
+  re-merge all unresolved files (yn)? y
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  [1]
+
+Test that commands.resolve.confirm respects --mark option (only when no pattern args are given):
+===============================================================================================
+
+  $ hg resolve -m emp1
+  $ hg resolve -l
+  R emp1
+  U emp2
+  U emp3
+
+  $ hg resolve -m << EOF
+  > n
+  > EOF
+  mark all unresolved files as resolved (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve -m << EOF
+  > y
+  > EOF
+  mark all unresolved files as resolved (yn)? y
+  (no more unresolved files)
+  continue: hg rebase --continue
+  $ hg resolve -l
+  R emp1
+  R emp2
+  R emp3
+
+Test that commands.resolve.confirm respects --unmark option (only when no pattern args are given):
+===============================================================================================
+
+  $ hg resolve -u emp1
+
+  $ hg resolve -l
+  U emp1
+  R emp2
+  R emp3
+
+  $ hg resolve -u << EOF
+  > n
+  > EOF
+  mark all resolved files as unresolved (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve -m << EOF
+  > y
+  > EOF
+  mark all unresolved files as resolved (yn)? y
+  (no more unresolved files)
+  continue: hg rebase --continue
+
+  $ hg resolve -l
+  R emp1
+  R emp2
+  R emp3
+
+  $ hg rebase --abort
+  rebase aborted
+  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-revisions.t	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,45 @@
+  $ hg init repo
+  $ cd repo
+
+  $ echo 0 > a
+  $ hg ci -qAm 0
+  $ for i in 5 8 14 43 167; do
+  >   hg up -q 0
+  >   echo $i > a
+  >   hg ci -qm $i
+  > done
+  $ cat <<EOF >> .hg/hgrc
+  > [alias]
+  > l = log -T '{rev}:{shortest(node,1)}\n'
+  > EOF
+
+  $ hg l
+  5:00f
+  4:7ba5d
+  3:7ba57
+  2:72
+  1:9
+  0:b
+  $ cat <<EOF >> .hg/hgrc
+  > [experimental]
+  > revisions.disambiguatewithin=not 4
+  > EOF
+  $ hg l
+  5:0
+  4:7ba5d
+  3:7b
+  2:72
+  1:9
+  0:b
+9 was unambiguous and still is
+  $ hg l -r 9
+  1:9
+7 was ambiguous and still is
+  $ hg l -r 7
+  abort: 00changelog.i@7: ambiguous identifier!
+  [255]
+7b is no longer ambiguous
+  $ hg l -r 7b
+  3:7b
+
+  $ cd ..
--- a/tests/test-revlog-raw.py	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-revlog-raw.py	Fri Aug 24 12:55:05 2018 -0700
@@ -20,7 +20,7 @@
 
 # The test wants to control whether to use delta explicitly, based on
 # "storedeltachains".
-revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self.storedeltachains
+revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
 
 def abort(msg):
     print('abort: %s' % msg)
@@ -78,7 +78,7 @@
     else:
         flags = revlog.REVIDX_DEFAULT_FLAGS
     # Change storedeltachains temporarily, to override revlog's delta decision
-    rlog.storedeltachains = isdelta
+    rlog._storedeltachains = isdelta
     try:
         rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
         return nextrev
@@ -86,7 +86,7 @@
         abort('rev %d: failed to append: %s' % (nextrev, ex))
     finally:
         # Restore storedeltachains. It is always True, see revlog.__init__
-        rlog.storedeltachains = True
+        rlog._storedeltachains = True
 
 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
     '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
--- a/tests/test-revset.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-revset.t	Fri Aug 24 12:55:05 2018 -0700
@@ -1773,6 +1773,16 @@
 
 Test hexadecimal revision
   $ log 'id(2)'
+  $ log 'id(5)'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x5)'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x5'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x)'
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x'
+  abort: 00changelog.i@: ambiguous identifier!
+  [255]
   $ log 'id(23268)'
   4
   $ log 'id(2785f51eece)'
--- a/tests/test-revset2.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-revset2.t	Fri Aug 24 12:55:05 2018 -0700
@@ -346,7 +346,7 @@
 test ',' in `_list`
   $ log '0,1'
   hg: parse error: can't use a list in this context
-  (see hg help "revsets.x or y")
+  (see 'hg help "revsets.x or y"')
   [255]
   $ try '0,1,2'
   (list
@@ -354,7 +354,7 @@
     (symbol '1')
     (symbol '2'))
   hg: parse error: can't use a list in this context
-  (see hg help "revsets.x or y")
+  (see 'hg help "revsets.x or y"')
   [255]
 
 test that chained `or` operations make balanced addsets
--- a/tests/test-run-tests.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-run-tests.t	Fri Aug 24 12:55:05 2018 -0700
@@ -850,7 +850,7 @@
   > EOF
   
   --- $TESTTMP/test-cases.t
-  +++ $TESTTMP/test-cases.t.a.err
+  +++ $TESTTMP/test-cases.t#a.err
   @@ -1,6 +1,7 @@
    #testcases a b
    #if a
@@ -861,7 +861,7 @@
      $ echo 2
   Accept this change? [n] .
   --- $TESTTMP/test-cases.t
-  +++ $TESTTMP/test-cases.t.b.err
+  +++ $TESTTMP/test-cases.t#b.err
   @@ -5,4 +5,5 @@
    #endif
    #if b
@@ -896,6 +896,40 @@
   ..
   # Ran 2 tests, 0 skipped, 0 failed.
 
+When using multiple dimensions of "#testcases" in .t files
+
+  $ cat > test-cases.t <<'EOF'
+  > #testcases a b
+  > #testcases c d
+  > #if a d
+  >   $ echo $TESTCASE
+  >   a#d
+  > #endif
+  > #if b c
+  >   $ echo yes
+  >   no
+  > #endif
+  > EOF
+  $ rt test-cases.t
+  ..
+  --- $TESTTMP/test-cases.t
+  +++ $TESTTMP/test-cases.t#b#c.err
+  @@ -6,5 +6,5 @@
+   #endif
+   #if b c
+     $ echo yes
+  -  no
+  +  yes
+   #endif
+  
+  ERROR: test-cases.t#b#c output changed
+  !.
+  Failed test-cases.t#b#c: output changed
+  # Ran 4 tests, 0 skipped, 1 failed.
+  python hash seed: * (glob)
+  [1]
+
+  $ rm test-cases.t#b#c.err
   $ rm test-cases.t
 
 (reinstall)
@@ -1540,7 +1574,7 @@
   $ rt
   .
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1563,7 +1597,7 @@
   $ rt --restart
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1584,11 +1618,11 @@
 --restart works with outputdir
 
   $ mkdir output
-  $ mv test-cases-abc.t.B.err output
+  $ mv test-cases-abc.t#B.err output
   $ rt --restart --outputdir output
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1631,7 +1665,7 @@
   $ rt "test-cases-abc.t#B"
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1654,7 +1688,7 @@
   $ rt test-cases-abc.t#B test-cases-abc.t#C
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1677,7 +1711,7 @@
   $ rt test-cases-abc.t#B test-cases-abc.t#D
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1711,7 +1745,7 @@
   $ rt test-cases-advanced-cases.t
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1721,7 +1755,7 @@
   ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
   !
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1739,7 +1773,7 @@
   $ rt "test-cases-advanced-cases.t#case-with-dashes"
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1756,7 +1790,7 @@
   $ rt "test-cases-advanced-cases.t#casewith_-.chars"
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
--- a/tests/test-share.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-share.t	Fri Aug 24 12:55:05 2018 -0700
@@ -32,6 +32,7 @@
   [1]
   $ ls -1 ../repo1/.hg/cache
   branch2-served
+  manifestfulltextcache
   rbc-names-v1
   rbc-revs-v1
   tags2-visible
@@ -297,15 +298,15 @@
 
 test behavior when sharing a shared repo
 
-  $ hg share -B repo3 repo5
+  $ hg share -B repo3 missingdir/repo5
   updating working directory
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cd repo5
+  $ cd missingdir/repo5
   $ hg book
      bm1                       3:b87954705719
      bm3                       4:62f4ded848e4
      bm4                       5:92793bfc8cad
-  $ cd ..
+  $ cd ../..
 
 test what happens when an active bookmark is deleted
 
--- a/tests/test-ssh-bundle1.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-ssh-bundle1.t	Fri Aug 24 12:55:05 2018 -0700
@@ -59,10 +59,12 @@
 
 non-existent absolute path
 
+#if no-msys
   $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
   remote: abort: repository /$TESTTMP/nonexistent not found!
   abort: no suitable response from remote hg!
   [255]
+#endif
 
 clone remote via stream
 
@@ -502,7 +504,7 @@
 
   $ cat dummylog
   Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
-  Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio
+  Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio (no-msys !)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
--- a/tests/test-status-color.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-status-color.t	Fri Aug 24 12:55:05 2018 -0700
@@ -168,10 +168,10 @@
   $ touch modified removed deleted ignored
   $ echo "^ignored$" > .hgignore
   $ hg ci -A -m 'initial checkin'
-  adding .hgignore
-  adding deleted
-  adding modified
-  adding removed
+  \x1b[0;32madding .hgignore\x1b[0m (esc)
+  \x1b[0;32madding deleted\x1b[0m (esc)
+  \x1b[0;32madding modified\x1b[0m (esc)
+  \x1b[0;32madding removed\x1b[0m (esc)
   $ hg log --color=debug
   [log.changeset changeset.draft|changeset:   0:389aef86a55e]
   [log.tag|tag:         tip]
@@ -296,10 +296,10 @@
   $ touch modified removed deleted ignored
   $ echo "^ignored$" > .hgignore
   $ hg commit -A -m 'initial checkin'
-  adding .hgignore
-  adding deleted
-  adding modified
-  adding removed
+  \x1b[0;32madding .hgignore\x1b[0m (esc)
+  \x1b[0;32madding deleted\x1b[0m (esc)
+  \x1b[0;32madding modified\x1b[0m (esc)
+  \x1b[0;32madding removed\x1b[0m (esc)
   $ touch added unknown ignored
   $ hg add added
   $ echo "test" >> modified
@@ -393,6 +393,7 @@
 
   $ hg unknowncommand > /dev/null
   hg: unknown command 'unknowncommand'
+  (use 'hg help' for a list of commands)
   [255]
 
 color coding of error message without curses
@@ -400,6 +401,7 @@
   $ echo 'raise ImportError' > curses.py
   $ PYTHONPATH=`pwd`:$PYTHONPATH hg unknowncommand > /dev/null
   hg: unknown command 'unknowncommand'
+  (use 'hg help' for a list of commands)
   [255]
 
   $ cd ..
--- a/tests/test-strict.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-strict.t	Fri Aug 24 12:55:05 2018 -0700
@@ -15,29 +15,7 @@
 
   $ hg an a
   hg: unknown command 'an'
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help' for a list of commands)
   [255]
   $ hg annotate a
   0: a
--- a/tests/test-template-functions.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-template-functions.t	Fri Aug 24 12:55:05 2018 -0700
@@ -892,6 +892,11 @@
   $ hg log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden
   4:107
 
+  $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n'
+  4:x10
+  $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden
+  4:x10
+
  node 'c562' should be unique if the other 'c562' nodes are hidden
  (but we don't try the slow path to filter out hidden nodes for now)
 
--- a/tests/test-template-keywords.t	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/test-template-keywords.t	Fri Aug 24 12:55:05 2018 -0700
@@ -91,7 +91,7 @@
   $ for key in author branch branches date desc file_adds file_dels file_mods \
   >         file_copies file_copies_switch files \
   >         manifest node parents rev tags diffstat extras \
-  >         p1rev p2rev p1node p2node; do
+  >         p1rev p2rev p1node p2node user; do
   >     for mode in '' --verbose --debug; do
   >         hg log $mode --template "$key$mode: {$key}\n"
   >     done
@@ -702,6 +702,33 @@
   p2node--debug: 0000000000000000000000000000000000000000
   p2node--debug: 0000000000000000000000000000000000000000
   p2node--debug: 0000000000000000000000000000000000000000
+  user: test
+  user: User Name <user@hostname>
+  user: person
+  user: person
+  user: person
+  user: person
+  user: other@place
+  user: A. N. Other <other@place>
+  user: User Name <user@hostname>
+  user--verbose: test
+  user--verbose: User Name <user@hostname>
+  user--verbose: person
+  user--verbose: person
+  user--verbose: person
+  user--verbose: person
+  user--verbose: other@place
+  user--verbose: A. N. Other <other@place>
+  user--verbose: User Name <user@hostname>
+  user--debug: test
+  user--debug: User Name <user@hostname>
+  user--debug: person
+  user--debug: person
+  user--debug: person
+  user--debug: person
+  user--debug: other@place
+  user--debug: A. N. Other <other@place>
+  user--debug: User Name <user@hostname>
 
 Add a dummy commit to make up for the instability of the above:
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-util.py	Fri Aug 24 12:55:05 2018 -0700
@@ -0,0 +1,137 @@
+# unit tests for mercurial.util utilities
+from __future__ import absolute_import
+
+import contextlib
+import itertools
+import unittest
+
+from mercurial import pycompat, util, utils
+
+@contextlib.contextmanager
+def mocktimer(incr=0.1, *additional_targets):
+    """Replaces util.timer and additional_targets with a mock
+
+    The timer starts at 0. On each call the time incremented by the value
+    of incr. If incr is an iterable, then the time is incremented by the
+    next value from that iterable, looping in a cycle when reaching the end.
+
+    additional_targets must be a sequence of (object, attribute_name) tuples;
+    the mock is set with setattr(object, attribute_name, mock).
+
+    """
+    time = [0]
+    try:
+        incr = itertools.cycle(incr)
+    except TypeError:
+        incr = itertools.repeat(incr)
+
+    def timer():
+        time[0] += next(incr)
+        return time[0]
+
+    # record original values
+    orig = util.timer
+    additional_origs = [(o, a, getattr(o, a)) for o, a in additional_targets]
+
+    # mock out targets
+    util.timer = timer
+    for obj, attr in additional_targets:
+        setattr(obj, attr, timer)
+
+    try:
+        yield
+    finally:
+        # restore originals
+        util.timer = orig
+        for args in additional_origs:
+            setattr(*args)
+
+# attr.s default factory for util.timedstats.start binds the timer we
+# need to mock out.
+_start_default = (util.timedcmstats.start.default, 'factory')
+
+@contextlib.contextmanager
+def capturestderr():
+    """Replace utils.procutil.stderr with a pycompat.bytesio instance
+
+    The instance is made available as the return value of __enter__.
+
+    This contextmanager is reentrant.
+
+    """
+    orig = utils.procutil.stderr
+    utils.procutil.stderr = pycompat.bytesio()
+    try:
+        yield utils.procutil.stderr
+    finally:
+        utils.procutil.stderr = orig
+
+class timedtests(unittest.TestCase):
+    def testtimedcmstatsstr(self):
+        stats = util.timedcmstats()
+        self.assertEqual(str(stats), '<unknown>')
+        self.assertEqual(bytes(stats), b'<unknown>')
+        stats.elapsed = 12.34
+        self.assertEqual(str(stats), pycompat.sysstr(util.timecount(12.34)))
+        self.assertEqual(bytes(stats), util.timecount(12.34))
+
+    def testtimedcmcleanexit(self):
+        # timestamps 1, 4, elapsed time of 4 - 1 = 3
+        with mocktimer([1, 3], _start_default):
+            with util.timedcm('pass') as stats:
+                # actual context doesn't matter
+                pass
+
+        self.assertEqual(stats.start, 1)
+        self.assertEqual(stats.elapsed, 3)
+        self.assertEqual(stats.level, 1)
+
+    def testtimedcmnested(self):
+        # timestamps 1, 3, 6, 10, elapsed times of 6 - 3 = 3 and 10 - 1 = 9
+        with mocktimer([1, 2, 3, 4], _start_default):
+            with util.timedcm('outer') as outer_stats:
+                with util.timedcm('inner') as inner_stats:
+                    # actual context doesn't matter
+                    pass
+
+        self.assertEqual(outer_stats.start, 1)
+        self.assertEqual(outer_stats.elapsed, 9)
+        self.assertEqual(outer_stats.level, 1)
+
+        self.assertEqual(inner_stats.start, 3)
+        self.assertEqual(inner_stats.elapsed, 3)
+        self.assertEqual(inner_stats.level, 2)
+
+    def testtimedcmexception(self):
+        # timestamps 1, 4, elapsed time of 4 - 1 = 3
+        with mocktimer([1, 3], _start_default):
+            try:
+                with util.timedcm('exceptional') as stats:
+                    raise ValueError()
+            except ValueError:
+                pass
+
+        self.assertEqual(stats.start, 1)
+        self.assertEqual(stats.elapsed, 3)
+        self.assertEqual(stats.level, 1)
+
+    def testtimeddecorator(self):
+        @util.timed
+        def testfunc(callcount=1):
+            callcount -= 1
+            if callcount:
+                testfunc(callcount)
+
+        # timestamps 1, 2, 3, 4, elapsed time of 3 - 2 = 1 and 4 - 1 = 3
+        with mocktimer(1, _start_default):
+            with capturestderr() as out:
+                testfunc(2)
+
+        self.assertEqual(out.getvalue(), (
+            b'    testfunc: 1.000 s\n'
+            b'  testfunc: 3.000 s\n'
+        ))
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/wireprotohelpers.sh	Sat Aug 18 10:24:57 2018 +0200
+++ b/tests/wireprotohelpers.sh	Fri Aug 24 12:55:05 2018 -0700
@@ -20,19 +20,19 @@
     wireprotov2server,
 )
 
-@wireprotov1server.wireprotocommand('customreadonly', permission='pull')
+@wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull')
 def customreadonlyv1(repo, proto):
     return wireprototypes.bytesresponse(b'customreadonly bytes response')
 
-@wireprotov2server.wireprotocommand('customreadonly', permission='pull')
+@wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull')
 def customreadonlyv2(repo, proto):
     return wireprototypes.cborresponse(b'customreadonly bytes response')
 
-@wireprotov1server.wireprotocommand('customreadwrite', permission='push')
+@wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push')
 def customreadwrite(repo, proto):
     return wireprototypes.bytesresponse(b'customreadwrite bytes response')
 
-@wireprotov2server.wireprotocommand('customreadwrite', permission='push')
+@wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push')
 def customreadwritev2(repo, proto):
     return wireprototypes.cborresponse(b'customreadwrite bytes response')
 EOF