changeset 39419:6268fed317d0

merge with stable
author Augie Fackler <augie@google.com>
date Tue, 04 Sep 2018 12:16:28 -0400
parents b69fbdd77c40 (diff) e574cae381b6 (current diff)
children 2dd9519b8c8a
files mercurial/hgweb/webcommands.py
diffstat 310 files changed, 21844 insertions(+), 6087 deletions(-)
--- a/Makefile	Tue Sep 04 11:59:12 2018 -0400
+++ b/Makefile	Tue Sep 04 12:16:28 2018 -0400
@@ -9,7 +9,8 @@
 $(eval HGROOT := $(shell pwd))
 HGPYTHONS ?= $(HGROOT)/build/pythons
 PURE=
-PYFILES:=$(shell find mercurial hgext doc -name '*.py')
+PYFILESCMD=find mercurial hgext doc -name '*.py'
+PYFILES:=$(shell $(PYFILESCMD))
 DOCFILES=mercurial/help/*.txt
 export LANGUAGE=C
 export LC_ALL=C
@@ -145,7 +146,7 @@
         # parse them even though they are not marked for translation.
         # Extracting with an explicit encoding of ISO-8859-1 will make
         # xgettext "parse" and ignore them.
-	echo $(PYFILES) | xargs \
+	$(PYFILESCMD) | xargs \
 	  xgettext --package-name "Mercurial" \
 	  --msgid-bugs-address "<mercurial-devel@mercurial-scm.org>" \
 	  --copyright-holder "Matt Mackall <mpm@selenic.com> and others" \
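
Editor's note: the Makefile hunks above replace "echo $(PYFILES) | xargs" with a direct "$(PYFILESCMD) | xargs" pipeline, presumably so the expanded file list never has to fit on one command line. A minimal Python sketch of the same pattern, assuming it is run from a Mercurial checkout ("wc -l" stands in for the xgettext invocation):

import subprocess

# Emulates "$(PYFILESCMD) | xargs ...": find streams file names into
# xargs, which batches them into as many invocations as needed instead
# of one potentially over-long argument list.
find = subprocess.Popen(
    ['find', 'mercurial', 'hgext', 'doc', '-name', '*.py'],
    stdout=subprocess.PIPE)
subprocess.check_call(['xargs', 'wc', '-l'], stdin=find.stdout)
find.stdout.close()
find.wait()
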
--- a/contrib/bash_completion	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/bash_completion	Tue Sep 04 12:16:28 2018 -0400
@@ -152,7 +152,7 @@
 {
     local cur prev cmd cmd_index opts i aliashg
     # global options that receive an argument
-    local global_args='--cwd|-R|--repository'
+    local global_args='--cwd|-R|--repository|--color|--config|--encoding|--encodingmode|--pager'
     local hg="$1"
     local canonical=0
 
@@ -206,6 +206,18 @@
             _hg_fix_wordlist
             return
         ;;
+        --color)
+            local choices='true false yes no always auto never debug'
+            COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$choices' -- "$cur"))
+            _hg_fix_wordlist
+            return
+        ;;
+        --pager)
+            local choices='true false yes no always auto never'
+            COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$choices' -- "$cur"))
+            _hg_fix_wordlist
+            return
+        ;;
     esac
 
     if [ -z "$cmd" ] || [ $COMP_CWORD -eq $i ]; then
--- a/contrib/byteify-strings.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/byteify-strings.py	Tue Sep 04 12:16:28 2018 -0400
@@ -169,6 +169,11 @@
                 yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                 continue
 
+        # Looks like "if __name__ == '__main__'".
+        if (t.type == token.NAME and t.string == '__name__'
+            and _isop(i + 1, '==')):
+            _ensuresysstr(i + 2)
+
         # Emit unmodified token.
         yield adjusttokenpos(t, coloffset)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/catapipe.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Tool that reads primitive events from a pipe to produce a catapult trace.
+
+For now the event stream supports
+
+  START $SESSIONID ...
+
+and
+
+  END $SESSIONID ...
+
+events. Everything after the SESSIONID (which must not contain spaces)
+is used as a label for the event. Events are timestamped as of when
+they arrive in this process and are then used to produce catapult
+traces that can be loaded in Chrome's about:tracing utility. It's
+important that the event stream *into* this process stay simple,
+because we have to emit it from the shell scripts produced by
+run-tests.py.
+
+Typically you'll want to place the path to the named pipe in the
+HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg
+understand.
+"""
+from __future__ import absolute_import, print_function
+
+import argparse
+import datetime
+import json
+import os
+
+_TYPEMAP = {
+    'START': 'B',
+    'END': 'E',
+}
+
+_threadmap = {}
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('pipe', type=str, nargs=1,
+                        help='Path of named pipe to create and listen on.')
+    parser.add_argument('output', default='trace.json', type=str, nargs='?',
+                        help='Path of JSON trace file to write.')
+    parser.add_argument('--debug', default=False, action='store_true',
+                        help='Print useful debug messages')
+    args = parser.parse_args()
+    fn = args.pipe[0]
+    os.mkfifo(fn)
+    try:
+        with open(fn) as f, open(args.output, 'w') as out:
+            out.write('[\n')
+            start = datetime.datetime.now()
+            while True:
+                ev = f.readline().strip()
+                if not ev:
+                    continue
+                now = datetime.datetime.now()
+                if args.debug:
+                    print(ev)
+                verb, session, label = ev.split(' ', 2)
+                if session not in _threadmap:
+                    _threadmap[session] = len(_threadmap)
+                pid = _threadmap[session]
+                ts_micros = (now - start).total_seconds() * 1000000
+                out.write(json.dumps(
+                    {
+                        "name": label,
+                        "cat": "misc",
+                        "ph": _TYPEMAP[verb],
+                        "ts": ts_micros,
+                        "pid": pid,
+                        "tid": 1,
+                        "args": {}
+                    }))
+                out.write(',\n')
+    finally:
+        os.unlink(fn)
+
+if __name__ == '__main__':
+    main()
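
Editor's note: for reference, a hypothetical producer for the event stream catapipe.py documents above. It writes a START/END pair for one session into the named pipe; the session id and label are made up, and catapipe.py must already be listening (otherwise opening the FIFO for writing blocks):

import os
import time

pipepath = os.environ.get('HGCATAPULTSERVERPIPE', '/tmp/catapult.pipe')
with open(pipepath, 'w') as pipe:
    pipe.write('START session1 run some work\n')
    pipe.flush()
    time.sleep(0.5)  # the work being traced happens here
    pipe.write('END session1 run some work\n')
    pipe.flush()
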
--- a/contrib/check-code.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/check-code.py	Tue Sep 04 12:16:28 2018 -0400
@@ -30,7 +30,7 @@
     opentext = open
 else:
     def opentext(f):
-        return open(f, encoding='ascii')
+        return open(f, encoding='latin1')
 try:
     xrange
 except NameError:
@@ -511,6 +511,7 @@
     (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
     (r'os\.getenv', "use encoding.environ.get instead"),
     (r'os\.setenv', "modifying the environ dict is not preferred"),
+    (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
   ],
   # warnings
   [],
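
Editor's note: the new check-code rule uses a negative lookbehind so that bare "xrange" is flagged while "pycompat.xrange" passes. A quick demonstration of that regex (illustrative only):

import re

pattern = re.compile(r'(?<!pycompat\.)xrange')
print(bool(pattern.search('for i in xrange(10):')))           # True: flagged
print(bool(pattern.search('for i in pycompat.xrange(10):')))  # False: allowed
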
--- a/contrib/import-checker.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/import-checker.py	Tue Sep 04 12:16:28 2018 -0400
@@ -28,6 +28,8 @@
     'mercurial.hgweb.request',
     'mercurial.i18n',
     'mercurial.node',
+    # for revlog to re-export constant to extensions
+    'mercurial.revlogutils.constants',
     # for cffi modules to re-export pure functions
     'mercurial.pure.base85',
     'mercurial.pure.bdiff',
@@ -36,6 +38,7 @@
     'mercurial.pure.parsers',
     # third-party imports should be directly imported
     'mercurial.thirdparty',
+    'mercurial.thirdparty.attr',
     'mercurial.thirdparty.cbor',
     'mercurial.thirdparty.cbor.cbor2',
     'mercurial.thirdparty.zope',
--- a/contrib/perf.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/perf.py	Tue Sep 04 12:16:28 2018 -0400
@@ -103,7 +103,7 @@
 # since ae5d60bb70c9
 if safehasattr(time, 'perf_counter'):
     util.timer = time.perf_counter
-elif os.name == 'nt':
+elif os.name == b'nt':
     util.timer = time.clock
 else:
     util.timer = time.time
@@ -123,9 +123,9 @@
 # since 1.9 (or a79fea6b3e77).
 revlogopts = getattr(cmdutil, "debugrevlogopts",
                      getattr(commands, "debugrevlogopts", [
-        ('c', 'changelog', False, ('open changelog')),
-        ('m', 'manifest', False, ('open manifest')),
-        ('', 'dir', False, ('open directory manifest')),
+        (b'c', b'changelog', False, (b'open changelog')),
+        (b'm', b'manifest', False, (b'open manifest')),
+        (b'', b'dir', False, (b'open directory manifest')),
         ]))
 
 cmdtable = {}
@@ -134,20 +134,20 @@
 # define parsealiases locally, because cmdutil.parsealiases has been
 # available since 1.5 (or 6252852b4332)
 def parsealiases(cmd):
-    return cmd.lstrip("^").split("|")
+    return cmd.lstrip(b"^").split(b"|")
 
 if safehasattr(registrar, 'command'):
     command = registrar.command(cmdtable)
 elif safehasattr(cmdutil, 'command'):
     command = cmdutil.command(cmdtable)
-    if 'norepo' not in getargspec(command).args:
+    if b'norepo' not in getargspec(command).args:
         # for "historical portability":
         # wrap original cmdutil.command, because "norepo" option has
         # been available since 3.1 (or 75a96326cecb)
         _command = command
         def command(name, options=(), synopsis=None, norepo=False):
             if norepo:
-                commands.norepo += ' %s' % ' '.join(parsealiases(name))
+                commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return _command(name, list(options), synopsis)
 else:
     # for "historical portability":
@@ -160,7 +160,7 @@
             else:
                 cmdtable[name] = func, list(options)
             if norepo:
-                commands.norepo += ' %s' % ' '.join(parsealiases(name))
+                commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return func
         return decorator
 
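
Editor's note: the two hunks above follow perf.py's "historical portability" idiom: probe for the newest API and fall back to older spellings. Reduced to a standalone sketch (the names here are illustrative, not perf.py's actual code):

def findcommanddecorator(registrar, cmdutil, cmdtable):
    # Probe the newest location of the command decorator first,
    # then fall back to the older one.
    if getattr(registrar, 'command', None) is not None:
        return registrar.command(cmdtable)  # modern registrar API
    if getattr(cmdutil, 'command', None) is not None:
        return cmdutil.command(cmdtable)    # older cmdutil API
    raise RuntimeError('no known command decorator')
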
@@ -169,23 +169,23 @@
     import mercurial.configitems
     configtable = {}
     configitem = mercurial.registrar.configitem(configtable)
-    configitem('perf', 'presleep',
+    configitem(b'perf', b'presleep',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'stub',
+    configitem(b'perf', b'stub',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'parentscount',
+    configitem(b'perf', b'parentscount',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'all-timing',
+    configitem(b'perf', b'all-timing',
         default=mercurial.configitems.dynamicdefault,
     )
 except (ImportError, AttributeError):
     pass
 
 def getlen(ui):
-    if ui.configbool("perf", "stub", False):
+    if ui.configbool(b"perf", b"stub", False):
         return lambda x: 1
     return len
 
@@ -197,14 +197,14 @@
 
     # enforce an idle period before execution to counteract power management
     # experimental config: perf.presleep
-    time.sleep(getint(ui, "perf", "presleep", 1))
+    time.sleep(getint(ui, b"perf", b"presleep", 1))
 
     if opts is None:
         opts = {}
     # redirect all to stderr unless buffer api is in use
     if not ui._buffers:
         ui = ui.copy()
-        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
+        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
         if uifout:
             # for "historical portability":
             # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
@@ -213,7 +213,7 @@
     # get a formatter
     uiformatter = getattr(ui, 'formatter', None)
     if uiformatter:
-        fm = uiformatter('perf', opts)
+        fm = uiformatter(b'perf', opts)
     else:
         # for "historical portability":
         # define formatter locally, because ui.formatter has been
@@ -244,15 +244,15 @@
                 self._ui.write(text, **opts)
             def end(self):
                 pass
-        fm = defaultformatter(ui, 'perf', opts)
+        fm = defaultformatter(ui, b'perf', opts)
 
     # stub function, runs code only once instead of in a loop
     # experimental config: perf.stub
-    if ui.configbool("perf", "stub", False):
+    if ui.configbool(b"perf", b"stub", False):
         return functools.partial(stub_timer, fm), fm
 
     # experimental config: perf.all-timing
-    displayall = ui.configbool("perf", "all-timing", False)
+    displayall = ui.configbool(b"perf", b"all-timing", False)
     return functools.partial(_timer, fm, displayall=displayall), fm
 
 def stub_timer(fm, func, title=None):
@@ -280,30 +280,30 @@
     fm.startitem()
 
     if title:
-        fm.write('title', '! %s\n', title)
+        fm.write(b'title', b'! %s\n', title)
     if r:
-        fm.write('result', '! result: %s\n', r)
+        fm.write(b'result', b'! result: %s\n', r)
     def display(role, entry):
-        prefix = ''
-        if role != 'best':
-            prefix = '%s.' % role
-        fm.plain('!')
-        fm.write(prefix + 'wall', ' wall %f', entry[0])
-        fm.write(prefix + 'comb', ' comb %f', entry[1] + entry[2])
-        fm.write(prefix + 'user', ' user %f', entry[1])
-        fm.write(prefix + 'sys',  ' sys %f', entry[2])
-        fm.write(prefix + 'count',  ' (%s of %d)', role, count)
-        fm.plain('\n')
+        prefix = b''
+        if role != b'best':
+            prefix = b'%s.' % role
+        fm.plain(b'!')
+        fm.write(prefix + b'wall', b' wall %f', entry[0])
+        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
+        fm.write(prefix + b'user', b' user %f', entry[1])
+        fm.write(prefix + b'sys',  b' sys %f', entry[2])
+        fm.write(prefix + b'count',  b' (%s of %d)', role, count)
+        fm.plain(b'\n')
     results.sort()
     min_val = results[0]
-    display('best', min_val)
+    display(b'best', min_val)
     if displayall:
         max_val = results[-1]
-        display('max', max_val)
+        display(b'max', max_val)
         avg = tuple([sum(x) / count for x in zip(*results)])
-        display('avg', avg)
+        display(b'avg', avg)
         median = results[len(results) // 2]
-        display('median', median)
+        display(b'median', median)
 
 # utilities for historical portability
 
@@ -316,7 +316,7 @@
     try:
         return int(v)
     except ValueError:
-        raise error.ConfigError(("%s.%s is not an integer ('%s')")
+        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                 % (section, name, v))
 
 def safeattrsetter(obj, name, ignoremissing=False):
@@ -337,8 +337,8 @@
     if not util.safehasattr(obj, name):
         if ignoremissing:
             return None
-        raise error.Abort(("missing attribute %s of %s might break assumption"
-                           " of performance measurement") % (name, obj))
+        raise error.Abort((b"missing attribute %s of %s might break assumption"
+                           b" of performance measurement") % (name, obj))
 
     origvalue = getattr(obj, name)
     class attrutil(object):
@@ -364,8 +364,8 @@
     # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
     # branchmap and repoview modules exist, but subsettable attribute
     # doesn't)
-    raise error.Abort(("perfbranchmap not available with this Mercurial"),
-                      hint="use 2.5 or later")
+    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
+                      hint=b"use 2.5 or later")
 
 def getsvfs(repo):
     """Return appropriate object to access files under .hg/store
@@ -392,22 +392,22 @@
 def repocleartagscachefunc(repo):
     """Return the function to clear tags cache according to repo internal API
     """
-    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
+    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
         # in this case, setattr(repo, '_tagscache', None) or so isn't
         # correct way to clear tags cache, because existing code paths
         # expect _tagscache to be a structured object.
         def clearcache():
             # _tagscache has been filteredpropertycache since 2.5 (or
             # 98c867ac1330), and delattr() can't work in such case
-            if '_tagscache' in vars(repo):
-                del repo.__dict__['_tagscache']
+            if b'_tagscache' in vars(repo):
+                del repo.__dict__[b'_tagscache']
         return clearcache
 
-    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
+    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
     if repotags: # since 1.4 (or 5614a628d173)
         return lambda : repotags.set(None)
 
-    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
+    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
     if repotagscache: # since 0.6 (or d7df759d0e97)
         return lambda : repotagscache.set(None)
 
@@ -416,7 +416,7 @@
     # - repo.tags of such Mercurial isn't "callable", and repo.tags()
     #   in perftags() causes failure soon
     # - perf.py itself has been available since 1.1 (or eb240755386d)
-    raise error.Abort(("tags API of this hg command is unknown"))
+    raise error.Abort((b"tags API of this hg command is unknown"))
 
 # utilities to clear cache
 
@@ -428,7 +428,7 @@
 
 # perf commands
 
-@command('perfwalk', formatteropts)
+@command(b'perfwalk', formatteropts)
 def perfwalk(ui, repo, *pats, **opts):
     timer, fm = gettimer(ui, opts)
     m = scmutil.match(repo[None], pats, {})
@@ -436,47 +436,47 @@
                                               ignored=False))))
     fm.end()
 
-@command('perfannotate', formatteropts)
+@command(b'perfannotate', formatteropts)
 def perfannotate(ui, repo, f, **opts):
     timer, fm = gettimer(ui, opts)
-    fc = repo['.'][f]
+    fc = repo[b'.'][f]
     timer(lambda: len(fc.annotate(True)))
     fm.end()
 
-@command('perfstatus',
-         [('u', 'unknown', False,
-           'ask status to look for unknown files')] + formatteropts)
+@command(b'perfstatus',
+         [(b'u', b'unknown', False,
+           b'ask status to look for unknown files')] + formatteropts)
 def perfstatus(ui, repo, **opts):
     #m = match.always(repo.root, repo.getcwd())
     #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
     #                                                False))))
     timer, fm = gettimer(ui, opts)
-    timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
+    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
     fm.end()
 
-@command('perfaddremove', formatteropts)
+@command(b'perfaddremove', formatteropts)
 def perfaddremove(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     try:
         oldquiet = repo.ui.quiet
         repo.ui.quiet = True
         matcher = scmutil.match(repo[None])
-        opts['dry_run'] = True
-        timer(lambda: scmutil.addremove(repo, matcher, "", opts))
+        opts[b'dry_run'] = True
+        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
     finally:
         repo.ui.quiet = oldquiet
         fm.end()
 
 def clearcaches(cl):
     # behave somewhat consistently across internal API changes
-    if util.safehasattr(cl, 'clearcaches'):
+    if util.safehasattr(cl, b'clearcaches'):
         cl.clearcaches()
-    elif util.safehasattr(cl, '_nodecache'):
+    elif util.safehasattr(cl, b'_nodecache'):
         from mercurial.node import nullid, nullrev
         cl._nodecache = {nullid: nullrev}
         cl._nodepos = None
 
-@command('perfheads', formatteropts)
+@command(b'perfheads', formatteropts)
 def perfheads(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     cl = repo.changelog
@@ -486,7 +486,7 @@
     timer(d)
     fm.end()
 
-@command('perftags', formatteropts)
+@command(b'perftags', formatteropts)
 def perftags(ui, repo, **opts):
     import mercurial.changelog
     import mercurial.manifest
@@ -501,7 +501,7 @@
     timer(t)
     fm.end()
 
-@command('perfancestors', formatteropts)
+@command(b'perfancestors', formatteropts)
 def perfancestors(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     heads = repo.changelog.headrevs()
@@ -511,7 +511,7 @@
     timer(d)
     fm.end()
 
-@command('perfancestorset', formatteropts)
+@command(b'perfancestorset', formatteropts)
 def perfancestorset(ui, repo, revset, **opts):
     timer, fm = gettimer(ui, opts)
     revs = repo.revs(revset)
@@ -523,17 +523,17 @@
     timer(d)
     fm.end()
 
-@command('perfbookmarks', formatteropts)
+@command(b'perfbookmarks', formatteropts)
 def perfbookmarks(ui, repo, **opts):
     """benchmark parsing bookmarks from disk to memory"""
     timer, fm = gettimer(ui, opts)
     def d():
-        clearfilecache(repo, '_bookmarks')
+        clearfilecache(repo, b'_bookmarks')
         repo._bookmarks
     timer(d)
     fm.end()
 
-@command('perfbundleread', formatteropts, 'BUNDLE')
+@command(b'perfbundleread', formatteropts, b'BUNDLE')
 def perfbundleread(ui, repo, bundlepath, **opts):
     """Benchmark reading of bundle files.
 
@@ -548,7 +548,7 @@
 
     def makebench(fn):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 fn(bundle)
 
@@ -556,7 +556,7 @@
 
     def makereadnbytes(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 while bundle.read(size):
                     pass
@@ -565,7 +565,7 @@
 
     def makestdioread(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 while fh.read(size):
                     pass
 
@@ -601,7 +601,7 @@
 
     def makepartreadnbytes(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 for part in bundle.iterparts():
                     while part.read(size):
@@ -610,49 +610,49 @@
         return run
 
     benches = [
-        (makestdioread(8192), 'read(8k)'),
-        (makestdioread(16384), 'read(16k)'),
-        (makestdioread(32768), 'read(32k)'),
-        (makestdioread(131072), 'read(128k)'),
+        (makestdioread(8192), b'read(8k)'),
+        (makestdioread(16384), b'read(16k)'),
+        (makestdioread(32768), b'read(32k)'),
+        (makestdioread(131072), b'read(128k)'),
     ]
 
-    with open(bundlepath, 'rb') as fh:
+    with open(bundlepath, b'rb') as fh:
         bundle = exchange.readbundle(ui, fh, bundlepath)
 
         if isinstance(bundle, changegroup.cg1unpacker):
             benches.extend([
-                (makebench(deltaiter), 'cg1 deltaiter()'),
-                (makebench(iterchunks), 'cg1 getchunks()'),
-                (makereadnbytes(8192), 'cg1 read(8k)'),
-                (makereadnbytes(16384), 'cg1 read(16k)'),
-                (makereadnbytes(32768), 'cg1 read(32k)'),
-                (makereadnbytes(131072), 'cg1 read(128k)'),
+                (makebench(deltaiter), b'cg1 deltaiter()'),
+                (makebench(iterchunks), b'cg1 getchunks()'),
+                (makereadnbytes(8192), b'cg1 read(8k)'),
+                (makereadnbytes(16384), b'cg1 read(16k)'),
+                (makereadnbytes(32768), b'cg1 read(32k)'),
+                (makereadnbytes(131072), b'cg1 read(128k)'),
             ])
         elif isinstance(bundle, bundle2.unbundle20):
             benches.extend([
-                (makebench(forwardchunks), 'bundle2 forwardchunks()'),
-                (makebench(iterparts), 'bundle2 iterparts()'),
-                (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
-                (makebench(seek), 'bundle2 part seek()'),
-                (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
-                (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
-                (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
-                (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
+                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
+                (makebench(iterparts), b'bundle2 iterparts()'),
+                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
+                (makebench(seek), b'bundle2 part seek()'),
+                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
+                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
+                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
+                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
             ])
         elif isinstance(bundle, streamclone.streamcloneapplier):
-            raise error.Abort('stream clone bundles not supported')
+            raise error.Abort(b'stream clone bundles not supported')
         else:
-            raise error.Abort('unhandled bundle type: %s' % type(bundle))
+            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command('perfchangegroupchangelog', formatteropts +
-         [('', 'version', '02', 'changegroup version'),
-          ('r', 'rev', '', 'revisions to add to changegroup')])
-def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
+@command(b'perfchangegroupchangelog', formatteropts +
+         [(b'', b'version', b'02', b'changegroup version'),
+          (b'r', b'rev', b'', b'revisions to add to changegroup')])
+def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
     """Benchmark producing a changelog group for a changegroup.
 
     This measures the time spent processing the changelog during a
@@ -663,90 +663,89 @@
     By default, all revisions are added to the changegroup.
     """
     cl = repo.changelog
-    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
+    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
     bundler = changegroup.getbundler(version, repo)
 
-    def lookup(node):
-        # The real bundler reads the revision in order to access the
-        # manifest node and files list. Do that here.
-        cl.read(node)
-        return node
-
     def d():
-        for chunk in bundler.group(revs, cl, lookup):
+        state, chunks = bundler._generatechangelog(cl, nodes)
+        for chunk in chunks:
             pass
 
     timer, fm = gettimer(ui, opts)
-    timer(d)
+
+    # Terminal printing can interfere with timing. So disable it.
+    with ui.configoverride({(b'progress', b'disable'): True}):
+        timer(d)
+
     fm.end()
 
-@command('perfdirs', formatteropts)
+@command(b'perfdirs', formatteropts)
 def perfdirs(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate.hasdir('a')
+        dirstate.hasdir(b'a')
         del dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstate', formatteropts)
+@command(b'perfdirstate', formatteropts)
 def perfdirstate(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
-    "a" in repo.dirstate
+    b"a" in repo.dirstate
     def d():
         repo.dirstate.invalidate()
-        "a" in repo.dirstate
+        b"a" in repo.dirstate
     timer(d)
     fm.end()
 
-@command('perfdirstatedirs', formatteropts)
+@command(b'perfdirstatedirs', formatteropts)
 def perfdirstatedirs(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
-    "a" in repo.dirstate
+    b"a" in repo.dirstate
     def d():
-        repo.dirstate.hasdir("a")
+        repo.dirstate.hasdir(b"a")
         del repo.dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstatefoldmap', formatteropts)
+@command(b'perfdirstatefoldmap', formatteropts)
 def perfdirstatefoldmap(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate._map.filefoldmap.get('a')
+        dirstate._map.filefoldmap.get(b'a')
         del dirstate._map.filefoldmap
     timer(d)
     fm.end()
 
-@command('perfdirfoldmap', formatteropts)
+@command(b'perfdirfoldmap', formatteropts)
 def perfdirfoldmap(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate._map.dirfoldmap.get('a')
+        dirstate._map.dirfoldmap.get(b'a')
         del dirstate._map.dirfoldmap
         del dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstatewrite', formatteropts)
+@command(b'perfdirstatewrite', formatteropts)
 def perfdirstatewrite(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     ds = repo.dirstate
-    "a" in ds
+    b"a" in ds
     def d():
         ds._dirty = True
         ds.write(repo.currenttransaction())
     timer(d)
     fm.end()
 
-@command('perfmergecalculate',
-         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
+@command(b'perfmergecalculate',
+         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
 def perfmergecalculate(ui, repo, rev, **opts):
     timer, fm = gettimer(ui, opts)
     wctx = repo[None]
@@ -763,7 +762,7 @@
     timer(d)
     fm.end()
 
-@command('perfpathcopies', [], "REV REV")
+@command(b'perfpathcopies', [], b"REV REV")
 def perfpathcopies(ui, repo, rev1, rev2, **opts):
     timer, fm = gettimer(ui, opts)
     ctx1 = scmutil.revsingle(repo, rev1, rev1)
@@ -773,26 +772,26 @@
     timer(d)
     fm.end()
 
-@command('perfphases',
-         [('', 'full', False, 'include file reading time too'),
-         ], "")
+@command(b'perfphases',
+         [(b'', b'full', False, b'include file reading time too'),
+          ], b"")
 def perfphases(ui, repo, **opts):
     """benchmark phasesets computation"""
     timer, fm = gettimer(ui, opts)
     _phases = repo._phasecache
-    full = opts.get('full')
+    full = opts.get(b'full')
     def d():
         phases = _phases
         if full:
-            clearfilecache(repo, '_phasecache')
+            clearfilecache(repo, b'_phasecache')
             phases = repo._phasecache
         phases.invalidate()
         phases.loadphaserevs(repo)
     timer(d)
     fm.end()
 
-@command('perfphasesremote',
-         [], "[DEST]")
+@command(b'perfphasesremote',
+         [], b"[DEST]")
 def perfphasesremote(ui, repo, dest=None, **opts):
     """benchmark time needed to analyse phases of the remote server"""
     from mercurial.node import (
@@ -805,14 +804,14 @@
     )
     timer, fm = gettimer(ui, opts)
 
-    path = ui.paths.getpath(dest, default=('default-push', 'default'))
+    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort(('default repository not configured!'),
-                         hint=("see 'hg help config.paths'"))
+        raise error.Abort((b'default repository not configured!'),
+                          hint=(b"see 'hg help config.paths'"))
     dest = path.pushloc or path.loc
-    branches = (path.branch, opts.get('branch') or [])
-    ui.status(('analysing phase of %s\n') % util.hidepassword(dest))
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
+    branches = (path.branch, opts.get(b'branch') or [])
+    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
+    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
     other = hg.peer(repo, opts, dest)
 
     # easier to perform discovery through the operation
@@ -822,25 +821,25 @@
     remotesubset = op.fallbackheads
 
     with other.commandexecutor() as e:
-        remotephases = e.callcommand('listkeys',
-                       {'namespace': 'phases'}).result()
+        remotephases = e.callcommand(b'listkeys',
+                       {b'namespace': b'phases'}).result()
     del other
-    publishing = remotephases.get('publishing', False)
+    publishing = remotephases.get(b'publishing', False)
     if publishing:
-        ui.status(('publishing: yes\n'))
+        ui.status((b'publishing: yes\n'))
     else:
-        ui.status(('publishing: no\n'))
+        ui.status((b'publishing: no\n'))
 
     nodemap = repo.changelog.nodemap
     nonpublishroots = 0
     for nhex, phase in remotephases.iteritems():
-        if nhex == 'publishing': # ignore data related to publish option
+        if nhex == b'publishing': # ignore data related to publish option
             continue
         node = bin(nhex)
         if node in nodemap and int(phase):
             nonpublishroots += 1
-    ui.status(('number of roots: %d\n') % len(remotephases))
-    ui.status(('number of known non public roots: %d\n') % nonpublishroots)
+    ui.status((b'number of roots: %d\n') % len(remotephases))
+    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
     def d():
         phases.remotephasessummary(repo,
                                    remotesubset,
@@ -848,22 +847,42 @@
     timer(d)
     fm.end()
 
-@command('perfmanifest', [], 'REV')
-def perfmanifest(ui, repo, rev, **opts):
+@command(b'perfmanifest', [
+            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
+            (b'', b'clear-disk', False, b'clear on-disk caches too'),
+         ], b'REV|NODE')
+def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
     """benchmark the time to read a manifest from disk and return a usable
     dict-like object
 
     Manifest caches are cleared before retrieval."""
     timer, fm = gettimer(ui, opts)
-    ctx = scmutil.revsingle(repo, rev, rev)
-    t = ctx.manifestnode()
+    if not manifest_rev:
+        ctx = scmutil.revsingle(repo, rev, rev)
+        t = ctx.manifestnode()
+    else:
+        from mercurial.node import bin
+
+        if len(rev) == 40:
+            t = bin(rev)
+        else:
+            try:
+                rev = int(rev)
+
+                if util.safehasattr(repo.manifestlog, b'getstorage'):
+                    t = repo.manifestlog.getstorage(b'').node(rev)
+                else:
+                    t = repo.manifestlog._revlog.lookup(rev)
+            except ValueError:
+                raise error.Abort(b'manifest revision must be integer or full '
+                                  b'node')
     def d():
-        repo.manifestlog.clearcaches()
+        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
         repo.manifestlog[t].read()
     timer(d)
     fm.end()
 
-@command('perfchangeset', formatteropts)
+@command(b'perfchangeset', formatteropts)
 def perfchangeset(ui, repo, rev, **opts):
     timer, fm = gettimer(ui, opts)
     n = scmutil.revsingle(repo, rev).node()
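
Editor's note: the perfmanifest change above accepts either an integer revision or a full 40-character hex node when --manifest-rev is given. The dispatch, as a standalone sketch (binascii.unhexlify stands in for mercurial.node.bin, which is the same function):

from binascii import unhexlify

def parsemanifestarg(rev):
    if len(rev) == 40:
        return unhexlify(rev)  # full hex node -> 20-byte binary node
    try:
        return int(rev)        # otherwise an integer revision number
    except ValueError:
        raise SystemExit('manifest revision must be integer or full node')

print(parsemanifestarg('0' * 40))  # b'\x00' * 20
print(parsemanifestarg('5'))       # 5
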
@@ -873,40 +892,40 @@
     timer(d)
     fm.end()
 
-@command('perfindex', formatteropts)
+@command(b'perfindex', formatteropts)
 def perfindex(ui, repo, **opts):
     import mercurial.revlog
     timer, fm = gettimer(ui, opts)
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
-    n = repo["tip"].node()
+    n = repo[b"tip"].node()
     svfs = getsvfs(repo)
     def d():
-        cl = mercurial.revlog.revlog(svfs, "00changelog.i")
+        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
         cl.rev(n)
     timer(d)
     fm.end()
 
-@command('perfstartup', formatteropts)
+@command(b'perfstartup', formatteropts)
 def perfstartup(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     cmd = sys.argv[0]
     def d():
-        if os.name != 'nt':
-            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
+        if os.name != b'nt':
+            os.system(b"HGRCPATH= %s version -q > /dev/null" % cmd)
         else:
-            os.environ['HGRCPATH'] = ' '
-            os.system("%s version -q > NUL" % cmd)
+            os.environ[b'HGRCPATH'] = b' '
+            os.system(b"%s version -q > NUL" % cmd)
     timer(d)
     fm.end()
 
-@command('perfparents', formatteropts)
+@command(b'perfparents', formatteropts)
 def perfparents(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     # control the number of commits perfparents iterates over
     # experimental config: perf.parentscount
-    count = getint(ui, "perf", "parentscount", 1000)
+    count = getint(ui, b"perf", b"parentscount", 1000)
     if len(repo.changelog) < count:
-        raise error.Abort("repo needs %d commits for this test" % count)
+        raise error.Abort(b"repo needs %d commits for this test" % count)
     repo = repo.unfiltered()
     nl = [repo.changelog.node(i) for i in xrange(count)]
     def d():
@@ -915,7 +934,7 @@
     timer(d)
     fm.end()
 
-@command('perfctxfiles', formatteropts)
+@command(b'perfctxfiles', formatteropts)
 def perfctxfiles(ui, repo, x, **opts):
     x = int(x)
     timer, fm = gettimer(ui, opts)
@@ -924,7 +943,7 @@
     timer(d)
     fm.end()
 
-@command('perfrawfiles', formatteropts)
+@command(b'perfrawfiles', formatteropts)
 def perfrawfiles(ui, repo, x, **opts):
     x = int(x)
     timer, fm = gettimer(ui, opts)
@@ -934,45 +953,78 @@
     timer(d)
     fm.end()
 
-@command('perflookup', formatteropts)
+@command(b'perflookup', formatteropts)
 def perflookup(ui, repo, rev, **opts):
     timer, fm = gettimer(ui, opts)
     timer(lambda: len(repo.lookup(rev)))
     fm.end()
 
-@command('perfrevrange', formatteropts)
+@command(b'perflinelogedits',
+         [(b'n', b'edits', 10000, b'number of edits'),
+          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
+          ], norepo=True)
+def perflinelogedits(ui, **opts):
+    from mercurial import linelog
+
+    edits = opts[b'edits']
+    maxhunklines = opts[b'max_hunk_lines']
+
+    maxb1 = 100000
+    random.seed(0)
+    randint = random.randint
+    currentlines = 0
+    arglist = []
+    for rev in xrange(edits):
+        a1 = randint(0, currentlines)
+        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
+        b1 = randint(0, maxb1)
+        b2 = randint(b1, b1 + maxhunklines)
+        currentlines += (b2 - b1) - (a2 - a1)
+        arglist.append((rev, a1, a2, b1, b2))
+
+    def d():
+        ll = linelog.linelog()
+        for args in arglist:
+            ll.replacelines(*args)
+
+    timer, fm = gettimer(ui, opts)
+    timer(d)
+    fm.end()
+
+@command(b'perfrevrange', formatteropts)
 def perfrevrange(ui, repo, *specs, **opts):
     timer, fm = gettimer(ui, opts)
     revrange = scmutil.revrange
     timer(lambda: len(revrange(repo, specs)))
     fm.end()
 
-@command('perfnodelookup', formatteropts)
+@command(b'perfnodelookup', formatteropts)
 def perfnodelookup(ui, repo, rev, **opts):
     timer, fm = gettimer(ui, opts)
     import mercurial.revlog
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
     n = scmutil.revsingle(repo, rev).node()
-    cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
+    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
     def d():
         cl.rev(n)
         clearcaches(cl)
     timer(d)
     fm.end()
 
-@command('perflog',
-         [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
+@command(b'perflog',
+         [(b'', b'rename', False, b'ask log to follow renames')
+         ] + formatteropts)
 def perflog(ui, repo, rev=None, **opts):
     if rev is None:
         rev=[]
     timer, fm = gettimer(ui, opts)
     ui.pushbuffer()
-    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
-                               copies=opts.get('rename')))
+    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
+                               copies=opts.get(b'rename')))
     ui.popbuffer()
     fm.end()
 
-@command('perfmoonwalk', formatteropts)
+@command(b'perfmoonwalk', formatteropts)
 def perfmoonwalk(ui, repo, **opts):
     """benchmark walking the changelog backwards
 
@@ -980,31 +1032,31 @@
     """
     timer, fm = gettimer(ui, opts)
     def moonwalk():
-        for i in xrange(len(repo), -1, -1):
+        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
             ctx = repo[i]
             ctx.branch() # read changelog data (in addition to the index)
     timer(moonwalk)
     fm.end()
 
-@command('perftemplating',
-         [('r', 'rev', [], 'revisions to run the template on'),
-         ] + formatteropts)
+@command(b'perftemplating',
+         [(b'r', b'rev', [], b'revisions to run the template on'),
+          ] + formatteropts)
 def perftemplating(ui, repo, testedtemplate=None, **opts):
     """test the rendering time of a given template"""
     if makelogtemplater is None:
-        raise error.Abort(("perftemplating not available with this Mercurial"),
-                          hint="use 4.3 or later")
+        raise error.Abort((b"perftemplating not available with this Mercurial"),
+                          hint=b"use 4.3 or later")
 
     nullui = ui.copy()
-    nullui.fout = open(os.devnull, 'wb')
+    nullui.fout = open(os.devnull, b'wb')
     nullui.disablepager()
-    revs = opts.get('rev')
+    revs = opts.get(b'rev')
     if not revs:
-        revs = ['all()']
+        revs = [b'all()']
     revs = list(scmutil.revrange(repo, revs))
 
-    defaulttemplate = ('{date|shortdate} [{rev}:{node|short}]'
-                       ' {author|person}: {desc|firstline}\n')
+    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
+                       b' {author|person}: {desc|firstline}\n')
     if testedtemplate is None:
         testedtemplate = defaulttemplate
     displayer = makelogtemplater(nullui, repo, testedtemplate)
@@ -1018,13 +1070,13 @@
     timer(format)
     fm.end()
 
-@command('perfcca', formatteropts)
+@command(b'perfcca', formatteropts)
 def perfcca(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
     fm.end()
 
-@command('perffncacheload', formatteropts)
+@command(b'perffncacheload', formatteropts)
 def perffncacheload(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     s = repo.store
@@ -1033,14 +1085,14 @@
     timer(d)
     fm.end()
 
-@command('perffncachewrite', formatteropts)
+@command(b'perffncachewrite', formatteropts)
 def perffncachewrite(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     s = repo.store
     lock = repo.lock()
     s.fncache._load()
-    tr = repo.transaction('perffncachewrite')
-    tr.addbackup('fncache')
+    tr = repo.transaction(b'perffncachewrite')
+    tr.addbackup(b'fncache')
     def d():
         s.fncache._dirty = True
         s.fncache.write(tr)
@@ -1049,7 +1101,7 @@
     lock.release()
     fm.end()
 
-@command('perffncacheencode', formatteropts)
+@command(b'perffncacheencode', formatteropts)
 def perffncacheencode(ui, repo, **opts):
     timer, fm = gettimer(ui, opts)
     s = repo.store
@@ -1076,15 +1128,25 @@
         with ready:
             ready.wait()
 
-@command('perfbdiff', revlogopts + formatteropts + [
-    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
-    ('', 'alldata', False, 'test bdiffs for all associated revisions'),
-    ('', 'threads', 0, 'number of thread to use (disable with 0)'),
-    ('', 'blocks', False, 'test computing diffs into blocks'),
-    ('', 'xdiff', False, 'use xdiff algorithm'),
+def _manifestrevision(repo, mnode):
+    ml = repo.manifestlog
+
+    if util.safehasattr(ml, b'getstorage'):
+        store = ml.getstorage(b'')
+    else:
+        store = ml._revlog
+
+    return store.revision(mnode)
+
+@command(b'perfbdiff', revlogopts + formatteropts + [
+    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
+    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
+    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
+    (b'', b'blocks', False, b'test computing diffs into blocks'),
+    (b'', b'xdiff', False, b'use xdiff algorithm'),
     ],
 
-    '-c|-m|FILE REV')
+    b'-c|-m|FILE REV')
 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
     """benchmark a bdiff between revisions
 
@@ -1099,31 +1161,31 @@
     """
     opts = pycompat.byteskwargs(opts)
 
-    if opts['xdiff'] and not opts['blocks']:
-        raise error.CommandError('perfbdiff', '--xdiff requires --blocks')
+    if opts[b'xdiff'] and not opts[b'blocks']:
+        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
 
-    if opts['alldata']:
-        opts['changelog'] = True
+    if opts[b'alldata']:
+        opts[b'changelog'] = True
 
-    if opts.get('changelog') or opts.get('manifest'):
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfbdiff', 'invalid arguments')
+        raise error.CommandError(b'perfbdiff', b'invalid arguments')
 
-    blocks = opts['blocks']
-    xdiff = opts['xdiff']
+    blocks = opts[b'blocks']
+    xdiff = opts[b'xdiff']
     textpairs = []
 
-    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
 
     startrev = r.rev(r.lookup(rev))
     for rev in range(startrev, min(startrev + count, len(r) - 1)):
-        if opts['alldata']:
+        if opts[b'alldata']:
             # Load revisions associated with changeset.
             ctx = repo[rev]
-            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+            mtext = _manifestrevision(repo, ctx.manifestnode())
             for pctx in ctx.parents():
-                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+                pman = _manifestrevision(repo, pctx.manifestnode())
                 textpairs.append((pman, mtext))
 
             # Load filelog revisions by iterating manifest delta.
@@ -1177,10 +1239,10 @@
         with ready:
             ready.notify_all()
 
-@command('perfunidiff', revlogopts + formatteropts + [
-    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
-    ('', 'alldata', False, 'test unidiffs for all associated revisions'),
-    ], '-c|-m|FILE REV')
+@command(b'perfunidiff', revlogopts + formatteropts + [
+    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
+    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
+    ], b'-c|-m|FILE REV')
 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
     """benchmark a unified diff between revisions
 
@@ -1196,26 +1258,26 @@
     measure diffs for all changes related to that changeset (manifest
     and filelogs).
     """
-    if opts['alldata']:
-        opts['changelog'] = True
+    if opts[b'alldata']:
+        opts[b'changelog'] = True
 
-    if opts.get('changelog') or opts.get('manifest'):
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfunidiff', 'invalid arguments')
+        raise error.CommandError(b'perfunidiff', b'invalid arguments')
 
     textpairs = []
 
-    r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
 
     startrev = r.rev(r.lookup(rev))
     for rev in range(startrev, min(startrev + count, len(r) - 1)):
-        if opts['alldata']:
+        if opts[b'alldata']:
             # Load revisions associated with changeset.
             ctx = repo[rev]
-            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+            mtext = _manifestrevision(repo, ctx.manifestnode())
             for pctx in ctx.parents():
-                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+                pman = _manifestrevision(repo, pctx.manifestnode())
                 textpairs.append((pman, mtext))
 
             # Load filelog revisions by iterating manifest delta.
@@ -1234,7 +1296,7 @@
         for left, right in textpairs:
             # The date strings don't matter, so we pass empty strings.
             headerlines, hunks = mdiff.unidiff(
-                left, '', right, '', 'left', 'right', binary=False)
+                left, b'', right, b'', b'left', b'right', binary=False)
             # consume iterators in roughly the way patch.py does
             b'\n'.join(headerlines)
             b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
@@ -1242,28 +1304,28 @@
     timer(d)
     fm.end()
 
-@command('perfdiffwd', formatteropts)
+@command(b'perfdiffwd', formatteropts)
 def perfdiffwd(ui, repo, **opts):
     """Profile diff of working directory changes"""
     timer, fm = gettimer(ui, opts)
     options = {
-        'w': 'ignore_all_space',
-        'b': 'ignore_space_change',
-        'B': 'ignore_blank_lines',
+        b'w': b'ignore_all_space',
+        b'b': b'ignore_space_change',
+        b'B': b'ignore_blank_lines',
         }
 
-    for diffopt in ('', 'w', 'b', 'B', 'wB'):
-        opts = dict((options[c], '1') for c in diffopt)
+    for diffopt in (b'', b'w', b'b', b'B', b'wB'):
+        opts = dict((options[c], b'1') for c in diffopt)
         def d():
             ui.pushbuffer()
             commands.diff(ui, repo, **opts)
             ui.popbuffer()
-        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
+        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
         timer(d, title)
     fm.end()
 
-@command('perfrevlogindex', revlogopts + formatteropts,
-         '-c|-m|FILE')
+@command(b'perfrevlogindex', revlogopts + formatteropts,
+         b'-c|-m|FILE')
 def perfrevlogindex(ui, repo, file_=None, **opts):
     """Benchmark operations against a revlog index.
 
@@ -1272,19 +1334,19 @@
     index data.
     """
 
-    rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)
+    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
 
     opener = getattr(rl, 'opener')  # trick linter
     indexfile = rl.indexfile
     data = opener.read(indexfile)
 
-    header = struct.unpack('>I', data[0:4])[0]
+    header = struct.unpack(b'>I', data[0:4])[0]
     version = header & 0xFFFF
     if version == 1:
         revlogio = revlog.revlogio()
         inline = header & (1 << 16)
     else:
-        raise error.Abort(('unsupported revlog version: %d') % version)
+        raise error.Abort((b'unsupported revlog version: %d') % version)
 
     rllen = len(rl)
 
@@ -1344,33 +1406,33 @@
                     pass
 
     benches = [
-        (constructor, 'revlog constructor'),
-        (read, 'read'),
-        (parseindex, 'create index object'),
-        (lambda: getentry(0), 'retrieve index entry for rev 0'),
-        (lambda: resolvenode('a' * 20), 'look up missing node'),
-        (lambda: resolvenode(node0), 'look up node at rev 0'),
-        (lambda: resolvenode(node25), 'look up node at 1/4 len'),
-        (lambda: resolvenode(node50), 'look up node at 1/2 len'),
-        (lambda: resolvenode(node75), 'look up node at 3/4 len'),
-        (lambda: resolvenode(node100), 'look up node at tip'),
+        (constructor, b'revlog constructor'),
+        (read, b'read'),
+        (parseindex, b'create index object'),
+        (lambda: getentry(0), b'retrieve index entry for rev 0'),
+        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
+        (lambda: resolvenode(node0), b'look up node at rev 0'),
+        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
+        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
+        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
+        (lambda: resolvenode(node100), b'look up node at tip'),
         # 2x variation is to measure caching impact.
         (lambda: resolvenodes(allnodes),
-         'look up all nodes (forward)'),
+         b'look up all nodes (forward)'),
         (lambda: resolvenodes(allnodes, 2),
-         'look up all nodes 2x (forward)'),
+         b'look up all nodes 2x (forward)'),
         (lambda: resolvenodes(allnodesrev),
-         'look up all nodes (reverse)'),
+         b'look up all nodes (reverse)'),
         (lambda: resolvenodes(allnodesrev, 2),
-         'look up all nodes 2x (reverse)'),
+         b'look up all nodes 2x (reverse)'),
         (lambda: getentries(allrevs),
-         'retrieve all index entries (forward)'),
+         b'retrieve all index entries (forward)'),
         (lambda: getentries(allrevs, 2),
-         'retrieve all index entries 2x (forward)'),
+         b'retrieve all index entries 2x (forward)'),
         (lambda: getentries(allrevsrev),
-         'retrieve all index entries (reverse)'),
+         b'retrieve all index entries (reverse)'),
         (lambda: getentries(allrevsrev, 2),
-         'retrieve all index entries 2x (reverse)'),
+         b'retrieve all index entries 2x (reverse)'),
     ]
 
     for fn, title in benches:
@@ -1378,11 +1440,11 @@
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevlogrevisions', revlogopts + formatteropts +
-         [('d', 'dist', 100, 'distance between the revisions'),
-          ('s', 'startrev', 0, 'revision to start reading at'),
-          ('', 'reverse', False, 'read in reverse')],
-         '-c|-m|FILE')
+@command(b'perfrevlogrevisions', revlogopts + formatteropts +
+         [(b'd', b'dist', 100, b'distance between the revisions'),
+          (b's', b'startrev', 0, b'revision to start reading at'),
+          (b'', b'reverse', False, b'read in reverse')],
+         b'-c|-m|FILE')
 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                         **opts):
     """Benchmark reading a series of revisions from a revlog.
@@ -1392,7 +1454,7 @@
 
     The start revision can be defined via ``-s/--startrev``.
     """
-    rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
+    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
     rllen = getlen(ui)(rl)
 
     def d():
@@ -1400,7 +1462,7 @@
 
         beginrev = startrev
         endrev = rllen
-        dist = opts['dist']
+        dist = opts[b'dist']
 
         if reverse:
             beginrev, endrev = endrev, beginrev
@@ -1415,10 +1477,10 @@
     timer(d)
     fm.end()
 
-@command('perfrevlogchunks', revlogopts + formatteropts +
-         [('e', 'engines', '', 'compression engines to use'),
-          ('s', 'startrev', 0, 'revision to start at')],
-         '-c|-m|FILE')
+@command(b'perfrevlogchunks', revlogopts + formatteropts +
+         [(b'e', b'engines', b'', b'compression engines to use'),
+          (b's', b'startrev', 0, b'revision to start at')],
+         b'-c|-m|FILE')
 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
     """Benchmark operations on revlog chunks.
 
@@ -1431,7 +1493,7 @@
     For measurements of higher-level operations like resolving revisions,
     see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
     """
-    rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
+    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
 
     # _chunkraw was renamed to _getsegmentforrevs.
     try:
@@ -1441,19 +1503,19 @@
 
     # Verify engines argument.
     if engines:
-        engines = set(e.strip() for e in engines.split(','))
+        engines = set(e.strip() for e in engines.split(b','))
         for engine in engines:
             try:
                 util.compressionengines[engine]
             except KeyError:
-                raise error.Abort('unknown compression engine: %s' % engine)
+                raise error.Abort(b'unknown compression engine: %s' % engine)
     else:
         engines = []
         for e in util.compengines:
             engine = util.compengines[e]
             try:
                 if engine.available():
-                    engine.revlogcompressor().compress('dummy')
+                    engine.revlogcompressor().compress(b'dummy')
                     engines.append(e)
             except NotImplementedError:
                 pass
@@ -1513,27 +1575,27 @@
             rl._compressor = oldcompressor
 
     benches = [
-        (lambda: doread(), 'read'),
-        (lambda: doreadcachedfh(), 'read w/ reused fd'),
-        (lambda: doreadbatch(), 'read batch'),
-        (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
-        (lambda: dochunk(), 'chunk'),
-        (lambda: dochunkbatch(), 'chunk batch'),
+        (lambda: doread(), b'read'),
+        (lambda: doreadcachedfh(), b'read w/ reused fd'),
+        (lambda: doreadbatch(), b'read batch'),
+        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
+        (lambda: dochunk(), b'chunk'),
+        (lambda: dochunkbatch(), b'chunk batch'),
     ]
 
     for engine in sorted(engines):
         compressor = util.compengines[engine].revlogcompressor()
         benches.append((functools.partial(docompress, compressor),
-                        'compress w/ %s' % engine))
+                        b'compress w/ %s' % engine))
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevlogrevision', revlogopts + formatteropts +
-         [('', 'cache', False, 'use caches instead of clearing')],
-         '-c|-m|FILE REV')
+@command(b'perfrevlogrevision', revlogopts + formatteropts +
+         [(b'', b'cache', False, b'use caches instead of clearing')],
+         b'-c|-m|FILE REV')
 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
     """Benchmark obtaining a revlog revision.
 
@@ -1547,12 +1609,12 @@
 
     This command measures the time spent in each of these phases.
     """
-    if opts.get('changelog') or opts.get('manifest'):
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfrevlogrevision', 'invalid arguments')
+        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
 
-    r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
 
     # _chunkraw was renamed to _getsegmentforrevs.
     try:
@@ -1627,13 +1689,13 @@
     text = mdiff.patches(text, bins)
 
     benches = [
-        (lambda: dorevision(), 'full'),
-        (lambda: dodeltachain(rev), 'deltachain'),
-        (lambda: doread(chain), 'read'),
-        (lambda: dorawchunks(data, chain), 'rawchunks'),
-        (lambda: dodecompress(rawchunks), 'decompress'),
-        (lambda: dopatch(text, bins), 'patch'),
-        (lambda: dohash(text), 'hash'),
+        (lambda: dorevision(), b'full'),
+        (lambda: dodeltachain(rev), b'deltachain'),
+        (lambda: doread(chain), b'read'),
+        (lambda: dorawchunks(data, chain), b'rawchunks'),
+        (lambda: dodecompress(rawchunks), b'decompress'),
+        (lambda: dopatch(text, bins), b'patch'),
+        (lambda: dohash(text), b'hash'),
     ]
 
     for fn, title in benches:
@@ -1641,10 +1703,10 @@
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevset',
-         [('C', 'clear', False, 'clear volatile cache between each call.'),
-          ('', 'contexts', False, 'obtain changectx for each revision')]
-         + formatteropts, "REVSET")
+@command(b'perfrevset',
+         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
+          (b'', b'contexts', False, b'obtain changectx for each revision')]
+         + formatteropts, b"REVSET")
 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
     """benchmark the execution time of a revset
 
@@ -1662,9 +1724,9 @@
     timer(d)
     fm.end()
 
-@command('perfvolatilesets',
-         [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
-         ] + formatteropts)
+@command(b'perfvolatilesets',
+         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
+          ] + formatteropts)
 def perfvolatilesets(ui, repo, *names, **opts):
     """benchmark the computation of various volatile set
 
@@ -1675,8 +1737,8 @@
     def getobs(name):
         def d():
             repo.invalidatevolatilesets()
-            if opts['clear_obsstore']:
-                clearfilecache(repo, 'obsstore')
+            if opts[b'clear_obsstore']:
+                clearfilecache(repo, b'obsstore')
             obsolete.getrevs(repo, name)
         return d
 
@@ -1690,8 +1752,8 @@
     def getfiltered(name):
         def d():
             repo.invalidatevolatilesets()
-            if opts['clear_obsstore']:
-                clearfilecache(repo, 'obsstore')
+            if opts[b'clear_obsstore']:
+                clearfilecache(repo, b'obsstore')
             repoview.filterrevs(repo, name)
         return d
 
@@ -1703,19 +1765,19 @@
         timer(getfiltered(name), title=name)
     fm.end()
 
-@command('perfbranchmap',
-         [('f', 'full', False,
-           'Includes build time of subset'),
-          ('', 'clear-revbranch', False,
-           'purge the revbranch cache between computation'),
-         ] + formatteropts)
+@command(b'perfbranchmap',
+         [(b'f', b'full', False,
+           b'Includes build time of subset'),
+          (b'', b'clear-revbranch', False,
+           b'purge the revbranch cache between computations'),
+          ] + formatteropts)
 def perfbranchmap(ui, repo, *filternames, **opts):
     """benchmark the update of a branchmap
 
     This benchmarks the full repo.branchmap() call with read and write disabled
     """
-    full = opts.get("full", False)
-    clear_revbranch = opts.get("clear_revbranch", False)
+    full = opts.get(b"full", False)
+    clear_revbranch = opts.get(b"clear_revbranch", False)
     timer, fm = gettimer(ui, opts)
     def getbranchmap(filtername):
         """generate a benchmark function for the filtername"""
@@ -1744,7 +1806,7 @@
             if subset not in possiblefilters:
                 break
         else:
-            assert False, 'subset cycle %s!' % possiblefilters
+            assert False, b'subset cycle %s!' % possiblefilters
         allfilters.append(name)
         possiblefilters.remove(name)
 
@@ -1752,26 +1814,51 @@
     if not full:
         for name in allfilters:
             repo.filtered(name).branchmap()
-    if not filternames or 'unfiltered' in filternames:
+    if not filternames or b'unfiltered' in filternames:
         # add unfiltered
         allfilters.append(None)
 
-    branchcacheread = safeattrsetter(branchmap, 'read')
-    branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
+    branchcacheread = safeattrsetter(branchmap, b'read')
+    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
     branchcacheread.set(lambda repo: None)
     branchcachewrite.set(lambda bc, repo: None)
     try:
         for name in allfilters:
             printname = name
             if name is None:
-                printname = 'unfiltered'
+                printname = b'unfiltered'
             timer(getbranchmap(name), title=str(printname))
     finally:
         branchcacheread.restore()
         branchcachewrite.restore()
     fm.end()
 
-@command('perfloadmarkers')
+@command(b'perfbranchmapload', [
+     (b'f', b'filter', b'', b'Specify repoview filter'),
+     (b'', b'list', False, b'List branchmap filter caches'),
+    ] + formatteropts)
+def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
+    """benchmark reading the branchmap"""
+    if list:
+        for name, kind, st in repo.cachevfs.readdir(stat=True):
+            if name.startswith(b'branch2'):
+                filtername = name.partition(b'-')[2] or b'unfiltered'
+                ui.status(b'%s - %s\n'
+                          % (filtername, util.bytecount(st.st_size)))
+        return
+    if filter:
+        repo = repoview.repoview(repo, filter)
+    else:
+        repo = repo.unfiltered()
+    # try once without timer; the cache for this filter may not exist yet
+    if branchmap.read(repo) is None:
+        raise error.Abort(b'No branchmap cached for %s repo'
+                          % (filter or b'unfiltered'))
+    timer, fm = gettimer(ui, opts)
+    timer(lambda: branchmap.read(repo) and None)
+    fm.end()
+
+@command(b'perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo
 
@@ -1781,12 +1868,12 @@
     timer(lambda: len(obsolete.obsstore(svfs)))
     fm.end()
 
-@command('perflrucachedict', formatteropts +
-    [('', 'size', 4, 'size of cache'),
-     ('', 'gets', 10000, 'number of key lookups'),
-     ('', 'sets', 10000, 'number of key sets'),
-     ('', 'mixed', 10000, 'number of mixed mode operations'),
-     ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
+@command(b'perflrucachedict', formatteropts +
+    [(b'', b'size', 4, b'size of cache'),
+     (b'', b'gets', 10000, b'number of key lookups'),
+     (b'', b'sets', 10000, b'number of key sets'),
+     (b'', b'mixed', 10000, b'number of mixed mode operations'),
+     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
     norepo=True)
 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
                  mixedgetfreq=50, **opts):
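+    # Hypothetical example: `hg perflrucachedict --size 4 --gets 100000`
+    # would run the init/gets/sets/mixed workloads (titled below) against
+    # a 4-entry cache, using 100000 lookups for the gets workload.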
@@ -1846,10 +1933,10 @@
                 d[v] = v
 
     benches = [
-        (doinit, 'init'),
-        (dogets, 'gets'),
-        (dosets, 'sets'),
-        (domixed, 'mixed')
+        (doinit, b'init'),
+        (dogets, b'gets'),
+        (dosets, b'sets'),
+        (domixed, b'mixed')
     ]
 
     for fn, title in benches:
@@ -1857,28 +1944,28 @@
         timer(fn, title=title)
         fm.end()
 
-@command('perfwrite', formatteropts)
+@command(b'perfwrite', formatteropts)
 def perfwrite(ui, repo, **opts):
     """microbenchmark ui.write
     """
     timer, fm = gettimer(ui, opts)
     def write():
         for i in range(100000):
-            ui.write(('Testing write performance\n'))
+            ui.write((b'Testing write performance\n'))
     timer(write)
     fm.end()
 
 def uisetup(ui):
-    if (util.safehasattr(cmdutil, 'openrevlog') and
-        not util.safehasattr(commands, 'debugrevlogopts')):
+    if (util.safehasattr(cmdutil, b'openrevlog') and
+        not util.safehasattr(commands, b'debugrevlogopts')):
         # for "historical portability":
         # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
         # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
         # openrevlog() should cause failure, because it has been
         # available since 3.5 (or 49c583ca48c4).
         def openrevlog(orig, repo, cmd, file_, opts):
-            if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
-                raise error.Abort("This version doesn't support --dir option",
-                                  hint="use 3.5 or later")
+            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
+                raise error.Abort(b"This version doesn't support --dir option",
+                                  hint=b"use 3.5 or later")
             return orig(repo, cmd, file_, opts)
-        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
+        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
--- a/contrib/phabricator.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/phabricator.py	Tue Sep 04 12:16:28 2018 -0400
@@ -570,6 +570,7 @@
                 drevid = drevids[i]
                 drev = [d for d in drevs if int(d[r'id']) == drevid][0]
                 newdesc = getdescfromdrev(drev)
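+                # Assumption: getdescfromdrev() returns a unicode string
+                # from the Conduit API, so convert it to local bytes before
+                # the bytes comparison with old.description() below.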
+                newdesc = encoding.unitolocal(newdesc)
                 # Make sure commit message contain "Differential Revision"
                 if old.description() != newdesc:
                     parents = [
--- a/contrib/python3-whitelist	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/python3-whitelist	Tue Sep 04 12:16:28 2018 -0400
@@ -1,4 +1,7 @@
 test-abort-checkin.t
+test-absorb-filefixupstate.py
+test-absorb-phase.t
+test-absorb-strip.t
 test-add.t
 test-addremove-similar.t
 test-addremove.t
@@ -48,10 +51,12 @@
 test-cbor.py
 test-censor.t
 test-changelog-exec.t
+test-check-code.t
 test-check-commit.t
 test-check-execute.t
 test-check-interfaces.py
 test-check-module-imports.t
+test-check-py3-compat.t
 test-check-pyflakes.t
 test-check-pylint.t
 test-check-shbang.t
@@ -179,9 +184,12 @@
 test-generaldelta.t
 test-getbundle.t
 test-git-export.t
+test-glog-beautifygraph.t
 test-glog-topological.t
+test-glog.t
 test-gpg.t
 test-graft.t
+test-grep.t
 test-hg-parseurl.py
 test-hghave.t
 test-hgignore.t
@@ -254,6 +262,7 @@
 test-largefiles.t
 test-lfs-largefiles.t
 test-lfs-pointer.py
+test-linelog.py
 test-linerange.py
 test-locate.t
 test-lock-badness.t
@@ -277,6 +286,7 @@
 test-merge-halt.t
 test-merge-internal-tools-pattern.t
 test-merge-local.t
+test-merge-no-file-change.t
 test-merge-remove.t
 test-merge-revert.t
 test-merge-revert2.t
@@ -296,6 +306,7 @@
 test-minifileset.py
 test-minirst.py
 test-mq-git.t
+test-mq-guards.t
 test-mq-header-date.t
 test-mq-header-from.t
 test-mq-merge.t
@@ -308,6 +319,7 @@
 test-mq-qimport-fail-cleanup.t
 test-mq-qnew.t
 test-mq-qpush-exact.t
+test-mq-qpush-fail.t
 test-mq-qqueue.t
 test-mq-qrefresh-interactive.t
 test-mq-qrefresh-replace-log-message.t
@@ -318,6 +330,7 @@
 test-mq-subrepo.t
 test-mq-symlinks.t
 test-mv-cp-st-diff.t
+test-narrow-acl.t
 test-narrow-archive.t
 test-narrow-clone-no-ellipsis.t
 test-narrow-clone-non-narrow-server.t
@@ -339,6 +352,7 @@
 test-narrow-shallow.t
 test-narrow-strip.t
 test-narrow-update.t
+test-narrow-widen-no-ellipsis.t
 test-narrow-widen.t
 test-narrow.t
 test-nested-repo.t
@@ -358,6 +372,9 @@
 test-parseindex2.py
 test-patch-offset.t
 test-patch.t
+test-patchbomb-bookmark.t
+test-patchbomb-tls.t
+test-patchbomb.t
 test-pathconflicts-merge.t
 test-pathconflicts-update.t
 test-pathencode.py
@@ -405,6 +422,7 @@
 test-pushvars.t
 test-qrecord.t
 test-rebase-abort.t
+test-rebase-backup.t
 test-rebase-base-flag.t
 test-rebase-bookmarks.t
 test-rebase-brute-force.t
@@ -446,6 +464,7 @@
 test-revert-flags.t
 test-revert-interactive.t
 test-revert-unknown.t
+test-revisions.t
 test-revlog-ancestry.py
 test-revlog-group-emptyiter.t
 test-revlog-mmapindex.t
@@ -529,6 +548,7 @@
 test-url-rev.t
 test-url.py
 test-username-newline.t
+test-util.py
 test-verify.t
 test-walk.t
 test-walkrepo.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/relnotes	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,181 @@
+#!/usr/bin/env python3
+"""Generate release notes from our commit log.
+
+This uses the relnotes extension directives when they're available,
+and falls back to our old pre-relnotes logic that used to live in the
+release-tools repo.
+"""
+import argparse
+import re
+import subprocess
+
+# Regenerate this list with
+#   hg export 'grep("\.\. [a-z]+::")' | grep '^\.\.' | \
+#     sed 's/.. //;s/::.*//' | sort -u
+rnsections = ["api", "bc", "container", "feature", "fix", "note", "perf"]
+
+rules = {
+    # keep
+    r"\(issue": 100,
+    r"\(BC\)": 100,
+    r"\(API\)": 100,
+    # core commands, bump up
+    r"(commit|files|log|pull|push|patch|status|tag|summary)(|s|es):": 20,
+    r"(annotate|alias|branch|bookmark|clone|graft|import|verify).*:": 20,
+    # extensions, bump up
+    r"(mq|shelve|rebase):": 20,
+    # newsy
+    r": deprecate": 20,
+    r"(option|feature|command|support)": 10,
+    # bug-like?
+    r"(fix|don't break|improve)": 7,
+    # boring stuff, bump down
+    r"^contrib": -5,
+    r"debug": -5,
+    r"help": -5,
+    r"(doc|bundle2|obsolete|obsmarker|rpm|setup|debug\S+:)": -15,
+    r"(check-code|check-commit|import-checker)": -20,
+    # cleanups and refactoring
+    r"(cleanup|whitespace|nesting|indent|spelling|comment)": -20,
+    r"(typo|hint|note|style:|correct doc)": -20,
+    r"_": -10,
+    r"(argument|absolute_import|attribute|assignment|mutable)": -15,
+    r"(unused|useless|unnecessary|duplicate|deprecated|scope|True|False)": -10,
+    r"(redundant|pointless|confusing|uninitialized|meaningless|dead)": -10,
+    r": (drop|remove|inherit|rename|simplify|naming|inline)": -10,
+    r"(docstring|document .* method)": -20,
+    r"(factor|extract|prepare|split|replace| import)": -20,
+    r": add.*(function|method|implementation|test|example)": -10,
+    r": (move|extract) .* (to|into|from)": -20,
+    r": implement ": -5,
+    r": use .* implementation": -20,
+    r"\S\S\S+\.\S\S\S\S+": -5,
+    r": use .* instead of": -20,
+    r"__": -5,
+    # dumb keywords
+    r"\S+/\S+:": -10,
+    r"\S+\.\S+:": -10,
+    # drop
+    r"^i18n-": -50,
+    r"^i18n:.*(hint|comment)": -50,
+    r"perf:": -50,
+    r"check-code:": -50,
+    r"Added.*for changeset": -50,
+    r"tests?:": -50,
+    r"test-": -50,
+    r"add.* tests": -50,
+    r"^_": -50,
+}
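+# Scoring illustration (not part of the tool's output): a summary such as
+# "log: add --foo option (BC)" matches r"\(BC\)" (+100), the core-command
+# rule for "log:" (+20) and the option/feature rule (+10), scoring 130,
+# well above the cutoff of 10 defined below.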
+
+cutoff = 10
+commits = []
+
+groupings = [
+    (r"util|parsers|repo|ctx|context|revlog|filelog|alias|cmdutil", "core"),
+    (r"revset|templater|ui|dirstate|hook|i18n|transaction|wire", "core"),
+    (r"color|pager", "core"),
+    (r"hgweb|paper|coal|gitweb", "hgweb"),
+    (r"pull|push|revert|resolve|annotate|bookmark|branch|clone", "commands"),
+    (r"commands|commit|config|files|graft|import|log|merge|patch", "commands"),
+    (r"phases|status|summary|amend|tag|help|verify", "commands"),
+    (r"rebase|mq|convert|eol|histedit|largefiles", "extensions"),
+    (r"shelve|unshelve", "extensions"),
+]
+
+def main():
+    ap = argparse.ArgumentParser()
+    ap.add_argument(
+        "startrev",
+        metavar="REV",
+        type=str,
+        nargs=1,
+        help=(
+            "Starting revision for the release notes. This revision "
+            "won't be included, but later revisions will."
+        ),
+    )
+    ap.add_argument(
+        "--stoprev",
+        metavar="REV",
+        type=str,
+        default="@",
+        nargs=1,
+        help=(
+            "Stop revision for release notes. This revision will be included,"
+            " but no later revisions will. This revision needs to be "
+            "a descendant of startrev."
+        ),
+    )
+    args = ap.parse_args()
+    fromext = subprocess.check_output(
+        [
+            "hg",
+            "--config",
+            "extensions.releasenotes=",
+            "releasenotes",
+            "-r",
+            "%s::%s" % (args.startrev[0], args.stoprev[0]),
+        ]
+    ).decode("utf-8")
+    # Find all release notes from un-relnotes-flagged commits.
+    for entry in sorted(
+        subprocess.check_output(
+            [
+                "hg",
+                "log",
+                "-r",
+                r'%s::%s - merge() - grep("\n\.\. (%s)::")'
+                % (args.startrev[0], args.stoprev[0], "|".join(rnsections)),
+                "-T",
+                r"{desc|firstline}\n",
+            ]
+        )
+        .decode("utf-8")
+        .splitlines()
+    ):
+        desc = entry.replace("`", "'")
+
+        score = 0
+        for rule, val in rules.items():
+            if re.search(rule, desc):
+                score += val
+
+        desc = desc.replace("(issue", "(Bts:issue")
+
+        if score >= cutoff:
+            commits.append(desc)
+    # Group unflagged notes.
+    groups = {}
+    bcs = []
+    apis = []
+
+    for d in commits:
+        if "(BC)" in d:
+            bcs.append(d)
+        if "(API)" in d:
+            apis.append(d)
+        for rule, g in groupings:
+            if re.match(rule, d):
+                groups.setdefault(g, []).append(d)
+                break
+        else:
+            groups.setdefault("unsorted", []).append(d)
+    print(fromext)
+    # print legacy release notes sections
+    for g in sorted(groups):
+        print("\n=== %s ===" % g)
+        for d in sorted(groups[g]):
+            print(" * %s" % d)
+
+    print("\n=== BC ===\n")
+
+    for d in sorted(bcs):
+        print(" * %s" % d)
+
+    print("\n=== API Changes ===\n")
+
+    for d in sorted(apis):
+        print(" * %s" % d)
+
+if __name__ == "__main__":
+    main()
--- a/contrib/wix/help.wxs	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/wix/help.wxs	Tue Sep 04 12:16:28 2018 -0400
@@ -43,9 +43,11 @@
           <Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'>
             <File Id="internals.bundle2.txt"      Name="bundle2.txt" />
             <File Id="internals.bundles.txt"      Name="bundles.txt" KeyPath="yes" />
+            <File Id="internals.cbor.txt"         Name="cbor.txt" />
             <File Id="internals.censor.txt"       Name="censor.txt" />
             <File Id="internals.changegroups.txt" Name="changegroups.txt" />
             <File Id="internals.config.txt"       Name="config.txt" />
+            <File Id="internals.linelog.txt"      Name="linelog.txt" />
             <File Id="internals.requirements.txt" Name="requirements.txt" />
             <File Id="internals.revlogs.txt"      Name="revlogs.txt" />
             <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" />
--- a/contrib/zsh_completion	Tue Sep 04 11:59:12 2018 -0400
+++ b/contrib/zsh_completion	Tue Sep 04 12:16:28 2018 -0400
@@ -82,7 +82,7 @@
 
   if [[ -z "$cmd" ]]
   then
-    _arguments -s -w : $_hg_global_opts \
+    _arguments -s -S : $_hg_global_opts \
     ':mercurial command:_hg_commands'
     return
   fi
@@ -119,7 +119,7 @@
     _hg_cmd_${cmd}
   else
     # complete unknown commands normally
-    _arguments -s -w : $_hg_global_opts \
+    _arguments -s -S : $_hg_global_opts \
       '*:files:_hg_files'
   fi
 }
@@ -139,7 +139,7 @@
   typeset -gA _hg_alias_list
   local hline cmd cmdalias
 
-  _call_program hg hg debugcomplete -v | while read -A hline
+  _call_program hg HGPLAINEXCEPT=alias hg debugcomplete -v | while read -A hline
   do
     cmd=$hline[1]
     _hg_cmd_list+=($cmd)
@@ -245,10 +245,10 @@
   _wanted files expl 'missing files' _multi_parts / status_files
 }
 
-_hg_modified() {
+_hg_committable() {
   typeset -a status_files
-  _hg_status m
-  _wanted files expl 'modified files' _multi_parts / status_files
+  _hg_status mar
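+  # 'mar' asks `hg status` for (m)odified, (a)dded and (r)emoved files,
+  # i.e. the set of files a plain `hg commit` would record.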
+  _wanted files expl 'modified, added or removed files' _multi_parts / status_files
 }
 
 _hg_resolve() {
@@ -281,6 +281,18 @@
     (( $#items )) && _describe -t config 'config item' items
 }
 
+_hg_internal_merge_tools=(
+  \\:dump \\:fail \\:forcedump \\:local \\:merge \\:merge-local \\:merge-other
+  \\:merge3 \\:other \\:prompt \\:tagmerge \\:union
+)
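+# The doubled backslashes make each element a literal "\:name"; the
+# backslash keeps _describe from treating the leading ':' of internal
+# tool names as its match/description separator (assumption based on
+# zsh quoting rules).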
+
+_hg_merge_tools() {
+  typeset -a external_tools
+  _describe -t internal_tools 'internal merge tools' _hg_internal_merge_tools
+  external_tools=(${(f)"$(_hg_cmd showconfig merge-tools | cut -d . -f 2)"})
+  (( $#external_tools )) && _describe -t external_tools 'external merge tools' external_tools
+}
+
 _hg_addremove() {
   _alternative 'files:unknown files:_hg_unknown' \
     'files:missing files:_hg_missing'
@@ -371,22 +383,24 @@
 
 # Common options
 _hg_global_opts=(
-    '(--repository -R)'{-R+,--repository=}'[repository root directory]:repository:_files -/'
-    '--cwd[change working directory]:new working directory:_files -/'
-    '(--noninteractive -y)'{-y,--noninteractive}'[do not prompt, assume yes for any required answers]'
+    '(--repository -R)'{-R+,--repository=}'[repository root directory or name of overlay bundle file]:repository:_files -/'
+    '--cwd=[change working directory]:new working directory:_files -/'
+    '(--noninteractive -y)'{-y,--noninteractive}'[do not prompt, automatically pick the first choice for all prompts]'
     '(--verbose -v)'{-v,--verbose}'[enable additional output]'
     '*--config[set/override config option]:defined config items:_hg_config'
     '(--quiet -q)'{-q,--quiet}'[suppress output]'
     '(--help -h)'{-h,--help}'[display help and exit]'
-    '--debug[debug mode]'
+    '--debug[enable debugging output]'
     '--debugger[start debugger]'
-    '--encoding[set the charset encoding]'
-    '--encodingmode[set the charset encoding mode]'
-    '--lsprof[print improved command execution profile]'
-    '--traceback[print traceback on exception]'
+    '--encoding=[set the charset encoding]:encoding'
+    '--encodingmode=[set the charset encoding mode]:encoding mode'
+    '--traceback[always print a traceback on exception]'
     '--time[time how long the command takes]'
-    '--profile[profile]'
+    '--profile[print command execution profile]'
     '--version[output version information and exit]'
+    '--hidden[consider hidden changesets]'
+    '--color=[when to colorize]:when:(true false yes no always auto never debug)'
+    '--pager=[when to paginate (default: auto)]:when:(true false yes no always auto never)'
 )
 
 _hg_pat_opts=(
@@ -402,8 +416,8 @@
 _hg_date_user_opts=(
   '(--currentdate -D)'{-D,--currentdate}'[record the current date as commit date]'
   '(--currentuser -U)'{-U,--currentuser}'[record the current user as committer]'
-  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date:'
-  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user:')
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date'
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user')
 
 _hg_gitlike_opts=(
   '(--git -g)'{-g,--git}'[use git extended diff format]')
@@ -414,7 +428,8 @@
   '--nodates[omit dates from diff headers]')
 
 _hg_mergetool_opts=(
-  '(--tool -t)'{-t+,--tool=}'[specify merge tool]:tool:')
+  '(--tool -t)'{-t+,--tool=}'[specify merge tool]:merge tool:_hg_merge_tools'
+)
 
 _hg_dryrun_opts=(
   '(--dry-run -n)'{-n,--dry-run}'[do not perform actions, just print output]')
@@ -422,28 +437,33 @@
 _hg_ignore_space_opts=(
   '(--ignore-all-space -w)'{-w,--ignore-all-space}'[ignore white space when comparing lines]'
   '(--ignore-space-change -b)'{-b,--ignore-space-change}'[ignore changes in the amount of white space]'
-  '(--ignore-blank-lines -B)'{-B,--ignore-blank-lines}'[ignore changes whose lines are all blank]')
+  '(--ignore-blank-lines -B)'{-B,--ignore-blank-lines}'[ignore changes whose lines are all blank]'
+  '(--ignore-space-at-eol -Z)'{-Z,--ignore-space-at-eol}'[ignore changes in whitespace at EOL]'
+)
 
-_hg_style_opts=(
-  '--style[display using template map file]:'
-  '--template[display with template]:')
+_hg_template_opts=(
+  '--template[display with template]:template'
+)
 
 _hg_log_opts=(
-  $_hg_global_opts $_hg_style_opts $_hg_gitlike_opts
-  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:'
+  $_hg_global_opts $_hg_template_opts $_hg_gitlike_opts
+  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:limit'
   '(--no-merges -M)'{-M,--no-merges}'[do not show merges]'
   '(--patch -p)'{-p,--patch}'[show patch]'
   '--stat[output diffstat-style summary of changes]'
+  '(--graph -G)'{-G,--graph}'[show the revision DAG]'
 )
 
 _hg_commit_opts=(
   '(-m --message -l --logfile --edit -e)'{-e,--edit}'[edit commit message]'
-  '(-e --edit -l --logfile --message -m)'{-m+,--message=}'[use <text> as commit message]:message:'
+  '(-e --edit -l --logfile --message -m)'{-m+,--message=}'[use <text> as commit message]:message'
   '(-e --edit -m --message --logfile -l)'{-l+,--logfile=}'[read the commit message from <file>]:log file:_files')
 
 _hg_remote_opts=(
-  '(--ssh -e)'{-e+,--ssh=}'[specify ssh command to use]:'
-  '--remotecmd[specify hg command to run on the remote side]:')
+  '(--ssh -e)'{-e+,--ssh=}'[specify ssh command to use]:command'
+  '--remotecmd=[specify hg command to run on the remote side]:remote command'
+  '--insecure[do not verify server certificate (ignoring web.cacerts config)]'
+)
 
 _hg_branch_bmark_opts=(
   '(--bookmark -B)'{-B+,--bookmark=}'[specify bookmark(s)]:bookmark:_hg_bookmarks'
@@ -458,50 +478,53 @@
 }
 
 _hg_cmd_add() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
   '*:unknown files:_hg_unknown'
 }
 
 _hg_cmd_addremove() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
-  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
+  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:similarity' \
   '*:unknown or missing files:_hg_addremove'
 }
 
 _hg_cmd_annotate() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_ignore_space_opts $_hg_pat_opts \
   '(--rev -r)'{-r+,--rev=}'[annotate the specified revision]:revision:_hg_labels' \
-  '(--follow -f)'{-f,--follow}'[follow file copies and renames]' \
+  "--no-follow[don't follow copies and renames]" \
   '(--text -a)'{-a,--text}'[treat all files as text]' \
-  '(--user -u)'{-u,--user}'[list the author]' \
-  '(--date -d)'{-d,--date}'[list the date]' \
+  '(--user -u)'{-u,--user}'[list the author (long with -v)]' \
+  '(--file -f)'{-f,--file}'[list the filename]' \
+  '(--date -d)'{-d,--date}'[list the date (short with -q)]' \
   '(--number -n)'{-n,--number}'[list the revision number (default)]' \
   '(--changeset -c)'{-c,--changeset}'[list the changeset]' \
+  '(--line-number -l)'{-l,--line-number}'[show line number at the first appearance]' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_archive() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '--no-decode[do not pass files through decoders]' \
-  '(--prefix -p)'{-p+,--prefix=}'[directory prefix for files in archive]:' \
+  '(--prefix -p)'{-p+,--prefix=}'[directory prefix for files in archive]:prefix' \
   '(--rev -r)'{-r+,--rev=}'[revision to distribute]:revision:_hg_labels' \
   '(--type -t)'{-t+,--type=}'[type of distribution to create]:archive type:(files tar tbz2 tgz uzip zip)' \
   '*:destination:_files'
 }
 
 _hg_cmd_backout() {
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
     '--merge[merge with old dirstate parent after backout]' \
-    '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
+    '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
     '--parent[parent to choose when backing out merge]' \
-    '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-    '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-    '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text:' \
-    '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files'
+    '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+    '(--rev -r 1)'{-r+,--rev=}'[revision to backout]:revision:_hg_labels' \
+    '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text' \
+    '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files' \
+    ':revision:_hg_labels'
 }
 
 _hg_cmd_bisect() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(-)'{-r,--reset}'[reset bisect state]' \
   '(--extend -e)'{-e,--extend}'[extend the bisect range]' \
   '(--good -g --bad -b --skip -s --reset -r)'{-g,--good}'[mark changeset good]'::revision:_hg_labels \
@@ -512,7 +535,7 @@
 }
 
 _hg_cmd_bookmarks() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[force]' \
   '(--inactive -i)'{-i,--inactive}'[mark a bookmark inactive]' \
   '(--rev -r --delete -d --rename -m)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
@@ -522,112 +545,130 @@
 }
 
 _hg_cmd_branch() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[set branch name even if it shadows an existing branch]' \
   '(--clean -C)'{-C,--clean}'[reset branch name to parent branch name]'
 }
 
 _hg_cmd_branches() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--active -a)'{-a,--active}'[show only branches that have unmerge heads]' \
+  _arguments -s -S : $_hg_global_opts \
   '(--closed -c)'{-c,--closed}'[show normal and closed branches]'
 }
 
 _hg_cmd_bundle() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
-  '(--force -f)'{-f,--force}'[run even when remote repository is unrelated]' \
-  '(2)*--base[a base changeset to specify instead of a destination]:revision:_hg_labels' \
-  '(--branch -b)'{-b+,--branch=}'[a specific branch to bundle]:' \
-  '(--rev -r)'{-r+,--rev=}'[changeset(s) to bundle]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
+  '(--force -f)'{-f,--force}'[run even when the destination is unrelated]' \
+  '(2)*--base[a base changeset assumed to be available at the destination]:revision:_hg_labels' \
+  '*'{-b+,--branch=}'[a specific branch you would like to bundle]:branch:_hg_branches' \
+  '*'{-r+,--rev=}'[a changeset intended to be added to the destination]:revision:_hg_labels' \
   '--all[bundle all changesets in the repository]' \
+  '--type[bundle compression type to use (default: bzip2)]:bundle type' \
   ':output file:_files' \
   ':destination repository:_files -/'
 }
 
 _hg_cmd_cat() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:filespec:' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
+  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:format string' \
   '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
   '--decode[apply any matching decode filter]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_clone() {
-  _arguments -s -w : $_hg_global_opts $_hg_clone_opts \
-  '(--rev -r)'{-r+,--rev=}'[a changeset you would like to have after cloning]:' \
-  '(--updaterev -u)'{-u+,--updaterev=}'[revision, tag or branch to check out]:' \
-  '(--branch -b)'{-b+,--branch=}'[clone only the specified branch]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_clone_opts \
+  '*'{-r+,--rev=}'[do not clone everything, but include this changeset and its ancestors]:revision' \
+  '(--updaterev -u)'{-u+,--updaterev=}'[revision, tag or branch to check out]:revision' \
+  '*'{-b+,--branch=}"[do not clone everything, but include this branch's changesets and their ancestors]:branch" \
   ':source repository:_hg_remote' \
   ':destination:_hg_clone_dest'
 }
 
 _hg_cmd_commit() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '(--addremove -A)'{-A,--addremove}'[mark new/missing files as added/removed before committing]' \
-  '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text:' \
+  '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text' \
   '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-  '--amend[amend the parent of the working dir]' \
-  '--close-branch[mark a branch as closed]' \
-  '*:file:_hg_files'
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+  '--amend[amend the parent of the working directory]' \
+  '--close-branch[mark a branch head as closed]' \
+  '(--interactive -i)'{-i,--interactive}'[use interactive mode]' \
+  '(--secret -s)'{-s,--secret}'[use the secret phase for committing]' \
+  '*:file:_hg_committable'
 }
 
 _hg_cmd_copy() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--after -A)'{-A,--after}'[record a copy that has already occurred]' \
   '(--force -f)'{-f,--force}'[forcibly copy over an existing managed file]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_diff() {
+  local context state state_descr line ret=1
   typeset -A opt_args
-  _arguments -s -w : $_hg_global_opts $_hg_diff_opts $_hg_ignore_space_opts \
+
+  _arguments -s -S : $_hg_global_opts $_hg_diff_opts $_hg_ignore_space_opts \
                      $_hg_pat_opts $_hg_subrepos_opts \
   '*'{-r+,--rev=}'[revision]:revision:_hg_revrange' \
+  '--noprefix[omit a/ and b/ prefixes from filenames]' \
   '(--show-function -p)'{-p,--show-function}'[show which function each change is in]' \
-  '(--change -c)'{-c+,--change=}'[change made by revision]:' \
+  '(--change -c)'{-c+,--change=}'[change made by revision]:revision:_hg_labels' \
   '(--text -a)'{-a,--text}'[treat all files as text]' \
   '--reverse[produce a diff that undoes the changes]' \
-  '(--unified -U)'{-U+,--unified=}'[number of lines of context to show]:' \
+  '(--unified -U)'{-U+,--unified=}'[number of lines of context to show]:count' \
   '--stat[output diffstat-style summary of changes]' \
-  '*:file:->diff_files'
+  '--root=[produce diffs relative to subdirectory]:directory:_files -/' \
+  '*:file:->diff_files' && ret=0
 
   if [[ $state == 'diff_files' ]]
   then
-    if [[ -n $opt_args[-r] ]]
+    if [[ -n ${opt_args[(I)-r|--rev]} ]]
     then
-      _hg_files
+      _hg_files && ret=0
     else
-      _hg_modified
+      _hg_committable && ret=0
     fi
   fi
+
+  return ret
 }
 
 _hg_cmd_export() {
-  _arguments -s -w : $_hg_global_opts $_hg_diff_opts \
-  '(--outout -o)'{-o+,--output=}'[print output to file with formatted name]:filespec:' \
+  _arguments -s -S : $_hg_global_opts $_hg_diff_opts \
+  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:format string' \
   '--switch-parent[diff against the second parent]' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[revisions to export]:revision:_hg_labels' \
   '*:revision:_hg_labels'
 }
 
+_hg_cmd_files() {
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  '(--rev -r)'{-r+,--rev=}'[search the repository as it is in REV]:revision:_hg_labels' \
+  '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
+  '*:file:_hg_files'
+}
+
 _hg_cmd_forget() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  '(--interactive -i)'{-i,--interactive}'[use interactive mode]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_graft() {
-  _arguments -s -w : $_hg_global_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_dryrun_opts \
                      $_hg_date_user_opts $_hg_mergetool_opts \
-  '(--continue -c)'{-c,--continue}'[resume interrupted graft]' \
+  '*'{-r+,--rev=}'[revisions to graft]:revision:_hg_labels' \
+  '(--continue -c --abort -a)'{-c,--continue}'[resume interrupted graft]' \
+  '(--continue -c --abort -a)'{-a,--abort}'[abort interrupted graft]' \
   '(--edit -e)'{-e,--edit}'[invoke editor on commit messages]' \
   '--log[append graft info to log message]' \
   '*:revision:_hg_labels'
 }
 
 _hg_cmd_grep() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL]' \
   '--all[print all revisions with matches]' \
   '(--follow -f)'{-f,--follow}'[follow changeset or file history]' \
@@ -642,93 +683,92 @@
 }
 
 _hg_cmd_heads() {
-  _arguments -s -w : $_hg_global_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_template_opts \
   '(--topo -t)'{-t,--topo}'[show topological heads only]' \
   '(--closed -c)'{-c,--closed}'[show normal and closed branch heads]' \
   '(--rev -r)'{-r+,--rev=}'[show only heads which are descendants of rev]:revision:_hg_labels'
 }
 
 _hg_cmd_help() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--extension -e)'{-e,--extension}'[show only help for extensions]' \
   '(--command -c)'{-c,--command}'[show only help for commands]' \
-  '(--keyword -k)'{-k+,--keyword}'[show topics matching keyword]' \
+  '(--keyword -k)'{-k,--keyword}'[show topics matching keyword]' \
   '*:mercurial help topic:_hg_help_topics'
 }
 
 _hg_cmd_identify() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
   '(--rev -r)'{-r+,--rev=}'[identify the specified rev]:revision:_hg_labels' \
   '(--num -n)'{-n,--num}'[show local revision number]' \
   '(--id -i)'{-i,--id}'[show global revision id]' \
   '(--branch -b)'{-b,--branch}'[show branch]' \
-  '(--bookmark -B)'{-B,--bookmark}'[show bookmarks]' \
+  '(--bookmarks -B)'{-B,--bookmarks}'[show bookmarks]' \
   '(--tags -t)'{-t,--tags}'[show tags]'
 }
 
 _hg_cmd_import() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts \
-  '(--strip -p)'{-p+,--strip=}'[directory strip option for patch (default: 1)]:count:' \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts \
+  '(--strip -p)'{-p+,--strip=}'[directory strip option for patch (default: 1)]:count' \
   '(--force -f)'{-f,--force}'[skip check for outstanding uncommitted changes]' \
   '--bypass[apply patch without touching the working directory]' \
   '--no-commit[do not commit, just update the working directory]' \
   '--partial[commit even if some hunks fail]' \
-  '--exact[apply patch to the nodes from which it was generated]' \
+  '--exact[abort if patch would apply lossily]' \
   '--import-branch[use any branch information in patch (implied by --exact)]' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:' \
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:similarity' \
   '*:patch:_files'
 }
 
 _hg_cmd_incoming() {
-  _arguments -s -w : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
                      $_hg_subrepos_opts \
   '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '(--rev -r)'{-r+,--rev=}'[a specific revision up to which you would like to pull]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[a remote changeset intended to be added]:revision:_hg_labels' \
   '(--newest-first -n)'{-n,--newest-first}'[show newest record first]' \
   '--bundle[file to store the bundles into]:bundle file:_files' \
   ':source:_hg_remote'
 }
 
 _hg_cmd_init() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
   ':dir:_files -/'
 }
 
 _hg_cmd_locate() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
   '(--rev -r)'{-r+,--rev=}'[search repository as it stood at revision]:revision:_hg_labels' \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
-  '(--fullpath -f)'{-f,--fullpath}'[print complete paths]' \
+  '(--fullpath -f)'{-f,--fullpath}'[print complete paths from the filesystem root]' \
   '*:search pattern:_hg_files'
 }
 
 _hg_cmd_log() {
-  _arguments -s -w : $_hg_log_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_log_opts $_hg_pat_opts \
   '(--follow --follow-first -f)'{-f,--follow}'[follow changeset or history]' \
   '(-f --follow)--follow-first[only follow the first parent of merge changesets]' \
   '(--copies -C)'{-C,--copies}'[show copied files]' \
-  '(--keyword -k)'{-k+,--keyword}'[search for a keyword]:' \
+  '*'{-k+,--keyword=}'[search for a keyword]:keyword' \
   '*'{-r+,--rev=}'[show the specified revision or revset]:revision:_hg_revrange' \
   '(--only-merges -m)'{-m,--only-merges}'[show only merges]' \
-  '(--prune -P)'{-P+,--prune=}'[do not display revision or any of its ancestors]:revision:_hg_labels' \
-  '(--graph -G)'{-G,--graph}'[show the revision DAG]' \
-  '(--branch -b)'{-b+,--branch=}'[show changesets within the given named branch]:branch:_hg_branches' \
-  '(--user -u)'{-u+,--user=}'[revisions committed by user]:user:' \
-  '(--date -d)'{-d+,--date=}'[show revisions matching date spec]:date:' \
+  '*'{-P+,--prune=}'[do not display revision or any of its ancestors]:revision:_hg_labels' \
+  '*'{-b+,--branch=}'[show changesets within the given named branch]:branch:_hg_branches' \
+  '*'{-u+,--user=}'[revisions committed by user]:user' \
+  '(--date -d)'{-d+,--date=}'[show revisions matching date spec]:date' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_manifest() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '--all[list files from all revisions]' \
   '(--rev -r)'{-r+,--rev=}'[revision to display]:revision:_hg_labels' \
   ':revision:_hg_labels'
 }
 
 _hg_cmd_merge() {
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts \
   '(--force -f)'{-f,--force}'[force a merge with outstanding changes]' \
   '(--rev -r 1)'{-r+,--rev=}'[revision to merge]:revision:_hg_mergerevs' \
   '(--preview -P)'{-P,--preview}'[review revisions to merge (no merge is performed)]' \
@@ -736,167 +776,176 @@
 }
 
 _hg_cmd_outgoing() {
-  _arguments -s -w : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
                      $_hg_subrepos_opts \
-  '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '*'{-r+,--rev=}'[a specific revision you would like to push]:revision:_hg_revrange' \
+  '(--force -f)'{-f,--force}'[run even when the destination is unrelated]' \
+  '*'{-r+,--rev=}'[a changeset intended to be included in the destination]:revision:_hg_revrange' \
   '(--newest-first -n)'{-n,--newest-first}'[show newest record first]' \
   ':destination:_hg_remote'
 }
 
 _hg_cmd_parents() {
-  _arguments -s -w : $_hg_global_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_template_opts \
   '(--rev -r)'{-r+,--rev=}'[show parents of the specified rev]:revision:_hg_labels' \
   ':last modified file:_hg_files'
 }
 
 _hg_cmd_paths() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':path:_hg_paths'
 }
 
 _hg_cmd_phase() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--public -p)'{-p,--public}'[set changeset phase to public]' \
-  '(--draft -d)'{-d,--draft}'[set changeset phase to draft]' \
-  '(--secret -s)'{-s,--secret}'[set changeset phase to secret]' \
+  _arguments -s -S : $_hg_global_opts \
+  '(--public -p --draft -d --secret -s)'{-p,--public}'[set changeset phase to public]' \
+  '(--public -p --draft -d --secret -s)'{-d,--draft}'[set changeset phase to draft]' \
+  '(--public -p --draft -d --secret -s)'{-s,--secret}'[set changeset phase to secret]' \
   '(--force -f)'{-f,--force}'[allow to move boundary backward]' \
-  '(--rev -r)'{-r+,--rev=}'[target revision]:revision:_hg_labels' \
-  ':revision:_hg_labels'
+  '*'{-r+,--rev=}'[target revision]:revision:_hg_labels' \
+  '*:revision:_hg_labels'
 }
 
 _hg_cmd_pull() {
-  _arguments -s -w : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
   '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '(--update -u)'{-u,--update}'[update to new tip if changesets were pulled]' \
-  '(--rev -r)'{-r+,--rev}'[a specific revision up to which you would like to pull]:revision:' \
+  '(--update -u)'{-u,--update}'[update to new branch head if new descendants were pulled]' \
+  '*'{-r+,--rev=}'[a remote changeset intended to be added]:revision:_hg_labels' \
   ':source:_hg_remote'
 }
 
 _hg_cmd_push() {
-  _arguments -s -w : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
   '(--force -f)'{-f,--force}'[force push]' \
-  '(--rev -r)'{-r+,--rev=}'[a specific revision you would like to push]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[a changeset intended to be included in the destination]:revision:_hg_labels' \
   '--new-branch[allow pushing a new branch]' \
   ':destination:_hg_remote'
 }
 
 _hg_cmd_remove() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--after -A)'{-A,--after}'[record remove that has already occurred]' \
-  '(--force -f)'{-f,--force}'[remove file even if modified]' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
+  '(--after -A)'{-A,--after}'[record delete for missing files]' \
+  '(--force -f)'{-f,--force}'[forget added files, delete modified files]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_rename() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--after -A)'{-A,--after}'[record a rename that has already occurred]' \
   '(--force -f)'{-f,--force}'[forcibly copy over an existing managed file]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_resolve() {
-  local context state line
+  local context state state_descr line ret=1
   typeset -A opt_args
 
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
   '(--all -a)'{-a,--all}'[select all unresolved files]' \
   '(--no-status -n)'{-n,--no-status}'[hide status prefix]' \
   '(--list -l --mark -m --unmark -u)'{-l,--list}'[list state of files needing merge]:*:merged files:->resolve_files' \
   '(--mark -m --list -l --unmark -u)'{-m,--mark}'[mark files as resolved]:*:unresolved files:_hg_unresolved' \
-  '(--unmark -u --list -l --mark -m)'{-u,--unmark}'[unmark files as resolved]:*:resolved files:_hg_resolved' \
-  '*:file:_hg_unresolved'
+  '(--unmark -u --list -l --mark -m)'{-u,--unmark}'[mark files as unresolved]:*:resolved files:_hg_resolved' \
+  '*:file:_hg_unresolved' && ret=0
 
   if [[ $state == 'resolve_files' ]]
   then
     _alternative 'files:resolved files:_hg_resolved' \
-      'files:unresolved files:_hg_unresolved'
+      'files:unresolved files:_hg_unresolved' && ret=0
   fi
+
+  return ret
 }
 
 _hg_cmd_revert() {
-  local context state line
+  local context state state_descr line ret=1
   typeset -A opt_args
 
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--all -a :)'{-a,--all}'[revert all changes when no arguments given]' \
   '(--rev -r)'{-r+,--rev=}'[revision to revert to]:revision:_hg_labels' \
   '(--no-backup -C)'{-C,--no-backup}'[do not save backup copies of files]' \
-  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date code:' \
-  '*:file:->diff_files'
+  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date' \
+  '(--interactive -i)'{-i,--interactive}'[interactively select the changes]' \
+  '*:file:->revert_files' && ret=0
 
-  if [[ $state == 'diff_files' ]]
+  if [[ $state == 'revert_files' ]]
   then
-    if [[ -n $opt_args[-r] ]]
+    if [[ -n ${opt_args[(I)-r|--rev]} ]]
     then
-      _hg_files
+      _hg_files && ret=0
     else
       typeset -a status_files
       _hg_status mard
-      _wanted files expl 'modified, added, removed or deleted file' _multi_parts / status_files
+      _wanted files expl 'modified, added, removed or deleted file' _multi_parts / status_files && ret=0
     fi
   fi
+
+  return ret
 }
 
 _hg_cmd_rollback() {
-  _arguments -s -w : $_hg_global_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_dryrun_opts \
   '(--force -f)'{-f,--force}'[ignore safety measures]' \
 }
 
 _hg_cmd_serve() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--accesslog -A)'{-A+,--accesslog=}'[name of access log file]:log file:_files' \
-  '(--errorlog -E)'{-E+,--errorlog=}'[name of error log file]:log file:_files' \
+  _arguments -s -S : $_hg_global_opts $_hg_subrepos_opts \
+  '(--accesslog -A)'{-A+,--accesslog=}'[name of access log file to write to]:log file:_files' \
+  '(--errorlog -E)'{-E+,--errorlog=}'[name of error log file to write to]:log file:_files' \
   '(--daemon -d)'{-d,--daemon}'[run server in background]' \
-  '(--port -p)'{-p+,--port=}'[listen port]:listen port:' \
-  '(--address -a)'{-a+,--address=}'[interface address]:interface address:' \
-  '--prefix[prefix path to serve from]:directory:_files' \
-  '(--name -n)'{-n+,--name=}'[name to show in web pages]:repository name:' \
-  '--web-conf[name of the hgweb config file]:webconf_file:_files' \
-  '--pid-file[name of file to write process ID to]:pid_file:_files' \
-  '--cmdserver[cmdserver mode]:mode:' \
-  '(--templates -t)'{-t,--templates}'[web template directory]:template dir:_files -/' \
-  '--style[web template style]:style' \
+  '(--port -p)'{-p+,--port=}'[port to listen on (default: 8000)]:listen port' \
+  '(--address -a)'{-a+,--address=}'[address to listen on (default: all interfaces)]:interface address' \
+  '--prefix=[prefix path to serve from (default: server root)]:directory:_files' \
+  '(--name -n)'{-n+,--name=}'[name to show in web pages (default: working directory)]:repository name' \
+  '--web-conf=[name of the hgweb config file]:config file:_files' \
+  '--pid-file=[name of file to write process ID to]:pid file:_files' \
+  '--cmdserver[for remote clients]' \
+  '(--templates -t)'{-t+,--templates=}'[web template directory]:template dir:_files -/' \
+  '--style=[template style to use]:style' \
   '--stdio[for remote clients]' \
-  '--certificate[certificate file]:cert_file:_files' \
-  '(--ipv6 -6)'{-6,--ipv6}'[use IPv6 in addition to IPv4]'
+  '(--ipv6 -6)'{-6,--ipv6}'[use IPv6 in addition to IPv4]' \
+  '--certificate=[SSL certificate file]:certificate file:_files' \
+  '--print-url[start and print only the URL]'
 }
 
 _hg_cmd_showconfig() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--untrusted -u)'{-u,--untrusted}'[show untrusted configuration options]' \
-  ':config item:_hg_config'
+  '(--edit -e)'{-e,--edit}'[edit user config]' \
+  '(--local -l --global -g)'{-l,--local}'[edit repository config]' \
+  '(--local -l --global -g)'{-g,--global}'[edit global config]' \
+  '*:config item:_hg_config'
 }
 
 _hg_cmd_status() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '(--all -A)'{-A,--all}'[show status of all files]' \
   '(--modified -m)'{-m,--modified}'[show only modified files]' \
   '(--added -a)'{-a,--added}'[show only added files]' \
   '(--removed -r)'{-r,--removed}'[show only removed files]' \
   '(--deleted -d)'{-d,--deleted}'[show only deleted (but tracked) files]' \
   '(--clean -c)'{-c,--clean}'[show only files without changes]' \
-  '(--unknown -u)'{-u,--unknown}'[show only unknown files]' \
+  '(--unknown -u)'{-u,--unknown}'[show only unknown (not tracked) files]' \
   '(--ignored -i)'{-i,--ignored}'[show ignored files]' \
   '(--no-status -n)'{-n,--no-status}'[hide status prefix]' \
   '(--copies -C)'{-C,--copies}'[show source of copied files]' \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
-  '--rev[show difference from revision]:revision:_hg_labels' \
-  '--change[list the changed files of a revision]:revision:_hg_labels' \
+  '*--rev=[show difference from revision]:revision:_hg_labels' \
+  '--change=[list the changed files of a revision]:revision:_hg_labels' \
   '*:files:_files'
 }
 
 _hg_cmd_summary() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '--remote[check for push and pull]'
 }
 
 _hg_cmd_tag() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--local -l)'{-l,--local}'[make the tag local]' \
-  '(--message -m)'{-m+,--message=}'[message for tag commit log entry]:message:' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
+  '(--message -m)'{-m+,--message=}'[message for tag commit log entry]:message' \
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
   '(--rev -r)'{-r+,--rev=}'[revision to tag]:revision:_hg_labels' \
   '(--force -f)'{-f,--force}'[force tag]' \
   '--remove[remove a tag]' \
@@ -905,22 +954,23 @@
 }
 
 _hg_cmd_tip() {
-  _arguments -s -w : $_hg_global_opts $_hg_gitlike_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_gitlike_opts $_hg_template_opts \
   '(--patch -p)'{-p,--patch}'[show patch]'
 }
 
 _hg_cmd_unbundle() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--update -u)'{-u,--update}'[update to new tip if changesets were unbundled]' \
-  ':files:_files'
+  '*:files:_files'
 }
 
 _hg_cmd_update() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--clean -C)'{-C,--clean}'[overwrite locally modified files]' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-  '(--check -c)'{-c,--check}'[update across branches if no uncommitted changes]' \
-  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts \
+  '(--clean -C)'{-C,--clean}'[discard uncommitted changes (no backup)]' \
+  '(--check -c)'{-c,--check}'[require clean working directory]' \
+  '(--merge -m)'{-m,--merge}'[merge uncommitted changes]' \
+  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date' \
+  '(--rev -r 1)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
   ':revision:_hg_labels'
 }
 
@@ -928,8 +978,8 @@
 
 # HGK
 _hg_cmd_view() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:' \
+  _arguments -s -S : $_hg_global_opts \
+  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:limit' \
   ':revision range:_hg_labels'
 }
 
@@ -983,54 +1033,54 @@
   '(--summary -s)'{-s,--summary}'[print first line of patch header]')
 
 _hg_cmd_qapplied() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--last -1)'{-1,--last}'[show only the preceding applied patch]' \
   '*:patch:_hg_qapplied'
 }
 
 _hg_cmd_qclone() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts $_hg_clone_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts $_hg_clone_opts \
   '(--patches -p)'{-p+,--patches=}'[location of source patch repository]:' \
   ':source repository:_hg_remote' \
   ':destination:_hg_clone_dest'
 }
 
 _hg_cmd_qdelete() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--keep -k)'{-k,--keep}'[keep patch file]' \
   '*'{-r+,--rev=}'[stop managing a revision]:applied patch:_hg_revrange' \
   '*:unapplied patch:_hg_qdeletable'
 }
 
 _hg_cmd_qdiff() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_diff_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_diff_opts \
                      $_hg_ignore_space_opts \
   '*:pattern:_hg_files'
 }
 
 _hg_cmd_qfinish() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--applied -a)'{-a,--applied}'[finish all applied patches]' \
   '*:patch:_hg_qapplied'
 }
 
 _hg_cmd_qfold() {
-  _arguments -s -w : $_hg_global_opts $_h_commit_opts \
-  '(--keep,-k)'{-k,--keep}'[keep folded patch files]' \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts \
+  '(--keep -k)'{-k,--keep}'[keep folded patch files]' \
   '(--force -f)'{-f,--force}'[overwrite any local changes]' \
   '--no-backup[do not save backup copies of files]' \
   '*:unapplied patch:_hg_qunapplied'
 }
 
 _hg_cmd_qgoto() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[overwrite any local changes]' \
   '--keep-changes[tolerate non-conflicting local changes]' \
   ':patch:_hg_qseries'
 }
 
 _hg_cmd_qguard() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--list -l)'{-l,--list}'[list all patches and guards]' \
   '(--none -n)'{-n,--none}'[drop all guards]' \
   ':patch:_hg_qseries' \
@@ -1038,14 +1088,14 @@
 }
 
 _hg_cmd_qheader() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':patch:_hg_qseries'
 }
 
 _hg_cmd_qimport() {
-  _arguments -s -w : $_hg_global_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_gitlike_opts \
   '(--existing -e)'{-e,--existing}'[import file in patch dir]' \
-  '(--name -n 2)'{-n+,--name}'[patch file name]:name:' \
+  '(--name -n 2)'{-n+,--name}'[patch file name]:name' \
   '(--force -f)'{-f,--force}'[overwrite existing files]' \
   '*'{-r+,--rev=}'[place existing revisions under mq control]:revision:_hg_revrange' \
   '(--push -P)'{-P,--push}'[qpush after importing]' \
@@ -1053,16 +1103,16 @@
 }
 
 _hg_cmd_qnew() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
   ':patch:'
 }
 
 _hg_cmd_qnext() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_qpop() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--all -a :)'{-a,--all}'[pop all patches]' \
   '(--force -f)'{-f,--force}'[forget any local changes]' \
   '--keep-changes[tolerate non-conflicting local changes]' \
@@ -1071,11 +1121,11 @@
 }
 
 _hg_cmd_qprev() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_qpush() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--all -a :)'{-a,--all}'[apply all patches]' \
   '(--list -l)'{-l,--list}'[list patch name in commit text]' \
   '(--force -f)'{-f,--force}'[apply if the patch has rejects]' \
@@ -1087,19 +1137,19 @@
 }
 
 _hg_cmd_qrefresh() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
   '(--short -s)'{-s,--short}'[short refresh]' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_qrename() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':patch:_hg_qunapplied' \
   ':destination:'
 }
 
 _hg_cmd_qselect() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--none -n :)'{-n,--none}'[disable all guards]' \
   '(--series -s :)'{-s,--series}'[list all guards in series file]' \
   '--pop[pop to before first guarded applied patch]' \
@@ -1108,32 +1158,32 @@
 }
 
 _hg_cmd_qseries() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--missing -m)'{-m,--missing}'[print patches not in series]'
 }
 
 _hg_cmd_qunapplied() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--first -1)'{-1,--first}'[show only the first patch]'
 }
 
 _hg_cmd_qtop() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_strip() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--force -f)'{-f,--force}'[force removal, discard uncommitted changes, no backup]' \
-  '(--no-backup -n)'{-n,--no-backup}'[no backups]' \
-  '(--keep -k)'{-k,--keep}'[do not modify working copy during strip]' \
-  '(--bookmark -B)'{-B+,--bookmark=}'[remove revs only reachable from given bookmark]:bookmark:_hg_bookmarks' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-  ':revision:_hg_labels'
+  _arguments -s -S : $_hg_global_opts \
+  '(--force -f)'{-f,--force}'[force removal of changesets, discard uncommitted changes (no backup)]' \
+  '--no-backup[no backups]' \
+  '(--keep -k)'{-k,--keep}'[do not modify working directory during strip]' \
+  '*'{-B+,--bookmark=}'[remove revs only reachable from given bookmark]:bookmark:_hg_bookmarks' \
+  '*'{-r+,--rev=}'[revision]:revision:_hg_labels' \
+  '*:revision:_hg_labels'
 }
 
 # Patchbomb
 _hg_cmd_email() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts $_hg_gitlike_opts \
   '--plain[omit hg patch header]' \
   '--body[send patches as inline message text (default)]' \
   '(--outgoing -o)'{-o,--outgoing}'[send changes not found in the target repository]' \
@@ -1141,79 +1191,76 @@
   '--bundlename[name of the bundle attachment file (default: bundle)]:' \
   '*'{-r+,--rev=}'[search in given revision range]:revision:_hg_revrange' \
   '--force[run even when remote repository is unrelated (with -b/--bundle)]' \
-  '*--base[a base changeset to specify instead of a destination (with -b/--bundle)]:revision:_hg_labels' \
+  '*--base=[a base changeset to specify instead of a destination (with -b/--bundle)]:revision:_hg_labels' \
   '--intro[send an introduction email for a single patch]' \
   '(--inline -i --attach -a)'{-a,--attach}'[send patches as attachments]' \
   '(--attach -a --inline -i)'{-i,--inline}'[send patches as inline attachments]' \
-  '*--bcc[email addresses of blind carbon copy recipients]:email:' \
-  '*'{-c+,--cc}'[email addresses of copy recipients]:email:' \
+  '*--bcc=[email addresses of blind carbon copy recipients]:email' \
+  '*'{-c+,--cc=}'[email addresses of copy recipients]:email' \
   '(--diffstat -d)'{-d,--diffstat}'[add diffstat output to messages]' \
-  '--date[use the given date as the sending date]:date:' \
-  '--desc[use the given file as the series description]:files:_files' \
-  '(--from -f)'{-f,--from}'[email address of sender]:email:' \
+  '--date=[use the given date as the sending date]:date' \
+  '--desc=[use the given file as the series description]:files:_files' \
+  '(--from -f)'{-f+,--from=}'[email address of sender]:email' \
   '(--test -n)'{-n,--test}'[print messages that would be sent]' \
-  '(--mbox -m)'{-m,--mbox}'[write messages to mbox file instead of sending them]:file:' \
-  '*--reply-to[email addresses replies should be sent to]:email:' \
-  '(--subject -s)'{-s,--subject}'[subject of first message (intro or single patch)]:subject:' \
-  '--in-reply-to[message identifier to reply to]:msgid:' \
-  '*--flag[flags to add in subject prefixes]:flag:' \
-  '*'{-t,--to}'[email addresses of recipients]:email:' \
+  '(--mbox -m)'{-m+,--mbox=}'[write messages to mbox file instead of sending them]:file:_files' \
+  '*--reply-to=[email addresses replies should be sent to]:email' \
+  '(--subject -s)'{-s+,--subject=}'[subject of first message (intro or single patch)]:subject' \
+  '--in-reply-to=[message identifier to reply to]:msgid' \
+  '*--flag=[flags to add in subject prefixes]:flag' \
+  '*'{-t+,--to=}'[email addresses of recipients]:email' \
   ':revision:_hg_revrange'
 }
 
 # Rebase
 _hg_cmd_rebase() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_mergetool_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_mergetool_opts $_hg_dryrun_opts \
   '*'{-r+,--rev=}'[rebase these revisions]:revision:_hg_revrange' \
-  '(--source -s)'{-s+,--source=}'[rebase from the specified changeset]:revision:_hg_labels' \
-  '(--base -b)'{-b+,--base=}'[rebase from the base of the specified changeset]:revision:_hg_labels' \
+  '(--source -s --base -b)'{-s+,--source=}'[rebase the specified changeset and descendants]:revision:_hg_labels' \
+  '(--source -s --base -b)'{-b+,--base=}'[rebase everything from branching point of specified changeset]:revision:_hg_labels' \
   '(--dest -d)'{-d+,--dest=}'[rebase onto the specified changeset]:revision:_hg_labels' \
-  '--collapse[collapse the rebased changeset]' \
-  '--keep[keep original changeset]' \
-  '--keepbranches[keep original branch name]' \
-  '(--continue -c)'{-c,--continue}'[continue an interrupted rebase]' \
-  '(--abort -a)'{-a,--abort}'[abort an interrupted rebase]' \
+  '--collapse[collapse the rebased changesets]' \
+  '(--keep -k)'{-k,--keep}'[keep original changesets]' \
+  '--keepbranches[keep original branch names]' \
+  '(--continue -c --abort -a)'{-c,--continue}'[continue an interrupted rebase]' \
+  '(--continue -c --abort -a)'{-a,--abort}'[abort an interrupted rebase]' \
 }
 
 # Record
 _hg_cmd_record() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_pat_opts \
                      $_hg_ignore_space_opts $_hg_subrepos_opts \
   '(--addremove -A)'{-A,--addremove}'[mark new/missing files as added/removed before committing]' \
   '--close-branch[mark a branch as closed, hiding it from the branch list]' \
   '--amend[amend the parent of the working dir]' \
-  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date:' \
-  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user:'
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user'
 }
 
 _hg_cmd_qrecord() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
                      $_hg_pat_opts $_hg_ignore_space_opts $_hg_subrepos_opts
 }
 
 # Convert
 _hg_cmd_convert() {
-_arguments -s -w : $_hg_global_opts \
-  '(--source-type -s)'{-s,--source-type}'[source repository type]' \
-  '(--dest-type -d)'{-d,--dest-type}'[destination repository type]' \
-  '(--rev -r)'{-r+,--rev=}'[import up to target revision]:revision:' \
+_arguments -s -S : $_hg_global_opts \
+  '(--source-type -s)'{-s+,--source-type=}'[source repository type]:type:(hg cvs darcs git svn mtn gnuarch bzr p4)' \
+  '(--dest-type -d)'{-d+,--dest-type=}'[destination repository type]:type:(hg svn)' \
+  '*'{-r+,--rev=}'[import up to target revision]:revision' \
   '(--authormap -A)'{-A+,--authormap=}'[remap usernames using this file]:file:_files' \
-  '--filemap[remap file names using contents of file]:file:_files' \
-  '--splicemap[splice synthesized history into place]:file:_files' \
-  '--branchmap[change branch names while converting]:file:_files' \
+  '--filemap=[remap file names using contents of file]:file:_files' \
+  '--full[apply filemap changes by converting all files again]' \
+  '--splicemap=[splice synthesized history into place]:file:_files' \
+  '--branchmap=[change branch names while converting]:file:_files' \
   '--branchsort[try to sort changesets by branches]' \
   '--datesort[try to sort changesets by date]' \
-  '--sourcesort[preserve source changesets order]'
-}
-
-# Graphlog
-_hg_cmd_glog() {
-  _hg_cmd_log $@
+  '--sourcesort[preserve source changesets order]' \
+  '--closesort[try to reorder closed revisions]'
 }
 
 # Purge
 _hg_cmd_purge() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
   '(--abort-on-err -a)'{-a,--abort-on-err}'[abort if an error occurs]' \
   '--all[purge ignored files too]' \
   '(--print -p)'{-p,--print}'[print filenames instead of deleting them]' \
--- a/hgdemandimport/demandimportpy2.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgdemandimport/demandimportpy2.py	Tue Sep 04 12:16:28 2018 -0400
@@ -30,6 +30,8 @@
 import contextlib
 import sys
 
+from . import tracing
+
 contextmanager = contextlib.contextmanager
 
 _origimport = __import__
@@ -86,52 +88,55 @@
 
     def _load(self):
         if not self._module:
-            head, globals, locals, after, level, modrefs = self._data
-            mod = _hgextimport(_origimport, head, globals, locals, None, level)
-            if mod is self:
-                # In this case, _hgextimport() above should imply
-                # _demandimport(). Otherwise, _hgextimport() never
-                # returns _demandmod. This isn't intentional behavior,
-                # in fact. (see also issue5304 for detail)
-                #
-                # If self._module is already bound at this point, self
-                # should be already _load()-ed while _hgextimport().
-                # Otherwise, there is no way to import actual module
-                # as expected, because (re-)invoking _hgextimport()
-                # should cause same result.
-                # This is reason why _load() returns without any more
-                # setup but assumes self to be already bound.
-                mod = self._module
-                assert mod and mod is not self, "%s, %s" % (self, mod)
-                return
+            with tracing.log('demandimport %s', self._data[0]):
+                head, globals, locals, after, level, modrefs = self._data
+                mod = _hgextimport(
+                    _origimport, head, globals, locals, None, level)
+                if mod is self:
+                    # In this case, _hgextimport() above should imply
+                    # _demandimport(). Otherwise, _hgextimport() never
+                    # returns _demandmod. This isn't intentional
+                    # behavior, in fact (see also issue5304 for details).
+                    #
+                    # If self._module is already bound at this point, self
+                    # should already have been _load()-ed during
+                    # _hgextimport(). Otherwise, there is no way to import
+                    # the actual module as expected, because (re-)invoking
+                    # _hgextimport() would produce the same result.
+                    # This is why _load() returns without any more setup
+                    # but assumes self to be already bound.
+                    mod = self._module
+                    assert mod and mod is not self, "%s, %s" % (self, mod)
+                    return
 
-            # load submodules
-            def subload(mod, p):
-                h, t = p, None
-                if '.' in p:
-                    h, t = p.split('.', 1)
-                if getattr(mod, h, nothing) is nothing:
-                    setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__,
-                                               level=1))
-                elif t:
-                    subload(getattr(mod, h), t)
+                # load submodules
+                def subload(mod, p):
+                    h, t = p, None
+                    if '.' in p:
+                        h, t = p.split('.', 1)
+                    if getattr(mod, h, nothing) is nothing:
+                        setattr(mod, h, _demandmod(
+                            p, mod.__dict__, mod.__dict__, level=1))
+                    elif t:
+                        subload(getattr(mod, h), t)
 
-            for x in after:
-                subload(mod, x)
+                for x in after:
+                    subload(mod, x)
 
-            # Replace references to this proxy instance with the actual module.
-            if locals:
-                if locals.get(head) is self:
-                    locals[head] = mod
-                elif locals.get(head + r'mod') is self:
-                    locals[head + r'mod'] = mod
+                # Replace references to this proxy instance with the
+                # actual module.
+                if locals:
+                    if locals.get(head) is self:
+                        locals[head] = mod
+                    elif locals.get(head + r'mod') is self:
+                        locals[head + r'mod'] = mod
 
-            for modname in modrefs:
-                modref = sys.modules.get(modname, None)
-                if modref and getattr(modref, head, None) is self:
-                    setattr(modref, head, mod)
+                for modname in modrefs:
+                    modref = sys.modules.get(modname, None)
+                    if modref and getattr(modref, head, None) is self:
+                        setattr(modref, head, mod)
 
-            object.__setattr__(self, r"_module", mod)
+                object.__setattr__(self, r"_module", mod)
 
     def __repr__(self):
         if self._module:
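The hunk above wraps the proxy's _load() in a tracing context manager. The
demand-loading idea itself can be reduced to a minimal standalone sketch
(illustrative only; lazymodule is a made-up name, not Mercurial's actual
_demandmod)::

    import importlib

    class lazymodule(object):
        """defer the real import until an attribute is first accessed"""
        def __init__(self, name):
            self._name = name
            self._mod = None

        def __getattr__(self, attr):
            # only called for attributes not found on the proxy itself
            if self._mod is None:
                self._mod = importlib.import_module(self._name)
            return getattr(self._mod, attr)

    json = lazymodule('json')    # no import cost paid yet
    print(json.dumps({'a': 1}))  # the real import happens here
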
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgdemandimport/tracing.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,44 @@
+# Support code for event tracing in Mercurial. Lives in the demandimport
+# package so it can be used even while demandimport itself is running.
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import contextlib
+import os
+
+_pipe = None
+_checked = False
+
+@contextlib.contextmanager
+def log(whencefmt, *whenceargs):
+    global _pipe, _session, _checked
+    if _pipe is None:
+        if _checked:
+            yield
+            return
+        _checked = True
+        if 'HGCATAPULTSERVERPIPE' not in os.environ:
+            yield
+            return
+        _pipe = open(os.environ['HGCATAPULTSERVERPIPE'], 'w', 1)
+        _session = os.environ.get('HGCATAPULTSESSION', 'none')
+    whence = whencefmt % whenceargs
+    try:
+        # Both writes to the pipe are wrapped in try/except to ignore
+        # errors, as we can see mysterious errors in here if the pager
+        # is active. Presumably other conditions could trigger
+        # problems too.
+        try:
+            _pipe.write('START %s %s\n' % (_session, whence))
+        except IOError:
+            pass
+        yield
+    finally:
+        try:
+            _pipe.write('END %s %s\n' % (_session, whence))
+        except IOError:
+            pass
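
Assuming a reader such as catapipe.py is already attached to the named pipe
(opening a FIFO for writing blocks until someone reads it), a session might
look like the following sketch; the pipe path and session name are arbitrary::

    import os
    from hgdemandimport import tracing

    # set these before the first log() call: the module caches the
    # "no pipe configured" answer in _checked
    os.environ['HGCATAPULTSERVERPIPE'] = '/tmp/catapult.pipe'
    os.environ['HGCATAPULTSESSION'] = 'demo'

    with tracing.log('importing %s', 'mercurial.util'):
        pass  # traced work goes here
    # the pipe receives:
    #   START demo importing mercurial.util
    #   END demo importing mercurial.util
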
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/absorb.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,977 @@
+# absorb.py
+#
+# Copyright 2016 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""apply working directory changes to changesets (EXPERIMENTAL)
+
+The absorb extension provides a command to use annotate information to
+amend modified chunks into the corresponding non-public changesets.
+
+::
+
+    [absorb]
+    # only check 50 recent non-public changesets at most
+    max-stack-size = 50
+    # whether to add noise to new commits to avoid obsolescence cycle
+    add-noise = 1
+    # make `amend --correlated` a shortcut to the main command
+    amend-flag = correlated
+
+    [color]
+    absorb.node = blue bold
+    absorb.path = bold
+"""
+
+# TODO:
+#  * Rename config items to [commands] namespace
+#  * Converge getdraftstack() with other code in core
+#  * move many attributes on fixupstate to be private
+
+from __future__ import absolute_import
+
+import collections
+
+from mercurial.i18n import _
+from mercurial import (
+    cmdutil,
+    commands,
+    context,
+    crecord,
+    error,
+    linelog,
+    mdiff,
+    node,
+    obsolete,
+    patch,
+    phases,
+    pycompat,
+    registrar,
+    repair,
+    scmutil,
+    util,
+)
+from mercurial.utils import (
+    stringutil,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('absorb', 'add-noise', default=True)
+configitem('absorb', 'amend-flag', default=None)
+configitem('absorb', 'max-stack-size', default=50)
+
+colortable = {
+    'absorb.node': 'blue bold',
+    'absorb.path': 'bold',
+}
+
+defaultdict = collections.defaultdict
+
+class nullui(object):
+    """blank ui object doing nothing"""
+    debugflag = False
+    verbose = False
+    quiet = True
+
+    def __getitem__(self, name):
+        def nullfunc(*args, **kwds):
+            return
+        return nullfunc
+
+class emptyfilecontext(object):
+    """minimal filecontext representing an empty file"""
+    def data(self):
+        return ''
+
+    def node(self):
+        return node.nullid
+
+def uniq(lst):
+    """list -> list. remove duplicated items without changing the order"""
+    seen = set()
+    result = []
+    for x in lst:
+        if x not in seen:
+            seen.add(x)
+            result.append(x)
+    return result
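
uniq() keeps the first occurrence of each item, so ordering is stable::

    >>> uniq([3, 1, 3, 2, 1])
    [3, 1, 2]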
+
+def getdraftstack(headctx, limit=None):
+    """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.
+
+    changesets are sorted in topo order, oldest first.
+    return at most limit items, if limit is a positive number.
+
+    merges are considered non-draft as well, i.e. every commit
+    returned has exactly one parent.
+    """
+    ctx = headctx
+    result = []
+    while ctx.phase() != phases.public:
+        if limit and len(result) >= limit:
+            break
+        parents = ctx.parents()
+        if len(parents) != 1:
+            break
+        result.append(ctx)
+        ctx = parents[0]
+    result.reverse()
+    return result
+
+def getfilestack(stack, path, seenfctxs=None):
+    """([ctx], str, set) -> [fctx], {ctx: fctx}
+
+    stack is a list of contexts, from old to new. usually they are what
+    "getdraftstack" returns.
+
+    follows renames, but not copies.
+
+    seenfctxs is a set of filecontexts that will be considered "immutable".
+    they are usually what this function returned in earlier calls. this is
+    useful for avoiding problems when a file was "moved" to multiple places
+    and then modified differently, e.g. "a" was copied to "b" and to "c",
+    then "a" was deleted; both "b" and "c" were "moved" from "a", and we
+    enforce that only one of them may affect "a"'s content.
+
+    return an empty list and an empty dict, if the specified path does not
+    exist in stack[-1] (the top of the stack).
+
+    otherwise, return a list of de-duplicated filecontexts, and the map to
+    convert ctx in the stack to fctx, for possible mutable fctxs. the first item
+    of the list would be outside the stack and should be considered immutable.
+    the remaining items are within the stack.
+
+    for example, given the following changelog and corresponding filelog
+    revisions:
+
+      changelog: 3----4----5----6----7
+      filelog:   x    0----1----1----2 (x: no such file yet)
+
+    - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
+    - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
+      dummy empty filecontext.
+    - if stack = [2], returns ([], {})
+    - if stack = [7], returns ([1, 2], {7: 2})
+    - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
+      removed, since 1 is immutable.
+    """
+    if seenfctxs is None:
+        seenfctxs = set()
+    assert stack
+
+    if path not in stack[-1]:
+        return [], {}
+
+    fctxs = []
+    fctxmap = {}
+
+    pctx = stack[0].p1() # the public (immutable) ctx we stop at
+    for ctx in reversed(stack):
+        if path not in ctx: # the file is added in the next commit
+            pctx = ctx
+            break
+        fctx = ctx[path]
+        fctxs.append(fctx)
+        if fctx in seenfctxs: # treat fctx as the immutable one
+            pctx = None # do not add another immutable fctx
+            break
+        fctxmap[ctx] = fctx # only for mutable fctxs
+        renamed = fctx.renamed()
+        if renamed:
+            path = renamed[0] # follow rename
+            if path in ctx: # but do not follow copy
+                pctx = ctx.p1()
+                break
+
+    if pctx is not None: # need an extra immutable fctx
+        if path in pctx:
+            fctxs.append(pctx[path])
+        else:
+            fctxs.append(emptyfilecontext())
+
+    fctxs.reverse()
+    # note: we rely on a property of hg: filerev is not reused for linear
+    # history. i.e. it's impossible to have:
+    #   changelog:  4----5----6 (linear, no merges)
+    #   filelog:    1----2----1
+    #                         ^ reuse filerev (impossible)
+    # because parents are part of the hash. if that's not true, we need to
+    # remove uniq and find a different way to identify fctxs.
+    return uniq(fctxs), fctxmap
+
+class overlaystore(patch.filestore):
+    """read-only, hybrid store based on a dict and ctx.
+    memworkingcopy: {path: content}, overrides file contents.
+    """
+    def __init__(self, basectx, memworkingcopy):
+        self.basectx = basectx
+        self.memworkingcopy = memworkingcopy
+
+    def getfile(self, path):
+        """comply with mercurial.patch.filestore.getfile"""
+        if path not in self.basectx:
+            return None, None, None
+        fctx = self.basectx[path]
+        if path in self.memworkingcopy:
+            content = self.memworkingcopy[path]
+        else:
+            content = fctx.data()
+        mode = (fctx.islink(), fctx.isexec())
+        renamed = fctx.renamed() # False or (path, node)
+        return content, mode, (renamed and renamed[0])
+
+def overlaycontext(memworkingcopy, ctx, parents=None, extra=None):
+    """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
+    memworkingcopy overrides file contents.
+    """
+    # parents must contain 2 items: (node1, node2)
+    if parents is None:
+        parents = ctx.repo().changelog.parents(ctx.node())
+    if extra is None:
+        extra = ctx.extra()
+    date = ctx.date()
+    desc = ctx.description()
+    user = ctx.user()
+    files = set(ctx.files()).union(memworkingcopy)
+    store = overlaystore(ctx, memworkingcopy)
+    return context.memctx(
+        repo=ctx.repo(), parents=parents, text=desc,
+        files=files, filectxfn=store, user=user, date=date,
+        branch=None, extra=extra)
+
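The read path of overlaystore is essentially a two-layer dictionary lookup:
in-memory overrides win, and everything else falls through to the base
context. A standalone sketch of that lookup order, with plain dicts standing
in for the real contexts::

    from collections import ChainMap

    basectx = {'a.txt': 'old a\n', 'b.txt': 'old b\n'}  # stand-in for a ctx
    memworkingcopy = {'a.txt': 'new a\n'}               # in-memory overrides

    overlay = ChainMap(memworkingcopy, basectx)
    print(overlay['a.txt'])  # 'new a\n' -- the override wins
    print(overlay['b.txt'])  # 'old b\n' -- falls through to the base
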
+class filefixupstate(object):
+    """state needed to apply fixups to a single file
+
+    internally, it keeps file contents of several revisions and a linelog.
+
+    the linelog uses odd revision numbers for original contents (fctxs passed
+    to __init__), and even revision numbers for fixups, like:
+
+        linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
+        linelog rev 2: fixups made to self.fctxs[0]
+        linelog rev 3: self.fctxs[1] (a child of fctxs[0])
+        linelog rev 4: fixups made to self.fctxs[1]
+        ...
+
+    a typical use is like:
+
+        1. call diffwith, to calculate self.fixups
+        2. (optionally), present self.fixups to the user, or change it
+        3. call apply, to apply changes
+        4. read results from "finalcontents", or call getfinalcontent
+    """
+
+    def __init__(self, fctxs, ui=None, opts=None):
+        """([fctx], ui or None) -> None
+
+        fctxs should be linear, and sorted by topo order - oldest first.
+        fctxs[0] will be considered as "immutable" and will not be changed.
+        """
+        self.fctxs = fctxs
+        self.ui = ui or nullui()
+        self.opts = opts or {}
+
+        # following fields are built from fctxs. they exist for perf reason
+        self.contents = [f.data() for f in fctxs]
+        self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
+        self.linelog = self._buildlinelog()
+        if self.ui.debugflag:
+            assert self._checkoutlinelog() == self.contents
+
+        # following fields will be filled later
+        self.chunkstats = [0, 0] # [adopted, total : int]
+        self.targetlines = [] # [str]
+        self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
+        self.finalcontents = [] # [str]
+
+    def diffwith(self, targetfctx, showchanges=False):
+        """calculate fixups needed by examining the differences between
+        self.fctxs[-1] and targetfctx, chunk by chunk.
+
+        targetfctx is the target state we move towards. we may or may not be
+        able to get there because not all modified chunks can be amended into
+        a non-public fctx unambiguously.
+
+        call this only once, before apply().
+
+        update self.fixups, self.chunkstats, and self.targetlines.
+        """
+        a = self.contents[-1]
+        alines = self.contentlines[-1]
+        b = targetfctx.data()
+        blines = mdiff.splitnewlines(b)
+        self.targetlines = blines
+
+        self.linelog.annotate(self.linelog.maxrev)
+        annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
+        assert len(annotated) == len(alines)
+        # add a dummy end line to make insertion at the end easier
+        if annotated:
+            dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
+            annotated.append(dummyendline)
+
+        # analyse diff blocks
+        for chunk in self._alldiffchunks(a, b, alines, blines):
+            newfixups = self._analysediffchunk(chunk, annotated)
+            self.chunkstats[0] += bool(newfixups) # 1 or 0
+            self.chunkstats[1] += 1
+            self.fixups += newfixups
+            if showchanges:
+                self._showchanges(alines, blines, chunk, newfixups)
+
+    def apply(self):
+        """apply self.fixups. update self.linelog, self.finalcontents.
+
+        call this only once, before getfinalcontent(), after diffwith().
+        """
+        # the following is unnecessary, as it's done by "diffwith":
+        #   self.linelog.annotate(self.linelog.maxrev)
+        for rev, a1, a2, b1, b2 in reversed(self.fixups):
+            blines = self.targetlines[b1:b2]
+            if self.ui.debugflag:
+                idx = (max(rev - 1, 0)) // 2
+                self.ui.write(_('%s: chunk %d:%d -> %d lines\n')
+                              % (node.short(self.fctxs[idx].node()),
+                                 a1, a2, len(blines)))
+            self.linelog.replacelines(rev, a1, a2, b1, b2)
+        if self.opts.get('edit_lines', False):
+            self.finalcontents = self._checkoutlinelogwithedits()
+        else:
+            self.finalcontents = self._checkoutlinelog()
+
+    def getfinalcontent(self, fctx):
+        """(fctx) -> str. get modified file content for a given filecontext"""
+        idx = self.fctxs.index(fctx)
+        return self.finalcontents[idx]
+
+    def _analysediffchunk(self, chunk, annotated):
+        """analyse a different chunk and return new fixups found
+
+        return [] if no lines from the chunk can be safely applied.
+
+        the chunk (or lines) cannot be safely applied, if, for example:
+          - the modified (deleted) lines belong to a public changeset
+            (self.fctxs[0])
+          - the chunk is a pure insertion and the adjacent lines (at most 2
+            lines) belong to different non-public changesets, or do not belong
+            to any non-public changesets.
+          - the chunk is modifying lines from different changesets.
+            in this case, if the number of lines deleted equals the number
+            of lines added, assume it's a simple 1:1 map (could be wrong).
+            otherwise, give up.
+          - the chunk is modifying lines from a single non-public changeset,
+            but other revisions touch the area as well. i.e. the lines are
+            not continuous as seen from the linelog.
+        """
+        a1, a2, b1, b2 = chunk
+        # find involved indexes from annotate result
+        involved = annotated[a1:a2]
+        if not involved and annotated: # a1 == a2 and a is not empty
+            # pure insertion, check nearby lines. ignore lines belonging
+            # to the public (first) changeset (i.e. annotated[i][0] == 1)
+            nearbylinenums = {a2, max(0, a1 - 1)}
+            involved = [annotated[i]
+                        for i in nearbylinenums if annotated[i][0] != 1]
+        involvedrevs = list(set(r for r, l in involved))
+        newfixups = []
+        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
+            # chunk belongs to a single revision
+            rev = involvedrevs[0]
+            if rev > 1:
+                fixuprev = rev + 1
+                newfixups.append((fixuprev, a1, a2, b1, b2))
+        elif a2 - a1 == b2 - b1 or b1 == b2:
+            # 1:1 line mapping, or chunk was deleted
+            for i in pycompat.xrange(a1, a2):
+                rev, linenum = annotated[i]
+                if rev > 1:
+                    if b1 == b2: # deletion, simply remove that single line
+                        nb1 = nb2 = 0
+                    else: # 1:1 line mapping, change the corresponding rev
+                        nb1 = b1 + i - a1
+                        nb2 = nb1 + 1
+                    fixuprev = rev + 1
+                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
+        return self._optimizefixups(newfixups)
+
+    @staticmethod
+    def _alldiffchunks(a, b, alines, blines):
+        """like mdiff.allblocks, but only care about differences"""
+        blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
+        for chunk, btype in blocks:
+            if btype != '!':
+                continue
+            yield chunk
+
+    def _buildlinelog(self):
+        """calculate the initial linelog based on self.content{,line}s.
+        this is similar to running a partial "annotate".
+        """
+        llog = linelog.linelog()
+        a, alines = '', []
+        for i in pycompat.xrange(len(self.contents)):
+            b, blines = self.contents[i], self.contentlines[i]
+            llrev = i * 2 + 1
+            chunks = self._alldiffchunks(a, b, alines, blines)
+            for a1, a2, b1, b2 in reversed(list(chunks)):
+                llog.replacelines(llrev, a1, a2, b1, b2)
+            a, alines = b, blines
+        return llog
+
+    def _checkoutlinelog(self):
+        """() -> [str]. check out file contents from linelog"""
+        contents = []
+        for i in pycompat.xrange(len(self.contents)):
+            rev = (i + 1) * 2
+            self.linelog.annotate(rev)
+            content = ''.join(map(self._getline, self.linelog.annotateresult))
+            contents.append(content)
+        return contents
+
+    def _checkoutlinelogwithedits(self):
+        """() -> [str]. prompt all lines for edit"""
+        alllines = self.linelog.getalllines()
+        # header
+        editortext = (_('HG: editing %s\nHG: "y" means the line to the right '
+                        'exists in the changeset to the top\nHG:\n')
+                      % self.fctxs[-1].path())
+        # [(idx, fctx)]. hide the dummy emptyfilecontext
+        visiblefctxs = [(i, f)
+                        for i, f in enumerate(self.fctxs)
+                        if not isinstance(f, emptyfilecontext)]
+        for i, (j, f) in enumerate(visiblefctxs):
+            editortext += (_('HG: %s/%s %s %s\n') %
+                           ('|' * i, '-' * (len(visiblefctxs) - i + 1),
+                            node.short(f.node()),
+                            f.description().split('\n', 1)[0]))
+        editortext += _('HG: %s\n') % ('|' * len(visiblefctxs))
+        # figure out the lifetime of a line; this is relatively inefficient,
+        # but probably fine
+        lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
+        for i, f in visiblefctxs:
+            self.linelog.annotate((i + 1) * 2)
+            for l in self.linelog.annotateresult:
+                lineset[l].add(i)
+        # append lines
+        for l in alllines:
+            editortext += ('    %s : %s' %
+                           (''.join([('y' if i in lineset[l] else ' ')
+                                     for i, _f in visiblefctxs]),
+                            self._getline(l)))
+        # run editor
+        editedtext = self.ui.edit(editortext, '', action='absorb')
+        if not editedtext:
+            raise error.Abort(_('empty editor text'))
+        # parse edited result
+        contents = ['' for i in self.fctxs]
+        leftpadpos = 4
+        colonpos = leftpadpos + len(visiblefctxs) + 1
+        for l in mdiff.splitnewlines(editedtext):
+            if l.startswith('HG:'):
+                continue
+            if l[colonpos - 1:colonpos + 2] != ' : ':
+                raise error.Abort(_('malformed line: %s') % l)
+            linecontent = l[colonpos + 2:]
+            for i, ch in enumerate(l[leftpadpos:colonpos - 1]):
+                if ch == 'y':
+                    contents[visiblefctxs[i][0]] += linecontent
+        # chunkstats is hard to calculate if anything changes, so just
+        # set it to a simple value (1, 1).
+        if editedtext != editortext:
+            self.chunkstats = [1, 1]
+        return contents
+
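The column arithmetic in _checkoutlinelogwithedits() is easiest to see on a
toy input. A self-contained sketch of just the parsing step, assuming three
visible changesets (so colonpos is 4 + 3 + 1 = 8)::

    editedtext = ('HG: editing example.txt\n'
                  '    yy  : first line\n'
                  '      y : second line\n')
    leftpadpos = 4
    colonpos = 8
    contents = ['', '', '']
    for l in editedtext.splitlines(True):
        if l.startswith('HG:'):
            continue
        assert l[colonpos - 1:colonpos + 2] == ' : ', 'malformed line'
        linecontent = l[colonpos + 2:]
        for i, ch in enumerate(l[leftpadpos:colonpos - 1]):
            if ch == 'y':
                contents[i] += linecontent
    print(contents)  # ['first line\n', 'first line\n', 'second line\n']
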
+    def _getline(self, lineinfo):
+        """((rev, linenum)) -> str. convert rev+line number to line content"""
+        rev, linenum = lineinfo
+        if rev & 1: # odd: original line taken from fctxs
+            return self.contentlines[rev // 2][linenum]
+        else: # even: fixup line from targetfctx
+            return self.targetlines[linenum]
+
+    def _iscontinuous(self, a1, a2, closedinterval=False):
+        """(a1, a2 : int) -> bool
+
+        check if these lines are continuous. i.e. no other insertions or
+        deletions (from other revisions) among these lines.
+
+        closedinterval decides whether a2 should be included or not. i.e. is
+        it [a1, a2), or [a1, a2] ?
+        """
+        if a1 >= a2:
+            return True
+        llog = self.linelog
+        offset1 = llog.getoffset(a1)
+        offset2 = llog.getoffset(a2) + int(closedinterval)
+        linesinbetween = llog.getalllines(offset1, offset2)
+        return len(linesinbetween) == a2 - a1 + int(closedinterval)
+
+    def _optimizefixups(self, fixups):
+        """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
+        merge adjacent fixups to make them less fragmented.
+        """
+        result = []
+        pcurrentchunk = [[-1, -1, -1, -1, -1]]
+
+        def pushchunk():
+            if pcurrentchunk[0][0] != -1:
+                result.append(tuple(pcurrentchunk[0]))
+
+        for i, chunk in enumerate(fixups):
+            rev, a1, a2, b1, b2 = chunk
+            lastrev = pcurrentchunk[0][0]
+            lasta2 = pcurrentchunk[0][2]
+            lastb2 = pcurrentchunk[0][4]
+            if (a1 == lasta2 and b1 == lastb2 and rev == lastrev and
+                    self._iscontinuous(max(a1 - 1, 0), a1)):
+                # merge into currentchunk
+                pcurrentchunk[0][2] = a2
+                pcurrentchunk[0][4] = b2
+            else:
+                pushchunk()
+                pcurrentchunk[0] = list(chunk)
+        pushchunk()
+        return result
+
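On a concrete input the merging looks like the sketch below; the
_iscontinuous() check is omitted for brevity, so this is a simplification of
_optimizefixups(), not a drop-in replacement::

    def mergefixups(fixups):
        """merge adjacent (rev, a1, a2, b1, b2) chunks of the same rev"""
        result = []
        for chunk in fixups:
            if (result and result[-1][0] == chunk[0]   # same rev
                    and result[-1][2] == chunk[1]      # a ranges touch
                    and result[-1][4] == chunk[3]):    # b ranges touch
                rev, a1, _a2, b1, _b2 = result[-1]
                result[-1] = (rev, a1, chunk[2], b1, chunk[4])
            else:
                result.append(chunk)
        return result

    print(mergefixups([(4, 0, 1, 0, 1), (4, 1, 2, 1, 2), (2, 5, 6, 7, 8)]))
    # [(4, 0, 2, 0, 2), (2, 5, 6, 7, 8)]
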
+    def _showchanges(self, alines, blines, chunk, fixups):
+        ui = self.ui
+
+        def label(line, label):
+            if line.endswith('\n'):
+                line = line[:-1]
+            return ui.label(line, label)
+
+        # this is not optimized for perf but _showchanges only gets executed
+        # with an extra command-line flag.
+        a1, a2, b1, b2 = chunk
+        aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
+        for idx, fa1, fa2, fb1, fb2 in fixups:
+            for i in pycompat.xrange(fa1, fa2):
+                aidxs[i - a1] = (max(idx, 1) - 1) // 2
+            for i in pycompat.xrange(fb1, fb2):
+                bidxs[i - b1] = (max(idx, 1) - 1) // 2
+
+        buf = [] # [(idx, content)]
+        buf.append((0, label('@@ -%d,%d +%d,%d @@'
+                             % (a1, a2 - a1, b1, b2 - b1), 'diff.hunk')))
+        buf += [(aidxs[i - a1], label('-' + alines[i], 'diff.deleted'))
+                for i in pycompat.xrange(a1, a2)]
+        buf += [(bidxs[i - b1], label('+' + blines[i], 'diff.inserted'))
+                for i in pycompat.xrange(b1, b2)]
+        for idx, line in buf:
+            shortnode = idx and node.short(self.fctxs[idx].node()) or ''
+            ui.write(ui.label(shortnode[0:7].ljust(8), 'absorb.node') +
+                     line + '\n')
+
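The odd/even linelog numbering described in the class docstring maps back to
an index into self.fctxs with (max(rev - 1, 0)) // 2, the same expression
apply() uses for its debug output::

    for rev in range(1, 7):
        idx = (max(rev - 1, 0)) // 2
        kind = 'original' if rev % 2 else 'fixup for'
        print('linelog rev %d: %s fctxs[%d]' % (rev, kind, idx))
    # linelog rev 1: original fctxs[0]
    # linelog rev 2: fixup for fctxs[0]
    # linelog rev 3: original fctxs[1]
    # ... and so on
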
+class fixupstate(object):
+    """state needed to run absorb
+
+    internally, it keeps paths and filefixupstates.
+
+    a typical use is like filefixupstates:
+
+        1. call diffwith, to calculate fixups
+        2. (optionally), present fixups to the user, or edit fixups
+        3. call apply, to apply changes to memory
+        4. call commit, to commit changes to hg database
+    """
+
+    def __init__(self, stack, ui=None, opts=None):
+        """([ctx], ui or None) -> None
+
+        stack: should be linear, and sorted by topo order - oldest first.
+        all commits in stack are considered mutable.
+        """
+        assert stack
+        self.ui = ui or nullui()
+        self.opts = opts or {}
+        self.stack = stack
+        self.repo = stack[-1].repo().unfiltered()
+
+        # following fields will be filled later
+        self.paths = [] # [str]
+        self.status = None # ctx.status output
+        self.fctxmap = {} # {path: {ctx: fctx}}
+        self.fixupmap = {} # {path: filefixupstate}
+        self.replacemap = {} # {oldnode: newnode or None}
+        self.finalnode = None # head after all fixups
+
+    def diffwith(self, targetctx, match=None, showchanges=False):
+        """diff and prepare fixups. update self.fixupmap, self.paths"""
+        # only care about modified files
+        self.status = self.stack[-1].status(targetctx, match)
+        self.paths = []
+        # but if --edit-lines is used, the user may want to edit files
+        # even if they are not modified
+        editopt = self.opts.get('edit_lines')
+        if not self.status.modified and editopt and match:
+            interestingpaths = match.files()
+        else:
+            interestingpaths = self.status.modified
+        # prepare the filefixupstate
+        seenfctxs = set()
+        # sorting is necessary to eliminate ambiguity for the "double move"
+        # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
+        for path in sorted(interestingpaths):
+            self.ui.debug('calculating fixups for %s\n' % path)
+            targetfctx = targetctx[path]
+            fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
+            # ignore symbolic links or binary, or unchanged files
+            if any(f.islink() or stringutil.binary(f.data())
+                   for f in [targetfctx] + fctxs
+                   if not isinstance(f, emptyfilecontext)):
+                continue
+            if targetfctx.data() == fctxs[-1].data() and not editopt:
+                continue
+            seenfctxs.update(fctxs[1:])
+            self.fctxmap[path] = ctx2fctx
+            fstate = filefixupstate(fctxs, ui=self.ui, opts=self.opts)
+            if showchanges:
+                colorpath = self.ui.label(path, 'absorb.path')
+                header = 'showing changes for ' + colorpath
+                self.ui.write(header + '\n')
+            fstate.diffwith(targetfctx, showchanges=showchanges)
+            self.fixupmap[path] = fstate
+            self.paths.append(path)
+
+    def apply(self):
+        """apply fixups to individual filefixupstates"""
+        for path, state in self.fixupmap.iteritems():
+            if self.ui.debugflag:
+                self.ui.write(_('applying fixups to %s\n') % path)
+            state.apply()
+
+    @property
+    def chunkstats(self):
+        """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
+        return dict((path, state.chunkstats)
+                    for path, state in self.fixupmap.iteritems())
+
+    def commit(self):
+        """commit changes. update self.finalnode, self.replacemap"""
+        with self.repo.wlock(), self.repo.lock():
+            with self.repo.transaction('absorb') as tr:
+                self._commitstack()
+                self._movebookmarks(tr)
+                if self.repo['.'].node() in self.replacemap:
+                    self._moveworkingdirectoryparent()
+                if self._useobsolete:
+                    self._obsoleteoldcommits()
+            if not self._useobsolete: # strip must be outside transactions
+                self._stripoldcommits()
+        return self.finalnode
+
+    def printchunkstats(self):
+        """print things like '1 of 2 chunk(s) applied'"""
+        ui = self.ui
+        chunkstats = self.chunkstats
+        if ui.verbose:
+            # chunkstats for each file
+            for path, stat in chunkstats.iteritems():
+                if stat[0]:
+                    ui.write(_('%s: %d of %d chunk(s) applied\n')
+                             % (path, stat[0], stat[1]))
+        elif not ui.quiet:
+            # a summary for all files
+            stats = chunkstats.values()
+            applied, total = (sum(s[i] for s in stats) for i in (0, 1))
+            ui.write(_('%d of %d chunk(s) applied\n') % (applied, total))
+
+    def _commitstack(self):
+        """make new commits. update self.finalnode, self.replacemap.
+        it is split out of "commit" to avoid too much indentation.
+        """
+        # last node (20-char) committed by us
+        lastcommitted = None
+        # p1 which overrides the parent of the next commit, "None" means use
+        # the original parent unchanged
+        nextp1 = None
+        for ctx in self.stack:
+            memworkingcopy = self._getnewfilecontents(ctx)
+            if not memworkingcopy and not lastcommitted:
+                # nothing changed, nothing committed
+                nextp1 = ctx
+                continue
+            msg = ''
+            if self._willbecomenoop(memworkingcopy, ctx, nextp1):
+                # changeset is no longer necessary
+                self.replacemap[ctx.node()] = None
+                msg = _('became empty and was dropped')
+            else:
+                # changeset needs re-commit
+                nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
+                lastcommitted = self.repo[nodestr]
+                nextp1 = lastcommitted
+                self.replacemap[ctx.node()] = lastcommitted.node()
+                if memworkingcopy:
+                    msg = _('%d file(s) changed, became %s') % (
+                        len(memworkingcopy), self._ctx2str(lastcommitted))
+                else:
+                    msg = _('became %s') % self._ctx2str(lastcommitted)
+            if self.ui.verbose and msg:
+                self.ui.write(_('%s: %s\n') % (self._ctx2str(ctx), msg))
+        self.finalnode = lastcommitted and lastcommitted.node()
+
+    def _ctx2str(self, ctx):
+        if self.ui.debugflag:
+            return ctx.hex()
+        else:
+            return node.short(ctx.node())
+
+    def _getnewfilecontents(self, ctx):
+        """(ctx) -> {path: str}
+
+        fetch file contents from filefixupstates.
+        return the working copy overrides - files different from ctx.
+        """
+        result = {}
+        for path in self.paths:
+            ctx2fctx = self.fctxmap[path] # {ctx: fctx}
+            if ctx not in ctx2fctx:
+                continue
+            fctx = ctx2fctx[ctx]
+            content = fctx.data()
+            newcontent = self.fixupmap[path].getfinalcontent(fctx)
+            if content != newcontent:
+                result[fctx.path()] = newcontent
+        return result
+
+    def _movebookmarks(self, tr):
+        repo = self.repo
+        needupdate = [(name, self.replacemap[hsh])
+                      for name, hsh in repo._bookmarks.iteritems()
+                      if hsh in self.replacemap]
+        changes = []
+        for name, hsh in needupdate:
+            if hsh:
+                changes.append((name, hsh))
+                if self.ui.verbose:
+                    self.ui.write(_('moving bookmark %s to %s\n')
+                                  % (name, node.hex(hsh)))
+            else:
+                changes.append((name, None))
+                if self.ui.verbose:
+                    self.ui.write(_('deleting bookmark %s\n') % name)
+        repo._bookmarks.applychanges(repo, tr, changes)
+
+    def _moveworkingdirectoryparent(self):
+        if not self.finalnode:
+            # Find the latest not-{obsoleted,stripped} parent.
+            revs = self.repo.revs('max(::. - %ln)', self.replacemap.keys())
+            ctx = self.repo[revs.first()]
+            self.finalnode = ctx.node()
+        else:
+            ctx = self.repo[self.finalnode]
+
+        dirstate = self.repo.dirstate
+        # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
+        # be slow. in absorb's case, no need to invalidate fsmonitorstate.
+        noop = lambda: 0
+        restore = noop
+        if util.safehasattr(dirstate, '_fsmonitorstate'):
+            bak = dirstate._fsmonitorstate.invalidate
+            def restore():
+                dirstate._fsmonitorstate.invalidate = bak
+            dirstate._fsmonitorstate.invalidate = noop
+        try:
+            with dirstate.parentchange():
+                dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
+        finally:
+            restore()
+
+    @staticmethod
+    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
+        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop
+
+        if it will become an empty commit (does not change anything, after the
+        memworkingcopy overrides), return True. otherwise return False.
+        """
+        if not pctx:
+            parents = ctx.parents()
+            if len(parents) != 1:
+                return False
+            pctx = parents[0]
+        # ctx changes more files (not a subset of memworkingcopy)
+        if not set(ctx.files()).issubset(set(memworkingcopy)):
+            return False
+        for path, content in memworkingcopy.iteritems():
+            if path not in pctx or path not in ctx:
+                return False
+            fctx = ctx[path]
+            pfctx = pctx[path]
+            if pfctx.flags() != fctx.flags():
+                return False
+            if pfctx.data() != content:
+                return False
+        return True
+
+    def _commitsingle(self, memworkingcopy, ctx, p1=None):
+        """(ctx, {path: content}, node) -> node. make a single commit
+
+        the commit is a clone from ctx, with a (optionally) different p1, and
+        different file contents replaced by memworkingcopy.
+        """
+        parents = p1 and (p1, node.nullid)
+        extra = ctx.extra()
+        if self._useobsolete and self.ui.configbool('absorb', 'add-noise'):
+            extra['absorb_source'] = ctx.hex()
+        mctx = overlaycontext(memworkingcopy, ctx, parents, extra=extra)
+        # preserve phase
+        with mctx.repo().ui.configoverride({
+            ('phases', 'new-commit'): ctx.phase()}):
+            return mctx.commit()
+
+    @util.propertycache
+    def _useobsolete(self):
+        """() -> bool"""
+        return obsolete.isenabled(self.repo, obsolete.createmarkersopt)
+
+    def _obsoleteoldcommits(self):
+        relations = [(self.repo[k], v and (self.repo[v],) or ())
+                     for k, v in self.replacemap.iteritems()]
+        if relations:
+            obsolete.createmarkers(self.repo, relations)
+
+    def _stripoldcommits(self):
+        nodelist = self.replacemap.keys()
+        # make sure we don't strip innocent children
+        revs = self.repo.revs('%ln - (::(heads(%ln::)-%ln))', nodelist,
+                              nodelist, nodelist)
+        tonode = self.repo.changelog.node
+        nodelist = [tonode(r) for r in revs]
+        if nodelist:
+            repair.strip(self.repo.ui, self.repo, nodelist)
+
+def _parsechunk(hunk):
+    """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
+    if type(hunk) not in (crecord.uihunk, patch.recordhunk):
+        return None, None
+    path = hunk.header.filename()
+    a1 = hunk.fromline + len(hunk.before) - 1
+    # remove before and after context
+    hunk.before = hunk.after = []
+    buf = util.stringio()
+    hunk.write(buf)
+    patchlines = mdiff.splitnewlines(buf.getvalue())
+    # hunk.prettystr() will update hunk.removed
+    a2 = a1 + hunk.removed
+    blines = [l[1:] for l in patchlines[1:] if l[0] != '-']
+    return path, (a1, a2, blines)
+
+def overlaydiffcontext(ctx, chunks):
+    """(ctx, [crecord.uihunk]) -> memctx
+
+    return a memctx with some [1] patches (chunks) applied to ctx.
+    [1]: modifications are handled. renames, mode changes, etc. are ignored.
+    """
+    # sadly the applying-patch logic is hardly reusable, and messy:
+    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
+    #    needs a file stream of a patch and will re-parse it, while we have
+    #    structured hunk objects at hand.
+    # 2. a lot of different implementations about "chunk" (patch.hunk,
+    #    patch.recordhunk, crecord.uihunk)
+    # as we only care about applying changes to modified files, no mode
+    # change, no binary diff, and no renames, it's probably okay to
+    # re-invent the logic using much simpler code here.
+    memworkingcopy = {} # {path: content}
+    patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]}
+    for path, info in map(_parsechunk, chunks):
+        if not path or not info:
+            continue
+        patchmap[path].append(info)
+    for path, patches in patchmap.iteritems():
+        if path not in ctx or not patches:
+            continue
+        patches.sort(reverse=True)
+        lines = mdiff.splitnewlines(ctx[path].data())
+        for a1, a2, blines in patches:
+            lines[a1:a2] = blines
+        memworkingcopy[path] = ''.join(lines)
+    return overlaycontext(memworkingcopy, ctx)
+
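The patch application at the end of overlaydiffcontext() is plain list
splicing; sorting the ranges in reverse and applying bottom-up keeps the
earlier indices valid. A minimal standalone version::

    lines = ['a\n', 'b\n', 'c\n', 'd\n']
    patches = [(0, 1, ['A\n']), (2, 4, ['C\n'])]  # (a1, a2, blines)
    for a1, a2, blines in sorted(patches, reverse=True):
        lines[a1:a2] = blines
    print(''.join(lines))  # A\nb\nC\n
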
+def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
+    """pick fixup chunks from targetctx, apply them to stack.
+
+    if targetctx is None, the working copy context will be used.
+    if stack is None, the current draft stack will be used.
+    return fixupstate.
+    """
+    if stack is None:
+        limit = ui.configint('absorb', 'max-stack-size')
+        stack = getdraftstack(repo['.'], limit)
+        if limit and len(stack) >= limit:
+            ui.warn(_('absorb: only the recent %d changesets will '
+                      'be analysed\n')
+                    % limit)
+    if not stack:
+        raise error.Abort(_('no changeset to change'))
+    if targetctx is None: # default to working copy
+        targetctx = repo[None]
+    if pats is None:
+        pats = ()
+    if opts is None:
+        opts = {}
+    state = fixupstate(stack, ui=ui, opts=opts)
+    matcher = scmutil.match(targetctx, pats, opts)
+    if opts.get('interactive'):
+        diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
+        origchunks = patch.parsepatch(diff)
+        chunks = cmdutil.recordfilter(ui, origchunks)[0]
+        targetctx = overlaydiffcontext(stack[-1], chunks)
+    state.diffwith(targetctx, matcher, showchanges=opts.get('print_changes'))
+    if not opts.get('dry_run'):
+        state.apply()
+        if state.commit():
+            state.printchunkstats()
+        elif not ui.quiet:
+            ui.write(_('nothing applied\n'))
+    return state
+
+@command('^absorb',
+         [('p', 'print-changes', None,
+           _('print which changesets are modified by which changes')),
+          ('i', 'interactive', None,
+           _('interactively select which chunks to apply (EXPERIMENTAL)')),
+          ('e', 'edit-lines', None,
+           _('edit what lines belong to which changesets before commit '
+             '(EXPERIMENTAL)')),
+         ] + commands.dryrunopts + commands.walkopts,
+         _('hg absorb [OPTION] [FILE]...'))
+def absorbcmd(ui, repo, *pats, **opts):
+    """incorporate corrections into the stack of draft changesets
+
+    absorb analyzes each change in your working directory and attempts to
+    amend the changed lines into the changesets in your stack that first
+    introduced those lines.
+
+    If absorb cannot find an unambiguous changeset to amend for a change,
+    that change will be left in the working directory, untouched. Such
+    changes can be observed by :hg:`status` or :hg:`diff` afterwards. In
+    other words,
+    absorb does not write to the working directory.
+
+    Changesets outside the revset `::. and not public() and not merge()` will
+    not be changed.
+
+    Changesets that become empty after applying the changes will be deleted.
+
+    If in doubt, run :hg:`absorb -pn` to preview what changesets will
+    be amended by what changed lines, without actually changing anything.
+
+    Returns 0 on success, 1 if all chunks were ignored and nothing amended.
+    """
+    state = absorb(ui, repo, pats=pats, opts=opts)
+    if sum(s[0] for s in state.chunkstats.values()) == 0:
+        return 1
--- a/hgext/acl.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/acl.py	Tue Sep 04 12:16:28 2018 -0400
@@ -220,6 +220,7 @@
     error,
     extensions,
     match,
+    pycompat,
     registrar,
     util,
 )
@@ -403,7 +404,7 @@
     allow = buildmatch(ui, repo, user, 'acl.allow')
     deny = buildmatch(ui, repo, user, 'acl.deny')
 
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         ctx = repo[rev]
         branch = ctx.branch()
         if denybranches and denybranches(branch):
--- a/hgext/beautifygraph.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/beautifygraph.py	Tue Sep 04 12:16:28 2018 -0400
@@ -18,6 +18,7 @@
     encoding,
     extensions,
     graphmod,
+    pycompat,
     templatekw,
 )
 
@@ -53,8 +54,10 @@
 def convertedges(line):
     line = ' %s ' % line
     pretty = []
-    for idx in xrange(len(line) - 2):
-        pretty.append(prettyedge(line[idx], line[idx + 1], line[idx + 2]))
+    for idx in pycompat.xrange(len(line) - 2):
+        pretty.append(prettyedge(line[idx:idx + 1],
+                                 line[idx + 1:idx + 2],
+                                 line[idx + 2:idx + 3]))
     return ''.join(pretty)
 
 def getprettygraphnode(orig, *args, **kwargs):
@@ -84,7 +87,7 @@
         ui.warn(_('beautifygraph: unsupported encoding, UTF-8 required\n'))
         return
 
-    if 'A' in encoding._wide:
+    if r'A' in encoding._wide:
         ui.warn(_('beautifygraph: unsupported terminal settings, '
                   'monospace narrow text required\n'))
         return
--- a/hgext/blackbox.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/blackbox.py	Tue Sep 04 12:16:28 2018 -0400
@@ -45,6 +45,7 @@
 
 from mercurial import (
     encoding,
+    pycompat,
     registrar,
     ui as uimod,
     util,
@@ -111,7 +112,7 @@
             if st.st_size >= maxsize:
                 path = vfs.join(name)
                 maxfiles = ui.configint('blackbox', 'maxfiles')
-                for i in xrange(maxfiles - 1, 1, -1):
+                for i in pycompat.xrange(maxfiles - 1, 1, -1):
                     rotate(oldpath='%s.%d' % (path, i - 1),
                            newpath='%s.%d' % (path, i))
                 rotate(oldpath=path,
--- a/hgext/censor.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/censor.py	Tue Sep 04 12:16:28 2018 -0400
@@ -32,6 +32,7 @@
 
 from mercurial import (
     error,
+    pycompat,
     registrar,
     revlog,
     scmutil,
@@ -160,7 +161,7 @@
     offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)
 
     # Rewrite all following filelog revisions fixing up offsets and deltas.
-    for srev in xrange(crev + 1, len(flog)):
+    for srev in pycompat.xrange(crev + 1, len(flog)):
         if crev in flog.parentrevs(srev):
             # Immediate children of censored node must be re-added as fulltext.
             try:
--- a/hgext/commitextras.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/commitextras.py	Tue Sep 04 12:16:28 2018 -0400
@@ -17,6 +17,7 @@
     error,
     extensions,
     registrar,
+    util,
 )
 
 cmdtable = {}
@@ -43,34 +44,29 @@
         _('set a changeset\'s extra values'), _("KEY=VALUE")))
 
 def _commit(orig, ui, repo, *pats, **opts):
-    origcommit = repo.commit
-    try:
-        def _wrappedcommit(*innerpats, **inneropts):
+    if util.safehasattr(repo, 'unfiltered'):
+        repo = repo.unfiltered()
+    class repoextra(repo.__class__):
+        def commit(self, *innerpats, **inneropts):
             extras = opts.get(r'extra')
-            if extras:
-                for raw in extras:
-                    if '=' not in raw:
-                        msg = _("unable to parse '%s', should follow "
-                                "KEY=VALUE format")
-                        raise error.Abort(msg % raw)
-                    k, v = raw.split('=', 1)
-                    if not k:
-                        msg = _("unable to parse '%s', keys can't be empty")
-                        raise error.Abort(msg % raw)
-                    if re.search('[^\w-]', k):
-                        msg = _("keys can only contain ascii letters, digits,"
-                                " '_' and '-'")
-                        raise error.Abort(msg)
-                    if k in usedinternally:
-                        msg = _("key '%s' is used internally, can't be set "
-                                "manually")
-                        raise error.Abort(msg % k)
-                    inneropts[r'extra'][k] = v
-            return origcommit(*innerpats, **inneropts)
-
-        # This __dict__ logic is needed because the normal
-        # extension.wrapfunction doesn't seem to work.
-        repo.__dict__[r'commit'] = _wrappedcommit
-        return orig(ui, repo, *pats, **opts)
-    finally:
-        del repo.__dict__[r'commit']
+            for raw in extras:
+                if '=' not in raw:
+                    msg = _("unable to parse '%s', should follow "
+                            "KEY=VALUE format")
+                    raise error.Abort(msg % raw)
+                k, v = raw.split('=', 1)
+                if not k:
+                    msg = _("unable to parse '%s', keys can't be empty")
+                    raise error.Abort(msg % raw)
+                if re.search('[^\w-]', k):
+                    msg = _("keys can only contain ascii letters, digits,"
+                            " '_' and '-'")
+                    raise error.Abort(msg)
+                if k in usedinternally:
+                    msg = _("key '%s' is used internally, can't be set "
+                            "manually")
+                    raise error.Abort(msg % k)
+                inneropts[r'extra'][k] = v
+            return super(repoextra, self).commit(*innerpats, **inneropts)
+    repo.__class__ = repoextra
+    return orig(ui, repo, *pats, **opts)
--- a/hgext/convert/cvsps.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/convert/cvsps.py	Tue Sep 04 12:16:28 2018 -0400
@@ -763,7 +763,7 @@
             # branchpoints such that it is the latest possible
             # commit without any intervening, unrelated commits.
 
-            for candidate in xrange(i):
+            for candidate in pycompat.xrange(i):
                 if c.branch not in changesets[candidate].branchpoints:
                     if p is not None:
                         break
--- a/hgext/convert/hg.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/convert/hg.py	Tue Sep 04 12:16:28 2018 -0400
@@ -358,7 +358,7 @@
             p2 = node
 
         if self.filemapmode and nparents == 1:
-            man = self.repo.manifestlog._revlog
+            man = self.repo.manifestlog.getstorage(b'')
             mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
             closed = 'close' in commit.extra
             if not closed and not man.cmp(m1node, man.revision(mnode)):
--- a/hgext/eol.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/eol.py	Tue Sep 04 12:16:28 2018 -0400
@@ -266,7 +266,7 @@
     ensureenabled(ui)
     files = set()
     revs = set()
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         revs.add(rev)
         if headsonly:
             ctx = repo[rev]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/__init__.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,193 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# fastannotate: faster annotate implementation using linelog
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""yet another annotate implementation that might be faster (EXPERIMENTAL)
+
+The fastannotate extension provides a 'fastannotate' command that makes
+use of the linelog data structure as a cache layer and is expected to
+be faster than the vanilla 'annotate' if the cache is present.
+
+To be most efficient, fastannotate usually requires that mainbranch be set
+to some pointer that always moves forward.
+
+Using fastannotate together with linkrevcache would speed up building the
+annotate cache greatly. Run "debugbuildlinkrevcache" before
+"debugbuildannotatecache".
+
+::
+
+    [fastannotate]
+    # specify the main branch head. the internal linelog will only contain
+    # the linear (ignoring p2) "mainbranch". since linelog cannot move
+    # backwards without a rebuild, this should be something that always moves
+    # forward, usually it is "master" or "@".
+    mainbranch = master
+
+    # fastannotate supports different modes to expose its feature.
+    # a list of combination:
+    # - fastannotate: expose the feature via the "fastannotate" command which
+    #   deals with everything in a most efficient way, and provides extra
+    #   features like --deleted etc.
+    # - fctx: replace fctx.annotate implementation. note:
+    #     a. it is less efficient than the "fastannotate" command
+    #     b. it will make it practically impossible to access the old (disk
+    #        side-effect free) annotate implementation
+    #     c. it implies "hgweb".
+    # - hgweb: replace hgweb's annotate implementation. conflict with "fctx".
+    # (default: fastannotate)
+    modes = fastannotate
+
+    # default format when no format flags are used (default: number)
+    defaultformat = changeset, user, date
+
+    # serve the annotate cache via wire protocol (default: False)
+    # tip: the .hg/fastannotate directory is portable - can be rsynced
+    server = True
+
+    # build annotate cache on demand for every client request (default: True)
+    # disabling it could make server response faster, useful when there is a
+    # cronjob building the cache.
+    serverbuildondemand = True
+
+    # update local annotate cache from remote on demand
+    client = False
+
+    # path to use when connecting to the remote server (default: default)
+    remotepath = default
+
+    # minimal length of the history of a file required to fetch linelog from
+    # the server. (default: 10)
+    clientfetchthreshold = 10
+
+    # use flock instead of the file existence lock
+    # flock may not work well on some network filesystems, but it avoids
+    # creating and deleting files frequently, which is faster when updating
+    # the annotate cache in batch. if you have issues with this option, set it
+    # to False. (default: True if flock is supported, False otherwise)
+    useflock = True
+
+    # for "fctx" mode, always follow renames regardless of command line option.
+    # this is a BC with the original command but will reduced the space needed
+    # for annotate cache, and is useful for client-server setup since the
+    # server will only provide annotate cache with default options (i.e. with
+    # follow). do not affect "fastannotate" mode. (default: True)
+    forcefollow = True
+
+    # for "fctx" mode, always treat file as text files, to skip the "isbinary"
+    # check. this is consistent with the "fastannotate" command and could help
+    # to avoid a file fetch if remotefilelog is used. (default: True)
+    forcetext = True
+
+    # use unfiltered repo for better performance.
+    unfilteredrepo = True
+
+    # sacrifice correctness in some corner cases for performance. it does not
+    # affect the correctness of the annotate cache being built. the option
+    # is experimental and may disappear in the future (default: False)
+    perfhack = True
+"""
+
+# TODO from import:
+# * `branch` is probably the wrong term, throughout the code.
+#
+# * replace the fastannotate `modes` configuration with a collection
+#   of booleans.
+#
+# * Use the templater instead of bespoke formatting
+#
+# * rename the config knob for updating the local cache from a remote server
+#
+# * move `flock` based locking to a common area
+#
+# * revise wireprotocol for sharing annotate files
+#
+# * figure out a sensible default for `mainbranch` (with the caveat
+#   that we probably also want to figure out a better term than
+#   `branch`, see above)
+#
+# * format changes to the revmap file (maybe use length-encoding
+#   instead of null-terminated file paths at least?)
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    configitems,
+    error as hgerror,
+    localrepo,
+    registrar,
+)
+
+from . import (
+    commands,
+    context,
+    protocol,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = commands.cmdtable
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('fastannotate', 'modes', default=['fastannotate'])
+configitem('fastannotate', 'server', default=False)
+configitem('fastannotate', 'useflock', default=configitems.dynamicdefault)
+configitem('fastannotate', 'client', default=False)
+configitem('fastannotate', 'unfilteredrepo', default=True)
+configitem('fastannotate', 'defaultformat', default=['number'])
+configitem('fastannotate', 'perfhack', default=False)
+configitem('fastannotate', 'mainbranch')
+configitem('fastannotate', 'forcetext', default=True)
+configitem('fastannotate', 'forcefollow', default=True)
+configitem('fastannotate', 'clientfetchthreshold', default=10)
+configitem('fastannotate', 'serverbuildondemand', default=True)
+configitem('fastannotate', 'remotepath', default='default')
+
+def _flockavailable():
+    try:
+        import fcntl
+        fcntl.flock
+    except StandardError:
+        return False
+    else:
+        return True
+
+def uisetup(ui):
+    modes = set(ui.configlist('fastannotate', 'modes'))
+    if 'fctx' in modes:
+        modes.discard('hgweb')
+    for name in modes:
+        if name == 'fastannotate':
+            commands.registercommand()
+        elif name == 'hgweb':
+            from . import support
+            support.replacehgwebannotate()
+        elif name == 'fctx':
+            from . import support
+            support.replacefctxannotate()
+            commands.wrapdefault()
+        else:
+            raise hgerror.Abort(_('fastannotate: invalid mode: %s') % name)
+
+    if ui.configbool('fastannotate', 'server'):
+        protocol.serveruisetup(ui)
+
+    if ui.configbool('fastannotate', 'useflock', _flockavailable()):
+        context.pathhelper.lock = context.pathhelper._lockflock
+
+def extsetup(ui):
+    # fastannotate has its own locking, without depending on repo lock
+    # TODO: avoid mutating this unless the specific repo has it enabled
+    localrepo.localrepository._wlockfreeprefix.add('fastannotate/')
+
+def reposetup(ui, repo):
+    if ui.configbool('fastannotate', 'client'):
+        protocol.clientreposetup(ui, repo)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/commands.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,281 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# commands: fastannotate commands
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    commands,
+    error,
+    extensions,
+    patch,
+    pycompat,
+    registrar,
+    scmutil,
+    util,
+)
+
+from . import (
+    context as facontext,
+    error as faerror,
+    formatter as faformatter,
+)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
+    """generate paths matching given patterns"""
+    perfhack = repo.ui.configbool('fastannotate', 'perfhack')
+
+    # disable perfhack if:
+    # a) any walkopt is used
+    # b) if we treat pats as plain file names, some of them do not have
+    #    corresponding linelog files
+    if perfhack:
+        # cwd related to reporoot
+        reporoot = os.path.dirname(repo.path)
+        reldir = os.path.relpath(pycompat.getcwd(), reporoot)
+        if reldir == '.':
+            reldir = ''
+        if any(opts.get(o[1]) for o in commands.walkopts): # a)
+            perfhack = False
+        else: # b)
+            relpats = [os.path.relpath(p, reporoot) if os.path.isabs(p) else p
+                       for p in pats]
+            # disable perfhack on '..' since it allows escaping from the repo
+            if any(('..' in f or
+                    not os.path.isfile(
+                        facontext.pathhelper(repo, f, aopts).linelogpath))
+                   for f in relpats):
+                perfhack = False
+
+    # perfhack: emit paths directly without checking with the manifest
+    # this can be incorrect if the rev does not have the file.
+    if perfhack:
+        for p in relpats:
+            yield os.path.join(reldir, p)
+    else:
+        def bad(x, y):
+            raise error.Abort("%s: %s" % (x, y))
+        ctx = scmutil.revsingle(repo, rev)
+        m = scmutil.match(ctx, pats, opts, badfn=bad)
+        for p in ctx.walk(m):
+            yield p
+
+fastannotatecommandargs = {
+    r'options': [
+        ('r', 'rev', '.', _('annotate the specified revision'), _('REV')),
+        ('u', 'user', None, _('list the author (long with -v)')),
+        ('f', 'file', None, _('list the filename')),
+        ('d', 'date', None, _('list the date (short with -q)')),
+        ('n', 'number', None, _('list the revision number (default)')),
+        ('c', 'changeset', None, _('list the changeset')),
+        ('l', 'line-number', None, _('show line number at the first '
+                                     'appearance')),
+        ('e', 'deleted', None, _('show deleted lines (slow) (EXPERIMENTAL)')),
+        ('', 'no-content', None, _('do not show file content (EXPERIMENTAL)')),
+        ('', 'no-follow', None, _("don't follow copies and renames")),
+        ('', 'linear', None, _('enforce linear history, ignore second parent '
+                               'of merges (EXPERIMENTAL)')),
+        ('', 'long-hash', None, _('show long changeset hash (EXPERIMENTAL)')),
+        ('', 'rebuild', None, _('rebuild cache even if it exists '
+                                '(EXPERIMENTAL)')),
+    ] + commands.diffwsopts + commands.walkopts + commands.formatteropts,
+    r'synopsis': _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
+    r'inferrepo': True,
+}
+
+def fastannotate(ui, repo, *pats, **opts):
+    """show changeset information by line for each file
+
+    List changes in files, showing the revision id responsible for each line.
+
+    This command is useful for discovering when a change was made and by whom.
+
+    By default this command prints revision numbers. If you include --file,
+    --user, or --date, the revision number is suppressed unless you also
+    include --number. The default format can also be customized by setting
+    fastannotate.defaultformat.
+
+    Returns 0 on success.
+
+    .. container:: verbose
+
+        This command uses an implementation different from the vanilla annotate
+        command, which may produce slightly different (while still reasonable)
+        outputs for some cases.
+
+        Unlike the vanilla annotate, fastannotate follows renames regardless
+        of the existence of --file.
+
+        For the best performance when running on a full repo, use -c and -l,
+        and avoid -u, -d, -n. Use --linear and --no-content to make it even
+        faster.
+
+        For the best performance when running on a shallow (remotefilelog)
+        repo, avoid --linear, --no-follow, and any diff options, as the
+        server won't be able to populate the annotate cache when non-default
+        options affecting results are used.
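+
+        For instance, a fast full-repo invocation could look like::
+
+            hg fastannotate -c -l --no-content FILE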
+    """
+    if not pats:
+        raise error.Abort(_('at least one filename or pattern is required'))
+
+    # performance hack: filtered repo can be slow. unfilter by default.
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+
+    rev = opts.get('rev', '.')
+    rebuild = opts.get('rebuild', False)
+
+    diffopts = patch.difffeatureopts(ui, opts, section='annotate',
+                                     whitespace=True)
+    aopts = facontext.annotateopts(
+        diffopts=diffopts,
+        followmerge=not opts.get('linear', False),
+        followrename=not opts.get('no_follow', False))
+
+    if not any(opts.get(s)
+               for s in ['user', 'date', 'file', 'number', 'changeset']):
+        # default 'number' for compatibility. but fastannotate is more
+        # efficient with "changeset", "line-number" and "no-content".
+        for name in ui.configlist('fastannotate', 'defaultformat', ['number']):
+            opts[name] = True
+
+    ui.pager('fastannotate')
+    template = opts.get('template')
+    if template == 'json':
+        formatter = faformatter.jsonformatter(ui, repo, opts)
+    else:
+        formatter = faformatter.defaultformatter(ui, repo, opts)
+    showdeleted = opts.get('deleted', False)
+    showlines = not bool(opts.get('no_content'))
+    showpath = opts.get('file', False)
+
+    # find the head of the main (master) branch
+    master = ui.config('fastannotate', 'mainbranch') or rev
+
+    # paths will be used for prefetching and the real annotating
+    paths = list(_matchpaths(repo, rev, pats, opts, aopts))
+
+    # for client, prefetch from the server
+    if util.safehasattr(repo, 'prefetchfastannotate'):
+        repo.prefetchfastannotate(paths)
+
+    for path in paths:
+        result = lines = existinglines = None
+        while True:
+            try:
+                with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
+                    result = a.annotate(rev, master=master, showpath=showpath,
+                                        showlines=(showlines and
+                                                   not showdeleted))
+                    if showdeleted:
+                        existinglines = set((l[0], l[1]) for l in result)
+                        result = a.annotatealllines(
+                            rev, showpath=showpath, showlines=showlines)
+                break
+            except (faerror.CannotReuseError, faerror.CorruptedFileError):
+                # happens if master moves backwards, or the file was deleted
+                # and readded, or renamed to an existing name, or corrupted.
+                if rebuild: # give up since we have tried rebuild already
+                    raise
+                else: # try a second time rebuilding the cache (slow)
+                    rebuild = True
+                    continue
+
+        if showlines:
+            result, lines = result
+
+        formatter.write(result, lines, existinglines=existinglines)
+    formatter.end()
+
+_newopts = set([])
+_knownopts = set([opt[1].replace('-', '_') for opt in
+                  (fastannotatecommandargs[r'options'] + commands.globalopts)])
+
+def _annotatewrapper(orig, ui, repo, *pats, **opts):
+    """used by wrapdefault"""
+    # we need this hack until the obsstore has 0.0 seconds perf impact
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+
+    # treat the file as text (skip the isbinary check)
+    if ui.configbool('fastannotate', 'forcetext'):
+        opts['text'] = True
+
+    # check if we need to do prefetch (client-side)
+    rev = opts.get('rev')
+    if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
+        paths = list(_matchpaths(repo, rev, pats, opts))
+        repo.prefetchfastannotate(paths)
+
+    return orig(ui, repo, *pats, **opts)
+
+def registercommand():
+    """register the fastannotate command"""
+    name = '^fastannotate|fastblame|fa'
+    command(name, **fastannotatecommandargs)(fastannotate)
+
+def wrapdefault():
+    """wrap the default annotate command, to be aware of the protocol"""
+    extensions.wrapcommand(commands.table, 'annotate', _annotatewrapper)
+
+@command('debugbuildannotatecache',
+         [('r', 'rev', '', _('build up to the specific revision'), _('REV'))
+         ] + commands.walkopts,
+         _('[-r REV] FILE...'))
+def debugbuildannotatecache(ui, repo, *pats, **opts):
+    """incrementally build fastannotate cache up to REV for specified files
+
+    If REV is not specified, use the config 'fastannotate.mainbranch'.
+
+    If fastannotate.client is True, download the annotate cache from the
+    server. Otherwise, build the annotate cache locally.
+
+    The annotate cache will be built using the default diff and follow
+    options and lives in '.hg/fastannotate/default'.
+    """
+    rev = opts.get('REV') or ui.config('fastannotate', 'mainbranch')
+    if not rev:
+        raise error.Abort(_('you need to provide a revision'),
+                          hint=_('set fastannotate.mainbranch or use --rev'))
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+    ctx = scmutil.revsingle(repo, rev)
+    m = scmutil.match(ctx, pats, opts)
+    paths = list(ctx.walk(m))
+    if util.safehasattr(repo, 'prefetchfastannotate'):
+        # client
+        if opts.get('REV'):
+            raise error.Abort(_('--rev cannot be used for client'))
+        repo.prefetchfastannotate(paths)
+    else:
+        # server, or full repo
+        for i, path in enumerate(paths):
+            ui.progress(_('building'), i, total=len(paths))
+            with facontext.annotatecontext(repo, path) as actx:
+                try:
+                    if actx.isuptodate(rev):
+                        continue
+                    actx.annotate(rev, rev)
+                except (faerror.CannotReuseError, faerror.CorruptedFileError):
+                    # the cache is broken (could happen with renaming so the
+                    # file history gets invalidated). rebuild and try again.
+                    ui.debug('fastannotate: %s: rebuilding broken cache\n'
+                             % path)
+                    actx.rebuild()
+                    try:
+                        actx.annotate(rev, rev)
+                    except Exception as ex:
+                        # possibly a bug, but should not stop us from building
+                        # cache for other files.
+                        ui.warn(_('fastannotate: %s: failed to '
+                                  'build cache: %r\n') % (path, ex))
+        # clear the progress bar
+        ui.write()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/context.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,826 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# context: context needed to annotate a file
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import collections
+import contextlib
+import hashlib
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    linelog as linelogmod,
+    lock as lockmod,
+    mdiff,
+    node,
+    pycompat,
+    scmutil,
+    util,
+)
+from mercurial.utils import (
+    stringutil,
+)
+
+from . import (
+    error as faerror,
+    revmap as revmapmod,
+)
+
+# given path, get filelog, cached
+@util.lrucachefunc
+def _getflog(repo, path):
+    return repo.file(path)
+
+# extracted from mercurial.context.basefilectx.annotate
+def _parents(f, follow=True):
+    # Cut _descendantrev here to mitigate the penalty of lazy linkrev
+    # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
+    # from the topmost introrev (= srcrev) down to p.linkrev() if it
+    # isn't an ancestor of the srcrev.
+    f._changeid
+    pl = f.parents()
+
+    # Don't return renamed parents if we aren't following.
+    if not follow:
+        pl = [p for p in pl if p.path() == f.path()]
+
+    # renamed filectx won't have a filelog yet, so set it
+    # from the cache to save time
+    for p in pl:
+        if not '_filelog' in p.__dict__:
+            p._filelog = _getflog(f._repo, p.path())
+
+    return pl
+
+# extracted from mercurial.context.basefilectx.annotate. slightly modified
+# so it takes a fctx instead of a pair of text and fctx.
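+# e.g. a fctx whose data is "a\nb\n" decorates to
+# ([(fctx, 0), (fctx, 1)], 'a\nb\n'); a missing trailing newline still
+# counts as one extra line.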
+def _decorate(fctx):
+    text = fctx.data()
+    linecount = text.count('\n')
+    if text and not text.endswith('\n'):
+        linecount += 1
+    return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
+
+# extracted from mercurial.context.basefilectx.annotate. slightly modified
+# so it takes an extra "blocks" parameter calculated elsewhere, instead of
+# calculating diff here.
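+# sketch: a block ((a1, a2, b1, b2), '=') copies parent[0][a1:a2] into
+# child[0][b1:b2], so unchanged lines keep the parent's annotation while
+# '!' and '~' blocks keep the child's own.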
+def _pair(parent, child, blocks):
+    for (a1, a2, b1, b2), t in blocks:
+        # Changed blocks ('!') or blocks made only of blank lines ('~')
+        # belong to the child.
+        if t == '=':
+            child[0][b1:b2] = parent[0][a1:a2]
+    return child
+
+# like scmutil.revsingle, but with an lru cache, so the resolved contexts'
+# states (like manifests) can be reused
+_revsingle = util.lrucachefunc(scmutil.revsingle)
+
+def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
+    """(repo, str, str) -> fctx
+
+    get the filectx object from repo, rev, path, in an efficient way.
+
+    if resolverev is True, "rev" is a revision specified by the revset
+    language, otherwise "rev" is a nodeid, or a revision number that can
+    be consumed by repo.__getitem__.
+
+    if adjustctx is not None, the returned fctx will point to a changeset
+    that introduces the change (last modified the file). if adjustctx
+    is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
+    faster for big repos but is incorrect for some cases.
+    """
+    if resolverev and not isinstance(rev, int) and rev is not None:
+        ctx = _revsingle(repo, rev)
+    else:
+        ctx = repo[rev]
+
+    # If we don't need to adjust the linkrev, create the filectx using the
+    # changectx instead of using ctx[path]. This means it already has the
+    # changectx information, so blame -u will be able to look directly at the
+    # commitctx object instead of having to resolve it by going through the
+    # manifest. In a lazy-manifest world this can prevent us from downloading a
+    # lot of data.
+    if adjustctx is None:
+        # ctx.rev() is None means it's the working copy, which is a special
+        # case.
+        if ctx.rev() is None:
+            fctx = ctx[path]
+        else:
+            fctx = repo.filectx(path, changeid=ctx.rev())
+    else:
+        fctx = ctx[path]
+        if adjustctx == 'linkrev':
+            introrev = fctx.linkrev()
+        else:
+            introrev = fctx.introrev()
+        if introrev != ctx.rev():
+            fctx._changeid = introrev
+            fctx._changectx = repo[introrev]
+    return fctx
+
+# like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
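+# e.g. encodedir('a.l/b.m/c.lock/d') == 'a.l.hg/b.m.hg/c.lock.hg/d', so
+# on-disk cache paths never collide with the '.l', '.m' and '.lock'
+# suffixes appended by pathhelper below.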
+def encodedir(path):
+    return (path
+            .replace('.hg/', '.hg.hg/')
+            .replace('.l/', '.l.hg/')
+            .replace('.m/', '.m.hg/')
+            .replace('.lock/', '.lock.hg/'))
+
+def hashdiffopts(diffopts):
+    diffoptstr = stringutil.pprint(sorted(
+        (k, getattr(diffopts, k))
+        for k in mdiff.diffopts.defaults
+    ))
+    return hashlib.sha1(diffoptstr).hexdigest()[:6]
+
+_defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
+
+class annotateopts(object):
+    """like mercurial.mdiff.diffopts, but is for annotate
+
+    followrename: follow renames, like "hg annotate -f"
+    followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
+    """
+
+    defaults = {
+        'diffopts': None,
+        'followrename': True,
+        'followmerge': True,
+    }
+
+    def __init__(self, **opts):
+        for k, v in self.defaults.iteritems():
+            setattr(self, k, opts.get(k, v))
+
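+    # sketch of shortstr below: followrename=False and followmerge=False
+    # yield 'r0m0', all defaults yield 'default', and non-default diffopts
+    # append 'i' plus a 6-char hash of the diff options.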
+    @util.propertycache
+    def shortstr(self):
+        """represent opts in a short string, suitable for a directory name"""
+        result = ''
+        if not self.followrename:
+            result += 'r0'
+        if not self.followmerge:
+            result += 'm0'
+        if self.diffopts is not None:
+            assert isinstance(self.diffopts, mdiff.diffopts)
+            diffopthash = hashdiffopts(self.diffopts)
+            if diffopthash != _defaultdiffopthash:
+                result += 'i' + diffopthash
+        return result or 'default'
+
+defaultopts = annotateopts()
+
+class _annotatecontext(object):
+    """do not use this class directly as it does not use lock to protect
+    writes. use "with annotatecontext(...)" instead.
+    """
+
+    def __init__(self, repo, path, linelogpath, revmappath, opts):
+        self.repo = repo
+        self.ui = repo.ui
+        self.path = path
+        self.opts = opts
+        self.linelogpath = linelogpath
+        self.revmappath = revmappath
+        self._linelog = None
+        self._revmap = None
+        self._node2path = {} # {str: str}
+
+    @property
+    def linelog(self):
+        if self._linelog is None:
+            if os.path.exists(self.linelogpath):
+                with open(self.linelogpath, 'rb') as f:
+                    try:
+                        self._linelog = linelogmod.linelog.fromdata(f.read())
+                    except linelogmod.LineLogError:
+                        self._linelog = linelogmod.linelog()
+            else:
+                self._linelog = linelogmod.linelog()
+        return self._linelog
+
+    @property
+    def revmap(self):
+        if self._revmap is None:
+            self._revmap = revmapmod.revmap(self.revmappath)
+        return self._revmap
+
+    def close(self):
+        if self._revmap is not None:
+            self._revmap.flush()
+            self._revmap = None
+        if self._linelog is not None:
+            with open(self.linelogpath, 'wb') as f:
+                f.write(self._linelog.encode())
+            self._linelog = None
+
+    __del__ = close
+
+    def rebuild(self):
+        """delete linelog and revmap, useful for rebuilding"""
+        self.close()
+        self._node2path.clear()
+        _unlinkpaths([self.revmappath, self.linelogpath])
+
+    @property
+    def lastnode(self):
+        """return last node in revmap, or None if revmap is empty"""
+        if self._revmap is None:
+            # fast path, read revmap without loading its full content
+            return revmapmod.getlastnode(self.revmappath)
+        else:
+            return self._revmap.rev2hsh(self._revmap.maxrev)
+
+    def isuptodate(self, master, strict=True):
+        """return True if the revmap / linelog is up-to-date, or the file
+        does not exist in the master revision. False otherwise.
+
+        it tries to be fast and could return false negatives, because of the
+        use of linkrev instead of introrev.
+
+        useful for both server and client to decide whether to update
+        fastannotate cache or not.
+
+        if strict is True, isuptodate will return False if fctx exists in
+        the revmap but is not the last node. this is good for performance -
+        no expensive check is done.
+
+        if strict is False, if fctx exists in the revmap, this function may
+        return True. this is useful for the client to skip downloading the
+        cache if the client's master is behind the server's.
+        """
+        lastnode = self.lastnode
+        try:
+            f = self._resolvefctx(master, resolverev=True)
+            # choose linkrev instead of introrev as the check is meant to be
+            # *fast*.
+            linknode = self.repo.changelog.node(f.linkrev())
+            if not strict and lastnode and linknode != lastnode:
+                # check if f.node() is in the revmap. note: this loads the
+                # revmap and can be slow.
+                return self.revmap.hsh2rev(linknode) is not None
+            # avoid resolving old manifest, or slow adjustlinkrev to be fast,
+            # false negatives are acceptable in this case.
+            return linknode == lastnode
+        except LookupError:
+            # master does not have the file, or the revmap is ahead
+            return True
+
+    def annotate(self, rev, master=None, showpath=False, showlines=False):
+        """incrementally update the cache so it includes revisions in the main
+        branch till 'master'. and run annotate on 'rev', which may or may not be
+        included in the main branch.
+
+        if master is None, do not update linelog.
+
+        the first value returned is the annotate result, it is [(node, linenum)]
+        by default. [(node, linenum, path)] if showpath is True.
+
+        if showlines is True, a second value will be returned, it is a list of
+        corresponding line contents.
+        """
+
+        # the fast path test requires a commit hash; convert rev number to hash,
+        # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
+        # command could give us a revision number even if the user passes a
+        # commit hash.
+        if isinstance(rev, int):
+            rev = node.hex(self.repo.changelog.node(rev))
+
+        # fast path: if rev is in the main branch already
+        directly, revfctx = self.canannotatedirectly(rev)
+        if directly:
+            if self.ui.debugflag:
+                self.ui.debug('fastannotate: %s: using fast path '
+                              '(resolved fctx: %s)\n'
+                              % (self.path, util.safehasattr(revfctx, 'node')))
+            return self.annotatedirectly(revfctx, showpath, showlines)
+
+        # resolve master
+        masterfctx = None
+        if master:
+            try:
+                masterfctx = self._resolvefctx(master, resolverev=True,
+                                               adjustctx=True)
+            except LookupError: # master does not have the file
+                pass
+            else:
+                if masterfctx in self.revmap: # no need to update linelog
+                    masterfctx = None
+
+        #                  ... - @ <- rev (can be an arbitrary changeset,
+        #                 /                not necessarily a descendant
+        #      master -> o                 of master)
+        #                |
+        #     a merge -> o         'o': new changesets in the main branch
+        #                |\        '#': revisions in the main branch that
+        #                o *            exist in linelog / revmap
+        #                | .       '*': changesets in side branches, or
+        # last master -> # .            descendants of master
+        #                | .
+        #                # *       joint: '#', and is a parent of a '*'
+        #                |/
+        #     a joint -> # ^^^^ --- side branches
+        #                |
+        #                ^ --- main branch (in linelog)
+
+        # these DFSes are similar to the traditional annotate algorithm.
+        # we cannot really reuse the code for perf reason.
+
+        # 1st DFS calculates merges, joint points, and needed.
+        # "needed" is a simple reference counting dict to free items in
+        # "hist", reducing its memory usage otherwise could be huge.
+        initvisit = [revfctx]
+        if masterfctx:
+            if masterfctx.rev() is None:
+                raise error.Abort(_('cannot update linelog to wdir()'),
+                                  hint=_('set fastannotate.mainbranch'))
+            initvisit.append(masterfctx)
+        visit = initvisit[:]
+        pcache = {}
+        needed = {revfctx: 1}
+        hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
+        while visit:
+            f = visit.pop()
+            if f in pcache or f in hist:
+                continue
+            if f in self.revmap: # in the old main branch, it's a joint
+                llrev = self.revmap.hsh2rev(f.node())
+                self.linelog.annotate(llrev)
+                result = self.linelog.annotateresult
+                hist[f] = (result, f.data())
+                continue
+            pl = self._parentfunc(f)
+            pcache[f] = pl
+            for p in pl:
+                needed[p] = needed.get(p, 0) + 1
+                if p not in pcache:
+                    visit.append(p)
+
+        # 2nd (simple) DFS calculates new changesets in the main branch
+        # ('o' nodes in the above graph), so we know when to update linelog.
+        newmainbranch = set()
+        f = masterfctx
+        while f and f not in self.revmap:
+            newmainbranch.add(f)
+            pl = pcache[f]
+            if pl:
+                f = pl[0]
+            else:
+                f = None
+                break
+
+        # f, if present, is the position where the last build stopped, and
+        # should be the "master" last time. check to see if we can continue
+        # building the linelog incrementally. (we cannot if diverged)
+        if masterfctx is not None:
+            self._checklastmasterhead(f)
+
+        if self.ui.debugflag:
+            if newmainbranch:
+                self.ui.debug('fastannotate: %s: %d new changesets in the main'
+                              ' branch\n' % (self.path, len(newmainbranch)))
+            elif not hist: # no joints, no updates
+                self.ui.debug('fastannotate: %s: linelog cannot help in '
+                              'annotating this revision\n' % self.path)
+
+        # prepare annotateresult so we can update linelog incrementally
+        self.linelog.annotate(self.linelog.maxrev)
+
+        # 3rd DFS does the actual annotate
+        visit = initvisit[:]
+        progress = 0
+        while visit:
+            f = visit[-1]
+            if f in hist:
+                visit.pop()
+                continue
+
+            ready = True
+            pl = pcache[f]
+            for p in pl:
+                if p not in hist:
+                    ready = False
+                    visit.append(p)
+            if not ready:
+                continue
+
+            visit.pop()
+            blocks = None # mdiff blocks, used for appending linelog
+            ismainbranch = (f in newmainbranch)
+            # curr is the same as the traditional annotate algorithm,
+            # if we only care about linear history (do not follow merge),
+            # then curr is not actually used.
+            assert f not in hist
+            curr = _decorate(f)
+            for i, p in enumerate(pl):
+                bs = list(self._diffblocks(hist[p][1], curr[1]))
+                if i == 0 and ismainbranch:
+                    blocks = bs
+                curr = _pair(hist[p], curr, bs)
+                if needed[p] == 1:
+                    del hist[p]
+                    del needed[p]
+                else:
+                    needed[p] -= 1
+
+            hist[f] = curr
+            del pcache[f]
+
+            if ismainbranch: # need to write to linelog
+                if not self.ui.quiet:
+                    progress += 1
+                    self.ui.progress(_('building cache'), progress,
+                                     total=len(newmainbranch))
+                bannotated = None
+                if len(pl) == 2 and self.opts.followmerge: # merge
+                    bannotated = curr[0]
+                if blocks is None: # no parents, add an empty one
+                    blocks = list(self._diffblocks('', curr[1]))
+                self._appendrev(f, blocks, bannotated)
+            elif showpath: # not append linelog, but we need to record path
+                self._node2path[f.node()] = f.path()
+
+        if progress: # clean progress bar
+            self.ui.write()
+
+        result = [
+            ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
+            for fr, l in hist[revfctx][0]] # [(node, linenumber)]
+        return self._refineannotateresult(result, revfctx, showpath, showlines)
+
+    def canannotatedirectly(self, rev):
+        """(str) -> bool, fctx or node.
+        return (True, f) if we can annotate without updating the linelog, pass
+        f to annotatedirectly.
+        return (False, f) if we need extra calculation. f is the fctx resolved
+        from rev.
+        """
+        result = True
+        f = None
+        if not isinstance(rev, int) and rev is not None:
+            hsh = {20: bytes, 40: node.bin}.get(len(rev), lambda x: None)(rev)
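+            # (sketch of the dispatch above: a 20-byte binary node is kept
+            # as-is, a 40-char hex string goes through node.bin, and any
+            # other length maps to None and falls through to the slower
+            # fctx resolution below)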
+            if hsh is not None and (hsh, self.path) in self.revmap:
+                f = hsh
+        if f is None:
+            adjustctx = 'linkrev' if self._perfhack else True
+            f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
+            result = f in self.revmap
+            if not result and self._perfhack:
+                # redo the resolution without perfhack - as we are going to
+                # do write operations, we need a correct fctx.
+                f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
+        return result, f
+
+    def annotatealllines(self, rev, showpath=False, showlines=False):
+        """(rev : str) -> [(node : str, linenum : int, path : str)]
+
+        the result has the same format as annotate, but includes all
+        (including deleted) lines up to rev. call this after calling
+        annotate(rev, ...) for better performance and accuracy.
+        """
+        revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
+
+        # find a chain from rev to anything in the mainbranch
+        if revfctx not in self.revmap:
+            chain = [revfctx]
+            a = ''
+            while True:
+                f = chain[-1]
+                pl = self._parentfunc(f)
+                if not pl:
+                    break
+                if pl[0] in self.revmap:
+                    a = pl[0].data()
+                    break
+                chain.append(pl[0])
+
+            # both self.linelog and self.revmap are backed by the filesystem.
+            # now we want to modify them but do not want to write changes
+            # back to files. so we create in-memory objects and copy them.
+            # it's like a "fork".
+            linelog = linelogmod.linelog()
+            linelog.copyfrom(self.linelog)
+            linelog.annotate(linelog.maxrev)
+            revmap = revmapmod.revmap()
+            revmap.copyfrom(self.revmap)
+
+            for f in reversed(chain):
+                b = f.data()
+                blocks = list(self._diffblocks(a, b))
+                self._doappendrev(linelog, revmap, f, blocks)
+                a = b
+        else:
+            # fastpath: use existing linelog, revmap as we don't write to them
+            linelog = self.linelog
+            revmap = self.revmap
+
+        lines = linelog.getalllines()
+        hsh = revfctx.node()
+        llrev = revmap.hsh2rev(hsh)
+        result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
+        # cannot use _refineannotateresult since we need custom logic for
+        # resolving line contents
+        if showpath:
+            result = self._addpathtoresult(result, revmap)
+        if showlines:
+            linecontents = self._resolvelines(result, revmap, linelog)
+            result = (result, linecontents)
+        return result
+
+    def _resolvelines(self, annotateresult, revmap, linelog):
+        """(annotateresult) -> [line]. designed for annotatealllines.
+        this is probably the most inefficient code in the whole fastannotate
+        directory. but we have made a decision that the linelog does not
+        store line contents. so getting them requires random accesses to
+        the revlog data, and since there can be many of them, it can be
+        very slow.
+        """
+        # [llrev]
+        revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
+        result = [None] * len(annotateresult)
+        # {(rev, linenum): [lineindex]}
+        key2idxs = collections.defaultdict(list)
+        for i in pycompat.xrange(len(result)):
+            key2idxs[(revs[i], annotateresult[i][1])].append(i)
+        while key2idxs:
+            # find an unresolved line and its linelog rev to annotate
+            hsh = None
+            try:
+                for (rev, _linenum), idxs in key2idxs.iteritems():
+                    if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
+                        continue
+                    hsh = annotateresult[idxs[0]][0]
+                    break
+            except StopIteration: # no more unresolved lines
+                return result
+            if hsh is None:
+                # the remaining key2idxs are not in main branch, resolving them
+                # using the hard way...
+                revlines = {}
+                for (rev, linenum), idxs in key2idxs.iteritems():
+                    if rev not in revlines:
+                        hsh = annotateresult[idxs[0]][0]
+                        if self.ui.debugflag:
+                            self.ui.debug('fastannotate: reading %s line #%d '
+                                          'to resolve lines %r\n'
+                                          % (node.short(hsh), linenum, idxs))
+                        fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
+                        lines = mdiff.splitnewlines(fctx.data())
+                        revlines[rev] = lines
+                    for idx in idxs:
+                        result[idx] = revlines[rev][linenum]
+                assert all(x is not None for x in result)
+                return result
+
+            # run the annotate and the lines should match to the file content
+            self.ui.debug('fastannotate: annotate %s to resolve lines\n'
+                          % node.short(hsh))
+            linelog.annotate(rev)
+            fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
+            annotated = linelog.annotateresult
+            lines = mdiff.splitnewlines(fctx.data())
+            if len(lines) != len(annotated):
+                raise faerror.CorruptedFileError('unexpected annotated lines')
+            # resolve lines from the annotate result
+            for i, line in enumerate(lines):
+                k = annotated[i]
+                if k in key2idxs:
+                    for idx in key2idxs[k]:
+                        result[idx] = line
+                    del key2idxs[k]
+        return result
+
+    def annotatedirectly(self, f, showpath, showlines):
+        """like annotate, but when we know that f is in linelog.
+        f can be either a 20-char str (node) or a fctx. this is for perf - in
+        the best case, the user provides a node and we don't need to read the
+        filelog or construct any filecontext.
+        """
+        if isinstance(f, str):
+            hsh = f
+        else:
+            hsh = f.node()
+        llrev = self.revmap.hsh2rev(hsh)
+        if not llrev:
+            raise faerror.CorruptedFileError('%s is not in revmap'
+                                             % node.hex(hsh))
+        if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
+            raise faerror.CorruptedFileError('%s is not in revmap mainbranch'
+                                             % node.hex(hsh))
+        self.linelog.annotate(llrev)
+        result = [(self.revmap.rev2hsh(r), l)
+                  for r, l in self.linelog.annotateresult]
+        return self._refineannotateresult(result, f, showpath, showlines)
+
+    def _refineannotateresult(self, result, f, showpath, showlines):
+        """add the missing path or line contents, they can be expensive.
+        f could be either node or fctx.
+        """
+        if showpath:
+            result = self._addpathtoresult(result)
+        if showlines:
+            if isinstance(f, str): # f: node or fctx
+                llrev = self.revmap.hsh2rev(f)
+                fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
+            else:
+                fctx = f
+            lines = mdiff.splitnewlines(fctx.data())
+            if len(lines) != len(result): # linelog is probably corrupted
+                raise faerror.CorruptedFileError()
+            result = (result, lines)
+        return result
+
+    def _appendrev(self, fctx, blocks, bannotated=None):
+        self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
+
+    def _diffblocks(self, a, b):
+        return mdiff.allblocks(a, b, self.opts.diffopts)
+
+    @staticmethod
+    def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
+        """append a revision to linelog and revmap"""
+
+        def getllrev(f):
+            """(fctx) -> int"""
+            # f should not be a linelog revision
+            if isinstance(f, int):
+                raise error.ProgrammingError('f should not be an int')
+            # f is a fctx, allocate linelog rev on demand
+            hsh = f.node()
+            rev = revmap.hsh2rev(hsh)
+            if rev is None:
+                rev = revmap.append(hsh, sidebranch=True, path=f.path())
+            return rev
+
+        # append sidebranch revisions to revmap
+        siderevs = []
+        siderevmap = {} # node: int
+        if bannotated is not None:
+            for (a1, a2, b1, b2), op in blocks:
+                if op != '=':
+                    # f could be either a linelog rev or a fctx.
+                    siderevs += [f for f, l in bannotated[b1:b2]
+                                 if not isinstance(f, int)]
+        siderevs = set(siderevs)
+        if fctx in siderevs: # mainnode must be appended separately
+            siderevs.remove(fctx)
+        for f in siderevs:
+            siderevmap[f] = getllrev(f)
+
+        # the changeset in the main branch, could be a merge
+        llrev = revmap.append(fctx.node(), path=fctx.path())
+        siderevmap[fctx] = llrev
+
+        for (a1, a2, b1, b2), op in reversed(blocks):
+            if op == '=':
+                continue
+            if bannotated is None:
+                linelog.replacelines(llrev, a1, a2, b1, b2)
+            else:
+                blines = [((r if isinstance(r, int) else siderevmap[r]), l)
+                          for r, l in bannotated[b1:b2]]
+                linelog.replacelines_vec(llrev, a1, a2, blines)
+
+    def _addpathtoresult(self, annotateresult, revmap=None):
+        """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
+        if revmap is None:
+            revmap = self.revmap
+
+        def _getpath(nodeid):
+            path = self._node2path.get(nodeid)
+            if path is None:
+                path = revmap.rev2path(revmap.hsh2rev(nodeid))
+                self._node2path[nodeid] = path
+            return path
+
+        return [(n, l, _getpath(n)) for n, l in annotateresult]
+
+    def _checklastmasterhead(self, fctx):
+        """check if fctx is the master's head last time, raise if not"""
+        if fctx is None:
+            llrev = 0
+        else:
+            llrev = self.revmap.hsh2rev(fctx.node())
+            if not llrev:
+                raise faerror.CannotReuseError()
+        if self.linelog.maxrev != llrev:
+            raise faerror.CannotReuseError()
+
+    @util.propertycache
+    def _parentfunc(self):
+        """-> (fctx) -> [fctx]"""
+        followrename = self.opts.followrename
+        followmerge = self.opts.followmerge
+        def parents(f):
+            pl = _parents(f, follow=followrename)
+            if not followmerge:
+                pl = pl[:1]
+            return pl
+        return parents
+
+    @util.propertycache
+    def _perfhack(self):
+        return self.ui.configbool('fastannotate', 'perfhack')
+
+    def _resolvefctx(self, rev, path=None, **kwds):
+        return resolvefctx(self.repo, rev, (path or self.path), **kwds)
+
+def _unlinkpaths(paths):
+    """silent, best-effort unlink"""
+    for path in paths:
+        try:
+            util.unlink(path)
+        except OSError:
+            pass
+
+class pathhelper(object):
+    """helper for getting paths for lockfile, linelog and revmap"""
+
+    def __init__(self, repo, path, opts=defaultopts):
+        # different options use different directories
+        self._vfspath = os.path.join('fastannotate',
+                                     opts.shortstr, encodedir(path))
+        self._repo = repo
+
+    @property
+    def dirname(self):
+        return os.path.dirname(self._repo.vfs.join(self._vfspath))
+
+    @property
+    def linelogpath(self):
+        return self._repo.vfs.join(self._vfspath + '.l')
+
+    def lock(self):
+        return lockmod.lock(self._repo.vfs, self._vfspath + '.lock')
+
+    @contextlib.contextmanager
+    def _lockflock(self):
+        """the same as 'lock' but use flock instead of lockmod.lock, to avoid
+        creating temporary symlinks."""
+        import fcntl
+        lockpath = self.linelogpath
+        util.makedirs(os.path.dirname(lockpath))
+        lockfd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
+        fcntl.flock(lockfd, fcntl.LOCK_EX)
+        try:
+            yield
+        finally:
+            fcntl.flock(lockfd, fcntl.LOCK_UN)
+            os.close(lockfd)
+
+    @property
+    def revmappath(self):
+        return self._repo.vfs.join(self._vfspath + '.m')
+
+@contextlib.contextmanager
+def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
+    """context needed to perform (fast) annotate on a file
+
+    an annotatecontext of a single file consists of two structures: the
+    linelog and the revmap. this function takes care of locking. only one
+    process is allowed to write that file's linelog and revmap at a time.
+
+    when something goes wrong, this function will assume the linelog and the
+    revmap are in a bad state, and remove them from disk.
+
+    use this function in the following way:
+
+        with annotatecontext(...) as actx:
+            actx. ....
+    """
+    helper = pathhelper(repo, path, opts)
+    util.makedirs(helper.dirname)
+    revmappath = helper.revmappath
+    linelogpath = helper.linelogpath
+    actx = None
+    try:
+        with helper.lock():
+            actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
+            if rebuild:
+                actx.rebuild()
+            yield actx
+    except Exception:
+        if actx is not None:
+            actx.rebuild()
+        repo.ui.debug('fastannotate: %s: cache broken and deleted\n' % path)
+        raise
+    finally:
+        if actx is not None:
+            actx.close()
+
+def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
+    """like annotatecontext but get the context from a fctx. convenient when
+    used in fctx.annotate
+    """
+    repo = fctx._repo
+    path = fctx._path
+    if repo.ui.configbool('fastannotate', 'forcefollow', True):
+        follow = True
+    aopts = annotateopts(diffopts=diffopts, followrename=follow)
+    return annotatecontext(repo, path, aopts, rebuild)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/error.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,13 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# error: errors used in fastannotate
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+class CorruptedFileError(Exception):
+    pass
+
+class CannotReuseError(Exception):
+    """cannot reuse or update the cache incrementally"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/formatter.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,161 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# formatter: defines the format used to output annotate results
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+from mercurial import (
+    encoding,
+    node,
+    pycompat,
+    templatefilters,
+    util,
+)
+from mercurial.utils import (
+        dateutil,
+)
+
+# imitating mercurial.commands.annotate, not using the vanilla formatter since
+# the data structures are a bit different, and we have some fast paths.
+class defaultformatter(object):
+    """the default formatter that does leftpad and support some common flags"""
+
+    def __init__(self, ui, repo, opts):
+        self.ui = ui
+        self.opts = opts
+
+        if ui.quiet:
+            datefunc = dateutil.shortdate
+        else:
+            datefunc = dateutil.datestr
+        datefunc = util.cachefunc(datefunc)
+        getctx = util.cachefunc(lambda x: repo[x[0]])
+        hexfunc = self._hexfunc
+
+        # special handling for working copy "changeset" and "rev" functions
+        if self.opts.get('rev') == 'wdir()':
+            orig = hexfunc
+            hexfunc = lambda x: None if x is None else orig(x)
+            wnode = hexfunc(repo[None].p1().node()) + '+'
+            wrev = str(repo[None].p1().rev())
+            wrevpad = ''
+            if not opts.get('changeset'): # only show + if changeset is hidden
+                wrev += '+'
+                wrevpad = ' '
+            revenc = lambda x: wrev if x is None else str(x) + wrevpad
+            csetenc = lambda x: wnode if x is None else str(x) + ' '
+        else:
+            revenc = csetenc = str
+
+        # opt name, separator, raw value (for json/plain), encoder (for plain)
+        opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser),
+                 ('number', ' ', lambda x: getctx(x).rev(), revenc),
+                 ('changeset', ' ', lambda x: hexfunc(x[0]), csetenc),
+                 ('date', ' ', lambda x: getctx(x).date(), datefunc),
+                 ('file', ' ', lambda x: x[2], str),
+                 ('line_number', ':', lambda x: x[1] + 1, str)]
+        fieldnamemap = {'number': 'rev', 'changeset': 'node'}
+        funcmap = [(get, sep, fieldnamemap.get(op, op), enc)
+                   for op, sep, get, enc in opmap
+                   if opts.get(op)]
+        # no separator for first column
+        funcmap[0] = list(funcmap[0])
+        funcmap[0][1] = ''
+        self.funcmap = funcmap
+
+    def write(self, annotatedresult, lines=None, existinglines=None):
+        """(annotateresult, [str], set([rev, linenum])) -> None. write output.
+        annotateresult can be [(node, linenum, path)], or [(node, linenum)]
+        """
+        pieces = [] # [[str]]
+        maxwidths = [] # [int]
+
+        # calculate padding
+        for f, sep, name, enc in self.funcmap:
+            l = [enc(f(x)) for x in annotatedresult]
+            pieces.append(l)
+            if name in ['node', 'date']: # node and date have a fixed width
+                l = l[:1]
+            # a real list (not a lazy map object) so the emptiness check works
+            widths = [encoding.colwidth(x) for x in set(l)]
+            maxwidth = (max(widths) if widths else 0)
+            maxwidths.append(maxwidth)
+
+        # buffered output
+        result = ''
+        for i in pycompat.xrange(len(annotatedresult)):
+            for j, p in enumerate(pieces):
+                sep = self.funcmap[j][1]
+                padding = ' ' * (maxwidths[j] - len(p[i]))
+                result += sep + padding + p[i]
+            if lines:
+                if existinglines is None:
+                    result += ': ' + lines[i]
+                else: # extra formatting showing whether a line exists
+                    key = (annotatedresult[i][0], annotatedresult[i][1])
+                    if key in existinglines:
+                        result += ':  ' + lines[i]
+                    else:
+                        result += ': ' + self.ui.label('-' + lines[i],
+                                                       'diff.deleted')
+
+            if result[-1] != '\n':
+                result += '\n'
+
+        self.ui.write(result)
+
+    @util.propertycache
+    def _hexfunc(self):
+        if self.ui.debugflag or self.opts.get('long_hash'):
+            return node.hex
+        else:
+            return node.short
+
+    def end(self):
+        pass
+
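A tiny runnable sketch (hypothetical data; the real code uses encoding.colwidth instead of len() so wide characters count properly) of the left-padding computed above:

    cells = ['7', '42', '100']
    width = max(len(c) for c in cells)
    assert [' ' * (width - len(c)) + c for c in cells] == ['  7', ' 42', '100']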
+class jsonformatter(defaultformatter):
+    def __init__(self, ui, repo, opts):
+        super(jsonformatter, self).__init__(ui, repo, opts)
+        self.ui.write('[')
+        self.needcomma = False
+
+    def write(self, annotatedresult, lines=None, existinglines=None):
+        if annotatedresult:
+            self._writecomma()
+
+        pieces = [(name, [f(x) for x in annotatedresult])
+                  for f, sep, name, enc in self.funcmap]
+        if lines is not None:
+            pieces.append(('line', lines))
+        pieces.sort()
+
+        seps = [','] * len(pieces[:-1]) + ['']
+
+        result = ''
+        lasti = len(annotatedresult) - 1
+        for i in pycompat.xrange(len(annotatedresult)):
+            result += '\n {\n'
+            for j, p in enumerate(pieces):
+                k, vs = p
+                result += ('  "%s": %s%s\n'
+                           % (k, templatefilters.json(vs[i], paranoid=False),
+                              seps[j]))
+            result += ' }%s' % ('' if i == lasti else ',')
+        if lasti >= 0:
+            self.needcomma = True
+
+        self.ui.write(result)
+
+    def _writecomma(self):
+        if self.needcomma:
+            self.ui.write(',')
+            self.needcomma = False
+
+    @util.propertycache
+    def _hexfunc(self):
+        return node.hex
+
+    def end(self):
+        self.ui.write('\n]\n')
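For reference, the writer above produces output of roughly this shape (hand-written illustration with made-up values; keys are sorted, and the last entry gets no trailing comma):

    [
     {
      "line": "first line\n",
      "node": "1f0dee641bb7258c56bd60e93edfa2405381c41e",
      "rev": 0
     },
     {
      "line": "second line\n",
      "node": "1f0dee641bb7258c56bd60e93edfa2405381c41e",
      "rev": 0
     }
    ]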
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/protocol.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,229 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# protocol: server- and client-side logic for fastannotate support
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import contextlib
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    extensions,
+    hg,
+    localrepo,
+    util,
+    wireprotov1peer,
+    wireprotov1server,
+)
+from . import context
+
+# common
+
+def _getmaster(ui):
+    """get the mainbranch, and enforce it is set"""
+    master = ui.config('fastannotate', 'mainbranch')
+    if not master:
+        raise error.Abort(_('fastannotate.mainbranch is required '
+                            'for both the client and the server'))
+    return master
+
+# server-side
+
+def _capabilities(orig, repo, proto):
+    result = orig(repo, proto)
+    result.append('getannotate')
+    return result
+
+def _getannotate(repo, proto, path, lastnode):
+    # output:
+    #   FILE := vfspath + '\0' + str(size) + '\0' + content
+    #   OUTPUT := '' | FILE + OUTPUT
+    result = ''
+    buildondemand = repo.ui.configbool('fastannotate', 'serverbuildondemand',
+                                       True)
+    with context.annotatecontext(repo, path) as actx:
+        if buildondemand:
+            # update before responding to the client
+            master = _getmaster(repo.ui)
+            try:
+                if not actx.isuptodate(master):
+                    actx.annotate(master, master)
+            except Exception:
+                # non-fast-forward move or corrupted. rebuild automatically.
+                actx.rebuild()
+                try:
+                    actx.annotate(master, master)
+                except Exception:
+                    actx.rebuild() # delete files
+            finally:
+                # although the "with" context will also do a close/flush, we
+                # need to do it early so we can send the correct respond to
+                # client.
+                actx.close()
+        # send back the full content of revmap and linelog. in the future we
+        # may want to do some rsync-like fancy updating.
+        # the lastnode check is not necessary if the client and the server
+        # agree where the main branch is.
+        if actx.lastnode != lastnode:
+            for p in [actx.revmappath, actx.linelogpath]:
+                if not os.path.exists(p):
+                    continue
+                content = ''
+                with open(p, 'rb') as f:
+                    content = f.read()
+                vfsbaselen = len(repo.vfs.base + '/')
+                relpath = p[vfsbaselen:]
+                result += '%s\0%s\0%s' % (relpath, len(content), content)
+    return result
+
+def _registerwireprotocommand():
+    if 'getannotate' in wireprotov1server.commands:
+        return
+    wireprotov1server.wireprotocommand(
+        'getannotate', 'path lastnode')(_getannotate)
+
+def serveruisetup(ui):
+    _registerwireprotocommand()
+    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
+
+# client-side
+
+def _parseresponse(payload):
+    result = {}
+    i = 0
+    l = len(payload) - 1
+    state = 0 # 0: vfspath, 1: size
+    vfspath = size = ''
+    while i < l:
+        ch = payload[i]
+        if ch == '\0':
+            if state == 1:
+                result[vfspath] = buffer(payload, i + 1, int(size))
+                i += int(size)
+                state = 0
+                vfspath = size = ''
+            elif state == 0:
+                state = 1
+        else:
+            if state == 1:
+                size += ch
+            elif state == 0:
+                vfspath += ch
+        i += 1
+    return result
+
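A minimal re-implementation (illustration only; the real parser above is a byte-at-a-time state machine and uses buffer() to avoid copying large values) of the 'path\0size\0content' framing:

    def parse(payload):
        result = {}
        i = 0
        while i < len(payload):
            p = payload.index('\0', i)        # end of vfspath
            s = payload.index('\0', p + 1)    # end of decimal size
            size = int(payload[p + 1:s])
            result[payload[i:p]] = payload[s + 1:s + 1 + size]
            i = s + 1 + size
        return result

    assert parse('a.l\x005\x00helloa.m\x002\x00hi') == {'a.l': 'hello',
                                                        'a.m': 'hi'}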
+def peersetup(ui, peer):
+    class fastannotatepeer(peer.__class__):
+        @wireprotov1peer.batchable
+        def getannotate(self, path, lastnode=None):
+            if not self.capable('getannotate'):
+                ui.warn(_('remote peer cannot provide annotate cache\n'))
+                yield None, None
+            else:
+                args = {'path': path, 'lastnode': lastnode or ''}
+                f = wireprotov1peer.future()
+                yield args, f
+                yield _parseresponse(f.value)
+    peer.__class__ = fastannotatepeer
+
+@contextlib.contextmanager
+def annotatepeer(repo):
+    ui = repo.ui
+
+    remotepath = ui.expandpath(
+        ui.config('fastannotate', 'remotepath', 'default'))
+    peer = hg.peer(ui, {}, remotepath)
+
+    try:
+        yield peer
+    finally:
+        peer.close()
+
+def clientfetch(repo, paths, lastnodemap=None, peer=None):
+    """download annotate cache from the server for paths"""
+    if not paths:
+        return
+
+    if peer is None:
+        with annotatepeer(repo) as peer:
+            return clientfetch(repo, paths, lastnodemap, peer)
+
+    if lastnodemap is None:
+        lastnodemap = {}
+
+    ui = repo.ui
+    results = []
+    with peer.commandexecutor() as batcher:
+        ui.debug('fastannotate: requesting %d files\n' % len(paths))
+        for p in paths:
+            results.append(batcher.callcommand(
+                'getannotate',
+                {'path': p, 'lastnode':lastnodemap.get(p)}))
+
+    ui.debug('fastannotate: server returned\n')
+    for result in results:
+        r = result.result()
+        # TODO: pconvert these paths on the server?
+        r = {util.pconvert(p): v for p, v in r.iteritems()}
+        for path in sorted(r):
+            # ignore malicious paths
+            if not path.startswith('fastannotate/') or '/../' in (path + '/'):
+                ui.debug('fastannotate: ignored malicious path %s\n' % path)
+                continue
+            content = r[path]
+            if ui.debugflag:
+                ui.debug('fastannotate: writing %d bytes to %s\n'
+                         % (len(content), path))
+            repo.vfs.makedirs(os.path.dirname(path))
+            with repo.vfs(path, 'wb') as f:
+                f.write(content)
+
+def _filterfetchpaths(repo, paths):
+    """return a subset of paths whose history is long and need to fetch linelog
+    from the server. works with remotefilelog and non-remotefilelog repos.
+    """
+    threshold = repo.ui.configint('fastannotate', 'clientfetchthreshold', 10)
+    if threshold <= 0:
+        return paths
+
+    result = []
+    for path in paths:
+        try:
+            if len(repo.file(path)) >= threshold:
+                result.append(path)
+        except Exception: # file not found etc.
+            result.append(path)
+
+    return result
+
+def localreposetup(ui, repo):
+    class fastannotaterepo(repo.__class__):
+        def prefetchfastannotate(self, paths, peer=None):
+            master = _getmaster(self.ui)
+            needupdatepaths = []
+            lastnodemap = {}
+            try:
+                for path in _filterfetchpaths(self, paths):
+                    with context.annotatecontext(self, path) as actx:
+                        if not actx.isuptodate(master, strict=False):
+                            needupdatepaths.append(path)
+                            lastnodemap[path] = actx.lastnode
+                if needupdatepaths:
+                    clientfetch(self, needupdatepaths, lastnodemap, peer)
+            except Exception as ex:
+                # could be a non-writable directory or similar; not fatal
+                self.ui.debug('fastannotate: prefetch failed: %r\n' % ex)
+    repo.__class__ = fastannotaterepo
+
+def clientreposetup(ui, repo):
+    _registerwireprotocommand()
+    if isinstance(repo, localrepo.localrepository):
+        localreposetup(ui, repo)
+    # TODO: this mutates global state, but only if at least one repo
+    # has the extension enabled. This is probably bad for hgweb.
+    if peersetup not in hg.wirepeersetupfuncs:
+        hg.wirepeersetupfuncs.append(peersetup)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/revmap.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,254 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# revmap: trivial bidirectional map between hg hashes and linelog revs
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import bisect
+import os
+import struct
+
+from mercurial.node import hex
+from mercurial import (
+    error as hgerror,
+    pycompat,
+)
+from . import error
+
+# the revmap file format is straightforward:
+#
+#    8 bytes: header
+#    1 byte : flag for linelog revision 1
+#    ? bytes: (optional) '\0'-terminated path string
+#             only exists if (flag & renameflag) != 0
+#   20 bytes: hg hash for linelog revision 1
+#    1 byte : flag for linelog revision 2
+#    ? bytes: (optional) '\0'-terminated path string
+#   20 bytes: hg hash for linelog revision 2
+#   ....
+#
+# the implementation is deliberately simple: __init__ loads the whole revmap
+# with no laziness. a benchmark shows loading 10000 revisions takes about
+# 0.015 seconds, which is fast enough for our use-case. if this implementation
+# becomes a bottleneck, we can change it to lazily read the file
+# from the end.
+
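An illustrative sketch (not part of the patch) that packs one on-disk record following the layout above, using the rename flag (2, defined just below) and a fake hash:

    import struct

    record = struct.pack('B', 2) + b'a.txt\0' + b'\x11' * 20
    assert len(record) == 1 + len(b'a.txt\0') + 20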
+# whether the changeset is in the side branch. i.e. not in the linear main
+# branch, but only referenced by lines in merge changesets.
+sidebranchflag = 1
+
+# whether the changeset changes the file path (ie. is a rename)
+renameflag = 2
+
+# len(mercurial.node.nullid)
+_hshlen = 20
+
+class revmap(object):
+    """trivial hg bin hash - linelog rev bidirectional map
+
+    also stores a flag (uint8) for each revision, and track renames.
+    """
+
+    HEADER = b'REVMAP1\0'
+
+    def __init__(self, path=None):
+        """create or load the revmap, optionally associate to a file
+
+        if path is None, the revmap is entirely in-memory. the caller is
+        responsible for locking. concurrent writes to a same file is unsafe.
+        the caller needs to make sure one file is associated to at most one
+        revmap object at a time."""
+        self.path = path
+        self._rev2hsh = [None]
+        self._rev2flag = [None]
+        self._hsh2rev = {}
+        # since renames do not happen frequently, do not store the path for
+        # every revision. self._renamerevs can be used for bisecting.
+        self._renamerevs = [0]
+        self._renamepaths = ['']
+        self._lastmaxrev = -1
+        if path:
+            if os.path.exists(path):
+                self._load()
+            else:
+                # write the header so "append" can do incremental updates
+                self.flush()
+
+    def copyfrom(self, rhs):
+        """copy the map data from another revmap. do not affect self.path"""
+        self._rev2hsh = rhs._rev2hsh[:]
+        self._rev2flag = rhs._rev2flag[:]
+        self._hsh2rev = rhs._hsh2rev.copy()
+        self._renamerevs = rhs._renamerevs[:]
+        self._renamepaths = rhs._renamepaths[:]
+        self._lastmaxrev = -1
+
+    @property
+    def maxrev(self):
+        """return max linelog revision number"""
+        return len(self._rev2hsh) - 1
+
+    def append(self, hsh, sidebranch=False, path=None, flush=False):
+        """add a binary hg hash and return the mapped linelog revision.
+        if flush is True, incrementally update the file.
+        """
+        if hsh in self._hsh2rev:
+            raise error.CorruptedFileError('%r is in revmap already' % hex(hsh))
+        if len(hsh) != _hshlen:
+            raise hgerror.ProgrammingError('hsh must be %d-char long' % _hshlen)
+        idx = len(self._rev2hsh)
+        flag = 0
+        if sidebranch:
+            flag |= sidebranchflag
+        if path is not None and path != self._renamepaths[-1]:
+            flag |= renameflag
+            self._renamerevs.append(idx)
+            self._renamepaths.append(path)
+        self._rev2hsh.append(hsh)
+        self._rev2flag.append(flag)
+        self._hsh2rev[hsh] = idx
+        if flush:
+            self.flush()
+        return idx
+
+    def rev2hsh(self, rev):
+        """convert linelog revision to hg hash. return None if not found."""
+        if rev > self.maxrev or rev < 0:
+            return None
+        return self._rev2hsh[rev]
+
+    def rev2flag(self, rev):
+        """get the flag (uint8) for a given linelog revision.
+        return None if revision does not exist.
+        """
+        if rev > self.maxrev or rev < 0:
+            return None
+        return self._rev2flag[rev]
+
+    def rev2path(self, rev):
+        """get the path for a given linelog revision.
+        return None if revision does not exist.
+        """
+        if rev > self.maxrev or rev < 0:
+            return None
+        idx = bisect.bisect_right(self._renamerevs, rev) - 1
+        return self._renamepaths[idx]
+
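A standalone sketch (hypothetical data) of the bisect-based lookup above: a path is stored only at the revs where it changes, and bisect_right recovers the path in effect at any rev:

    import bisect

    renamerevs = [0, 1, 5]                # revs where the path changed
    renamepaths = ['', 'a.txt', 'b.txt']  # path in effect from that rev on

    def rev2path(rev):
        return renamepaths[bisect.bisect_right(renamerevs, rev) - 1]

    assert rev2path(1) == 'a.txt'
    assert rev2path(4) == 'a.txt'
    assert rev2path(5) == 'b.txt'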
+    def hsh2rev(self, hsh):
+        """convert hg hash to linelog revision. return None if not found."""
+        return self._hsh2rev.get(hsh)
+
+    def clear(self, flush=False):
+        """make the map empty. if flush is True, write to disk"""
+        # rev 0 is reserved, real revs start from 1
+        self._rev2hsh = [None]
+        self._rev2flag = [None]
+        self._hsh2rev = {}
+        # reset the rename bookkeeping too
+        self._renamerevs = [0]
+        self._renamepaths = ['']
+        self._lastmaxrev = -1
+        if flush:
+            self.flush()
+
+    def flush(self):
+        """write the state down to the file"""
+        if not self.path:
+            return
+        if self._lastmaxrev == -1: # write the entire file
+            with open(self.path, 'wb') as f:
+                f.write(self.HEADER)
+                for i in pycompat.xrange(1, len(self._rev2hsh)):
+                    self._writerev(i, f)
+        else: # append incrementally
+            with open(self.path, 'ab') as f:
+                for i in pycompat.xrange(self._lastmaxrev + 1,
+                                         len(self._rev2hsh)):
+                    self._writerev(i, f)
+        self._lastmaxrev = self.maxrev
+
+    def _load(self):
+        """load state from file"""
+        if not self.path:
+            return
+        # use local variables in a loop. CPython uses LOAD_FAST for them,
+        # which is faster than both LOAD_CONST and LOAD_GLOBAL.
+        flaglen = 1
+        hshlen = _hshlen
+        with open(self.path, 'rb') as f:
+            if f.read(len(self.HEADER)) != self.HEADER:
+                raise error.CorruptedFileError()
+            self.clear(flush=False)
+            while True:
+                buf = f.read(flaglen)
+                if not buf:
+                    break
+                flag = ord(buf)
+                rev = len(self._rev2hsh)
+                if flag & renameflag:
+                    path = self._readcstr(f)
+                    self._renamerevs.append(rev)
+                    self._renamepaths.append(path)
+                hsh = f.read(hshlen)
+                if len(hsh) != hshlen:
+                    raise error.CorruptedFileError()
+                self._hsh2rev[hsh] = rev
+                self._rev2flag.append(flag)
+                self._rev2hsh.append(hsh)
+        self._lastmaxrev = self.maxrev
+
+    def _writerev(self, rev, f):
+        """append a revision data to file"""
+        flag = self._rev2flag[rev]
+        hsh = self._rev2hsh[rev]
+        f.write(struct.pack('B', flag))
+        if flag & renameflag:
+            path = self.rev2path(rev)
+            if path is None:
+                raise error.CorruptedFileError('cannot find path for %s' % rev)
+            f.write(path + '\0')
+        f.write(hsh)
+
+    @staticmethod
+    def _readcstr(f):
+        """read a C-language-like '\0'-terminated string"""
+        buf = ''
+        while True:
+            ch = f.read(1)
+            if not ch: # unexpected eof
+                raise error.CorruptedFileError()
+            if ch == '\0':
+                break
+            buf += ch
+        return buf
+
+    def __contains__(self, f):
+        """(fctx or (node, path)) -> bool.
+        test if (node, path) is in the map, and is not in a side branch.
+        f can be either a tuple of (node, path), or a fctx.
+        """
+        if isinstance(f, tuple): # f: (node, path)
+            hsh, path = f
+        else: # f: fctx
+            hsh, path = f.node(), f.path()
+        rev = self.hsh2rev(hsh)
+        if rev is None:
+            return False
+        if path is not None and path != self.rev2path(rev):
+            return False
+        return (self.rev2flag(rev) & sidebranchflag) == 0
+
+def getlastnode(path):
+    """return the last hash in a revmap, without loading its full content.
+    this is equivalent to `m = revmap(path); m.rev2hsh(m.maxrev)`, but faster.
+    """
+    hsh = None
+    try:
+        with open(path, 'rb') as f:
+            f.seek(-_hshlen, 2)
+            if f.tell() > len(revmap.HEADER):
+                hsh = f.read(_hshlen)
+    except IOError:
+        pass
+    return hsh
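A runnable sketch of the seek-from-EOF trick used above (whence=2 is os.SEEK_END; the data is made up):

    import io

    data = b'REVMAP1\x00' + b'\x01' + b'\xaa' * 20   # header, flag, hash
    f = io.BytesIO(data)
    f.seek(-20, 2)                    # 20 == _hshlen, 2 == os.SEEK_END
    assert f.tell() > len(b'REVMAP1\x00')            # long enough for a record
    assert f.read(20) == b'\xaa' * 20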
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/support.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,122 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# support: fastannotate support for hgweb, and filectx
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+    context as hgcontext,
+    dagop,
+    extensions,
+    hgweb,
+    patch,
+    util,
+)
+
+from . import (
+    context,
+    revmap,
+)
+
+class _lazyfctx(object):
+    """delegates to fctx but do not construct fctx when unnecessary"""
+
+    def __init__(self, repo, node, path):
+        self._node = node
+        self._path = path
+        self._repo = repo
+
+    def node(self):
+        return self._node
+
+    def path(self):
+        return self._path
+
+    @util.propertycache
+    def _fctx(self):
+        return context.resolvefctx(self._repo, self._node, self._path)
+
+    def __getattr__(self, name):
+        return getattr(self._fctx, name)
+
+def _convertoutputs(repo, annotated, contents):
+    """convert fastannotate outputs to vanilla annotate format"""
+    # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
+    # convert to what fctx.annotate returns: [annotateline]
+    results = []
+    fctxmap = {}
+    annotateline = dagop.annotateline
+    for i, (hsh, linenum, path) in enumerate(annotated):
+        if (hsh, path) not in fctxmap:
+            fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
+        # linenum: the user wants 1-based, we have 0-based.
+        lineno = linenum + 1
+        fctx = fctxmap[(hsh, path)]
+        line = contents[i]
+        results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
+    return results
+
+def _getmaster(fctx):
+    """(fctx) -> str"""
+    return fctx._repo.ui.config('fastannotate', 'mainbranch') or 'default'
+
+def _doannotate(fctx, follow=True, diffopts=None):
+    """like the vanilla fctx.annotate, but do it via fastannotate, and make
+    the output format compatible with the vanilla fctx.annotate.
+    may raise Exception, and always returns line numbers.
+    """
+    master = _getmaster(fctx)
+    annotated = contents = None
+
+    with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
+        try:
+            annotated, contents = ac.annotate(fctx.rev(), master=master,
+                                              showpath=True, showlines=True)
+        except Exception:
+            ac.rebuild() # try rebuild once
+            fctx._repo.ui.debug('fastannotate: %s: rebuilding broken cache\n'
+                                % fctx._path)
+            annotated, contents = ac.annotate(fctx.rev(), master=master,
+                                              showpath=True, showlines=True)
+
+    assert annotated and contents
+    return _convertoutputs(fctx._repo, annotated, contents)
+
+def _hgwebannotate(orig, fctx, ui):
+    diffopts = patch.difffeatureopts(ui, untrusted=True,
+                                     section='annotate', whitespace=True)
+    return _doannotate(fctx, diffopts=diffopts)
+
+def _fctxannotate(orig, self, follow=False, linenumber=False, skiprevs=None,
+                  diffopts=None):
+    if skiprevs:
+        # skiprevs is not supported yet
+        return orig(self, follow, linenumber, skiprevs=skiprevs,
+                    diffopts=diffopts)
+    try:
+        return _doannotate(self, follow, diffopts)
+    except Exception as ex:
+        self._repo.ui.debug('fastannotate: falling back to the vanilla '
+                            'annotate: %r\n' % ex)
+        return orig(self, follow=follow, skiprevs=skiprevs,
+                    diffopts=diffopts)
+
+def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
+    # skipset: a set-like used to test if a fctx needs to be downloaded
+    skipset = None
+    with context.fctxannotatecontext(self, follow, diffopts) as ac:
+        skipset = revmap.revmap(ac.revmappath)
+    return orig(self, follow, skiprevs=skiprevs, diffopts=diffopts,
+                prefetchskip=skipset)
+
+def replacehgwebannotate():
+    extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
+
+def replacefctxannotate():
+    extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
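The replace* helpers rely on extensions.wrapfunction, which passes the original callable to the wrapper as its first argument. A self-contained mimic (a simplified assumption, not Mercurial's implementation):

    class _ns(object):
        pass

    ns = _ns()
    ns.annotate = lambda fctx: 'vanilla'

    def wrapfunction(container, name, wrapper):
        orig = getattr(container, name)
        setattr(container, name, lambda *a, **kw: wrapper(orig, *a, **kw))

    def _fast(orig, fctx):
        return 'fast or ' + orig(fctx)

    wrapfunction(ns, 'annotate', _fast)
    assert ns.annotate(None) == 'fast or vanilla'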
--- a/hgext/fix.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/fix.py	Tue Sep 04 12:16:28 2018 -0400
@@ -96,15 +96,16 @@
 # user.
 configitem('fix', 'maxfilesize', default='2MB')
 
-@command('fix',
-    [('', 'all', False, _('fix all non-public non-obsolete revisions')),
-     ('', 'base', [], _('revisions to diff against (overrides automatic '
-                        'selection, and applies to every revision being '
-                        'fixed)'), _('REV')),
-     ('r', 'rev', [], _('revisions to fix'), _('REV')),
-     ('w', 'working-dir', False, _('fix the working directory')),
-     ('', 'whole', False, _('always fix every line of a file'))],
-    _('[OPTION]... [FILE]...'))
+allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions'))
+baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic '
+                             'selection, and applies to every revision being '
+                             'fixed)'), _('REV'))
+revopt = ('r', 'rev', [], _('revisions to fix'), _('REV'))
+wdiropt = ('w', 'working-dir', False, _('fix the working directory'))
+wholeopt = ('', 'whole', False, _('always fix every line of a file'))
+usage = _('[OPTION]... [FILE]...')
+
+@command('fix', [allopt, baseopt, revopt, wdiropt, wholeopt], usage)
 def fix(ui, repo, *pats, **opts):
     """rewrite file content in changesets or working directory
 
@@ -161,6 +162,7 @@
         # it makes the results more easily reproducible.
         filedata = collections.defaultdict(dict)
         replacements = {}
+        wdirwritten = False
         commitorder = sorted(revstofix, reverse=True)
         with ui.makeprogress(topic=_('fixing'), unit=_('files'),
                              total=sum(numitems.values())) as progress:
@@ -178,12 +180,28 @@
                     ctx = repo[rev]
                     if rev == wdirrev:
                         writeworkingdir(repo, ctx, filedata[rev], replacements)
+                        wdirwritten = bool(filedata[rev])
                     else:
                         replacerev(ui, repo, ctx, filedata[rev], replacements)
                     del filedata[rev]
 
-        replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
-        scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
+        cleanup(repo, replacements, wdirwritten)
+
+def cleanup(repo, replacements, wdirwritten):
+    """Calls scmutil.cleanupnodes() with the given replacements.
+
+    "replacements" is a dict from nodeid to nodeid, with one key and one value
+    for every revision that was affected by fixing. This is slightly different
+    from cleanupnodes().
+
+    "wdirwritten" is a bool which tells whether the working copy was affected by
+    fixing, since it has no entry in "replacements".
+
+    Useful as a hook point for extending "hg fix" with output summarizing the
+    effects of the command, though we choose not to output anything here.
+    """
+    replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
+    scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
 
 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
     """"Constructs the list of files to be fixed at specific revisions
@@ -267,8 +285,8 @@
     """
     files = set()
     for basectx in basectxs:
-        stat = repo.status(
-            basectx, fixctx, match=match, clean=bool(pats), unknown=bool(pats))
+        stat = basectx.status(fixctx, match=match, listclean=bool(pats),
+                              listunknown=bool(pats))
         files.update(
             set(itertools.chain(stat.added, stat.modified, stat.clean,
                                 stat.unknown)))
@@ -417,12 +435,15 @@
     starting with the file's content in the fixctx. Fixers that support line
     ranges will affect lines that have changed relative to any of the basectxs
     (i.e. they will only avoid lines that are common to all basectxs).
+
+    A fixer tool's stdout will become the file's new content if and only if it
+    exits with code zero.
     """
     newdata = fixctx[path].data()
     for fixername, fixer in fixers.iteritems():
         if fixer.affects(opts, fixctx, path):
-            ranges = lineranges(opts, path, basectxs, fixctx, newdata)
-            command = fixer.command(ui, path, ranges)
+            rangesfn = lambda: lineranges(opts, path, basectxs, fixctx, newdata)
+            command = fixer.command(ui, path, rangesfn)
             if command is None:
                 continue
             ui.debug('subprocess: %s\n' % (command,))
@@ -436,8 +457,11 @@
             newerdata, stderr = proc.communicate(newdata)
             if stderr:
                 showstderr(ui, fixctx.rev(), fixername, stderr)
-            else:
+            if proc.returncode == 0:
                 newdata = newerdata
+            elif not stderr:
+                showstderr(ui, fixctx.rev(), fixername,
+                           _('exited with status %d\n') % (proc.returncode,))
     return newdata
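A standalone sketch (assuming a Unix tr binary on PATH) of the rule documented above: a fixer's stdout replaces the content only when the tool exits with status zero:

    import subprocess

    newdata = b'data\n'
    proc = subprocess.Popen(['tr', 'a-z', 'A-Z'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    newerdata, stderr = proc.communicate(newdata)
    if proc.returncode == 0:          # only trust output from a clean exit
        newdata = newerdata
    assert newdata == b'DATA\n'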
 
 def showstderr(ui, rev, fixername, stderr):
@@ -567,7 +591,7 @@
         """Should this fixer run on the file at the given path and context?"""
         return scmutil.match(fixctx, [self._fileset], opts)(path)
 
-    def command(self, ui, path, ranges):
+    def command(self, ui, path, rangesfn):
         """A shell command to use to invoke this fixer on the given file/lines
 
         May return None if there is no appropriate command to run for the given
@@ -577,6 +601,7 @@
         parts = [expand(ui, self._command,
                         {'rootpath': path, 'basename': os.path.basename(path)})]
         if self._linerange:
+            ranges = rangesfn()
             if not ranges:
                 # No line ranges to fix, so don't run the fixer.
                 return None
--- a/hgext/hgk.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/hgk.py	Tue Sep 04 12:16:28 2018 -0400
@@ -227,7 +227,7 @@
             else:
                 i -= chunk
 
-            for x in xrange(chunk):
+            for x in pycompat.xrange(chunk):
                 if i + x >= count:
                     l[chunk - x:] = [0] * (chunk - x)
                     break
@@ -238,7 +238,7 @@
                 else:
                     if (i + x) in repo:
                         l[x] = 1
-            for x in xrange(chunk - 1, -1, -1):
+            for x in pycompat.xrange(chunk - 1, -1, -1):
                 if l[x] != 0:
                     yield (i + x, full is not None and l[x] or None)
             if i == 0:
@@ -249,7 +249,7 @@
         if len(ar) == 0:
             return 1
         mask = 0
-        for i in xrange(len(ar)):
+        for i in pycompat.xrange(len(ar)):
             if sha in reachable[i]:
                 mask |= 1 << i
 
--- a/hgext/histedit.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/histedit.py	Tue Sep 04 12:16:28 2018 -0400
@@ -386,7 +386,7 @@
         rules = []
         rulelen = int(lines[index])
         index += 1
-        for i in xrange(rulelen):
+        for i in pycompat.xrange(rulelen):
             ruleaction = lines[index]
             index += 1
             rule = lines[index]
@@ -397,7 +397,7 @@
         replacements = []
         replacementlen = int(lines[index])
         index += 1
-        for i in xrange(replacementlen):
+        for i in pycompat.xrange(replacementlen):
             replacement = lines[index]
             original = node.bin(replacement[:40])
             succ = [node.bin(replacement[i:i + 40]) for i in
@@ -1084,7 +1084,7 @@
             raise error.Abort(_('only --commands argument allowed with '
                                '--edit-plan'))
     else:
-        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
+        if state.inprogress():
             raise error.Abort(_('history edit already in progress, try '
                                '--continue or --abort'))
         if outg:
@@ -1624,8 +1624,8 @@
 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
     if isinstance(nodelist, str):
         nodelist = [nodelist]
-    if os.path.exists(os.path.join(repo.path, 'histedit-state')):
-        state = histeditstate(repo)
+    state = histeditstate(repo)
+    if state.inprogress():
         state.read()
         histedit_nodes = {action.node for action
                           in state.actions if action.node}
@@ -1638,9 +1638,9 @@
 extensions.wrapfunction(repair, 'strip', stripwrapper)
 
 def summaryhook(ui, repo):
-    if not os.path.exists(repo.vfs.join('histedit-state')):
+    state = histeditstate(repo)
+    if not state.inprogress():
         return
-    state = histeditstate(repo)
     state.read()
     if state.actions:
         # i18n: column positioning for "hg summary"
--- a/hgext/keyword.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/keyword.py	Tue Sep 04 12:16:28 2018 -0400
@@ -576,8 +576,8 @@
         label = 'kwfiles.' + kwstate
         for f in filenames:
             fm.startitem()
-            fm.write('kwstatus path', fmt, char,
-                     repo.pathto(f, cwd), label=label)
+            fm.data(kwstatus=char, path=f)
+            fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
     fm.end()
 
 @command('kwshrink',
--- a/hgext/largefiles/basestore.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/largefiles/basestore.py	Tue Sep 04 12:16:28 2018 -0400
@@ -62,25 +62,24 @@
 
         at = 0
         available = self.exists(set(hash for (_filename, hash) in files))
-        progress = ui.makeprogress(_('getting largefiles'), unit=_('files'),
-                                   total=len(files))
-        for filename, hash in files:
-            progress.update(at)
-            at += 1
-            ui.note(_('getting %s:%s\n') % (filename, hash))
+        with ui.makeprogress(_('getting largefiles'), unit=_('files'),
+                             total=len(files)) as progress:
+            for filename, hash in files:
+                progress.update(at)
+                at += 1
+                ui.note(_('getting %s:%s\n') % (filename, hash))
 
-            if not available.get(hash):
-                ui.warn(_('%s: largefile %s not available from %s\n')
-                        % (filename, hash, util.hidepassword(self.url)))
-                missing.append(filename)
-                continue
+                if not available.get(hash):
+                    ui.warn(_('%s: largefile %s not available from %s\n')
+                            % (filename, hash, util.hidepassword(self.url)))
+                    missing.append(filename)
+                    continue
 
-            if self._gethash(filename, hash):
-                success.append((filename, hash))
-            else:
-                missing.append(filename)
+                if self._gethash(filename, hash):
+                    success.append((filename, hash))
+                else:
+                    missing.append(filename)
 
-        progress.complete()
         return (success, missing)
 
     def _gethash(self, filename, hash):
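This hunk (and the similar ones below) switches from a manual progress.complete() call to a with-block; the progress object doubles as a context manager that completes itself on exit. A simplified mimic (not Mercurial's actual implementation):

    class progress(object):
        def __init__(self):
            self.pos, self.done = 0, False
        def __enter__(self):
            return self
        def __exit__(self, *exc):
            self.complete()           # runs even if the loop body raised
            return False
        def update(self, pos):
            self.pos = pos
        def complete(self):
            self.done = True

    p = progress()
    with p as bar:
        bar.update(41)
    assert p.done and p.pos == 41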
--- a/hgext/largefiles/lfcommands.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/largefiles/lfcommands.py	Tue Sep 04 12:16:28 2018 -0400
@@ -118,14 +118,13 @@
                 matcher = None
 
             lfiletohash = {}
-            progress = ui.makeprogress(_('converting revisions'),
-                                       unit=_('revisions'),
-                                       total=rsrc['tip'].rev())
-            for ctx in ctxs:
-                progress.update(ctx.rev())
-                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
-                    lfiles, normalfiles, matcher, size, lfiletohash)
-            progress.complete()
+            with ui.makeprogress(_('converting revisions'),
+                                 unit=_('revisions'),
+                                 total=rsrc['tip'].rev()) as progress:
+                for ctx in ctxs:
+                    progress.update(ctx.rev())
+                    _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
+                        lfiles, normalfiles, matcher, size, lfiletohash)
 
             if rdst.wvfs.exists(lfutil.shortname):
                 rdst.wvfs.rmtree(lfutil.shortname)
@@ -370,18 +369,17 @@
     files = [h for h in files if not retval[h]]
     ui.debug("%d largefiles need to be uploaded\n" % len(files))
 
-    progress = ui.makeprogress(_('uploading largefiles'), unit=_('files'),
-                               total=len(files))
-    for hash in files:
-        progress.update(at)
-        source = lfutil.findfile(rsrc, hash)
-        if not source:
-            raise error.Abort(_('largefile %s missing from store'
-                               ' (needs to be uploaded)') % hash)
-        # XXX check for errors here
-        store.put(source, hash)
-        at += 1
-    progress.complete()
+    with ui.makeprogress(_('uploading largefiles'), unit=_('files'),
+                         total=len(files)) as progress:
+        for hash in files:
+            progress.update(at)
+            source = lfutil.findfile(rsrc, hash)
+            if not source:
+                raise error.Abort(_('largefile %s missing from store'
+                                   ' (needs to be uploaded)') % hash)
+            # XXX check for errors here
+            store.put(source, hash)
+            at += 1
 
 def verifylfiles(ui, repo, all=False, contents=False):
     '''Verify that every largefile revision in the current changeset
--- a/hgext/largefiles/lfutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/largefiles/lfutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -501,37 +501,37 @@
     return filelist
 
 def getlfilestoupload(repo, missing, addfunc):
-    progress = repo.ui.makeprogress(_('finding outgoing largefiles'),
-                                    unit=_('revisions'), total=len(missing))
-    for i, n in enumerate(missing):
-        progress.update(i)
-        parents = [p for p in repo[n].parents() if p != node.nullid]
+    makeprogress = repo.ui.makeprogress
+    with makeprogress(_('finding outgoing largefiles'),
+                      unit=_('revisions'), total=len(missing)) as progress:
+        for i, n in enumerate(missing):
+            progress.update(i)
+            parents = [p for p in repo[n].parents() if p != node.nullid]
 
-        oldlfstatus = repo.lfstatus
-        repo.lfstatus = False
-        try:
-            ctx = repo[n]
-        finally:
-            repo.lfstatus = oldlfstatus
+            oldlfstatus = repo.lfstatus
+            repo.lfstatus = False
+            try:
+                ctx = repo[n]
+            finally:
+                repo.lfstatus = oldlfstatus
 
-        files = set(ctx.files())
-        if len(parents) == 2:
-            mc = ctx.manifest()
-            mp1 = ctx.parents()[0].manifest()
-            mp2 = ctx.parents()[1].manifest()
-            for f in mp1:
-                if f not in mc:
-                    files.add(f)
-            for f in mp2:
-                if f not in mc:
-                    files.add(f)
-            for f in mc:
-                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
-                    files.add(f)
-        for fn in files:
-            if isstandin(fn) and fn in ctx:
-                addfunc(fn, readasstandin(ctx[fn]))
-    progress.complete()
+            files = set(ctx.files())
+            if len(parents) == 2:
+                mc = ctx.manifest()
+                mp1 = ctx.parents()[0].manifest()
+                mp2 = ctx.parents()[1].manifest()
+                for f in mp1:
+                    if f not in mc:
+                        files.add(f)
+                for f in mp2:
+                    if f not in mc:
+                        files.add(f)
+                for f in mc:
+                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+                        files.add(f)
+            for fn in files:
+                if isstandin(fn) and fn in ctx:
+                    addfunc(fn, readasstandin(ctx[fn]))
 
 def updatestandinsbymatch(repo, match):
     '''Update standins in the working directory according to specified match
--- a/hgext/lfs/__init__.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/lfs/__init__.py	Tue Sep 04 12:16:28 2018 -0400
@@ -136,7 +136,7 @@
     exchange,
     extensions,
     filelog,
-    fileset,
+    filesetlang,
     hg,
     localrepo,
     minifileset,
@@ -261,7 +261,7 @@
         # deprecated config: lfs.threshold
         threshold = repo.ui.configbytes('lfs', 'threshold')
         if threshold:
-            fileset.parse(trackspec)  # make sure syntax errors are confined
+            filesetlang.parse(trackspec)  # make sure syntax errors are confined
             trackspec = "(%s) | size('>%d')" % (trackspec, threshold)
 
         return minifileset.compile(trackspec)
@@ -357,11 +357,11 @@
     # when writing a bundle via "hg bundle" command, upload related LFS blobs
     wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
 
-@filesetpredicate('lfs()', callstatus=True)
+@filesetpredicate('lfs()')
 def lfsfileset(mctx, x):
     """File that uses LFS storage."""
     # i18n: "lfs" is a keyword
-    fileset.getargs(x, 0, 0, _("lfs takes no arguments"))
+    filesetlang.getargs(x, 0, 0, _("lfs takes no arguments"))
     ctx = mctx.ctx
     def lfsfilep(f):
         return wrapper.pointerfromctx(ctx, f, removed=True) is not None
--- a/hgext/lfs/blobstore.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/lfs/blobstore.py	Tue Sep 04 12:16:28 2018 -0400
@@ -405,8 +405,7 @@
         if len(objects) > 1:
             self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                          % (len(objects), util.bytecount(total)))
-        progress = self.ui.makeprogress(topic, total=total)
-        progress.update(0)
+
         def transfer(chunk):
             for obj in chunk:
                 objsize = obj.get('size', 0)
@@ -439,14 +438,15 @@
         else:
             oids = transfer(sorted(objects, key=lambda o: o.get('oid')))
 
-        processed = 0
-        blobs = 0
-        for _one, oid in oids:
-            processed += sizes[oid]
-            blobs += 1
-            progress.update(processed)
-            self.ui.note(_('lfs: processed: %s\n') % oid)
-        progress.complete()
+        with self.ui.makeprogress(topic, total=total) as progress:
+            progress.update(0)
+            processed = 0
+            blobs = 0
+            for _one, oid in oids:
+                processed += sizes[oid]
+                blobs += 1
+                progress.update(processed)
+                self.ui.note(_('lfs: processed: %s\n') % oid)
 
         if blobs > 0:
             if action == 'upload':
--- a/hgext/lfs/wrapper.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/lfs/wrapper.py	Tue Sep 04 12:16:28 2018 -0400
@@ -343,11 +343,15 @@
     """return a list of lfs pointers added by given revs"""
     repo.ui.debug('lfs: computing set of blobs to upload\n')
     pointers = {}
-    for r in revs:
-        ctx = repo[r]
-        for p in pointersfromctx(ctx).values():
-            pointers[p.oid()] = p
-    return sorted(pointers.values())
+
+    makeprogress = repo.ui.makeprogress
+    with makeprogress(_('lfs search'), _('changesets'), len(revs)) as progress:
+        for r in revs:
+            ctx = repo[r]
+            for p in pointersfromctx(ctx).values():
+                pointers[p.oid()] = p
+            progress.increment()
+        return sorted(pointers.values())
 
 def pointerfromctx(ctx, f, removed=False):
     """return a pointer for the named file from the given changectx, or None if
--- a/hgext/mq.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/mq.py	Tue Sep 04 12:16:28 2018 -0400
@@ -414,7 +414,7 @@
         the field and a blank line.'''
         if self.message:
             subj = 'subject: ' + self.message[0].lower()
-            for i in xrange(len(self.comments)):
+            for i in pycompat.xrange(len(self.comments)):
                 if subj == self.comments[i].lower():
                     del self.comments[i]
                     self.message = self.message[2:]
@@ -662,13 +662,13 @@
         exactneg = [g for g in patchguards
                     if g.startswith('-') and g[1:] in guards]
         if exactneg:
-            return False, pycompat.byterepr(exactneg[0])
+            return False, stringutil.pprint(exactneg[0])
         pos = [g for g in patchguards if g.startswith('+')]
         exactpos = [g for g in pos if g[1:] in guards]
         if pos:
             if exactpos:
-                return True, pycompat.byterepr(exactpos[0])
-            return False, ' '.join([pycompat.byterepr(p) for p in pos])
+                return True, stringutil.pprint(exactpos[0])
+            return False, ' '.join([stringutil.pprint(p) for p in pos])
         return True, ''
 
     def explainpushable(self, idx, all_patches=False):
@@ -1800,7 +1800,7 @@
                 # if the patch excludes a modified file, mark that
                 # file with mtime=0 so status can see it.
                 mm = []
-                for i in xrange(len(m) - 1, -1, -1):
+                for i in pycompat.xrange(len(m) - 1, -1, -1):
                     if not match1(m[i]):
                         mm.append(m[i])
                         del m[i]
@@ -1908,7 +1908,7 @@
         else:
             start = self.series.index(patch) + 1
         unapplied = []
-        for i in xrange(start, len(self.series)):
+        for i in pycompat.xrange(start, len(self.series)):
             pushable, reason = self.pushable(i)
             if pushable:
                 unapplied.append((i, self.series[i]))
@@ -1946,7 +1946,7 @@
         if not missing:
             if self.ui.verbose:
                 idxwidth = len("%d" % (start + length - 1))
-            for i in xrange(start, start + length):
+            for i in pycompat.xrange(start, start + length):
                 patch = self.series[i]
                 if patch in applied:
                     char, state = 'A', 'applied'
@@ -2091,7 +2091,7 @@
         def nextpatch(start):
             if all_patches or start >= len(self.series):
                 return start
-            for i in xrange(start, len(self.series)):
+            for i in pycompat.xrange(start, len(self.series)):
                 p, reason = self.pushable(i)
                 if p:
                     return i
@@ -2876,7 +2876,7 @@
         if args or opts.get(r'none'):
             raise error.Abort(_('cannot mix -l/--list with options or '
                                'arguments'))
-        for i in xrange(len(q.series)):
+        for i in pycompat.xrange(len(q.series)):
             status(i)
         return
     if not args or args[0][0:1] in '-+':
@@ -3179,14 +3179,16 @@
     pushable = lambda i: q.pushable(q.applied[i].name)[0]
     if args or opts.get('none'):
         old_unapplied = q.unapplied(repo)
-        old_guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+        old_guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
         q.setactive(args)
         q.savedirty()
         if not args:
             ui.status(_('guards deactivated\n'))
         if not opts.get('pop') and not opts.get('reapply'):
             unapplied = q.unapplied(repo)
-            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+            guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
             if len(unapplied) != len(old_unapplied):
                 ui.status(_('number of unguarded, unapplied patches has '
                             'changed from %d to %d\n') %
@@ -3225,7 +3227,7 @@
     reapply = opts.get('reapply') and q.applied and q.applied[-1].name
     popped = False
     if opts.get('pop') or opts.get('reapply'):
-        for i in xrange(len(q.applied)):
+        for i in pycompat.xrange(len(q.applied)):
             if not pushable(i):
                 ui.status(_('popping guarded patches\n'))
                 popped = True
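
The mq.py hunks above replace bare xrange with pycompat.xrange so the loops run under both Python 2 and 3. A minimal sketch of such a shim (illustrative only; the real one is mercurial.pycompat.xrange):

    # rangecompat.py - minimal Python 2/3 range shim, for illustration
    import sys

    if sys.version_info[0] >= 3:
        xrange = range  # Python 3: range is already a lazy sequence
    # on Python 2 the builtin xrange is used as-is

    for i in xrange(5 - 1, -1, -1):  # reverse index walk, as in the mq hunks
        print(i)  # 4 3 2 1 0
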
--- a/hgext/narrow/__init__.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/narrow/__init__.py	Tue Sep 04 12:16:28 2018 -0400
@@ -15,17 +15,15 @@
 testedwith = 'ships-with-hg-core'
 
 from mercurial import (
-    changegroup,
     extensions,
-    hg,
     localrepo,
     registrar,
+    repository,
     verify as verifymod,
 )
 
 from . import (
     narrowbundle2,
-    narrowchangegroup,
     narrowcommands,
     narrowcopies,
     narrowpatch,
@@ -55,7 +53,7 @@
 cmdtable = narrowcommands.table
 
 def featuresetup(ui, features):
-    features.add(changegroup.NARROW_REQUIREMENT)
+    features.add(repository.NARROW_REQUIREMENT)
 
 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
@@ -63,7 +61,6 @@
     narrowrevlog.setup()
     narrowbundle2.setup()
     narrowcommands.setup()
-    narrowchangegroup.setup()
     narrowwirepeer.uisetup()
 
 def reposetup(ui, repo):
@@ -71,7 +68,7 @@
     if not repo.local():
         return
 
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowcopies.setup(repo)
         narrowpatch.setup(repo)
@@ -86,8 +83,6 @@
 
 def extsetup(ui):
     extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
-    extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
-    extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
 
 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
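
featuresetup() and reposetup() above now gate narrow behaviour on repository.NARROW_REQUIREMENT instead of the constant that used to live in changegroup. A toy illustration of that requirement check, assuming the constant's value for the sketch:

    # Toy requirement gate; the value of the constant is an assumption here.
    NARROW_REQUIREMENT = 'narrowhg-experimental'

    class FakeRepo(object):
        def __init__(self, requirements):
            self.requirements = set(requirements)

    def isnarrow(repo):
        # Mirrors the check reposetup() performs before wrapping the repo.
        return NARROW_REQUIREMENT in repo.requirements

    print(isnarrow(FakeRepo(['revlogv1'])))                      # False
    print(isnarrow(FakeRepo(['revlogv1', NARROW_REQUIREMENT])))  # True
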
--- a/hgext/narrow/narrowbundle2.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/narrow/narrowbundle2.py	Tue Sep 04 12:16:28 2018 -0400
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import collections
 import errno
 import struct
 
@@ -15,17 +14,16 @@
 from mercurial.node import (
     bin,
     nullid,
-    nullrev,
 )
 from mercurial import (
     bundle2,
     changegroup,
-    dagutil,
     error,
     exchange,
     extensions,
     narrowspec,
     repair,
+    repository,
     util,
     wireprototypes,
 )
@@ -52,171 +50,12 @@
     caps[NARROWCAP] = ['v0']
     return caps
 
-def _computeellipsis(repo, common, heads, known, match, depth=None):
-    """Compute the shape of a narrowed DAG.
-
-    Args:
-      repo: The repository we're transferring.
-      common: The roots of the DAG range we're transferring.
-              May be just [nullid], which means all ancestors of heads.
-      heads: The heads of the DAG range we're transferring.
-      match: The narrowmatcher that allows us to identify relevant changes.
-      depth: If not None, only consider nodes to be full nodes if they are at
-             most depth changesets away from one of heads.
-
-    Returns:
-      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
-
-        visitnodes: The list of nodes (either full or ellipsis) which
-                    need to be sent to the client.
-        relevant_nodes: The set of changelog nodes which change a file inside
-                 the narrowspec. The client needs these as non-ellipsis nodes.
-        ellipsisroots: A dict of {rev: parents} that is used in
-                       narrowchangegroup to produce ellipsis nodes with the
-                       correct parents.
-    """
-    cl = repo.changelog
-    mfl = repo.manifestlog
-
-    cldag = dagutil.revlogdag(cl)
-    # dagutil does not like nullid/nullrev
-    commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
-    headsrevs = cldag.internalizeall(heads)
-    if depth:
-        revdepth = {h: 0 for h in headsrevs}
-
-    ellipsisheads = collections.defaultdict(set)
-    ellipsisroots = collections.defaultdict(set)
-
-    def addroot(head, curchange):
-        """Add a root to an ellipsis head, splitting heads with 3 roots."""
-        ellipsisroots[head].add(curchange)
-        # Recursively split ellipsis heads with 3 roots by finding the
-        # roots' youngest common descendant which is an elided merge commit.
-        # That descendant takes 2 of the 3 roots as its own, and becomes a
-        # root of the head.
-        while len(ellipsisroots[head]) > 2:
-            child, roots = splithead(head)
-            splitroots(head, child, roots)
-            head = child  # Recurse in case we just added a 3rd root
-
-    def splitroots(head, child, roots):
-        ellipsisroots[head].difference_update(roots)
-        ellipsisroots[head].add(child)
-        ellipsisroots[child].update(roots)
-        ellipsisroots[child].discard(child)
-
-    def splithead(head):
-        r1, r2, r3 = sorted(ellipsisroots[head])
-        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
-            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
-                            nr1, head, nr2, head)
-            for j in mid:
-                if j == nr2:
-                    return nr2, (nr1, nr2)
-                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
-                    return j, (nr1, nr2)
-        raise error.Abort('Failed to split up ellipsis node! head: %d, '
-                          'roots: %d %d %d' % (head, r1, r2, r3))
+def getbundlechangegrouppart_widen(bundler, repo, source, bundlecaps=None,
+                                   b2caps=None, heads=None, common=None,
+                                   **kwargs):
+    """Handling changegroup changegroup generation on the server when user
+    is widening their narrowspec"""
 
-    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
-    visit = reversed(missing)
-    relevant_nodes = set()
-    visitnodes = [cl.node(m) for m in missing]
-    required = set(headsrevs) | known
-    for rev in visit:
-        clrev = cl.changelogrevision(rev)
-        ps = cldag.parents(rev)
-        if depth is not None:
-            curdepth = revdepth[rev]
-            for p in ps:
-                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
-        needed = False
-        shallow_enough = depth is None or revdepth[rev] <= depth
-        if shallow_enough:
-            curmf = mfl[clrev.manifest].read()
-            if ps:
-                # We choose to not trust the changed files list in
-                # changesets because it's not always correct. TODO: could
-                # we trust it for the non-merge case?
-                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
-                needed = bool(curmf.diff(p1mf, match))
-                if not needed and len(ps) > 1:
-                    # For merge changes, the list of changed files is not
-                    # helpful, since we need to emit the merge if a file
-                    # in the narrow spec has changed on either side of the
-                    # merge. As a result, we do a manifest diff to check.
-                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
-                    needed = bool(curmf.diff(p2mf, match))
-            else:
-                # For a root node, we need to include the node if any
-                # files in the node match the narrowspec.
-                needed = any(curmf.walk(match))
-
-        if needed:
-            for head in ellipsisheads[rev]:
-                addroot(head, rev)
-            for p in ps:
-                required.add(p)
-            relevant_nodes.add(cl.node(rev))
-        else:
-            if not ps:
-                ps = [nullrev]
-            if rev in required:
-                for head in ellipsisheads[rev]:
-                    addroot(head, rev)
-                for p in ps:
-                    ellipsisheads[p].add(rev)
-            else:
-                for p in ps:
-                    ellipsisheads[p] |= ellipsisheads[rev]
-
-    # add common changesets as roots of their reachable ellipsis heads
-    for c in commonrevs:
-        for head in ellipsisheads[c]:
-            addroot(head, c)
-    return visitnodes, relevant_nodes, ellipsisroots
-
-def _packellipsischangegroup(repo, common, match, relevant_nodes,
-                             ellipsisroots, visitnodes, depth, source, version):
-    if version in ('01', '02'):
-        raise error.Abort(
-            'ellipsis nodes require at least cg3 on client and server, '
-            'but negotiated version %s' % version)
-    # We wrap cg1packer.revchunk, using a side channel to pass
-    # relevant_nodes into that area. Then if linknode isn't in the
-    # set, we know we have an ellipsis node and we should defer
-    # sending that node's data. We override close() to detect
-    # pending ellipsis nodes and flush them.
-    packer = changegroup.getbundler(version, repo)
-    # Let the packer have access to the narrow matcher so it can
-    # omit filelogs and dirlogs as needed
-    packer._narrow_matcher = lambda : match
-    # Give the packer the list of nodes which should not be
-    # ellipsis nodes. We store this rather than the set of nodes
-    # that should be an ellipsis because for very large histories
-    # we expect this to be significantly smaller.
-    packer.full_nodes = relevant_nodes
-    # Maps ellipsis revs to their roots at the changelog level.
-    packer.precomputed_ellipsis = ellipsisroots
-    # Maps CL revs to per-revlog revisions. Cleared in close() at
-    # the end of each group.
-    packer.clrev_to_localrev = {}
-    packer.next_clrev_to_localrev = {}
-    # Maps changelog nodes to changelog revs. Filled in once
-    # during changelog stage and then left unmodified.
-    packer.clnode_to_rev = {}
-    packer.changelog_done = False
-    # If true, informs the packer that it is serving shallow content and might
-    # need to pack file contents not introduced by the changes being packed.
-    packer.is_shallow = depth is not None
-
-    return packer.generate(common, visitnodes, False, source)
-
-# Serve a changegroup for a client with a narrow clone.
-def getbundlechangegrouppart_narrow(bundler, repo, source,
-                                    bundlecaps=None, b2caps=None, heads=None,
-                                    common=None, **kwargs):
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
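
The removed _computeellipsis() above (the later hunks call its new home, exchange._computeellipsis) enforced the invariant that every ellipsis head keeps at most two roots, recursively pushing surplus roots down onto an elided merge descendant. A toy rendering of just that bookkeeping, with made-up node numbers:

    import collections

    # Toy version of the root-splitting step: a head with three roots hands
    # two of them to a descendant child, which then becomes a root itself.
    ellipsisroots = collections.defaultdict(set)

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    ellipsisroots[10] = {1, 2, 3}      # head 10 has three roots: one too many
    splitroots(10, 7, (1, 2))          # reassign roots 1 and 2 to child 7
    print(sorted(ellipsisroots[10]))   # [3, 7]
    print(sorted(ellipsisroots[7]))    # [1, 2]
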
@@ -231,32 +70,50 @@
     include = sorted(filter(bool, kwargs.get(r'includepats', [])))
     exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
-    if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
-        outgoing = exchange._computeoutgoing(repo, heads, common)
-        if not outgoing.missing:
-            return
-        def wrappedgetbundler(orig, *args, **kwargs):
-            bundler = orig(*args, **kwargs)
-            bundler._narrow_matcher = lambda : newmatch
-            return bundler
-        with extensions.wrappedfunction(changegroup, 'getbundler',
-                                        wrappedgetbundler):
-            cg = changegroup.makestream(repo, outgoing, version, source)
-        part = bundler.newpart('changegroup', data=cg)
-        part.addparam('version', version)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
+    oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
+    oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
+    common = set(common or [nullid])
+
+    if (oldinclude != include or oldexclude != exclude):
+        common = repo.revs("::%ln", common)
+        commonnodes = set()
+        cl = repo.changelog
+        for c in common:
+            commonnodes.add(cl.node(c))
+        if commonnodes:
+            # XXX: we should only send the filelogs (and treemanifest); the
+            # user already has the changelog and manifest
+            packer = changegroup.getbundler(version, repo,
+                                            filematcher=newmatch,
+                                            fullnodes=commonnodes)
+            cgdata = packer.generate(set([nullid]), list(commonnodes), False,
+                                     source)
 
-        if include or exclude:
-            narrowspecpart = bundler.newpart(_SPECPART)
-            if include:
-                narrowspecpart.addparam(
-                    _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
-            if exclude:
-                narrowspecpart.addparam(
-                    _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
+            part = bundler.newpart('changegroup', data=cgdata)
+            part.addparam('version', version)
+            if 'treemanifest' in repo.requirements:
+                part.addparam('treemanifest', '1')
+
+# Serve a changegroup for a client with a narrow clone.
+def getbundlechangegrouppart_narrow(bundler, repo, source,
+                                    bundlecaps=None, b2caps=None, heads=None,
+                                    common=None, **kwargs):
+    assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')
 
-        return
+    cgversions = b2caps.get('changegroup')
+    if cgversions:  # 3.1 and 3.2 ship with an empty value
+        cgversions = [v for v in cgversions
+                      if v in changegroup.supportedoutgoingversions(repo)]
+        if not cgversions:
+            raise ValueError(_('no common changegroup version'))
+        version = max(cgversions)
+    else:
+        raise ValueError(_("server does not advertise changegroup version,"
+                           " can't negotiate support for ellipsis nodes"))
+
+    include = sorted(filter(bool, kwargs.get(r'includepats', [])))
+    exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+    newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
 
     depth = kwargs.get(r'depth', None)
     if depth is not None:
@@ -300,72 +157,46 @@
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
-        newvisit, newfull, newellipsis = _computeellipsis(
+        newvisit, newfull, newellipsis = exchange._computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
-            cg = _packellipsischangegroup(
-                repo, common, newmatch, newfull, newellipsis,
-                newvisit, depth, source, version)
-            part = bundler.newpart('changegroup', data=cg)
+            packer = changegroup.getbundler(version, repo,
+                                            filematcher=newmatch,
+                                            ellipses=True,
+                                            shallow=depth is not None,
+                                            ellipsisroots=newellipsis,
+                                            fullnodes=newfull)
+            cgdata = packer.generate(common, newvisit, False, source)
+
+            part = bundler.newpart('changegroup', data=cgdata)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')
 
-    visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
+    visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)
 
     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
-        cg = _packellipsischangegroup(
-            repo, common, newmatch, relevant_nodes, ellipsisroots,
-            visitnodes, depth, source, version)
-        part = bundler.newpart('changegroup', data=cg)
+        packer = changegroup.getbundler(version, repo,
+                                        filematcher=newmatch,
+                                        ellipses=True,
+                                        shallow=depth is not None,
+                                        ellipsisroots=ellipsisroots,
+                                        fullnodes=relevant_nodes)
+        cgdata = packer.generate(common, visitnodes, False, source)
+
+        part = bundler.newpart('changegroup', data=cgdata)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')
 
-def applyacl_narrow(repo, kwargs):
-    ui = repo.ui
-    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
-    user_includes = ui.configlist(
-        _NARROWACL_SECTION, username + '.includes',
-        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
-    user_excludes = ui.configlist(
-        _NARROWACL_SECTION, username + '.excludes',
-        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
-    if not user_includes:
-        raise error.Abort(_("{} configuration for user {} is empty")
-                          .format(_NARROWACL_SECTION, username))
-
-    user_includes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_includes]
-    user_excludes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
-
-    req_includes = set(kwargs.get(r'includepats', []))
-    req_excludes = set(kwargs.get(r'excludepats', []))
-
-    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
-        req_includes, req_excludes, user_includes, user_excludes)
-
-    if invalid_includes:
-        raise error.Abort(
-            _("The following includes are not accessible for {}: {}")
-            .format(username, invalid_includes))
-
-    new_args = {}
-    new_args.update(kwargs)
-    new_args['includepats'] = req_includes
-    if req_excludes:
-        new_args['excludepats'] = req_excludes
-    return new_args
-
 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
-    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
-        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+    if not repository.NARROW_REQUIREMENT in op.repo.requirements:
+        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
         op.repo._writerequirements()
     op.repo.setnarrowpats(includepats, excludepats)
 
@@ -467,6 +298,7 @@
     getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
 
     getbundleargs['narrow'] = 'boolean'
+    getbundleargs['widen'] = 'boolean'
     getbundleargs['depth'] = 'plain'
     getbundleargs['oldincludepats'] = 'csv'
     getbundleargs['oldexcludepats'] = 'csv'
@@ -479,27 +311,17 @@
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
-            getbundlechangegrouppart_narrow(
-                *args, **applyacl_narrow(repo, kwargs))
-        elif kwargs.get(r'narrow', False):
+            kwargs = exchange.applynarrowacl(repo, kwargs)
+
+        if (kwargs.get(r'narrow', False) and
+            repo.ui.configbool('experimental', 'narrowservebrokenellipses')):
             getbundlechangegrouppart_narrow(*args, **kwargs)
+        elif kwargs.get(r'widen', False) and kwargs.get(r'narrow', False):
+            getbundlechangegrouppart_widen(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
 
-    # disable rev branch cache exchange when serving a narrow bundle
-    # (currently incompatible with that part)
-    origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']
-    def wrappedcgfn(*args, **kwargs):
-        repo = args[1]
-        if repo.ui.has_section(_NARROWACL_SECTION):
-            return
-        elif kwargs.get(r'narrow', False):
-            return
-        else:
-            origrbcfn(*args, **kwargs)
-    exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn
-
     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
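
Both getbundlechangegrouppart_widen() and getbundlechangegrouppart_narrow() above negotiate the changegroup version the same way: intersect the versions the client advertises with the ones the server can emit and take the maximum (string comparison works because the version names sort lexically). A standalone sketch of that negotiation, with made-up inputs:

    def negotiateversion(clientversions, serversupported):
        """Pick the newest changegroup version both sides support."""
        common = [v for v in clientversions if v in serversupported]
        if not common:
            raise ValueError('no common changegroup version')
        return max(common)

    print(negotiateversion(['01', '02', '03'], {'02', '03'}))  # 03
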
--- a/hgext/narrow/narrowchangegroup.py	Tue Sep 04 11:59:12 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,373 +0,0 @@
-# narrowchangegroup.py - narrow clone changegroup creation and consumption
-#
-# Copyright 2017 Google, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from mercurial.i18n import _
-from mercurial import (
-    changegroup,
-    error,
-    extensions,
-    manifest,
-    match as matchmod,
-    mdiff,
-    node,
-    revlog,
-    util,
-)
-
-def setup():
-
-    def _cgmatcher(cgpacker):
-        localmatcher = cgpacker._repo.narrowmatch()
-        remotematcher = getattr(cgpacker, '_narrow_matcher', lambda: None)()
-        if remotematcher:
-            return matchmod.intersectmatchers(localmatcher, remotematcher)
-        else:
-            return localmatcher
-
-    def prune(orig, self, revlog, missing, commonrevs):
-        if isinstance(revlog, manifest.manifestrevlog):
-            matcher = _cgmatcher(self)
-            if (matcher and
-                not matcher.visitdir(revlog._dir[:-1] or '.')):
-                return []
-        return orig(self, revlog, missing, commonrevs)
-
-    extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
-
-    def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
-                      source):
-        matcher = _cgmatcher(self)
-        if matcher:
-            changedfiles = list(filter(matcher, changedfiles))
-        if getattr(self, 'is_shallow', False):
-            # See comment in generate() for why this sadness is a thing.
-            mfdicts = self._mfdicts
-            del self._mfdicts
-            # In a shallow clone, the linknodes callback needs to also include
-            # those file nodes that are in the manifests we sent but weren't
-            # introduced by those manifests.
-            commonctxs = [self._repo[c] for c in commonrevs]
-            oldlinknodes = linknodes
-            clrev = self._repo.changelog.rev
-            def linknodes(flog, fname):
-                for c in commonctxs:
-                    try:
-                        fnode = c.filenode(fname)
-                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
-                    except error.ManifestLookupError:
-                        pass
-                links = oldlinknodes(flog, fname)
-                if len(links) != len(mfdicts):
-                    for mf, lr in mfdicts:
-                        fnode = mf.get(fname, None)
-                        if fnode in links:
-                            links[fnode] = min(links[fnode], lr, key=clrev)
-                        elif fnode:
-                            links[fnode] = lr
-                return links
-        return orig(self, changedfiles, linknodes, commonrevs, source)
-    extensions.wrapfunction(
-        changegroup.cg1packer, 'generatefiles', generatefiles)
-
-    def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
-        n = revlog_.node(rev)
-        p1n, p2n = revlog_.node(p1), revlog_.node(p2)
-        flags = revlog_.flags(rev)
-        flags |= revlog.REVIDX_ELLIPSIS
-        meta = packer.builddeltaheader(
-            n, p1n, p2n, node.nullid, linknode, flags)
-        # TODO: try and actually send deltas for ellipsis data blocks
-        diffheader = mdiff.trivialdiffheader(len(data))
-        l = len(meta) + len(diffheader) + len(data)
-        return ''.join((changegroup.chunkheader(l),
-                        meta,
-                        diffheader,
-                        data))
-
-    def close(orig, self):
-        getattr(self, 'clrev_to_localrev', {}).clear()
-        if getattr(self, 'next_clrev_to_localrev', {}):
-            self.clrev_to_localrev = self.next_clrev_to_localrev
-            del self.next_clrev_to_localrev
-        self.changelog_done = True
-        return orig(self)
-    extensions.wrapfunction(changegroup.cg1packer, 'close', close)
-
-    # In a perfect world, we'd generate better ellipsis-ified graphs
-    # for non-changelog revlogs. In practice, we haven't started doing
-    # that yet, so the resulting DAGs for the manifestlog and filelogs
-    # are actually full of bogus parentage on all the ellipsis
-    # nodes. This has the side effect that, while the contents are
-    # correct, the individual DAGs might be completely out of whack in
-    # a case like 882681bc3166 and its ancestors (back about 10
-    # revisions or so) in the main hg repo.
-    #
-    # The one invariant we *know* holds is that the new (potentially
-    # bogus) DAG shape will be valid if we order the nodes in the
-    # order that they're introduced in dramatis personae by the
-    # changelog, so what we do is we sort the non-changelog histories
-    # by the order in which they are used by the changelog.
-    def _sortgroup(orig, self, revlog, nodelist, lookup):
-        if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
-            return orig(self, revlog, nodelist, lookup)
-        key = lambda n: self.clnode_to_rev[lookup(n)]
-        return [revlog.rev(n) for n in sorted(nodelist, key=key)]
-
-    extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
-
-    def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
-        '''yield a sequence of changegroup chunks (strings)'''
-        # Note: other than delegating to orig, the only deviation in
-        # logic from normal hg's generate is marked with BEGIN/END
-        # NARROW HACK.
-        if not util.safehasattr(self, 'full_nodes'):
-            # not sending a narrow bundle
-            for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
-                yield x
-            return
-
-        repo = self._repo
-        cl = repo.changelog
-        mfl = repo.manifestlog
-        mfrevlog = mfl._revlog
-
-        clrevorder = {}
-        mfs = {} # needed manifests
-        fnodes = {} # needed file nodes
-        changedfiles = set()
-
-        # Callback for the changelog, used to collect changed files and manifest
-        # nodes.
-        # Returns the linkrev node (identity in the changelog case).
-        def lookupcl(x):
-            c = cl.read(x)
-            clrevorder[x] = len(clrevorder)
-            # BEGIN NARROW HACK
-            #
-            # Only update mfs if x is going to be sent. Otherwise we
-            # end up with bogus linkrevs specified for manifests and
-            # we skip some manifest nodes that we should otherwise
-            # have sent.
-            if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
-                n = c[0]
-                # record the first changeset introducing this manifest version
-                mfs.setdefault(n, x)
-                # Set this narrow-specific dict so we have the lowest manifest
-                # revnum to look up for this cl revnum. (Part of mapping
-                # changelog ellipsis parents to manifest ellipsis parents)
-                self.next_clrev_to_localrev.setdefault(cl.rev(x),
-                                                       mfrevlog.rev(n))
-            # We can't trust the changed files list in the changeset if the
-            # client requested a shallow clone.
-            if self.is_shallow:
-                changedfiles.update(mfl[c[0]].read().keys())
-            else:
-                changedfiles.update(c[3])
-            # END NARROW HACK
-            # Record a complete list of potentially-changed files in
-            # this manifest.
-            return x
-
-        self._verbosenote(_('uncompressed size of bundle content:\n'))
-        size = 0
-        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
-            size += len(chunk)
-            yield chunk
-        self._verbosenote(_('%8.i (changelog)\n') % size)
-
-        # We need to make sure that the linkrev in the changegroup refers to
-        # the first changeset that introduced the manifest or file revision.
-        # The fastpath is usually safer than the slowpath, because the filelogs
-        # are walked in revlog order.
-        #
-        # When taking the slowpath with reorder=None and the manifest revlog
-        # uses generaldelta, the manifest may be walked in the "wrong" order.
-        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
-        # cc0ff93d0c0c).
-        #
-        # When taking the fastpath, we are only vulnerable to reordering
-        # of the changelog itself. The changelog never uses generaldelta, so
-        # it is only reordered when reorder=True. To handle this case, we
-        # simply take the slowpath, which already has the 'clrevorder' logic.
-        # This was also fixed in cc0ff93d0c0c.
-        fastpathlinkrev = fastpathlinkrev and not self._reorder
-        # Treemanifests don't work correctly with fastpathlinkrev
-        # either, because we don't discover which directory nodes to
-        # send along with files. This could probably be fixed.
-        fastpathlinkrev = fastpathlinkrev and (
-            'treemanifest' not in repo.requirements)
-        # Shallow clones also don't work correctly with fastpathlinkrev
-        # because file nodes may need to be sent for a manifest even if they
-        # weren't introduced by that manifest.
-        fastpathlinkrev = fastpathlinkrev and not self.is_shallow
-
-        for chunk in self.generatemanifests(commonrevs, clrevorder,
-                fastpathlinkrev, mfs, fnodes, source):
-            yield chunk
-        # BEGIN NARROW HACK
-        mfdicts = None
-        if self.is_shallow:
-            mfdicts = [(self._repo.manifestlog[n].read(), lr)
-                       for (n, lr) in mfs.iteritems()]
-        # END NARROW HACK
-        mfs.clear()
-        clrevs = set(cl.rev(x) for x in clnodes)
-
-        if not fastpathlinkrev:
-            def linknodes(unused, fname):
-                return fnodes.get(fname, {})
-        else:
-            cln = cl.node
-            def linknodes(filerevlog, fname):
-                llr = filerevlog.linkrev
-                fln = filerevlog.node
-                revs = ((r, llr(r)) for r in filerevlog)
-                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
-
-        # BEGIN NARROW HACK
-        #
-        # We need to pass the mfdicts variable down into
-        # generatefiles(), but more than one command might have
-        # wrapped generatefiles so we can't modify the function
-        # signature. Instead, we pass the data to ourselves using an
-        # instance attribute. I'm sorry.
-        self._mfdicts = mfdicts
-        # END NARROW HACK
-        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
-                                        source):
-            yield chunk
-
-        yield self.close()
-
-        if clnodes:
-            repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
-    extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
-
-    def revchunk(orig, self, revlog, rev, prev, linknode):
-        if not util.safehasattr(self, 'full_nodes'):
-            # not sending a narrow changegroup
-            for x in orig(self, revlog, rev, prev, linknode):
-                yield x
-            return
-        # build up some mapping information that's useful later. See
-        # the local() nested function below.
-        if not self.changelog_done:
-            self.clnode_to_rev[linknode] = rev
-            linkrev = rev
-            self.clrev_to_localrev[linkrev] = rev
-        else:
-            linkrev = self.clnode_to_rev[linknode]
-            self.clrev_to_localrev[linkrev] = rev
-        # This is a node to send in full, because the changeset it
-        # corresponds to was a full changeset.
-        if linknode in self.full_nodes:
-            for x in orig(self, revlog, rev, prev, linknode):
-                yield x
-            return
-        # At this point, a node can either be one we should skip or an
-        # ellipsis. If it's not an ellipsis, bail immediately.
-        if linkrev not in self.precomputed_ellipsis:
-            return
-        linkparents = self.precomputed_ellipsis[linkrev]
-        def local(clrev):
-            """Turn a changelog revnum into a local revnum.
-
-            The ellipsis dag is stored as revnums on the changelog,
-            but when we're producing ellipsis entries for
-            non-changelog revlogs, we need to turn those numbers into
-            something local. This does that for us, and during the
-            changelog sending phase will also expand the stored
-            mappings as needed.
-            """
-            if clrev == node.nullrev:
-                return node.nullrev
-            if not self.changelog_done:
-                # If we're doing the changelog, it's possible that we
-                # have a parent that is already on the client, and we
-                # need to store some extra mapping information so that
-                # our contained ellipsis nodes will be able to resolve
-                # their parents.
-                if clrev not in self.clrev_to_localrev:
-                    clnode = revlog.node(clrev)
-                    self.clnode_to_rev[clnode] = clrev
-                return clrev
-            # Walk the ellipsis-ized changelog breadth-first looking for a
-            # change that has been linked from the current revlog.
-            #
-            # For a flat manifest revlog only a single step should be necessary
-            # as all relevant changelog entries are relevant to the flat
-            # manifest.
-            #
-            # For a filelog or tree manifest dirlog however not every changelog
-            # entry will have been relevant, so we need to skip some changelog
-            # nodes even after ellipsis-izing.
-            walk = [clrev]
-            while walk:
-                p = walk[0]
-                walk = walk[1:]
-                if p in self.clrev_to_localrev:
-                    return self.clrev_to_localrev[p]
-                elif p in self.full_nodes:
-                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
-                                    if pp != node.nullrev])
-                elif p in self.precomputed_ellipsis:
-                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
-                                    if pp != node.nullrev])
-                else:
-                    # In this case, we've got an ellipsis with parents
-                    # outside the current bundle (likely an
-                    # incremental pull). We "know" that we can use the
-                    # value of this same revlog at whatever revision
-                    # is pointed to by linknode. "Know" is in scare
-                    # quotes because I haven't done enough examination
-                    # of edge cases to convince myself this is really
-                    # a fact - it works for all the (admittedly
-                    # thorough) cases in our testsuite, but I would be
-                    # somewhat unsurprised to find a case in the wild
-                    # where this breaks down a bit. That said, I don't
-                    # know if it would hurt anything.
-                    for i in xrange(rev, 0, -1):
-                        if revlog.linkrev(i) == clrev:
-                            return i
-                    # We failed to resolve a parent for this node, so
-                    # we crash the changegroup construction.
-                    raise error.Abort(
-                        'unable to resolve parent while packing %r %r'
-                        ' for changeset %r' % (revlog.indexfile, rev, clrev))
-            return node.nullrev
-
-        if not linkparents or (
-            revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
-            p1, p2 = node.nullrev, node.nullrev
-        elif len(linkparents) == 1:
-            p1, = sorted(local(p) for p in linkparents)
-            p2 = node.nullrev
-        else:
-            p1, p2 = sorted(local(p) for p in linkparents)
-        n = revlog.node(rev)
-        yield ellipsisdata(
-            self, rev, revlog, p1, p2, revlog.revision(n), linknode)
-    extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
-
-    def deltaparent(orig, self, revlog, rev, p1, p2, prev):
-        if util.safehasattr(self, 'full_nodes'):
-            # TODO: send better deltas when in narrow mode.
-            #
-            # changegroup.group() loops over revisions to send,
-            # including revisions we'll skip. What this means is that
-            # `prev` will be a potentially useless delta base for all
-            # ellipsis nodes, as the client likely won't have it. In
-            # the future we should do bookkeeping about which nodes
-            # have been sent to the client, and try to be
-            # significantly smarter about delta bases. This is
-            # slightly tricky because this same code has to work for
-            # all revlogs, and we don't have the linkrev/linknode here.
-            return p1
-        return orig(self, revlog, rev, p1, p2, prev)
-    extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
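
The deleted local() helper above documented a breadth-first walk over the ellipsis-ized changelog to map a changelog rev onto a local revlog rev. A toy version of just that walk, under assumed data structures (a dict-of-parents DAG instead of real revlogs):

    import collections

    # Starting from an ellipsis changelog rev, walk toward its ancestors
    # until one of them has a known mapping into the current revlog.
    def findlocalrev(clrev, clrev_to_localrev, parents):
        walk = collections.deque([clrev])
        while walk:
            p = walk.popleft()
            if p in clrev_to_localrev:
                return clrev_to_localrev[p]
            walk.extend(parents.get(p, ()))
        return None  # the real revchunk() aborted at this point

    mapping = {2: 0}                  # changelog rev 2 is rev 0 in this revlog
    dag = {5: [4], 4: [2, 3], 3: []}  # made-up parent relationships
    print(findlocalrev(5, mapping, dag))  # 0
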
--- a/hgext/narrow/narrowcommands.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/narrow/narrowcommands.py	Tue Sep 04 12:16:28 2018 -0400
@@ -7,10 +7,10 @@
 from __future__ import absolute_import
 
 import itertools
+import os
 
 from mercurial.i18n import _
 from mercurial import (
-    changegroup,
     cmdutil,
     commands,
     discovery,
@@ -24,7 +24,9 @@
     pycompat,
     registrar,
     repair,
+    repository,
     repoview,
+    sparse,
     util,
 )
 
@@ -43,6 +45,8 @@
                      _("create a narrow clone of select files")))
     entry[1].append(('', 'depth', '',
                      _("limit the history fetched by distance from heads")))
+    entry[1].append(('', 'narrowspec', '',
+                     _("read narrowspecs from file")))
     # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
     if 'sparse' not in extensions.enabled():
         entry[1].append(('', 'include', [],
@@ -73,6 +77,27 @@
     opts = pycompat.byteskwargs(opts)
     wrappedextraprepare = util.nullcontextmanager()
     opts_narrow = opts['narrow']
+    narrowspecfile = opts['narrowspec']
+
+    if narrowspecfile:
+        filepath = os.path.join(pycompat.getcwd(), narrowspecfile)
+        ui.status(_("reading narrowspec from '%s'\n") % filepath)
+        try:
+            fp = open(filepath, 'rb')
+        except IOError:
+            raise error.Abort(_("file '%s' not found") % filepath)
+
+        includes, excludes, profiles = sparse.parseconfig(ui, fp.read(),
+                                                          'narrow')
+        if profiles:
+            raise error.Abort(_("cannot specify other files using '%include' in"
+                                " narrowspec"))
+
+        # a narrowspec was passed, so assume the user wants a narrow clone
+        opts_narrow = True
+        opts['include'].extend(includes)
+        opts['exclude'].extend(excludes)
+
     if opts_narrow:
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             # Create narrow spec patterns from clone flags
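
Because the new --narrowspec flag reads the file with sparse.parseconfig(), the expected on-disk format should be the sparse-profile syntax: [include] and [exclude] sections holding one pattern per line, with %include lines rejected above. A plausible example file, assuming that syntax:

    [include]
    path:src
    path:tools/build
    [exclude]
    path:src/tests

Passing such a file via hg clone --narrowspec FILE also turns on narrow mode implicitly, since the hunk above sets opts_narrow when a narrowspec is given.
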
@@ -101,7 +126,7 @@
 
     def pullnarrow(orig, repo, *args, **kwargs):
         if opts_narrow:
-            repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+            repo.requirements.add(repository.NARROW_REQUIREMENT)
             repo._writerequirements()
 
         return orig(repo, *args, **kwargs)
@@ -114,7 +139,7 @@
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
@@ -128,7 +153,7 @@
 
 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
         includes = set(opts.get(r'include', []))
         excludes = set(opts.get(r'exclude', []))
@@ -142,7 +167,7 @@
 
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
-    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
     if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
@@ -261,6 +286,7 @@
         # The old{in,ex}cludepats have already been set by orig()
         kwargs['includepats'] = newincludes
         kwargs['excludepats'] = newexcludes
+        kwargs['widen'] = True
     wrappedextraprepare = extensions.wrappedfunction(exchange,
         '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
 
@@ -281,6 +307,7 @@
         with ds.parentchange():
             ds.setparents(p1, p2)
 
+        repo.setnewnarrowpats()
         actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
         addgaction = actions['g'].append
 
@@ -331,7 +358,7 @@
     empty and will not match any files.
     """
     opts = pycompat.byteskwargs(opts)
-    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
        ui.warn(_('The narrow command is only supported on repositories cloned'
                   ' with --narrow.\n'))
         return 1
--- a/hgext/narrow/narrowdirstate.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/narrow/narrowdirstate.py	Tue Sep 04 12:16:28 2018 -0400
@@ -11,8 +11,6 @@
 from mercurial import (
     error,
     match as matchmod,
-    narrowspec,
-    util as hgutil,
 )
 
 def wrapdirstate(repo, dirstate):
@@ -29,10 +27,6 @@
             return fn(self, *args)
         return _wrapper
 
-    def _narrowbackupname(backupname):
-        assert 'dirstate' in backupname
-        return backupname.replace('dirstate', narrowspec.FILENAME)
-
     class narrowdirstate(dirstate.__class__):
         def walk(self, match, subrepos, unknown, ignored, full=True,
                  narrowonly=True):
@@ -78,22 +72,5 @@
                 allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
             super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles)
 
-        def restorebackup(self, tr, backupname):
-            self._opener.rename(_narrowbackupname(backupname),
-                                narrowspec.FILENAME, checkambig=True)
-            super(narrowdirstate, self).restorebackup(tr, backupname)
-
-        def savebackup(self, tr, backupname):
-            super(narrowdirstate, self).savebackup(tr, backupname)
-
-            narrowbackupname = _narrowbackupname(backupname)
-            self._opener.tryunlink(narrowbackupname)
-            hgutil.copyfile(self._opener.join(narrowspec.FILENAME),
-                            self._opener.join(narrowbackupname), hardlink=True)
-
-        def clearbackup(self, tr, backupname):
-            super(narrowdirstate, self).clearbackup(tr, backupname)
-            self._opener.unlink(_narrowbackupname(backupname))
-
     dirstate.__class__ = narrowdirstate
     return dirstate
--- a/hgext/narrow/narrowrepo.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/narrow/narrowrepo.py	Tue Sep 04 12:16:28 2018 -0400
@@ -7,35 +7,11 @@
 
 from __future__ import absolute_import
 
-from mercurial import (
-    changegroup,
-    hg,
-    narrowspec,
-    scmutil,
-)
-
 from . import (
     narrowdirstate,
     narrowrevlog,
 )
 
-def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
-    orig(sourcerepo, destrepo, **kwargs)
-    if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements:
-        with destrepo.wlock():
-            with destrepo.vfs('shared', 'a') as fp:
-                fp.write(narrowspec.FILENAME + '\n')
-
-def unsharenarrowspec(orig, ui, repo, repopath):
-    if (changegroup.NARROW_REQUIREMENT in repo.requirements
-        and repo.path == repopath and repo.shared()):
-        srcrepo = hg.sharedreposource(repo)
-        with srcrepo.vfs(narrowspec.FILENAME) as f:
-            spec = f.read()
-        with repo.vfs(narrowspec.FILENAME, 'w') as f:
-            f.write(spec)
-    return orig(ui, repo, repopath)
-
 def wraprepo(repo):
     """Enables narrow clone functionality on a single local repository."""
 
@@ -46,23 +22,6 @@
             narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
             return fl
 
-        # I'm not sure this is the right place to do this filter.
-        # context._manifestmatches() would probably be better, or perhaps
-        # move it to a later place, in case some of the callers do want to know
-        # which directories changed. This seems to work for now, though.
-        def status(self, *args, **kwargs):
-            s = super(narrowrepository, self).status(*args, **kwargs)
-            narrowmatch = self.narrowmatch()
-            modified = list(filter(narrowmatch, s.modified))
-            added = list(filter(narrowmatch, s.added))
-            removed = list(filter(narrowmatch, s.removed))
-            deleted = list(filter(narrowmatch, s.deleted))
-            unknown = list(filter(narrowmatch, s.unknown))
-            ignored = list(filter(narrowmatch, s.ignored))
-            clean = list(filter(narrowmatch, s.clean))
-            return scmutil.status(modified, added, removed, deleted, unknown,
-                                  ignored, clean)
-
         def _makedirstate(self):
             dirstate = super(narrowrepository, self)._makedirstate()
             return narrowdirstate.wrapdirstate(self, dirstate)
--- a/hgext/narrow/narrowtemplates.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/narrow/narrowtemplates.py	Tue Sep 04 12:16:28 2018 -0400
@@ -42,7 +42,7 @@
             return 'outsidenarrow'
     return ''
 
-@revsetpredicate('ellipsis')
+@revsetpredicate('ellipsis()')
 def ellipsisrevset(repo, subset, x):
     """Changesets that are ellipsis nodes."""
     return subset.filter(lambda r: _isellipsis(repo, r))
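
The predicate is now declared with its full call signature, 'ellipsis()', from which registrar-style helpers parse the symbol name. A toy registry illustrating the idea (the real one is mercurial.registrar.revsetpredicate):

    symbols = {}

    def revsetpredicate(decl):
        name = decl.partition('(')[0]  # 'ellipsis()' -> 'ellipsis'
        def register(func):
            func._decl = decl          # keep the declared signature around
            symbols[name] = func
            return func
        return register

    @revsetpredicate('ellipsis()')
    def ellipsisrevset(repo, subset, x):
        """Changesets that are ellipsis nodes (placeholder body)."""
        return subset

    print(sorted(symbols))  # ['ellipsis']
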
--- a/hgext/patchbomb.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/patchbomb.py	Tue Sep 04 12:16:28 2018 -0400
@@ -73,7 +73,7 @@
 '''
 from __future__ import absolute_import
 
-import email as emailmod
+import email.encoders as emailencoders
 import email.generator as emailgen
 import email.mime.base as emimebase
 import email.mime.multipart as emimemultipart
@@ -139,6 +139,11 @@
     default=None,
 )
 
+if pycompat.ispy3:
+    _bytesgenerator = emailgen.BytesGenerator
+else:
+    _bytesgenerator = emailgen.Generator
+
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
@@ -273,10 +278,11 @@
                                                  seqno=idx, total=total)
             else:
                 patchname = cmdutil.makefilename(repo[node], '%b.patch')
-        disposition = 'inline'
+        disposition = r'inline'
         if opts.get('attach'):
-            disposition = 'attachment'
-        p['Content-Disposition'] = disposition + '; filename=' + patchname
+            disposition = r'attachment'
+        p[r'Content-Disposition'] = (
+            disposition + r'; filename=' + encoding.strfromlocal(patchname))
         msg.attach(p)
     else:
         msg = mail.mimetextpatch(body, display=opts.get('test'))
@@ -370,12 +376,12 @@
     msg = emimemultipart.MIMEMultipart()
     if body:
         msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test')))
-    datapart = emimebase.MIMEBase('application', 'x-mercurial-bundle')
+    datapart = emimebase.MIMEBase(r'application', r'x-mercurial-bundle')
     datapart.set_payload(bundle)
     bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle')
-    datapart.add_header('Content-Disposition', 'attachment',
-                        filename=bundlename)
-    emailmod.Encoders.encode_base64(datapart)
+    datapart.add_header(r'Content-Disposition', r'attachment',
+                        filename=encoding.strfromlocal(bundlename))
+    emailencoders.encode_base64(datapart)
     msg.attach(datapart)
     msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
     return [(msg, subj, None)]
@@ -463,6 +469,11 @@
         ui.status(_("no changes found\n"))
     return revs
 
+def _msgid(node, timestamp):
+    hostname = encoding.strtolocal(socket.getfqdn())
+    hostname = encoding.environ.get('HGHOSTNAME', hostname)
+    return '<%s.%d@%s>' % (node, timestamp, hostname)
+
 emailopts = [
     ('', 'body', None, _('send patches as inline message text (default)')),
     ('a', 'attach', None, _('send patches as attachments')),
@@ -671,8 +682,7 @@
         start_time = dateutil.makedate()
 
     def genmsgid(id):
-        return '<%s.%d@%s>' % (id[:20], int(start_time[0]),
-                               encoding.strtolocal(socket.getfqdn()))
+        return _msgid(id[:20], int(start_time[0]))
 
     # deprecated config: patchbomb.from
     sender = (opts.get('from') or ui.config('email', 'from') or
@@ -780,10 +790,27 @@
             m['Bcc'] = ', '.join(bcc)
         if replyto:
             m['Reply-To'] = ', '.join(replyto)
+        # Fix up all headers to be native strings.
+        # TODO(durin42): this should probably be cleaned up above in the future.
+        if pycompat.ispy3:
+            for hdr, val in list(m.items()):
+                change = False
+                if isinstance(hdr, bytes):
+                    del m[hdr]
+                    hdr = pycompat.strurl(hdr)
+                    change = True
+                if isinstance(val, bytes):
+                    val = pycompat.strurl(val)
+                    if not change:
+                        # prevent duplicate headers
+                        del m[hdr]
+                    change = True
+                if change:
+                    m[hdr] = val
         if opts.get('test'):
             ui.status(_('displaying '), subj, ' ...\n')
             ui.pager('email')
-            generator = emailgen.Generator(ui, mangle_from_=False)
+            generator = _bytesgenerator(ui, mangle_from_=False)
             try:
                 generator.flatten(m, 0)
                 ui.write('\n')
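
On Python 3, email.message headers must be native str, which is what the fixup loop above enforces before flattening. A standalone sketch of the same normalization, assuming plain ASCII decoding for brevity (the real code converts with pycompat.strurl):

    import email.message

    def nativeheaders(msg):
        for hdr, val in list(msg.items()):
            change = False
            if isinstance(hdr, bytes):
                del msg[hdr]
                hdr = hdr.decode('ascii')
                change = True
            if isinstance(val, bytes):
                val = val.decode('ascii')
                if not change:
                    del msg[hdr]  # drop the old copy to avoid duplicates
                change = True
            if change:
                msg[hdr] = val
        return msg

    m = email.message.Message()
    m['Subject'] = 'hello'
    m[b'X-Test'] = b'value'
    print(sorted(nativeheaders(m).items()))
    # [('Subject', 'hello'), ('X-Test', 'value')]
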
@@ -799,8 +826,10 @@
                 # Exim does not remove the Bcc field
                 del m['Bcc']
             fp = stringio()
-            generator = emailgen.Generator(fp, mangle_from_=False)
+            generator = _bytesgenerator(fp, mangle_from_=False)
             generator.flatten(m, 0)
-            sendmail(sender_addr, to + bcc + cc, fp.getvalue())
+            alldests = to + bcc + cc
+            alldests = [encoding.strfromlocal(d) for d in alldests]
+            sendmail(sender_addr, alldests, fp.getvalue())
 
     progress.complete()
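
The new _msgid() helper above also makes the Message-Id hostname overridable through HGHOSTNAME, which keeps generated ids stable in tests. A rough standalone equivalent, with the byte/str encoding round-trips elided:

    import os
    import socket

    def msgid(node, timestamp):
        hostname = os.environ.get('HGHOSTNAME', socket.getfqdn())
        return '<%s.%d@%s>' % (node, timestamp, hostname)

    os.environ['HGHOSTNAME'] = 'test-hostname'
    print(msgid('f' * 20, 1536076588))
    # <ffffffffffffffffffff.1536076588@test-hostname>
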
--- a/hgext/rebase.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/rebase.py	Tue Sep 04 12:16:28 2018 -0400
@@ -177,6 +177,7 @@
         if e:
             self.extrafns = [e]
 
+        self.backupf = ui.configbool('ui', 'history-editing-backup')
         self.keepf = opts.get('keep', False)
         self.keepbranchesf = opts.get('keepbranches', False)
         self.obsoletenotrebased = {}
@@ -343,7 +344,9 @@
                 msg = _('cannot continue inconsistent rebase')
                 hint = _('use "hg rebase --abort" to clear broken state')
                 raise error.Abort(msg, hint=hint)
+
         if isabort:
+            backup = backup and self.backupf
             return abort(self.repo, self.originalwd, self.destmap, self.state,
                          activebookmark=self.activebookmark, backup=backup,
                          suppwarns=suppwarns)
@@ -632,7 +635,7 @@
         if self.collapsef and not self.keepf:
             collapsedas = newnode
         clearrebased(ui, repo, self.destmap, self.state, self.skipped,
-                     collapsedas, self.keepf, fm=fm)
+                     collapsedas, self.keepf, fm=fm, backup=self.backupf)
 
         clearstatus(repo)
         clearcollapsemsg(repo)
@@ -670,6 +673,7 @@
     ('D', 'detach', False, _('(DEPRECATED)')),
     ('i', 'interactive', False, _('(DEPRECATED)')),
     ('t', 'tool', '', _('specify merge tool')),
+    ('', 'stop', False, _('stop interrupted rebase')),
     ('c', 'continue', False, _('continue an interrupted rebase')),
     ('a', 'abort', False, _('abort an interrupted rebase')),
     ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
@@ -729,7 +733,8 @@
     deleted, there is no hook presently available for this.
 
     If a rebase is interrupted to manually resolve a conflict, it can be
-    continued with --continue/-c or aborted with --abort/-a.
+    continued with --continue/-c, aborted with --abort/-a, or stopped with
+    --stop.
 
     .. container:: verbose
 
@@ -800,22 +805,20 @@
     opts = pycompat.byteskwargs(opts)
     inmemory = ui.configbool('rebase', 'experimental.inmemory')
     dryrun = opts.get('dry_run')
-    if dryrun:
-        if opts.get('abort'):
-            raise error.Abort(_('cannot specify both --dry-run and --abort'))
-        if opts.get('continue'):
-            raise error.Abort(_('cannot specify both --dry-run and --continue'))
-    if opts.get('confirm'):
-        dryrun = True
-        if opts.get('dry_run'):
-            raise error.Abort(_('cannot specify both --confirm and --dry-run'))
-        if opts.get('abort'):
-            raise error.Abort(_('cannot specify both --confirm and --abort'))
-        if opts.get('continue'):
-            raise error.Abort(_('cannot specify both --confirm and --continue'))
+    confirm = opts.get('confirm')
+    selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
+    if len(selactions) > 1:
+        raise error.Abort(_('cannot use --%s with --%s')
+                          % tuple(selactions[:2]))
+    action = selactions[0] if selactions else None
+    if dryrun and action:
+        raise error.Abort(_('cannot specify both --dry-run and --%s') % action)
+    if confirm and action:
+        raise error.Abort(_('cannot specify both --confirm and --%s') % action)
+    if dryrun and confirm:
+        raise error.Abort(_('cannot specify both --confirm and --dry-run'))
 
-    if (opts.get('continue') or opts.get('abort') or
-        repo.currenttransaction() is not None):
+    if action or repo.currenttransaction() is not None:
         # in-memory rebase is not compatible with resuming rebases.
         # (Or if it is run within a transaction, since the restart logic can
         # fail the entire transaction.)
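
The refactor above replaces pairwise --abort/--continue checks with a single list of whichever mutually exclusive actions were requested. A sketch of that selection logic in isolation:

    def pickaction(opts):
        # Collect the exclusive actions that were requested, in a fixed order.
        selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
        if len(selactions) > 1:
            raise ValueError('cannot use --%s with --%s'
                             % tuple(selactions[:2]))
        return selactions[0] if selactions else None

    print(pickaction({'stop': True}))  # stop
    print(pickaction({}))              # None
    try:
        pickaction({'abort': True, 'continue': True})
    except ValueError as err:
        print(err)                     # cannot use --abort with --continue
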
@@ -830,24 +833,43 @@
         opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
         opts['dest'] = '_destautoorphanrebase(SRC)'
 
-    if dryrun:
-        return _dryrunrebase(ui, repo, opts)
+    if dryrun or confirm:
+        return _dryrunrebase(ui, repo, action, opts)
+    elif action == 'stop':
+        rbsrt = rebaseruntime(repo, ui)
+        with repo.wlock(), repo.lock():
+            rbsrt.restorestatus()
+            if rbsrt.collapsef:
+                raise error.Abort(_("cannot stop in --collapse session"))
+            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
+            if not (rbsrt.keepf or allowunstable):
+                raise error.Abort(_("cannot remove original changesets with"
+                                    " unrebased descendants"),
+                    hint=_('either enable obsmarkers to allow unstable '
+                           'revisions or use --keep to keep original '
+                           'changesets'))
+            if needupdate(repo, rbsrt.state):
+                # update to the current working revision
+                # to clear interrupted merge
+                hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
+            rbsrt._finishrebase()
+            return 0
     elif inmemory:
         try:
             # in-memory merge doesn't support conflicts, so if we hit any, abort
             # and re-run as an on-disk merge.
             overrides = {('rebase', 'singletransaction'): True}
             with ui.configoverride(overrides, 'rebase'):
-                return _dorebase(ui, repo, opts, inmemory=inmemory)
+                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
         except error.InMemoryMergeConflictsError:
             ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
                       ' merge\n'))
-            _dorebase(ui, repo, {'abort': True})
-            return _dorebase(ui, repo, opts, inmemory=False)
+            _dorebase(ui, repo, action='abort', opts={})
+            return _dorebase(ui, repo, action, opts, inmemory=False)
     else:
-        return _dorebase(ui, repo, opts)
+        return _dorebase(ui, repo, action, opts)
 
-def _dryrunrebase(ui, repo, opts):
+def _dryrunrebase(ui, repo, action, opts):
     rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
     confirm = opts.get('confirm')
     if confirm:
@@ -860,7 +882,7 @@
         try:
             overrides = {('rebase', 'singletransaction'): True}
             with ui.configoverride(overrides, 'rebase'):
-                _origrebase(ui, repo, opts, rbsrt, inmemory=True,
+                _origrebase(ui, repo, action, opts, rbsrt, inmemory=True,
                             leaveunfinished=True)
         except error.InMemoryMergeConflictsError:
             ui.status(_('hit a merge conflict\n'))
@@ -886,11 +908,13 @@
                 rbsrt._prepareabortorcontinue(isabort=True, backup=False,
                                               suppwarns=True)
 
-def _dorebase(ui, repo, opts, inmemory=False):
+def _dorebase(ui, repo, action, opts, inmemory=False):
     rbsrt = rebaseruntime(repo, ui, inmemory, opts)
-    return _origrebase(ui, repo, opts, rbsrt, inmemory=inmemory)
+    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
 
-def _origrebase(ui, repo, opts, rbsrt, inmemory=False, leaveunfinished=False):
+def _origrebase(ui, repo, action, opts, rbsrt, inmemory=False,
+                leaveunfinished=False):
+    assert action != 'stop'
     with repo.wlock(), repo.lock():
         # Validate input and define rebasing points
         destf = opts.get('dest', None)
@@ -900,8 +924,6 @@
         # search default destination in this space
         # used in the 'hg pull --rebase' case, see issue 5214.
         destspace = opts.get('_destspace')
-        contf = opts.get('continue')
-        abortf = opts.get('abort')
         if opts.get('interactive'):
             try:
                 if extensions.find('histedit'):
@@ -917,22 +939,20 @@
             raise error.Abort(
                 _('message can only be specified with collapse'))
 
-        if contf or abortf:
-            if contf and abortf:
-                raise error.Abort(_('cannot use both abort and continue'))
+        if action:
             if rbsrt.collapsef:
                 raise error.Abort(
                     _('cannot use collapse with continue or abort'))
             if srcf or basef or destf:
                 raise error.Abort(
                     _('abort and continue do not allow specifying revisions'))
-            if abortf and opts.get('tool', False):
+            if action == 'abort' and opts.get('tool', False):
                 ui.warn(_('tool option will be ignored\n'))
-            if contf:
+            if action == 'continue':
                 ms = mergemod.mergestate.read(repo)
                 mergeutil.checkunresolved(ms)
 
-            retcode = rbsrt._prepareabortorcontinue(abortf)
+            retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
             if retcode is not None:
                 return retcode
         else:
@@ -1728,7 +1748,7 @@
     return originalwd, destmap, state
 
 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
-                 keepf=False, fm=None):
+                 keepf=False, fm=None, backup=True):
     """dispose of rebased revision at the end of the rebase
 
     If `collapsedas` is not None, the rebase was a collapse whose result is the
@@ -1736,6 +1756,9 @@
 
     If `keepf` is True, the rebase has --keep set and no nodes should be
     removed (but bookmarks still need to be moved).
+
+    If `backup` is False, no backup will be stored when stripping rebased
+    revisions.
     """
     tonode = repo.changelog.node
     replacements = {}
@@ -1751,7 +1774,7 @@
                 else:
                     succs = (newnode,)
                 replacements[oldnode] = succs
-    scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
+    scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
     if fm:
         hf = fm.hexfunc
         fl = fm.formatlist
@@ -1868,7 +1891,7 @@
                 # If 'srcrev' has a successor in rebase set but none in
                 # destination (which would be caught above), we shall skip it
                 # and its descendants to avoid divergence.
-                if any(s in destmap for s in succrevs):
+                if srcrev in extinctrevs or any(s in destmap for s in succrevs):
                     obsoletewithoutsuccessorindestination.add(srcrev)
 
     return (
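
For context on the clearrebased() change above: the replacements map handed to scmutil.cleanupnodes pairs each rebased node with its successors, and --collapse points every old node at the single collapsed node. A simplified, runnable model of that loop (inputs are hypothetical node names, not real hashes):

    def buildreplacements(moved, collapsedas=None):
        """moved maps oldnode -> newnode, or -> None for skipped revisions."""
        replacements = {}
        for oldnode, newnode in moved.items():
            if newnode is None:
                succs = ()                        # skipped: drop without successor
            elif collapsedas is not None:
                succs = (collapsedas,)            # --collapse: shared successor
            else:
                succs = (newnode,)
            replacements[oldnode] = succs
        return replacements

    print(buildreplacements({'a1': 'b1', 'a2': None}))
    print(buildreplacements({'a1': 'b1', 'a2': 'b2'}, collapsedas='c0'))
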
--- a/hgext/shelve.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/shelve.py	Tue Sep 04 12:16:28 2018 -0400
@@ -41,6 +41,7 @@
     lock as lockmod,
     mdiff,
     merge,
+    narrowspec,
     node as nodemod,
     patch,
     phases,
@@ -78,7 +79,7 @@
 
 backupdir = 'shelve-backup'
 shelvedir = 'shelved'
-shelvefileextensions = ['hg', 'patch', 'oshelve']
+shelvefileextensions = ['hg', 'patch', 'shelve']
 # universal extension is present in all types of shelves
 patchextension = 'patch'
 
@@ -159,18 +160,19 @@
             btype = 'HG20'
             compression = 'BZ'
 
-        outgoing = discovery.outgoing(self.repo, missingroots=bases,
+        repo = self.repo.unfiltered()
+
+        outgoing = discovery.outgoing(repo, missingroots=bases,
                                       missingheads=[node])
-        cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
-                                         'shelve')
+        cg = changegroup.makechangegroup(repo, outgoing, cgversion, 'shelve')
 
         bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
                                 compression=compression)
 
-    def writeobsshelveinfo(self, info):
+    def writeinfo(self, info):
         scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
 
-    def readobsshelveinfo(self):
+    def readinfo(self):
         return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
 
 class shelvedstate(object):
@@ -314,16 +316,13 @@
     '''Abort current transaction for shelve/unshelve, but keep dirstate
     '''
     tr = repo.currenttransaction()
-    backupname = 'dirstate.shelve'
-    repo.dirstate.savebackup(tr, backupname)
+    dirstatebackupname = 'dirstate.shelve'
+    narrowspecbackupname = 'narrowspec.shelve'
+    repo.dirstate.savebackup(tr, dirstatebackupname)
+    narrowspec.savebackup(repo, narrowspecbackupname)
     tr.abort()
-    repo.dirstate.restorebackup(None, backupname)
-
-def createcmd(ui, repo, pats, opts):
-    """subcommand that creates a new shelve"""
-    with repo.wlock():
-        cmdutil.checkunfinished(repo)
-        return _docreatecmd(ui, repo, pats, opts)
+    narrowspec.restorebackup(repo, narrowspecbackupname)
+    repo.dirstate.restorebackup(None, dirstatebackupname)
 
 def getshelvename(repo, parent, opts):
     """Decide on the name this shelve is going to have"""
@@ -411,6 +410,8 @@
         ui.status(_("nothing changed\n"))
 
 def _shelvecreatedcommit(repo, node, name):
+    info = {'node': nodemod.hex(node)}
+    shelvedfile(repo, name, 'shelve').writeinfo(info)
     bases = list(mutableancestors(repo[node]))
     shelvedfile(repo, name, 'hg').writebundle(bases, node)
     with shelvedfile(repo, name, patchextension).opener('wb') as fp:
@@ -426,6 +427,12 @@
 def _finishshelve(repo):
     _aborttransaction(repo)
 
+def createcmd(ui, repo, pats, opts):
+    """subcommand that creates a new shelve"""
+    with repo.wlock():
+        cmdutil.checkunfinished(repo)
+        return _docreatecmd(ui, repo, pats, opts)
+
 def _docreatecmd(ui, repo, pats, opts):
     wctx = repo[None]
     parents = wctx.parents()
@@ -456,7 +463,7 @@
 
         name = getshelvename(repo, parent, opts)
         activebookmark = _backupactivebookmark(repo)
-        extra = {}
+        extra = {'internal': 'shelve'}
         if includeunknown:
             _includeunknownfiles(repo, pats, opts, extra)
 
@@ -744,7 +751,8 @@
         return tmpwctx, addedbefore
     ui.status(_("temporarily committing pending changes "
                 "(restore with 'hg unshelve --abort')\n"))
-    commitfunc = getcommitfunc(extra=None, interactive=False,
+    extra = {'internal': 'shelve'}
+    commitfunc = getcommitfunc(extra=extra, interactive=False,
                                editor=False)
     tempopts = {}
     tempopts['message'] = "pending changes temporary commit"
@@ -756,9 +764,22 @@
 
 def _unshelverestorecommit(ui, repo, basename):
     """Recreate commit in the repository during the unshelve"""
-    with ui.configoverride({('ui', 'quiet'): True}):
-        shelvedfile(repo, basename, 'hg').applybundle()
+    repo = repo.unfiltered()
+    node = None
+    if shelvedfile(repo, basename, 'shelve').exists():
+        node = shelvedfile(repo, basename, 'shelve').readinfo()['node']
+    if node is None or node not in repo:
+        with ui.configoverride({('ui', 'quiet'): True}):
+            shelvedfile(repo, basename, 'hg').applybundle()
         shelvectx = repo['tip']
+        # We might not strip the unbundled changeset, so we should keep track of
+        # the unshelve node in case we need to reuse it (e.g. unshelve --keep)
+        if node is None:
+            info = {'node': nodemod.hex(shelvectx.node())}
+            shelvedfile(repo, basename, 'shelve').writeinfo(info)
+    else:
+        shelvectx = repo[node]
+
     return repo, shelvectx
 
 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
@@ -783,7 +804,7 @@
             tr.close()
 
             nodestoremove = [repo.changelog.node(rev)
-                             for rev in xrange(oldtiprev, len(repo))]
+                             for rev in pycompat.xrange(oldtiprev, len(repo))]
             shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
                               branchtorestore, opts.get('keep'), activebookmark)
             raise error.InterventionRequired(
@@ -955,6 +976,7 @@
     if not shelvedfile(repo, basename, patchextension).exists():
         raise error.Abort(_("shelved change '%s' not found") % basename)
 
+    repo = repo.unfiltered()
     lock = tr = None
     try:
         lock = repo.lock()
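
The new 'shelve' sidecar file records the shelved node so unshelve can reuse an unstripped commit instead of re-unbundling. A rough stand-in for the writeinfo()/readinfo() round-trip using plain file I/O (the real code serializes through scmutil.simplekeyvaluefile on a vfs, whose on-disk details may differ):

    def writeinfo(path, info):
        with open(path, 'w') as f:
            for k, v in sorted(info.items()):
                f.write('%s=%s\n' % (k, v))

    def readinfo(path):
        with open(path) as f:
            return dict(line.rstrip('\n').split('=', 1) for line in f)

    writeinfo('demo.shelve', {'node': 'abc123'})
    print(readinfo('demo.shelve'))                # {'node': 'abc123'}
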
--- a/hgext/uncommit.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/uncommit.py	Tue Sep 04 12:16:28 2018 -0400
@@ -182,7 +182,7 @@
 
             with repo.dirstate.parentchange():
                 repo.dirstate.setparents(newid, node.nullid)
-                s = repo.status(old.p1(), old, match=match)
+                s = old.p1().status(old, match=match)
                 _fixdirstate(repo, old, repo[newid], s)
 
 def predecessormarkers(ctx):
--- a/hgext/win32text.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/hgext/win32text.py	Tue Sep 04 12:16:28 2018 -0400
@@ -49,6 +49,7 @@
     short,
 )
 from mercurial import (
+    pycompat,
     registrar,
 )
 from mercurial.utils import (
@@ -141,7 +142,8 @@
     # changegroup that contains an unacceptable commit followed later
     # by a commit that fixes the problem.
     tip = repo['tip']
-    for rev in xrange(repo.changelog.tiprev(), repo[node].rev() - 1, -1):
+    for rev in pycompat.xrange(repo.changelog.tiprev(),
+                               repo[node].rev() - 1, -1):
         c = repo[rev]
         for f in c.files():
             if f in seen or f not in tip or f not in c:
--- a/i18n/hggettext	Tue Sep 04 11:59:12 2018 -0400
+++ b/i18n/hggettext	Tue Sep 04 12:16:28 2018 -0400
@@ -63,7 +63,7 @@
 
 doctestre = re.compile(r'^ +>>> ', re.MULTILINE)
 
-def offset(src, doc, name, default):
+def offset(src, doc, name, lineno, default):
     """Compute offset or issue a warning on stdout."""
     # remove doctest part, in order to avoid backslash mismatching
     m = doctestre.search(doc)
@@ -76,8 +76,9 @@
         # This can happen if the docstring contains unnecessary escape
         # sequences such as \" in a triple-quoted string. The problem
         # is that \" is turned into " and so doc wont appear in src.
-        sys.stderr.write("warning: unknown offset in %s, assuming %d lines\n"
-                         % (name, default))
+        sys.stderr.write("%s:%d:warning:"
+                         " unknown docstr offset, assuming %d lines\n"
+                         % (name, lineno, default))
         return default
     else:
         return src.count('\n', 0, end)
@@ -106,7 +107,7 @@
     if not path.startswith('mercurial/') and mod.__doc__:
         with open(path) as fobj:
             src = fobj.read()
-        lineno = 1 + offset(src, mod.__doc__, path, 7)
+        lineno = 1 + offset(src, mod.__doc__, path, 1, 7)
         print(poentry(path, lineno, mod.__doc__))
 
     functions = list(getattr(mod, 'i18nfunctions', []))
@@ -129,7 +130,6 @@
             actualpath = '%s%s.py' % (funcmod.__name__.replace('.', '/'), extra)
 
             src = inspect.getsource(func)
-            name = "%s.%s" % (actualpath, func.__name__)
             lineno = inspect.getsourcelines(func)[1]
             doc = docobj.__doc__
             origdoc = getattr(docobj, '_origdoc', '')
@@ -137,9 +137,9 @@
                 doc = doc.rstrip()
                 origdoc = origdoc.rstrip()
             if origdoc:
-                lineno += offset(src, origdoc, name, 1)
+                lineno += offset(src, origdoc, actualpath, lineno, 1)
             else:
-                lineno += offset(src, doc, name, 1)
+                lineno += offset(src, doc, actualpath, lineno, 1)
             print(poentry(actualpath, lineno, doc))
 
 
--- a/i18n/posplit	Tue Sep 04 11:59:12 2018 -0400
+++ b/i18n/posplit	Tue Sep 04 12:16:28 2018 -0400
@@ -15,6 +15,14 @@
     e = cache.get(entry.msgid)
     if e:
         e.occurrences.extend(entry.occurrences)
+
+        # merge comments from entry
+        for comment in entry.comment.split('\n'):
+            if comment and comment not in e.comment:
+                if not e.comment:
+                    e.comment = comment
+                else:
+                    e.comment += '\n' + comment
     else:
         po.append(entry)
         cache[entry.msgid] = entry
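
On a duplicate msgid, posplit now folds the incoming entry's comments into the cached entry line by line, skipping lines already present. The same logic as a standalone function (a hypothetical helper, not part of the patch):

    def mergecomments(existing, incoming):
        """Merge newline-separated comment blocks without duplicating lines."""
        for comment in incoming.split('\n'):
            if comment and comment not in existing:
                if not existing:
                    existing = comment
                else:
                    existing += '\n' + comment
        return existing

    print(mergecomments('first', 'first\nsecond'))   # first\nsecond
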
--- a/mercurial/__init__.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/__init__.py	Tue Sep 04 12:16:28 2018 -0400
@@ -182,7 +182,7 @@
                     continue
                 r, c = t.start
                 l = (b'; from mercurial.pycompat import '
-                     b'delattr, getattr, hasattr, setattr, xrange, '
+                     b'delattr, getattr, hasattr, setattr, '
                      b'open, unicode\n')
                 for u in tokenize.tokenize(io.BytesIO(l).readline):
                     if u.type in (tokenize.ENCODING, token.ENDMARKER):
@@ -223,7 +223,7 @@
     # ``replacetoken`` or any mechanism that changes semantics of module
     # loading is changed. Otherwise cached bytecode may get loaded without
     # the new transformation mechanisms applied.
-    BYTECODEHEADER = b'HG\x00\x0a'
+    BYTECODEHEADER = b'HG\x00\x0b'
 
     class hgloader(importlib.machinery.SourceFileLoader):
         """Custom module loader that transforms source code.
--- a/mercurial/ancestor.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/ancestor.py	Tue Sep 04 12:16:28 2018 -0400
@@ -11,6 +11,9 @@
 import heapq
 
 from .node import nullrev
+from . import (
+    pycompat,
+)
 
 def commonancestorsheads(pfunc, *nodes):
     """Returns a set with the heads of all common ancestors of all nodes,
@@ -174,7 +177,7 @@
             # no revs to consider
             return
 
-        for curr in xrange(start, min(revs) - 1, -1):
+        for curr in pycompat.xrange(start, min(revs) - 1, -1):
             if curr not in bases:
                 continue
             revs.discard(curr)
@@ -215,7 +218,7 @@
         # exit.
 
         missing = []
-        for curr in xrange(start, nullrev, -1):
+        for curr in pycompat.xrange(start, nullrev, -1):
             if not revsvisit:
                 break
 
--- a/mercurial/bookmarks.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/bookmarks.py	Tue Sep 04 12:16:28 2018 -0400
@@ -240,7 +240,7 @@
             if self.active:
                 return self.active
             else:
-                raise error.Abort(_("no active bookmark"))
+                raise error.RepoLookupError(_("no active bookmark"))
         return bname
 
     def checkconflict(self, mark, force=False, target=None):
--- a/mercurial/branchmap.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/branchmap.py	Tue Sep 04 12:16:28 2018 -0400
@@ -38,15 +38,11 @@
     return filename
 
 def read(repo):
+    f = None
     try:
         f = repo.cachevfs(_filename(repo))
-        lines = f.read().split('\n')
-        f.close()
-    except (IOError, OSError):
-        return None
-
-    try:
-        cachekey = lines.pop(0).split(" ", 2)
+        lineiter = iter(f)
+        cachekey = next(lineiter).rstrip('\n').split(" ", 2)
         last, lrev = cachekey[:2]
         last, lrev = bin(last), int(lrev)
         filteredhash = None
@@ -58,7 +54,8 @@
             # invalidate the cache
             raise ValueError(r'tip differs')
         cl = repo.changelog
-        for l in lines:
+        for l in lineiter:
+            l = l.rstrip('\n')
             if not l:
                 continue
             node, state, label = l.split(" ", 2)
@@ -72,6 +69,10 @@
             partial.setdefault(label, []).append(node)
             if state == 'c':
                 partial._closednodes.add(node)
+
+    except (IOError, OSError):
+        return None
+
     except Exception as inst:
         if repo.ui.debugflag:
             msg = 'invalid branchheads cache'
@@ -80,6 +81,11 @@
             msg += ': %s\n'
             repo.ui.debug(msg % pycompat.bytestr(inst))
         partial = None
+
+    finally:
+        if f:
+            f.close()
+
     return partial
 
 ### Nearest subset relation
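
branchmap.read() now streams the cache file inside a single try block: the header comes from next() on the line iterator, I/O errors mean "no cache", any parse error invalidates it, and the finally clause closes the file on every path. The shape of that pattern, reduced to plain file I/O with a hypothetical cache format:

    def readcache(path):
        f = None
        try:
            f = open(path, 'rb')                # stand-in for repo.cachevfs(...)
            lineiter = iter(f)
            header = next(lineiter).rstrip(b'\n')
            entries = [l.rstrip(b'\n') for l in lineiter if l.strip()]
            return header, entries
        except (IOError, OSError):
            return None                         # missing cache is not an error
        except Exception:
            return None                         # corrupt cache: invalidate
        finally:
            if f:
                f.close()
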
--- a/mercurial/bundle2.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/bundle2.py	Tue Sep 04 12:16:28 2018 -0400
@@ -2223,11 +2223,11 @@
         total += header[1] + header[2]
         utf8branch = inpart.read(header[0])
         branch = encoding.tolocal(utf8branch)
-        for x in xrange(header[1]):
+        for x in pycompat.xrange(header[1]):
             node = inpart.read(20)
             rev = cl.rev(node)
             cache.setdata(branch, rev, node, False)
-        for x in xrange(header[2]):
+        for x in pycompat.xrange(header[2]):
             node = inpart.read(20)
             rev = cl.rev(node)
             cache.setdata(branch, rev, node, True)
--- a/mercurial/bundlerepo.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/bundlerepo.py	Tue Sep 04 12:16:28 2018 -0400
@@ -80,7 +80,7 @@
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                  self.rev(p1), self.rev(p2), node)
-            self.index.insert(-1, e)
+            self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
@@ -187,7 +187,7 @@
 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
     def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
                  dir=''):
-        manifest.manifestrevlog.__init__(self, opener, dir=dir)
+        manifest.manifestrevlog.__init__(self, opener, tree=dir)
         bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                               linkmapper)
         if dirlogstarts is None:
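
Because the C index now only supports appending (see the revlog.c changes below), the bundle revlog switches from insert(-1, e) to append(e). With a plain list standing in for the index type, the two spellings are equivalent:

    index = []                                   # stand-in for parsers.index
    e = (0, 0, -1, -1, 0, -1, -1, b'\x00' * 20)  # the required 8-tuple entry
    index.append(e)                              # new spelling of index.insert(-1, e)
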
--- a/mercurial/cext/parsers.c	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/cext/parsers.c	Tue Sep 04 12:16:28 2018 -0400
@@ -713,7 +713,7 @@
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 5;
+static const int version = 9;
 
 static void module_init(PyObject *mod)
 {
--- a/mercurial/cext/revlog.c	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/cext/revlog.c	Tue Sep 04 12:16:28 2018 -0400
@@ -28,17 +28,33 @@
 #define PyInt_AsLong PyLong_AsLong
 #endif
 
+typedef struct indexObjectStruct indexObject;
+
+typedef struct {
+	int children[16];
+} nodetreenode;
+
 /*
  * A base-16 trie for fast node->rev mapping.
  *
  * Positive value is index of the next node in the trie
- * Negative value is a leaf: -(rev + 1)
+ * Negative value is a leaf: -(rev + 2)
  * Zero is empty
  */
 typedef struct {
-	int children[16];
+	indexObject *index;
+	nodetreenode *nodes;
+	unsigned length;     /* # nodes in use */
+	unsigned capacity;   /* # nodes allocated */
+	int depth;           /* maximum depth of tree */
+	int splits;          /* # splits performed */
 } nodetree;
 
+typedef struct {
+	PyObject_HEAD
+	nodetree nt;
+} nodetreeObject;
+
 /*
  * This class has two behaviors.
  *
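
The leaf encoding moves from -(rev + 1) to -(rev + 2) so the null revision (-1) gets a nonzero leaf value while 0 keeps meaning "empty slot"; as a bonus, encoding rev -2 yields 0, which nt_delete_node exploits below. The arithmetic, checked in pure Python:

    def encode(rev):
        return -(rev + 2)          # leaf value stored in children[k]

    def decode(v):
        assert v < 0               # only negative values are leaves
        return -(v + 2)

    assert encode(0) == -2 and decode(-2) == 0
    assert encode(-1) == -1        # nullrev is now representable as a leaf
    assert encode(-2) == 0         # deletion encodes to "empty slot"
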
@@ -51,7 +67,7 @@
  * With string keys, we lazily perform a reverse mapping from node to
  * rev, using a base-16 trie.
  */
-typedef struct {
+struct indexObjectStruct {
 	PyObject_HEAD
 	/* Type-specific fields go here. */
 	PyObject *data;        /* raw bytes of index */
@@ -63,16 +79,13 @@
 	PyObject *added;       /* populated on demand */
 	PyObject *headrevs;    /* cache, invalidated on changes */
 	PyObject *filteredrevs;/* filtered revs set */
-	nodetree *nt;          /* base-16 trie */
-	unsigned ntlength;          /* # nodes in use */
-	unsigned ntcapacity;        /* # nodes allocated */
-	int ntdepth;           /* maximum depth of tree */
-	int ntsplits;          /* # splits performed */
+	nodetree nt;           /* base-16 trie */
+	int ntinitialized;     /* 0 or 1 */
 	int ntrev;             /* last rev scanned */
 	int ntlookups;         /* # lookups */
 	int ntmisses;          /* # lookups that miss the cache */
 	int inlined;
-} indexObject;
+};
 
 static Py_ssize_t index_length(const indexObject *self)
 {
@@ -95,6 +108,36 @@
 /* A RevlogNG v1 index entry is 64 bytes long. */
 static const long v1_hdrsize = 64;
 
+static void raise_revlog_error(void)
+{
+	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
+
+	mod = PyImport_ImportModule("mercurial.error");
+	if (mod == NULL) {
+		goto cleanup;
+	}
+
+	dict = PyModule_GetDict(mod);
+	if (dict == NULL) {
+		goto cleanup;
+	}
+	Py_INCREF(dict);
+
+	errclass = PyDict_GetItemString(dict, "RevlogError");
+	if (errclass == NULL) {
+		PyErr_SetString(PyExc_SystemError,
+				"could not find RevlogError");
+		goto cleanup;
+	}
+
+	/* value of exception is ignored by callers */
+	PyErr_SetString(errclass, "RevlogError");
+
+cleanup:
+	Py_XDECREF(dict);
+	Py_XDECREF(mod);
+}
+
 /*
  * Return a pointer to the beginning of a RevlogNG record.
  */
@@ -117,9 +160,8 @@
 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
 				    int *ps, int maxrev)
 {
-	if (rev >= self->length - 1) {
-		PyObject *tuple = PyList_GET_ITEM(self->added,
-						  rev - self->length + 1);
+	if (rev >= self->length) {
+		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
 		ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
 		ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
 	} else {
@@ -158,22 +200,19 @@
 	Py_ssize_t length = index_length(self);
 	PyObject *entry;
 
-	if (pos < 0)
-		pos += length;
+	if (pos == -1) {
+		Py_INCREF(nullentry);
+		return nullentry;
+	}
 
 	if (pos < 0 || pos >= length) {
 		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
 		return NULL;
 	}
 
-	if (pos == length - 1) {
-		Py_INCREF(nullentry);
-		return nullentry;
-	}
-
-	if (pos >= self->length - 1) {
+	if (pos >= self->length) {
 		PyObject *obj;
-		obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
+		obj = PyList_GET_ITEM(self->added, pos - self->length);
 		Py_INCREF(obj);
 		return obj;
 	}
@@ -231,15 +270,15 @@
 	Py_ssize_t length = index_length(self);
 	const char *data;
 
-	if (pos == length - 1 || pos == INT_MAX)
+	if (pos == -1)
 		return nullid;
 
 	if (pos >= length)
 		return NULL;
 
-	if (pos >= self->length - 1) {
+	if (pos >= self->length) {
 		PyObject *tuple, *str;
-		tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
+		tuple = PyList_GET_ITEM(self->added, pos - self->length);
 		str = PyTuple_GetItem(tuple, 7);
 		return str ? PyBytes_AS_STRING(str) : NULL;
 	}
@@ -262,47 +301,34 @@
 	return node;
 }
 
-static int nt_insert(indexObject *self, const char *node, int rev);
+static int nt_insert(nodetree *self, const char *node, int rev);
 
-static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
+static int node_check(PyObject *obj, char **node)
 {
-	if (PyBytes_AsStringAndSize(obj, node, nodelen) == -1)
+	Py_ssize_t nodelen;
+	if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
 		return -1;
-	if (*nodelen == 20)
+	if (nodelen == 20)
 		return 0;
 	PyErr_SetString(PyExc_ValueError, "20-byte hash required");
 	return -1;
 }
 
-static PyObject *index_insert(indexObject *self, PyObject *args)
+static PyObject *index_append(indexObject *self, PyObject *obj)
 {
-	PyObject *obj;
 	char *node;
-	int index;
-	Py_ssize_t len, nodelen;
-
-	if (!PyArg_ParseTuple(args, "iO", &index, &obj))
-		return NULL;
+	Py_ssize_t len;
 
 	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
 		PyErr_SetString(PyExc_TypeError, "8-tuple required");
 		return NULL;
 	}
 
-	if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1)
+	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
 		return NULL;
 
 	len = index_length(self);
 
-	if (index < 0)
-		index += len;
-
-	if (index != len - 1) {
-		PyErr_SetString(PyExc_IndexError,
-				"insert only supported at index -1");
-		return NULL;
-	}
-
 	if (self->added == NULL) {
 		self->added = PyList_New(0);
 		if (self->added == NULL)
@@ -312,42 +338,13 @@
 	if (PyList_Append(self->added, obj) == -1)
 		return NULL;
 
-	if (self->nt)
-		nt_insert(self, node, index);
+	if (self->ntinitialized)
+		nt_insert(&self->nt, node, (int)len);
 
 	Py_CLEAR(self->headrevs);
 	Py_RETURN_NONE;
 }
 
-static void _index_clearcaches(indexObject *self)
-{
-	if (self->cache) {
-		Py_ssize_t i;
-
-		for (i = 0; i < self->raw_length; i++)
-			Py_CLEAR(self->cache[i]);
-		free(self->cache);
-		self->cache = NULL;
-	}
-	if (self->offsets) {
-		PyMem_Free(self->offsets);
-		self->offsets = NULL;
-	}
-	free(self->nt);
-	self->nt = NULL;
-	Py_CLEAR(self->headrevs);
-}
-
-static PyObject *index_clearcaches(indexObject *self)
-{
-	_index_clearcaches(self);
-	self->ntlength = self->ntcapacity = 0;
-	self->ntdepth = self->ntsplits = 0;
-	self->ntrev = -1;
-	self->ntlookups = self->ntmisses = 0;
-	Py_RETURN_NONE;
-}
-
 static PyObject *index_stats(indexObject *self)
 {
 	PyObject *obj = PyDict_New();
@@ -376,16 +373,18 @@
 		Py_DECREF(t);
 	}
 
-	if (self->raw_length != self->length - 1)
+	if (self->raw_length != self->length)
 		istat(raw_length, "revs on disk");
 	istat(length, "revs in memory");
-	istat(ntcapacity, "node trie capacity");
-	istat(ntdepth, "node trie depth");
-	istat(ntlength, "node trie count");
 	istat(ntlookups, "node trie lookups");
 	istat(ntmisses, "node trie misses");
 	istat(ntrev, "node trie last rev scanned");
-	istat(ntsplits, "node trie splits");
+	if (self->ntinitialized) {
+		istat(nt.capacity, "node trie capacity");
+		istat(nt.depth, "node trie depth");
+		istat(nt.length, "node trie count");
+		istat(nt.splits, "node trie splits");
+	}
 
 #undef istat
 
@@ -451,7 +450,7 @@
 {
 	PyObject *iter = NULL;
 	PyObject *iter_item = NULL;
-	Py_ssize_t min_idx = index_length(self) + 1;
+	Py_ssize_t min_idx = index_length(self) + 2;
 	long iter_item_long;
 
 	if (PyList_GET_SIZE(list) != 0) {
@@ -463,7 +462,7 @@
 			Py_DECREF(iter_item);
 			if (iter_item_long < min_idx)
 				min_idx = iter_item_long;
-			phases[iter_item_long] = marker;
+			phases[iter_item_long] = (char)marker;
 		}
 		Py_DECREF(iter);
 	}
@@ -493,7 +492,7 @@
 	PyObject *reachable = NULL;
 
 	PyObject *val;
-	Py_ssize_t len = index_length(self) - 1;
+	Py_ssize_t len = index_length(self);
 	long revnum;
 	Py_ssize_t k;
 	Py_ssize_t i;
@@ -615,7 +614,7 @@
 			      revstates[parents[1] + 1]) & RS_REACHABLE)
 			    && !(revstates[i + 1] & RS_REACHABLE)) {
 				revstates[i + 1] |= RS_REACHABLE;
-				val = PyInt_FromLong(i);
+				val = PyInt_FromSsize_t(i);
 				if (val == NULL)
 					goto bail;
 				r = PyList_Append(reachable, val);
@@ -645,7 +644,7 @@
 	PyObject *phaseset = NULL;
 	PyObject *phasessetlist = NULL;
 	PyObject *rev = NULL;
-	Py_ssize_t len = index_length(self) - 1;
+	Py_ssize_t len = index_length(self);
 	Py_ssize_t numphase = 0;
 	Py_ssize_t minrevallphases = 0;
 	Py_ssize_t minrevphase = 0;
@@ -702,7 +701,7 @@
 		}
 	}
 	/* Transform phase list to a python list */
-	phasessize = PyInt_FromLong(len);
+	phasessize = PyInt_FromSsize_t(len);
 	if (phasessize == NULL)
 		goto release;
 	for (i = 0; i < len; i++) {
@@ -711,7 +710,7 @@
 		 * is computed as a difference */
 		if (phase != 0) {
 			phaseset = PyList_GET_ITEM(phasessetlist, phase);
-			rev = PyInt_FromLong(i);
+			rev = PyInt_FromSsize_t(i);
 			if (rev == NULL)
 				goto release;
 			PySet_Add(phaseset, rev);
@@ -756,7 +755,7 @@
 		}
 	}
 
-	len = index_length(self) - 1;
+	len = index_length(self);
 	heads = PyList_New(0);
 	if (heads == NULL)
 		goto bail;
@@ -838,9 +837,8 @@
 {
 	const char *data;
 
-	if (rev >= self->length - 1) {
-		PyObject *tuple = PyList_GET_ITEM(self->added,
-			rev - self->length + 1);
+	if (rev >= self->length) {
+		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
 		return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
 	}
 	else {
@@ -881,7 +879,7 @@
 		return NULL;
 	}
 
-	if (rev < 0 || rev >= length - 1) {
+	if (rev < 0 || rev >= length) {
 		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
 		return NULL;
 	}
@@ -924,7 +922,7 @@
 			break;
 		}
 
-		if (iterrev >= length - 1) {
+		if (iterrev >= length) {
 			PyErr_SetString(PyExc_IndexError, "revision outside index");
 			return NULL;
 		}
@@ -984,7 +982,7 @@
  *   -2: not found
  * rest: valid rev
  */
-static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
+static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
 		   int hex)
 {
 	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
@@ -993,9 +991,6 @@
 	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
 		return -1;
 
-	if (self->nt == NULL)
-		return -2;
-
 	if (hex)
 		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
 	else
@@ -1003,15 +998,15 @@
 
 	for (level = off = 0; level < maxlevel; level++) {
 		int k = getnybble(node, level);
-		nodetree *n = &self->nt[off];
+		nodetreenode *n = &self->nodes[off];
 		int v = n->children[k];
 
 		if (v < 0) {
 			const char *n;
 			Py_ssize_t i;
 
-			v = -(v + 1);
-			n = index_node(self, v);
+			v = -(v + 2);
+			n = index_node(self->index, v);
 			if (n == NULL)
 				return -2;
 			for (i = level; i < maxlevel; i++)
@@ -1027,65 +1022,67 @@
 	return -4;
 }
 
-static int nt_new(indexObject *self)
+static int nt_new(nodetree *self)
 {
-	if (self->ntlength == self->ntcapacity) {
-		if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
-			PyErr_SetString(PyExc_MemoryError,
-					"overflow in nt_new");
+	if (self->length == self->capacity) {
+		unsigned newcapacity;
+		nodetreenode *newnodes;
+		newcapacity = self->capacity * 2;
+		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
+			PyErr_SetString(PyExc_MemoryError, "overflow in nt_new");
 			return -1;
 		}
-		self->ntcapacity *= 2;
-		self->nt = realloc(self->nt,
-				   self->ntcapacity * sizeof(nodetree));
-		if (self->nt == NULL) {
+		newnodes = realloc(self->nodes, newcapacity * sizeof(nodetreenode));
+		if (newnodes == NULL) {
 			PyErr_SetString(PyExc_MemoryError, "out of memory");
 			return -1;
 		}
-		memset(&self->nt[self->ntlength], 0,
-		       sizeof(nodetree) * (self->ntcapacity - self->ntlength));
+		self->capacity = newcapacity;
+		self->nodes = newnodes;
+		memset(&self->nodes[self->length], 0,
+		       sizeof(nodetreenode) * (self->capacity - self->length));
 	}
-	return self->ntlength++;
+	return self->length++;
 }
 
-static int nt_insert(indexObject *self, const char *node, int rev)
+static int nt_insert(nodetree *self, const char *node, int rev)
 {
 	int level = 0;
 	int off = 0;
 
 	while (level < 40) {
 		int k = nt_level(node, level);
-		nodetree *n;
+		nodetreenode *n;
 		int v;
 
-		n = &self->nt[off];
+		n = &self->nodes[off];
 		v = n->children[k];
 
 		if (v == 0) {
-			n->children[k] = -rev - 1;
+			n->children[k] = -rev - 2;
 			return 0;
 		}
 		if (v < 0) {
-			const char *oldnode = index_node_existing(self, -(v + 1));
+			const char *oldnode = index_node_existing(self->index, -(v + 2));
 			int noff;
 
 			if (oldnode == NULL)
 				return -1;
 			if (!memcmp(oldnode, node, 20)) {
-				n->children[k] = -rev - 1;
+				n->children[k] = -rev - 2;
 				return 0;
 			}
 			noff = nt_new(self);
 			if (noff == -1)
 				return -1;
-			/* self->nt may have been changed by realloc */
-			self->nt[off].children[k] = noff;
+			/* self->nodes may have been changed by realloc */
+			self->nodes[off].children[k] = noff;
 			off = noff;
-			n = &self->nt[off];
+			n = &self->nodes[off];
 			n->children[nt_level(oldnode, ++level)] = v;
-			if (level > self->ntdepth)
-				self->ntdepth = level;
-			self->ntsplits += 1;
+			if (level > self->depth)
+				self->depth = level;
+			self->splits += 1;
 		} else {
 			level += 1;
 			off = v;
@@ -1095,167 +1092,69 @@
 	return -1;
 }
 
-static int nt_init(indexObject *self)
+static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
 {
-	if (self->nt == NULL) {
-		if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
-			PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
-			return -1;
-		}
-		self->ntcapacity = self->raw_length < 4
-			? 4 : (int)self->raw_length / 2;
+	Py_ssize_t rev;
+	const char *node;
+	Py_ssize_t length;
+	if (!PyArg_ParseTuple(args, "n", &rev))
+		return NULL;
+	length = index_length(self->nt.index);
+	if (rev < 0 || rev >= length) {
+		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
+		return NULL;
+	}
+	node = index_node_existing(self->nt.index, rev);
+	if (nt_insert(&self->nt, node, (int)rev) == -1)
+		return NULL;
+	Py_RETURN_NONE;
+}
 
-		self->nt = calloc(self->ntcapacity, sizeof(nodetree));
-		if (self->nt == NULL) {
-			PyErr_NoMemory();
-			return -1;
-		}
-		self->ntlength = 1;
-		self->ntrev = (int)index_length(self) - 1;
-		self->ntlookups = 1;
-		self->ntmisses = 0;
-		if (nt_insert(self, nullid, INT_MAX) == -1)
-			return -1;
+static int nt_delete_node(nodetree *self, const char *node)
+{
+	/* rev==-2 happens to get encoded as 0, which is interpreted as not set */
+	return nt_insert(self, node, -2);
+}
+
+static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
+{
+	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
+	self->nodes = NULL;
+
+	self->index = index;
+	/* The input capacity is in terms of revisions, while the field is in
+	 * terms of nodetree nodes. */
+	self->capacity = (capacity < 4 ? 4 : capacity / 2);
+	self->depth = 0;
+	self->splits = 0;
+	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
+		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
+		return -1;
 	}
+	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
+	if (self->nodes == NULL) {
+		PyErr_NoMemory();
+		return -1;
+	}
+	self->length = 1;
 	return 0;
 }
 
-/*
- * Return values:
- *
- *   -3: error (exception set)
- *   -2: not found (no exception set)
- * rest: valid rev
- */
-static int index_find_node(indexObject *self,
-			   const char *node, Py_ssize_t nodelen)
-{
-	int rev;
-
-	self->ntlookups++;
-	rev = nt_find(self, node, nodelen, 0);
-	if (rev >= -1)
-		return rev;
-
-	if (nt_init(self) == -1)
-		return -3;
+static PyTypeObject indexType;
 
-	/*
-	 * For the first handful of lookups, we scan the entire index,
-	 * and cache only the matching nodes. This optimizes for cases
-	 * like "hg tip", where only a few nodes are accessed.
-	 *
-	 * After that, we cache every node we visit, using a single
-	 * scan amortized over multiple lookups.  This gives the best
-	 * bulk performance, e.g. for "hg log".
-	 */
-	if (self->ntmisses++ < 4) {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -3;
-			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
-				if (nt_insert(self, n, rev) == -1)
-					return -3;
-				break;
-			}
-		}
-	} else {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -3;
-			if (nt_insert(self, n, rev) == -1) {
-				self->ntrev = rev + 1;
-				return -3;
-			}
-			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
-				break;
-			}
-		}
-		self->ntrev = rev;
-	}
-
-	if (rev >= 0)
-		return rev;
-	return -2;
+static int ntobj_init(nodetreeObject *self, PyObject *args)
+{
+	PyObject *index;
+	unsigned capacity;
+	if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
+		return -1;
+	Py_INCREF(index);
+	return nt_init(&self->nt, (indexObject*)index, capacity);
 }
 
-static void raise_revlog_error(void)
-{
-	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
-
-	mod = PyImport_ImportModule("mercurial.error");
-	if (mod == NULL) {
-		goto cleanup;
-	}
-
-	dict = PyModule_GetDict(mod);
-	if (dict == NULL) {
-		goto cleanup;
-	}
-	Py_INCREF(dict);
-
-	errclass = PyDict_GetItemString(dict, "RevlogError");
-	if (errclass == NULL) {
-		PyErr_SetString(PyExc_SystemError,
-				"could not find RevlogError");
-		goto cleanup;
-	}
-
-	/* value of exception is ignored by callers */
-	PyErr_SetString(errclass, "RevlogError");
-
-cleanup:
-	Py_XDECREF(dict);
-	Py_XDECREF(mod);
-}
-
-static PyObject *index_getitem(indexObject *self, PyObject *value)
-{
-	char *node;
-	Py_ssize_t nodelen;
-	int rev;
-
-	if (PyInt_Check(value))
-		return index_get(self, PyInt_AS_LONG(value));
-
-	if (node_check(value, &node, &nodelen) == -1)
-		return NULL;
-	rev = index_find_node(self, node, nodelen);
-	if (rev >= -1)
-		return PyInt_FromLong(rev);
-	if (rev == -2)
-		raise_revlog_error();
-	return NULL;
-}
-
-/*
- * Fully populate the radix tree.
- */
-static int nt_populate(indexObject *self) {
-	int rev;
-	if (self->ntrev > 0) {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -1;
-			if (nt_insert(self, n, rev) == -1)
-				return -1;
-		}
-		self->ntrev = -1;
-	}
-	return 0;
-}
-
-static int nt_partialmatch(indexObject *self, const char *node,
+static int nt_partialmatch(nodetree *self, const char *node,
 			   Py_ssize_t nodelen)
 {
-	if (nt_init(self) == -1)
-		return -3;
-	if (nt_populate(self) == -1)
-		return -3;
-
 	return nt_find(self, node, nodelen, 1);
 }
 
@@ -1268,24 +1167,19 @@
  *   -2: not found (no exception set)
  * rest: length of shortest prefix
  */
-static int nt_shortest(indexObject *self, const char *node)
+static int nt_shortest(nodetree *self, const char *node)
 {
 	int level, off;
 
-	if (nt_init(self) == -1)
-		return -3;
-	if (nt_populate(self) == -1)
-		return -3;
-
 	for (level = off = 0; level < 40; level++) {
 		int k, v;
-		nodetree *n = &self->nt[off];
+		nodetreenode *n = &self->nodes[off];
 		k = nt_level(node, level);
 		v = n->children[k];
 		if (v < 0) {
 			const char *n;
-			v = -(v + 1);
-			n = index_node_existing(self, v);
+			v = -(v + 2);
+			n = index_node_existing(self->index, v);
 			if (n == NULL)
 				return -3;
 			if (memcmp(node, n, 20) != 0)
@@ -1310,6 +1204,204 @@
 	return -3;
 }
 
+static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
+{
+	PyObject *val;
+	char *node;
+	int length;
+
+	if (!PyArg_ParseTuple(args, "O", &val))
+		return NULL;
+	if (node_check(val, &node) == -1)
+		return NULL;
+
+	length = nt_shortest(&self->nt, node);
+	if (length == -3)
+		return NULL;
+	if (length == -2) {
+		raise_revlog_error();
+		return NULL;
+	}
+	return PyInt_FromLong(length);
+}
+
+static void nt_dealloc(nodetree *self)
+{
+	free(self->nodes);
+	self->nodes = NULL;
+}
+
+static void ntobj_dealloc(nodetreeObject *self)
+{
+	Py_XDECREF(self->nt.index);
+	nt_dealloc(&self->nt);
+	PyObject_Del(self);
+}
+
+static PyMethodDef ntobj_methods[] = {
+	{"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
+	 "insert an index entry"},
+	{"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
+	 "find length of shortest hex nodeid of a binary ID"},
+	{NULL} /* Sentinel */
+};
+
+static PyTypeObject nodetreeType = {
+	PyVarObject_HEAD_INIT(NULL, 0) /* header */
+	"parsers.nodetree",        /* tp_name */
+	sizeof(nodetreeObject),    /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)ntobj_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	0,                         /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	"nodetree",                /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	ntobj_methods,             /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	(initproc)ntobj_init,      /* tp_init */
+	0,                         /* tp_alloc */
+};
+
+static int index_init_nt(indexObject *self)
+{
+	if (!self->ntinitialized) {
+		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
+			nt_dealloc(&self->nt);
+			return -1;
+		}
+		if (nt_insert(&self->nt, nullid, -1) == -1) {
+			nt_dealloc(&self->nt);
+			return -1;
+		}
+		self->ntinitialized = 1;
+		self->ntrev = (int)index_length(self);
+		self->ntlookups = 1;
+		self->ntmisses = 0;
+	}
+	return 0;
+}
+
+/*
+ * Return values:
+ *
+ *   -3: error (exception set)
+ *   -2: not found (no exception set)
+ * rest: valid rev
+ */
+static int index_find_node(indexObject *self,
+			   const char *node, Py_ssize_t nodelen)
+{
+	int rev;
+
+	if (index_init_nt(self) == -1)
+		return -3;
+
+	self->ntlookups++;
+	rev = nt_find(&self->nt, node, nodelen, 0);
+	if (rev >= -1)
+		return rev;
+
+	/*
+	 * For the first handful of lookups, we scan the entire index,
+	 * and cache only the matching nodes. This optimizes for cases
+	 * like "hg tip", where only a few nodes are accessed.
+	 *
+	 * After that, we cache every node we visit, using a single
+	 * scan amortized over multiple lookups.  This gives the best
+	 * bulk performance, e.g. for "hg log".
+	 */
+	if (self->ntmisses++ < 4) {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -3;
+			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
+				if (nt_insert(&self->nt, n, rev) == -1)
+					return -3;
+				break;
+			}
+		}
+	} else {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -3;
+			if (nt_insert(&self->nt, n, rev) == -1) {
+				self->ntrev = rev + 1;
+				return -3;
+			}
+			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
+				break;
+			}
+		}
+		self->ntrev = rev;
+	}
+
+	if (rev >= 0)
+		return rev;
+	return -2;
+}
+
+static PyObject *index_getitem(indexObject *self, PyObject *value)
+{
+	char *node;
+	int rev;
+
+	if (PyInt_Check(value))
+		return index_get(self, PyInt_AS_LONG(value));
+
+	if (node_check(value, &node) == -1)
+		return NULL;
+	rev = index_find_node(self, node, 20);
+	if (rev >= -1)
+		return PyInt_FromLong(rev);
+	if (rev == -2)
+		raise_revlog_error();
+	return NULL;
+}
+
+/*
+ * Fully populate the radix tree.
+ */
+static int index_populate_nt(indexObject *self) {
+	int rev;
+	if (self->ntrev > 0) {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -1;
+			if (nt_insert(&self->nt, n, rev) == -1)
+				return -1;
+		}
+		self->ntrev = -1;
+	}
+	return 0;
+}
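
index_find_node keeps its two-phase miss handling: the first few misses scan the index but cache only the hit (cheap for "hg tip"-style access), after which every visited node is cached, amortizing one full scan across many lookups ("hg log"-style access). A pure-Python model of that policy with toy data:

    class ToyIndex(object):
        def __init__(self, nodes):
            self.nodes = nodes
            self.cache = {}                    # plays the role of the trie
            self.ntrev = len(nodes)            # lowest rev not yet bulk-cached
            self.ntmisses = 0

        def find(self, node):
            if node in self.cache:
                return self.cache[node]
            self.ntmisses += 1
            bulk = self.ntmisses >= 4          # switch to cache-everything mode
            for rev in range(self.ntrev - 1, -1, -1):
                n = self.nodes[rev]
                if bulk:
                    self.cache[n] = rev
                if n == node:
                    self.cache[node] = rev
                    if bulk:
                        self.ntrev = rev       # resume the bulk scan here later
                    return rev
            if bulk:
                self.ntrev = 0                 # fully scanned
            return None

    idx = ToyIndex(['n0', 'n1', 'n2', 'n3'])
    print(idx.find('n2'))                      # 2; only 'n2' cached so far
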
+
 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
 {
 	const char *fullnode;
@@ -1338,12 +1430,15 @@
 		Py_RETURN_NONE;
 	}
 
-	rev = nt_partialmatch(self, node, nodelen);
+	if (index_init_nt(self) == -1)
+		return NULL;
+	if (index_populate_nt(self) == -1)
+		return NULL;
+	rev = nt_partialmatch(&self->nt, node, nodelen);
 
 	switch (rev) {
 	case -4:
 		raise_revlog_error();
-	case -3:
 		return NULL;
 	case -2:
 		Py_RETURN_NONE;
@@ -1360,18 +1455,21 @@
 
 static PyObject *index_shortest(indexObject *self, PyObject *args)
 {
-	Py_ssize_t nodelen;
 	PyObject *val;
 	char *node;
 	int length;
 
 	if (!PyArg_ParseTuple(args, "O", &val))
 		return NULL;
-	if (node_check(val, &node, &nodelen) == -1)
+	if (node_check(val, &node) == -1)
 		return NULL;
 
 	self->ntlookups++;
-	length = nt_shortest(self, node);
+	if (index_init_nt(self) == -1)
+		return NULL;
+	if (index_populate_nt(self) == -1)
+		return NULL;
+	length = nt_shortest(&self->nt, node);
 	if (length == -3)
 		return NULL;
 	if (length == -2) {
@@ -1383,16 +1481,15 @@
 
 static PyObject *index_m_get(indexObject *self, PyObject *args)
 {
-	Py_ssize_t nodelen;
 	PyObject *val;
 	char *node;
 	int rev;
 
 	if (!PyArg_ParseTuple(args, "O", &val))
 		return NULL;
-	if (node_check(val, &node, &nodelen) == -1)
+	if (node_check(val, &node) == -1)
 		return NULL;
-	rev = index_find_node(self, node, nodelen);
+	rev = index_find_node(self, node, 20);
 	if (rev == -3)
 		return NULL;
 	if (rev == -2)
@@ -1403,17 +1500,16 @@
 static int index_contains(indexObject *self, PyObject *value)
 {
 	char *node;
-	Py_ssize_t nodelen;
 
 	if (PyInt_Check(value)) {
 		long rev = PyInt_AS_LONG(value);
 		return rev >= -1 && rev < index_length(self);
 	}
 
-	if (node_check(value, &node, &nodelen) == -1)
+	if (node_check(value, &node) == -1)
 		return -1;
 
-	switch (index_find_node(self, node, nodelen)) {
+	switch (index_find_node(self, node, 20)) {
 	case -3:
 		return -1;
 	case -2:
@@ -1554,7 +1650,7 @@
 		goto bail;
 	}
 
-	interesting = calloc(sizeof(*interesting), 1 << revcount);
+	interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
 	if (interesting == NULL) {
 		PyErr_NoMemory();
 		goto bail;
@@ -1687,7 +1783,7 @@
 	revs = PyMem_Malloc(argcount * sizeof(*revs));
 	if (argcount > 0 && revs == NULL)
 		return PyErr_NoMemory();
-	len = index_length(self) - 1;
+	len = index_length(self);
 
 	for (i = 0; i < argcount; i++) {
 		static const int capacity = 24;
@@ -1787,7 +1883,7 @@
 /*
  * Invalidate any trie entries introduced by added revs.
  */
-static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
+static void index_invalidate_added(indexObject *self, Py_ssize_t start)
 {
 	Py_ssize_t i, len = PyList_GET_SIZE(self->added);
 
@@ -1795,7 +1891,7 @@
 		PyObject *tuple = PyList_GET_ITEM(self->added, i);
 		PyObject *node = PyTuple_GET_ITEM(tuple, 7);
 
-		nt_insert(self, PyBytes_AS_STRING(node), -1);
+		nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
 	}
 
 	if (start == 0)
@@ -1809,7 +1905,7 @@
 static int index_slice_del(indexObject *self, PyObject *item)
 {
 	Py_ssize_t start, stop, step, slicelength;
-	Py_ssize_t length = index_length(self);
+	Py_ssize_t length = index_length(self) + 1;
 	int ret = 0;
 
 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
@@ -1845,23 +1941,23 @@
 		return -1;
 	}
 
-	if (start < self->length - 1) {
-		if (self->nt) {
+	if (start < self->length) {
+		if (self->ntinitialized) {
 			Py_ssize_t i;
 
-			for (i = start + 1; i < self->length - 1; i++) {
+			for (i = start + 1; i < self->length; i++) {
 				const char *node = index_node_existing(self, i);
 				if (node == NULL)
 					return -1;
 
-				nt_insert(self, node, -1);
+				nt_delete_node(&self->nt, node);
 			}
 			if (self->added)
-				nt_invalidate_added(self, 0);
+				index_invalidate_added(self, 0);
 			if (self->ntrev > start)
 				self->ntrev = (int)start;
 		}
-		self->length = start + 1;
+		self->length = start;
 		if (start < self->raw_length) {
 			if (self->cache) {
 				Py_ssize_t i;
@@ -1873,13 +1969,13 @@
 		goto done;
 	}
 
-	if (self->nt) {
-		nt_invalidate_added(self, start - self->length + 1);
+	if (self->ntinitialized) {
+		index_invalidate_added(self, start - self->length);
 		if (self->ntrev > start)
 			self->ntrev = (int)start;
 	}
 	if (self->added)
-		ret = PyList_SetSlice(self->added, start - self->length + 1,
+		ret = PyList_SetSlice(self->added, start - self->length,
 				      PyList_GET_SIZE(self->added), NULL);
 done:
 	Py_CLEAR(self->headrevs);
@@ -1897,17 +1993,16 @@
 				  PyObject *value)
 {
 	char *node;
-	Py_ssize_t nodelen;
 	long rev;
 
 	if (PySlice_Check(item) && value == NULL)
 		return index_slice_del(self, item);
 
-	if (node_check(item, &node, &nodelen) == -1)
+	if (node_check(item, &node) == -1)
 		return -1;
 
 	if (value == NULL)
-		return self->nt ? nt_insert(self, node, -1) : 0;
+		return self->ntinitialized ? nt_delete_node(&self->nt, node) : 0;
 	rev = PyInt_AsLong(value);
 	if (rev > INT_MAX || rev < 0) {
 		if (!PyErr_Occurred())
@@ -1915,9 +2010,9 @@
 		return -1;
 	}
 
-	if (nt_init(self) == -1)
+	if (index_init_nt(self) == -1)
 		return -1;
-	return nt_insert(self, node, (int)rev);
+	return nt_insert(&self->nt, node, (int)rev);
 }
 
 /*
@@ -1966,7 +2061,7 @@
 	self->headrevs = NULL;
 	self->filteredrevs = Py_None;
 	Py_INCREF(Py_None);
-	self->nt = NULL;
+	self->ntinitialized = 0;
 	self->offsets = NULL;
 
 	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
@@ -1984,8 +2079,6 @@
 	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
 	self->data = data_obj;
 
-	self->ntlength = self->ntcapacity = 0;
-	self->ntdepth = self->ntsplits = 0;
 	self->ntlookups = self->ntmisses = 0;
 	self->ntrev = -1;
 	Py_INCREF(self->data);
@@ -1995,14 +2088,14 @@
 		if (len == -1)
 			goto bail;
 		self->raw_length = len;
-		self->length = len + 1;
+		self->length = len;
 	} else {
 		if (size % v1_hdrsize) {
 			PyErr_SetString(PyExc_ValueError, "corrupt index file");
 			goto bail;
 		}
 		self->raw_length = size / v1_hdrsize;
-		self->length = self->raw_length + 1;
+		self->length = self->raw_length;
 	}
 
 	return 0;
@@ -2016,6 +2109,35 @@
 	return (PyObject *)self;
 }
 
+static void _index_clearcaches(indexObject *self)
+{
+	if (self->cache) {
+		Py_ssize_t i;
+
+		for (i = 0; i < self->raw_length; i++)
+			Py_CLEAR(self->cache[i]);
+		free(self->cache);
+		self->cache = NULL;
+	}
+	if (self->offsets) {
+		PyMem_Free((void *)self->offsets);
+		self->offsets = NULL;
+	}
+	if (self->ntinitialized) {
+		nt_dealloc(&self->nt);
+	}
+	self->ntinitialized = 0;
+	Py_CLEAR(self->headrevs);
+}
+
+static PyObject *index_clearcaches(indexObject *self)
+{
+	_index_clearcaches(self);
+	self->ntrev = -1;
+	self->ntlookups = self->ntmisses = 0;
+	Py_RETURN_NONE;
+}
+
 static void index_dealloc(indexObject *self)
 {
 	_index_clearcaches(self);
@@ -2066,8 +2188,8 @@
 	 "get filtered head revisions"}, /* Can always do filtering */
 	{"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
 	 "determine revisions with deltas to reconstruct fulltext"},
-	{"insert", (PyCFunction)index_insert, METH_VARARGS,
-	 "insert an index entry"},
+	{"append", (PyCFunction)index_append, METH_O,
+	 "append an index entry"},
 	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
 	 "match a potentially ambiguous node ID"},
 	{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
@@ -2175,6 +2297,12 @@
 	Py_INCREF(&indexType);
 	PyModule_AddObject(mod, "index", (PyObject *)&indexType);
 
+	nodetreeType.tp_new = PyType_GenericNew;
+	if (PyType_Ready(&nodetreeType) < 0)
+		return;
+	Py_INCREF(&nodetreeType);
+	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
+
 	nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
 				  -1, -1, -1, -1, nullid, 20);
 	if (nullentry)
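
After this change the index no longer counts a phantom trailing null entry: len(index) is the number of real revisions, and position -1 is special-cased to return the null entry rather than meaning "last element". A list-based sketch of the new contract (the real null entry's fields are abbreviated here):

    NULLENTRY = (0, 0, 0, -1, -1, -1, -1, b'\x00' * 20)

    class ToyIndex(object):
        def __init__(self, entries):
            self.entries = list(entries)

        def __len__(self):
            return len(self.entries)           # real revisions only

        def __getitem__(self, pos):
            if pos == -1:
                return NULLENTRY               # -1 is nullrev, not "last item"
            if not 0 <= pos < len(self.entries):
                raise IndexError('revlog index out of range')
            return self.entries[pos]

    idx = ToyIndex(['e0', 'e1'])
    assert len(idx) == 2 and idx[-1] is NULLENTRY
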
--- a/mercurial/changegroup.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/changegroup.py	Tue Sep 04 12:16:28 2018 -0400
@@ -14,33 +14,37 @@
 from .i18n import _
 from .node import (
     hex,
+    nullid,
     nullrev,
     short,
 )
 
+from .thirdparty import (
+    attr,
+)
+
 from . import (
-    dagutil,
+    dagop,
     error,
+    match as matchmod,
     mdiff,
     phases,
     pycompat,
+    repository,
     util,
 )
 
 from .utils import (
+    interfaceutil,
     stringutil,
 )
 
-_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
-_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
-_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
+_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
+_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
+_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
 
 LFS_REQUIREMENT = 'lfs'
 
-# When narrowing is finalized and no longer subject to format changes,
-# we should move this to just "narrow" or similar.
-NARROW_REQUIREMENT = 'narrowhg-experimental'
-
 readexactly = util.readexactly
 
 def getchunk(stream):
@@ -61,6 +65,10 @@
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)
 
+def _fileheader(path):
+    """Obtain a changegroup chunk header for a named path."""
+    return chunkheader(len(path)) + path
+
 def writechunks(ui, chunks, filename, vfs=None):
     """Write chunks to a file and return its filename.
 
@@ -114,7 +122,7 @@
     bundlerepo and some debug commands - their use is discouraged.
     """
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '01'
     _grouplistcount = 1 # One list of files after the manifests
 
@@ -187,7 +195,7 @@
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
-        header = struct.unpack(self.deltaheader, headerdata)
+        header = self.deltaheader.unpack(headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
         return (node, p1, p2, cs, deltabase, delta, flags)
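
Precompiling the delta headers as struct.Struct objects lets unpacking reuse the compiled format and turns the per-class struct.calcsize() call into a .size attribute read. Standard-library only, so this runs as-is:

    import struct

    V2 = struct.Struct("20s20s20s20s20s")  # node, p1, p2, basenode, linknode
    assert V2.size == 100                  # replaces struct.calcsize(fmt)

    headerdata = b'\x11' * V2.size
    node, p1, p2, deltabase, cs = V2.unpack(headerdata)
    assert node == b'\x11' * 20
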
@@ -245,7 +253,7 @@
         # be empty during the pull
         self.manifestheader()
         deltas = self.deltaiter()
-        repo.manifestlog.addgroup(deltas, revmap, trp)
+        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
         prog.complete()
         self.callback = None
 
@@ -325,7 +333,7 @@
                 cl = repo.changelog
                 ml = repo.manifestlog
                 # validate incoming csets have their manifests
-                for cset in xrange(clstart, clend):
+                for cset in pycompat.xrange(clstart, clend):
                     mfnode = cl.changelogrevision(cset).manifest
                     mfest = ml[mfnode].readdelta()
                     # store file cgnodes we must see
@@ -367,7 +375,7 @@
                 repo.hook('pretxnchangegroup',
                           throw=True, **pycompat.strkwargs(hookargs))
 
-            added = [cl.node(r) for r in xrange(clstart, clend)]
+            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
             phaseall = None
             if srctype in ('push', 'serve'):
                 # Old servers can not push the boundary themselves.
@@ -446,7 +454,7 @@
     remain the same.
     """
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '02'
 
     def _deltaheader(self, headertuple, prevnode):
@@ -462,7 +470,7 @@
     separating manifests and files.
     """
     deltaheader = _CHANGEGROUPV3_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '03'
     _grouplistcount = 2 # One list of manifests and one list of files
 
@@ -476,9 +484,8 @@
             # If we get here, there are directory manifests in the changegroup
             d = chunkdata["filename"]
             repo.ui.debug("adding %s revisions\n" % d)
-            dirlog = repo.manifestlog._revlog.dirlog(d)
             deltas = self.deltaiter()
-            if not dirlog.addgroup(deltas, revmap, trp):
+            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                 raise error.Abort(_("received dir revlog group is empty"))
 
 class headerlessfixup(object):
@@ -493,139 +500,358 @@
             return d
         return readexactly(self._fh, n)
 
-class cg1packer(object):
-    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
-    version = '01'
-    def __init__(self, repo, bundlecaps=None):
+@interfaceutil.implementer(repository.irevisiondeltarequest)
+@attr.s(slots=True, frozen=True)
+class revisiondeltarequest(object):
+    node = attr.ib()
+    linknode = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    ellipsis = attr.ib(default=False)
+
+def _revisiondeltatochunks(delta, headerfn):
+    """Serialize a revisiondelta to changegroup chunks."""
+
+    # The captured revision delta may be encoded as a delta against
+    # a base revision or as a full revision. The changegroup format
+    # requires that everything on the wire be deltas. So for full
+    # revisions, we need to invent a header that says to rewrite
+    # data.
+
+    if delta.delta is not None:
+        prefix, data = b'', delta.delta
+    elif delta.basenode == nullid:
+        data = delta.revision
+        prefix = mdiff.trivialdiffheader(len(data))
+    else:
+        data = delta.revision
+        prefix = mdiff.replacediffheader(delta.baserevisionsize,
+                                         len(data))
+
+    meta = headerfn(delta)
+
+    yield chunkheader(len(meta) + len(prefix) + len(data))
+    yield meta
+    if prefix:
+        yield prefix
+    yield data
+
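For illustration, the full-revision cases above rely on a patch header being three big-endian longs (start, end, newlength): replacing bytes [0, baselen) of the base with the full text rewrites the whole revision. A minimal sketch of the resulting chunk framing, assuming that header layout (the helper name is invented):

    import struct

    def framefullrevision(meta, fulltext, baselen=0):
        # One (start, end, newlength) patch op covering the whole base;
        # with baselen=0 this matches trivialdiffheader, otherwise
        # replacediffheader.
        prefix = struct.pack(">lll", 0, baselen, len(fulltext))
        body = meta + prefix + fulltext
        return struct.pack(">l", len(body)) + body  # chunkheader + payload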
+def _sortnodesnormal(store, nodes, reorder):
+    """Sort nodes for changegroup generation and turn into revnums."""
+    # for generaldelta revlogs, we linearize the revs; this will both be
+    # much quicker and generate a much smaller bundle
+    if (store._generaldelta and reorder is None) or reorder:
+        revs = set(store.rev(n) for n in nodes)
+        return dagop.linearize(revs, store.parentrevs)
+    else:
+        return sorted([store.rev(n) for n in nodes])
+
+def _sortnodesellipsis(store, nodes, cl, lookup):
+    """Sort nodes for changegroup generation and turn into revnums."""
+    # Ellipses serving mode.
+    #
+    # In a perfect world, we'd generate better ellipsis-ified graphs
+    # for non-changelog revlogs. In practice, we haven't started doing
+    # that yet, so the resulting DAGs for the manifestlog and filelogs
+    # are actually full of bogus parentage on all the ellipsis
+    # nodes. This has the side effect that, while the contents are
+    # correct, the individual DAGs might be completely out of whack in
+    # a case like 882681bc3166 and its ancestors (back about 10
+    # revisions or so) in the main hg repo.
+    #
+    # The one invariant we *know* holds is that the new (potentially
+    # bogus) DAG shape will be valid if we order the nodes in the
+    # order that they're introduced in dramatis personae by the
+    # changelog, so what we do is we sort the non-changelog histories
+    # by the order in which they are used by the changelog.
+    key = lambda n: cl.rev(lookup(n))
+    return [store.rev(n) for n in sorted(nodes, key=key)]
+
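A toy illustration of the sort above, with plain dicts standing in for the store, the changelog and the lookup callback (all data invented):

    clrev = {b'l1': 5, b'l2': 2, b'l3': 9}     # changelog rev per linknode
    storerev = {b'n1': 0, b'n2': 1, b'n3': 2}  # local rev per node
    lookup = {b'n1': b'l1', b'n2': b'l2', b'n3': b'l3'}.__getitem__

    nodes = [b'n1', b'n2', b'n3']
    revs = [storerev[n]
            for n in sorted(nodes, key=lambda n: clrev[lookup(n)])]
    assert revs == [1, 0, 2]  # n2 first: its linked changeset is earliest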
+def _makenarrowdeltarequest(cl, store, ischangelog, rev, node, linkrev,
+                            linknode, clrevtolocalrev, fullclnodes,
+                            precomputedellipsis):
+    linkparents = precomputedellipsis[linkrev]
+    def local(clrev):
+        """Turn a changelog revnum into a local revnum.
+
+        The ellipsis dag is stored as revnums on the changelog,
+        but when we're producing ellipsis entries for
+        non-changelog revlogs, we need to turn those numbers into
+        something local. This does that for us, and during the
+        changelog sending phase will also expand the stored
+        mappings as needed.
+        """
+        if clrev == nullrev:
+            return nullrev
+
+        if ischangelog:
+            return clrev
+
+        # Walk the ellipsis-ized changelog breadth-first looking for a
+        # change that has been linked from the current revlog.
+        #
+        # For a flat manifest revlog only a single step should be necessary
+        # as all relevant changelog entries are relevant to the flat
+        # manifest.
+        #
+        # For a filelog or tree manifest dirlog however not every changelog
+        # entry will have been relevant, so we need to skip some changelog
+        # nodes even after ellipsis-izing.
+        walk = [clrev]
+        while walk:
+            p = walk[0]
+            walk = walk[1:]
+            if p in clrevtolocalrev:
+                return clrevtolocalrev[p]
+            elif p in fullclnodes:
+                walk.extend([pp for pp in cl.parentrevs(p)
+                                if pp != nullrev])
+            elif p in precomputedellipsis:
+                walk.extend([pp for pp in precomputedellipsis[p]
+                                if pp != nullrev])
+            else:
+                # In this case, we've got an ellipsis with parents
+                # outside the current bundle (likely an
+                # incremental pull). We "know" that we can use the
+                # value of this same revlog at whatever revision
+                # is pointed to by linknode. "Know" is in scare
+                # quotes because I haven't done enough examination
+                # of edge cases to convince myself this is really
+                # a fact - it works for all the (admittedly
+                # thorough) cases in our testsuite, but I would be
+                # somewhat unsurprised to find a case in the wild
+                # where this breaks down a bit. That said, I don't
+                # know if it would hurt anything.
+                for i in pycompat.xrange(rev, 0, -1):
+                    if store.linkrev(i) == clrev:
+                        return i
+                # We failed to resolve a parent for this node, so
+                # we crash the changegroup construction.
+                raise error.Abort(
+                    'unable to resolve parent while packing %r %r'
+                    ' for changeset %r' % (store.indexfile, rev, clrev))
+
+        return nullrev
+
+    if not linkparents or (
+        store.parentrevs(rev) == (nullrev, nullrev)):
+        p1, p2 = nullrev, nullrev
+    elif len(linkparents) == 1:
+        p1, = sorted(local(p) for p in linkparents)
+        p2 = nullrev
+    else:
+        p1, p2 = sorted(local(p) for p in linkparents)
+
+    p1node, p2node = store.node(p1), store.node(p2)
+
+    # TODO: try and actually send deltas for ellipsis data blocks
+    return revisiondeltarequest(
+        node=node,
+        p1node=p1node,
+        p2node=p2node,
+        linknode=linknode,
+        basenode=nullid,
+        ellipsis=True,
+    )
+
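The breadth-first search in ``local()`` above needs only parent lookups plus the three maps. A self-contained sketch of that walk over a toy ellipsis graph (names and data invented for illustration):

    from collections import deque

    nullrev = -1

    def localrev(clrev, clrevtolocalrev, fullclnodes, parents, ellipsis):
        """Map a changelog revnum to a local revnum by walking ellipsis
        parents breadth-first until a locally-linked revision is found."""
        walk = deque([clrev])
        while walk:
            p = walk.popleft()
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            if p in fullclnodes:
                walk.extend(pp for pp in parents[p] if pp != nullrev)
            elif p in ellipsis:
                walk.extend(pp for pp in ellipsis[p] if pp != nullrev)
        return nullrev

    # rev 7 is an ellipsis whose recorded parent, rev 4, is linked locally:
    assert localrev(7, {4: 1}, set(), {}, {7: (4, nullrev)}) == 1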
+def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
+               allowreorder,
+               topic=None,
+               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
+               precomputedellipsis=None):
+    """Calculate deltas for a set of revisions.
+
+    Is a generator of ``revisiondelta`` instances.
+
+    If topic is not None, progress detail will be generated using this
+    topic name (e.g. changesets or manifests).
+    """
+    if not nodes:
+        return
+
+    # We perform two passes over the revisions whose data we will emit.
+    #
+    # In the first pass, we obtain information about the deltas that will
+    # be generated. This involves computing linknodes and adjusting the
+    # request to take shallow fetching into account. The end result of
+    # this pass is a list of "request" objects stating which deltas
+    # to obtain.
+    #
+    # The second pass is simply resolving the requested deltas.
+
+    cl = repo.changelog
+
+    if ischangelog:
+        # Changelog doesn't benefit from reordering revisions. So send
+        # out revisions in store order.
+        # TODO the API would be cleaner if this were controlled by the
+        # store producing the deltas.
+        revs = sorted(cl.rev(n) for n in nodes)
+    elif ellipses:
+        revs = _sortnodesellipsis(store, nodes, cl, lookup)
+    else:
+        revs = _sortnodesnormal(store, nodes, allowreorder)
+
+    # In the first pass, collect info about the deltas we'll be
+    # generating.
+    requests = []
+
+    # Add the parent of the first rev.
+    revs.insert(0, store.parentrevs(revs[0])[0])
+
+    for i in pycompat.xrange(len(revs) - 1):
+        prev = revs[i]
+        curr = revs[i + 1]
+
+        node = store.node(curr)
+        linknode = lookup(node)
+        p1node, p2node = store.parents(node)
+
+        if ellipses:
+            linkrev = cl.rev(linknode)
+            clrevtolocalrev[linkrev] = curr
+
+            # This is a node to send in full, because the changeset it
+            # corresponds to was a full changeset.
+            if linknode in fullclnodes:
+                requests.append(revisiondeltarequest(
+                    node=node,
+                    p1node=p1node,
+                    p2node=p2node,
+                    linknode=linknode,
+                    basenode=None,
+                ))
+
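+            # The linked changeset is neither sent in full nor a known
+            # ellipsis revision, so it is not part of this changegroup
+            # at all; skip the node.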
+            elif linkrev not in precomputedellipsis:
+                pass
+            else:
+                requests.append(_makenarrowdeltarequest(
+                    cl, store, ischangelog, curr, node, linkrev, linknode,
+                    clrevtolocalrev, fullclnodes,
+                    precomputedellipsis))
+        else:
+            requests.append(revisiondeltarequest(
+                node=node,
+                p1node=p1node,
+                p2node=p2node,
+                linknode=linknode,
+                basenode=store.node(prev) if forcedeltaparentprev else None,
+            ))
+
+    # We expect the first pass to be fast, so we only engage the progress
+    # meter for constructing the revision deltas.
+    progress = None
+    if topic is not None:
+        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
+                                        total=len(requests))
+
+    for i, delta in enumerate(store.emitrevisiondeltas(requests)):
+        if progress:
+            progress.update(i + 1)
+
+        yield delta
+
+    if progress:
+        progress.complete()
+
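The pairwise walk in the first pass above prepends the first revision's parent so every revision has a ``prev`` to serve as the implicit cg1 delta base. A tiny sketch of the pairing (toy revision numbers):

    revs = [4, 5, 7]              # sorted revisions to send
    revs.insert(0, 2)             # store.parentrevs(revs[0])[0], say rev 2

    pairs = [(revs[i], revs[i + 1]) for i in range(len(revs) - 1)]
    assert pairs == [(2, 4), (4, 5), (5, 7)]  # (prev, curr) per request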
+class cgpacker(object):
+    def __init__(self, repo, filematcher, version, allowreorder,
+                 builddeltaheader, manifestsend,
+                 forcedeltaparentprev=False,
+                 bundlecaps=None, ellipses=False,
+                 shallow=False, ellipsisroots=None, fullnodes=None):
         """Given a source repo, construct a bundler.
 
+        filematcher is a matcher that matches on files to include in the
+        changegroup. Used to facilitate sparse changegroups.
+
+        allowreorder controls whether reordering of revisions is allowed.
+        This value is used when ``bundle.reorder`` is ``auto`` or isn't
+        set.
+
+        forcedeltaparentprev indicates whether delta parents must be against
+        the previous revision in a delta group. This should only be used for
+        compatibility with changegroup version 1.
+
+        builddeltaheader is a callable that constructs the header for a group
+        delta.
+
+        manifestsend is a chunk to send after manifests have been fully emitted.
+
+        ellipses indicates whether ellipsis serving mode is enabled.
+
         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle. While bundlecaps is
         unused in core Mercurial, extensions rely on this feature to communicate
         capabilities to customize the changegroup packer.
+
+        shallow indicates whether shallow data might be sent. The packer may
+        need to pack file contents not introduced by the changes being packed.
+
+        fullnodes is the set of changelog nodes which should not be ellipsis
+        nodes. We store this rather than the set of nodes that should be
+        ellipsis because for very large histories we expect this to be
+        significantly smaller.
         """
+        assert filematcher
+        self._filematcher = filematcher
+
+        self.version = version
+        self._forcedeltaparentprev = forcedeltaparentprev
+        self._builddeltaheader = builddeltaheader
+        self._manifestsend = manifestsend
+        self._ellipses = ellipses
+
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
+        self._isshallow = shallow
+        self._fullclnodes = fullnodes
+
+        # Maps ellipsis revs to their roots at the changelog level.
+        self._precomputedellipsis = ellipsisroots
+
         # experimental config: bundle.reorder
         reorder = repo.ui.config('bundle', 'reorder')
         if reorder == 'auto':
-            reorder = None
+            self._reorder = allowreorder
         else:
-            reorder = stringutil.parsebool(reorder)
+            self._reorder = stringutil.parsebool(reorder)
+
         self._repo = repo
-        self._reorder = reorder
+
         if self._repo.ui.verbose and not self._repo.ui.debugflag:
             self._verbosenote = self._repo.ui.note
         else:
             self._verbosenote = lambda s: None
 
-    def close(self):
-        return closechunk()
-
-    def fileheader(self, fname):
-        return chunkheader(len(fname)) + fname
-
-    # Extracted both for clarity and for overriding in extensions.
-    def _sortgroup(self, revlog, nodelist, lookup):
-        """Sort nodes for change group and turn them into revnums."""
-        # for generaldelta revlogs, we linearize the revs; this will both be
-        # much quicker and generate a much smaller bundle
-        if (revlog._generaldelta and self._reorder is None) or self._reorder:
-            dag = dagutil.revlogdag(revlog)
-            return dag.linearize(set(revlog.rev(n) for n in nodelist))
-        else:
-            return sorted([revlog.rev(n) for n in nodelist])
-
-    def group(self, nodelist, revlog, lookup, units=None):
-        """Calculate a delta group, yielding a sequence of changegroup chunks
-        (strings).
-
-        Given a list of changeset revs, return a set of deltas and
-        metadata corresponding to nodes. The first delta is
-        first parent(nodelist[0]) -> nodelist[0], the receiver is
-        guaranteed to have this parent as it has all history before
-        these changesets. In the case firstparent is nullrev the
-        changegroup starts with a full revision.
-
-        If units is not None, progress detail will be generated, units specifies
-        the type of revlog that is touched (changelog, manifest, etc.).
-        """
-        # if we don't have any revisions touched by these changesets, bail
-        if len(nodelist) == 0:
-            yield self.close()
-            return
-
-        revs = self._sortgroup(revlog, nodelist, lookup)
+    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
+        """Yield a sequence of changegroup byte chunks."""
 
-        # add the parent of the first rev
-        p = revlog.parentrevs(revs[0])[0]
-        revs.insert(0, p)
-
-        # build deltas
-        progress = None
-        if units is not None:
-            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
-                                                  total=(len(revs) - 1))
-        for r in xrange(len(revs) - 1):
-            if progress:
-                progress.update(r + 1)
-            prev, curr = revs[r], revs[r + 1]
-            linknode = lookup(revlog.node(curr))
-            for c in self.revchunk(revlog, curr, prev, linknode):
-                yield c
-
-        if progress:
-            progress.complete()
-        yield self.close()
-
-    # filter any nodes that claim to be part of the known set
-    def prune(self, revlog, missing, commonrevs):
-        rr, rl = revlog.rev, revlog.linkrev
-        return [n for n in missing if rl(rr(n)) not in commonrevs]
-
-    def _packmanifests(self, dir, mfnodes, lookuplinknode):
-        """Pack flat manifests into a changegroup stream."""
-        assert not dir
-        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
-                                lookuplinknode, units=_('manifests')):
-            yield chunk
-
-    def _manifestsdone(self):
-        return ''
-
-    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
-        '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = repo.changelog
 
-        clrevorder = {}
-        mfs = {} # needed manifests
-        fnodes = {} # needed file nodes
-        changedfiles = set()
-
-        # Callback for the changelog, used to collect changed files and manifest
-        # nodes.
-        # Returns the linkrev node (identity in the changelog case).
-        def lookupcl(x):
-            c = cl.read(x)
-            clrevorder[x] = len(clrevorder)
-            n = c[0]
-            # record the first changeset introducing this manifest version
-            mfs.setdefault(n, x)
-            # Record a complete list of potentially-changed files in
-            # this manifest.
-            changedfiles.update(c[3])
-            return x
-
         self._verbosenote(_('uncompressed size of bundle content:\n'))
         size = 0
-        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
-            size += len(chunk)
-            yield chunk
+
+        clstate, deltas = self._generatechangelog(cl, clnodes)
+        for delta in deltas:
+            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
+                size += len(chunk)
+                yield chunk
+
+        close = closechunk()
+        size += len(close)
+        yield close
+
         self._verbosenote(_('%8.i (changelog)\n') % size)
 
+        clrevorder = clstate['clrevorder']
+        manifests = clstate['manifests']
+        changedfiles = clstate['changedfiles']
+
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
         # The fastpath is usually safer than the slowpath, because the filelogs
@@ -648,34 +874,142 @@
         fastpathlinkrev = fastpathlinkrev and (
             'treemanifest' not in repo.requirements)
 
-        for chunk in self.generatemanifests(commonrevs, clrevorder,
-                fastpathlinkrev, mfs, fnodes, source):
-            yield chunk
-        mfs.clear()
+        fnodes = {}  # needed file nodes
+
+        size = 0
+        it = self.generatemanifests(
+            commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
+            clstate['clrevtomanifestrev'])
+
+        for tree, deltas in it:
+            if tree:
+                assert self.version == b'03'
+                chunk = _fileheader(tree)
+                size += len(chunk)
+                yield chunk
+
+            for delta in deltas:
+                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                for chunk in chunks:
+                    size += len(chunk)
+                    yield chunk
+
+            close = closechunk()
+            size += len(close)
+            yield close
+
+        self._verbosenote(_('%8.i (manifests)\n') % size)
+        yield self._manifestsend
+
+        mfdicts = None
+        if self._ellipses and self._isshallow:
+            mfdicts = [(self._repo.manifestlog[n].read(), lr)
+                       for (n, lr) in manifests.iteritems()]
+
+        manifests.clear()
         clrevs = set(cl.rev(x) for x in clnodes)
 
-        if not fastpathlinkrev:
-            def linknodes(unused, fname):
-                return fnodes.get(fname, {})
-        else:
-            cln = cl.node
-            def linknodes(filerevlog, fname):
-                llr = filerevlog.linkrev
-                fln = filerevlog.node
-                revs = ((r, llr(r)) for r in filerevlog)
-                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
+        it = self.generatefiles(changedfiles, commonrevs,
+                                source, mfdicts, fastpathlinkrev,
+                                fnodes, clrevs)
+
+        for path, deltas in it:
+            h = _fileheader(path)
+            size = len(h)
+            yield h
 
-        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
-                                        source):
-            yield chunk
+            for delta in deltas:
+                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                for chunk in chunks:
+                    size += len(chunk)
+                    yield chunk
 
-        yield self.close()
+            close = closechunk()
+            size += len(close)
+            yield close
+
+            self._verbosenote(_('%8.i  %s\n') % (size, path))
+
+        yield closechunk()
 
         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)
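Taken together, ``generate()`` emits one length-prefixed chunk stream per store group, each group terminated by an empty chunk. A simplified sketch of the cg3 ordering it produces (headers reduced to plain strings, not the real serialization):

    def streamlayout(clchunks, treegroups, filegroups, manifestsend):
        close = b'\x00\x00\x00\x00'       # closechunk(): zero-length chunk
        for c in clchunks:                # changelog delta group
            yield c
        yield close
        for tree, chunks in treegroups:   # root manifest, then subtrees
            if tree:
                yield tree                # stands in for _fileheader(tree)
            for c in chunks:
                yield c
            yield close
        yield manifestsend                # closechunk() for cg3, b'' before
        for path, chunks in filegroups:   # one delta group per file
            yield path                    # stands in for _fileheader(path)
            for c in chunks:
                yield c
            yield close
        yield close                       # end of changegroup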
 
-    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
-                          fnodes, source):
+    def _generatechangelog(self, cl, nodes):
+        """Generate data for changelog chunks.
+
+        Returns a 2-tuple of a dict containing state and an iterable of
+        revision deltas. The state will not be fully populated until
+        the delta stream has been fully consumed.
+        """
+        clrevorder = {}
+        manifests = {}
+        mfl = self._repo.manifestlog
+        changedfiles = set()
+        clrevtomanifestrev = {}
+
+        # Callback for the changelog, used to collect changed files and
+        # manifest nodes.
+        # Returns the linkrev node (identity in the changelog case).
+        def lookupcl(x):
+            c = cl.changelogrevision(x)
+            clrevorder[x] = len(clrevorder)
+
+            if self._ellipses:
+                # Only update manifests if x is going to be sent. Otherwise we
+                # end up with bogus linkrevs specified for manifests and
+                # we skip some manifest nodes that we should otherwise
+                # have sent.
+                if (x in self._fullclnodes
+                    or cl.rev(x) in self._precomputedellipsis):
+
+                    manifestnode = c.manifest
+                    # Record the first changeset introducing this manifest
+                    # version.
+                    manifests.setdefault(manifestnode, x)
+                    # Set this narrow-specific dict so we have the lowest
+                    # manifest revnum to look up for this cl revnum. (Part of
+                    # mapping changelog ellipsis parents to manifest ellipsis
+                    # parents)
+                    clrevtomanifestrev.setdefault(
+                        cl.rev(x), mfl.rev(manifestnode))
+                # We can't trust the changed files list in the changeset if the
+                # client requested a shallow clone.
+                if self._isshallow:
+                    changedfiles.update(mfl[c.manifest].read().keys())
+                else:
+                    changedfiles.update(c.files)
+            else:
+                # record the first changeset introducing this manifest version
+                manifests.setdefault(c.manifest, x)
+                # Record a complete list of potentially-changed files in
+                # this manifest.
+                changedfiles.update(c.files)
+
+            return x
+
+        state = {
+            'clrevorder': clrevorder,
+            'manifests': manifests,
+            'changedfiles': changedfiles,
+            'clrevtomanifestrev': clrevtomanifestrev,
+        }
+
+        gen = deltagroup(
+            self._repo, cl, nodes, True, lookupcl,
+            self._forcedeltaparentprev,
+            # Reorder settings are currently ignored for changelog.
+            True,
+            ellipses=self._ellipses,
+            topic=_('changesets'),
+            clrevtolocalrev={},
+            fullclnodes=self._fullclnodes,
+            precomputedellipsis=self._precomputedellipsis)
+
+        return state, gen
+
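Note the contract this creates: ``lookupcl`` fills the state dict as a side effect of iteration, so the returned iterator must be drained before the dict is read, exactly as ``generate()`` does. A toy illustration of that contract (not hg code):

    def makestate():
        state = {'seen': 0}
        def gen():
            for x in (1, 2, 3):
                state['seen'] += 1    # populated as a side effect
                yield x
        return state, gen()

    state, deltas = makestate()
    assert state['seen'] == 0         # nothing populated yet
    for d in deltas:
        pass                          # consume the stream first
    assert state['seen'] == 3         # now safe to read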
+    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
+                          manifests, fnodes, source, clrevtolocalrev):
         """Returns an iterator of changegroup chunks containing manifests.
 
         `source` is unused here, but is used by extensions like remotefilelog to
@@ -683,16 +1017,15 @@
         """
         repo = self._repo
         mfl = repo.manifestlog
-        dirlog = mfl._revlog.dirlog
-        tmfnodes = {'': mfs}
+        tmfnodes = {'': manifests}
 
         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
-        def makelookupmflinknode(dir, nodes):
+        def makelookupmflinknode(tree, nodes):
             if fastpathlinkrev:
-                assert not dir
-                return mfs.__getitem__
+                assert not tree
+                return manifests.__getitem__
 
             def lookupmflinknode(x):
                 """Callback for looking up the linknode for manifests.
@@ -711,16 +1044,16 @@
                 treemanifests to send.
                 """
                 clnode = nodes[x]
-                mdata = mfl.get(dir, x).readfast(shallow=True)
+                mdata = mfl.get(tree, x).readfast(shallow=True)
                 for p, n, fl in mdata.iterentries():
                     if fl == 't': # subdirectory manifest
-                        subdir = dir + p + '/'
-                        tmfclnodes = tmfnodes.setdefault(subdir, {})
+                        subtree = tree + p + '/'
+                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                         tmfclnode = tmfclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[tmfclnode]:
                             tmfclnodes[n] = clnode
                     else:
-                        f = dir + p
+                        f = tree + p
                         fclnodes = fnodes.setdefault(f, {})
                         fclnode = fclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[fclnode]:
@@ -728,22 +1061,84 @@
                 return clnode
             return lookupmflinknode
 
-        size = 0
         while tmfnodes:
-            dir, nodes = tmfnodes.popitem()
-            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
-            if not dir or prunednodes:
-                for x in self._packmanifests(dir, prunednodes,
-                                             makelookupmflinknode(dir, nodes)):
-                    size += len(x)
-                    yield x
-        self._verbosenote(_('%8.i (manifests)\n') % size)
-        yield self._manifestsdone()
+            tree, nodes = tmfnodes.popitem()
+            store = mfl.getstorage(tree)
+
+            if not self._filematcher.visitdir(store.tree[:-1] or '.'):
+                prunednodes = []
+            else:
+                frev, flr = store.rev, store.linkrev
+                prunednodes = [n for n in nodes
+                               if flr(frev(n)) not in commonrevs]
+
+            if tree and not prunednodes:
+                continue
+
+            lookupfn = makelookupmflinknode(tree, nodes)
+
+            deltas = deltagroup(
+                self._repo, store, prunednodes, False, lookupfn,
+                self._forcedeltaparentprev, self._reorder,
+                ellipses=self._ellipses,
+                topic=_('manifests'),
+                clrevtolocalrev=clrevtolocalrev,
+                fullclnodes=self._fullclnodes,
+                precomputedellipsis=self._precomputedellipsis)
+
+            yield tree, deltas
 
     # The 'source' parameter is useful for extensions
-    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
+    def generatefiles(self, changedfiles, commonrevs, source,
+                      mfdicts, fastpathlinkrev, fnodes, clrevs):
+        changedfiles = list(filter(self._filematcher, changedfiles))
+
+        if not fastpathlinkrev:
+            def normallinknodes(unused, fname):
+                return fnodes.get(fname, {})
+        else:
+            cln = self._repo.changelog.node
+
+            def normallinknodes(store, fname):
+                flinkrev = store.linkrev
+                fnode = store.node
+                revs = ((r, flinkrev(r)) for r in store)
+                return dict((fnode(r), cln(lr))
+                            for r, lr in revs if lr in clrevs)
+
+        clrevtolocalrev = {}
+
+        if self._isshallow:
+            # In a shallow clone, the linknodes callback needs to also include
+            # those file nodes that are in the manifests we sent but weren't
+            # introduced by those manifests.
+            commonctxs = [self._repo[c] for c in commonrevs]
+            clrev = self._repo.changelog.rev
+
+            # Defining this function has a side-effect of overriding the
+            # function of the same name that was passed in as an argument.
+            # TODO have caller pass in appropriate function.
+            def linknodes(flog, fname):
+                for c in commonctxs:
+                    try:
+                        fnode = c.filenode(fname)
+                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
+                    except error.ManifestLookupError:
+                        pass
+                links = normallinknodes(flog, fname)
+                if len(links) != len(mfdicts):
+                    for mf, lr in mfdicts:
+                        fnode = mf.get(fname, None)
+                        if fnode in links:
+                            links[fnode] = min(links[fnode], lr, key=clrev)
+                        elif fnode:
+                            links[fnode] = lr
+                return links
+        else:
+            linknodes = normallinknodes
+
         repo = self._repo
-        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
+        progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                         total=len(changedfiles))
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
@@ -751,129 +1146,89 @@
                 raise error.Abort(_("empty or missing file data for %s") %
                                   fname)
 
+            clrevtolocalrev.clear()
+
             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]
 
-            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
-            if filenodes:
-                progress.update(i + 1, item=fname)
-                h = self.fileheader(fname)
-                size = len(h)
-                yield h
-                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
-                    size += len(chunk)
-                    yield chunk
-                self._verbosenote(_('%8.i  %s\n') % (size, fname))
+            frev, flr = filerevlog.rev, filerevlog.linkrev
+            filenodes = [n for n in linkrevnodes
+                         if flr(frev(n)) not in commonrevs]
+
+            if not filenodes:
+                continue
+
+            progress.update(i + 1, item=fname)
+
+            deltas = deltagroup(
+                self._repo, filerevlog, filenodes, False, lookupfilelog,
+                self._forcedeltaparentprev, self._reorder,
+                ellipses=self._ellipses,
+                clrevtolocalrev=clrevtolocalrev,
+                fullclnodes=self._fullclnodes,
+                precomputedellipsis=self._precomputedellipsis)
+
+            yield fname, deltas
+
         progress.complete()
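In the shallow-clone override of ``linknodes()`` above, ``min(..., key=clrev)`` keeps whichever candidate linknode maps to the earliest changeset, preserving the first-introduction invariant. In miniature:

    clrev = {b'lnA': 7, b'lnB': 3}.__getitem__  # toy changelog numbering
    assert min(b'lnA', b'lnB', key=clrev) == b'lnB'  # earliest wins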
 
-    def deltaparent(self, revlog, rev, p1, p2, prev):
-        if not revlog.candelta(prev, rev):
-            raise error.ProgrammingError('cg1 should not be used in this case')
-        return prev
-
-    def revchunk(self, revlog, rev, prev, linknode):
-        node = revlog.node(rev)
-        p1, p2 = revlog.parentrevs(rev)
-        base = self.deltaparent(revlog, rev, p1, p2, prev)
+def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
+                   shallow=False, ellipsisroots=None, fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.linknode)
 
-        prefix = ''
-        if revlog.iscensored(base) or revlog.iscensored(rev):
-            try:
-                delta = revlog.revision(node, raw=True)
-            except error.CensoredNodeError as e:
-                delta = e.tombstone
-            if base == nullrev:
-                prefix = mdiff.trivialdiffheader(len(delta))
-            else:
-                baselen = revlog.rawsize(base)
-                prefix = mdiff.replacediffheader(baselen, len(delta))
-        elif base == nullrev:
-            delta = revlog.revision(node, raw=True)
-            prefix = mdiff.trivialdiffheader(len(delta))
-        else:
-            delta = revlog.revdiff(base, rev)
-        p1n, p2n = revlog.parents(node)
-        basenode = revlog.node(base)
-        flags = revlog.flags(rev)
-        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
-        meta += prefix
-        l = len(meta) + len(delta)
-        yield chunkheader(l)
-        yield meta
-        yield delta
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # do nothing with basenode, it is implicitly the previous one in HG10
-        # do nothing with flags, it is implicitly 0 for cg1 and cg2
-        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
+    return cgpacker(repo, filematcher, b'01',
+                    allowreorder=None,
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=b'',
+                    forcedeltaparentprev=True,
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-class cg2packer(cg1packer):
-    version = '02'
-    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
-
-    def __init__(self, repo, bundlecaps=None):
-        super(cg2packer, self).__init__(repo, bundlecaps)
-        if self._reorder is None:
-            # Since generaldelta is directly supported by cg2, reordering
-            # generally doesn't help, so we disable it by default (treating
-            # bundle.reorder=auto just like bundle.reorder=False).
-            self._reorder = False
+def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
+                   shallow=False, ellipsisroots=None, fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode)
 
-    def deltaparent(self, revlog, rev, p1, p2, prev):
-        dp = revlog.deltaparent(rev)
-        if dp == nullrev and revlog.storedeltachains:
-            # Avoid sending full revisions when delta parent is null. Pick prev
-            # in that case. It's tempting to pick p1 in this case, as p1 will
-            # be smaller in the common case. However, computing a delta against
-            # p1 may require resolving the raw text of p1, which could be
-            # expensive. The revlog caches should have prev cached, meaning
-            # less CPU for changegroup generation. There is likely room to add
-            # a flag and/or config option to control this behavior.
-            base = prev
-        elif dp == nullrev:
-            # revlog is configured to use full snapshot for a reason,
-            # stick to full snapshot.
-            base = nullrev
-        elif dp not in (p1, p2, prev):
-            # Pick prev when we can't be sure remote has the base revision.
-            return prev
-        else:
-            base = dp
-        if base != nullrev and not revlog.candelta(base, rev):
-            base = nullrev
-        return base
+    # Since generaldelta is directly supported by cg2, reordering
+    # generally doesn't help, so we disable it by default (treating
+    # bundle.reorder=auto just like bundle.reorder=False).
+    return cgpacker(repo, filematcher, b'02',
+                    allowreorder=False,
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=b'',
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
-        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
-
-class cg3packer(cg2packer):
-    version = '03'
-    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
-
-    def _packmanifests(self, dir, mfnodes, lookuplinknode):
-        if dir:
-            yield self.fileheader(dir)
+def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
+                   shallow=False, ellipsisroots=None, fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
 
-        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
-        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
-                                units=_('manifests')):
-            yield chunk
-
-    def _manifestsdone(self):
-        return self.close()
+    return cgpacker(repo, filematcher, b'03',
+                    allowreorder=False,
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=closechunk(),
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        return struct.pack(
-            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
-
-_packermap = {'01': (cg1packer, cg1unpacker),
+_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
-             '02': (cg2packer, cg2unpacker),
+             '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
-             '03': (cg3packer, cg3unpacker),
+             '03': (_makecg3packer, cg3unpacker),
 }
 
 def allsupportedversions(repo):
@@ -899,7 +1254,7 @@
         # support versions 01 and 02.
         versions.discard('01')
         versions.discard('02')
-    if NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # support that for stripping and unbundling to work.
         versions.discard('01')
@@ -927,9 +1282,32 @@
     assert versions
     return min(versions)
 
-def getbundler(version, repo, bundlecaps=None):
+def getbundler(version, repo, bundlecaps=None, filematcher=None,
+               ellipses=False, shallow=False, ellipsisroots=None,
+               fullnodes=None):
     assert version in supportedoutgoingversions(repo)
-    return _packermap[version][0](repo, bundlecaps)
+
+    if filematcher is None:
+        filematcher = matchmod.alwaysmatcher(repo.root, '')
+
+    if version == '01' and not filematcher.always():
+        raise error.ProgrammingError('version 01 changegroups do not support '
+                                     'sparse file matchers')
+
+    if ellipses and version in (b'01', b'02'):
+        raise error.Abort(
+            _('ellipsis nodes require at least cg3 on client and server, '
+              'but negotiated version %s') % version)
+
+    # Requested files could include files not in the local store. So
+    # filter those out.
+    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
+                                             filematcher)
+
+    fn = _packermap[version][0]
+    return fn(repo, filematcher, bundlecaps, ellipses=ellipses,
+              shallow=shallow, ellipsisroots=ellipsisroots,
+              fullnodes=fullnodes)
 
 def getunbundler(version, fh, alg, extras=None):
     return _packermap[version][1](fh, alg, extras=extras)
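A hedged usage sketch of the updated factory functions; ``repo``, ``outgoing`` and ``fh`` are assumed to already exist, and the version string should come from the negotiation helpers above:

    from mercurial import changegroup

    # version '01' refuses sparse matchers, so narrow changegroups need
    # '02' or, with ellipsis nodes or treemanifests, '03'.
    bundler = changegroup.getbundler('02', repo)
    stream = changegroup.makestream(repo, outgoing, '02', 'push')
    for chunk in stream:
        fh.write(chunk)               # e.g. spool into a bundle file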
@@ -950,8 +1328,9 @@
                         {'clcount': len(outgoing.missing) })
 
 def makestream(repo, outgoing, version, source, fastpath=False,
-               bundlecaps=None):
-    bundler = getbundler(version, repo, bundlecaps=bundlecaps)
+               bundlecaps=None, filematcher=None):
+    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
+                         filematcher=filematcher)
 
     repo = repo.unfiltered()
     commonrevs = outgoing.common
@@ -989,7 +1368,7 @@
         revisions += len(fl) - o
         if f in needfiles:
             needs = needfiles[f]
-            for new in xrange(o, len(fl)):
+            for new in pycompat.xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
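The scattered ``xrange`` to ``pycompat.xrange`` hunks in this file (and in the files below) are the mechanical part of the Python 3 port: only ``range`` exists there. The shim is presumably along these lines (a sketch, not the actual pycompat source):

    import sys

    if sys.version_info[0] >= 3:
        xrange = range        # Python 3 has no xrange; range is lazy
    # on Python 2 the builtin xrange is used unchanged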
--- a/mercurial/changelog.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/changelog.py	Tue Sep 04 12:16:28 2018 -0400
@@ -22,7 +22,6 @@
     error,
     pycompat,
     revlog,
-    util,
 )
 from .utils import (
     dateutil,
@@ -304,7 +303,7 @@
         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each other. So disable
         # delta chains.
-        self.storedeltachains = False
+        self._storedeltachains = False
 
         self._realopener = opener
         self._delayed = False
@@ -313,7 +312,7 @@
         self.filteredrevs = frozenset()
 
     def tiprev(self):
-        for i in xrange(len(self) -1, -2, -1):
+        for i in pycompat.xrange(len(self) -1, -2, -1):
             if i not in self.filteredrevs:
                 return i
 
@@ -332,7 +331,7 @@
             return revlog.revlog.__iter__(self)
 
         def filterediter():
-            for i in xrange(len(self)):
+            for i in pycompat.xrange(len(self)):
                 if i not in self.filteredrevs:
                     yield i
 
@@ -344,12 +343,6 @@
             if i not in self.filteredrevs:
                 yield i
 
-    @util.propertycache
-    def nodemap(self):
-        # XXX need filtering too
-        self.rev(self.node(0))
-        return self._nodecache
-
     def reachableroots(self, minroot, heads, roots, includepath=False):
         return self.index.reachableroots2(minroot, heads, roots, includepath)
 
@@ -552,19 +545,3 @@
         just to access this is costly."""
         extra = self.read(rev)[5]
         return encoding.tolocal(extra.get("branch")), 'close' in extra
-
-    def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
-        # overlay over the standard revlog._addrevision to track the new
-        # revision on the transaction.
-        rev = len(self)
-        node = super(changelog, self)._addrevision(node, rawtext, transaction,
-                                                   *args, **kwargs)
-        revs = transaction.changes.get('revs')
-        if revs is not None:
-            if revs:
-                assert revs[-1] + 1 == rev
-                revs = xrange(revs[0], rev + 1)
-            else:
-                revs = xrange(rev, rev + 1)
-            transaction.changes['revs'] = revs
-        return node
--- a/mercurial/cmdutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/cmdutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -607,17 +607,13 @@
 def _unshelvemsg():
     return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
 
-def _updatecleanmsg(dest=None):
-    warning = _('warning: this will discard uncommitted changes')
-    return 'hg update --clean %s (%s)' % (dest or '.', warning)
-
 def _graftmsg():
-    # tweakdefaults requires `update` to have a rev hence the `.`
-    return _helpmessage('hg graft --continue', _updatecleanmsg())
+    return _helpmessage('hg graft --continue', 'hg graft --abort')
 
 def _mergemsg():
-    # tweakdefaults requires `update` to have a rev hence the `.`
-     return _helpmessage('hg commit', _updatecleanmsg())
+    return _helpmessage('hg commit', 'hg merge --abort')
 
 def _bisectmsg():
     msg = _('To mark the changeset good:    hg bisect --good\n'
@@ -1058,7 +1054,7 @@
     fn = makefilename(ctx, pat, **props)
     return open(fn, mode)
 
-def openrevlog(repo, cmd, file_, opts):
+def openstorage(repo, cmd, file_, opts, returnrevlog=False):
     """opens the changelog, manifest, a filelog or a given revlog"""
     cl = opts['changelog']
     mf = opts['manifest']
@@ -1087,16 +1083,30 @@
                                    "treemanifest enabled"))
             if not dir.endswith('/'):
                 dir = dir + '/'
-            dirlog = repo.manifestlog._revlog.dirlog(dir)
+            dirlog = repo.manifestlog.getstorage(dir)
             if len(dirlog):
                 r = dirlog
         elif mf:
-            r = repo.manifestlog._revlog
+            r = repo.manifestlog.getstorage(b'')
         elif file_:
             filelog = repo.file(file_)
             if len(filelog):
                 r = filelog
+
+        # Not all storage may be revlogs. If requested, try to return an actual
+        # revlog instance.
+        if returnrevlog:
+            if isinstance(r, revlog.revlog):
+                pass
+            elif util.safehasattr(r, '_revlog'):
+                r = r._revlog
+            elif r is not None:
+                raise error.Abort(_('%r does not appear to be a revlog') % r)
+
     if not r:
+        if not returnrevlog:
+            raise error.Abort(_('cannot give path to non-revlog'))
+
         if not file_:
             raise error.CommandError(cmd, _('invalid arguments'))
         if not os.path.isfile(file_):
@@ -1105,6 +1115,18 @@
                           file_[:-2] + ".i")
     return r
 
+def openrevlog(repo, cmd, file_, opts):
+    """Obtain a revlog backing storage of an item.
+
+    This is similar to ``openstorage()`` except it always returns a revlog.
+
+    In most cases, a caller cares about the main storage object - not the
+    revlog backing it. Therefore, this function should only be used by code
+    that needs to examine low-level revlog implementation details. e.g. debug
+    commands.
+    """
+    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
+
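A hedged example of the resulting split, assuming a ``repo`` and an opts dict shaped like the debug-command options (``changelog``/``manifest``/``dir`` keys):

    opts = {'changelog': False, 'manifest': True, 'dir': ''}

    # Most callers want the storage object, whatever backs it:
    store = openstorage(repo, 'debugdata', None, opts)
    # Low-level debug commands insist on a real revlog:
    rl = openrevlog(repo, 'debugindex', None, opts)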
 def copy(ui, repo, pats, opts, rename=False):
     # called with the repo lock held
     #
@@ -1162,7 +1184,7 @@
             ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                     (reltarget, repo.pathto(abssrc, cwd),
                      repo.pathto(prevsrc, cwd)))
-            return
+            return True # report a failure
 
         # check for overwrites
         exists = os.path.lexists(target)
@@ -1172,7 +1194,7 @@
                 repo.dirstate.normalize(abstarget)):
                 if not rename:
                     ui.warn(_("%s: can't copy - same file\n") % reltarget)
-                    return
+                    return True # report a failure
                 exists = False
                 samefile = True
 
@@ -1185,20 +1207,20 @@
                     else:
                         flags = '--force'
                     if rename:
-                        hint = _('(hg rename %s to replace the file by '
+                        hint = _("('hg rename %s' to replace the file by "
                                  'recording a rename)\n') % flags
                     else:
-                        hint = _('(hg copy %s to replace the file by '
+                        hint = _("('hg copy %s' to replace the file by "
                                  'recording a copy)\n') % flags
                 else:
                     msg = _('%s: not overwriting - file exists\n')
                     if rename:
-                        hint = _('(hg rename --after to record the rename)\n')
+                        hint = _("('hg rename --after' to record the rename)\n")
                     else:
-                        hint = _('(hg copy --after to record the copy)\n')
+                        hint = _("('hg copy --after' to record the copy)\n")
                 ui.warn(msg % reltarget)
                 ui.warn(hint)
-                return
+                return True # report a failure
 
         if after:
             if not exists:
@@ -1208,7 +1230,7 @@
                 else:
                     ui.warn(_('%s: not recording copy - %s does not exist\n') %
                             (relsrc, reltarget))
-                return
+                return True # report a failure
         elif not dryrun:
             try:
                 if exists:
@@ -1232,6 +1254,10 @@
                 else:
                     ui.warn(_('%s: cannot copy - %s\n') %
                             (relsrc, encoding.strtolocal(inst.strerror)))
+                    if rename:
+                        hint = _("('hg rename --after' to record the rename)\n")
+                    else:
+                        hint = _("('hg copy --after' to record the copy)\n")
+                    ui.warn(hint)
                     return True # report a failure
 
         if ui.verbose or not exact:
@@ -1349,9 +1375,6 @@
             if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                 errors += 1
 
-    if errors:
-        ui.warn(_('(consider using --after)\n'))
-
     return errors != 0
 
 ## facility to let extension process additional data into an import patch
@@ -1755,7 +1778,7 @@
         """
         cl_count = len(repo)
         revs = []
-        for j in xrange(0, last + 1):
+        for j in pycompat.xrange(0, last + 1):
             linkrev = filelog.linkrev(j)
             if linkrev < minrev:
                 continue
@@ -1889,9 +1912,6 @@
     revs = _walkrevs(repo, opts)
     if not revs:
         return []
-    if allfiles and len(revs) > 1:
-        raise error.Abort(_("multiple revisions not supported with "
-                            "--all-files"))
     wanted = set()
     slowpath = match.anypats() or (not match.always() and opts.get('removed'))
     fncache = {}
@@ -1902,7 +1922,7 @@
     # wanted: a cache of filenames that were changed (ctx.files()) and that
     # match the file filtering conditions.
 
-    if match.always():
+    if match.always() or allfiles:
         # No files, no patterns.  Display all revs.
         wanted = revs
     elif not slowpath:
@@ -1966,7 +1986,7 @@
         rev = repo[rev].rev()
         ff = _followfilter(repo)
         stop = min(revs[0], revs[-1])
-        for x in xrange(rev, stop - 1, -1):
+        for x in pycompat.xrange(rev, stop - 1, -1):
             if ff.match(x):
                 wanted = wanted - [x]
 
@@ -1985,7 +2005,7 @@
         stopiteration = False
         for windowsize in increasingwindows():
             nrevs = []
-            for i in xrange(windowsize):
+            for i in pycompat.xrange(windowsize):
                 rev = next(it, None)
                 if rev is None:
                     stopiteration = True
@@ -2038,7 +2058,8 @@
                 cca(f)
             names.append(f)
             if ui.verbose or not exact:
-                ui.status(_('adding %s\n') % match.rel(f))
+                ui.status(_('adding %s\n') % match.rel(f),
+                          label='addremove.added')
 
     for subpath in sorted(wctx.substate):
         sub = wctx.sub(subpath)
@@ -2136,7 +2157,8 @@
 
     for f in forget:
         if ui.verbose or not match.exact(f) or interactive:
-            ui.status(_('removing %s\n') % match.rel(f))
+            ui.status(_('removing %s\n') % match.rel(f),
+                      label='addremove.removed')
 
     if not dryrun:
         rejected = wctx.forget(forget, prefix)
@@ -2154,8 +2176,8 @@
         if needsfctx:
             fc = ctx[f]
             fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
-        fm.data(abspath=f)
-        fm.write('path', fmt, m.rel(f))
+        fm.data(path=f)
+        fm.plain(fmt % m.rel(f))
         ret = 0
 
     for subpath in sorted(ctx.substate):
@@ -2269,7 +2291,8 @@
     for f in list:
         if ui.verbose or not m.exact(f):
             progress.increment()
-            ui.status(_('removing %s\n') % m.rel(f))
+            ui.status(_('removing %s\n') % m.rel(f),
+                      label='addremove.removed')
     progress.complete()
 
     if not dryrun:
@@ -2300,7 +2323,7 @@
     fm.startitem()
     fm.context(ctx=ctx)
     fm.write('data', '%s', data)
-    fm.data(abspath=path, path=matcher.rel(path))
+    fm.data(path=path)
 
 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
     err = 1
@@ -2428,7 +2451,7 @@
         if len(old.parents()) > 1:
             # ctx.files() isn't reliable for merges, so fall back to the
             # slower repo.status() method
-            files = set([fn for st in repo.status(base, old)[:3]
+            files = set([fn for st in base.status(old)[:3]
                          for fn in st])
         else:
             files = set(old.files())
@@ -2556,8 +2579,10 @@
         obsmetadata = None
         if opts.get('note'):
             obsmetadata = {'note': encoding.fromlocal(opts['note'])}
+        backup = ui.configbool('ui', 'history-editing-backup')
         scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
-                             fixphase=True, targetphase=commitphase)
+                             fixphase=True, targetphase=commitphase,
+                             backup=backup)
 
         # Fixing the dirstate because localrepo.commitctx does not update
         # it. This is rather convenient because we did not need to update
@@ -2749,7 +2774,7 @@
 
     # `names` is a mapping for all elements in working copy and target revision
     # The mapping is in the form:
-    #   <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
+    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
     names = {}
 
     with repo.wlock():
@@ -2994,10 +3019,9 @@
                                     util.copyfile(target, bakname)
                                 else:
                                     util.rename(target, bakname)
-                    if ui.verbose or not exact:
-                        if not isinstance(msg, bytes):
-                            msg = msg(abs)
-                        ui.status(msg % rel)
+                    if opts.get('dry_run'):
+                        if ui.verbose or not exact:
+                            ui.status(msg % rel)
                 elif exact:
                     ui.warn(msg % rel)
                 break
@@ -3010,7 +3034,8 @@
             prefetch(repo, [ctx.rev()],
                      matchfiles(repo,
                                 [f for sublist in oplist for f in sublist]))
-            _performrevert(repo, parents, ctx, actions, interactive, tobackup)
+            _performrevert(repo, parents, ctx, names, actions, interactive,
+                           tobackup)
 
         if targetsubs:
             # Revert the subrepos on the revert list
@@ -3022,7 +3047,7 @@
                     raise error.Abort("subrepository '%s' does not exist in %s!"
                                       % (sub, short(ctx.node())))
 
-def _performrevert(repo, parents, ctx, actions, interactive=False,
+def _performrevert(repo, parents, ctx, names, actions, interactive=False,
                    tobackup=None):
     """function that actually perform all the actions computed for revert
 
@@ -3047,16 +3072,23 @@
             pass
         repo.dirstate.remove(f)
 
+    def prntstatusmsg(action, f):
+        rel, exact = names[f]
+        if repo.ui.verbose or not exact:
+            repo.ui.status(actions[action][1] % rel)
+
     audit_path = pathutil.pathauditor(repo.root, cached=True)
     for f in actions['forget'][0]:
         if interactive:
             choice = repo.ui.promptchoice(
                 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
             if choice == 0:
+                prntstatusmsg('forget', f)
                 repo.dirstate.drop(f)
             else:
                 excluded_files.append(f)
         else:
+            prntstatusmsg('forget', f)
             repo.dirstate.drop(f)
     for f in actions['remove'][0]:
         audit_path(f)
@@ -3064,13 +3096,16 @@
             choice = repo.ui.promptchoice(
                 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
             if choice == 0:
+                prntstatusmsg('remove', f)
                 doremove(f)
             else:
                 excluded_files.append(f)
         else:
+            prntstatusmsg('remove', f)
             doremove(f)
     for f in actions['drop'][0]:
         audit_path(f)
+        prntstatusmsg('drop', f)
         repo.dirstate.remove(f)
 
     normal = None
@@ -3117,14 +3152,18 @@
             tobackup = set()
         # Apply changes
         fp = stringio()
+        # chunks are serialized per file, but files aren't sorted
+        for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
+            prntstatusmsg('revert', f)
         for c in chunks:
-            # Create a backup file only if this hunk should be backed up
-            if ishunk(c) and c.header.filename() in tobackup:
+            if ishunk(c):
                 abs = c.header.filename()
-                target = repo.wjoin(abs)
-                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
-                util.copyfile(target, bakname)
-                tobackup.remove(abs)
+                # Create a backup file only if this hunk should be backed up
+                if c.header.filename() in tobackup:
+                    target = repo.wjoin(abs)
+                    bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
+                    util.copyfile(target, bakname)
+                    tobackup.remove(abs)
             c.write(fp)
         dopatch = fp.tell()
         fp.seek(0)
@@ -3136,6 +3175,7 @@
         del fp
     else:
         for f in actions['revert'][0]:
+            prntstatusmsg('revert', f)
             checkout(f)
             if normal:
                 normal(f)
@@ -3143,6 +3183,7 @@
     for f in actions['add'][0]:
         # Don't checkout modified files, they are already created by the diff
         if f not in newlyaddedandmodifiedfiles:
+            prntstatusmsg('add', f)
             checkout(f)
             repo.dirstate.add(f)
 
@@ -3150,6 +3191,7 @@
     if node == parent and p2 == nullid:
         normal = repo.dirstate.normal
     for f in actions['undelete'][0]:
+        prntstatusmsg('undelete', f)
         checkout(f)
         normal(f)
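
The prntstatusmsg helper introduced above centralizes the per-file status
messages for revert: a message is emitted only in verbose mode or when the
file was not named exactly on the command line. A minimal standalone sketch
of that rule, with plain dicts standing in for the real names/actions
structures used by _performrevert:

    # names maps each file to (relative path, exactly-matched?) and actions
    # maps an action to (file list, message template), mirroring the shapes
    # used in the revert code above.
    names = {'a.txt': ('a.txt', True), 'dir/b.txt': ('dir/b.txt', False)}
    actions = {'remove': ([], 'removing %s\n')}
    verbose = False

    def prntstatusmsg(action, f):
        rel, exact = names[f]
        if verbose or not exact:
            print(actions[action][1] % rel, end='')

    prntstatusmsg('remove', 'a.txt')      # exact match, not verbose: silent
    prntstatusmsg('remove', 'dir/b.txt')  # matched via pattern: prints message
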
 
--- a/mercurial/color.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/color.py	Tue Sep 04 12:16:28 2018 -0400
@@ -83,6 +83,8 @@
     'grep.filename': 'magenta',
     'grep.user': 'magenta',
     'grep.date': 'magenta',
+    'addremove.added': 'green',
+    'addremove.removed': 'red',
     'bookmarks.active': 'green',
     'branches.active': 'none',
     'branches.closed': 'black bold',
@@ -117,6 +119,7 @@
     'formatvariant.config.default': 'green',
     'formatvariant.default': '',
     'histedit.remaining': 'red bold',
+    'ui.error': 'red',
     'ui.prompt': 'yellow',
     'log.changeset': 'yellow',
     'patchbomb.finalsummary': '',
--- a/mercurial/commands.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/commands.py	Tue Sep 04 12:16:28 2018 -0400
@@ -35,6 +35,7 @@
     error,
     exchange,
     extensions,
+    filemerge,
     formatter,
     graphmod,
     hbisect,
@@ -336,10 +337,10 @@
              ('rev', ' ', lambda x: x.fctx.rev(), formatrev),
              ('node', ' ', lambda x: hexfn(x.fctx.node()), formathex),
              ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
-             ('file', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
+             ('path', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
              ('line_number', ':', lambda x: x.lineno, pycompat.bytestr),
             ]
-    opnamemap = {'rev': 'number', 'node': 'changeset'}
+    opnamemap = {'rev': 'number', 'node': 'changeset', 'path': 'file'}
 
     if (not opts.get('user') and not opts.get('changeset')
         and not opts.get('date') and not opts.get('file')):
@@ -379,7 +380,7 @@
     for abs in ctx.walk(m):
         fctx = ctx[abs]
         rootfm.startitem()
-        rootfm.data(abspath=abs, path=m.rel(abs))
+        rootfm.data(path=abs)
         if not opts.get('text') and fctx.isbinary():
             rootfm.plain(_("%s: binary file\n")
                          % ((pats and m.rel(abs)) or abs))
@@ -900,6 +901,7 @@
     ('d', 'delete', False, _('delete a given bookmark')),
     ('m', 'rename', '', _('rename a given bookmark'), _('OLD')),
     ('i', 'inactive', False, _('mark a bookmark inactive')),
+    ('', 'active', False, _('display the active bookmark')),
     ] + formatteropts,
     _('hg bookmarks [OPTIONS]... [NAME]...'))
 def bookmark(ui, repo, *names, **opts):
@@ -926,6 +928,10 @@
     A bookmark named '@' has the special property that :hg:`clone` will
     check it out by default if it exists.
 
+    The '--active' flag will display the active bookmark or return non-zero;
+    if combined with another action, that action will be performed on the
+    active bookmark.
+
     .. container:: verbose
 
       Examples:
@@ -955,6 +961,7 @@
     delete = opts.get(r'delete')
     rename = opts.get(r'rename')
     inactive = opts.get(r'inactive')
+    active = opts.get(r'active')
 
     if delete and rename:
         raise error.Abort(_("--delete and --rename are incompatible"))
@@ -962,6 +969,16 @@
         raise error.Abort(_("--rev is incompatible with --delete"))
     if rename and rev:
         raise error.Abort(_("--rev is incompatible with --rename"))
+    if delete and active:
+        raise error.Abort(_("--delete is incompatible with --active"))
+    if rev and active:
+        raise error.Abort(_("--rev is incompatible with --active"))
+    if rename and active:
+        raise error.Abort(_("--rename is incompatible with --active"))
+    if names and active:
+        raise error.Abort(_("NAMES is incompatible with --active"))
+    if inactive and active:
+        raise error.Abort(_("--inactive is incompatible with --active"))
     if not names and (delete or rev):
         raise error.Abort(_("bookmark name required"))
 
@@ -986,6 +1003,11 @@
                     ui.status(_("no active bookmark\n"))
                 else:
                     bookmarks.deactivate(repo)
+    elif active:
+        book = repo._activebookmark
+        if book is None:
+            return 1
+        ui.write("%s\n" % book, label=bookmarks.activebookmarklabel)
     else: # show bookmarks
         bookmarks.printbookmarks(ui, repo, **opts)
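
The --active handling above pairs a display path (print the active bookmark
or exit with status 1) with a series of pairwise incompatibility checks.
Those checks could also be expressed in a data-driven form; a hedged sketch
(the helper below is hypothetical, not part of this change):

    def checkincompatible(opts, pairs):
        # Raise on the first pair of mutually exclusive options that are
        # both set, mirroring the pairwise checks in the bookmark command.
        for a, b in pairs:
            if opts.get(a) and opts.get(b):
                raise ValueError('--%s is incompatible with --%s' % (a, b))

    opts = {'active': True, 'inactive': False}
    checkincompatible(opts, [('delete', 'rename'), ('delete', 'active'),
                             ('rev', 'active'), ('rename', 'active'),
                             ('inactive', 'active')])   # passes: only one set
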
 
@@ -2532,6 +2554,7 @@
     """
     opts = pycompat.byteskwargs(opts)
     diff = opts.get('all') or opts.get('diff')
+    all_files = opts.get('all_files')
     if diff and opts.get('all_files'):
         raise error.Abort(_('--diff and --all-files are mutually exclusive'))
     # TODO: remove "not opts.get('rev')" if --all-files -rMULTIREV gets working
@@ -2606,16 +2629,16 @@
     def difflinestates(a, b):
         sm = difflib.SequenceMatcher(None, a, b)
         for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag == 'insert':
-                for i in xrange(blo, bhi):
+            if tag == r'insert':
+                for i in pycompat.xrange(blo, bhi):
                     yield ('+', b[i])
-            elif tag == 'delete':
-                for i in xrange(alo, ahi):
+            elif tag == r'delete':
+                for i in pycompat.xrange(alo, ahi):
                     yield ('-', a[i])
-            elif tag == 'replace':
-                for i in xrange(alo, ahi):
+            elif tag == r'replace':
+                for i in pycompat.xrange(alo, ahi):
                     yield ('-', a[i])
-                for i in xrange(blo, bhi):
+                for i in pycompat.xrange(blo, bhi):
                     yield ('+', b[i])
 
     def display(fm, fn, ctx, pstates, states):
@@ -2623,7 +2646,7 @@
         if fm.isplain():
             formatuser = ui.shortuser
         else:
-            formatuser = str
+            formatuser = pycompat.bytestr
         if ui.quiet:
             datefmt = '%Y-%m-%d'
         else:
@@ -2637,7 +2660,7 @@
             except error.WdirUnsupported:
                 return ctx[fn].isbinary()
 
-        fieldnamemap = {'filename': 'file', 'linenumber': 'line_number'}
+        fieldnamemap = {'filename': 'path', 'linenumber': 'line_number'}
         if diff:
             iter = difflinestates(pstates, states)
         else:
@@ -2648,20 +2671,22 @@
             fm.data(node=fm.hexfunc(scmutil.binnode(ctx)))
 
             cols = [
-                ('filename', fn, True),
-                ('rev', rev, not plaingrep),
-                ('linenumber', l.linenum, opts.get('line_number')),
+                ('filename', '%s', fn, True),
+                ('rev', '%d', rev, not plaingrep),
+                ('linenumber', '%d', l.linenum, opts.get('line_number')),
             ]
             if diff:
-                cols.append(('change', change, True))
+                cols.append(('change', '%s', change, True))
             cols.extend([
-                ('user', formatuser(ctx.user()), opts.get('user')),
-                ('date', fm.formatdate(ctx.date(), datefmt), opts.get('date')),
+                ('user', '%s', formatuser(ctx.user()), opts.get('user')),
+                ('date', '%s', fm.formatdate(ctx.date(), datefmt),
+                 opts.get('date')),
             ])
-            lastcol = next(name for name, data, cond in reversed(cols) if cond)
-            for name, data, cond in cols:
+            lastcol = next(
+                name for name, fmt, data, cond in reversed(cols) if cond)
+            for name, fmt, data, cond in cols:
                 field = fieldnamemap.get(name, name)
-                fm.condwrite(cond, field, '%s', data, label='grep.%s' % name)
+                fm.condwrite(cond, field, fmt, data, label='grep.%s' % name)
                 if cond and name != lastcol:
                     fm.plain(sep, label='grep.sep')
             if not opts.get('files_with_matches'):
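
Each grep column now carries its own format string, so numeric fields such
as rev and linenumber are written as integers instead of being
pre-stringified. A standalone sketch of the emission loop above, with a
plain list replacing the formatter:

    # Only enabled columns are written, and the separator is suppressed
    # after the last enabled column, as in the loop above.
    cols = [
        ('filename', '%s', 'hello.py', True),
        ('rev', '%d', 42, False),           # disabled: plain grep mode
        ('linenumber', '%d', 7, True),
    ]
    sep = ':'
    lastcol = next(name for name, fmt, data, cond in reversed(cols) if cond)
    out = []
    for name, fmt, data, cond in cols:
        if cond:
            out.append(fmt % data)
            if name != lastcol:
                out.append(sep)
    print(''.join(out))   # hello.py:7
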
@@ -2756,7 +2781,7 @@
             if pstates or states:
                 r = display(fm, fn, ctx, pstates, states)
                 found = found or r
-                if r and not diff:
+                if r and not diff and not all_files:
                     skip[fn] = True
                     if copy:
                         skip[copy] = True
@@ -4026,7 +4051,7 @@
     # search for a unique phase argument
     targetphase = None
     for idx, name in enumerate(phases.phasenames):
-        if opts[name]:
+        if opts.get(name, False):
             if targetphase is not None:
                 raise error.Abort(_('only one phase can be specified'))
             targetphase = idx
@@ -4481,7 +4506,8 @@
     ('l', 'list', None, _('list state of files needing merge')),
     ('m', 'mark', None, _('mark files as resolved')),
     ('u', 'unmark', None, _('mark files as unresolved')),
-    ('n', 'no-status', None, _('hide status prefix'))]
+    ('n', 'no-status', None, _('hide status prefix')),
+    ('', 're-merge', None, _('re-merge files'))]
     + mergetoolopts + walkopts + formatteropts,
     _('[OPTION]... [FILE]...'),
     inferrepo=True)
@@ -4498,9 +4524,9 @@
 
     The resolve command can be used in the following ways:
 
-    - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
-      files, discarding any previous merge attempts. Re-merging is not
-      performed for files already marked as resolved. Use ``--all/-a``
+    - :hg:`resolve [--re-merge] [--tool TOOL] FILE...`: attempt to re-merge
+      the specified files, discarding any previous merge attempts. Re-merging
+      is not performed for files already marked as resolved. Use ``--all/-a``
       to select all unresolved files. ``--tool`` can be used to specify
       the merge tool used for the given files. It overrides the HGMERGE
       environment variable and your configuration files.  Previous file
@@ -4528,18 +4554,38 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    flaglist = 'all mark unmark list no_status'.split()
-    all, mark, unmark, show, nostatus = \
+    confirm = ui.configbool('commands', 'resolve.confirm')
+    flaglist = 'all mark unmark list no_status re_merge'.split()
+    all, mark, unmark, show, nostatus, remerge = \
         [opts.get(o) for o in flaglist]
 
-    if (show and (mark or unmark)) or (mark and unmark):
-        raise error.Abort(_("too many options specified"))
+    actioncount = len(list(filter(None, [show, mark, unmark, remerge])))
+    if actioncount > 1:
+        raise error.Abort(_("too many actions specified"))
+    elif (actioncount == 0
+          and ui.configbool('commands', 'resolve.explicit-re-merge')):
+        hint = _('use --mark, --unmark, --list or --re-merge')
+        raise error.Abort(_('no action specified'), hint=hint)
     if pats and all:
         raise error.Abort(_("can't specify --all and patterns"))
     if not (all or pats or show or mark or unmark):
         raise error.Abort(_('no files or directories specified'),
                          hint=('use --all to re-merge all unresolved files'))
 
+    if confirm:
+        if all:
+            if ui.promptchoice(_(b're-merge all unresolved files (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+        if mark and not pats:
+            if ui.promptchoice(_(b'mark all unresolved files as resolved (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+        if unmark and not pats:
+            if ui.promptchoice(_(b'mark all resolved files as unresolved (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+
     if show:
         ui.pager('resolve')
         fm = ui.formatter('resolve', opts)
@@ -4594,6 +4640,12 @@
         runconclude = False
 
         tocomplete = []
+        hasconflictmarkers = []
+        if mark:
+            markcheck = ui.config('commands', 'resolve.mark-check')
+            if markcheck not in ['warn', 'abort']:
+                # Treat all invalid / unrecognized values as 'none'.
+                markcheck = False
         for f in ms:
             if not m(f):
                 continue
@@ -4629,6 +4681,12 @@
                 continue
 
             if mark:
+                if markcheck:
+                    with repo.wvfs(f) as fobj:
+                        fdata = fobj.read()
+                    if filemerge.hasconflictmarkers(fdata) and \
+                        ms[f] != mergemod.MERGE_RECORD_RESOLVED:
+                        hasconflictmarkers.append(f)
                 ms.mark(f, mergemod.MERGE_RECORD_RESOLVED)
             elif unmark:
                 ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED)
@@ -4663,6 +4721,13 @@
                         if inst.errno != errno.ENOENT:
                             raise
 
+        if hasconflictmarkers:
+            ui.warn(_('warning: the following files still have conflict '
+                      'markers:\n  ') + '\n  '.join(hasconflictmarkers) + '\n')
+            if markcheck == 'abort' and not all:
+                raise error.Abort(_('conflict markers detected'),
+                                  hint=_('use --all to mark anyway'))
+
         for f in tocomplete:
             try:
                 # resolve file
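
The mark-check path reads each file being marked and asks
filemerge.hasconflictmarkers whether unresolved conflict markers remain.
A hedged approximation of such a detector (the real matcher in filemerge
may differ in detail):

    import re

    def hasconflictmarkers(data):
        # Look for a conflict-marker sequence at the start of any line.
        return bool(re.search(br'^(<<<<<<<|=======$|>>>>>>>)', data,
                              re.MULTILINE))

    assert hasconflictmarkers(b'<<<<<<< local\nours\n=======\n'
                              b'theirs\n>>>>>>> other\n')
    assert not hasconflictmarkers(b'cleanly merged content\n')
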
@@ -4693,8 +4758,11 @@
                 for f in ms:
                     if not m(f):
                         continue
-                    flags = ''.join(['-%s ' % o[0:1] for o in flaglist
-                                                   if opts.get(o)])
+                    def flag(o):
+                        if o == 're_merge':
+                            return '--re-merge '
+                        return '-%s ' % o[0:1]
+                    flags = ''.join([flag(o) for o in flaglist if opts.get(o)])
                     hint = _("(try: hg resolve %s%s)\n") % (
                              flags,
                              ' '.join(pats))
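
The flag() helper above rebuilds command-line flags for the hint message:
the long-only option re_merge is spelled out in full, while the others keep
their single-letter form. A runnable sketch of that reconstruction:

    flaglist = 'all mark unmark list no_status re_merge'.split()

    def flag(o):
        if o == 're_merge':
            return '--re-merge '
        return '-%s ' % o[0:1]

    opts = {'mark': True, 're_merge': True}
    flags = ''.join([flag(o) for o in flaglist if opts.get(o)])
    assert flags == '-m --re-merge '
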
@@ -5128,10 +5196,12 @@
             for f in files:
                 fm.startitem()
                 fm.context(ctx=ctx2)
+                fm.data(path=f)
                 fm.condwrite(showchar, 'status', '%s ', char, label=label)
-                fm.write('path', fmt, repo.pathto(f, cwd), label=label)
+                fm.plain(fmt % repo.pathto(f, cwd), label=label)
                 if f in copy:
-                    fm.write("copy", '  %s' + end, repo.pathto(copy[f], cwd),
+                    fm.data(source=copy[f])
+                    fm.plain(('  %s' + end) % repo.pathto(copy[f], cwd),
                              label='status.copied')
 
     if ((ui.verbose or ui.configbool('commands', 'status.verbose'))
--- a/mercurial/commandserver.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/commandserver.py	Tue Sep 04 12:16:28 2018 -0400
@@ -353,7 +353,7 @@
         # handle exceptions that may be raised by command server. most of
         # known exceptions are caught by dispatch.
         except error.Abort as inst:
-            ui.warn(_('abort: %s\n') % inst)
+            ui.error(_('abort: %s\n') % inst)
         except IOError as inst:
             if inst.errno != errno.EPIPE:
                 raise
--- a/mercurial/configitems.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/configitems.py	Tue Sep 04 12:16:28 2018 -0400
@@ -190,6 +190,15 @@
 coreconfigitem('commands', 'grep.all-files',
     default=False,
 )
+coreconfigitem('commands', 'resolve.confirm',
+    default=False,
+)
+coreconfigitem('commands', 'resolve.explicit-re-merge',
+    default=False,
+)
+coreconfigitem('commands', 'resolve.mark-check',
+    default='none',
+)
 coreconfigitem('commands', 'show.aliasprefix',
     default=list,
 )
@@ -584,9 +593,15 @@
 coreconfigitem('experimental', 'removeemptydirs',
     default=True,
 )
+coreconfigitem('experimental', 'revisions.prefixhexnode',
+    default=False,
+)
 coreconfigitem('experimental', 'revlogv2',
     default=None,
 )
+coreconfigitem('experimental', 'revisions.disambiguatewithin',
+    default=None,
+)
 coreconfigitem('experimental', 'single-head-per-branch',
     default=False,
 )
@@ -667,6 +682,9 @@
 coreconfigitem('format', 'usestore',
     default=True,
 )
+coreconfigitem('format', 'internal-phase',
+    default=False,
+)
 coreconfigitem('fsmonitor', 'warn_when_unused',
     default=True,
 )
@@ -759,6 +777,9 @@
 coreconfigitem('merge', 'preferancestor',
         default=lambda: ['*'],
 )
+coreconfigitem('merge', 'strict-capability-check',
+    default=False,
+)
 coreconfigitem('merge-tools', '.*',
     default=None,
     generic=True,
--- a/mercurial/context.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/context.py	Tue Sep 04 12:16:28 2018 -0400
@@ -193,25 +193,26 @@
         return self.rev() in obsmod.getrevs(self._repo, 'extinct')
 
     def orphan(self):
-        """True if the changeset is not obsolete but it's ancestor are"""
+        """True if the changeset is not obsolete, but its ancestor is"""
         return self.rev() in obsmod.getrevs(self._repo, 'orphan')
 
     def phasedivergent(self):
-        """True if the changeset try to be a successor of a public changeset
+        """True if the changeset tries to be a successor of a public changeset
 
-        Only non-public and non-obsolete changesets may be bumped.
+        Only non-public and non-obsolete changesets may be phase-divergent.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
 
     def contentdivergent(self):
-        """Is a successors of a changeset with multiple possible successors set
+        """Is a successor of a changeset with multiple possible successor sets
 
-        Only non-public and non-obsolete changesets may be divergent.
+        Only non-public and non-obsolete changesets may be content-divergent.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
 
     def isunstable(self):
-        """True if the changeset is either unstable, bumped or divergent"""
+        """True if the changeset is either orphan, phase-divergent or
+        content-divergent"""
         return self.orphan() or self.phasedivergent() or self.contentdivergent()
 
     def instabilities(self):
@@ -372,6 +373,10 @@
                 for rfiles, sfiles in zip(r, s):
                     rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
 
+        narrowmatch = self._repo.narrowmatch()
+        if not narrowmatch.always():
+            for l in r:
+                l[:] = list(filter(narrowmatch, l))
         for l in r:
             l.sort()
 
@@ -438,7 +443,6 @@
                         "unsupported changeid '%s' of type %s" %
                         (changeid, type(changeid)))
 
-            # lookup failed
         except (error.FilteredIndexError, error.FilteredLookupError):
             raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                 % pycompat.bytestr(changeid))
@@ -590,12 +594,6 @@
                             short(n) for n in sorted(cahs) if n != anc))
         return changectx(self._repo, anc)
 
-    def descendant(self, other):
-        msg = (b'ctx.descendant(other) is deprecated, '
-               b'use ctx.isancestorof(other)')
-        self._repo.ui.deprecwarn(msg, b'4.7')
-        return self.isancestorof(other)
-
     def isancestorof(self, other):
         """True if this changeset is an ancestor of other"""
         return self._repo.changelog.isancestorrev(self._rev, other._rev)
@@ -1903,9 +1901,9 @@
         # Test that each new directory to be created to write this path from p2
         # is not a file in p1.
         components = path.split('/')
-        for i in xrange(len(components)):
+        for i in pycompat.xrange(len(components)):
             component = "/".join(components[0:i])
-            if component in self.p1():
+            if component in self.p1() and self._cache[component]['exists']:
                 fail(path, component)
 
         # Test the other direction -- that this path from p2 isn't a directory
@@ -1929,8 +1927,13 @@
                         flags=flags)
 
     def setflags(self, path, l, x):
+        flag = ''
+        if l:
+            flag = 'l'
+        elif x:
+            flag = 'x'
         self._markdirty(path, exists=True, date=dateutil.makedate(),
-                        flags=(l and 'l' or '') + (x and 'x' or ''))
+                        flags=flag)
 
     def remove(self, path):
         self._markdirty(path, exists=False)
@@ -2037,6 +2040,13 @@
         return keys
 
     def _markdirty(self, path, exists, data=None, date=None, flags=''):
+        # data not provided, let's see if we already have some; if not, let's
+        # grab it from our underlying context, so that we always have data if
+        # the file is marked as existing.
+        if exists and data is None:
+            oldentry = self._cache.get(path) or {}
+            data = oldentry.get('data') or self._wrappedctx[path].data()
+
         self._cache[path] = {
             'exists': exists,
             'data': data,
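
The comment in _markdirty above describes a backfill rule: when a path is
marked as existing without new data, reuse any cached data, and otherwise
pull the content from the wrapped context. A minimal sketch with dicts
standing in for the cache and the underlying context:

    cache = {'a.txt': {'exists': True, 'data': b'cached'}}
    wrappedctx = {'a.txt': b'committed', 'b.txt': b'from p1'}

    def markdirty(path, exists, data=None):
        # Backfill data so an 'exists' entry always carries file content.
        if exists and data is None:
            oldentry = cache.get(path) or {}
            data = oldentry.get('data') or wrappedctx[path]
        cache[path] = {'exists': exists, 'data': data}

    markdirty('a.txt', True)   # keeps b'cached' from the cache
    markdirty('b.txt', True)   # falls back to the wrapped context
    assert cache['b.txt']['data'] == b'from p1'
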
--- a/mercurial/copies.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/copies.py	Tue Sep 04 12:16:28 2018 -0400
@@ -20,6 +20,9 @@
     scmutil,
     util,
 )
+from .utils import (
+    stringutil,
+)
 
 def _findlimit(repo, a, b):
     """
@@ -366,19 +369,22 @@
         return repo.dirstate.copies(), {}, {}, {}, {}
 
     copytracing = repo.ui.config('experimental', 'copytrace')
+    boolctrace = stringutil.parsebool(copytracing)
 
     # Copy trace disabling is explicitly below the node == p1 logic above
     # because the logic above is required for a simple copy to be kept across a
     # rebase.
-    if copytracing == 'off':
-        return {}, {}, {}, {}, {}
-    elif copytracing == 'heuristics':
+    if copytracing == 'heuristics':
         # Do full copytracing if only non-public revisions are involved as
         # that will be fast enough and will also cover the copies which could
         # be missed by heuristics
         if _isfullcopytraceable(repo, c1, base):
             return _fullcopytracing(repo, c1, c2, base)
         return _heuristicscopytracing(repo, c1, c2, base)
+    elif boolctrace is False:
+        # stringutil.parsebool() returns None when it is unable to parse the
+        # value; in such cases we keep copytracing enabled rather than
+        # treating the setting as 'off'
+        return {}, {}, {}, {}, {}
     else:
         return _fullcopytracing(repo, c1, c2, base)
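
With this change, only a value that parses as an explicit boolean False
disables copytracing; 'heuristics' keeps its dedicated path, and anything
unparsable falls through to full copytracing. A sketch of that decision,
with a simplified stand-in for stringutil.parsebool:

    def parsebool(s):
        # Simplified stand-in for stringutil.parsebool(): None if unparsable.
        return {'1': True, 'yes': True, 'true': True, 'on': True,
                '0': False, 'no': False, 'false': False, 'off': False}.get(s)

    for value in ('off', 'no', 'heuristics', 'surprising-value'):
        b = parsebool(value)
        if value == 'heuristics':
            mode = 'heuristics'
        elif b is False:
            mode = 'disabled'
        else:
            mode = 'full'          # True and unparsable (None) both trace
        print('%s -> %s' % (value, mode))
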
 
@@ -593,16 +599,16 @@
             continue
         elif dsrc in d1 and ddst in d1:
             # directory wasn't entirely moved locally
-            invalid.add(dsrc + "/")
+            invalid.add(dsrc)
         elif dsrc in d2 and ddst in d2:
             # directory wasn't entirely moved remotely
-            invalid.add(dsrc + "/")
-        elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
+            invalid.add(dsrc)
+        elif dsrc in dirmove and dirmove[dsrc] != ddst:
             # files from the same directory moved to two different places
-            invalid.add(dsrc + "/")
+            invalid.add(dsrc)
         else:
             # looks good so far
-            dirmove[dsrc + "/"] = ddst + "/"
+            dirmove[dsrc] = ddst
 
     for i in invalid:
         if i in dirmove:
@@ -612,6 +618,8 @@
     if not dirmove:
         return copy, {}, diverge, renamedelete, {}
 
+    dirmove = {k + "/": v + "/" for k, v in dirmove.iteritems()}
+
     for d in dirmove:
         repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                       (d, dirmove[d]))
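
The directory-move table is now built without trailing slashes and
normalized in a single pass at the end, instead of appending "/" at every
insertion and lookup. A standalone illustration (dict.items() here where
the Python 2 code above uses iteritems()):

    dirmove = {'src/old': 'src/new', 'lib': 'pkg'}
    # One normalization pass replaces the per-site + "/" concatenations.
    dirmove = {k + "/": v + "/" for k, v in dirmove.items()}
    assert dirmove == {'src/old/': 'src/new/', 'lib/': 'pkg/'}
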
@@ -868,8 +876,10 @@
     copies between fromrev and rev.
     """
     exclude = {}
+    ctraceconfig = repo.ui.config('experimental', 'copytrace')
+    bctrace = stringutil.parsebool(ctraceconfig)
     if (skiprev is not None and
-        repo.ui.config('experimental', 'copytrace') != 'off'):
+        (ctraceconfig == 'heuristics' or bctrace or bctrace is None)):
         # copytrace='off' skips this line, but not the entire function because
         # the line below is O(size of the repo) during a rebase, while the rest
         # of the function is much faster (and is required for carrying copy
--- a/mercurial/dagop.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/dagop.py	Tue Sep 04 12:16:28 2018 -0400
@@ -195,7 +195,7 @@
     """Build map of 'rev -> child revs', offset from startrev"""
     cl = repo.changelog
     nullrev = node.nullrev
-    descmap = [[] for _rev in xrange(startrev, len(cl))]
+    descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
     for currev in cl.revs(startrev + 1):
         p1rev, p2rev = cl.parentrevs(currev)
         if p1rev >= startrev:
@@ -435,7 +435,7 @@
         for idx, (parent, blocks) in enumerate(pblocks):
             for (a1, a2, b1, b2), _t in blocks:
                 if a2 - a1 >= b2 - b1:
-                    for bk in xrange(b1, b2):
+                    for bk in pycompat.xrange(b1, b2):
                         if child.fctxs[bk] == childfctx:
                             ak = min(a1 + (bk - b1), a2 - 1)
                             child.fctxs[bk] = parent.fctxs[ak]
@@ -448,7 +448,7 @@
         # line.
         for parent, blocks in remaining:
             for a1, a2, b1, b2 in blocks:
-                for bk in xrange(b1, b2):
+                for bk in pycompat.xrange(b1, b2):
                     if child.fctxs[bk] == childfctx:
                         ak = min(a1 + (bk - b1), a2 - 1)
                         child.fctxs[bk] = parent.fctxs[ak]
@@ -715,3 +715,63 @@
     for g in groups:
         for r in g[0]:
             yield r
+
+def headrevs(revs, parentsfn):
+    """Resolve the set of heads from a set of revisions.
+
+    Receives an iterable of revision numbers and a callable that receives a
+    revision number and returns an iterable of parent revision numbers, possibly
+    including nullrev.
+
+    Returns a set of revision numbers that are DAG heads within the passed
+    subset.
+
+    ``nullrev`` is never included in the returned set, even if it is provided in
+    the input set.
+    """
+    headrevs = set(revs)
+
+    for rev in revs:
+        for prev in parentsfn(rev):
+            headrevs.discard(prev)
+
+    headrevs.discard(node.nullrev)
+
+    return headrevs
+
+def linearize(revs, parentsfn):
+    """Linearize and topologically sort a list of revisions.
+
+    The linearization process tries to create long runs of revs where a child
+    rev comes immediately after its first parent. This is done by visiting the
+    heads of the revs in inverse topological order, and for each visited rev,
+    visiting its second parent, then its first parent, then adding the rev
+    itself to the output list.
+
+    Returns a list of revision numbers.
+    """
+    visit = list(sorted(headrevs(revs, parentsfn), reverse=True))
+    finished = set()
+    result = []
+
+    while visit:
+        rev = visit.pop()
+        if rev < 0:
+            rev = -rev - 1
+
+            if rev not in finished:
+                result.append(rev)
+                finished.add(rev)
+
+        else:
+            visit.append(-rev - 1)
+
+            for prev in parentsfn(rev):
+                if prev == node.nullrev or prev not in revs or prev in finished:
+                    continue
+
+                visit.append(prev)
+
+    assert len(result) == len(revs)
+
+    return result
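
These two helpers replace the revlog-bound versions from the deleted
dagutil module (see below) with storage-agnostic ones driven by a parents
callback. A small usage sketch on a four-revision DAG, assuming a Mercurial
checkout on sys.path so the module is importable as dagop, and that
node.nullrev is -1:

    from mercurial import dagop

    parents = {0: [-1], 1: [0], 2: [0], 3: [1, 2]}
    revs = {0, 1, 2, 3}
    parentsfn = lambda rev: parents[rev]

    # Only rev 3 has no child inside the subset.
    assert dagop.headrevs(revs, parentsfn) == {3}
    # Children follow their first parent where possible.
    assert dagop.linearize(revs, parentsfn) == [0, 2, 1, 3]
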
--- a/mercurial/dagparser.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/dagparser.py	Tue Sep 04 12:16:28 2018 -0400
@@ -222,7 +222,7 @@
         elif c == '+':
             c, digs = nextrun(nextch(), pycompat.bytestr(string.digits))
             n = int(digs)
-            for i in xrange(0, n):
+            for i in pycompat.xrange(0, n):
                 yield 'n', (r, [p1])
                 p1 = r
                 r += 1
--- a/mercurial/dagutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,287 +0,0 @@
-# dagutil.py - dag utilities for mercurial
-#
-# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
-# and Peter Arrenbrecht <peter@arrenbrecht.ch>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from .i18n import _
-from .node import nullrev
-
-class basedag(object):
-    '''generic interface for DAGs
-
-    terms:
-    "ix" (short for index) identifies a nodes internally,
-    "id" identifies one externally.
-
-    All params are ixs unless explicitly suffixed otherwise.
-    Pluralized params are lists or sets.
-    '''
-
-    def __init__(self):
-        self._inverse = None
-
-    def nodeset(self):
-        '''set of all node ixs'''
-        raise NotImplementedError
-
-    def heads(self):
-        '''list of head ixs'''
-        raise NotImplementedError
-
-    def parents(self, ix):
-        '''list of parents ixs of ix'''
-        raise NotImplementedError
-
-    def inverse(self):
-        '''inverse DAG, where parents becomes children, etc.'''
-        raise NotImplementedError
-
-    def ancestorset(self, starts, stops=None):
-        '''
-        set of all ancestors of starts (incl), but stop walk at stops (excl)
-        '''
-        raise NotImplementedError
-
-    def descendantset(self, starts, stops=None):
-        '''
-        set of all descendants of starts (incl), but stop walk at stops (excl)
-        '''
-        return self.inverse().ancestorset(starts, stops)
-
-    def headsetofconnecteds(self, ixs):
-        '''
-        subset of connected list of ixs so that no node has a descendant in it
-
-        By "connected list" we mean that if an ancestor and a descendant are in
-        the list, then so is at least one path connecting them.
-        '''
-        raise NotImplementedError
-
-    def externalize(self, ix):
-        '''return a node id'''
-        return self._externalize(ix)
-
-    def externalizeall(self, ixs):
-        '''return a list of (or set if given a set) of node ids'''
-        ids = self._externalizeall(ixs)
-        if isinstance(ixs, set):
-            return set(ids)
-        return list(ids)
-
-    def internalize(self, id):
-        '''return a node ix'''
-        return self._internalize(id)
-
-    def internalizeall(self, ids, filterunknown=False):
-        '''return a list of (or set if given a set) of node ixs'''
-        ixs = self._internalizeall(ids, filterunknown)
-        if isinstance(ids, set):
-            return set(ixs)
-        return list(ixs)
-
-
-class genericdag(basedag):
-    '''generic implementations for DAGs'''
-
-    def ancestorset(self, starts, stops=None):
-        if stops:
-            stops = set(stops)
-        else:
-            stops = set()
-        seen = set()
-        pending = list(starts)
-        while pending:
-            n = pending.pop()
-            if n not in seen and n not in stops:
-                seen.add(n)
-                pending.extend(self.parents(n))
-        return seen
-
-    def headsetofconnecteds(self, ixs):
-        hds = set(ixs)
-        if not hds:
-            return hds
-        for n in ixs:
-            for p in self.parents(n):
-                hds.discard(p)
-        assert hds
-        return hds
-
-
-class revlogbaseddag(basedag):
-    '''generic dag interface to a revlog'''
-
-    def __init__(self, revlog, nodeset):
-        basedag.__init__(self)
-        self._revlog = revlog
-        self._heads = None
-        self._nodeset = nodeset
-
-    def nodeset(self):
-        return self._nodeset
-
-    def heads(self):
-        if self._heads is None:
-            self._heads = self._getheads()
-        return self._heads
-
-    def _externalize(self, ix):
-        return self._revlog.index[ix][7]
-    def _externalizeall(self, ixs):
-        idx = self._revlog.index
-        return [idx[i][7] for i in ixs]
-
-    def _internalize(self, id):
-        ix = self._revlog.rev(id)
-        if ix == nullrev:
-            raise LookupError(id, self._revlog.indexfile, _('nullid'))
-        return ix
-    def _internalizeall(self, ids, filterunknown):
-        rl = self._revlog
-        if filterunknown:
-            return [r for r in map(rl.nodemap.get, ids)
-                    if (r is not None
-                        and r != nullrev
-                        and r not in rl.filteredrevs)]
-        return [self._internalize(i) for i in ids]
-
-
-class revlogdag(revlogbaseddag):
-    '''dag interface to a revlog'''
-
-    def __init__(self, revlog, localsubset=None):
-        revlogbaseddag.__init__(self, revlog, set(revlog))
-        self._heads = localsubset
-
-    def _getheads(self):
-        return [r for r in self._revlog.headrevs() if r != nullrev]
-
-    def parents(self, ix):
-        rlog = self._revlog
-        idx = rlog.index
-        revdata = idx[ix]
-        prev = revdata[5]
-        if prev != nullrev:
-            prev2 = revdata[6]
-            if prev2 == nullrev:
-                return [prev]
-            return [prev, prev2]
-        prev2 = revdata[6]
-        if prev2 != nullrev:
-            return [prev2]
-        return []
-
-    def inverse(self):
-        if self._inverse is None:
-            self._inverse = inverserevlogdag(self)
-        return self._inverse
-
-    def ancestorset(self, starts, stops=None):
-        rlog = self._revlog
-        idx = rlog.index
-        if stops:
-            stops = set(stops)
-        else:
-            stops = set()
-        seen = set()
-        pending = list(starts)
-        while pending:
-            rev = pending.pop()
-            if rev not in seen and rev not in stops:
-                seen.add(rev)
-                revdata = idx[rev]
-                for i in [5, 6]:
-                    prev = revdata[i]
-                    if prev != nullrev:
-                        pending.append(prev)
-        return seen
-
-    def headsetofconnecteds(self, ixs):
-        if not ixs:
-            return set()
-        rlog = self._revlog
-        idx = rlog.index
-        headrevs = set(ixs)
-        for rev in ixs:
-            revdata = idx[rev]
-            for i in [5, 6]:
-                prev = revdata[i]
-                if prev != nullrev:
-                    headrevs.discard(prev)
-        assert headrevs
-        return headrevs
-
-    def linearize(self, ixs):
-        '''linearize and topologically sort a list of revisions
-
-        The linearization process tries to create long runs of revs where
-        a child rev comes immediately after its first parent. This is done by
-        visiting the heads of the given revs in inverse topological order,
-        and for each visited rev, visiting its second parent, then its first
-        parent, then adding the rev itself to the output list.
-        '''
-        sorted = []
-        visit = list(self.headsetofconnecteds(ixs))
-        visit.sort(reverse=True)
-        finished = set()
-
-        while visit:
-            cur = visit.pop()
-            if cur < 0:
-                cur = -cur - 1
-                if cur not in finished:
-                    sorted.append(cur)
-                    finished.add(cur)
-            else:
-                visit.append(-cur - 1)
-                visit += [p for p in self.parents(cur)
-                          if p in ixs and p not in finished]
-        assert len(sorted) == len(ixs)
-        return sorted
-
-
-class inverserevlogdag(revlogbaseddag, genericdag):
-    '''inverse of an existing revlog dag; see revlogdag.inverse()'''
-
-    def __init__(self, orig):
-        revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
-        self._orig = orig
-        self._children = {}
-        self._roots = []
-        self._walkfrom = len(self._revlog) - 1
-
-    def _walkto(self, walkto):
-        rev = self._walkfrom
-        cs = self._children
-        roots = self._roots
-        idx = self._revlog.index
-        while rev >= walkto:
-            data = idx[rev]
-            isroot = True
-            for prev in [data[5], data[6]]: # parent revs
-                if prev != nullrev:
-                    cs.setdefault(prev, []).append(rev)
-                    isroot = False
-            if isroot:
-                roots.append(rev)
-            rev -= 1
-        self._walkfrom = rev
-
-    def _getheads(self):
-        self._walkto(nullrev)
-        return self._roots
-
-    def parents(self, ix):
-        if ix is None:
-            return []
-        if ix <= self._walkfrom:
-            self._walkto(ix)
-        return self._children.get(ix, [])
-
-    def inverse(self):
-        return self._orig
--- a/mercurial/debugcommands.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/debugcommands.py	Tue Sep 04 12:16:28 2018 -0400
@@ -42,13 +42,12 @@
     color,
     context,
     dagparser,
-    dagutil,
     encoding,
     error,
     exchange,
     extensions,
     filemerge,
-    fileset,
+    filesetlang,
     formatter,
     hg,
     httppeer,
@@ -89,6 +88,10 @@
     stringutil,
 )
 
+from .revlogutils import (
+    deltas as deltautil
+)
+
 release = lockmod.release
 
 command = registrar.command()
@@ -177,7 +180,8 @@
     if mergeable_file:
         linesperrev = 2
         # make a file with k lines per rev
-        initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
+        initialmergedlines = ['%d' % i
+                              for i in pycompat.xrange(0, total * linesperrev)]
         initialmergedlines.append("")
 
     tags = []
@@ -556,7 +560,7 @@
         file_, rev = None, file_
     elif rev is None:
         raise error.CommandError('debugdata', _('invalid arguments'))
-    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
+    r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
     try:
         ui.write(r.revision(r.lookup(rev), raw=True))
     except KeyError:
@@ -706,7 +710,7 @@
             largestblock = 0
             srchunks = 0
 
-            for revschunk in revlog._slicechunk(r, chain):
+            for revschunk in deltautil.slicechunk(r, chain):
                 srchunks += 1
                 blkend = start(revschunk[-1]) + length(revschunk[-1])
                 blksize = blkend - start(revschunk[0])
@@ -790,9 +794,10 @@
             if not opts.get('nonheads'):
                 ui.write(("unpruned common: %s\n") %
                          " ".join(sorted(short(n) for n in common)))
-                dag = dagutil.revlogdag(repo.changelog)
-                all = dag.ancestorset(dag.internalizeall(common))
-                common = dag.externalizeall(dag.headsetofconnecteds(all))
+
+                clnode = repo.changelog.node
+                common = repo.revs('heads(::%ln)', common)
+                common = {clnode(r) for r in common}
         else:
             nodes = None
             if pushedrevs:
@@ -887,15 +892,45 @@
 @command('debugfileset',
     [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
      ('', 'all-files', False,
-      _('test files from all revisions and working directory'))],
-    _('[-r REV] [--all-files] FILESPEC'))
+      _('test files from all revisions and working directory')),
+     ('s', 'show-matcher', None,
+      _('print internal representation of matcher')),
+     ('p', 'show-stage', [],
+      _('print parsed tree at the given stage'), _('NAME'))],
+    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
 def debugfileset(ui, repo, expr, **opts):
     '''parse and apply a fileset specification'''
+    from . import fileset
+    fileset.symbols # force import of fileset so we have predicates to optimize
     opts = pycompat.byteskwargs(opts)
     ctx = scmutil.revsingle(repo, opts.get('rev'), None)
-    if ui.verbose:
-        tree = fileset.parse(expr)
-        ui.note(fileset.prettyformat(tree), "\n")
+
+    stages = [
+        ('parsed', pycompat.identity),
+        ('analyzed', filesetlang.analyze),
+        ('optimized', filesetlang.optimize),
+    ]
+    stagenames = set(n for n, f in stages)
+
+    showalways = set()
+    if ui.verbose and not opts['show_stage']:
+        # show parsed tree by --verbose (deprecated)
+        showalways.add('parsed')
+    if opts['show_stage'] == ['all']:
+        showalways.update(stagenames)
+    else:
+        for n in opts['show_stage']:
+            if n not in stagenames:
+                raise error.Abort(_('invalid stage name: %s') % n)
+        showalways.update(opts['show_stage'])
+
+    tree = filesetlang.parse(expr)
+    for n, f in stages:
+        tree = f(tree)
+        if n in showalways:
+            if opts['show_stage'] or n != 'parsed':
+                ui.write(("* %s:\n") % n)
+            ui.write(filesetlang.prettyformat(tree), "\n")
 
     files = set()
     if opts['all_files']:
@@ -914,14 +949,15 @@
         files.update(ctx.substate)
 
     m = ctx.matchfileset(expr)
+    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
+        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
     for f in sorted(files):
         if not m(f):
             continue
         ui.write("%s\n" % f)
 
 @command('debugformat',
-         [] + cmdutil.formatteropts,
-        _(''))
+         [] + cmdutil.formatteropts)
 def debugformat(ui, repo, **opts):
     """display format information about the current repository
 
@@ -1076,77 +1112,48 @@
             else:
                 ui.write(_("%s is not ignored\n") % m.uipath(f))
 
-@command('debugindex', cmdutil.debugrevlogopts +
-    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
-    _('[-f FORMAT] -c|-m|FILE'),
-    optionalrepo=True)
+@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
+         _('-c|-m|FILE'))
 def debugindex(ui, repo, file_=None, **opts):
-    """dump the contents of an index file"""
+    """dump index data for a storage primitive"""
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
-    format = opts.get('format', 0)
-    if format not in (0, 1):
-        raise error.Abort(_("unknown format %d") % format)
+    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)
 
     if ui.debugflag:
         shortfn = hex
     else:
         shortfn = short
 
-    # There might not be anything in r, so have a sane default
     idlen = 12
-    for i in r:
-        idlen = len(shortfn(r.node(i)))
+    for i in store:
+        idlen = len(shortfn(store.node(i)))
         break
 
-    if format == 0:
-        if ui.verbose:
-            ui.write(("   rev    offset  length linkrev"
-                     " %s %s p2\n") % ("nodeid".ljust(idlen),
-                                       "p1".ljust(idlen)))
-        else:
-            ui.write(("   rev linkrev %s %s p2\n") % (
-                "nodeid".ljust(idlen), "p1".ljust(idlen)))
-    elif format == 1:
-        if ui.verbose:
-            ui.write(("   rev flag   offset   length     size   link     p1"
-                      "     p2 %s\n") % "nodeid".rjust(idlen))
-        else:
-            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
-                     "nodeid".rjust(idlen))
-
-    for i in r:
-        node = r.node(i)
-        if format == 0:
-            try:
-                pp = r.parents(node)
-            except Exception:
-                pp = [nullid, nullid]
-            if ui.verbose:
-                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
-                        i, r.start(i), r.length(i), r.linkrev(i),
-                        shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
-            else:
-                ui.write("% 6d % 7d %s %s %s\n" % (
-                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
-                    shortfn(pp[1])))
-        elif format == 1:
-            pr = r.parentrevs(i)
-            if ui.verbose:
-                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
-                        i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
-                        r.linkrev(i), pr[0], pr[1], shortfn(node)))
-            else:
-                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
-                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
-                    shortfn(node)))
+    fm = ui.formatter('debugindex', opts)
+    fm.plain(b'   rev linkrev %s %s p2\n' % (
+        b'nodeid'.ljust(idlen),
+        b'p1'.ljust(idlen)))
+
+    for rev in store:
+        node = store.node(rev)
+        parents = store.parents(node)
+
+        fm.startitem()
+        fm.write(b'rev', b'%6d ', rev)
+        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
+        fm.write(b'node', '%s ', shortfn(node))
+        fm.write(b'p1', '%s ', shortfn(parents[0]))
+        fm.write(b'p2', '%s', shortfn(parents[1]))
+        fm.plain(b'\n')
+
+    fm.end()
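
The rewritten debugindex drives all output through a formatter: fm.plain()
text appears only in the default (plain) output, while fm.write() fields
also feed templated output. A hedged stub that reproduces only the plain
behaviour of the calls used above:

    class plainformatter(object):
        # Minimal stand-in for ui.formatter(...) in plain mode.
        def startitem(self):
            pass
        def write(self, field, fmt, value):
            print(fmt % value, end='')
        def plain(self, text):
            print(text, end='')
        def end(self):
            pass

    fm = plainformatter()
    fm.plain('   rev linkrev nodeid\n')
    for rev, linkrev, node in [(0, 0, 'abc123def456'), (1, 1, '0123456789ab')]:
        fm.startitem()
        fm.write('rev', '%6d ', rev)
        fm.write('linkrev', '%7d ', linkrev)
        fm.write('node', '%s', node)
        fm.plain('\n')
    fm.end()
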
 
 @command('debugindexdot', cmdutil.debugrevlogopts,
     _('-c|-m|FILE'), optionalrepo=True)
 def debugindexdot(ui, repo, file_=None, **opts):
     """dump an index DAG as a graphviz dot file"""
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
+    r = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
     ui.write(("digraph G {\n"))
     for i in r:
         node = r.node(i)
@@ -1446,6 +1453,53 @@
 
     return held
 
+@command('debugmanifestfulltextcache', [
+        ('', 'clear', False, _('clear the cache')),
+        ('a', 'add', '', _('add the given manifest node to the cache'),
+         _('NODE'))
+    ], '')
+def debugmanifestfulltextcache(ui, repo, add=None, **opts):
+    """show, clear or amend the contents of the manifest fulltext cache"""
+    with repo.lock():
+        r = repo.manifestlog.getstorage(b'')
+        try:
+            cache = r._fulltextcache
+        except AttributeError:
+            ui.warn(_(
+                "Current revlog implementation doesn't appear to have a "
+                'manifest fulltext cache\n'))
+            return
+
+        if opts.get(r'clear'):
+            cache.clear()
+
+        if add:
+            try:
+                manifest = repo.manifestlog[r.lookup(add)]
+            except error.LookupError as e:
+                raise error.Abort(e, hint="Check your manifest node id")
+            manifest.read()  # stores revision in cache too
+
+        if not len(cache):
+            ui.write(_('Cache empty\n'))
+        else:
+            ui.write(
+                _('Cache contains %d manifest entries, in order of most to '
+                  'least recent:\n') % (len(cache),))
+            totalsize = 0
+            for nodeid in cache:
+                # Use cache.get to not update the LRU order
+                data = cache.get(nodeid)
+                size = len(data)
+                totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
+                ui.write(_('id: %s, size %s\n') % (
+                    hex(nodeid), util.bytecount(size)))
+            ondisk = cache._opener.stat('manifestfulltextcache').st_size
+            ui.write(
+                _('Total cache data size %s, on-disk %s\n') % (
+                    util.bytecount(totalsize), util.bytecount(ondisk))
+            )
+
 @command('debugmergestate', [], '')
 def debugmergestate(ui, repo, *args):
     """print merge state
@@ -1971,7 +2025,7 @@
         ts = 0
         heads = set()
 
-        for rev in xrange(numrevs):
+        for rev in pycompat.xrange(numrevs):
             dbase = r.deltaparent(rev)
             if dbase == -1:
                 dbase = rev
@@ -2006,20 +2060,43 @@
     if not flags:
         flags = ['(none)']
 
+    ### tracks merge vs single parent
     nummerges = 0
+
+    ### tracks ways the "delta" are build
+    # nodelta
+    numempty = 0
+    numemptytext = 0
+    numemptydelta = 0
+    # full file content
     numfull = 0
+    # intermediate snapshot against a prior snapshot
+    numsemi = 0
+    # snapshot count per depth
+    numsnapdepth = collections.defaultdict(lambda: 0)
+    # delta against previous revision
     numprev = 0
+    # delta against first or second parent (not prev)
     nump1 = 0
     nump2 = 0
+    # delta against neither prev nor parents
     numother = 0
+    # delta against prev that are also first or second parent
+    # (details of `numprev`)
     nump1prev = 0
     nump2prev = 0
+
+    # data about delta chain of each revs
     chainlengths = []
     chainbases = []
     chainspans = []
 
+    # data about each revision
     datasize = [None, 0, 0]
     fullsize = [None, 0, 0]
+    semisize = [None, 0, 0]
+    # snapshot count per depth
+    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
     deltasize = [None, 0, 0]
     chunktypecounts = {}
     chunktypesizes = {}
@@ -2032,7 +2109,7 @@
         l[2] += size
 
     numrevs = len(r)
-    for rev in xrange(numrevs):
+    for rev in pycompat.xrange(numrevs):
         p1, p2 = r.parentrevs(rev)
         delta = r.deltaparent(rev)
         if format > 0:
@@ -2044,30 +2121,49 @@
             chainlengths.append(0)
             chainbases.append(r.start(rev))
             chainspans.append(size)
-            numfull += 1
-            addsize(size, fullsize)
+            if size == 0:
+                numempty += 1
+                numemptytext += 1
+            else:
+                numfull += 1
+                numsnapdepth[0] += 1
+                addsize(size, fullsize)
+                addsize(size, snapsizedepth[0])
         else:
             chainlengths.append(chainlengths[delta] + 1)
             baseaddr = chainbases[delta]
             revaddr = r.start(rev)
             chainbases.append(baseaddr)
             chainspans.append((revaddr - baseaddr) + size)
-            addsize(size, deltasize)
-            if delta == rev - 1:
-                numprev += 1
-                if delta == p1:
-                    nump1prev += 1
+            if size == 0:
+                numempty += 1
+                numemptydelta += 1
+            elif r.issnapshot(rev):
+                addsize(size, semisize)
+                numsemi += 1
+                depth = r.snapshotdepth(rev)
+                numsnapdepth[depth] += 1
+                addsize(size, snapsizedepth[depth])
+            else:
+                addsize(size, deltasize)
+                if delta == rev - 1:
+                    numprev += 1
+                    if delta == p1:
+                        nump1prev += 1
+                    elif delta == p2:
+                        nump2prev += 1
+                elif delta == p1:
+                    nump1 += 1
                 elif delta == p2:
-                    nump2prev += 1
-            elif delta == p1:
-                nump1 += 1
-            elif delta == p2:
-                nump2 += 1
-            elif delta != nullrev:
-                numother += 1
+                    nump2 += 1
+                elif delta != nullrev:
+                    numother += 1
 
         # Obtain data on the raw chunks in the revlog.
-        segment = r._getsegmentforrevs(rev, rev)[1]
+        if util.safehasattr(r, '_getsegmentforrevs'):
+            segment = r._getsegmentforrevs(rev, rev)[1]
+        else:
+            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
         if segment:
             chunktype = bytes(segment[0:1])
         else:
@@ -2081,20 +2177,28 @@
         chunktypesizes[chunktype] += size
 
     # Adjust size min value for empty cases
-    for size in (datasize, fullsize, deltasize):
+    for size in (datasize, fullsize, semisize, deltasize):
         if size[0] is None:
             size[0] = 0
 
-    numdeltas = numrevs - numfull
+    numdeltas = numrevs - numfull - numempty - numsemi
     numoprev = numprev - nump1prev - nump2prev
     totalrawsize = datasize[2]
     datasize[2] /= numrevs
     fulltotal = fullsize[2]
     fullsize[2] /= numfull
+    semitotal = semisize[2]
+    snaptotal = {}
+    if 0 < numsemi:
+        semisize[2] /= numsemi
+    for depth in snapsizedepth:
+        snaptotal[depth] = snapsizedepth[depth][2]
+        snapsizedepth[depth][2] /= numsnapdepth[depth]
+
     deltatotal = deltasize[2]
-    if numrevs - numfull > 0:
-        deltasize[2] /= numrevs - numfull
-    totalsize = fulltotal + deltatotal
+    if numdeltas > 0:
+        deltasize[2] /= numdeltas
+    totalsize = fulltotal + semitotal + deltatotal
     avgchainlen = sum(chainlengths) / numrevs
     maxchainlen = max(chainlengths)
     maxchainspan = max(chainspans)
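
The new statistics classify every revision as empty, a snapshot (a full
text at depth 0, an intermediate snapshot at depth > 0), or a delta, with
per-depth counters and size triples. A sketch of the bucket bookkeeping,
assuming addsize maintains a (min, max, total) triple like the helper
defined earlier in this function:

    import collections

    numsnapdepth = collections.defaultdict(lambda: 0)
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])

    def addsize(size, l):
        # Maintain a (min, max, total) triple, as the revlog stats do.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    for depth, size in [(0, 100), (1, 40), (1, 60)]:
        numsnapdepth[depth] += 1
        addsize(size, snapsizedepth[depth])

    assert numsnapdepth == {0: 1, 1: 2}
    assert snapsizedepth[1] == [40, 60, 100]
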
@@ -2126,10 +2230,22 @@
     ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
     ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
     ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
+    ui.write(('    empty     : ') + fmt % pcfmt(numempty, numrevs))
+    ui.write(('                   text  : ')
+             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
+    ui.write(('                   delta : ')
+             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
+    ui.write(('    snapshot  : ') + fmt % pcfmt(numfull + numsemi, numrevs))
+    for depth in sorted(numsnapdepth):
+        ui.write(('      lvl-%-3d :       ' % depth)
+                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
     ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
     ui.write(('revision size : ') + fmt2 % totalsize)
-    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
+    ui.write(('    snapshot  : ')
+             + fmt % pcfmt(fulltotal + semitotal, totalsize))
+    for depth in sorted(numsnapdepth):
+        ui.write(('      lvl-%-3d :       ' % depth)
+                 + fmt % pcfmt(snaptotal[depth], totalsize))
     ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
 
     def fmtchunktype(chunktype):
@@ -2163,6 +2279,13 @@
                  % tuple(datasize))
     ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
              % tuple(fullsize))
+    ui.write(('inter-snapshot size (min/max/avg)    : %d / %d / %d\n')
+             % tuple(semisize))
+    for depth in sorted(snapsizedepth):
+        if depth == 0:
+            continue
+        ui.write(('    level-%-3d (min/max/avg)          : %d / %d / %d\n')
+                 % ((depth,) + tuple(snapsizedepth[depth])))
     ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
              % tuple(deltasize))
 
@@ -2186,6 +2309,71 @@
             ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                              numdeltas))
 
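The percentage columns in the report above come from a pcfmt helper defined earlier in this module (not shown in this hunk); a minimal sketch of the assumed contract, pairing a raw value with its share of a total while guarding against an empty revlog:

    def pcfmt(value, total):
        # assumed behavior: feeds a "%d (%.2f%%)"-style format string
        if total:
            return (value, 100 * float(value) / total)
        return (value, 100.0)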
+@command('debugrevlogindex', cmdutil.debugrevlogopts +
+    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
+    _('[-f FORMAT] -c|-m|FILE'),
+    optionalrepo=True)
+def debugrevlogindex(ui, repo, file_=None, **opts):
+    """dump the contents of a revlog index"""
+    opts = pycompat.byteskwargs(opts)
+    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
+    format = opts.get('format', 0)
+    if format not in (0, 1):
+        raise error.Abort(_("unknown format %d") % format)
+
+    if ui.debugflag:
+        shortfn = hex
+    else:
+        shortfn = short
+
+    # There might not be anything in r, so have a sane default
+    idlen = 12
+    for i in r:
+        idlen = len(shortfn(r.node(i)))
+        break
+
+    if format == 0:
+        if ui.verbose:
+            ui.write(("   rev    offset  length linkrev"
+                     " %s %s p2\n") % ("nodeid".ljust(idlen),
+                                       "p1".ljust(idlen)))
+        else:
+            ui.write(("   rev linkrev %s %s p2\n") % (
+                "nodeid".ljust(idlen), "p1".ljust(idlen)))
+    elif format == 1:
+        if ui.verbose:
+            ui.write(("   rev flag   offset   length     size   link     p1"
+                      "     p2 %s\n") % "nodeid".rjust(idlen))
+        else:
+            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
+                     "nodeid".rjust(idlen))
+
+    for i in r:
+        node = r.node(i)
+        if format == 0:
+            try:
+                pp = r.parents(node)
+            except Exception:
+                pp = [nullid, nullid]
+            if ui.verbose:
+                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
+                        i, r.start(i), r.length(i), r.linkrev(i),
+                        shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
+            else:
+                ui.write("% 6d % 7d %s %s %s\n" % (
+                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
+                    shortfn(pp[1])))
+        elif format == 1:
+            pr = r.parentrevs(i)
+            if ui.verbose:
+                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
+                        i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
+                        r.linkrev(i), pr[0], pr[1], shortfn(node)))
+            else:
+                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
+                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
+                    shortfn(node)))
+
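A usage sketch for the new command (the revlog path is hypothetical; output columns follow the headers printed above, with --format 1 adding flags and raw sizes):

    $ hg debugrevlogindex -m --format 1
    $ hg debugrevlogindex .hg/store/data/somefile.i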
 @command('debugrevspec',
     [('', 'optimize', None,
       _('print parsed tree after optimizing (DEPRECATED)')),
@@ -2642,7 +2830,7 @@
         if line.startswith(b'#'):
             continue
 
-        if not line.startswith(' '):
+        if not line.startswith(b' '):
             # New block. Flush previous one.
             if activeaction:
                 yield activeaction, blocklines
@@ -3056,11 +3244,10 @@
                 if isinstance(res, wireprotov2peer.commandresponse):
                     val = list(res.cborobjects())
                     ui.status(_('response: %s\n') %
-                              stringutil.pprint(val, bprefix=True))
-
+                              stringutil.pprint(val, bprefix=True, indent=2))
                 else:
                     ui.status(_('response: %s\n') %
-                              stringutil.pprint(res, bprefix=True))
+                              stringutil.pprint(res, bprefix=True, indent=2))
 
         elif action == 'batchbegin':
             if batchedcommands is not None:
@@ -3122,18 +3309,20 @@
             # urllib.Request insists on using has_data() as a proxy for
             # determining the request method. Override that to use our
             # explicitly requested method.
-            req.get_method = lambda: method
+            req.get_method = lambda: pycompat.sysstr(method)
 
             try:
                 res = opener.open(req)
                 body = res.read()
             except util.urlerr.urlerror as e:
-                e.read()
+                # read() method must be called, but only exists in Python 2
+                getattr(e, 'read', lambda: None)()
                 continue
 
             if res.headers.get('Content-Type') == 'application/mercurial-cbor':
                 ui.write(_('cbor> %s\n') %
-                         stringutil.pprint(cbor.loads(body), bprefix=True))
+                         stringutil.pprint(cbor.loads(body), bprefix=True,
+                                           indent=2))
 
         elif action == 'close':
             peer.close()
--- a/mercurial/diffhelper.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/diffhelper.py	Tue Sep 04 12:16:28 2018 -0400
@@ -11,6 +11,7 @@
 
 from . import (
     error,
+    pycompat,
 )
 
 def addlines(fp, hunk, lena, lenb, a, b):
@@ -26,7 +27,7 @@
         num = max(todoa, todob)
         if num == 0:
             break
-        for i in xrange(num):
+        for i in pycompat.xrange(num):
             s = fp.readline()
             if not s:
                 raise error.ParseError(_('incomplete hunk'))
@@ -71,7 +72,7 @@
     blen = len(b)
     if alen > blen - bstart or bstart < 0:
         return False
-    for i in xrange(alen):
+    for i in pycompat.xrange(alen):
         if a[i][1:] != b[i + bstart]:
             return False
     return True
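The pycompat.xrange substitution above assumes a shim along these lines (a sketch, not the actual pycompat source): the lazy builtin is range on Python 3 and xrange on Python 2.

    import sys

    if sys.version_info[0] >= 3:
        xrange = range   # Python 3: range is already a lazy sequence
    else:
        xrange = xrange  # Python 2: keep the lazy builtin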
--- a/mercurial/dirstate.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/dirstate.py	Tue Sep 04 12:16:28 2018 -0400
@@ -893,8 +893,11 @@
             wadd = work.append
             while work:
                 nd = work.pop()
-                if not match.visitdir(nd):
+                visitentries = match.visitchildrenset(nd)
+                if not visitentries:
                     continue
+                if visitentries == 'this' or visitentries == 'all':
+                    visitentries = None
                 skip = None
                 if nd == '.':
                     nd = ''
@@ -909,6 +912,16 @@
                         continue
                     raise
                 for f, kind, st in entries:
+                    # Some matchers may return files in the visitentries set,
+                    # instead of 'this', if the matcher explicitly mentions them
+                    # and is not an exactmatcher. This is acceptable; we do not
+                    # make any hard assumptions about file-or-directory below
+                    # based on the presence of `f` in visitentries. If
+                    # visitchildrenset returned a set, we can always skip the
+                    # entries *not* in the set it provided regardless of whether
+                    # they're actually a file or a directory.
+                    if visitentries and f not in visitentries:
+                        continue
                     if normalizefile:
                         # even though f might be a directory, we're only
                         # interested in comparing it to files currently in the
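The visitchildrenset contract relied on above can be condensed into a small sketch (names are illustrative): a falsy return prunes the directory entirely, 'this' or 'all' imposes no restriction, and a set restricts which entries are worth examining at all.

    def restrictentries(visitentries, entries):
        # visitentries comes from match.visitchildrenset(nd);
        # entries are the (name, kind, stat) tuples listed under nd
        if not visitentries:                 # nothing below nd can match
            return []
        if visitentries in ('this', 'all'):  # no useful restriction
            return entries
        # a set: only the named entries can matter, file or directory
        return [e for e in entries if e[0] in visitentries]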
--- a/mercurial/dirstateguard.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/dirstateguard.py	Tue Sep 04 12:16:28 2018 -0400
@@ -11,6 +11,7 @@
 
 from . import (
     error,
+    narrowspec,
     util,
 )
 
@@ -33,7 +34,10 @@
         self._active = False
         self._closed = False
         self._backupname = 'dirstate.backup.%s.%d' % (name, id(self))
+        self._narrowspecbackupname = ('narrowspec.backup.%s.%d' %
+                                      (name, id(self)))
         repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
+        narrowspec.savebackup(repo, self._narrowspecbackupname)
         self._active = True
 
     def __del__(self):
@@ -52,10 +56,12 @@
 
         self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
                                          self._backupname)
+        narrowspec.clearbackup(self._repo, self._narrowspecbackupname)
         self._active = False
         self._closed = True
 
     def _abort(self):
+        narrowspec.restorebackup(self._repo, self._narrowspecbackupname)
         self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                            self._backupname)
         self._active = False
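With the narrowspec backup folded in, both backups now follow one close/abort decision. A usage sketch, assuming the usual transactional close/release protocol (the operation name is hypothetical):

    guard = dirstateguard(repo, 'example')  # saves dirstate + narrowspec
    try:
        mutate_working_copy(repo)           # hypothetical operation
        guard.close()                       # success: drop both backups
    finally:
        guard.release()                     # if not closed, _abort() runs
                                            # and restores both backups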
--- a/mercurial/dispatch.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/dispatch.py	Tue Sep 04 12:16:28 2018 -0400
@@ -21,6 +21,8 @@
 
 from .i18n import _
 
+from hgdemandimport import tracing
+
 from . import (
     cmdutil,
     color,
@@ -84,7 +86,8 @@
 def run():
     "run the command in sys.argv"
     initstdio()
-    req = request(pycompat.sysargv[1:])
+    with tracing.log('parse args into request'):
+        req = request(pycompat.sysargv[1:])
     err = None
     try:
         status = dispatch(req)
@@ -176,182 +179,184 @@
 
 def dispatch(req):
     """run the command specified in req.args; returns an integer status code"""
-    if req.ferr:
-        ferr = req.ferr
-    elif req.ui:
-        ferr = req.ui.ferr
-    else:
-        ferr = procutil.stderr
+    with tracing.log('dispatch.dispatch'):
+        if req.ferr:
+            ferr = req.ferr
+        elif req.ui:
+            ferr = req.ui.ferr
+        else:
+            ferr = procutil.stderr
 
-    try:
-        if not req.ui:
-            req.ui = uimod.ui.load()
-        req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
-        if req.earlyoptions['traceback']:
-            req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
+        try:
+            if not req.ui:
+                req.ui = uimod.ui.load()
+            req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
+            if req.earlyoptions['traceback']:
+                req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
 
-        # set ui streams from the request
-        if req.fin:
-            req.ui.fin = req.fin
-        if req.fout:
-            req.ui.fout = req.fout
-        if req.ferr:
-            req.ui.ferr = req.ferr
-    except error.Abort as inst:
-        ferr.write(_("abort: %s\n") % inst)
-        if inst.hint:
-            ferr.write(_("(%s)\n") % inst.hint)
-        return -1
-    except error.ParseError as inst:
-        _formatparse(ferr.write, inst)
-        return -1
+            # set ui streams from the request
+            if req.fin:
+                req.ui.fin = req.fin
+            if req.fout:
+                req.ui.fout = req.fout
+            if req.ferr:
+                req.ui.ferr = req.ferr
+        except error.Abort as inst:
+            ferr.write(_("abort: %s\n") % inst)
+            if inst.hint:
+                ferr.write(_("(%s)\n") % inst.hint)
+            return -1
+        except error.ParseError as inst:
+            _formatparse(ferr.write, inst)
+            return -1
 
-    msg = _formatargs(req.args)
-    starttime = util.timer()
-    ret = 1  # default of Python exit code on unhandled exception
-    try:
-        ret = _runcatch(req) or 0
-    except error.ProgrammingError as inst:
-        req.ui.warn(_('** ProgrammingError: %s\n') % inst)
-        if inst.hint:
-            req.ui.warn(_('** (%s)\n') % inst.hint)
-        raise
-    except KeyboardInterrupt as inst:
+        msg = _formatargs(req.args)
+        starttime = util.timer()
+        ret = 1  # default of Python exit code on unhandled exception
         try:
-            if isinstance(inst, error.SignalInterrupt):
-                msg = _("killed!\n")
-            else:
-                msg = _("interrupted!\n")
-            req.ui.warn(msg)
-        except error.SignalInterrupt:
-            # maybe pager would quit without consuming all the output, and
-            # SIGPIPE was raised. we cannot print anything in this case.
-            pass
-        except IOError as inst:
-            if inst.errno != errno.EPIPE:
-                raise
-        ret = -1
-    finally:
-        duration = util.timer() - starttime
-        req.ui.flush()
-        if req.ui.logblockedtimes:
-            req.ui._blockedtimes['command_duration'] = duration * 1000
-            req.ui.log('uiblocked', 'ui blocked ms',
-                       **pycompat.strkwargs(req.ui._blockedtimes))
-        req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
-                   msg, ret & 255, duration)
-        try:
-            req._runexithandlers()
-        except: # exiting, so no re-raises
-            ret = ret or -1
-    return ret
+            ret = _runcatch(req) or 0
+        except error.ProgrammingError as inst:
+            req.ui.error(_('** ProgrammingError: %s\n') % inst)
+            if inst.hint:
+                req.ui.error(_('** (%s)\n') % inst.hint)
+            raise
+        except KeyboardInterrupt as inst:
+            try:
+                if isinstance(inst, error.SignalInterrupt):
+                    msg = _("killed!\n")
+                else:
+                    msg = _("interrupted!\n")
+                req.ui.error(msg)
+            except error.SignalInterrupt:
+                # maybe pager would quit without consuming all the output, and
+                # SIGPIPE was raised. we cannot print anything in this case.
+                pass
+            except IOError as inst:
+                if inst.errno != errno.EPIPE:
+                    raise
+            ret = -1
+        finally:
+            duration = util.timer() - starttime
+            req.ui.flush()
+            if req.ui.logblockedtimes:
+                req.ui._blockedtimes['command_duration'] = duration * 1000
+                req.ui.log('uiblocked', 'ui blocked ms',
+                           **pycompat.strkwargs(req.ui._blockedtimes))
+            req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
+                       msg, ret & 255, duration)
+            try:
+                req._runexithandlers()
+            except: # exiting, so no re-raises
+                ret = ret or -1
+        return ret
 
 def _runcatch(req):
-    def catchterm(*args):
-        raise error.SignalInterrupt
+    with tracing.log('dispatch._runcatch'):
+        def catchterm(*args):
+            raise error.SignalInterrupt
 
-    ui = req.ui
-    try:
-        for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
-            num = getattr(signal, name, None)
-            if num:
-                signal.signal(num, catchterm)
-    except ValueError:
-        pass # happens if called in a thread
-
-    def _runcatchfunc():
-        realcmd = None
+        ui = req.ui
         try:
-            cmdargs = fancyopts.fancyopts(req.args[:], commands.globalopts, {})
-            cmd = cmdargs[0]
-            aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
-            realcmd = aliases[0]
-        except (error.UnknownCommand, error.AmbiguousCommand,
-                IndexError, getopt.GetoptError):
-            # Don't handle this here. We know the command is
-            # invalid, but all we're worried about for now is that
-            # it's not a command that server operators expect to
-            # be safe to offer to users in a sandbox.
-            pass
-        if realcmd == 'serve' and '--stdio' in cmdargs:
-            # We want to constrain 'hg serve --stdio' instances pretty
-            # closely, as many shared-ssh access tools want to grant
-            # access to run *only* 'hg -R $repo serve --stdio'. We
-            # restrict to exactly that set of arguments, and prohibit
-            # any repo name that starts with '--' to prevent
-            # shenanigans wherein a user does something like pass
-            # --debugger or --config=ui.debugger=1 as a repo
-            # name. This used to actually run the debugger.
-            if (len(req.args) != 4 or
-                req.args[0] != '-R' or
-                req.args[1].startswith('--') or
-                req.args[2] != 'serve' or
-                req.args[3] != '--stdio'):
-                raise error.Abort(
-                    _('potentially unsafe serve --stdio invocation: %s') %
-                    (stringutil.pprint(req.args),))
+            for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
+                num = getattr(signal, name, None)
+                if num:
+                    signal.signal(num, catchterm)
+        except ValueError:
+            pass # happens if called in a thread
 
-        try:
-            debugger = 'pdb'
-            debugtrace = {
-                'pdb': pdb.set_trace
-            }
-            debugmortem = {
-                'pdb': pdb.post_mortem
-            }
+        def _runcatchfunc():
+            realcmd = None
+            try:
+                cmdargs = fancyopts.fancyopts(
+                    req.args[:], commands.globalopts, {})
+                cmd = cmdargs[0]
+                aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
+                realcmd = aliases[0]
+            except (error.UnknownCommand, error.AmbiguousCommand,
+                    IndexError, getopt.GetoptError):
+                # Don't handle this here. We know the command is
+                # invalid, but all we're worried about for now is that
+                # it's not a command that server operators expect to
+                # be safe to offer to users in a sandbox.
+                pass
+            if realcmd == 'serve' and '--stdio' in cmdargs:
+                # We want to constrain 'hg serve --stdio' instances pretty
+                # closely, as many shared-ssh access tools want to grant
+                # access to run *only* 'hg -R $repo serve --stdio'. We
+                # restrict to exactly that set of arguments, and prohibit
+                # any repo name that starts with '--' to prevent
+                # shenanigans wherein a user does something like pass
+                # --debugger or --config=ui.debugger=1 as a repo
+                # name. This used to actually run the debugger.
+                if (len(req.args) != 4 or
+                    req.args[0] != '-R' or
+                    req.args[1].startswith('--') or
+                    req.args[2] != 'serve' or
+                    req.args[3] != '--stdio'):
+                    raise error.Abort(
+                        _('potentially unsafe serve --stdio invocation: %s') %
+                        (stringutil.pprint(req.args),))
 
-            # read --config before doing anything else
-            # (e.g. to change trust settings for reading .hg/hgrc)
-            cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
-
-            if req.repo:
-                # copy configs that were passed on the cmdline (--config) to
-                # the repo ui
-                for sec, name, val in cfgs:
-                    req.repo.ui.setconfig(sec, name, val, source='--config')
+            try:
+                debugger = 'pdb'
+                debugtrace = {
+                    'pdb': pdb.set_trace
+                }
+                debugmortem = {
+                    'pdb': pdb.post_mortem
+                }
 
-            # developer config: ui.debugger
-            debugger = ui.config("ui", "debugger")
-            debugmod = pdb
-            if not debugger or ui.plain():
-                # if we are in HGPLAIN mode, then disable custom debugging
-                debugger = 'pdb'
-            elif req.earlyoptions['debugger']:
-                # This import can be slow for fancy debuggers, so only
-                # do it when absolutely necessary, i.e. when actual
-                # debugging has been requested
-                with demandimport.deactivated():
-                    try:
-                        debugmod = __import__(debugger)
-                    except ImportError:
-                        pass # Leave debugmod = pdb
+                # read --config before doing anything else
+                # (e.g. to change trust settings for reading .hg/hgrc)
+                cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
+
+                if req.repo:
+                    # copy configs that were passed on the cmdline (--config) to
+                    # the repo ui
+                    for sec, name, val in cfgs:
+                        req.repo.ui.setconfig(sec, name, val, source='--config')
 
-            debugtrace[debugger] = debugmod.set_trace
-            debugmortem[debugger] = debugmod.post_mortem
+                # developer config: ui.debugger
+                debugger = ui.config("ui", "debugger")
+                debugmod = pdb
+                if not debugger or ui.plain():
+                    # if we are in HGPLAIN mode, then disable custom debugging
+                    debugger = 'pdb'
+                elif req.earlyoptions['debugger']:
+                    # This import can be slow for fancy debuggers, so only
+                    # do it when absolutely necessary, i.e. when actual
+                    # debugging has been requested
+                    with demandimport.deactivated():
+                        try:
+                            debugmod = __import__(debugger)
+                        except ImportError:
+                            pass # Leave debugmod = pdb
 
-            # enter the debugger before command execution
-            if req.earlyoptions['debugger']:
-                ui.warn(_("entering debugger - "
-                        "type c to continue starting hg or h for help\n"))
+                debugtrace[debugger] = debugmod.set_trace
+                debugmortem[debugger] = debugmod.post_mortem
 
-                if (debugger != 'pdb' and
-                    debugtrace[debugger] == debugtrace['pdb']):
-                    ui.warn(_("%s debugger specified "
-                              "but its module was not found\n") % debugger)
-                with demandimport.deactivated():
-                    debugtrace[debugger]()
-            try:
-                return _dispatch(req)
-            finally:
-                ui.flush()
-        except: # re-raises
-            # enter the debugger when we hit an exception
-            if req.earlyoptions['debugger']:
-                traceback.print_exc()
-                debugmortem[debugger](sys.exc_info()[2])
-            raise
+                # enter the debugger before command execution
+                if req.earlyoptions['debugger']:
+                    ui.warn(_("entering debugger - "
+                            "type c to continue starting hg or h for help\n"))
 
-    return _callcatch(ui, _runcatchfunc)
+                    if (debugger != 'pdb' and
+                        debugtrace[debugger] == debugtrace['pdb']):
+                        ui.warn(_("%s debugger specified "
+                                  "but its module was not found\n") % debugger)
+                    with demandimport.deactivated():
+                        debugtrace[debugger]()
+                try:
+                    return _dispatch(req)
+                finally:
+                    ui.flush()
+            except: # re-raises
+                # enter the debugger when we hit an exception
+                if req.earlyoptions['debugger']:
+                    traceback.print_exc()
+                    debugmortem[debugger](sys.exc_info()[2])
+                raise
+        return _callcatch(ui, _runcatchfunc)
 
 def _callcatch(ui, func):
     """like scmutil.callcatch but handles more high-level exceptions about
@@ -370,9 +375,8 @@
             ui.warn(_("hg %s: %s\n") % (inst.args[0], msgbytes))
             commands.help_(ui, inst.args[0], full=False, command=True)
         else:
-            ui.pager('help')
             ui.warn(_("hg: %s\n") % inst.args[1])
-            commands.help_(ui, 'shortlist')
+            ui.warn(_("(use 'hg help -v' for a list of global options)\n"))
     except error.ParseError as inst:
         _formatparse(ui.warn, inst)
         return -1
@@ -394,9 +398,8 @@
                     _reportsimilar(ui.warn, sim)
                     suggested = True
             if not suggested:
-                ui.pager('help')
                 ui.warn(nocmdmsg)
-                commands.help_(ui, 'shortlist')
+                ui.warn(_("(use 'hg help' for a list of commands)\n"))
     except IOError:
         raise
     except KeyboardInterrupt:
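The hgdemandimport.tracing module is only referenced in this hunk; a minimal sketch of what such a context manager could look like (the environment variable, session id, and event format are assumptions) writes paired START/END events to a pipe:

    import contextlib
    import os

    @contextlib.contextmanager
    def log(whence):
        pipe = os.environ.get('HGCATAPULTSERVERPIPE')  # assumed env var
        if not pipe:
            yield
            return
        session = str(os.getpid())  # placeholder session id
        with open(pipe, 'a') as f:
            f.write('START %s %s\n' % (session, whence))
            try:
                yield
            finally:
                f.write('END %s %s\n' % (session, whence))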
--- a/mercurial/encoding.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/encoding.py	Tue Sep 04 12:16:28 2018 -0400
@@ -251,7 +251,7 @@
 def getcols(s, start, c):
     '''Use colwidth to find a c-column substring of s starting at byte
     index start'''
-    for x in xrange(start + c, len(s)):
+    for x in pycompat.xrange(start + c, len(s)):
         t = s[start:x]
         if colwidth(t) == c:
             return t
@@ -346,7 +346,7 @@
     else:
         uslice = lambda i: u[:-i]
         concat = lambda s: s + ellipsis
-    for i in xrange(1, len(u)):
+    for i in pycompat.xrange(1, len(u)):
         usub = uslice(i)
         if ucolwidth(usub) <= width:
             return concat(usub.encode(_sysstr(encoding)))
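For pure-ASCII input, where colwidth counts one display column per byte, getcols degenerates to a plain slice; a quick illustration of the loop's contract:

    # with one column per ASCII byte, getcols(b'abcdef', start=2, c=3)
    # scans forward until colwidth(t) == 3 and returns b'cde'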
--- a/mercurial/error.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/error.py	Tue Sep 04 12:16:28 2018 -0400
@@ -58,6 +58,9 @@
     def __str__(self):
         return RevlogError.__str__(self)
 
+class AmbiguousPrefixLookupError(LookupError):
+    pass
+
 class FilteredLookupError(LookupError):
     pass
 
--- a/mercurial/exchange.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/exchange.py	Tue Sep 04 12:16:28 2018 -0400
@@ -15,6 +15,7 @@
     bin,
     hex,
     nullid,
+    nullrev,
 )
 from .thirdparty import (
     attr,
@@ -27,10 +28,12 @@
     error,
     lock as lockmod,
     logexchange,
+    narrowspec,
     obsolete,
     phases,
     pushkey,
     pycompat,
+    repository,
     scmutil,
     sslutil,
     streamclone,
@@ -44,6 +47,8 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+_NARROWACL_SECTION = 'narrowhgacl'
+
 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {'v1': '01',
                          'v2': '02',
@@ -1427,7 +1432,7 @@
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
-        if changegroup.NARROW_REQUIREMENT in repo.requirements:
+        if repository.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
@@ -1830,6 +1835,176 @@
             pullop.repo.invalidatevolatilesets()
     return tr
 
+def applynarrowacl(repo, kwargs):
+    """Apply narrow fetch access control.
+
+    This massages the named arguments for getbundle wire protocol commands
+    so requested data is filtered through access control rules.
+    """
+    ui = repo.ui
+    # TODO this assumes existence of HTTP and is a layering violation.
+    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
+    user_includes = ui.configlist(
+        _NARROWACL_SECTION, username + '.includes',
+        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
+    user_excludes = ui.configlist(
+        _NARROWACL_SECTION, username + '.excludes',
+        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
+    if not user_includes:
+        raise error.Abort(_("{} configuration for user {} is empty")
+                          .format(_NARROWACL_SECTION, username))
+
+    user_includes = [
+        'path:.' if p == '*' else 'path:' + p for p in user_includes]
+    user_excludes = [
+        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
+
+    req_includes = set(kwargs.get(r'includepats', []))
+    req_excludes = set(kwargs.get(r'excludepats', []))
+
+    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
+        req_includes, req_excludes, user_includes, user_excludes)
+
+    if invalid_includes:
+        raise error.Abort(
+            _("The following includes are not accessible for {}: {}")
+            .format(username, invalid_includes))
+
+    new_args = {}
+    new_args.update(kwargs)
+    new_args[r'narrow'] = True
+    new_args[r'includepats'] = req_includes
+    if req_excludes:
+        new_args[r'excludepats'] = req_excludes
+
+    return new_args
+
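For illustration, a [narrowhgacl] stanza consistent with the lookups above might read as follows (user names and paths are hypothetical; a bare '*' entry expands to 'path:.'):

    [narrowhgacl]
    default.includes = common
    alice.includes = common alice-only
    alice.excludes = alice-only/secrets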
+def _computeellipsis(repo, common, heads, known, match, depth=None):
+    """Compute the shape of a narrowed DAG.
+
+    Args:
+      repo: The repository we're transferring.
+      common: The roots of the DAG range we're transferring.
+              May be just [nullid], which means all ancestors of heads.
+      heads: The heads of the DAG range we're transferring.
+      match: The narrowmatcher that allows us to identify relevant changes.
+      depth: If not None, only consider nodes to be full nodes if they are at
+             most depth changesets away from one of heads.
+
+    Returns:
+      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
+
+        visitnodes: The list of nodes (either full or ellipsis) which
+                    need to be sent to the client.
+        relevant_nodes: The set of changelog nodes which change a file inside
+                 the narrowspec. The client needs these as non-ellipsis nodes.
+        ellipsisroots: A dict of {rev: parents} that is used in
+                       narrowchangegroup to produce ellipsis nodes with the
+                       correct parents.
+    """
+    cl = repo.changelog
+    mfl = repo.manifestlog
+
+    clrev = cl.rev
+
+    commonrevs = {clrev(n) for n in common} | {nullrev}
+    headsrevs = {clrev(n) for n in heads}
+
+    if depth:
+        revdepth = {h: 0 for h in headsrevs}
+
+    ellipsisheads = collections.defaultdict(set)
+    ellipsisroots = collections.defaultdict(set)
+
+    def addroot(head, curchange):
+        """Add a root to an ellipsis head, splitting heads with 3 roots."""
+        ellipsisroots[head].add(curchange)
+        # Recursively split ellipsis heads with 3 roots by finding the
+        # roots' youngest common descendant which is an elided merge commit.
+        # That descendant takes 2 of the 3 roots as its own, and becomes a
+        # root of the head.
+        while len(ellipsisroots[head]) > 2:
+            child, roots = splithead(head)
+            splitroots(head, child, roots)
+            head = child  # Recurse in case we just added a 3rd root
+
+    def splitroots(head, child, roots):
+        ellipsisroots[head].difference_update(roots)
+        ellipsisroots[head].add(child)
+        ellipsisroots[child].update(roots)
+        ellipsisroots[child].discard(child)
+
+    def splithead(head):
+        r1, r2, r3 = sorted(ellipsisroots[head])
+        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
+            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
+                            nr1, head, nr2, head)
+            for j in mid:
+                if j == nr2:
+                    return nr2, (nr1, nr2)
+                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
+                    return j, (nr1, nr2)
+        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
+                            'roots: %d %d %d') % (head, r1, r2, r3))
+
+    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
+    visit = reversed(missing)
+    relevant_nodes = set()
+    visitnodes = [cl.node(m) for m in missing]
+    required = set(headsrevs) | known
+    for rev in visit:
+        clrev = cl.changelogrevision(rev)
+        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
+        if depth is not None:
+            curdepth = revdepth[rev]
+            for p in ps:
+                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
+        needed = False
+        shallow_enough = depth is None or revdepth[rev] <= depth
+        if shallow_enough:
+            curmf = mfl[clrev.manifest].read()
+            if ps:
+                # We choose to not trust the changed files list in
+                # changesets because it's not always correct. TODO: could
+                # we trust it for the non-merge case?
+                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
+                needed = bool(curmf.diff(p1mf, match))
+                if not needed and len(ps) > 1:
+                    # For merge changes, the list of changed files is not
+                    # helpful, since we need to emit the merge if a file
+                    # in the narrow spec has changed on either side of the
+                    # merge. As a result, we do a manifest diff to check.
+                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
+                    needed = bool(curmf.diff(p2mf, match))
+            else:
+                # For a root node, we need to include the node if any
+                # files in the node match the narrowspec.
+                needed = any(curmf.walk(match))
+
+        if needed:
+            for head in ellipsisheads[rev]:
+                addroot(head, rev)
+            for p in ps:
+                required.add(p)
+            relevant_nodes.add(cl.node(rev))
+        else:
+            if not ps:
+                ps = [nullrev]
+            if rev in required:
+                for head in ellipsisheads[rev]:
+                    addroot(head, rev)
+                for p in ps:
+                    ellipsisheads[p].add(rev)
+            else:
+                for p in ps:
+                    ellipsisheads[p] |= ellipsisheads[rev]
+
+    # add common changesets as roots of their reachable ellipsis heads
+    for c in commonrevs:
+        for head in ellipsisheads[c]:
+            addroot(head, c)
+    return visitnodes, relevant_nodes, ellipsisroots
+
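A toy walk shows the revdepth bookkeeping above: on a linear DAG 4 -> 3 -> 2 -> 1 with head 4 and depth limit 2, each parent's depth is the minimum over its children plus one, capped at depth + 1:

    revdepth = {4: 0}
    parents = {4: [3], 3: [2], 2: [1], 1: []}
    depth = 2
    for rev in (4, 3, 2, 1):
        for p in parents[rev]:
            revdepth[p] = min(revdepth[rev] + 1, revdepth.get(p, depth + 1))
    # revdepth == {4: 0, 3: 1, 2: 2, 1: 3}; rev 1 exceeds the limit and is
    # not "shallow enough", so it can only become an ellipsis node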
 def caps20to10(repo, role):
     """return a set with appropriate options to use bundle20 during getbundle"""
     caps = {'HG20'}
@@ -1924,30 +2099,52 @@
 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, heads=None, common=None, **kwargs):
     """add a changegroup part to the requested bundle"""
-    cgstream = None
-    if kwargs.get(r'cg', True):
-        # build changegroup bundle here.
-        version = '01'
-        cgversions = b2caps.get('changegroup')
-        if cgversions:  # 3.1 and 3.2 ship with an empty value
-            cgversions = [v for v in cgversions
-                          if v in changegroup.supportedoutgoingversions(repo)]
-            if not cgversions:
-                raise ValueError(_('no common changegroup version'))
-            version = max(cgversions)
-        outgoing = _computeoutgoing(repo, heads, common)
-        if outgoing.missing:
-            cgstream = changegroup.makestream(repo, outgoing, version, source,
-                                              bundlecaps=bundlecaps)
+    if not kwargs.get(r'cg', True):
+        return
+
+    version = '01'
+    cgversions = b2caps.get('changegroup')
+    if cgversions:  # 3.1 and 3.2 ship with an empty value
+        cgversions = [v for v in cgversions
+                      if v in changegroup.supportedoutgoingversions(repo)]
+        if not cgversions:
+            raise ValueError(_('no common changegroup version'))
+        version = max(cgversions)
+
+    outgoing = _computeoutgoing(repo, heads, common)
+    if not outgoing.missing:
+        return
 
-    if cgstream:
-        part = bundler.newpart('changegroup', data=cgstream)
-        if cgversions:
-            part.addparam('version', version)
-        part.addparam('nbchanges', '%d' % len(outgoing.missing),
-                      mandatory=False)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
+    if kwargs.get(r'narrow', False):
+        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
+        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+        filematcher = narrowspec.match(repo.root, include=include,
+                                       exclude=exclude)
+    else:
+        filematcher = None
+
+    cgstream = changegroup.makestream(repo, outgoing, version, source,
+                                      bundlecaps=bundlecaps,
+                                      filematcher=filematcher)
+
+    part = bundler.newpart('changegroup', data=cgstream)
+    if cgversions:
+        part.addparam('version', version)
+
+    part.addparam('nbchanges', '%d' % len(outgoing.missing),
+                  mandatory=False)
+
+    if 'treemanifest' in repo.requirements:
+        part.addparam('treemanifest', '1')
+
+    if kwargs.get(r'narrow', False) and (include or exclude):
+        narrowspecpart = bundler.newpart('narrow:spec')
+        if include:
+            narrowspecpart.addparam(
+                'include', '\n'.join(include), mandatory=True)
+        if exclude:
+            narrowspecpart.addparam(
+                'exclude', '\n'.join(exclude), mandatory=True)
 
 @getbundle2partsgenerator('bookmarks')
 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
@@ -2069,8 +2266,13 @@
     # Don't send unless:
     # - changesets are being exchanged,
     # - the client supports it.
-    if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
+    # - narrow bundle isn't in play (not currently compatible).
+    if (not kwargs.get(r'cg', True)
+        or 'rev-branch-cache' not in b2caps
+        or kwargs.get(r'narrow', False)
+        or repo.ui.has_section(_NARROWACL_SECTION)):
         return
+
     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addpartrevbranchcache(repo, bundler, outgoing)
 
--- a/mercurial/extensions.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/extensions.py	Tue Sep 04 12:16:28 2018 -0400
@@ -124,7 +124,7 @@
     # note: this ui.debug happens before --debug is processed,
     #       Use --config ui.debug=1 to see them.
     if ui.configbool('devel', 'debug.extensions'):
-        ui.debug('could not import %s (%s): trying %s\n'
+        ui.debug('debug.extensions:     - could not import %s (%s): trying %s\n'
                  % (failed, stringutil.forcebytestr(err), next))
         if ui.debugflag:
             ui.traceback()
@@ -166,7 +166,7 @@
             _rejectunicode(t, o._table)
     _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
 
-def load(ui, name, path):
+def load(ui, name, path, log=lambda *a: None):
     if name.startswith('hgext.') or name.startswith('hgext/'):
         shortname = name[6:]
     else:
@@ -175,8 +175,11 @@
         return None
     if shortname in _extensions:
         return _extensions[shortname]
+    log('  - loading extension: %r\n', shortname)
     _extensions[shortname] = None
-    mod = _importext(name, path, bind(_reportimporterror, ui))
+    with util.timedcm('load extension %r', shortname) as stats:
+        mod = _importext(name, path, bind(_reportimporterror, ui))
+    log('  > %r extension loaded in %s\n', shortname, stats)
 
     # Before we do anything with the extension, check against minimum stated
     # compatibility. This gives extension authors a mechanism to have their
@@ -187,12 +190,16 @@
         ui.warn(_('(third party extension %s requires version %s or newer '
                   'of Mercurial; disabling)\n') % (shortname, minver))
         return
+    log('    - validating extension tables: %r\n', shortname)
     _validatetables(ui, mod)
 
     _extensions[shortname] = mod
     _order.append(shortname)
-    for fn in _aftercallbacks.get(shortname, []):
-        fn(loaded=True)
+    log('    - invoking registered callbacks: %r\n', shortname)
+    with util.timedcm('callbacks extension %r', shortname) as stats:
+        for fn in _aftercallbacks.get(shortname, []):
+            fn(loaded=True)
+    log('    > callbacks completed in %s\n', stats)
     return mod
 
 def _runuisetup(name, ui):
@@ -225,28 +232,41 @@
     return True
 
 def loadall(ui, whitelist=None):
+    if ui.configbool('devel', 'debug.extensions'):
+        log = lambda msg, *values: ui.debug('debug.extensions: ',
+            msg % values, label='debug.extensions')
+    else:
+        log = lambda *a, **kw: None
     result = ui.configitems("extensions")
     if whitelist is not None:
         result = [(k, v) for (k, v) in result if k in whitelist]
     newindex = len(_order)
-    for (name, path) in result:
-        if path:
-            if path[0:1] == '!':
-                _disabledextensions[name] = path[1:]
-                continue
-        try:
-            load(ui, name, path)
-        except Exception as inst:
-            msg = stringutil.forcebytestr(inst)
+    log('loading %sextensions\n', 'additional ' if newindex else '')
+    log('- processing %d entries\n', len(result))
+    with util.timedcm('load all extensions') as stats:
+        for (name, path) in result:
             if path:
-                ui.warn(_("*** failed to import extension %s from %s: %s\n")
-                        % (name, path, msg))
-            else:
-                ui.warn(_("*** failed to import extension %s: %s\n")
-                        % (name, msg))
-            if isinstance(inst, error.Hint) and inst.hint:
-                ui.warn(_("*** (%s)\n") % inst.hint)
-            ui.traceback()
+                if path[0:1] == '!':
+                    if name not in _disabledextensions:
+                        log('  - skipping disabled extension: %r\n', name)
+                    _disabledextensions[name] = path[1:]
+                    continue
+            try:
+                load(ui, name, path, log)
+            except Exception as inst:
+                msg = stringutil.forcebytestr(inst)
+                if path:
+                    ui.warn(_("*** failed to import extension %s from %s: %s\n")
+                            % (name, path, msg))
+                else:
+                    ui.warn(_("*** failed to import extension %s: %s\n")
+                            % (name, msg))
+                if isinstance(inst, error.Hint) and inst.hint:
+                    ui.warn(_("*** (%s)\n") % inst.hint)
+                ui.traceback()
+
+    log('> loaded %d extensions, total time %s\n',
+        len(_order) - newindex, stats)
     # list of (objname, loadermod, loadername) tuple:
     # - objname is the name of an object in extension module,
     #   from which extra information is loaded
@@ -258,29 +278,47 @@
     earlyextraloaders = [
         ('configtable', configitems, 'loadconfigtable'),
     ]
+
+    log('- loading configtable attributes\n')
     _loadextra(ui, newindex, earlyextraloaders)
 
     broken = set()
+    log('- executing uisetup hooks\n')
     for name in _order[newindex:]:
-        if not _runuisetup(name, ui):
-            broken.add(name)
+        log('  - running uisetup for %r\n', name)
+        with util.timedcm('uisetup %r', name) as stats:
+            if not _runuisetup(name, ui):
+                log('    - the %r extension uisetup failed\n', name)
+                broken.add(name)
+        log('  > uisetup for %r took %s\n', name, stats)
 
+    log('- executing extsetup hooks\n')
     for name in _order[newindex:]:
         if name in broken:
             continue
-        if not _runextsetup(name, ui):
-            broken.add(name)
+        log('  - running extsetup for %r\n', name)
+        with util.timedcm('extsetup %r', name) as stats:
+            if not _runextsetup(name, ui):
+                log('    - the %r extension extsetup failed\n', name)
+                broken.add(name)
+        log('  > extsetup for %r took %s\n', name, stats)
 
     for name in broken:
+        log('    - disabling broken %r extension\n', name)
         _extensions[name] = None
 
     # Call aftercallbacks that were never met.
-    for shortname in _aftercallbacks:
-        if shortname in _extensions:
-            continue
+    log('- executing remaining aftercallbacks\n')
+    with util.timedcm('aftercallbacks') as stats:
+        for shortname in _aftercallbacks:
+            if shortname in _extensions:
+                continue
 
-        for fn in _aftercallbacks[shortname]:
-            fn(loaded=False)
+            for fn in _aftercallbacks[shortname]:
+                log('  - extension %r not loaded, notify callbacks\n',
+                    shortname)
+                fn(loaded=False)
+    log('> remaining aftercallbacks completed in %s\n', stats)
 
     # loadall() is called multiple times and lingering _aftercallbacks
     # entries could result in double execution. See issue4646.
@@ -304,6 +342,7 @@
     # - loadermod is the module where loader is placed
     # - loadername is the name of the function,
     #   which takes (ui, extensionname, extraobj) arguments
+    log('- loading extension registration objects\n')
     extraloaders = [
         ('cmdtable', commands, 'loadcmdtable'),
         ('colortable', color, 'loadcolortable'),
@@ -314,7 +353,10 @@
         ('templatefunc', templatefuncs, 'loadfunction'),
         ('templatekeyword', templatekw, 'loadkeyword'),
     ]
-    _loadextra(ui, newindex, extraloaders)
+    with util.timedcm('load registration objects') as stats:
+        _loadextra(ui, newindex, extraloaders)
+    log('> extension registration object loading took %s\n', stats)
+    log('extension loading complete\n')
 
 def _loadextra(ui, newindex, extraloaders):
     for name in _order[newindex:]:
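util.timedcm is used throughout this hunk as a timing context manager whose result formats with %s; a minimal sketch of that contract (the real helper lives in mercurial.util and likely records more detail):

    import contextlib
    import time

    @contextlib.contextmanager
    def timedcm(whencefmt, *whenceargs):
        class timedstats(object):
            elapsed = 0.0
            def __str__(self):
                return '%.3f sec' % self.elapsed
        stats = timedstats()
        start = time.time()
        try:
            yield stats
        finally:
            stats.elapsed = time.time() - start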
--- a/mercurial/filelog.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/filelog.py	Tue Sep 04 12:16:28 2018 -0400
@@ -26,7 +26,6 @@
         self.filename = path
         self.index = self._revlog.index
         self.version = self._revlog.version
-        self.storedeltachains = self._revlog.storedeltachains
         self._generaldelta = self._revlog._generaldelta
 
     def __len__(self):
@@ -77,9 +76,6 @@
     def deltaparent(self, rev):
         return self._revlog.deltaparent(rev)
 
-    def candelta(self, baserev, rev):
-        return self._revlog.candelta(baserev, rev)
-
     def iscensored(self, rev):
         return self._revlog.iscensored(rev)
 
@@ -95,6 +91,9 @@
     def revdiff(self, rev1, rev2):
         return self._revlog.revdiff(rev1, rev2)
 
+    def emitrevisiondeltas(self, requests):
+        return self._revlog.emitrevisiondeltas(requests)
+
     def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                     node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                     cachedelta=None):
@@ -206,47 +205,6 @@
     def opener(self):
         return self._revlog.opener
 
-    @property
-    def _lazydeltabase(self):
-        return self._revlog._lazydeltabase
-
-    @_lazydeltabase.setter
-    def _lazydeltabase(self, value):
-        self._revlog._lazydeltabase = value
-
-    @property
-    def _deltabothparents(self):
-        return self._revlog._deltabothparents
-
-    @_deltabothparents.setter
-    def _deltabothparents(self, value):
-        self._revlog._deltabothparents = value
-
-    @property
-    def _inline(self):
-        return self._revlog._inline
-
-    @property
-    def _withsparseread(self):
-        return getattr(self._revlog, '_withsparseread', False)
-
-    @property
-    def _srmingapsize(self):
-        return self._revlog._srmingapsize
-
-    @property
-    def _srdensitythreshold(self):
-        return self._revlog._srdensitythreshold
-
-    def _deltachain(self, rev, stoprev=None):
-        return self._revlog._deltachain(rev, stoprev)
-
-    def chainbase(self, rev):
-        return self._revlog.chainbase(rev)
-
-    def chainlen(self, rev):
-        return self._revlog.chainlen(rev)
-
     def clone(self, tr, destrevlog, **kwargs):
         if not isinstance(destrevlog, filelog):
             raise error.ProgrammingError('expected filelog to clone()')
--- a/mercurial/filemerge.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/filemerge.py	Tue Sep 04 12:16:28 2018 -0400
@@ -56,12 +56,14 @@
 fullmerge = internaltool.fullmerge # both premerge and merge
 
 _localchangedotherdeletedmsg = _(
-    "local%(l)s changed %(fd)s which other%(o)s deleted\n"
+    "file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
+    "What do you want to do?\n"
     "use (c)hanged version, (d)elete, or leave (u)nresolved?"
     "$$ &Changed $$ &Delete $$ &Unresolved")
 
 _otherchangedlocaldeletedmsg = _(
-    "other%(o)s changed %(fd)s which local%(l)s deleted\n"
+    "file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
+    "What do you want to do?\n"
     "use (c)hanged version, leave (d)eleted, or "
     "leave (u)nresolved?"
     "$$ &Changed $$ &Deleted $$ &Unresolved")
@@ -137,6 +139,13 @@
     return procutil.findexe(util.expandpath(exe))
 
 def _picktool(repo, ui, path, binary, symlink, changedelete):
+    strictcheck = ui.configbool('merge', 'strict-capability-check')
+
+    def hascapability(tool, capability, strict=False):
+        if tool in internals:
+            return strict and internals[tool].capabilities.get(capability)
+        return _toolbool(ui, tool, capability)
+
     def supportscd(tool):
         return tool in internals and internals[tool].mergetype == nomerge
 
@@ -149,9 +158,9 @@
                 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
             else: # configured but non-existing tools are more silent
                 ui.note(_("couldn't find merge tool %s\n") % tmsg)
-        elif symlink and not _toolbool(ui, tool, "symlink"):
+        elif symlink and not hascapability(tool, "symlink", strictcheck):
             ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
-        elif binary and not _toolbool(ui, tool, "binary"):
+        elif binary and not hascapability(tool, "binary", strictcheck):
             ui.warn(_("tool %s can't handle binary\n") % tmsg)
         elif changedelete and not supportscd(tool):
             # the nomerge tools are the only tools that support change/delete
@@ -186,9 +195,19 @@
             return (hgmerge, hgmerge)
 
     # then patterns
+
+    # whether binary capability should be checked strictly
+    binarycap = binary and strictcheck
+
     for pat, tool in ui.configitems("merge-patterns"):
         mf = match.match(repo.root, '', [pat])
-        if mf(path) and check(tool, pat, symlink, False, changedelete):
+        if mf(path) and check(tool, pat, symlink, binarycap, changedelete):
+            if binary and not hascapability(tool, "binary", strict=True):
+                ui.warn(_("warning: check your merge-patterns configuration;"
+                          " %r for binary file %r may be unintentional\n"
+                          "(see 'hg help merge-tools'"
+                          " for the binary files capability)\n")
+                        % (pycompat.bytestr(tool), pycompat.bytestr(path)))
             toolpath = _findtool(ui, tool)
             return (tool, _quotetoolpath(toolpath))
 
@@ -208,9 +227,10 @@
     if uimerge:
         # external tools defined in uimerge won't be able to handle
         # change/delete conflicts
-        if uimerge not in names and not changedelete:
-            return (uimerge, uimerge)
-        tools.insert(0, (None, uimerge)) # highest priority
+        if check(uimerge, path, symlink, binary, changedelete):
+            if uimerge not in names and not changedelete:
+                return (uimerge, uimerge)
+            tools.insert(0, (None, uimerge)) # highest priority
     tools.append((None, "hgmerge")) # the old default, if found
     for p, t in tools:
         if check(t, None, symlink, binary, changedelete):
@@ -469,7 +489,7 @@
     success, status = tagmerge.merge(repo, fcd, fco, fca)
     return success, status, False
 
-@internaltool('dump', fullmerge)
+@internaltool('dump', fullmerge, binary=True, symlink=True)
 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Creates three versions of the files to merge, containing the
@@ -495,7 +515,7 @@
     repo.wwrite(fd + ".base", fca.data(), fca.flags())
     return False, 1, False
 
-@internaltool('forcedump', mergeonly)
+@internaltool('forcedump', mergeonly, binary=True, symlink=True)
 def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
                 labels=None):
     """
@@ -916,14 +936,17 @@
         _haltmerge()
     # default action is 'continue', in which case we neither prompt nor halt
 
+def hasconflictmarkers(data):
+    return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data,
+                          re.MULTILINE))
+
 def _check(repo, r, ui, tool, fcd, files):
     fd = fcd.path()
     unused, unused, unused, back = files
 
     if not r and (_toolbool(ui, tool, "checkconflicts") or
                   'conflicts' in _toollist(ui, tool, "check")):
-        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
-                     re.MULTILINE):
+        if hasconflictmarkers(fcd.data()):
             r = 1
 
     checked = False
@@ -967,6 +990,24 @@
         internals['internal:' + name] = func
         internalsdoc[fullname] = func
 
+        capabilities = sorted([k for k, v in func.capabilities.items() if v])
+        if capabilities:
+            capdesc = "    (actual capabilities: %s)" % ', '.join(capabilities)
+            func.__doc__ = (func.__doc__ +
+                            pycompat.sysstr("\n\n%s" % capdesc))
+
+    # to put i18n comments into hg.pot for automatically generated texts
+
+    # i18n: "binary" and "symlink" are keywords
+    # i18n: this text is added automatically
+    _("    (actual capabilities: binary, symlink)")
+    # i18n: "binary" is a keyword
+    # i18n: this text is added automatically
+    _("    (actual capabilities: binary)")
+    # i18n: "symlink" is a keyword
+    # i18n: this text is added automatically
+    _("    (actual capabilities: symlink)")
+
 # load built-in merge tools explicitly to setup internalsdoc
 loadinternalmerge(None, None, internaltool)
 
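As a quick check of the refactored helper above, the pattern matches the standard seven-character markers at line start (a usage sketch):

    merged = ("kept line\n"
              "<<<<<<< local\n"
              "our side\n"
              "=======\n"
              "their side\n"
              ">>>>>>> other\n")
    assert hasconflictmarkers(merged)
    assert not hasconflictmarkers("no markers here\n")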
--- a/mercurial/fileset.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/fileset.py	Tue Sep 04 12:16:28 2018 -0400
@@ -13,9 +13,9 @@
 from .i18n import _
 from . import (
     error,
+    filesetlang,
     match as matchmod,
     merge,
-    parser,
     pycompat,
     registrar,
     scmutil,
@@ -25,126 +25,28 @@
     stringutil,
 )
 
-elements = {
-    # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    ":": (15, None, None, ("kindpat", 15), None),
-    "-": (5, None, ("negate", 19), ("minus", 5), None),
-    "not": (10, None, ("not", 10), None, None),
-    "!": (10, None, ("not", 10), None, None),
-    "and": (5, None, None, ("and", 5), None),
-    "&": (5, None, None, ("and", 5), None),
-    "or": (4, None, None, ("or", 4), None),
-    "|": (4, None, None, ("or", 4), None),
-    "+": (4, None, None, ("or", 4), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "end": (0, None, None, None, None),
-}
-
-keywords = {'and', 'or', 'not'}
-
-globchars = ".*{}[]?/\\_"
+# common weight constants
+_WEIGHT_CHECK_FILENAME = filesetlang.WEIGHT_CHECK_FILENAME
+_WEIGHT_READ_CONTENTS = filesetlang.WEIGHT_READ_CONTENTS
+_WEIGHT_STATUS = filesetlang.WEIGHT_STATUS
+_WEIGHT_STATUS_THOROUGH = filesetlang.WEIGHT_STATUS_THOROUGH
 
-def tokenize(program):
-    pos, l = 0, len(program)
-    program = pycompat.bytestr(program)
-    while pos < l:
-        c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
-            pass
-        elif c in "(),-:|&+!": # handle simple operators
-            yield (c, None, pos)
-        elif (c in '"\'' or c == 'r' and
-              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
-            if c == 'r':
-                pos += 1
-                c = program[pos]
-                decode = lambda x: x
-            else:
-                decode = parser.unescapestr
-            pos += 1
-            s = pos
-            while pos < l: # find closing quote
-                d = program[pos]
-                if d == '\\': # skip over escaped characters
-                    pos += 2
-                    continue
-                if d == c:
-                    yield ('string', decode(program[s:pos]), s)
-                    break
-                pos += 1
-            else:
-                raise error.ParseError(_("unterminated string"), s)
-        elif c.isalnum() or c in globchars or ord(c) > 127:
-            # gather up a symbol/keyword
-            s = pos
-            pos += 1
-            while pos < l: # find end of symbol
-                d = program[pos]
-                if not (d.isalnum() or d in globchars or ord(d) > 127):
-                    break
-                pos += 1
-            sym = program[s:pos]
-            if sym in keywords: # operator keywords
-                yield (sym, None, s)
-            else:
-                yield ('symbol', sym, s)
-            pos -= 1
-        else:
-            raise error.ParseError(_("syntax error"), pos)
-        pos += 1
-    yield ('end', None, pos)
-
-def parse(expr):
-    p = parser.parser(elements)
-    tree, pos = p.parse(tokenize(expr))
-    if pos != len(expr):
-        raise error.ParseError(_("invalid token"), pos)
-    return tree
-
-def getsymbol(x):
-    if x and x[0] == 'symbol':
-        return x[1]
-    raise error.ParseError(_('not a symbol'))
-
-def getstring(x, err):
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
-        return x[1]
-    raise error.ParseError(err)
-
-def _getkindpat(x, y, allkinds, err):
-    kind = getsymbol(x)
-    pat = getstring(y, err)
-    if kind not in allkinds:
-        raise error.ParseError(_("invalid pattern kind: %s") % kind)
-    return '%s:%s' % (kind, pat)
-
-def getpattern(x, allkinds, err):
-    if x and x[0] == 'kindpat':
-        return _getkindpat(x[1], x[2], allkinds, err)
-    return getstring(x, err)
-
-def getlist(x):
-    if not x:
-        return []
-    if x[0] == 'list':
-        return getlist(x[1]) + [x[2]]
-    return [x]
-
-def getargs(x, min, max, err):
-    l = getlist(x)
-    if len(l) < min or len(l) > max:
-        raise error.ParseError(err)
-    return l
+# helpers for processing parsed tree
+getsymbol = filesetlang.getsymbol
+getstring = filesetlang.getstring
+_getkindpat = filesetlang.getkindpat
+getpattern = filesetlang.getpattern
+getargs = filesetlang.getargs
 
 def getmatch(mctx, x):
     if not x:
         raise error.ParseError(_("missing argument"))
     return methods[x[0]](mctx, *x[1:])
 
+def getmatchwithstatus(mctx, x, hint):
+    keys = set(getstring(hint, 'status hint must be a string').split())
+    return getmatch(mctx.withstatus(keys), x)
+
 def stringmatch(mctx, x):
     return mctx.matcher([x])
 
@@ -152,15 +54,20 @@
     return stringmatch(mctx, _getkindpat(x, y, matchmod.allpatternkinds,
                                          _("pattern must be a string")))
 
+def patternsmatch(mctx, *xs):
+    allkinds = matchmod.allpatternkinds
+    patterns = [getpattern(x, allkinds, _("pattern must be a string"))
+                for x in xs]
+    return mctx.matcher(patterns)
+
 def andmatch(mctx, x, y):
     xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
+    ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.intersectmatchers(xm, ym)
 
-def ormatch(mctx, x, y):
-    xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
-    return matchmod.unionmatcher([xm, ym])
+def ormatch(mctx, *xs):
+    ms = [getmatch(mctx, x) for x in xs]
+    return matchmod.unionmatcher(ms)
 
 def notmatch(mctx, x):
     m = getmatch(mctx, x)
@@ -168,15 +75,12 @@
 
 def minusmatch(mctx, x, y):
     xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
+    ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.differencematcher(xm, ym)
 
-def negatematch(mctx, x):
-    raise error.ParseError(_("can't use negate operator in this context"))
-
-def listmatch(mctx, x, y):
+def listmatch(mctx, *xs):
     raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see hg help "filesets.x or y"'))
+                           hint=_('see \'hg help "filesets.x or y"\''))
 
 def func(mctx, a, b):
     funcname = getsymbol(a)
@@ -193,14 +97,11 @@
 # with:
 #  mctx - current matchctx instance
 #  x - argument in tree form
-symbols = {}
+symbols = filesetlang.symbols
 
-# filesets using matchctx.status()
-_statuscallers = set()
+predicate = registrar.filesetpredicate(symbols)
 
-predicate = registrar.filesetpredicate()
-
-@predicate('modified()', callstatus=True)
+@predicate('modified()', callstatus=True, weight=_WEIGHT_STATUS)
 def modified(mctx, x):
     """File that is modified according to :hg:`status`.
     """
@@ -209,7 +110,7 @@
     s = set(mctx.status().modified)
     return mctx.predicate(s.__contains__, predrepr='modified')
 
-@predicate('added()', callstatus=True)
+@predicate('added()', callstatus=True, weight=_WEIGHT_STATUS)
 def added(mctx, x):
     """File that is added according to :hg:`status`.
     """
@@ -218,7 +119,7 @@
     s = set(mctx.status().added)
     return mctx.predicate(s.__contains__, predrepr='added')
 
-@predicate('removed()', callstatus=True)
+@predicate('removed()', callstatus=True, weight=_WEIGHT_STATUS)
 def removed(mctx, x):
     """File that is removed according to :hg:`status`.
     """
@@ -227,7 +128,7 @@
     s = set(mctx.status().removed)
     return mctx.predicate(s.__contains__, predrepr='removed')
 
-@predicate('deleted()', callstatus=True)
+@predicate('deleted()', callstatus=True, weight=_WEIGHT_STATUS)
 def deleted(mctx, x):
     """Alias for ``missing()``.
     """
@@ -236,7 +137,7 @@
     s = set(mctx.status().deleted)
     return mctx.predicate(s.__contains__, predrepr='deleted')
 
-@predicate('missing()', callstatus=True)
+@predicate('missing()', callstatus=True, weight=_WEIGHT_STATUS)
 def missing(mctx, x):
     """File that is missing according to :hg:`status`.
     """
@@ -245,7 +146,7 @@
     s = set(mctx.status().deleted)
     return mctx.predicate(s.__contains__, predrepr='deleted')
 
-@predicate('unknown()', callstatus=True)
+@predicate('unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def unknown(mctx, x):
     """File that is unknown according to :hg:`status`."""
     # i18n: "unknown" is a keyword
@@ -253,7 +154,7 @@
     s = set(mctx.status().unknown)
     return mctx.predicate(s.__contains__, predrepr='unknown')
 
-@predicate('ignored()', callstatus=True)
+@predicate('ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def ignored(mctx, x):
     """File that is ignored according to :hg:`status`."""
     # i18n: "ignored" is a keyword
@@ -261,7 +162,7 @@
     s = set(mctx.status().ignored)
     return mctx.predicate(s.__contains__, predrepr='ignored')
 
-@predicate('clean()', callstatus=True)
+@predicate('clean()', callstatus=True, weight=_WEIGHT_STATUS)
 def clean(mctx, x):
     """File that is clean according to :hg:`status`.
     """
@@ -277,7 +178,7 @@
     getargs(x, 0, 0, _("tracked takes no arguments"))
     return mctx.predicate(mctx.ctx.__contains__, predrepr='tracked')
 
-@predicate('binary()')
+@predicate('binary()', weight=_WEIGHT_READ_CONTENTS)
 def binary(mctx, x):
     """File that appears to be binary (contains NUL bytes).
     """
@@ -304,7 +205,7 @@
     ctx = mctx.ctx
     return mctx.predicate(lambda f: ctx.flags(f) == 'l', predrepr='symlink')
 
-@predicate('resolved()')
+@predicate('resolved()', weight=_WEIGHT_STATUS)
 def resolved(mctx, x):
     """File that is marked resolved according to :hg:`resolve -l`.
     """
@@ -316,7 +217,7 @@
     return mctx.predicate(lambda f: f in ms and ms[f] == 'r',
                           predrepr='resolved')
 
-@predicate('unresolved()')
+@predicate('unresolved()', weight=_WEIGHT_STATUS)
 def unresolved(mctx, x):
     """File that is marked unresolved according to :hg:`resolve -l`.
     """
@@ -328,7 +229,7 @@
     return mctx.predicate(lambda f: f in ms and ms[f] == 'u',
                           predrepr='unresolved')
 
-@predicate('hgignore()')
+@predicate('hgignore()', weight=_WEIGHT_STATUS)
 def hgignore(mctx, x):
     """File that matches the active .hgignore pattern.
     """
@@ -336,7 +237,7 @@
     getargs(x, 0, 0, _("hgignore takes no arguments"))
     return mctx.ctx.repo().dirstate._ignore
 
-@predicate('portable()')
+@predicate('portable()', weight=_WEIGHT_CHECK_FILENAME)
 def portable(mctx, x):
     """File that has a portable name. (This doesn't include filenames with case
     collisions.)
@@ -346,7 +247,7 @@
     return mctx.predicate(lambda f: util.checkwinfilename(f) is None,
                           predrepr='portable')
 
-@predicate('grep(regex)')
+@predicate('grep(regex)', weight=_WEIGHT_READ_CONTENTS)
 def grep(mctx, x):
     """File contains the given regular expression.
     """
@@ -400,7 +301,7 @@
         b = _sizetomax(expr)
         return lambda x: x >= a and x <= b
 
-@predicate('size(expression)')
+@predicate('size(expression)', weight=_WEIGHT_STATUS)
 def size(mctx, x):
     """File size matches the given expression. Examples:
 
@@ -415,7 +316,7 @@
     return mctx.fpredicate(lambda fctx: m(fctx.size()),
                            predrepr=('size(%r)', expr), cache=True)
 
-@predicate('encoding(name)')
+@predicate('encoding(name)', weight=_WEIGHT_READ_CONTENTS)
 def encoding(mctx, x):
     """File can be successfully decoded with the given character
     encoding. May not be useful for encodings other than ASCII and
@@ -437,7 +338,7 @@
 
     return mctx.fpredicate(encp, predrepr=('encoding(%r)', enc), cache=True)
 
-@predicate('eol(style)')
+@predicate('eol(style)', weight=_WEIGHT_READ_CONTENTS)
 def eol(mctx, x):
     """File contains newlines of the given style (dos, unix, mac). Binary
     files are excluded, files with mixed line endings match multiple
@@ -471,7 +372,7 @@
         return p and p[0].path() != fctx.path()
     return mctx.fpredicate(copiedp, predrepr='copied', cache=True)
 
-@predicate('revs(revs, pattern)')
+@predicate('revs(revs, pattern)', weight=_WEIGHT_STATUS)
 def revs(mctx, x):
     """Evaluate set in the specified revisions. If the revset match multiple
     revs, this will return file matching pattern in any of the revision.
@@ -486,14 +387,15 @@
     matchers = []
     for r in revs:
         ctx = repo[r]
-        matchers.append(getmatch(mctx.switch(ctx, _buildstatus(ctx, x)), x))
+        mc = mctx.switch(ctx.p1(), ctx)
+        matchers.append(getmatch(mc, x))
     if not matchers:
         return mctx.never()
     if len(matchers) == 1:
         return matchers[0]
     return matchmod.unionmatcher(matchers)
 
-@predicate('status(base, rev, pattern)')
+@predicate('status(base, rev, pattern)', weight=_WEIGHT_STATUS)
 def status(mctx, x):
     """Evaluate predicate using status change between ``base`` and
     ``rev``. Examples:
@@ -513,7 +415,8 @@
     if not revspec:
         raise error.ParseError(reverr)
     basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec])
-    return getmatch(mctx.switch(ctx, _buildstatus(ctx, x, basectx=basectx)), x)
+    mc = mctx.switch(basectx, ctx)
+    return getmatch(mc, x)
 
 @predicate('subrepo([pattern])')
 def subrepo(mctx, x):
@@ -539,24 +442,52 @@
         return mctx.predicate(sstate.__contains__, predrepr='subrepo')
 
 methods = {
+    'withstatus': getmatchwithstatus,
     'string': stringmatch,
     'symbol': stringmatch,
     'kindpat': kindpatmatch,
+    'patterns': patternsmatch,
     'and': andmatch,
     'or': ormatch,
     'minus': minusmatch,
-    'negate': negatematch,
     'list': listmatch,
-    'group': getmatch,
     'not': notmatch,
     'func': func,
 }
 
 class matchctx(object):
-    def __init__(self, ctx, status=None, badfn=None):
+    def __init__(self, basectx, ctx, badfn=None):
+        self._basectx = basectx
         self.ctx = ctx
-        self._status = status
         self._badfn = badfn
+        self._match = None
+        self._status = None
+
+    def narrowed(self, match):
+        """Create matchctx for a sub-tree narrowed by the given matcher"""
+        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx._match = match
+        # leave the wider status, which we don't have to care about
+        mctx._status = self._status
+        return mctx
+
+    def switch(self, basectx, ctx):
+        mctx = matchctx(basectx, ctx, self._badfn)
+        mctx._match = self._match
+        return mctx
+
+    def withstatus(self, keys):
+        """Create matchctx which has precomputed status specified by the keys"""
+        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx._match = self._match
+        mctx._buildstatus(keys)
+        return mctx
+
+    def _buildstatus(self, keys):
+        self._status = self._basectx.status(self.ctx, self._match,
+                                            listignored='ignored' in keys,
+                                            listclean='clean' in keys,
+                                            listunknown='unknown' in keys)
 
     def status(self):
         return self._status
@@ -612,62 +543,20 @@
         return matchmod.nevermatcher(repo.root, repo.getcwd(),
                                      badfn=self._badfn)
 
-    def switch(self, ctx, status=None):
-        return matchctx(ctx, status, self._badfn)
-
-# filesets using matchctx.switch()
-_switchcallers = [
-    'revs',
-    'status',
-]
-
-def _intree(funcs, tree):
-    if isinstance(tree, tuple):
-        if tree[0] == 'func' and tree[1][0] == 'symbol':
-            if tree[1][1] in funcs:
-                return True
-            if tree[1][1] in _switchcallers:
-                # arguments won't be evaluated in the current context
-                return False
-        for s in tree[1:]:
-            if _intree(funcs, s):
-                return True
-    return False
-
 def match(ctx, expr, badfn=None):
     """Create a matcher for a single fileset expression"""
-    tree = parse(expr)
-    mctx = matchctx(ctx, _buildstatus(ctx, tree), badfn=badfn)
+    tree = filesetlang.parse(expr)
+    tree = filesetlang.analyze(tree)
+    tree = filesetlang.optimize(tree)
+    mctx = matchctx(ctx.p1(), ctx, badfn=badfn)
     return getmatch(mctx, tree)
 
-def _buildstatus(ctx, tree, basectx=None):
-    # do we need status info?
-
-    if _intree(_statuscallers, tree):
-        unknown = _intree(['unknown'], tree)
-        ignored = _intree(['ignored'], tree)
-
-        r = ctx.repo()
-        if basectx is None:
-            basectx = ctx.p1()
-        return r.status(basectx, ctx,
-                        unknown=unknown, ignored=ignored, clean=True)
-    else:
-        return None
-
-def prettyformat(tree):
-    return parser.prettyformat(tree, ('string', 'symbol'))
 
 def loadpredicate(ui, extname, registrarobj):
     """Load fileset predicates from specified registrarobj
     """
     for name, func in registrarobj._table.iteritems():
         symbols[name] = func
-        if func._callstatus:
-            _statuscallers.add(name)
-
-# load built-in predicates explicitly to setup _statuscallers
-loadpredicate(None, None, predicate)
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = symbols.values()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/filesetlang.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,330 @@
+# filesetlang.py - parser, tokenizer and utility for file set language
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .i18n import _
+from . import (
+    error,
+    parser,
+    pycompat,
+)
+
+# common weight constants for static optimization
+# (see registrar.filesetpredicate for details)
+WEIGHT_CHECK_FILENAME = 0.5
+WEIGHT_READ_CONTENTS = 30
+WEIGHT_STATUS = 10
+WEIGHT_STATUS_THOROUGH = 50
+
+elements = {
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    ":": (15, None, None, ("kindpat", 15), None),
+    "-": (5, None, ("negate", 19), ("minus", 5), None),
+    "not": (10, None, ("not", 10), None, None),
+    "!": (10, None, ("not", 10), None, None),
+    "and": (5, None, None, ("and", 5), None),
+    "&": (5, None, None, ("and", 5), None),
+    "or": (4, None, None, ("or", 4), None),
+    "|": (4, None, None, ("or", 4), None),
+    "+": (4, None, None, ("or", 4), None),
+    ",": (2, None, None, ("list", 2), None),
+    ")": (0, None, None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "end": (0, None, None, None, None),
+}
+
+keywords = {'and', 'or', 'not'}
+
+symbols = {}
+
+globchars = ".*{}[]?/\\_"
+
+def tokenize(program):
+    pos, l = 0, len(program)
+    program = pycompat.bytestr(program)
+    while pos < l:
+        c = program[pos]
+        if c.isspace(): # skip inter-token whitespace
+            pass
+        elif c in "(),-:|&+!": # handle simple operators
+            yield (c, None, pos)
+        elif (c in '"\'' or c == 'r' and
+              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
+            if c == 'r':
+                pos += 1
+                c = program[pos]
+                decode = lambda x: x
+            else:
+                decode = parser.unescapestr
+            pos += 1
+            s = pos
+            while pos < l: # find closing quote
+                d = program[pos]
+                if d == '\\': # skip over escaped characters
+                    pos += 2
+                    continue
+                if d == c:
+                    yield ('string', decode(program[s:pos]), s)
+                    break
+                pos += 1
+            else:
+                raise error.ParseError(_("unterminated string"), s)
+        elif c.isalnum() or c in globchars or ord(c) > 127:
+            # gather up a symbol/keyword
+            s = pos
+            pos += 1
+            while pos < l: # find end of symbol
+                d = program[pos]
+                if not (d.isalnum() or d in globchars or ord(d) > 127):
+                    break
+                pos += 1
+            sym = program[s:pos]
+            if sym in keywords: # operator keywords
+                yield (sym, None, s)
+            else:
+                yield ('symbol', sym, s)
+            pos -= 1
+        else:
+            raise error.ParseError(_("syntax error"), pos)
+        pos += 1
+    yield ('end', None, pos)
+
+def parse(expr):
+    p = parser.parser(elements)
+    tree, pos = p.parse(tokenize(expr))
+    if pos != len(expr):
+        raise error.ParseError(_("invalid token"), pos)
+    return parser.simplifyinfixops(tree, {'list', 'or'})
+
+def getsymbol(x):
+    if x and x[0] == 'symbol':
+        return x[1]
+    raise error.ParseError(_('not a symbol'))
+
+def getstring(x, err):
+    if x and (x[0] == 'string' or x[0] == 'symbol'):
+        return x[1]
+    raise error.ParseError(err)
+
+def getkindpat(x, y, allkinds, err):
+    kind = getsymbol(x)
+    pat = getstring(y, err)
+    if kind not in allkinds:
+        raise error.ParseError(_("invalid pattern kind: %s") % kind)
+    return '%s:%s' % (kind, pat)
+
+def getpattern(x, allkinds, err):
+    if x and x[0] == 'kindpat':
+        return getkindpat(x[1], x[2], allkinds, err)
+    return getstring(x, err)
+
+def getlist(x):
+    if not x:
+        return []
+    if x[0] == 'list':
+        return list(x[1:])
+    return [x]
+
+def getargs(x, min, max, err):
+    l = getlist(x)
+    if len(l) < min or len(l) > max:
+        raise error.ParseError(err)
+    return l
+
+def _analyze(x):
+    if x is None:
+        return x
+
+    op = x[0]
+    if op in {'string', 'symbol'}:
+        return x
+    if op == 'kindpat':
+        getsymbol(x[1])  # kind must be a symbol
+        t = _analyze(x[2])
+        return (op, x[1], t)
+    if op == 'group':
+        return _analyze(x[1])
+    if op == 'negate':
+        raise error.ParseError(_("can't use negate operator in this context"))
+    if op == 'not':
+        t = _analyze(x[1])
+        return (op, t)
+    if op == 'and':
+        ta = _analyze(x[1])
+        tb = _analyze(x[2])
+        return (op, ta, tb)
+    if op == 'minus':
+        return _analyze(('and', x[1], ('not', x[2])))
+    if op in {'list', 'or'}:
+        ts = tuple(_analyze(y) for y in x[1:])
+        return (op,) + ts
+    if op == 'func':
+        getsymbol(x[1])  # function name must be a symbol
+        ta = _analyze(x[2])
+        return (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def _insertstatushints(x):
+    """Insert hint nodes where status should be calculated (first path)
+
+    This works in bottom-up way, summing up status names and inserting hint
+    nodes at 'and' and 'or' as needed. Thus redundant hint nodes may be left.
+
+    Returns (status-names, new-tree) at the given subtree, where status-names
+    is a sum of status names referenced in the given subtree.
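+
+    For example, ``clean() and modified()`` references status on both
+    sides, so a 'withstatus' hint node carrying the string
+    'clean modified' is inserted above the 'and' node; the per-function
+    hint nodes it subsumes are removed by the second pass below.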
+    """
+    if x is None:
+        return (), x
+
+    op = x[0]
+    if op in {'string', 'symbol', 'kindpat'}:
+        return (), x
+    if op == 'not':
+        h, t = _insertstatushints(x[1])
+        return h, (op, t)
+    if op == 'and':
+        ha, ta = _insertstatushints(x[1])
+        hb, tb = _insertstatushints(x[2])
+        hr = ha + hb
+        if ha and hb:
+            return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr)))
+        return hr, (op, ta, tb)
+    if op == 'or':
+        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
+        hr = sum(hs, ())
+        if sum(bool(h) for h in hs) > 1:
+            return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr)))
+        return hr, (op,) + ts
+    if op == 'list':
+        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
+        return sum(hs, ()), (op,) + ts
+    if op == 'func':
+        f = getsymbol(x[1])
+        # don't propagate 'ha' crossing a function boundary
+        ha, ta = _insertstatushints(x[2])
+        if getattr(symbols.get(f), '_callstatus', False):
+            return (f,), ('withstatus', (op, x[1], ta), ('string', f))
+        return (), (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def _mergestatushints(x, instatus):
+    """Remove redundant status hint nodes (second path)
+
+    This is the top-down path to eliminate inner hint nodes.
+    """
+    if x is None:
+        return x
+
+    op = x[0]
+    if op == 'withstatus':
+        if instatus:
+            # drop redundant hint node
+            return _mergestatushints(x[1], instatus)
+        t = _mergestatushints(x[1], instatus=True)
+        return (op, t, x[2])
+    if op in {'string', 'symbol', 'kindpat'}:
+        return x
+    if op == 'not':
+        t = _mergestatushints(x[1], instatus)
+        return (op, t)
+    if op == 'and':
+        ta = _mergestatushints(x[1], instatus)
+        tb = _mergestatushints(x[2], instatus)
+        return (op, ta, tb)
+    if op in {'list', 'or'}:
+        ts = tuple(_mergestatushints(y, instatus) for y in x[1:])
+        return (op,) + ts
+    if op == 'func':
+        # don't propagate 'instatus' crossing a function boundary
+        ta = _mergestatushints(x[2], instatus=False)
+        return (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def analyze(x):
+    """Transform raw parsed tree to evaluatable tree which can be fed to
+    optimize() or getmatch()
+
+    All pseudo operations should be mapped to real operations or functions
+    defined in methods or symbols table respectively.
+    """
+    t = _analyze(x)
+    _h, t = _insertstatushints(t)
+    return _mergestatushints(t, instatus=False)
+
+def _optimizeandops(op, ta, tb):
+    if tb is not None and tb[0] == 'not':
+        return ('minus', ta, tb[1])
+    return (op, ta, tb)
+
+def _optimizeunion(xs):
+    # collect string patterns so they can be compiled into a single regexp
+    ws, ts, ss = [], [], []
+    for x in xs:
+        w, t = _optimize(x)
+        if t is not None and t[0] in {'string', 'symbol', 'kindpat'}:
+            ss.append(t)
+            continue
+        ws.append(w)
+        ts.append(t)
+    if ss:
+        ws.append(WEIGHT_CHECK_FILENAME)
+        ts.append(('patterns',) + tuple(ss))
+    return ws, ts
+
+def _optimize(x):
+    if x is None:
+        return 0, x
+
+    op = x[0]
+    if op == 'withstatus':
+        w, t = _optimize(x[1])
+        return w, (op, t, x[2])
+    if op in {'string', 'symbol'}:
+        return WEIGHT_CHECK_FILENAME, x
+    if op == 'kindpat':
+        w, t = _optimize(x[2])
+        return w, (op, x[1], t)
+    if op == 'not':
+        w, t = _optimize(x[1])
+        return w, (op, t)
+    if op == 'and':
+        wa, ta = _optimize(x[1])
+        wb, tb = _optimize(x[2])
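+        # keep the lighter-weight operand on the left so it is evaluated first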
+        if wa <= wb:
+            return wa, _optimizeandops(op, ta, tb)
+        else:
+            return wb, _optimizeandops(op, tb, ta)
+    if op == 'or':
+        ws, ts = _optimizeunion(x[1:])
+        if len(ts) == 1:
+            return ws[0], ts[0] # 'or' operation is fully optimized out
+        ts = tuple(it[1] for it in sorted(enumerate(ts),
+                                          key=lambda it: ws[it[0]]))
+        return max(ws), (op,) + ts
+    if op == 'list':
+        ws, ts = zip(*(_optimize(y) for y in x[1:]))
+        return sum(ws), (op,) + ts
+    if op == 'func':
+        f = getsymbol(x[1])
+        w = getattr(symbols.get(f), '_weight', 1)
+        wa, ta = _optimize(x[2])
+        return w + wa, (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def optimize(x):
+    """Reorder/rewrite evaluatable tree for optimization
+
+    All pseudo operations should be transformed beforehand.
+    """
+    _w, t = _optimize(x)
+    return t
+
+def prettyformat(tree):
+    return parser.prettyformat(tree, ('string', 'symbol'))
--- a/mercurial/graphmod.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/graphmod.py	Tue Sep 04 12:16:28 2018 -0400
@@ -22,6 +22,7 @@
 from .node import nullrev
 from . import (
     dagop,
+    pycompat,
     smartset,
     util,
 )
@@ -280,7 +281,7 @@
         line.extend(echars[-(remainder * 2):])
     return line
 
-def _drawendinglines(lines, extra, edgemap, seen):
+def _drawendinglines(lines, extra, edgemap, seen, state):
     """Draw ending lines for missing parent edges
 
     None indicates an edge that ends between this node and the next
@@ -297,7 +298,8 @@
     while edgechars and edgechars[-1] is None:
         edgechars.pop()
     shift_size = max((edgechars.count(None) * 2) - 1, 0)
-    while len(lines) < 3 + shift_size:
+    minlines = 3 if not state['graphshorten'] else 2
+    while len(lines) < minlines + shift_size:
         lines.append(extra[:])
 
     if shift_size:
@@ -318,7 +320,7 @@
                 positions[i] = max(pos, targets[i])
                 line[pos] = '/' if pos > targets[i] else extra[toshift[i]]
 
-    map = {1: '|', 2: '~'}
+    map = {1: '|', 2: '~'} if not state['graphshorten'] else {1: '~'}
     for i, line in enumerate(lines):
         if None not in line:
             continue
@@ -426,16 +428,16 @@
     # shift_interline is the line containing the non-vertical
     # edges between this entry and the next
     shift_interline = echars[:idx * 2]
-    for i in xrange(2 + coldiff):
+    for i in pycompat.xrange(2 + coldiff):
         shift_interline.append(' ')
     count = ncols - idx - 1
     if coldiff == -1:
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             shift_interline.extend(['/', ' '])
     elif coldiff == 0:
         shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
     else:
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             shift_interline.extend(['\\', ' '])
 
     # draw edges from the current node to its parents
@@ -462,7 +464,7 @@
         while len(lines) < len(text):
             lines.append(extra_interline[:])
 
-    _drawendinglines(lines, extra_interline, edgemap, seen)
+    _drawendinglines(lines, extra_interline, edgemap, seen, state)
 
     while len(text) < len(lines):
         text.append("")
--- a/mercurial/help.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/help.py	Tue Sep 04 12:16:28 2018 -0400
@@ -205,6 +205,8 @@
      loaddoc('bundle2', subdir='internals')),
     (['bundles'], _('Bundles'),
      loaddoc('bundles', subdir='internals')),
+    (['cbor'], _('CBOR'),
+     loaddoc('cbor', subdir='internals')),
     (['censor'], _('Censor'),
      loaddoc('censor', subdir='internals')),
     (['changegroups'], _('Changegroups'),
@@ -642,8 +644,8 @@
 
     return ''.join(rst)
 
-def formattedhelp(ui, commands, name, keep=None, unknowncmd=False, full=True,
-                  **opts):
+def formattedhelp(ui, commands, fullname, keep=None, unknowncmd=False,
+                  full=True, **opts):
     """get help for a given topic (as a dotted name) as rendered rst
 
     Either returns the rendered help text or raises an exception.
@@ -652,19 +654,17 @@
         keep = []
     else:
         keep = list(keep) # make a copy so we can mutate this later
-    fullname = name
-    section = None
-    subtopic = None
-    if name and '.' in name:
-        name, remaining = name.split('.', 1)
-        remaining = encoding.lower(remaining)
-        if '.' in remaining:
-            subtopic, section = remaining.split('.', 1)
-        else:
-            if name in subtopics:
-                subtopic = remaining
-            else:
-                section = remaining
+
+    # <fullname> := <name>[.<subtopic][.<section>]
+    name = subtopic = section = None
+    if fullname is not None:
+        nameparts = fullname.split('.')
+        name = nameparts.pop(0)
+        if nameparts and name in subtopics:
+            subtopic = nameparts.pop(0)
+        if nameparts:
+            section = encoding.lower('.'.join(nameparts))
+
     textwidth = ui.configint('ui', 'textwidth')
     termwidth = ui.termwidth() - 2
     if textwidth <= 0 or termwidth < textwidth:
@@ -672,19 +672,19 @@
     text = help_(ui, commands, name,
                  subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
 
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
+    blocks, pruned = minirst.parse(text, keep=keep)
+    if 'verbose' in pruned:
+        keep.append('omitted')
+    else:
+        keep.append('notomitted')
+    blocks, pruned = minirst.parse(text, keep=keep)
+    if section:
+        blocks = minirst.filtersections(blocks, section)
 
     # We could have been given a weird ".foo" section without a name
     # to look for, or we could have simply failed to find "foo.bar"
     # because bar isn't a section of foo
-    if section and not (formatted and name):
+    if section and not (blocks and name):
         raise error.Abort(_("help section not found: %s") % fullname)
 
-    if 'verbose' in pruned:
-        keep.append('omitted')
-    else:
-        keep.append('notomitted')
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
-    return formatted
+    return minirst.formatplain(blocks, textwidth)
--- a/mercurial/help/config.txt	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/help/config.txt	Tue Sep 04 12:16:28 2018 -0400
@@ -438,6 +438,25 @@
 ``commands``
 ------------
 
+``resolve.confirm``
+    Confirm before performing action if no filename is passed.
+    (default: False)
+
+``resolve.explicit-re-merge``
+    Require that uses of ``hg resolve`` specify which action to perform,
+    instead of re-merging files by default.
+    (default: False)
+
+``resolve.mark-check``
+    Determines what level of checking :hg:`resolve --mark` will perform before
+    marking files as resolved. Valid values are ``none``, ``warn``, and
+    ``abort``. ``warn`` will output a warning listing the file(s) that still
+    have conflict markers in them, but will still mark everything resolved.
+    ``abort`` will output the same warning but will not mark things as
+    resolved. If ``--all`` is passed and this is set to ``abort``, only a
+    warning will be shown (an error will not be raised).
+    (default: ``none``)
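+
+    For example, to refuse to mark files that still contain conflict
+    markers::
+
+      [commands]
+      resolve.mark-check = abort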
+
 ``status.relative``
     Make paths in :hg:`status` output relative to the current directory.
     (default: False)
@@ -1333,6 +1352,11 @@
    halted, the repository is left in a normal ``unresolved`` merge state.
    (default: ``continue``)
 
+``strict-capability-check``
+   Whether the capabilities of internal merge tools are checked strictly
+   while examining the rules that decide which merge tool to use.
+   (default: False)
+
 ``merge-patterns``
 ------------------
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/cbor.txt	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,130 @@
+Mercurial uses Concise Binary Object Representation (CBOR)
+(RFC 7049) for various data formats.
+
+This document describes the subset of CBOR that Mercurial uses and
+gives recommendations for appropriate use of CBOR within Mercurial.
+
+Type Limitations
+================
+
+Major types 0 and 1 (unsigned integers and negative integers) MUST be
+fully supported.
+
+Major type 2 (byte strings) MUST be fully supported. However, there
+are limitations around the use of indefinite-length byte strings.
+(See below.)
+
+Major type 3 (text strings) is NOT supported.
+
+Major type 4 (arrays) MUST be supported. However, values are limited
+to the set of types described in the "Container Types" section below,
+and indefinite-length arrays are NOT supported.
+
+Major type 5 (maps) MUST be supported. However, key values are limited
+to the set of types described in the "Container Types" section below,
+and indefinite-length maps are NOT supported.
+
+Major type 6 (semantic tagging of major types) can be used with the
+following semantic tag values:
+
+258
+   Mathematical finite set. Suitable for representing Python's
+   ``set`` type.
+
+All other semantic tag values are not allowed.
+
+Major type 7 (simple data types) can be used with the following
+type values:
+
+20
+   False
+21
+   True
+22
+   Null
+31
+   Break stop code (for indefinite-length items).
+
+All other simple data type values (including every value requiring the
+1 byte extension) are disallowed.
+
+Indefinite-Length Byte Strings
+==============================
+
+Indefinite-length byte strings (major type 2) are allowed. However,
+they MUST NOT occur inside a container type (such as an array or map).
+That is, they can only occur as the "top-most" element in a stream of
+values.
+
+Encoders and decoders SHOULD *stream* indefinite-length byte strings.
+That is, an encoder or decoder SHOULD NOT buffer the entirety of a long
+byte string value when indefinite-length byte strings are being used
+if it can be avoided. Mercurial MAY use extremely long indefinite-length
+byte strings, and buffering the source or destination value COULD lead
+to memory exhaustion.
+
+Chunks in an indefinite-length byte string SHOULD NOT exceed 2^20
+bytes.
+
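+As a rough illustration (not Mercurial's actual implementation; the
+function names here are made up), an encoder could stream a large value
+as an indefinite-length byte string as follows, with the header bytes
+following RFC 7049's encoding of major type and length::
+
+  import struct
+
+  def encodelength(majortype, length):
+      # initial byte(s) for a definite-length item
+      if length < 24:
+          return struct.pack('>B', (majortype << 5) | length)
+      elif length < 2 ** 8:
+          return struct.pack('>BB', (majortype << 5) | 24, length)
+      elif length < 2 ** 16:
+          return struct.pack('>BH', (majortype << 5) | 25, length)
+      elif length < 2 ** 32:
+          return struct.pack('>BI', (majortype << 5) | 26, length)
+      return struct.pack('>BQ', (majortype << 5) | 27, length)
+
+  def streamindefinitebytestring(chunks):
+      yield b'\x5f'                          # major type 2, subtype 31
+      for chunk in chunks:
+          yield encodelength(2, len(chunk))  # definite-length chunk
+          yield chunk
+      yield b'\xff'                          # break stop code
+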
+Container Types
+===============
+
+Mercurial may use the array (major type 4), map (major type 5), and
+set (semantic tag 258 plus major type 4 array) container types.
+
+An array may contain any supported type as values.
+
+A map MUST only use the following types as keys:
+
+* unsigned integers (major type 0)
+* negative integers (major type 1)
+* byte strings (major type 2) (but not indefinite-length byte strings)
+* false (simple type 20)
+* true (simple type 21)
+* null (simple type 22)
+
+A map MUST only use the following types as values:
+
+* all types supported as map keys
+* arrays
+* maps
+* sets
+
+A set may only use the following types as values:
+
+* all types supported as map keys
+
+It is recommended that keys in maps and values in sets and arrays all
+be of a uniform type.
+
+Avoiding Large Byte Strings
+===========================
+
+The use of large byte strings is discouraged, especially in scenarios where
+the total size of the byte string may be unbounded for some inputs (e.g. when
+representing the content of a tracked file). It is highly recommended to use
+indefinite-length byte strings for these purposes.
+
+Since indefinite-length byte strings cannot be nested within an outer
+container (such as an array or map), to associate a large byte string
+with another data structure, it is recommended to use an array or
+map followed immediately by an indefinite-length byte string. For example,
+instead of the following map::
+
+   {
+      "key1": "value1",
+      "key2": "value2",
+      "long_value": "some very large value...",
+   }
+
+Use a map followed by an indefinite-length byte string::
+
+   {
+      "key1": "value1",
+      "key2": "value2",
+      "value_follows": True,
+   }
+   <BEGIN INDEFINITE-LENGTH BYTE STRING>
+   "some very large value"
+   "..."
+   <END INDEFINITE-LENGTH BYTE STRING>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/linelog.txt	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,302 @@
+linelog is a storage format inspired by the "Interleaved deltas" idea. See
+https://en.wikipedia.org/wiki/Interleaved_deltas for its introduction.
+
+0. SCCS Weave
+
+  To understand what linelog is, we first take a quick look at a simplified
+  SCCS weave format (with the header removed), which is an implementation of
+  the "Interleaved deltas" idea.
+
+0.1 Basic SCCS Weave File Format
+
+  A SCCS weave file consists of plain text lines. Each line is either a
+  special instruction starting with "^A" or part of the content of the real
+  file the weave tracks. There are 3 important operations, where REV denotes
+  the revision number:
+
+    ^AI REV, marking the beginning of an insertion block introduced by REV
+    ^AD REV, marking the beginning of a deletion block introduced by REV
+    ^AE REV, marking the end of the block started by "^AI REV" or "^AD REV"
+
+  Note on revision numbers: For any two different revision numbers, one must
+  be an ancestor of the other to make them comparable. This enforces linear
+  history. Besides, the comparison functions (">=", "<") should be efficient.
+  This means that if revisions are strings, as in git or hg, an external map
+  is required to convert them into integers.
+
+  For example, to represent the following changes:
+
+    REV 1 | REV 2 | REV 3
+    ------+-------+-------
+    a     | a     | a
+    b     | b     | 2
+    c     | 1     | c
+          | 2     |
+          | c     |
+
+  A possible weave file looks like:
+
+    ^AI 1
+    a
+    ^AD 3
+    b
+    ^AI 2
+    1
+    ^AE 3
+    2
+    ^AE 2
+    c
+    ^AE 1
+
+  An "^AE" does not always match its nearest operation ("^AI" or "^AD"). In
+  the above example, "^AE 3" does not match the nearest "^AI 2" but "^AD 3".
+  Therefore we need some extra information for "^AE". The SCCS weave uses a
+  revision number. It could also be a boolean value indicating whether the
+  block is an insertion or a deletion (see section 0.4).
+
+0.2 Checkout
+
+  The "checkout" operation is to retrieve file content at a given revision,
+  say X. It's doable by going through the file line by line and:
+
+    - If we meet ^AI rev with rev > X, find the corresponding ^AE and jump there
+    - If we meet ^AD rev with rev <= X, find the corresponding ^AE and jump there
+    - Ignore ^AE
+    - For normal lines, just output them
+
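+  A rough Python sketch of these rules, assuming a simplified encoding
+  where control lines start with the "^A" byte (e.g. "\x01I 1"):
+
+    def checkout(weavelines, x):
+        skipuntil = None        # rev whose ^AE ends the skipped block
+        for line in weavelines:
+            if line.startswith('\x01'):
+                op, rev = line[1], int(line[3:])
+                if skipuntil is not None:
+                    if op == 'E' and rev == skipuntil:
+                        skipuntil = None   # reached the matching ^AE
+                    continue
+                if op == 'I' and rev > x:
+                    skipuntil = rev        # insertion from a later rev
+                elif op == 'D' and rev <= x:
+                    skipuntil = rev        # deletion already applied
+            elif skipuntil is None:
+                yield line                 # content visible at revision x
+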
+0.3 Annotate
+
+  The "annotate" operation is to show extra metadata like the revision number
+  and the original line number a line comes from.
+
+  It's basically just a "Checkout". The extra metadata can be stored side by
+  side with the line contents. Alternatively, we can infer the revision
+  number from "^AI"s.
+
+  Some SCM tools have to calculate diffs on the fly and thus are much slower
+  on this operation.
+
+0.4 Tree Structure
+
+  The word "interleaved" is used because "^AI" .. "^AE" and "^AD" .. "^AE"
+  blocks can be interleaved.
+
+  If we consider insertions and deletions separately, they can form tree
+  structures, respectively.
+
+    +--- ^AI 1        +--- ^AD 3
+    | +- ^AI 2        | +- ^AD 2
+    | |               | |
+    | +- ^AE 2        | +- ^AE 2
+    |                 |
+    +--- ^AE 1        +--- ^AE 3
+
+  More specifically, it's possible to build a tree for all insertions, where
+  the tree node has the structure "(rev, startline, endline)". "startline" is
+  the line number of "^AI" and "endline" is the line number of the matched
+  "^AE".  The tree will have these properties:
+
+    1. child.rev > parent.rev
+    2. child.startline > parent.startline
+    3. child.endline < parent.endline
+
+  A similar tree for all deletions can also be built with the first property
+  changed to:
+
+    1. child.rev < parent.rev
+
+0.5 Malformed Cases
+
+  The following cases are considered malformed in our implementation:
+
+    1. Interleaved insertions, or interleaved deletions.
+       It can be rewritten to a non-interleaved tree structure.
+
+       Take insertions as an example; deletions are similar:
+
+       ^AI x         ^AI x
+       a             a
+       ^AI x + 1  -> ^AI x + 1
+       b             b
+       ^AE x         ^AE x + 1
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    2. Nested insertions, where the inner one has a smaller revision number.
+       Or nested deletions, where the inner one has a larger revision number.
+       It can be rewritten to a non-nested form.
+
+       Take insertions as an example; deletions are similar:
+
+       ^AI x + 1     ^AI x + 1
+       a             a
+       ^AI x      -> ^AE x + 1
+       b             ^AI x
+       ^AE x         b
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    3. Insertion inside deletion with a smaller revision number.
+
+       Rewrite by duplicating the content inserted:
+
+       ^AD x          ^AD x
+       a              a
+       ^AI x + 1  ->  b
+       b              c
+       ^AE x + 1      ^AE x
+       c              ^AI x + 1
+       ^AE x          b
+                      ^AE x + 1
+
+       Note: If "annotate" purely depends on "^AI" information, then the
+       duplication content will lose track of where "b" is originally from.
+
+  Some of them may be valid in other implementations for special purposes. For
+  example, to "revive" a previously deleted block in a newer revision.
+
+0.6 Cases Can Be Optimized
+
+  It's always better to get things nested. For example, the left is more
+  efficient than the right, though they represent the same content:
+
+    +--- ^AD 2          +- ^AD 1
+    | +- ^AD 1          |   LINE A
+    | |   LINE A        +- ^AE 1
+    | +- ^AE 1          +- ^AD 2
+    |     LINE B        |   LINE B
+    +--- ^AE 2          +- ^AE 2
+
+  Our implementation sometimes generates the less efficient form. Always
+  producing the optimal form would require extra code complexity that does
+  not seem worthwhile.
+
+0.7 Inefficiency
+
+  The file format can be slow because:
+
+  - Inserting a new line at position P requires rewriting all data after P.
+  - Finding "^AE" requires walking through the content (O(N), where N is the
+    number of lines between "^AI/D" and "^AE").
+
+1. Linelog
+
+  The linelog is a binary format dedicated to speeding up mercurial (or
+  git)'s "annotate" operation. It's designed to avoid the issues mentioned in
+  section 0.7.
+
+1.1 Content Stored
+
+  Linelog is not another storage for file contents. It only stores line
+  numbers and corresponding revision numbers, instead of actual line content.
+  This is okay for the "annotate" operation because the external source is
+  usually fast at checking out the content of a file at a specific revision.
+
+  A typical SCCS weave is also fast on the "grep" operation, which needs
+  random access to line contents from different revisions of a file. This
+  can be slow with linelog's no-line-content design. However, we could use
+  an extra map ((rev, line num) -> line content) to speed it up.
+
+  Note the revision numbers in linelog should be independent of mercurial's
+  integer revision numbers. There should be some mapping between linelog rev
+  and hg hash stored side by side, to make the files reusable after being
+  copied to another machine.
+
+1.2 Basic Format
+
+  A linelog file consists of "instruction"s. An "instruction" can be either:
+
+    - JGE  REV ADDR     # jump to ADDR if rev >= REV
+    - JL   REV ADDR     # jump to ADDR if rev < REV
+    - LINE REV LINENUM  # append the (LINENUM+1)-th line in revision REV
+
+  For example, here is the linelog representing the same file with the
+  3 revisions mentioned in section 0.1:
+
+    SCCS  |    Linelog
+    Weave | Addr : Instruction
+    ------+------+-------------
+    ^AI 1 |    0 : JL   1 8
+    a     |    1 : LINE 1 0
+    ^AD 3 |    2 : JGE  3 6
+    b     |    3 : LINE 1 1
+    ^AI 2 |    4 : JL   2 7
+    1     |    5 : LINE 2 2
+    ^AE 3 |
+    2     |    6 : LINE 2 3
+    ^AE 2 |
+    c     |    7 : LINE 1 2
+    ^AE 1 |
+          |    8 : END
+
+  This way, "find ^AE" is O(1) because we just jump there. And we can insert
+  new lines without rewriting most part of the file by appending new lines and
+  changing a single instruction to jump to them.
+
+  The current implementation uses 64 bits for an instruction: The opcode (JGE,
+  JL or LINE) takes 2 bits, REV takes 30 bits and ADDR or LINENUM takes 32
+  bits. It also stores the max revision number and buffer size at the first
+  64 bits for quick access to these values.
+
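+  As a rough sketch (the real implementation packs instructions into
+  64-bit words as described above; this uses plain tuples instead), the
+  interpreter loop is essentially:
+
+    def execute(program, rev):
+        # program: list of ('JGE', rev, addr), ('JL', rev, addr) or
+        # ('LINE', rev, linenum) tuples; yields annotate results
+        pc = 0
+        while pc < len(program):           # running off the end is END
+            op = program[pc]
+            if op[0] == 'JGE' and rev >= op[1]:
+                pc = op[2]                 # jump
+            elif op[0] == 'JL' and rev < op[1]:
+                pc = op[2]                 # jump
+            else:
+                if op[0] == 'LINE':
+                    yield (op[1], op[2])   # (introducing rev, line number)
+                pc += 1
+
+  Running it with rev=3 over the program above yields (1, 0), (2, 3),
+  (1, 2), i.e. lines "a", "2", "c" - the content of revision 3.
+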
+1.3 Comparing with Mercurial's revlog format
+
+  Apparently, linelog is very different from revlog: linelog stores rev and
+  line numbers, while revlog has line contents and other metadata (like
+  parents, flags). However, the revlog format could also be used to store rev
+  and line numbers. For example, to speed up the annotate operation, we could
+  also pre-calculate annotate results and just store them using the revlog
+  format.
+
+  Therefore, linelog is actually somewhat similar to revlog, with the important
+  trade-off that it only supports linear history (mentioned in section 0.1).
+  Essentially, the differences are:
+
+    a) Linelog is full of deltas, while revlog could contain full file
+       contents sometimes. So linelog is smaller. Revlog could trade
+       reconstruction speed for file size - best case, revlog is as small as
+       linelog.
+    b) The interleaved delta structure allows skipping large portions of
+       uninteresting deltas so linelog's content reconstruction is faster than
+       the delta-only version of revlog (however it's possible to construct
+       a case where interleaved deltas degrade to plain deltas, so linelog
+       worst case would be delta-only revlog). Revlog could trade file size
+       for reconstruction speed.
+    c) Linelog implicitly maintains the order of all lines it stores. So it
+       could dump all the lines from all revisions, with a reasonable order.
+       While revlog could also dump all line additions, it requires extra
+       computation to figure out the order in which to put those lines -
+       that's some kind of "merge".
+
+  "c" makes "hg absorb" easier to implement and makes it possible to do
+  "annotate --deleted".
+
+1.4 Malformed Cases Handling
+
+  The following "case 1", "case 2", and "case 3" refer to cases mentioned
+  in section 0.5.
+
+  Using the exposed API (replacelines), case 1 is impossible to generate,
+  although it's possible to generate it by constructing rawdata and loading
+  that via linelog.fromdata.
+
+  Doing annotate(maxrev) before replacelines (i.e. the a1, a2 passed to
+  replacelines refer to the latest revision) eliminates the possibility
+  of case 3. That makes sense since usually you'd like to make edits on top of
+  the latest revision. Practically, both absorb and fastannotate do this.
+
+  Doing annotate(maxrev), plus replacelines(rev, ...) where rev >= maxrev
+  eliminates the possibility of case 2. That makes sense since usually the
+  edits belong to "new revisions", not "old revisions". Practically,
+  fastannotate does this. Absorb calls replacelines with rev < maxrev to edit
+  past revisions. So it needs some extra care to not generate case 2.
+
+  If case 1 occurs, that probably means linelog file corruption (assuming
+  linelog is edited via public APIs); the checkout or annotate result could
+  be less meaningful or even error out, but linelog wouldn't enter an
+  infinite loop.
+
+  If either case 2 or 3 occurs, linelog works as if the inner "^AI/D" and "^AE"
+  operations on the left side are silently ignored.
--- a/mercurial/help/merge-tools.txt	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/help/merge-tools.txt	Tue Sep 04 12:16:28 2018 -0400
@@ -36,8 +36,9 @@
 
 .. internaltoolsmarker
 
-Internal tools are always available and do not require a GUI but will by default
-not handle symlinks or binary files.
+Internal tools are always available and do not require a GUI but will
+by default not handle symlinks or binary files. See the next section
+for details about the "actual capabilities" described above.
 
 Choosing a merge tool
 =====================
@@ -54,8 +55,7 @@
 
 3. If the filename of the file to be merged matches any of the patterns in the
    merge-patterns configuration section, the first usable merge tool
-   corresponding to a matching pattern is used. Here, binary capabilities of the
-   merge tool are not considered.
+   corresponding to a matching pattern is used.
 
 4. If ui.merge is set it will be considered next. If the value is not the name
    of a configured tool, the specified value is used and must be executable by
@@ -72,6 +72,30 @@
 
 8. Otherwise, ``:prompt`` is used.
 
+For historical reasons, Mercurial treats merge tools as described below
+while examining the rules above.
+
+==== =============== ====== =======
+step specified via   binary symlink
+==== =============== ====== =======
+1.   --tool          o/o    o/o
+2.   HGMERGE         o/o    o/o
+3.   merge-patterns  o/o(*) x/?(*)
+4.   ui.merge        x/?(*) x/?(*)
+==== =============== ====== =======
+
+Each capability column indicates Mercurial's behavior for
+internal/external merge tools while examining each rule.
+
+- "o": "assume that a tool has capability"
+- "x": "assume that a tool does not have capability"
+- "?": "check actual capability of a tool"
+
+If the ``merge.strict-capability-check`` configuration option is true,
+Mercurial checks the capabilities of merge tools strictly in the (*)
+cases above (i.e. each capability column becomes "?/?"). It is false by
+default for backward compatibility.
+
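+For example, to always check the actual capabilities in the (*) cases::
+
+  [merge]
+  strict-capability-check = true
+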
 .. note::
 
    After selecting a merge program, Mercurial will by default attempt
--- a/mercurial/hg.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/hg.py	Tue Sep 04 12:16:28 2018 -0400
@@ -9,6 +9,7 @@
 from __future__ import absolute_import
 
 import errno
+import functools
 import hashlib
 import os
 import shutil
@@ -162,9 +163,16 @@
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create, intents=intents)
     ui = getattr(obj, "ui", ui)
+    if ui.configbool('devel', 'debug.extensions'):
+        log = functools.partial(
+            ui.debug, 'debug.extensions: ', label='debug.extensions')
+    else:
+        log = lambda *a, **kw: None
     for f in presetupfuncs or []:
         f(ui, obj)
+    log('- executing reposetup hooks\n')
     for name, module in extensions.extensions(ui):
+        log('  - running reposetup for %s\n' % (name,))
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, obj)
@@ -258,7 +266,7 @@
         raise error.Abort(_('destination already exists'))
 
     if not destwvfs.isdir():
-        destwvfs.mkdir()
+        destwvfs.makedirs()
     destvfs.makedir()
 
     requirements = ''
@@ -373,31 +381,30 @@
     try:
         hardlink = None
         topic = _('linking') if hardlink else _('copying')
-        progress = ui.makeprogress(topic)
-        num = 0
-        srcpublishing = srcrepo.publishing()
-        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
-        dstvfs = vfsmod.vfs(destpath)
-        for f in srcrepo.store.copylist():
-            if srcpublishing and f.endswith('phaseroots'):
-                continue
-            dstbase = os.path.dirname(f)
-            if dstbase and not dstvfs.exists(dstbase):
-                dstvfs.mkdir(dstbase)
-            if srcvfs.exists(f):
-                if f.endswith('data'):
-                    # 'dstbase' may be empty (e.g. revlog format 0)
-                    lockfile = os.path.join(dstbase, "lock")
-                    # lock to avoid premature writing to the target
-                    destlock = lock.lock(dstvfs, lockfile)
-                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
-                                             hardlink, progress)
-                num += n
-        if hardlink:
-            ui.debug("linked %d files\n" % num)
-        else:
-            ui.debug("copied %d files\n" % num)
-        progress.complete()
+        with ui.makeprogress(topic) as progress:
+            num = 0
+            srcpublishing = srcrepo.publishing()
+            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
+            dstvfs = vfsmod.vfs(destpath)
+            for f in srcrepo.store.copylist():
+                if srcpublishing and f.endswith('phaseroots'):
+                    continue
+                dstbase = os.path.dirname(f)
+                if dstbase and not dstvfs.exists(dstbase):
+                    dstvfs.mkdir(dstbase)
+                if srcvfs.exists(f):
+                    if f.endswith('data'):
+                        # 'dstbase' may be empty (e.g. revlog format 0)
+                        lockfile = os.path.join(dstbase, "lock")
+                        # lock to avoid premature writing to the target
+                        destlock = lock.lock(dstvfs, lockfile)
+                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
+                                                 hardlink, progress)
+                    num += n
+            if hardlink:
+                ui.debug("linked %d files\n" % num)
+            else:
+                ui.debug("copied %d files\n" % num)
         return destlock
     except: # re-raises
         release(destlock)
@@ -626,7 +633,7 @@
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
-                os.mkdir(dest)
+                util.makedirs(dest)
             else:
                 # only clean up directories we create ourselves
                 cleandir = hgdir
--- a/mercurial/hgweb/hgweb_mod.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/hgweb/hgweb_mod.py	Tue Sep 04 12:16:28 2018 -0400
@@ -140,11 +140,6 @@
         if not staticurl.endswith('/'):
             staticurl += '/'
 
-        # some functions for the templater
-
-        def motd(**map):
-            yield self.config('web', 'motd')
-
         # figure out which style to use
 
         vars = {}
@@ -177,12 +172,16 @@
             'urlbase': req.advertisedbaseurl,
             'repo': self.reponame,
             'encoding': encoding.encoding,
-            'motd': motd,
             'sessionvars': sessionvars,
             'pathdef': makebreadcrumb(req.apppath),
             'style': style,
             'nonce': self.nonce,
         }
+        templatekeyword = registrar.templatekeyword(defaults)
+        @templatekeyword('motd', requires=())
+        def motd(context, mapping):
+            yield self.config('web', 'motd')
+
         tres = formatter.templateresources(self.repo.ui, self.repo)
         tmpl = templater.templater.frommapfile(mapfile,
                                                filters=filters,
--- a/mercurial/hgweb/hgwebdir_mod.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/hgweb/hgwebdir_mod.py	Tue Sep 04 12:16:28 2018 -0400
@@ -33,6 +33,7 @@
     hg,
     profiling,
     pycompat,
+    registrar,
     scmutil,
     templater,
     templateutil,
@@ -495,12 +496,6 @@
 
     def templater(self, req, nonce):
 
-        def motd(**map):
-            if self.motd is not None:
-                yield self.motd
-            else:
-                yield config('web', 'motd')
-
         def config(section, name, default=uimod._unset, untrusted=True):
             return self.ui.config(section, name, default, untrusted)
 
@@ -520,7 +515,6 @@
 
         defaults = {
             "encoding": encoding.encoding,
-            "motd": motd,
             "url": req.apppath + '/',
             "logourl": logourl,
             "logoimg": logoimg,
@@ -529,5 +523,13 @@
             "style": style,
             "nonce": nonce,
         }
+        templatekeyword = registrar.templatekeyword(defaults)
+        @templatekeyword('motd', requires=())
+        def motd(context, mapping):
+            if self.motd is not None:
+                yield self.motd
+            else:
+                yield config('web', 'motd')
+
         tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
         return tmpl
--- a/mercurial/hgweb/webcommands.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/hgweb/webcommands.py	Tue Sep 04 12:16:28 2018 -0400
@@ -215,7 +215,7 @@
 
         def revgen():
             cl = web.repo.changelog
-            for i in xrange(len(web.repo) - 1, 0, -100):
+            for i in pycompat.xrange(len(web.repo) - 1, 0, -100):
                 l = []
                 for j in cl.revs(max(0, i - 99), i):
                     ctx = web.repo[j]
--- a/mercurial/hgweb/webutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/hgweb/webutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -408,6 +408,12 @@
 
 whyunstable._requires = {'repo', 'ctx'}
 
+# helper to mark a function as a new-style template keyword; can be removed
+# once old-style functions are no longer supported and new-style becomes the
+# default
+def _kwfunc(f):
+    f._requires = ()
+    return f
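+
+# For instance, _kwfunc(lambda context, mapping: parents(ctx)) below lets a
+# plain callable stand in where the templater expects the new-style
+# (context, mapping) signature.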
+
 def commonentry(repo, ctx):
     node = ctx.node()
     return {
@@ -432,8 +438,8 @@
         'branches': nodebranchdict(repo, ctx),
         'tags': nodetagsdict(repo, node),
         'bookmarks': nodebookmarksdict(repo, node),
-        'parent': lambda **x: parents(ctx),
-        'child': lambda **x: children(ctx),
+        'parent': _kwfunc(lambda context, mapping: parents(ctx)),
+        'child': _kwfunc(lambda context, mapping: children(ctx)),
     }
 
 def changelistentry(web, ctx):
@@ -450,9 +456,9 @@
 
     entry = commonentry(repo, ctx)
     entry.update(
-        allparents=lambda **x: parents(ctx),
-        parent=lambda **x: parents(ctx, rev - 1),
-        child=lambda **x: children(ctx, rev + 1),
+        allparents=_kwfunc(lambda context, mapping: parents(ctx)),
+        parent=_kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
+        child=_kwfunc(lambda context, mapping: children(ctx, rev + 1)),
         changelogtag=showtags,
         files=files,
     )
@@ -521,7 +527,7 @@
         changesetbranch=showbranch,
         files=templateutil.mappedgenerator(_listfilesgen,
                                            args=(ctx, web.stripecount)),
-        diffsummary=lambda **x: diffsummary(diffstatsgen),
+        diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)),
         diffstat=diffstats,
         archives=web.archivelist(ctx.hex()),
         **pycompat.strkwargs(commonentry(web.repo, ctx)))
@@ -613,21 +619,21 @@
         len1 = lhi - llo
         len2 = rhi - rlo
         count = min(len1, len2)
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             yield _compline(type=type,
                             leftlineno=llo + i + 1,
                             leftline=leftlines[llo + i],
                             rightlineno=rlo + i + 1,
                             rightline=rightlines[rlo + i])
         if len1 > len2:
-            for i in xrange(llo + count, lhi):
+            for i in pycompat.xrange(llo + count, lhi):
                 yield _compline(type=type,
                                 leftlineno=i + 1,
                                 leftline=leftlines[i],
                                 rightlineno=None,
                                 rightline=None)
         elif len2 > len1:
-            for i in xrange(rlo + count, rhi):
+            for i in pycompat.xrange(rlo + count, rhi):
                 yield _compline(type=type,
                                 leftlineno=None,
                                 leftline=None,
--- a/mercurial/httppeer.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/httppeer.py	Tue Sep 04 12:16:28 2018 -0400
@@ -64,7 +64,7 @@
     result = []
 
     n = 0
-    for i in xrange(0, len(value), valuelen):
+    for i in pycompat.xrange(0, len(value), valuelen):
         n += 1
         result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/linelog.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,436 @@
+# linelog - efficient cache for annotate data
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""linelog is an efficient cache for annotate data inspired by SCCS Weaves.
+
+SCCS Weaves are an implementation of
+https://en.wikipedia.org/wiki/Interleaved_deltas. See
+mercurial/help/internals/linelog.txt for an exploration of SCCS weaves
+and how linelog works in detail.
+
+Here's a hacker's summary: a linelog is a program which is executed in
+the context of a revision. Executing the program emits information
+about lines, including the revision that introduced them and the line
+number in the file at the introducing revision. When an insertion or
+deletion is performed on the file, a jump instruction is used to patch
+in a new body of annotate information.
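+
+A minimal usage sketch (revision numbers made up for illustration):
+
+  ll = linelog()
+  ll.replacelines(1, 0, 0, 0, 2)   # rev 1 inserts two lines
+  ll.replacelines(2, 1, 2, 0, 1)   # rev 2 rewrites the second line
+  [(l.rev, l.linenum) for l in ll.annotate(2)]
+  # -> [(1, 0), (2, 0)]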
+"""
+from __future__ import absolute_import, print_function
+
+import abc
+import struct
+
+from .thirdparty import (
+    attr,
+)
+from . import (
+    pycompat,
+)
+
+_llentry = struct.Struct('>II')
+
+class LineLogError(Exception):
+    """Error raised when something bad happens internally in linelog."""
+
+@attr.s
+class lineinfo(object):
+    # Introducing revision of this line.
+    rev = attr.ib()
+    # Line number for this line in its introducing revision.
+    linenum = attr.ib()
+    # Private. Offset in the linelog program of this line. Used internally.
+    _offset = attr.ib()
+
+@attr.s
+class annotateresult(object):
+    rev = attr.ib()
+    lines = attr.ib()
+    _eof = attr.ib()
+
+    def __iter__(self):
+        return iter(self.lines)
+
+class _llinstruction(object):
+
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def __init__(self, op1, op2):
+        pass
+
+    @abc.abstractmethod
+    def __str__(self):
+        pass
+
+    def __repr__(self):
+        return str(self)
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        pass
+
+    @abc.abstractmethod
+    def encode(self):
+        """Encode this instruction to the binary linelog format."""
+
+    @abc.abstractmethod
+    def execute(self, rev, pc, emit):
+        """Execute this instruction.
+
+        Args:
+          rev: The revision we're annotating.
+          pc: The current offset in the linelog program.
+          emit: A function that accepts a single lineinfo object.
+
+        Returns:
+          The new value of pc. Returns None if execution should stop
+          (that is, we've found the end of the file).
+        """
+
+class _jge(_llinstruction):
+    """If the current rev is greater than or equal to op1, jump to op2."""
+
+    def __init__(self, op1, op2):
+        self._cmprev = op1
+        self._target = op2
+
+    def __str__(self):
+        return r'JGE %d %d' % (self._cmprev, self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._cmprev == other._cmprev
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(self._cmprev << 2, self._target)
+
+    def execute(self, rev, pc, emit):
+        if rev >= self._cmprev:
+            return self._target
+        return pc + 1
+
+class _jump(_llinstruction):
+    """Unconditional jumps are expressed as a JGE with op1 set to 0."""
+
+    def __init__(self, op1, op2):
+        if op1 != 0:
+            raise LineLogError("malformed JUMP, op1 must be 0, got %d" % op1)
+        self._target = op2
+
+    def __str__(self):
+        return r'JUMP %d' % (self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(0, self._target)
+
+    def execute(self, rev, pc, emit):
+        return self._target
+
+class _eof(_llinstruction):
+    """EOF is expressed as a JGE that always jumps to 0."""
+
+    def __init__(self, op1, op2):
+        if op1 != 0:
+            raise LineLogError("malformed EOF, op1 must be 0, got %d" % op1)
+        if op2 != 0:
+            raise LineLogError("malformed EOF, op2 must be 0, got %d" % op2)
+
+    def __str__(self):
+        return r'EOF'
+
+    def __eq__(self, other):
+        return type(self) == type(other)
+
+    def encode(self):
+        return _llentry.pack(0, 0)
+
+    def execute(self, rev, pc, emit):
+        return None
+
+class _jl(_llinstruction):
+    """If the current rev is less than op1, jump to op2."""
+
+    def __init__(self, op1, op2):
+        self._cmprev = op1
+        self._target = op2
+
+    def __str__(self):
+        return r'JL %d %d' % (self._cmprev, self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._cmprev == other._cmprev
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(1 | (self._cmprev << 2), self._target)
+
+    def execute(self, rev, pc, emit):
+        if rev < self._cmprev:
+            return self._target
+        return pc + 1
+
+class _line(_llinstruction):
+    """Emit a line."""
+
+    def __init__(self, op1, op2):
+        # This line was introduced by this revision number.
+        self._rev = op1
+        # This line had the specified line number in the introducing revision.
+        self._origlineno = op2
+
+    def __str__(self):
+        return r'LINE %d %d' % (self._rev, self._origlineno)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._rev == other._rev
+                and self._origlineno == other._origlineno)
+
+    def encode(self):
+        return _llentry.pack(2 | (self._rev << 2), self._origlineno)
+
+    def execute(self, rev, pc, emit):
+        emit(lineinfo(self._rev, self._origlineno, pc))
+        return pc + 1
+
+def _decodeone(data, offset):
+    """Decode a single linelog instruction from an offset in a buffer."""
+    try:
+        op1, op2 = _llentry.unpack_from(data, offset)
+    except struct.error as e:
+        raise LineLogError('reading an instruction failed: %r' % e)
+    opcode = op1 & 0b11
+    op1 = op1 >> 2
+    if opcode == 0:
+        if op1 == 0:
+            if op2 == 0:
+                return _eof(op1, op2)
+            return _jump(op1, op2)
+        return _jge(op1, op2)
+    elif opcode == 1:
+        return _jl(op1, op2)
+    elif opcode == 2:
+        return _line(op1, op2)
+    raise NotImplementedError('Unimplemented opcode %r' % opcode)
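+
+# An illustration of the opcode scheme decoded above (a sketch, not used by
+# the code): the low two bits of the first word select the instruction and
+# the remaining bits carry its operand, so a LINE entry round-trips as
+#
+#   _decodeone(_line(3, 7).encode(), 0) == _line(3, 7)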
+
+class linelog(object):
+    """Efficient cache for per-line history information."""
+
+    def __init__(self, program=None, maxrev=0):
+        if program is None:
+            # We pad the program with an extra leading EOF so that our
+            # offsets will match the C code exactly. This means we can
+            # interoperate with the C code.
+            program = [_eof(0, 0), _eof(0, 0)]
+        self._program = program
+        self._lastannotate = None
+        self._maxrev = maxrev
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._program == other._program
+                and self._maxrev == other._maxrev)
+
+    def __repr__(self):
+        return '<linelog at %s: maxrev=%d size=%d>' % (
+            hex(id(self)), self._maxrev, len(self._program))
+
+    def debugstr(self):
+        fmt = r'%%%dd %%s' % len(str(len(self._program)))
+        return pycompat.sysstr('\n').join(
+            fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1))
+
+    @classmethod
+    def fromdata(cls, buf):
+        if len(buf) % _llentry.size != 0:
+            raise LineLogError(
+                "invalid linelog buffer size %d (must be a multiple of %d)" % (
+                    len(buf), _llentry.size))
+        expected = len(buf) // _llentry.size
+        fakejge = _decodeone(buf, 0)
+        if isinstance(fakejge, _jump):
+            maxrev = 0
+        else:
+            maxrev = fakejge._cmprev
+        numentries = fakejge._target
+        if expected != numentries:
+            raise LineLogError("corrupt linelog data: claimed"
+                               " %d entries but given data for %d entries" % (
+                                   expected, numentries))
+        instructions = [_eof(0, 0)]
+        for offset in pycompat.xrange(1, numentries):
+            instructions.append(_decodeone(buf, offset * _llentry.size))
+        return cls(instructions, maxrev=maxrev)
+
+    def encode(self):
+        hdr = _jge(self._maxrev, len(self._program)).encode()
+        return hdr + ''.join(i.encode() for i in self._program[1:])
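+
+    # Note that encode() reuses the JGE entry format for its header, which
+    # is how fromdata() above reads maxrev and the entry count back out of
+    # the first entry without a separate header structure.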
+
+    def clear(self):
+        self._program = []
+        self._maxrev = 0
+        self._lastannotate = None
+
+    def replacelines_vec(self, rev, a1, a2, blines):
+        return self.replacelines(rev, a1, a2, 0, len(blines),
+                                 _internal_blines=blines)
+
+    def replacelines(self, rev, a1, a2, b1, b2, _internal_blines=None):
+        """Replace lines [a1, a2) with lines [b1, b2)."""
+        if self._lastannotate:
+            # TODO(augie): make replacelines() accept a revision at
+            # which we're editing as well as a revision to mark
+            # responsible for the edits. In hg-experimental it's
+            # stateful like this, so we're doing the same thing to
+            # retain compatibility with absorb until that's imported.
+            ar = self._lastannotate
+        else:
+            ar = self.annotate(rev)
+        if a1 > len(ar.lines):
+            raise LineLogError(
+                '%d contains %d lines, tried to access line %d' % (
+                    rev, len(ar.lines), a1))
+        elif a1 == len(ar.lines):
+            # Simulated EOF instruction since we're at EOF, which
+            # doesn't have a "real" line.
+            a1inst = _eof(0, 0)
+            a1info = lineinfo(0, 0, ar._eof)
+        else:
+            a1info = ar.lines[a1]
+            a1inst = self._program[a1info._offset]
+        programlen = self._program.__len__
+        oldproglen = programlen()
+        appendinst = self._program.append
+
+        # insert
+        blineinfos = []
+        bappend = blineinfos.append
+        if b1 < b2:
+            # Determine the jump target for the JGE at the start of
+            # the new block.
+            tgt = oldproglen + (b2 - b1 + 1)
+            # Jump to skip the insert if we're at an older revision.
+            appendinst(_jl(rev, tgt))
+            for linenum in pycompat.xrange(b1, b2):
+                if _internal_blines is None:
+                    bappend(lineinfo(rev, linenum, programlen()))
+                    appendinst(_line(rev, linenum))
+                else:
+                    newrev, newlinenum = _internal_blines[linenum]
+                    bappend(lineinfo(newrev, newlinenum, programlen()))
+                    appendinst(_line(newrev, newlinenum))
+        # delete
+        if a1 < a2:
+            if a2 > len(ar.lines):
+                raise LineLogError(
+                    '%d contains %d lines, tried to access line %d' % (
+                        rev, len(ar.lines), a2))
+            elif a2 == len(ar.lines):
+                endaddr = ar._eof
+            else:
+                endaddr = ar.lines[a2]._offset
+            if a2 > 0 and rev < self._maxrev:
+                # If we're here, we're deleting a chunk of an old
+                # commit, so we need to be careful and not touch
+                # invisible lines between a2-1 and a2 (IOW, lines that
+                # are added later).
+                endaddr = ar.lines[a2 - 1]._offset + 1
+            appendinst(_jge(rev, endaddr))
+        # copy instruction from a1
+        a1instpc = programlen()
+        appendinst(a1inst)
+        # if a1inst isn't a jump or EOF, then we need to add an unconditional
+        # jump back into the program here.
+        if not isinstance(a1inst, (_jump, _eof)):
+            appendinst(_jump(0, a1info._offset + 1))
+        # Patch instruction at a1, which makes our patch live.
+        self._program[a1info._offset] = _jump(0, oldproglen)
+
+        # Update self._lastannotate in place. This serves as a cache to avoid
+        # expensive "self.annotate" in this function, when "replacelines" is
+        # used continuously.
+        if len(self._lastannotate.lines) > a1:
+            self._lastannotate.lines[a1]._offset = a1instpc
+        else:
+            assert isinstance(a1inst, _eof)
+            self._lastannotate._eof = a1instpc
+        self._lastannotate.lines[a1:a2] = blineinfos
+        self._lastannotate.rev = max(self._lastannotate.rev, rev)
+
+        if rev > self._maxrev:
+            self._maxrev = rev
+
+    def annotate(self, rev):
+        pc = 1
+        lines = []
+        executed = 0
+        # Sanity check: if the number of instructions executed exceeds
+        # len(program), we hit an infinite loop in the linelog program
+        # somehow and we should stop.
+        while pc is not None and executed < len(self._program):
+            inst = self._program[pc]
+            lastpc = pc
+            pc = inst.execute(rev, pc, lines.append)
+            executed += 1
+        if pc is not None:
+            raise LineLogError(
+                'Probably hit an infinite loop in linelog. Program:\n' +
+                self.debugstr())
+        ar = annotateresult(rev, lines, lastpc)
+        self._lastannotate = ar
+        return ar
+
+    @property
+    def maxrev(self):
+        return self._maxrev
+
+    # Stateful methods which depend on the value of the last
+    # annotation run. This API is for compatibility with the original
+    # linelog, and we should probably consider refactoring it.
+    @property
+    def annotateresult(self):
+        """Return the last annotation result. C linelog code exposed this."""
+        return [(l.rev, l.linenum) for l in self._lastannotate.lines]
+
+    def getoffset(self, line):
+        return self._lastannotate.lines[line]._offset
+
+    def getalllines(self, start=0, end=0):
+        """Get all lines that ever occurred in [start, end).
+
+        Passing start == end == 0 means "all lines ever".
+
+        This works in terms of *internal* program offsets, not line numbers.
+        """
+        pc = start or 1
+        lines = []
+        # only take as many steps as there are instructions in the
+        # program - if we don't find an EOF or our stop-line before
+        # then, something is badly broken.
+        for step in pycompat.xrange(len(self._program)):
+            inst = self._program[pc]
+            nextpc = pc + 1
+            if isinstance(inst, _jump):
+                nextpc = inst._target
+            elif isinstance(inst, _eof):
+                return lines
+            elif isinstance(inst, (_jl, _jge)):
+                pass
+            elif isinstance(inst, _line):
+                lines.append((inst._rev, inst._origlineno))
+            else:
+                raise LineLogError("Illegal instruction %r" % inst)
+            if nextpc == end:
+                return lines
+            pc = nextpc
+        raise LineLogError("Failed to perform getalllines")
--- a/mercurial/localrepo.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/localrepo.py	Tue Sep 04 12:16:28 2018 -0400
@@ -394,6 +394,7 @@
         'relshared',
         'dotencode',
         'exp-sparse',
+        'internal-phase'
     }
     openerreqs = {
         'revlogv1',
@@ -435,14 +436,6 @@
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
-        # This is only used by context.workingctx.match in order to
-        # detect files in subrepos.
-        self.auditor = pathutil.pathauditor(
-            self.root, callback=self._checknested)
-        # This is only used by context.basectx.match in order to detect
-        # files in subrepos.
-        self.nofsauditor = pathutil.pathauditor(
-            self.root, callback=self._checknested, realfs=False, cached=True)
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
@@ -495,6 +488,11 @@
                         ' dummy changelog to prevent using the old repo layout'
                     )
             else:
+                try:
+                    self.vfs.stat()
+                except OSError as inst:
+                    if inst.errno != errno.ENOENT:
+                        raise
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
@@ -703,6 +701,22 @@
     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)
 
+    # Don't cache auditor/nofsauditor, or you'll end up with a reference
+    # cycle:
+    # self -> auditor -> self._checknested -> self
+
+    @property
+    def auditor(self):
+        # This is only used by context.workingctx.match in order to
+        # detect files in subrepos.
+        return pathutil.pathauditor(self.root, callback=self._checknested)
+
+    @property
+    def nofsauditor(self):
+        # This is only used by context.basectx.match in order to detect
+        # files in subrepos.
+        return pathutil.pathauditor(self.root, callback=self._checknested,
+                                    realfs=False, cached=True)
+
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
@@ -811,7 +825,7 @@
                                " working parent %s!\n") % short(node))
             return nullid
 
-    @repofilecache(narrowspec.FILENAME)
+    @storecache(narrowspec.FILENAME)
     def narrowpats(self):
         """matcher patterns for this repository's narrowspec
 
@@ -823,9 +837,9 @@
             source = hg.sharedreposource(self)
         return narrowspec.load(source)
 
-    @repofilecache(narrowspec.FILENAME)
+    @storecache(narrowspec.FILENAME)
     def _narrowmatch(self):
-        if changegroup.NARROW_REQUIREMENT not in self.requirements:
+        if repository.NARROW_REQUIREMENT not in self.requirements:
             return matchmod.always(self.root, '')
         include, exclude = self.narrowpats
         return narrowspec.match(self.root, include=include, exclude=exclude)
@@ -850,7 +864,7 @@
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
             return [context.changectx(self, i)
-                    for i in xrange(*changeid.indices(len(self)))
+                    for i in pycompat.xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         try:
             return context.changectx(self, changeid)
@@ -860,7 +874,8 @@
     def __contains__(self, changeid):
         """True if the given changeid exists
 
-        error.LookupError is raised if an ambiguous node specified.
+        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
+        specified.
         """
         try:
             self[changeid]
@@ -1372,6 +1387,7 @@
             else:
                 # discard all changes (including ones already written
                 # out) in this transaction
+                narrowspec.restorebackup(self, 'journal.narrowspec')
                 repo.dirstate.restorebackup(None, 'journal.dirstate')
 
                 repo.invalidate(clearfilecache=True)
@@ -1385,7 +1401,7 @@
                                      releasefn=releasefn,
                                      checkambigfiles=_cachedfiles,
                                      name=desc)
-        tr.changes['revs'] = xrange(0, 0)
+        tr.changes['origrepolen'] = len(self)
         tr.changes['obsmarkers'] = set()
         tr.changes['phases'] = {}
         tr.changes['bookmarks'] = {}
@@ -1460,6 +1476,7 @@
     @unfilteredmethod
     def _writejournal(self, desc):
         self.dirstate.savebackup(None, 'journal.dirstate')
+        narrowspec.savebackup(self, 'journal.narrowspec')
         self.vfs.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.vfs.write("journal.desc",
@@ -1547,6 +1564,7 @@
             # prevent dirstateguard from overwriting already restored one
             dsguard.close()
 
+            narrowspec.restorebackup(self, 'undo.narrowspec')
             self.dirstate.restorebackup(None, 'undo.dirstate')
             try:
                 branch = self.vfs.read('undo.branch')
@@ -1601,7 +1619,7 @@
             # later call to `destroyed` will refresh them.
             return
 
-        if tr is None or tr.changes['revs']:
+        if tr is None or tr.changes['origrepolen'] < len(self):
             # updating the unfiltered branchmap should refresh all the others,
             self.ui.debug('updating the branch cache\n')
             branchmap.updatecache(self.filtered('served'))
@@ -1612,6 +1630,10 @@
                 rbc.branchinfo(r)
             rbc.write()
 
+            # ensure the working copy parents are in the manifestfulltextcache
+            for ctx in self['.'].parents():
+                ctx.manifest()  # accessing the manifest is enough
+
     def invalidatecaches(self):
 
         if '_tagscache' in vars(self):
@@ -2026,6 +2048,11 @@
     def commitctx(self, ctx, error=False):
         """Add a new revision to current repository.
         Revision information is passed via the context argument.
+
+        ctx.files() should list all files involved in this commit, i.e.
+        modified/added/removed files. On merge, it may be wider than the
+        ctx.files() of the committed revision, since file nodes derived
+        directly from p1 or p2 are excluded from the committed ctx.files().
         """
 
         tr = None
@@ -2039,6 +2066,7 @@
 
             if ctx.manifestnode():
                 # reuse an existing manifest revision
+                self.ui.debug('reusing known manifest\n')
                 mn = ctx.manifestnode()
                 files = ctx.files()
             elif ctx.files():
@@ -2077,16 +2105,31 @@
                         raise
 
                 # update manifest
-                self.ui.note(_("committing manifest\n"))
                 removed = [f for f in sorted(removed) if f in m1 or f in m2]
                 drop = [f for f in removed if f in m]
                 for f in drop:
                     del m[f]
-                mn = mctx.write(trp, linkrev,
-                                p1.manifestnode(), p2.manifestnode(),
-                                added, drop)
                 files = changed + removed
+                md = None
+                if not files:
+                    # if no "files" actually changed in terms of the changelog,
+                    # try hard to detect unmodified manifest entry so that the
+                    # exact same commit can be reproduced later on convert.
+                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
+                if not files and md:
+                    self.ui.debug('not reusing manifest (no file change in '
+                                  'changelog, but manifest differs)\n')
+                if files or md:
+                    self.ui.note(_("committing manifest\n"))
+                    mn = mctx.write(trp, linkrev,
+                                    p1.manifestnode(), p2.manifestnode(),
+                                    added, drop)
+                else:
+                    self.ui.debug('reusing manifest from p1 (listed files '
+                                  'actually unchanged)\n')
+                    mn = p1.manifestnode()
             else:
+                self.ui.debug('reusing manifest from p1 (no file change)\n')
                 mn = p1.manifestnode()
                 files = []
 
@@ -2393,5 +2436,8 @@
         # generaldelta is implied by revlogv2.
         requirements.discard('generaldelta')
         requirements.add(REVLOGV2_REQUIREMENT)
+    # experimental config: format.internal-phase
+    if repo.ui.configbool('format', 'internal-phase'):
+        requirements.add('internal-phase')
 
     return requirements
--- a/mercurial/mail.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/mail.py	Tue Sep 04 12:16:28 2018 -0400
@@ -73,15 +73,24 @@
 
     def _get_socket(self, host, port, timeout):
         if self.debuglevel > 0:
-            self._ui.debug('connect: %r\n' % (host, port))
+            self._ui.debug('connect: %r\n' % ((host, port),))
         new_socket = socket.create_connection((host, port), timeout)
         new_socket = sslutil.wrapsocket(new_socket,
                                         self.keyfile, self.certfile,
                                         ui=self._ui,
                                         serverhostname=self._host)
-        self.file = smtplib.SSLFakeFile(new_socket)
+        self.file = new_socket.makefile(r'rb')
         return new_socket
 
+def _pyhastls():
+    """Returns true iff Python has TLS support, false otherwise."""
+    try:
+        import ssl
+        getattr(ssl, 'HAS_TLS', False)
+        return True
+    except ImportError:
+        return False
+
 def _smtp(ui):
     '''build an smtp connection and return a function to send mail'''
     local_hostname = ui.config('smtp', 'local_hostname')
@@ -89,7 +98,7 @@
     # backward compatible: when tls = true, we use starttls.
     starttls = tls == 'starttls' or stringutil.parsebool(tls)
     smtps = tls == 'smtps'
-    if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
+    if (starttls or smtps) and not _pyhastls():
         raise error.Abort(_("can't use TLS: Python SSL support not installed"))
     mailhost = ui.config('smtp', 'host')
     if not mailhost:
@@ -143,8 +152,9 @@
 def _sendmail(ui, sender, recipients, msg):
     '''send mail using sendmail.'''
     program = ui.config('email', 'method')
-    cmdline = '%s -f %s %s' % (program, stringutil.email(sender),
-                               ' '.join(map(stringutil.email, recipients)))
+    stremail = lambda x: stringutil.email(encoding.strtolocal(x))
+    cmdline = '%s -f %s %s' % (program, stremail(sender),
+                               ' '.join(map(stremail, recipients)))
     ui.note(_('sending mail: %s\n') % cmdline)
     fp = procutil.popen(cmdline, 'wb')
     fp.write(util.tonativeeol(msg))
@@ -160,7 +170,8 @@
     # Should be time.asctime(), but Windows prints 2-characters day
     # of month instead of one. Make them print the same thing.
     date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime())
-    fp.write('From %s %s\n' % (sender, date))
+    fp.write('From %s %s\n' % (encoding.strtolocal(sender),
+                               encoding.strtolocal(date)))
     fp.write(msg)
     fp.write('\n\n')
     fp.close()
@@ -209,7 +220,7 @@
 
     cs = ['us-ascii', 'utf-8', encoding.encoding, encoding.fallbackencoding]
     if display:
-        return mimetextqp(s, subtype, 'us-ascii')
+        cs = ['us-ascii']
     for charset in cs:
         try:
             s.decode(pycompat.sysstr(charset))
@@ -252,10 +263,27 @@
     order. Tries both encoding and fallbackencoding for input. Only as
     last resort send as is in fake ascii.
     Caveat: Do not use for mail parts containing patches!'''
+    sendcharsets = charsets or _charsets(ui)
+    if not isinstance(s, bytes):
+        # We have unicode data, which we need to try and encode to
+        # some reasonable-ish encoding. Try the encodings the user
+        # wants, and fall back to garbage-in-ascii.
+        for ocs in sendcharsets:
+            try:
+                return s.encode(pycompat.sysstr(ocs)), ocs
+            except UnicodeEncodeError:
+                pass
+            except LookupError:
+                ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
+        # Everything failed, ascii-armor what we've got and send it.
+        return s.encode('ascii', 'backslashreplace'), 'us-ascii'
+    # We have bytes of unknown encoding. We'll try to guess a valid
+    # encoding, falling back to pretending we had ascii even though we
+    # know that's wrong.
     try:
         s.decode('ascii')
     except UnicodeDecodeError:
-        sendcharsets = charsets or _charsets(ui)
         for ics in (encoding.encoding, encoding.fallbackencoding):
             try:
                 u = s.decode(ics)
@@ -263,7 +291,7 @@
                 continue
             for ocs in sendcharsets:
                 try:
-                    return u.encode(ocs), ocs
+                    return u.encode(pycompat.sysstr(ocs)), ocs
                 except UnicodeEncodeError:
                     pass
                 except LookupError:
@@ -280,40 +308,46 @@
     return s
 
 def _addressencode(ui, name, addr, charsets=None):
+    assert isinstance(addr, bytes)
     name = headencode(ui, name, charsets)
     try:
         acc, dom = addr.split('@')
-        acc = acc.encode('ascii')
-        dom = dom.decode(encoding.encoding).encode('idna')
+        acc.decode('ascii')
+        dom = dom.decode(pycompat.sysstr(encoding.encoding)).encode('idna')
         addr = '%s@%s' % (acc, dom)
     except UnicodeDecodeError:
         raise error.Abort(_('invalid email address: %s') % addr)
     except ValueError:
         try:
             # too strict?
-            addr = addr.encode('ascii')
+            addr.decode('ascii')
         except UnicodeDecodeError:
             raise error.Abort(_('invalid local address: %s') % addr)
-    return email.utils.formataddr((name, addr))
+    return pycompat.bytesurl(
+        email.utils.formataddr((name, encoding.strfromlocal(addr))))
 
 def addressencode(ui, address, charsets=None, display=False):
     '''Turns address into RFC-2047 compliant header.'''
     if display or not address:
         return address or ''
-    name, addr = email.utils.parseaddr(address)
-    return _addressencode(ui, name, addr, charsets)
+    name, addr = email.utils.parseaddr(encoding.strfromlocal(address))
+    return _addressencode(ui, name, encoding.strtolocal(addr), charsets)
 
 def addrlistencode(ui, addrs, charsets=None, display=False):
     '''Turns a list of addresses into a list of RFC-2047 compliant headers.
     A single element of input list may contain multiple addresses, but output
     always has one address per item'''
+    for a in addrs:
+        assert isinstance(a, bytes), (r'%r unexpectedly not a bytestr' % a)
     if display:
         return [a.strip() for a in addrs if a.strip()]
 
     result = []
-    for name, addr in email.utils.getaddresses(addrs):
+    for name, addr in email.utils.getaddresses(
+            [encoding.strfromlocal(a) for a in addrs]):
         if name or addr:
-            result.append(_addressencode(ui, name, addr, charsets))
+            r = _addressencode(ui, name, encoding.strtolocal(addr), charsets)
+            result.append(r)
     return result
 
 def mimeencode(ui, s, charsets=None, display=False):
--- a/mercurial/manifest.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/manifest.py	Tue Sep 04 12:16:28 2018 -0400
@@ -10,11 +10,14 @@
 import heapq
 import itertools
 import struct
+import weakref
 
 from .i18n import _
 from .node import (
     bin,
     hex,
+    nullid,
+    nullrev,
 )
 from . import (
     error,
@@ -54,12 +57,11 @@
 def _text(it):
     files = []
     lines = []
-    _hex = revlog.hex
     for f, n, fl in it:
         files.append(f)
         # if this is changed to support newlines in filenames,
         # be sure to check the templates/ dir again (especially *-raw.tmpl)
-        lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
+        lines.append("%s\0%s%s\n" % (f, hex(n), fl))
 
     _checkforbidden(files)
     return ''.join(lines)
@@ -565,7 +567,7 @@
                 start, end = _msearch(addbuf, f, start)
                 if not todelete:
                     h, fl = self._lm[f]
-                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
+                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                 else:
                     if start == end:
                         # item we want to delete was not found, error out
@@ -679,7 +681,7 @@
 class treemanifest(object):
     def __init__(self, dir='', text=''):
         self._dir = dir
-        self._node = revlog.nullid
+        self._node = nullid
         self._loadfunc = _noop
         self._copyfunc = _noop
         self._dirty = False
@@ -717,7 +719,7 @@
 
     def __repr__(self):
         return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
-                (self._dir, revlog.hex(self._node),
+                (self._dir, hex(self._node),
                  bool(self._loadfunc is _noop),
                  self._dirty, id(self)))
 
@@ -1116,7 +1118,7 @@
         for d, subm in self._dirs.iteritems():
             subp1 = m1._dirs.get(d, emptytree)._node
             subp2 = m2._dirs.get(d, emptytree)._node
-            if subp1 == revlog.nullid:
+            if subp1 == nullid:
                 subp1, subp2 = subp2, subp1
             writesubtree(subm, subp1, subp2)
 
@@ -1136,11 +1138,121 @@
             for subtree in subm.walksubtrees(matcher=matcher):
                 yield subtree
 
-class manifestrevlog(revlog.revlog):
+class manifestfulltextcache(util.lrucachedict):
+    """File-backed LRU cache for the manifest cache
+
+    File consists of entries, up to EOF:
+
+    - 20 bytes node, 4 bytes length, <length> manifest data
+
+    These are written in reverse cache order (oldest to newest).
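+
+    For example, a record for node n holding manifest text t is laid out as
+    n + struct.pack('>L', len(t)) + t.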
+
+    """
+    def __init__(self, max):
+        super(manifestfulltextcache, self).__init__(max)
+        self._dirty = False
+        self._read = False
+        self._opener = None
+
+    def read(self):
+        if self._read or self._opener is None:
+            return
+
+        try:
+            with self._opener('manifestfulltextcache') as fp:
+                setitem = super(manifestfulltextcache, self).__setitem__
+                # ignore trailing data, this is a cache, corruption is skipped
+                while True:
+                    node = fp.read(20)
+                    if len(node) < 20:
+                        break
+                    try:
+                        size = struct.unpack('>L', fp.read(4))[0]
+                    except struct.error:
+                        break
+                    value = bytearray(fp.read(size))
+                    if len(value) != size:
+                        break
+                    setitem(node, value)
+        except IOError:
+            # the file is allowed to be missing
+            pass
+
+        self._read = True
+        self._dirty = False
+
+    def write(self):
+        if not self._dirty or self._opener is None:
+            return
+        # rotate backwards to the first used node
+        with self._opener(
+                'manifestfulltextcache', 'w', atomictemp=True, checkambig=True
+            ) as fp:
+            node = self._head.prev
+            while True:
+                if node.key in self._cache:
+                    fp.write(node.key)
+                    fp.write(struct.pack('>L', len(node.value)))
+                    fp.write(node.value)
+                if node is self._head:
+                    break
+                node = node.prev
+        self._dirty = False
+
+    def __len__(self):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__len__()
+
+    def __contains__(self, k):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__contains__(k)
+
+    def __iter__(self):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__iter__()
+
+    def __getitem__(self, k):
+        if not self._read:
+            self.read()
+        # the cache lru order can change on read
+        setdirty = self._cache.get(k) is not self._head
+        value = super(manifestfulltextcache, self).__getitem__(k)
+        if setdirty:
+            self._dirty = True
+        return value
+
+    def __setitem__(self, k, v):
+        if not self._read:
+            self.read()
+        super(manifestfulltextcache, self).__setitem__(k, v)
+        self._dirty = True
+
+    def __delitem__(self, k):
+        if not self._read:
+            self.read()
+        super(manifestfulltextcache, self).__delitem__(k)
+        self._dirty = True
+
+    def get(self, k, default=None):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).get(k, default=default)
+
+    def clear(self, clear_persisted_data=False):
+        super(manifestfulltextcache, self).clear()
+        if clear_persisted_data:
+            self._dirty = True
+            self.write()
+        self._read = False
+
+@interfaceutil.implementer(repository.imanifeststorage)
+class manifestrevlog(object):
     '''A revlog that stores manifest texts. This is responsible for caching the
     full-text manifest contents.
     '''
-    def __init__(self, opener, dir='', dirlogcache=None, indexfile=None,
+    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                  treemanifest=False):
         """Constructs a new manifest revlog
 
@@ -1164,36 +1276,63 @@
 
         self._treeondisk = optiontreemanifest or treemanifest
 
-        self._fulltextcache = util.lrucachedict(cachesize)
+        self._fulltextcache = manifestfulltextcache(cachesize)
 
-        if dir:
+        if tree:
             assert self._treeondisk, 'opts is %r' % opts
 
         if indexfile is None:
             indexfile = '00manifest.i'
-            if dir:
-                indexfile = "meta/" + dir + indexfile
+            if tree:
+                indexfile = "meta/" + tree + indexfile
 
-        self._dir = dir
+        self.tree = tree
+
         # The dirlogcache is kept on the root manifest log
-        if dir:
+        if tree:
             self._dirlogcache = dirlogcache
         else:
             self._dirlogcache = {'': self}
 
-        super(manifestrevlog, self).__init__(opener, indexfile,
-                                             # only root indexfile is cached
-                                             checkambig=not bool(dir),
-                                             mmaplargeindex=True)
+        self._revlog = revlog.revlog(opener, indexfile,
+                                     # only root indexfile is cached
+                                     checkambig=not bool(tree),
+                                     mmaplargeindex=True)
+
+        self.index = self._revlog.index
+        self.version = self._revlog.version
+        self._generaldelta = self._revlog._generaldelta
+
+    def _setupmanifestcachehooks(self, repo):
+        """Persist the manifestfulltextcache on lock release"""
+        if not util.safehasattr(repo, '_lockref'):
+            return
+
+        self._fulltextcache._opener = repo.cachevfs
+        reporef = weakref.ref(repo)
+        manifestrevlogref = weakref.ref(self)
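+        # both references are weak so that this hook does not keep the repo
+        # or this revlog alive once their last strong reference goes away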
+
+        def persistmanifestcache():
+            repo = reporef()
+            self = manifestrevlogref()
+            if repo is None or self is None:
+                return
+            if repo.manifestlog.getstorage(b'') is not self:
+                # there's a different manifest in play now, abort
+                return
+            self._fulltextcache.write()
+
+        if repo._currentlock(repo._lockref) is not None:
+            repo._afterlock(persistmanifestcache)
 
     @property
     def fulltextcache(self):
         return self._fulltextcache
 
-    def clearcaches(self):
-        super(manifestrevlog, self).clearcaches()
-        self._fulltextcache.clear()
-        self._dirlogcache = {'': self}
+    def clearcaches(self, clear_persisted_data=False):
+        self._revlog.clearcaches()
+        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
+        self._dirlogcache = {self.tree: self}
 
     def dirlog(self, d):
         if d:
@@ -1218,9 +1357,10 @@
                                [(x, True) for x in removed])
 
             arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
-            cachedelta = self.rev(p1), deltatext
+            cachedelta = self._revlog.rev(p1), deltatext
             text = util.buffer(arraytext)
-            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
+            n = self._revlog.addrevision(text, transaction, link, p1, p2,
+                                         cachedelta)
         else:
             # The first parent manifest isn't already loaded, so we'll
             # just encode a fulltext of the manifest and pass that
@@ -1228,13 +1368,13 @@
             # process.
             if self._treeondisk:
                 assert readtree, "readtree must be set for treemanifest writes"
-                m1 = readtree(self._dir, p1)
-                m2 = readtree(self._dir, p2)
+                m1 = readtree(self.tree, p1)
+                m2 = readtree(self.tree, p2)
                 n = self._addtree(m, transaction, link, m1, m2, readtree)
                 arraytext = None
             else:
                 text = m.text()
-                n = self.addrevision(text, transaction, link, p1, p2)
+                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                 arraytext = bytearray(text)
 
         if arraytext is not None:
@@ -1245,7 +1385,8 @@
     def _addtree(self, m, transaction, link, m1, m2, readtree):
         # If the manifest is unchanged compared to one parent,
         # don't write a new revision
-        if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)):
+        if self.tree != '' and (m.unmodifiedsince(m1)
+                                or m.unmodifiedsince(m2)):
             return m.node()
         def writesubtree(subm, subp1, subp2):
             sublog = self.dirlog(subm.dir())
@@ -1254,7 +1395,7 @@
         m.writesubtrees(m1, m2, writesubtree)
         text = m.dirtext()
         n = None
-        if self._dir != '':
+        if self.tree != '':
             # Double-check whether contents are unchanged to one parent
             if text == m1.dirtext():
                 n = m1.node()
@@ -1262,12 +1403,90 @@
                 n = m2.node()
 
         if not n:
-            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
+            n = self._revlog.addrevision(text, transaction, link, m1.node(),
+                                         m2.node())
 
         # Save nodeid so parent manifest can calculate its nodeid
         m.setnode(n)
         return n
 
+    def __len__(self):
+        return len(self._revlog)
+
+    def __iter__(self):
+        return self._revlog.__iter__()
+
+    def rev(self, node):
+        return self._revlog.rev(node)
+
+    def node(self, rev):
+        return self._revlog.node(rev)
+
+    def lookup(self, value):
+        return self._revlog.lookup(value)
+
+    def parentrevs(self, rev):
+        return self._revlog.parentrevs(rev)
+
+    def parents(self, node):
+        return self._revlog.parents(node)
+
+    def linkrev(self, rev):
+        return self._revlog.linkrev(rev)
+
+    def checksize(self):
+        return self._revlog.checksize()
+
+    def revision(self, node, _df=None, raw=False):
+        return self._revlog.revision(node, _df=_df, raw=raw)
+
+    def revdiff(self, rev1, rev2):
+        return self._revlog.revdiff(rev1, rev2)
+
+    def cmp(self, node, text):
+        return self._revlog.cmp(node, text)
+
+    def deltaparent(self, rev):
+        return self._revlog.deltaparent(rev)
+
+    def emitrevisiondeltas(self, requests):
+        return self._revlog.emitrevisiondeltas(requests)
+
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
+        return self._revlog.addgroup(deltas, linkmapper, transaction,
+                                     addrevisioncb=addrevisioncb)
+
+    def getstrippoint(self, minlink):
+        return self._revlog.getstrippoint(minlink)
+
+    def strip(self, minlink, transaction):
+        return self._revlog.strip(minlink, transaction)
+
+    def files(self):
+        return self._revlog.files()
+
+    def clone(self, tr, destrevlog, **kwargs):
+        if not isinstance(destrevlog, manifestrevlog):
+            raise error.ProgrammingError('expected manifestrevlog to clone()')
+
+        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
+
+    @property
+    def indexfile(self):
+        return self._revlog.indexfile
+
+    @indexfile.setter
+    def indexfile(self, value):
+        self._revlog.indexfile = value
+
+    @property
+    def opener(self):
+        return self._revlog.opener
+
+    @opener.setter
+    def opener(self, value):
+        self._revlog.opener = value
+
 @interfaceutil.implementer(repository.imanifestlog)
 class manifestlog(object):
     """A collection class representing the collection of manifest snapshots
@@ -1285,9 +1504,11 @@
         if opts is not None:
             usetreemanifest = opts.get('treemanifest', usetreemanifest)
             cachesize = opts.get('manifestcachesize', cachesize)
-        self._treeinmem = usetreemanifest
+
+        self._treemanifests = usetreemanifest
 
-        self._revlog = repo._constructmanifest()
+        self._rootstore = repo._constructmanifest()
+        self._rootstore._setupmanifestcachehooks(repo)
         self._narrowmatch = repo.narrowmatch()
 
         # A cache of the manifestctx or treemanifestctx for each directory
@@ -1302,58 +1523,58 @@
         """
         return self.get('', node)
 
-    def get(self, dir, node, verify=True):
+    def get(self, tree, node, verify=True):
         """Retrieves the manifest instance for the given node. Throws a
         LookupError if not found.
 
         `verify` - if True an exception will be thrown if the node is not in
                    the revlog
         """
-        if node in self._dirmancache.get(dir, ()):
-            return self._dirmancache[dir][node]
+        if node in self._dirmancache.get(tree, ()):
+            return self._dirmancache[tree][node]
 
         if not self._narrowmatch.always():
-            if not self._narrowmatch.visitdir(dir[:-1] or '.'):
-                return excludeddirmanifestctx(dir, node)
-        if dir:
-            if self._revlog._treeondisk:
+            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
+                return excludeddirmanifestctx(tree, node)
+        if tree:
+            if self._rootstore._treeondisk:
                 if verify:
-                    dirlog = self._revlog.dirlog(dir)
-                    if node not in dirlog.nodemap:
-                        raise LookupError(node, dirlog.indexfile,
-                                          _('no node'))
-                m = treemanifestctx(self, dir, node)
+                    # Side-effect is LookupError is raised if node doesn't
+                    # exist.
+                    self.getstorage(tree).rev(node)
+
+                m = treemanifestctx(self, tree, node)
             else:
                 raise error.Abort(
                         _("cannot ask for manifest directory '%s' in a flat "
-                          "manifest") % dir)
+                          "manifest") % tree)
         else:
             if verify:
-                if node not in self._revlog.nodemap:
-                    raise LookupError(node, self._revlog.indexfile,
-                                      _('no node'))
-            if self._treeinmem:
+                # Side-effect is LookupError is raised if node doesn't exist.
+                self._rootstore.rev(node)
+
+            if self._treemanifests:
                 m = treemanifestctx(self, '', node)
             else:
                 m = manifestctx(self, node)
 
-        if node != revlog.nullid:
-            mancache = self._dirmancache.get(dir)
+        if node != nullid:
+            mancache = self._dirmancache.get(tree)
             if not mancache:
                 mancache = util.lrucachedict(self._cachesize)
-                self._dirmancache[dir] = mancache
+                self._dirmancache[tree] = mancache
             mancache[node] = m
         return m
 
-    def clearcaches(self):
+    def getstorage(self, tree):
+        return self._rootstore.dirlog(tree)
+
+    def clearcaches(self, clear_persisted_data=False):
         self._dirmancache.clear()
-        self._revlog.clearcaches()
+        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
 
     def rev(self, node):
-        return self._revlog.rev(node)
-
-    def addgroup(self, deltas, linkmapper, transaction):
-        return self._revlog.addgroup(deltas, linkmapper, transaction)
+        return self._rootstore.rev(node)
 
 @interfaceutil.implementer(repository.imanifestrevisionwritable)
 class memmanifestctx(object):
@@ -1361,8 +1582,8 @@
         self._manifestlog = manifestlog
         self._manifestdict = manifestdict()
 
-    def _revlog(self):
-        return self._manifestlog._revlog
+    def _storage(self):
+        return self._manifestlog.getstorage(b'')
 
     def new(self):
         return memmanifestctx(self._manifestlog)
@@ -1376,8 +1597,8 @@
         return self._manifestdict
 
     def write(self, transaction, link, p1, p2, added, removed):
-        return self._revlog().add(self._manifestdict, transaction, link, p1, p2,
-                                  added, removed)
+        return self._storage().add(self._manifestdict, transaction, link,
+                                   p1, p2, added, removed)
 
 @interfaceutil.implementer(repository.imanifestrevisionstored)
 class manifestctx(object):
@@ -1393,12 +1614,12 @@
         # TODO: We eventually want p1, p2, and linkrev exposed on this class,
         # but let's add it later when something needs it and we can load it
         # lazily.
-        #self.p1, self.p2 = revlog.parents(node)
-        #rev = revlog.rev(node)
-        #self.linkrev = revlog.linkrev(rev)
+        #self.p1, self.p2 = store.parents(node)
+        #rev = store.rev(node)
+        #self.linkrev = store.linkrev(rev)
 
-    def _revlog(self):
-        return self._manifestlog._revlog
+    def _storage(self):
+        return self._manifestlog.getstorage(b'')
 
     def node(self):
         return self._node
@@ -1413,17 +1634,20 @@
 
     @propertycache
     def parents(self):
-        return self._revlog().parents(self._node)
+        return self._storage().parents(self._node)
 
     def read(self):
         if self._data is None:
-            if self._node == revlog.nullid:
+            if self._node == nullid:
                 self._data = manifestdict()
             else:
-                rl = self._revlog()
-                text = rl.revision(self._node)
-                arraytext = bytearray(text)
-                rl._fulltextcache[self._node] = arraytext
+                store = self._storage()
+                if self._node in store.fulltextcache:
+                    text = pycompat.bytestr(store.fulltextcache[self._node])
+                else:
+                    text = store.revision(self._node)
+                    arraytext = bytearray(text)
+                    store.fulltextcache[self._node] = arraytext
                 self._data = manifestdict(text)
         return self._data
 
@@ -1434,10 +1658,10 @@
 
         If `shallow` is True, nothing changes since this is a flat manifest.
         '''
-        rl = self._revlog()
-        r = rl.rev(self._node)
-        deltaparent = rl.deltaparent(r)
-        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
+        store = self._storage()
+        r = store.rev(self._node)
+        deltaparent = store.deltaparent(r)
+        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
             return self.readdelta()
         return self.read()
 
@@ -1448,9 +1672,9 @@
 
         Changing the value of `shallow` has no effect on flat manifests.
         '''
-        revlog = self._revlog()
-        r = revlog.rev(self._node)
-        d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
+        store = self._storage()
+        r = store.rev(self._node)
+        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
         return manifestdict(d)
 
     def find(self, key):
@@ -1463,8 +1687,8 @@
         self._dir = dir
         self._treemanifest = treemanifest()
 
-    def _revlog(self):
-        return self._manifestlog._revlog
+    def _storage(self):
+        return self._manifestlog.getstorage(b'')
 
     def new(self, dir=''):
         return memtreemanifestctx(self._manifestlog, dir=dir)
@@ -1480,8 +1704,8 @@
     def write(self, transaction, link, p1, p2, added, removed):
         def readtree(dir, node):
             return self._manifestlog.get(dir, node).read()
-        return self._revlog().add(self._treemanifest, transaction, link, p1, p2,
-                                  added, removed, readtree=readtree)
+        return self._storage().add(self._treemanifest, transaction, link,
+                                   p1, p2, added, removed, readtree=readtree)
 
 @interfaceutil.implementer(repository.imanifestrevisionstored)
 class treemanifestctx(object):
@@ -1495,26 +1719,27 @@
         # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
         # we can instantiate treemanifestctx objects for directories we don't
         # have on disk.
-        #self.p1, self.p2 = revlog.parents(node)
-        #rev = revlog.rev(node)
-        #self.linkrev = revlog.linkrev(rev)
+        #self.p1, self.p2 = store.parents(node)
+        #rev = store.rev(node)
+        #self.linkrev = store.linkrev(rev)
 
-    def _revlog(self):
+    def _storage(self):
         narrowmatch = self._manifestlog._narrowmatch
         if not narrowmatch.always():
             if not narrowmatch.visitdir(self._dir[:-1] or '.'):
                 return excludedmanifestrevlog(self._dir)
-        return self._manifestlog._revlog.dirlog(self._dir)
+        return self._manifestlog.getstorage(self._dir)
 
     def read(self):
         if self._data is None:
-            rl = self._revlog()
-            if self._node == revlog.nullid:
+            store = self._storage()
+            if self._node == nullid:
                 self._data = treemanifest()
-            elif rl._treeondisk:
+            # TODO accessing non-public API
+            elif store._treeondisk:
                 m = treemanifest(dir=self._dir)
                 def gettext():
-                    return rl.revision(self._node)
+                    return store.revision(self._node)
                 def readsubtree(dir, subm):
                     # Set verify to False since we need to be able to create
                     # subtrees for trees that don't exist on disk.
@@ -1523,9 +1748,12 @@
                 m.setnode(self._node)
                 self._data = m
             else:
-                text = rl.revision(self._node)
-                arraytext = bytearray(text)
-                rl.fulltextcache[self._node] = arraytext
+                if self._node in store.fulltextcache:
+                    text = pycompat.bytestr(store.fulltextcache[self._node])
+                else:
+                    text = store.revision(self._node)
+                    arraytext = bytearray(text)
+                    store.fulltextcache[self._node] = arraytext
                 self._data = treemanifest(dir=self._dir, text=text)
 
         return self._data
@@ -1543,7 +1771,7 @@
 
     @propertycache
     def parents(self):
-        return self._revlog().parents(self._node)
+        return self._storage().parents(self._node)
 
     def readdelta(self, shallow=False):
         '''Returns a manifest containing just the entries that are present
@@ -1556,15 +1784,15 @@
         the subdirectory will be reported among files and distinguished only by
         its 't' flag.
         '''
-        revlog = self._revlog()
+        store = self._storage()
         if shallow:
-            r = revlog.rev(self._node)
-            d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
+            r = store.rev(self._node)
+            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
             return manifestdict(d)
         else:
             # Need to perform a slow delta
-            r0 = revlog.deltaparent(revlog.rev(self._node))
-            m0 = self._manifestlog.get(self._dir, revlog.node(r0)).read()
+            r0 = store.deltaparent(store.rev(self._node))
+            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
             m1 = self.read()
             md = treemanifest(dir=self._dir)
             for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
@@ -1582,15 +1810,15 @@
         If `shallow` is True, it only returns the entries from this manifest,
         and not any submanifests.
         '''
-        rl = self._revlog()
-        r = rl.rev(self._node)
-        deltaparent = rl.deltaparent(r)
-        if (deltaparent != revlog.nullrev and
-            deltaparent in rl.parentrevs(r)):
+        store = self._storage()
+        r = store.rev(self._node)
+        deltaparent = store.deltaparent(r)
+        if (deltaparent != nullrev and
+            deltaparent in store.parentrevs(r)):
             return self.readdelta(shallow=shallow)
 
         if shallow:
-            return manifestdict(rl.revision(self._node))
+            return manifestdict(store.revision(self._node))
         else:
             return self.read()
 
--- a/mercurial/match.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/match.py	Tue Sep 04 12:16:28 2018 -0400
@@ -8,6 +8,7 @@
 from __future__ import absolute_import, print_function
 
 import copy
+import itertools
 import os
 import re
 
@@ -331,6 +332,49 @@
         '''
         return True
 
+    def visitchildrenset(self, dir):
+        '''Decides whether a directory should be visited based on whether it
+        has potential matches in it or one of its subdirectories, and
+        potentially lists which subdirectories of that directory should be
+        visited. This is based on the match's primary, included, and excluded
+        patterns.
+
+        This function is very similar to 'visitdir', and the following mapping
+        can be applied:
+
+             visitdir | visitchildrenset
+            ----------+-------------------
+             False    | set()
+             'all'    | 'all'
+             True     | 'this' OR non-empty set of subdirs -or files- to visit
+
+        Example:
+          Assume matchers ['path:foo/bar', 'rootfilesin:qux']; we would return
+          the following values (assuming the implementation of visitchildrenset
+          is capable of recognizing this; some implementations are not).
+
+          '.' -> {'foo', 'qux'}
+          'baz' -> set()
+          'foo' -> {'bar'}
+          # Ideally this would be 'all', but since the prefix nature of matchers
+          # is applied to the entire matcher, we have to downgrade this to
+          # 'this' due to the non-prefix 'rootfilesin'-kind matcher being mixed
+          # in.
+          'foo/bar' -> 'this'
+          'qux' -> 'this'
+
+        Important:
+          Most matchers do not know if they're representing files or
+          directories. They see ['path:dir/f'] and don't know whether 'f' is a
+          file or a directory, so visitchildrenset('dir') for most matchers will
+          return {'f'}, but if the matcher knows it's a file (like exactmatcher
+          does), it may return 'this'. Do not rely on the return being a set
+          indicating that there are no files in this dir to investigate (or
+          equivalently, that if there are files to investigate in 'dir' it
+          will always return 'this').
+        '''
+        return 'this'
+
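To make the mapping above concrete, the docstring's own example can be rendered as a tiny runnable check; the matcher here is hypothetical and the expected values are hardcoded straight from the example:

    # Expected visitchildrenset() results for the matchers
    # ['path:foo/bar', 'rootfilesin:qux'] described above.
    expected = {
        '.': {'foo', 'qux'},
        'baz': set(),
        'foo': {'bar'},
        'foo/bar': 'this',   # downgraded from 'all' by the rootfilesin matcher
        'qux': 'this',
    }

    def visitchildrenset(dir):
        return expected.get(dir, set())

    assert visitchildrenset('.') == {'foo', 'qux'}
    assert visitchildrenset('foo') == {'bar'}
    assert visitchildrenset('foo/bar') == 'this'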
     def always(self):
         '''Matcher will match everything and .files() will be empty --
         optimization might be possible.'''
@@ -367,6 +411,9 @@
     def visitdir(self, dir):
         return 'all'
 
+    def visitchildrenset(self, dir):
+        return 'all'
+
     def __repr__(self):
         return r'<alwaysmatcher>'
 
@@ -390,6 +437,9 @@
     def visitdir(self, dir):
         return False
 
+    def visitchildrenset(self, dir):
+        return set()
+
     def __repr__(self):
         return r'<nevermatcher>'
 
@@ -430,6 +480,15 @@
                 any(parentdir in self._fileset
                     for parentdir in util.finddirs(dir)))
 
+    def visitchildrenset(self, dir):
+        ret = self.visitdir(dir)
+        if ret is True:
+            return 'this'
+        elif not ret:
+            return set()
+        assert ret == 'all'
+        return 'all'
+
     def prefix(self):
         return self._prefix
 
@@ -445,11 +504,14 @@
         self._pats, self.matchfn = _buildmatch(kindpats, '(?:/|$)',
                                                listsubrepos, root)
         self._prefix = _prefix(kindpats)
-        roots, dirs = _rootsanddirs(kindpats)
+        roots, dirs, parents = _rootsdirsandparents(kindpats)
         # roots are directories which are recursively included.
         self._roots = set(roots)
         # dirs are directories which are non-recursively included.
         self._dirs = set(dirs)
+        # parents are directories which are non-recursively included because
+        # they are needed to get to items in _dirs or _roots.
+        self._parents = set(parents)
 
     def visitdir(self, dir):
         if self._prefix and dir in self._roots:
@@ -457,9 +519,47 @@
         return ('.' in self._roots or
                 dir in self._roots or
                 dir in self._dirs or
+                dir in self._parents or
                 any(parentdir in self._roots
                     for parentdir in util.finddirs(dir)))
 
+    def visitchildrenset(self, dir):
+        if self._prefix and dir in self._roots:
+            return 'all'
+        # Note: this does *not* include the 'dir in self._parents' case from
+        # visitdir, that's handled below.
+        if ('.' in self._roots or
+            dir in self._roots or
+            dir in self._dirs or
+            any(parentdir in self._roots
+                for parentdir in util.finddirs(dir))):
+            return 'this'
+
+        ret = set()
+        if dir in self._parents:
+            # We add a '/' on to `dir` so that we don't return items that are
+            # prefixed by `dir` but are actually siblings of `dir`.
+            suffixeddir = dir + '/' if dir != '.' else ''
+            # Look in all _roots, _dirs, and _parents for things that start with
+            # 'suffixeddir'.
+            for d in [q for q in
+                      itertools.chain(self._roots, self._dirs, self._parents) if
+                      q.startswith(suffixeddir)]:
+                # Don't emit '.' in the response for the root directory
+                if not suffixeddir and d == '.':
+                    continue
+
+                # We return the item name without the `suffixeddir` prefix or a
+                # slash suffix
+                d = d[len(suffixeddir):]
+                if '/' in d:
+                    # This is a subdirectory-of-a-subdirectory, i.e.
+                    # suffixeddir='foo/', d was 'foo/bar/baz' before removing
+                    # 'foo/'.
+                    d = d[:d.index('/')]
+                ret.add(d)
+        return ret
+
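The loop above reduces full paths to the names of immediate children of `dir`. The same reduction as a standalone, hedged sketch (childrenof and the sample paths are illustrative only):

    import itertools

    def childrenof(dir, roots, dirs, parents):
        # Strip the 'dir/' prefix and keep only the first path component,
        # as includematcher.visitchildrenset() does above.
        suffixeddir = dir + '/' if dir != '.' else ''
        ret = set()
        for d in itertools.chain(roots, dirs, parents):
            if not d.startswith(suffixeddir):
                continue
            if not suffixeddir and d == '.':
                continue                      # never report '.' for the root
            d = d[len(suffixeddir):]
            if '/' in d:
                d = d[:d.index('/')]          # keep only the immediate child
            ret.add(d)
        return ret

    assert childrenof('.', {'a/b'}, set(), {'a', '.'}) == {'a'}
    assert childrenof('a', {'a/b'}, set(), {'a', '.'}) == {'b'}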
     @encoding.strmethod
     def __repr__(self):
         return ('<includematcher includes=%r>' % pycompat.bytestr(self._pats))
@@ -486,6 +586,26 @@
     def visitdir(self, dir):
         return dir in self._dirs
 
+    def visitchildrenset(self, dir):
+        if not self._fileset or dir not in self._dirs:
+            return set()
+
+        candidates = self._fileset | self._dirs - {'.'}
+        if dir != '.':
+            d = dir + '/'
+            candidates = set(c[len(d):] for c in candidates if
+                             c.startswith(d))
+        # self._dirs includes all of the directories, recursively, so if
+        # we're attempting to match foo/bar/baz.txt, it'll have '.', 'foo',
+        # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
+        # '/' in it, indicating it's for a subdir-of-a-subdir; the
+        # immediate subdir will be in there without a slash.
+        ret = {c for c in candidates if '/' not in c}
+        # We really do not expect ret to be empty, since that would imply that
+        # there's something in _dirs that didn't have a file in _fileset.
+        assert ret
+        return ret
+
     def isexact(self):
         return True
 
@@ -527,6 +647,31 @@
             return False
         return bool(self._m1.visitdir(dir))
 
+    def visitchildrenset(self, dir):
+        m2_set = self._m2.visitchildrenset(dir)
+        if m2_set == 'all':
+            return set()
+        m1_set = self._m1.visitchildrenset(dir)
+        # Possible values for m1: 'all', 'this', set(...), set()
+        # Possible values for m2:        'this', set(...), set()
+        # If m2 has nothing under here that we care about, return m1, even if
+        # it's 'all'. This is a change in behavior from visitdir, which would
+        # return True, not 'all', for some reason.
+        if not m2_set:
+            return m1_set
+        if m1_set in ['all', 'this']:
+            # Never return 'all' here if m2_set is any kind of non-empty (either
+            # 'this' or set(foo)), since m2 might return set() for a
+            # subdirectory.
+            return 'this'
+        # Possible values for m1:         set(...), set()
+        # Possible values for m2: 'this', set(...)
+        # We ignore m2's set results. They're possibly incorrect:
+        #  m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset('.'):
+        #    m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
+        #    return set(), which is *not* correct, we still need to visit 'dir'!
+        return m1_set
+
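The combination rules above can be summarized as a small pure function; this is a hedged restatement for illustration, not the method itself:

    def difference_children(m1_set, m2_set):
        # m1 minus m2: if m2 excludes everything ('all'), nothing survives;
        # if m2 excludes nothing here, m1's answer stands unchanged.
        if m2_set == 'all':
            return set()
        if not m2_set:
            return m1_set
        if m1_set in ('all', 'this'):
            return 'this'        # never 'all': m2 may still prune a subdir
        return m1_set            # ignore m2's set, it may be incomplete

    assert difference_children('all', set()) == 'all'
    assert difference_children('all', 'this') == 'this'
    assert difference_children({'dir'}, {'dir'}) == {'dir'}  # still visit 'dir'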
     def isexact(self):
         return self._m1.isexact()
 
@@ -591,6 +736,25 @@
         # bool() because visit1=True + visit2='all' should not be 'all'
         return bool(visit1 and self._m2.visitdir(dir))
 
+    def visitchildrenset(self, dir):
+        m1_set = self._m1.visitchildrenset(dir)
+        if not m1_set:
+            return set()
+        m2_set = self._m2.visitchildrenset(dir)
+        if not m2_set:
+            return set()
+
+        if m1_set == 'all':
+            return m2_set
+        elif m2_set == 'all':
+            return m1_set
+
+        if m1_set == 'this' or m2_set == 'this':
+            return 'this'
+
+        assert isinstance(m1_set, set) and isinstance(m2_set, set)
+        return m1_set.intersection(m2_set)
+
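The intersection rules likewise reduce to a small table; again a hedged restatement of the logic above:

    def intersection_children(m1_set, m2_set):
        # Either side returning nothing empties the intersection outright.
        if not m1_set or not m2_set:
            return set()
        if m1_set == 'all':
            return m2_set
        if m2_set == 'all':
            return m1_set
        if m1_set == 'this' or m2_set == 'this':
            return 'this'
        return m1_set & m2_set   # both sides are concrete sets

    assert intersection_children('all', {'a', 'b'}) == {'a', 'b'}
    assert intersection_children({'a', 'b'}, {'b', 'c'}) == {'b'}
    assert intersection_children('this', {'a'}) == 'this'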
     def always(self):
         return self._m1.always() and self._m2.always()
 
@@ -672,6 +836,13 @@
             dir = self._path + "/" + dir
         return self._matcher.visitdir(dir)
 
+    def visitchildrenset(self, dir):
+        if dir == '.':
+            dir = self._path
+        else:
+            dir = self._path + "/" + dir
+        return self._matcher.visitchildrenset(dir)
+
     def always(self):
         return self._always
 
@@ -744,6 +915,15 @@
             return self._matcher.visitdir(dir[len(self._pathprefix):])
         return dir in self._pathdirs
 
+    def visitchildrenset(self, dir):
+        if dir == self._path:
+            return self._matcher.visitchildrenset('.')
+        if dir.startswith(self._pathprefix):
+            return self._matcher.visitchildrenset(dir[len(self._pathprefix):])
+        if dir in self._pathdirs:
+            return 'this'
+        return set()
+
     def isexact(self):
         return self._matcher.isexact()
 
@@ -784,6 +964,25 @@
             r |= v
         return r
 
+    def visitchildrenset(self, dir):
+        r = set()
+        this = False
+        for m in self._matchers:
+            v = m.visitchildrenset(dir)
+            if not v:
+                continue
+            if v == 'all':
+                return v
+            if this or v == 'this':
+                this = True
+                # don't break, we might have an 'all' in here.
+                continue
+            assert isinstance(v, set)
+            r = r.union(v)
+        if this:
+            return 'this'
+        return r
+
     @encoding.strmethod
     def __repr__(self):
         return ('<unionmatcher matchers=%r>' % self._matchers)
@@ -1004,40 +1203,42 @@
     roots, dirs = _patternrootsanddirs(kindpats)
     return roots
 
-def _rootsanddirs(kindpats):
+def _rootsdirsandparents(kindpats):
     '''Returns roots and exact directories from patterns.
 
-    roots are directories to match recursively, whereas exact directories should
-    be matched non-recursively. The returned (roots, dirs) tuple will also
-    include directories that need to be implicitly considered as either, such as
-    parent directories.
+    `roots` are directories to match recursively, `dirs` should
+    be matched non-recursively, and `parents` are the implicitly required
+    directories to walk to items in either roots or dirs.
 
-    >>> _rootsanddirs(
+    Returns a tuple of (roots, dirs, parents).
+
+    >>> _rootsdirsandparents(
     ...     [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
     ...      (b'glob', b'g*', b'')])
-    (['g/h', 'g/h', '.'], ['g', '.'])
-    >>> _rootsanddirs(
+    (['g/h', 'g/h', '.'], [], ['g', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
-    ([], ['g/h', '.', 'g', '.'])
-    >>> _rootsanddirs(
+    ([], ['g/h', '.'], ['g', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
     ...      (b'path', b'', b'')])
-    (['r', 'p/p', '.'], ['p', '.'])
-    >>> _rootsanddirs(
+    (['r', 'p/p', '.'], [], ['p', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'relglob', b'rg*', b''), (b're', b're/', b''),
     ...      (b'relre', b'rr', b'')])
-    (['.', '.', '.'], ['.'])
+    (['.', '.', '.'], [], ['.'])
     '''
     r, d = _patternrootsanddirs(kindpats)
 
+    p = []
     # Append the parents as non-recursive/exact directories, since they must be
     # scanned to get to either the roots or the other exact directories.
-    d.extend(util.dirs(d))
-    d.extend(util.dirs(r))
+    p.extend(util.dirs(d))
+    p.extend(util.dirs(r))
     # util.dirs() does not include the root directory, so add it manually
-    d.append('.')
+    p.append('.')
 
-    return r, d
+    return r, d, p
 
 def _explicitfiles(kindpats):
     '''Returns the potential explicit filenames from the patterns.
--- a/mercurial/mdiff.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/mdiff.py	Tue Sep 04 12:16:28 2018 -0400
@@ -357,7 +357,7 @@
             # walk backwards from the start of the context up to the start of
             # the previous hunk context until we find a line starting with an
             # alphanumeric char.
-            for i in xrange(astart - 1, lastpos - 1, -1):
+            for i in pycompat.xrange(astart - 1, lastpos - 1, -1):
                 if l1[i][0:1].isalnum():
                     func = b' ' + l1[i].rstrip()
                     # split long function name if ASCII. otherwise we have no
@@ -381,7 +381,7 @@
         hunklines = (
             ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
             + delta
-            + [' ' + l1[x] for x in xrange(a2, aend)]
+            + [' ' + l1[x] for x in pycompat.xrange(a2, aend)]
         )
         # If either file ends without a newline and the last line of
         # that file is part of a hunk, a marker is printed. If the
@@ -390,7 +390,7 @@
         # which the hunk can end in a shared line without a newline.
         skip = False
         if not t1.endswith('\n') and astart + alen == len(l1) + 1:
-            for i in xrange(len(hunklines) - 1, -1, -1):
+            for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
                 if hunklines[i].startswith(('-', ' ')):
                     if hunklines[i].startswith(' '):
                         skip = True
@@ -398,7 +398,7 @@
                     hunklines.insert(i + 1, _missing_newline_marker)
                     break
         if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1:
-            for i in xrange(len(hunklines) - 1, -1, -1):
+            for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
                 if hunklines[i].startswith('+'):
                     hunklines[i] += '\n'
                     hunklines.insert(i + 1, _missing_newline_marker)
--- a/mercurial/minifileset.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/minifileset.py	Tue Sep 04 12:16:28 2018 -0400
@@ -11,20 +11,23 @@
 from . import (
     error,
     fileset,
+    filesetlang,
     pycompat,
 )
 
 def _sizep(x):
     # i18n: "size" is a keyword
-    expr = fileset.getstring(x, _("size requires an expression"))
+    expr = filesetlang.getstring(x, _("size requires an expression"))
     return fileset.sizematcher(expr)
 
 def _compile(tree):
     if not tree:
         raise error.ParseError(_("missing argument"))
     op = tree[0]
-    if op in {'symbol', 'string', 'kindpat'}:
-        name = fileset.getpattern(tree, {'path'}, _('invalid file pattern'))
+    if op == 'withstatus':
+        return _compile(tree[1])
+    elif op in {'symbol', 'string', 'kindpat'}:
+        name = filesetlang.getpattern(tree, {'path'}, _('invalid file pattern'))
         if name.startswith('**'): # file extension test, ex. "**.tar.gz"
             ext = name[2:]
             for c in pycompat.bytestr(ext):
@@ -39,18 +42,15 @@
             return f
         raise error.ParseError(_("unsupported file pattern: %s") % name,
                                hint=_('paths must be prefixed with "path:"'))
-    elif op == 'or':
-        func1 = _compile(tree[1])
-        func2 = _compile(tree[2])
-        return lambda n, s: func1(n, s) or func2(n, s)
+    elif op in {'or', 'patterns'}:
+        funcs = [_compile(x) for x in tree[1:]]
+        return lambda n, s: any(f(n, s) for f in funcs)
     elif op == 'and':
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and func2(n, s)
     elif op == 'not':
         return lambda n, s: not _compile(tree[1])(n, s)
-    elif op == 'group':
-        return _compile(tree[1])
     elif op == 'func':
         symbols = {
             'all': lambda n, s: True,
@@ -58,7 +58,7 @@
             'size': lambda n, s: _sizep(tree[2])(s),
         }
 
-        name = fileset.getsymbol(tree[1])
+        name = filesetlang.getsymbol(tree[1])
         if name in symbols:
             return symbols[name]
 
@@ -67,11 +67,9 @@
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and not func2(n, s)
-    elif op == 'negate':
-        raise error.ParseError(_("can't use negate operator in this context"))
     elif op == 'list':
         raise error.ParseError(_("can't use a list in this context"),
-                               hint=_('see hg help "filesets.x or y"'))
+                               hint=_('see \'hg help "filesets.x or y"\''))
     raise error.ProgrammingError('illegal tree: %r' % (tree,))
 
 def compile(text):
@@ -88,5 +86,7 @@
     files whose name ends with ".zip", and all files under "bin" in the repo
     root except for "bin/README".
     """
-    tree = fileset.parse(text)
+    tree = filesetlang.parse(text)
+    tree = filesetlang.analyze(tree)
+    tree = filesetlang.optimize(tree)
     return _compile(tree)
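The n-ary 'or'/'patterns' branch above folds a list of compiled predicates with any(). The same folding over a toy parse tree, as a self-contained sketch (the 'ext' operator and tree shape are invented for illustration and do not match filesetlang's real output):

    def compiletree(tree):
        op = tree[0]
        if op == 'ext':                      # match by file extension
            ext = tree[1]
            return lambda name, size: name.endswith('.' + ext)
        elif op in ('or', 'patterns'):       # n-ary union, as in _compile above
            funcs = [compiletree(x) for x in tree[1:]]
            return lambda name, size: any(f(name, size) for f in funcs)
        elif op == 'and':
            f1, f2 = compiletree(tree[1]), compiletree(tree[2])
            return lambda name, size: f1(name, size) and f2(name, size)
        raise ValueError('illegal tree: %r' % (tree,))

    match = compiletree(('or', ('ext', 'zip'), ('ext', 'gz')))
    assert match('a.zip', 0) and match('b.gz', 0) and not match('c.txt', 0)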
--- a/mercurial/minirst.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/minirst.py	Tue Sep 04 12:16:28 2018 -0400
@@ -316,7 +316,7 @@
 
             # column markers are ASCII so we can calculate column
             # position in bytes
-            columns = [x for x in xrange(len(div))
+            columns = [x for x in pycompat.xrange(len(div))
                        if div[x:x + 1] == '=' and (x == 0 or
                                                    div[x - 1:x] == ' ')]
             rows = []
@@ -663,69 +663,79 @@
     text = ''.join(formatblock(b, width) for b in blocks)
     return text
 
+def formatplain(blocks, width):
+    """Format parsed blocks as plain text"""
+    return ''.join(formatblock(b, width) for b in blocks)
+
 def format(text, width=80, indent=0, keep=None, style='plain', section=None):
     """Parse and format the text according to width."""
     blocks, pruned = parse(text, indent, keep or [])
-    parents = []
     if section:
-        sections = getsections(blocks)
-        blocks = []
-        i = 0
-        lastparents = []
-        synthetic = []
-        collapse = True
-        while i < len(sections):
-            name, nest, b = sections[i]
-            del parents[nest:]
-            parents.append(i)
-            if name == section:
-                if lastparents != parents:
-                    llen = len(lastparents)
-                    plen = len(parents)
-                    if llen and llen != plen:
-                        collapse = False
-                    s = []
-                    for j in xrange(3, plen - 1):
-                        parent = parents[j]
-                        if (j >= llen or
-                            lastparents[j] != parent):
-                            s.append(len(blocks))
-                            sec = sections[parent][2]
-                            blocks.append(sec[0])
-                            blocks.append(sec[-1])
-                    if s:
-                        synthetic.append(s)
+        blocks = filtersections(blocks, section)
+    if style == 'html':
+        return formathtml(blocks)
+    else:
+        return formatplain(blocks, width=width)
+
+def filtersections(blocks, section):
+    """Select parsed blocks under the specified section
 
-                lastparents = parents[:]
-                blocks.extend(b)
+    The section name may be a dotted path; it matches if it equals the
+    full section path or a dot-separated suffix of it.
+    """
+    parents = []
+    sections = _getsections(blocks)
+    blocks = []
+    i = 0
+    lastparents = []
+    synthetic = []
+    collapse = True
+    while i < len(sections):
+        path, nest, b = sections[i]
+        del parents[nest:]
+        parents.append(i)
+        if path == section or path.endswith('.' + section):
+            if lastparents != parents:
+                llen = len(lastparents)
+                plen = len(parents)
+                if llen and llen != plen:
+                    collapse = False
+                s = []
+                for j in pycompat.xrange(3, plen - 1):
+                    parent = parents[j]
+                    if (j >= llen or
+                        lastparents[j] != parent):
+                        s.append(len(blocks))
+                        sec = sections[parent][2]
+                        blocks.append(sec[0])
+                        blocks.append(sec[-1])
+                if s:
+                    synthetic.append(s)
 
-                ## Also show all subnested sections
-                while i + 1 < len(sections) and sections[i + 1][1] > nest:
-                    i += 1
-                    blocks.extend(sections[i][2])
-            i += 1
-        if collapse:
-            synthetic.reverse()
-            for s in synthetic:
-                path = [blocks[syn]['lines'][0] for syn in s]
-                real = s[-1] + 2
-                realline = blocks[real]['lines']
-                realline[0] = ('"%s"' %
-                               '.'.join(path + [realline[0]]).replace('"', ''))
-                del blocks[s[0]:real]
+            lastparents = parents[:]
+            blocks.extend(b)
 
-    if style == 'html':
-        text = formathtml(blocks)
-    else:
-        text = ''.join(formatblock(b, width) for b in blocks)
-    if keep is None:
-        return text
-    else:
-        return text, pruned
+            ## Also show all subnested sections
+            while i + 1 < len(sections) and sections[i + 1][1] > nest:
+                i += 1
+                blocks.extend(sections[i][2])
+        i += 1
+    if collapse:
+        synthetic.reverse()
+        for s in synthetic:
+            path = [blocks[syn]['lines'][0] for syn in s]
+            real = s[-1] + 2
+            realline = blocks[real]['lines']
+            realline[0] = ('"%s"' %
+                           '.'.join(path + [realline[0]]).replace('"', ''))
+            del blocks[s[0]:real]
 
-def getsections(blocks):
-    '''return a list of (section name, nesting level, blocks) tuples'''
+    return blocks
+
+def _getsections(blocks):
+    '''return a list of (section path, nesting level, blocks) tuples'''
     nest = ""
+    names = ()
     level = 0
     secs = []
 
@@ -746,7 +756,8 @@
                 nest += i
             level = nest.index(i) + 1
             nest = nest[:level]
-            secs.append((getname(b), level, [b]))
+            names = names[:level] + (getname(b),)
+            secs.append(('.'.join(names), level, [b]))
         elif b['type'] in ('definition', 'field'):
             i = ' '
             if i not in nest:
@@ -767,7 +778,8 @@
                     elif siblingindent == indent:
                         level = sec[1]
                         break
-            secs.append((getname(b), level, [b]))
+            names = names[:level] + (getname(b),)
+            secs.append(('.'.join(names), level, [b]))
         else:
             if not secs:
                 # add an initial empty section
@@ -793,15 +805,6 @@
             secs[-1][2].append(b)
     return secs
 
-def decorateblocks(blocks, width):
-    '''generate a list of (section name, line text) pairs for search'''
-    lines = []
-    for s in getsections(blocks):
-        section = s[0]
-        text = formatblocks(s[2], width)
-        lines.append([(section, l) for l in text.splitlines(True)])
-    return lines
-
 def maketable(data, indent=0, header=False):
     '''Generate an RST table for the given table data as a list of lines'''
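The dotted-suffix matching that filtersections() introduces boils down to one comparison; a runnable restatement (the section names here are made up):

    def matchsection(path, section):
        # A section matches if it equals the full dotted path or is a
        # dot-separated suffix of it, as filtersections() checks above.
        return path == section or path.endswith('.' + section)

    assert matchsection('commands.commit', 'commit')
    assert matchsection('hg.commands.commit', 'commands.commit')
    assert not matchsection('hg.commands.commitextras', 'commit')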
 
--- a/mercurial/narrowspec.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/narrowspec.py	Tue Sep 04 12:16:28 2018 -0400
@@ -13,34 +13,13 @@
 from . import (
     error,
     match as matchmod,
+    repository,
+    sparse,
     util,
 )
 
 FILENAME = 'narrowspec'
 
-def _parsestoredpatterns(text):
-    """Parses the narrowspec format that's stored on disk."""
-    patlist = None
-    includepats = []
-    excludepats = []
-    for l in text.splitlines():
-        if l == '[includes]':
-            if patlist is None:
-                patlist = includepats
-            else:
-                raise error.Abort(_('narrowspec includes section must appear '
-                                    'at most once, before excludes'))
-        elif l == '[excludes]':
-            if patlist is not excludepats:
-                patlist = excludepats
-            else:
-                raise error.Abort(_('narrowspec excludes section must appear '
-                                    'at most once'))
-        else:
-            patlist.append(l)
-
-    return set(includepats), set(excludepats)
-
 def parseserverpatterns(text):
     """Parses the narrowspec format that's returned by the server."""
     includepats = set()
@@ -107,10 +86,10 @@
     return set(normalizepattern(p) for p in pats)
 
 def format(includes, excludes):
-    output = '[includes]\n'
+    output = '[include]\n'
     for i in sorted(includes - excludes):
         output += i + '\n'
-    output += '[excludes]\n'
+    output += '[exclude]\n'
     for e in sorted(excludes):
         output += e + '\n'
     return output
@@ -129,21 +108,41 @@
 
 def load(repo):
     try:
-        spec = repo.vfs.read(FILENAME)
+        spec = repo.svfs.read(FILENAME)
     except IOError as e:
         # Treat "narrowspec does not exist" the same as "narrowspec file exists
         # and is empty".
         if e.errno == errno.ENOENT:
-            # Without this the next call to load will use the cached
-            # non-existence of the file, which can cause some odd issues.
-            repo.invalidate(clearfilecache=True)
             return set(), set()
         raise
-    return _parsestoredpatterns(spec)
+    # maybe we should care about the profiles returned too
+    includepats, excludepats, profiles = sparse.parseconfig(repo.ui, spec,
+                                                            'narrow')
+    if profiles:
+        raise error.Abort(_("including other spec files using '%include' is"
+                            " not supported in narrowspec"))
+    return includepats, excludepats
 
 def save(repo, includepats, excludepats):
     spec = format(includepats, excludepats)
-    repo.vfs.write(FILENAME, spec)
+    repo.svfs.write(FILENAME, spec)
+
+def savebackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    vfs = repo.vfs
+    vfs.tryunlink(backupname)
+    util.copyfile(repo.svfs.join(FILENAME), vfs.join(backupname), hardlink=True)
+
+def restorebackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    util.rename(repo.vfs.join(backupname), repo.svfs.join(FILENAME))
+
+def clearbackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    repo.vfs.unlink(backupname)
 
 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
--- a/mercurial/node.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/node.py	Tue Sep 04 12:16:28 2018 -0400
@@ -21,20 +21,25 @@
         raise TypeError(e)
 
 nullrev = -1
+# In hex, this is '0000000000000000000000000000000000000000'
 nullid = b"\0" * 20
 nullhex = hex(nullid)
 
 # Phony node value to stand-in for new files in some uses of
 # manifests.
-newnodeid = '!' * 20
-addednodeid = ('0' * 15) + 'added'
-modifiednodeid = ('0' * 12) + 'modified'
+# In hex, this is '2121212121212121212121212121212121212121'
+newnodeid = '!!!!!!!!!!!!!!!!!!!!'
+# In hex, this is '3030303030303030303030303030306164646564'
+addednodeid = '000000000000000added'
+# In hex, this is '3030303030303030303030306d6f646966696564'
+modifiednodeid = '000000000000modified'
 
 wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
 
 # pseudo identifiers for working directory
 # (they are experimental, so don't add too many dependencies on them)
 wdirrev = 0x7fffffff
+# In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
 wdirid = b"\xff" * 20
 wdirhex = hex(wdirid)
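The hex values in the new comments follow from plain ASCII: each literal byte hex-encodes to two digits ('!' is 0x21, '0' is 0x30). A quick runnable check:

    import binascii

    newnodeid = b'!' * 20
    addednodeid = b'0' * 15 + b'added'
    assert binascii.hexlify(newnodeid) == b'21' * 20
    assert binascii.hexlify(addednodeid) == b'30' * 15 + binascii.hexlify(b'added')
    assert binascii.hexlify(b'added') == b'6164646564'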
 
--- a/mercurial/obsolete.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/obsolete.py	Tue Sep 04 12:16:28 2018 -0400
@@ -394,7 +394,7 @@
         off = o3 + metasize * nummeta
         metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
         metadata = []
-        for idx in xrange(0, len(metapairsize), 2):
+        for idx in pycompat.xrange(0, len(metapairsize), 2):
             o1 = off + metapairsize[idx]
             o2 = o1 + metapairsize[idx + 1]
             metadata.append((data[off:o1], data[o1:o2]))
--- a/mercurial/obsutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/obsutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -464,14 +464,14 @@
     phase = repo._phasecache.phase
     succsmarkers = repo.obsstore.successors.get
     public = phases.public
-    addedmarkers = tr.changes.get('obsmarkers')
-    addedrevs = tr.changes.get('revs')
+    addedmarkers = tr.changes['obsmarkers']
+    origrepolen = tr.changes['origrepolen']
     seenrevs = set()
     obsoleted = set()
     for mark in addedmarkers:
         node = mark[0]
         rev = torev(node)
-        if rev is None or rev in seenrevs or rev in addedrevs:
+        if rev is None or rev in seenrevs or rev >= origrepolen:
             continue
         seenrevs.add(rev)
         if phase(repo, rev) == public:
--- a/mercurial/parser.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/parser.py	Tue Sep 04 12:16:28 2018 -0400
@@ -20,7 +20,6 @@
 
 from .i18n import _
 from . import (
-    encoding,
     error,
     pycompat,
     util,
@@ -198,16 +197,11 @@
         # mangle Python's exception into our format
         raise error.ParseError(pycompat.bytestr(e).lower())
 
-def _brepr(obj):
-    if isinstance(obj, bytes):
-        return b"'%s'" % stringutil.escapestr(obj)
-    return encoding.strtolocal(repr(obj))
-
 def _prettyformat(tree, leafnodes, level, lines):
     if not isinstance(tree, tuple):
-        lines.append((level, _brepr(tree)))
+        lines.append((level, stringutil.pprint(tree)))
     elif tree[0] in leafnodes:
-        rs = map(_brepr, tree[1:])
+        rs = map(stringutil.pprint, tree[1:])
         lines.append((level, '(%s %s)' % (tree[0], ' '.join(rs))))
     else:
         lines.append((level, '(%s' % tree[0]))
--- a/mercurial/patch.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/patch.py	Tue Sep 04 12:16:28 2018 -0400
@@ -815,7 +815,7 @@
         for x, s in enumerate(self.lines):
             self.hash.setdefault(s, []).append(x)
 
-        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
+        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
             for toponly in [True, False]:
                 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                 oldstart = oldstart + self.offset + self.skew
@@ -1286,7 +1286,7 @@
         self.lena = int(aend) - self.starta
         if self.starta:
             self.lena += 1
-        for x in xrange(self.lena):
+        for x in pycompat.xrange(self.lena):
             l = lr.readline()
             if l.startswith('---'):
                 # lines addition, old block is empty
@@ -1320,7 +1320,7 @@
         if self.startb:
             self.lenb += 1
         hunki = 1
-        for x in xrange(self.lenb):
+        for x in pycompat.xrange(self.lenb):
             l = lr.readline()
             if l.startswith('\ '):
                 # XXX: the only way to hit this is with an invalid line range.
@@ -1396,14 +1396,14 @@
             top = 0
             bot = 0
             hlen = len(self.hunk)
-            for x in xrange(hlen - 1):
+            for x in pycompat.xrange(hlen - 1):
                 # the hunk starts with the @@ line, so use x+1
                 if self.hunk[x + 1].startswith(' '):
                     top += 1
                 else:
                     break
             if not toponly:
-                for x in xrange(hlen - 1):
+                for x in pycompat.xrange(hlen - 1):
                     if self.hunk[hlen - bot - 1].startswith(' '):
                         bot += 1
                     else:
@@ -2326,7 +2326,7 @@
         relfiltered = True
 
     if not changes:
-        changes = repo.status(ctx1, ctx2, match=match)
+        changes = ctx1.status(ctx2, match=match)
     modified, added, removed = changes[:3]
 
     if not modified and not added and not removed:
--- a/mercurial/phases.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/phases.py	Tue Sep 04 12:16:28 2018 -0400
@@ -123,11 +123,26 @@
 
 _fphasesentry = struct.Struct('>i20s')
 
-allphases = public, draft, secret = range(3)
+INTERNAL_FLAG = 64 # Phases for mercurial internal usage only
+HIDEABLE_FLAG = 32 # Phases that are hideable
+
+# record phase index
+public, draft, secret = range(3)
+internal = INTERNAL_FLAG | HIDEABLE_FLAG
+allphases = range(internal + 1)
 trackedphases = allphases[1:]
-phasenames = ['public', 'draft', 'secret']
+# record phase names
+phasenames = [None] * len(allphases)
+phasenames[:3] = ['public', 'draft', 'secret']
+phasenames[internal] = 'internal'
+# record phase property
 mutablephases = tuple(allphases[1:])
 remotehiddenphases = tuple(allphases[2:])
+localhiddenphases = tuple(p for p in allphases if p & HIDEABLE_FLAG)
+
+def supportinternal(repo):
+    """True if the internal phase can be used on a repository"""
+    return 'internal-phase' in repo.requirements
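The flag arithmetic above makes the internal phase value 96 (64 | 32), so a single bit test recovers the hideable property. A minimal check of those relationships:

    INTERNAL_FLAG = 64
    HIDEABLE_FLAG = 32
    public, draft, secret = range(3)
    internal = INTERNAL_FLAG | HIDEABLE_FLAG

    allphases = range(internal + 1)
    localhiddenphases = tuple(p for p in allphases if p & HIDEABLE_FLAG)

    assert internal == 96
    assert internal in localhiddenphases
    assert secret not in localhiddenphases   # secret is only hidden remotely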
 
 def _readroots(repo, phasedefaults=None):
     """Read phase roots from disk
@@ -272,19 +287,16 @@
         repo = repo.unfiltered()
         cl = repo.changelog
         self._phasesets = [set() for phase in allphases]
-        roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
-        if roots:
-            ps = set(cl.descendants(roots))
-            for root in roots:
-                ps.add(root)
-            self._phasesets[secret] = ps
-        roots = pycompat.maplist(cl.rev, self.phaseroots[draft])
-        if roots:
-            ps = set(cl.descendants(roots))
-            for root in roots:
-                ps.add(root)
-            ps.difference_update(self._phasesets[secret])
-            self._phasesets[draft] = ps
+        lowerroots = set()
+        for phase in reversed(trackedphases):
+            roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
+            if roots:
+                ps = set(cl.descendants(roots))
+                for root in roots:
+                    ps.add(root)
+                ps.difference_update(lowerroots)
+                lowerroots.update(ps)
+                self._phasesets[phase] = ps
         self._loadedrevslen = len(cl)
 
     def loadphaserevs(self, repo):
@@ -374,7 +386,7 @@
 
         changes = set() # set of revisions to be changed
         delroots = [] # set of root deleted by this path
-        for phase in xrange(targetphase + 1, len(allphases)):
+        for phase in pycompat.xrange(targetphase + 1, len(allphases)):
             # filter nodes that are not in a compatible phase already
             nodes = [n for n in nodes
                      if self.phase(repo, repo[n].rev()) >= phase]
@@ -420,7 +432,7 @@
             affected = set(repo.revs('(%ln::) - (%ln::)', new, old))
 
             # find the phase of the affected revision
-            for phase in xrange(targetphase, -1, -1):
+            for phase in pycompat.xrange(targetphase, -1, -1):
                 if phase:
                     roots = oldroots[phase]
                     revs = set(repo.revs('%ln::%ld', roots, affected))
@@ -434,6 +446,9 @@
     def _retractboundary(self, repo, tr, targetphase, nodes):
         # Be careful to preserve shallow-copied values: do not update
         # phaseroots values, replace them.
+        if targetphase == internal and not supportinternal(repo):
+            msg = 'this repository does not support the internal phase'
+            raise error.ProgrammingError(msg)
 
         repo = repo.unfiltered()
         currentroots = self.phaseroots[targetphase]
@@ -589,7 +604,7 @@
     headsbyphase = [[] for i in allphases]
     # No need to keep track of secret phase; any heads in the subset that
     # are not mentioned are implicitly secret.
-    for phase in allphases[:-1]:
+    for phase in allphases[:secret]:
         revset = "heads(%%ln & %s())" % phasenames[phase]
         headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
     return headsbyphase
@@ -602,8 +617,8 @@
     # to update. This avoid creating empty transaction during no-op operation.
 
     for phase in allphases[:-1]:
-        revset = '%%ln - %s()' % phasenames[phase]
-        heads = [c.node() for c in repo.set(revset, headsbyphase[phase])]
+        revset = '%ln - _phase(%s)'
+        heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
         if heads:
             advanceboundary(repo, trgetter(), phase, heads)
 
--- a/mercurial/policy.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/policy.py	Tue Sep 04 12:16:28 2018 -0400
@@ -69,7 +69,7 @@
     (r'cext', r'bdiff'): 3,
     (r'cext', r'mpatch'): 1,
     (r'cext', r'osutil'): 4,
-    (r'cext', r'parsers'): 5,
+    (r'cext', r'parsers'): 9,
 }
 
 # map import request to other package or module
--- a/mercurial/pure/osutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/pure/osutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -150,7 +150,7 @@
         rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
         rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) /
                      ctypes.sizeof(ctypes.c_int))
-        return [rfds[i] for i in xrange(rfdscount)]
+        return [rfds[i] for i in pycompat.xrange(rfdscount)]
 
 else:
     import msvcrt
--- a/mercurial/pure/parsers.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/pure/parsers.py	Tue Sep 04 12:16:28 2018 -0400
@@ -39,25 +39,21 @@
 
 class BaseIndexObject(object):
     def __len__(self):
-        return self._lgt + len(self._extra) + 1
+        return self._lgt + len(self._extra)
 
-    def insert(self, i, tup):
-        assert i == -1
+    def append(self, tup):
         self._extra.append(tup)
 
-    def _fix_index(self, i):
+    def _check_index(self, i):
         if not isinstance(i, int):
             raise TypeError("expecting int indexes")
-        if i < 0:
-            i = len(self) + i
         if i < 0 or i >= len(self):
             raise IndexError
-        return i
 
     def __getitem__(self, i):
-        i = self._fix_index(i)
-        if i == len(self) - 1:
+        if i == -1:
             return (0, 0, 0, -1, -1, -1, -1, nullid)
+        self._check_index(i)
         if i >= self._lgt:
             return self._extra[i - self._lgt]
         index = self._calculate_index(i)
@@ -82,7 +78,8 @@
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError("deleting slices only supports a:-1 with step 1")
-        i = self._fix_index(i.start)
+        i = i.start
+        self._check_index(i)
         if i < self._lgt:
             self._data = self._data[:i * indexsize]
             self._lgt = i
@@ -116,7 +113,8 @@
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError("deleting slices only supports a:-1 with step 1")
-        i = self._fix_index(i.start)
+        i = i.start
+        self._check_index(i)
         if i < self._lgt:
             self._offsets = self._offsets[:i]
             self._lgt = i
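The index change above drops general negative indexing in favor of a single -1 sentinel for the null revision, and append() replaces insert(-1, ...). A toy index showing the new contract (TinyIndex and its tuples are stand-ins, not real revlog entries):

    nullid = b'\x00' * 20
    nulltup = (0, 0, 0, -1, -1, -1, -1, nullid)

    class TinyIndex(object):
        def __init__(self, entries):
            self._extra = list(entries)
        def __len__(self):
            return len(self._extra)       # no longer counts the null entry
        def append(self, tup):
            self._extra.append(tup)
        def __getitem__(self, i):
            if i == -1:
                return nulltup            # only -1 maps to the null revision
            if not isinstance(i, int):
                raise TypeError('expecting int indexes')
            if i < 0 or i >= len(self):
                raise IndexError
            return self._extra[i]

    idx = TinyIndex([('rev0',)])
    assert len(idx) == 1
    assert idx[-1] == nulltup and idx[0] == ('rev0',)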
--- a/mercurial/pvec.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/pvec.py	Tue Sep 04 12:16:28 2018 -0400
@@ -52,6 +52,7 @@
 
 from .node import nullrev
 from . import (
+    pycompat,
     util,
 )
 
@@ -72,7 +73,7 @@
 
 def _str(v, l):
     bs = ""
-    for p in xrange(l):
+    for p in pycompat.xrange(l):
         bs = chr(v & 255) + bs
         v >>= 8
     return bs
@@ -91,7 +92,7 @@
             c += 1
         x >>= 1
     return c
-_htab = [_hweight(x) for x in xrange(256)]
+_htab = [_hweight(x) for x in pycompat.xrange(256)]
 
 def _hamming(a, b):
     '''find the hamming distance between two longs'''
@@ -152,7 +153,7 @@
     pvc = r._pveccache
     if ctx.rev() not in pvc:
         cl = r.changelog
-        for n in xrange(ctx.rev() + 1):
+        for n in pycompat.xrange(ctx.rev() + 1):
             if n not in pvc:
                 node = cl.node(n)
                 p1, p2 = cl.parentrevs(n)
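For context, _htab above memoizes per-byte popcounts so _hamming() reduces to an XOR plus one table lookup per byte. The technique in isolation:

    def _hweight(x):
        c = 0
        while x:
            if x & 1:
                c += 1
            x >>= 1
        return c

    _htab = [_hweight(x) for x in range(256)]

    def hamming(a, b):
        d = a ^ b                      # differing bits
        c = 0
        while d:
            c += _htab[d & 0xff]       # one lookup per byte
            d >>= 8
        return c

    assert hamming(0b1010, 0b0110) == 2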
--- a/mercurial/pycompat.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/pycompat.py	Tue Sep 04 12:16:28 2018 -0400
@@ -331,6 +331,7 @@
 else:
     import cStringIO
 
+    xrange = xrange
     unicode = unicode
     bytechr = chr
     byterepr = repr
--- a/mercurial/registrar.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/registrar.py	Tue Sep 04 12:16:28 2018 -0400
@@ -247,6 +247,19 @@
      implies 'matchctx.status()' at runtime or not (False, by
      default).
 
+    Optional argument 'weight' indicates the estimated run-time cost, useful
+    for static optimization; the default is 1. Higher weight means more
+    expensive.
+    There are predefined weights in the 'filesetlang' module.
+
+    ====== =============================================================
+    Weight Description and examples
+    ====== =============================================================
+    0.5    basic match patterns (e.g. a symbol)
+    10     computing status (e.g. added()) or accessing a few files
+    30     reading file content for each (e.g. grep())
+    50     scanning working directory (ignored())
+    ====== =============================================================
+
     'filesetpredicate' instance in example above can be used to
     decorate multiple functions.
 
@@ -259,8 +272,9 @@
     _getname = _funcregistrarbase._parsefuncdecl
     _docformat = "``%s``\n    %s"
 
-    def _extrasetup(self, name, func, callstatus=False):
+    def _extrasetup(self, name, func, callstatus=False, weight=1):
         func._callstatus = callstatus
+        func._weight = weight
 
 class _templateregistrarbase(_funcregistrarbase):
     """Base of decorator to register functions as template specific one
@@ -281,7 +295,7 @@
             '''
             pass
 
-        # old API
+        # old API (DEPRECATED)
         @templatekeyword('mykeyword')
         def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
             '''Explanation of this template keyword ....
@@ -385,7 +399,8 @@
         internalmerge = registrar.internalmerge()
 
         @internalmerge('mymerge', internalmerge.mergeonly,
-                       onfailure=None, precheck=None):
+                       onfailure=None, precheck=None,
+                       binary=False, symlink=False):
         def mymergefunc(repo, mynode, orig, fcd, fco, fca,
                         toolconf, files, labels=None):
             '''Explanation of this internal merge tool ....
@@ -416,6 +431,12 @@
     'files' and 'labels'. If it returns false value, merging is aborted
     immediately (and file is marked as "unresolved").
 
+    Optional argument 'binary' indicates whether the internal merge tool
+    can handle binary files. The 'nomerge' merge type implies binary=True.
+
+    Optional argument 'symlink' indicates whether the internal merge tool
+    can handle symlinks. The 'nomerge' merge type implies symlink=True.
+
     'internalmerge' instance in example above can be used to
     decorate multiple functions.
 
@@ -433,7 +454,14 @@
     fullmerge = 'fullmerge'  # both premerge and merge
 
     def _extrasetup(self, name, func, mergetype,
-                    onfailure=None, precheck=None):
+                    onfailure=None, precheck=None,
+                    binary=False, symlink=False):
         func.mergetype = mergetype
         func.onfailure = onfailure
         func.precheck = precheck
+
+        binarycap = binary or mergetype == self.nomerge
+        symlinkcap = symlink or mergetype == self.nomerge
+
+        # actual capabilities, which this internal merge tool has
+        func.capabilities = {"binary": binarycap, "symlink": symlinkcap}
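The capability computation above means 'nomerge' tools implicitly claim both capabilities, since they never touch file content. As a hedged sketch of that computation:

    def capabilities(mergetype, binary=False, symlink=False):
        # 'nomerge' tools never merge content, so they trivially "handle"
        # binaries and symlinks, as _extrasetup() computes above.
        nomerge = mergetype == 'nomerge'
        return {'binary': binary or nomerge, 'symlink': symlink or nomerge}

    assert capabilities('nomerge') == {'binary': True, 'symlink': True}
    assert capabilities('fullmerge', binary=True) == {'binary': True,
                                                      'symlink': False}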
--- a/mercurial/repair.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/repair.py	Tue Sep 04 12:16:28 2018 -0400
@@ -24,6 +24,7 @@
     exchange,
     obsolete,
     obsutil,
+    pycompat,
     util,
 )
 from .utils import (
@@ -70,7 +71,7 @@
     """find out the filelogs affected by the strip"""
     files = set()
 
-    for x in xrange(striprev, len(repo)):
+    for x in pycompat.xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
@@ -80,7 +81,7 @@
     return [revlog.linkrev(r) for r in brokenset]
 
 def _collectmanifest(repo, striprev):
-    return _collectrevlog(repo.manifestlog._revlog, striprev)
+    return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
@@ -199,7 +200,7 @@
                     repo.file(fn).strip(striprev, tr)
                 tr.endgroup()
 
-                for i in xrange(offset, len(tr.entries)):
+                for i in pycompat.xrange(offset, len(tr.entries)):
                     file, troffset, ignore = tr.entries[i]
                     with repo.svfs(file, 'a', checkambig=True) as fp:
                         fp.truncate(troffset)
@@ -297,31 +298,31 @@
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)
 
-def delayedstrip(ui, repo, nodelist, topic=None):
+def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside transaction and won't strip irreverent revs
 
     nodelist must explicitly contain all descendants. Otherwise a warning will
     be printed that some nodes are not stripped.
 
-    Always do a backup. The last non-None "topic" will be used as the backup
-    topic name. The default backup topic name is "backup".
+    Will do a backup if `backup` is True. The last non-None "topic" will be
+    used as the backup topic name. The default backup topic name is "backup".
     """
     tr = repo.currenttransaction()
     if not tr:
         nodes = safestriproots(ui, repo, nodelist)
-        return strip(ui, repo, nodes, True, topic)
+        return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabet order.
     # use '\xff' as prefix so we are likely to be called last.
     callback = tr.getpostclose('\xffstrip')
     if callback is None:
-        callback = stripcallback(ui, repo, True, topic)
+        callback = stripcallback(ui, repo, backup=backup, topic=topic)
         tr.addpostclose('\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)
 
 def stripmanifest(repo, striprev, tr, files):
-    revlog = repo.manifestlog._revlog
+    revlog = repo.manifestlog.getstorage(b'')
     revlog.strip(striprev, tr)
     striptrees(repo, tr, striprev, files)
 
@@ -332,7 +333,7 @@
             if (unencoded.startswith('meta/') and
                 unencoded.endswith('00manifest.i')):
                 dir = unencoded[5:-12]
-                repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
+                repo.manifestlog.getstorage(dir).strip(striprev, tr)
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
--- a/mercurial/repository.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/repository.py	Tue Sep 04 12:16:28 2018 -0400
@@ -15,6 +15,10 @@
     interfaceutil,
 )
 
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+NARROW_REQUIREMENT = 'narrowhg-experimental'
+
 class ipeerconnection(interfaceutil.Interface):
     """Represents a "connection" to a repository.
 
@@ -314,6 +318,87 @@
             _('cannot %s; remote repository does not support the %r '
               'capability') % (purpose, name))
 
+class irevisiondelta(interfaceutil.Interface):
+    """Represents a delta between one revision and another.
+
+    Instances convey enough information to allow a revision to be exchanged
+    with another repository.
+
+    Instances represent the fulltext revision data or a delta against
+    another revision. Therefore the ``revision`` and ``delta`` attributes
+    are mutually exclusive.
+
+    Typically used for changegroup generation.
+    """
+
+    node = interfaceutil.Attribute(
+        """20 byte node of this revision.""")
+
+    p1node = interfaceutil.Attribute(
+        """20 byte node of 1st parent of this revision.""")
+
+    p2node = interfaceutil.Attribute(
+        """20 byte node of 2nd parent of this revision.""")
+
+    linknode = interfaceutil.Attribute(
+        """20 byte node of the changelog revision this node is linked to.""")
+
+    flags = interfaceutil.Attribute(
+        """2 bytes of integer flags that apply to this revision.""")
+
+    basenode = interfaceutil.Attribute(
+        """20 byte node of the revision this data is a delta against.
+
+        ``nullid`` indicates that the revision is a full revision and not
+        a delta.
+        """)
+
+    baserevisionsize = interfaceutil.Attribute(
+        """Size of base revision this delta is against.
+
+        May be ``None`` if ``basenode`` is ``nullid``.
+        """)
+
+    revision = interfaceutil.Attribute(
+        """Raw fulltext of revision data for this node.""")
+
+    delta = interfaceutil.Attribute(
+        """Delta between ``basenode`` and ``node``.
+
+        Stored in the bdiff delta format.
+        """)
+
+class irevisiondeltarequest(interfaceutil.Interface):
+    """Represents a request to generate an ``irevisiondelta``."""
+
+    node = interfaceutil.Attribute(
+        """20 byte node of revision being requested.""")
+
+    p1node = interfaceutil.Attribute(
+        """20 byte node of 1st parent of revision.""")
+
+    p2node = interfaceutil.Attribute(
+        """20 byte node of 2nd parent of revision.""")
+
+    linknode = interfaceutil.Attribute(
+        """20 byte node to store in ``linknode`` attribute.""")
+
+    basenode = interfaceutil.Attribute(
+        """Base revision that delta should be generated against.
+
+        If ``nullid``, the derived ``irevisiondelta`` should have its
+        ``revision`` field populated and no delta should be generated.
+
+        If ``None``, the delta may be generated against any revision that
+        is an ancestor of this revision, or a full revision may be used.
+
+        If any other value, the delta should be produced against that
+        revision.
+        """)
+
+    ellipsis = interfaceutil.Attribute(
+        """Boolean on whether the ellipsis flag should be set.""")
+
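By way of illustration, a minimal request carrier conforming to this interface could be built with the same ``attr.s`` style the revlog code adopts later in this patch; the class name is hypothetical:

    from mercurial.thirdparty import attr

    @attr.s(slots=True, frozen=True)
    class revisiondeltarequest(object):
        node = attr.ib()
        p1node = attr.ib()
        p2node = attr.ib()
        linknode = attr.ib()
        # None: storage may choose any suitable base (or a full revision).
        # nullid: a full revision is required.
        # Any other node: a delta against exactly that revision.
        basenode = attr.ib(default=None)
        ellipsis = attr.ib(default=False)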
 class ifilerevisionssequence(interfaceutil.Interface):
     """Contains index data for all revisions of a file.
 
@@ -467,9 +552,6 @@
     def deltaparent(rev):
         """"Return the revision that is a suitable parent to delta against."""
 
-    def candelta(baserev, rev):
-        """"Whether a delta can be generated between two revisions."""
-
 class ifiledata(interfaceutil.Interface):
     """Storage interface for data storage of a specific file.
 
@@ -536,6 +618,30 @@
         revision data.
         """
 
+    def emitrevisiondeltas(requests):
+        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
+
+        Given an iterable of objects conforming to the ``irevisiondeltarequest``
+        interface, emits objects conforming to the ``irevisiondelta``
+        interface.
+
+        This method is a generator.
+
+        ``irevisiondelta`` objects should be emitted in the same order as
+        the ``irevisiondeltarequest`` objects that were passed in.
+
+        The emitted objects MUST conform to the constraints of their
+        originating ``irevisiondeltarequest``. Namely, they must respect any
+        request to build a delta from a specific ``basenode`` if defined.
+
+        When sending deltas, implementations must take into account whether
+        the receiver has the base revision before encoding a delta against
+        it. A revision encountered previously in ``requests`` is
+        always a suitable base revision. An example of a bad delta is a delta
+        against a non-ancestor revision. Another example of a bad delta is a
+        delta against a censored revision.
+        """
+
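A hedged sketch of how a changegroup-style consumer could drive this generator, assuming ``store`` is any object providing this interface and ``requests`` is an ordered list of request objects like the one sketched above:

    # Deltas are emitted in request order, so the two sequences can be
    # paired positionally.
    for request, rdelta in zip(requests, store.emitrevisiondeltas(requests)):
        assert rdelta.node == request.node
        if rdelta.revision is not None:
            data = rdelta.revision  # fulltext; basenode is nullid
        else:
            data = rdelta.delta     # bdiff delta against rdelta.basenode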
 class ifilemutation(interfaceutil.Interface):
     """Storage interface for mutation events of a tracked file."""
 
@@ -614,14 +720,6 @@
         TODO this feels revlog centric and could likely be removed.
         """)
 
-    storedeltachains = interfaceutil.Attribute(
-        """Whether the store stores deltas.
-
-        TODO deltachains are revlog centric. This can probably removed
-        once there are better abstractions for obtaining/writing
-        data.
-        """)
-
     _generaldelta = interfaceutil.Attribute(
         """Whether deltas can be against any parent revision.
 
@@ -890,8 +988,180 @@
         Returns the binary node of the created revision.
         """
 
+class imanifeststorage(interfaceutil.Interface):
+    """Storage interface for manifest data."""
+
+    tree = interfaceutil.Attribute(
+        """The path to the directory this manifest tracks.
+
+        The empty bytestring represents the root manifest.
+        """)
+
+    index = interfaceutil.Attribute(
+        """An ``ifilerevisionssequence`` instance.""")
+
+    indexfile = interfaceutil.Attribute(
+        """Path of revlog index file.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    opener = interfaceutil.Attribute(
+        """VFS opener to use to access underlying files used for storage.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    version = interfaceutil.Attribute(
+        """Revlog version number.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    _generaldelta = interfaceutil.Attribute(
+        """Whether generaldelta storage is being used.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    fulltextcache = interfaceutil.Attribute(
+        """Dict with cache of fulltexts.
+
+        TODO this doesn't feel appropriate for the storage interface.
+        """)
+
+    def __len__():
+        """Obtain the number of revisions stored for this manifest."""
+
+    def __iter__():
+        """Iterate over revision numbers for this manifest."""
+
+    def rev(node):
+        """Obtain the revision number given a binary node.
+
+        Raises ``error.LookupError`` if the node is not known.
+        """
+
+    def node(rev):
+        """Obtain the node value given a revision number.
+
+        Raises ``error.LookupError`` if the revision is not known.
+        """
+
+    def lookup(value):
+        """Attempt to resolve a value to a node.
+
+        Value can be a binary node, hex node, revision number, or a bytes
+        that can be converted to an integer.
+
+        Raises ``error.LookupError`` if a node could not be resolved.
+
+        TODO this is only used by debug* commands and can probably be deleted
+        easily.
+        """
+
+    def parents(node):
+        """Returns a 2-tuple of parent nodes for a node.
+
+        Values will be ``nullid`` if the parent is empty.
+        """
+
+    def parentrevs(rev):
+        """Like parents() but operates on revision numbers."""
+
+    def linkrev(rev):
+        """Obtain the changeset revision number a revision is linked to."""
+
+    def revision(node, _df=None, raw=False):
+        """Obtain fulltext data for a node."""
+
+    def revdiff(rev1, rev2):
+        """Obtain a delta between two revision numbers.
+
+        The returned data is the result of ``bdiff.bdiff()`` on the raw
+        revision data.
+        """
+
+    def cmp(node, fulltext):
+        """Compare fulltext to another revision.
+
+        Returns True if the fulltext is different from what is stored.
+        """
+
+    def emitrevisiondeltas(requests):
+        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
+
+        See the documentation for ``ifiledata`` for more.
+        """
+
+    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
+        """Process a series of deltas for storage.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def getstrippoint(minlink):
+        """Find minimum revision that must be stripped to strip a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def strip(minlink, transaction):
+        """Remove storage of items starting at a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def checksize():
+        """Obtain the expected sizes of backing files.
+
+        TODO this is used by verify and it should not be part of the interface.
+        """
+
+    def files():
+        """Obtain paths that are backing storage for this manifest.
+
+        TODO this is used by verify and there should probably be a better API
+        for this functionality.
+        """
+
+    def deltaparent(rev):
+        """Obtain the revision that a revision is delta'd against.
+
+        TODO delta encoding is an implementation detail of storage and should
+        not be exposed to the storage interface.
+        """
+
+    def clone(tr, dest, **kwargs):
+        """Clone this instance to another."""
+
+    def clearcaches(clear_persisted_data=False):
+        """Clear any caches associated with this instance."""
+
+    def dirlog(d):
+        """Obtain a manifest storage instance for a tree."""
+
+    def add(m, transaction, link, p1, p2, added, removed, readtree=None):
+        """Add a revision to storage.
+
+        ``m`` is an object conforming to ``imanifestdict``.
+
+        ``link`` is the linkrev revision number.
+
+        ``p1`` and ``p2`` are the parent revision numbers.
+
+        ``added`` and ``removed`` are iterables of added and removed paths,
+        respectively.
+        """
+
 class imanifestlog(interfaceutil.Interface):
-    """Interface representing a collection of manifest snapshots."""
+    """Interface representing a collection of manifest snapshots.
+
+    Represents the root manifest in a repository.
+
+    Also serves as a means to access nested tree manifests and to cache
+    tree manifests.
+    """
 
     def __getitem__(node):
         """Obtain a manifest instance for a given binary node.
@@ -902,15 +1172,15 @@
         interface.
         """
 
-    def get(dir, node, verify=True):
+    def get(tree, node, verify=True):
         """Retrieve the manifest instance for a given directory and binary node.
 
         ``node`` always refers to the node of the root manifest (which will be
         the only manifest if flat manifests are being used).
 
-        If ``dir`` is the empty string, the root manifest is returned. Otherwise
-        the manifest for the specified directory will be returned (requires
-        tree manifests).
+        If ``tree`` is the empty string, the root manifest is returned.
+        Otherwise the manifest for the specified directory will be returned
+        (requires tree manifests).
 
         If ``verify`` is True, ``LookupError`` is raised if the node is not
         known.
@@ -919,6 +1189,15 @@
         interface.
         """
 
+    def getstorage(tree):
+        """Retrieve an interface to storage for a particular tree.
+
+        If ``tree`` is the empty bytestring, storage for the root manifest will
+        be returned. Otherwise storage for a tree manifest is returned.
+
+        TODO formalize interface for returned object.
+        """
+
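For illustration, the ``repair.py`` hunks earlier in this patch reach manifest storage exclusively through this method; a condensed sketch:

    # Root manifest storage is keyed by the empty bytestring...
    repo.manifestlog.getstorage(b'').strip(striprev, tr)
    # ...while tree manifest storage is keyed by directory path
    # (the path below is hypothetical).
    repo.manifestlog.getstorage(b'some/dir').strip(striprev, tr)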
     def clearcaches():
         """Clear caches associated with this collection."""
 
@@ -928,22 +1207,6 @@
         Raises ``error.LookupError`` if the node is not known.
         """
 
-    def addgroup(deltas, linkmapper, transaction):
-        """Process a series of deltas for storage.
-
-        ``deltas`` is an iterable of 7-tuples of
-        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
-        to add.
-
-        The ``delta`` field contains ``mpatch`` data to apply to a base
-        revision, identified by ``deltabase``. The base node can be
-        ``nullid``, in which case the header from the delta can be ignored
-        and the delta used as the fulltext.
-
-        Returns a list of nodes that were processed. A node will be in the list
-        even if it existed in the store previously.
-        """
-
 class completelocalrepository(interfaceutil.Interface):
     """Monolithic interface for local repositories.
 
--- a/mercurial/repoview.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/repoview.py	Tue Sep 04 12:16:28 2018 -0400
@@ -28,7 +28,10 @@
     branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
     changesets as "hideable". Doing so would break multiple code assertions and
     lead to crashes."""
-    return obsolete.getrevs(repo, 'obsolete')
+    obsoletes = obsolete.getrevs(repo, 'obsolete')
+    internals = repo._phasecache.getrevset(repo, phases.localhiddenphases)
+    internals = frozenset(internals)
+    return obsoletes | internals
 
 def pinnedrevs(repo):
     """revisions blocking hidden changesets from being filtered
@@ -128,7 +131,7 @@
             firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
     # protect from nullrev root
     firstmutable = max(0, firstmutable)
-    return frozenset(xrange(firstmutable, len(cl)))
+    return frozenset(pycompat.xrange(firstmutable, len(cl)))
 
 # function to compute filtered set
 #
@@ -210,7 +213,7 @@
         unfichangelog = unfi.changelog
         # bypass call to changelog.method
         unfiindex = unfichangelog.index
-        unfilen = len(unfiindex) - 1
+        unfilen = len(unfiindex)
         unfinode = unfiindex[unfilen - 1][7]
 
         revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
--- a/mercurial/revlog.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/revlog.py	Tue Sep 04 12:16:28 2018 -0400
@@ -17,7 +17,6 @@
 import contextlib
 import errno
 import hashlib
-import heapq
 import os
 import re
 import struct
@@ -27,6 +26,7 @@
 from .node import (
     bin,
     hex,
+    nullhex,
     nullid,
     nullrev,
     wdirfilenodeids,
@@ -35,6 +35,25 @@
     wdirrev,
 )
 from .i18n import _
+from .revlogutils.constants import (
+    FLAG_GENERALDELTA,
+    FLAG_INLINE_DATA,
+    REVIDX_DEFAULT_FLAGS,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+    REVIDX_FLAGS_ORDER,
+    REVIDX_ISCENSORED,
+    REVIDX_KNOWN_FLAGS,
+    REVIDX_RAWTEXT_CHANGING_FLAGS,
+    REVLOGV0,
+    REVLOGV1,
+    REVLOGV1_FLAGS,
+    REVLOGV2,
+    REVLOGV2_FLAGS,
+    REVLOG_DEFAULT_FLAGS,
+    REVLOG_DEFAULT_FORMAT,
+    REVLOG_DEFAULT_VERSION,
+)
 from .thirdparty import (
     attr,
 )
@@ -44,53 +63,50 @@
     mdiff,
     policy,
     pycompat,
+    repository,
     templatefilters,
     util,
 )
+from .revlogutils import (
+    deltas as deltautil,
+)
 from .utils import (
+    interfaceutil,
     stringutil,
 )
 
+# Blanket usage of all the names to prevent pyflakes complaints.
+# We need these names available in the module for extensions.
+REVLOGV0
+REVLOGV1
+REVLOGV2
+FLAG_INLINE_DATA
+FLAG_GENERALDELTA
+REVLOG_DEFAULT_FLAGS
+REVLOG_DEFAULT_FORMAT
+REVLOG_DEFAULT_VERSION
+REVLOGV1_FLAGS
+REVLOGV2_FLAGS
+REVIDX_ISCENSORED
+REVIDX_ELLIPSIS
+REVIDX_EXTSTORED
+REVIDX_DEFAULT_FLAGS
+REVIDX_FLAGS_ORDER
+REVIDX_KNOWN_FLAGS
+REVIDX_RAWTEXT_CHANGING_FLAGS
+
 parsers = policy.importmod(r'parsers')
 
 # Aliased for performance.
 _zlibdecompress = zlib.decompress
 
-# revlog header flags
-REVLOGV0 = 0
-REVLOGV1 = 1
-# Dummy value until file format is finalized.
-# Reminder: change the bounds check in revlog.__init__ when this is changed.
-REVLOGV2 = 0xDEAD
-FLAG_INLINE_DATA = (1 << 16)
-FLAG_GENERALDELTA = (1 << 17)
-REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
-REVLOG_DEFAULT_FORMAT = REVLOGV1
-REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
-REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
-REVLOGV2_FLAGS = REVLOGV1_FLAGS
-
-# revlog index flags
-REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
-REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
-REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
-REVIDX_DEFAULT_FLAGS = 0
-# stable order in which flags need to be processed and their processors applied
-REVIDX_FLAGS_ORDER = [
-    REVIDX_ISCENSORED,
-    REVIDX_ELLIPSIS,
-    REVIDX_EXTSTORED,
-]
-REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
-# bitmark for flags that could cause rawdata content change
-REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
-
 # max size of revlog with inline data
 _maxinline = 131072
 _chunksize = 1048576
 
 RevlogError = error.RevlogError
 LookupError = error.LookupError
+AmbiguousPrefixLookupError = error.AmbiguousPrefixLookupError
 CensoredNodeError = error.CensoredNodeError
 ProgrammingError = error.ProgrammingError
 
@@ -196,579 +212,6 @@
     s.update(text)
     return s.digest()
 
-class _testrevlog(object):
-    """minimalist fake revlog to use in doctests"""
-
-    def __init__(self, data, density=0.5, mingap=0):
-        """data is an list of revision payload boundaries"""
-        self._data = data
-        self._srdensitythreshold = density
-        self._srmingapsize = mingap
-
-    def start(self, rev):
-        if rev == 0:
-            return 0
-        return self._data[rev - 1]
-
-    def end(self, rev):
-        return self._data[rev]
-
-    def length(self, rev):
-        return self.end(rev) - self.start(rev)
-
-    def __len__(self):
-        return len(self._data)
-
-def _trimchunk(revlog, revs, startidx, endidx=None):
-    """returns revs[startidx:endidx] without empty trailing revs
-
-    Doctest Setup
-    >>> revlog = _testrevlog([
-    ...  5,  #0
-    ...  10, #1
-    ...  12, #2
-    ...  12, #3 (empty)
-    ...  17, #4
-    ...  21, #5
-    ...  21, #6 (empty)
-    ... ])
-
-    Contiguous cases:
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
-    [0, 1, 2, 3, 4, 5]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
-    [0, 1, 2, 3, 4]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
-    [0, 1, 2]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
-    [2]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
-    [3, 4, 5]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
-    [3, 4]
-
-    Discontiguous cases:
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
-    [1, 3, 5]
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
-    [1]
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
-    [3, 5]
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
-    [3, 5]
-    """
-    length = revlog.length
-
-    if endidx is None:
-        endidx = len(revs)
-
-    # If we have a non-emtpy delta candidate, there are nothing to trim
-    if revs[endidx - 1] < len(revlog):
-        # Trim empty revs at the end, except the very first revision of a chain
-        while (endidx > 1
-                and endidx > startidx
-                and length(revs[endidx - 1]) == 0):
-            endidx -= 1
-
-    return revs[startidx:endidx]
-
-def _segmentspan(revlog, revs, deltainfo=None):
-    """Get the byte span of a segment of revisions
-
-    revs is a sorted array of revision numbers
-
-    >>> revlog = _testrevlog([
-    ...  5,  #0
-    ...  10, #1
-    ...  12, #2
-    ...  12, #3 (empty)
-    ...  17, #4
-    ... ])
-
-    >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
-    17
-    >>> _segmentspan(revlog, [0, 4])
-    17
-    >>> _segmentspan(revlog, [3, 4])
-    5
-    >>> _segmentspan(revlog, [1, 2, 3,])
-    7
-    >>> _segmentspan(revlog, [1, 3])
-    7
-    """
-    if not revs:
-        return 0
-    if deltainfo is not None and len(revlog) <= revs[-1]:
-        if len(revs) == 1:
-            return deltainfo.deltalen
-        offset = revlog.end(len(revlog) - 1)
-        end = deltainfo.deltalen + offset
-    else:
-        end = revlog.end(revs[-1])
-    return end - revlog.start(revs[0])
-
-def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
-    """slice revs to reduce the amount of unrelated data to be read from disk.
-
-    ``revs`` is sliced into groups that should be read in one time.
-    Assume that revs are sorted.
-
-    The initial chunk is sliced until the overall density (payload/chunks-span
-    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
-    `revlog._srmingapsize` is skipped.
-
-    If `targetsize` is set, no chunk larger than `targetsize` will be yield.
-    For consistency with other slicing choice, this limit won't go lower than
-    `revlog._srmingapsize`.
-
-    If individual revisions chunk are larger than this limit, they will still
-    be raised individually.
-
-    >>> revlog = _testrevlog([
-    ...  5,  #00 (5)
-    ...  10, #01 (5)
-    ...  12, #02 (2)
-    ...  12, #03 (empty)
-    ...  27, #04 (15)
-    ...  31, #05 (4)
-    ...  31, #06 (empty)
-    ...  42, #07 (11)
-    ...  47, #08 (5)
-    ...  47, #09 (empty)
-    ...  48, #10 (1)
-    ...  51, #11 (3)
-    ...  74, #12 (23)
-    ...  85, #13 (11)
-    ...  86, #14 (1)
-    ...  91, #15 (5)
-    ... ])
-
-    >>> list(_slicechunk(revlog, list(range(16))))
-    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
-    >>> list(_slicechunk(revlog, [0, 15]))
-    [[0], [15]]
-    >>> list(_slicechunk(revlog, [0, 11, 15]))
-    [[0], [11], [15]]
-    >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
-    [[0], [11, 13, 15]]
-    >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
-    [[1, 2], [5, 8, 10, 11], [14]]
-
-    Slicing with a maximum chunk size
-    >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
-    [[0], [11], [13], [15]]
-    >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
-    [[0], [11], [13, 15]]
-    """
-    if targetsize is not None:
-        targetsize = max(targetsize, revlog._srmingapsize)
-    # targetsize should not be specified when evaluating delta candidates:
-    # * targetsize is used to ensure we stay within specification when reading,
-    # * deltainfo is used to pick are good delta chain when writing.
-    if not (deltainfo is None or targetsize is None):
-        msg = 'cannot use `targetsize` with a `deltainfo`'
-        raise error.ProgrammingError(msg)
-    for chunk in _slicechunktodensity(revlog, revs,
-                                      deltainfo,
-                                      revlog._srdensitythreshold,
-                                      revlog._srmingapsize):
-        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
-            yield subchunk
-
-def _slicechunktosize(revlog, revs, targetsize=None):
-    """slice revs to match the target size
-
-    This is intended to be used on chunk that density slicing selected by that
-    are still too large compared to the read garantee of revlog. This might
-    happens when "minimal gap size" interrupted the slicing or when chain are
-    built in a way that create large blocks next to each other.
-
-    >>> revlog = _testrevlog([
-    ...  3,  #0 (3)
-    ...  5,  #1 (2)
-    ...  6,  #2 (1)
-    ...  8,  #3 (2)
-    ...  8,  #4 (empty)
-    ...  11, #5 (3)
-    ...  12, #6 (1)
-    ...  13, #7 (1)
-    ...  14, #8 (1)
-    ... ])
-
-    Cases where chunk is already small enough
-    >>> list(_slicechunktosize(revlog, [0], 3))
-    [[0]]
-    >>> list(_slicechunktosize(revlog, [6, 7], 3))
-    [[6, 7]]
-    >>> list(_slicechunktosize(revlog, [0], None))
-    [[0]]
-    >>> list(_slicechunktosize(revlog, [6, 7], None))
-    [[6, 7]]
-
-    cases where we need actual slicing
-    >>> list(_slicechunktosize(revlog, [0, 1], 3))
-    [[0], [1]]
-    >>> list(_slicechunktosize(revlog, [1, 3], 3))
-    [[1], [3]]
-    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
-    [[1, 2], [3]]
-    >>> list(_slicechunktosize(revlog, [3, 5], 3))
-    [[3], [5]]
-    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
-    [[3], [5]]
-    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
-    [[5], [6, 7, 8]]
-    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
-    [[0], [1, 2], [3], [5], [6, 7, 8]]
-
-    Case with too large individual chunk (must return valid chunk)
-    >>> list(_slicechunktosize(revlog, [0, 1], 2))
-    [[0], [1]]
-    >>> list(_slicechunktosize(revlog, [1, 3], 1))
-    [[1], [3]]
-    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
-    [[3], [5]]
-    """
-    assert targetsize is None or 0 <= targetsize
-    if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
-        yield revs
-        return
-
-    startrevidx = 0
-    startdata = revlog.start(revs[0])
-    endrevidx = 0
-    iterrevs = enumerate(revs)
-    next(iterrevs) # skip first rev.
-    for idx, r in iterrevs:
-        span = revlog.end(r) - startdata
-        if span <= targetsize:
-            endrevidx = idx
-        else:
-            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
-            if chunk:
-                yield chunk
-            startrevidx = idx
-            startdata = revlog.start(r)
-            endrevidx = idx
-    yield _trimchunk(revlog, revs, startrevidx)
-
-def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
-                         mingapsize=0):
-    """slice revs to reduce the amount of unrelated data to be read from disk.
-
-    ``revs`` is sliced into groups that should be read in one time.
-    Assume that revs are sorted.
-
-    ``deltainfo`` is a _deltainfo instance of a revision that we would append
-    to the top of the revlog.
-
-    The initial chunk is sliced until the overall density (payload/chunks-span
-    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
-    skipped.
-
-    >>> revlog = _testrevlog([
-    ...  5,  #00 (5)
-    ...  10, #01 (5)
-    ...  12, #02 (2)
-    ...  12, #03 (empty)
-    ...  27, #04 (15)
-    ...  31, #05 (4)
-    ...  31, #06 (empty)
-    ...  42, #07 (11)
-    ...  47, #08 (5)
-    ...  47, #09 (empty)
-    ...  48, #10 (1)
-    ...  51, #11 (3)
-    ...  74, #12 (23)
-    ...  85, #13 (11)
-    ...  86, #14 (1)
-    ...  91, #15 (5)
-    ... ])
-
-    >>> list(_slicechunktodensity(revlog, list(range(16))))
-    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
-    >>> list(_slicechunktodensity(revlog, [0, 15]))
-    [[0], [15]]
-    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
-    [[0], [11], [15]]
-    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
-    [[0], [11, 13, 15]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
-    [[1, 2], [5, 8, 10, 11], [14]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
-    ...                           mingapsize=20))
-    [[1, 2, 3, 5, 8, 10, 11], [14]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
-    ...                           targetdensity=0.95))
-    [[1, 2], [5], [8, 10, 11], [14]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
-    ...                           targetdensity=0.95, mingapsize=12))
-    [[1, 2], [5, 8, 10, 11], [14]]
-    """
-    start = revlog.start
-    length = revlog.length
-
-    if len(revs) <= 1:
-        yield revs
-        return
-
-    nextrev = len(revlog)
-    nextoffset = revlog.end(nextrev - 1)
-
-    if deltainfo is None:
-        deltachainspan = _segmentspan(revlog, revs)
-        chainpayload = sum(length(r) for r in revs)
-    else:
-        deltachainspan = deltainfo.distance
-        chainpayload = deltainfo.compresseddeltalen
-
-    if deltachainspan < mingapsize:
-        yield revs
-        return
-
-    readdata = deltachainspan
-
-    if deltachainspan:
-        density = chainpayload / float(deltachainspan)
-    else:
-        density = 1.0
-
-    if density >= targetdensity:
-        yield revs
-        return
-
-    if deltainfo is not None and deltainfo.deltalen:
-        revs = list(revs)
-        revs.append(nextrev)
-
-    # Store the gaps in a heap to have them sorted by decreasing size
-    gapsheap = []
-    heapq.heapify(gapsheap)
-    prevend = None
-    for i, rev in enumerate(revs):
-        if rev < nextrev:
-            revstart = start(rev)
-            revlen = length(rev)
-        else:
-            revstart = nextoffset
-            revlen = deltainfo.deltalen
-
-        # Skip empty revisions to form larger holes
-        if revlen == 0:
-            continue
-
-        if prevend is not None:
-            gapsize = revstart - prevend
-            # only consider holes that are large enough
-            if gapsize > mingapsize:
-                heapq.heappush(gapsheap, (-gapsize, i))
-
-        prevend = revstart + revlen
-
-    # Collect the indices of the largest holes until the density is acceptable
-    indicesheap = []
-    heapq.heapify(indicesheap)
-    while gapsheap and density < targetdensity:
-        oppgapsize, gapidx = heapq.heappop(gapsheap)
-
-        heapq.heappush(indicesheap, gapidx)
-
-        # the gap sizes are stored as negatives to be sorted decreasingly
-        # by the heap
-        readdata -= (-oppgapsize)
-        if readdata > 0:
-            density = chainpayload / float(readdata)
-        else:
-            density = 1.0
-
-    # Cut the revs at collected indices
-    previdx = 0
-    while indicesheap:
-        idx = heapq.heappop(indicesheap)
-
-        chunk = _trimchunk(revlog, revs, previdx, idx)
-        if chunk:
-            yield chunk
-
-        previdx = idx
-
-    chunk = _trimchunk(revlog, revs, previdx)
-    if chunk:
-        yield chunk
-
-@attr.s(slots=True, frozen=True)
-class _deltainfo(object):
-    distance = attr.ib()
-    deltalen = attr.ib()
-    data = attr.ib()
-    base = attr.ib()
-    chainbase = attr.ib()
-    chainlen = attr.ib()
-    compresseddeltalen = attr.ib()
-
-class _deltacomputer(object):
-    def __init__(self, revlog):
-        self.revlog = revlog
-
-    def _getcandidaterevs(self, p1, p2, cachedelta):
-        """
-        Provides revisions that present an interest to be diffed against,
-        grouped by level of easiness.
-        """
-        revlog = self.revlog
-        gdelta = revlog._generaldelta
-        curr = len(revlog)
-        prev = curr - 1
-        p1r, p2r = revlog.rev(p1), revlog.rev(p2)
-
-        # should we try to build a delta?
-        if prev != nullrev and revlog.storedeltachains:
-            tested = set()
-            # This condition is true most of the time when processing
-            # changegroup data into a generaldelta repo. The only time it
-            # isn't true is if this is the first revision in a delta chain
-            # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
-            if cachedelta and gdelta and revlog._lazydeltabase:
-                # Assume what we received from the server is a good choice
-                # build delta will reuse the cache
-                yield (cachedelta[0],)
-                tested.add(cachedelta[0])
-
-            if gdelta:
-                # exclude already lazy tested base if any
-                parents = [p for p in (p1r, p2r)
-                           if p != nullrev and p not in tested]
-
-                if not revlog._deltabothparents and len(parents) == 2:
-                    parents.sort()
-                    # To minimize the chance of having to build a fulltext,
-                    # pick first whichever parent is closest to us (max rev)
-                    yield (parents[1],)
-                    # then the other one (min rev) if the first did not fit
-                    yield (parents[0],)
-                    tested.update(parents)
-                elif len(parents) > 0:
-                    # Test all parents (1 or 2), and keep the best candidate
-                    yield parents
-                    tested.update(parents)
-
-            if prev not in tested:
-                # other approach failed try against prev to hopefully save us a
-                # fulltext.
-                yield (prev,)
-                tested.add(prev)
-
-    def buildtext(self, revinfo, fh):
-        """Builds a fulltext version of a revision
-
-        revinfo: _revisioninfo instance that contains all needed info
-        fh:      file handle to either the .i or the .d revlog file,
-                 depending on whether it is inlined or not
-        """
-        btext = revinfo.btext
-        if btext[0] is not None:
-            return btext[0]
-
-        revlog = self.revlog
-        cachedelta = revinfo.cachedelta
-        flags = revinfo.flags
-        node = revinfo.node
-
-        baserev = cachedelta[0]
-        delta = cachedelta[1]
-        # special case deltas which replace entire base; no need to decode
-        # base revision. this neatly avoids censored bases, which throw when
-        # they're decoded.
-        hlen = struct.calcsize(">lll")
-        if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
-                                                   len(delta) - hlen):
-            btext[0] = delta[hlen:]
-        else:
-            # deltabase is rawtext before changed by flag processors, which is
-            # equivalent to non-raw text
-            basetext = revlog.revision(baserev, _df=fh, raw=False)
-            btext[0] = mdiff.patch(basetext, delta)
-
-        try:
-            res = revlog._processflags(btext[0], flags, 'read', raw=True)
-            btext[0], validatehash = res
-            if validatehash:
-                revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
-            if flags & REVIDX_ISCENSORED:
-                raise RevlogError(_('node %s is not censored') % node)
-        except CensoredNodeError:
-            # must pass the censored index flag to add censored revisions
-            if not flags & REVIDX_ISCENSORED:
-                raise
-        return btext[0]
-
-    def _builddeltadiff(self, base, revinfo, fh):
-        revlog = self.revlog
-        t = self.buildtext(revinfo, fh)
-        if revlog.iscensored(base):
-            # deltas based on a censored revision must replace the
-            # full content in one patch, so delta works everywhere
-            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
-            delta = header + t
-        else:
-            ptext = revlog.revision(base, _df=fh, raw=True)
-            delta = mdiff.textdiff(ptext, t)
-
-        return delta
-
-    def _builddeltainfo(self, revinfo, base, fh):
-        # can we use the cached delta?
-        if revinfo.cachedelta and revinfo.cachedelta[0] == base:
-            delta = revinfo.cachedelta[1]
-        else:
-            delta = self._builddeltadiff(base, revinfo, fh)
-        revlog = self.revlog
-        header, data = revlog.compress(delta)
-        deltalen = len(header) + len(data)
-        chainbase = revlog.chainbase(base)
-        offset = revlog.end(len(revlog) - 1)
-        dist = deltalen + offset - revlog.start(chainbase)
-        if revlog._generaldelta:
-            deltabase = base
-        else:
-            deltabase = chainbase
-        chainlen, compresseddeltalen = revlog._chaininfo(base)
-        chainlen += 1
-        compresseddeltalen += deltalen
-        return _deltainfo(dist, deltalen, (header, data), deltabase,
-                         chainbase, chainlen, compresseddeltalen)
-
-    def finddeltainfo(self, revinfo, fh):
-        """Find an acceptable delta against a candidate revision
-
-        revinfo: information about the revision (instance of _revisioninfo)
-        fh:      file handle to either the .i or the .d revlog file,
-                 depending on whether it is inlined or not
-
-        Returns the first acceptable candidate revision, as ordered by
-        _getcandidaterevs
-        """
-        cachedelta = revinfo.cachedelta
-        p1 = revinfo.p1
-        p2 = revinfo.p2
-        revlog = self.revlog
-
-        deltainfo = None
-        for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
-            nominateddeltas = []
-            for candidaterev in candidaterevs:
-                # no delta for rawtext-changing revs (see "candelta" for why)
-                if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
-                    continue
-                candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
-                if revlog._isgooddeltainfo(candidatedelta, revinfo):
-                    nominateddeltas.append(candidatedelta)
-            if nominateddeltas:
-                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
-                break
-
-        return deltainfo
-
 @attr.s(slots=True, frozen=True)
 class _revisioninfo(object):
     """Information about a revision that allows building its fulltext
@@ -788,6 +231,19 @@
     cachedelta = attr.ib()
     flags = attr.ib()
 
+@interfaceutil.implementer(repository.irevisiondelta)
+@attr.s(slots=True, frozen=True)
+class revlogrevisiondelta(object):
+    node = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    linknode = attr.ib()
+    flags = attr.ib()
+    baserevisionsize = attr.ib()
+    revision = attr.ib()
+    delta = attr.ib()
+
 # index v0:
 #  4 bytes: offset
 #  4 bytes: compressed length
@@ -800,6 +256,12 @@
 indexformatv0_pack = indexformatv0.pack
 indexformatv0_unpack = indexformatv0.unpack
 
+class revlogoldindex(list):
+    def __getitem__(self, i):
+        if i == -1:
+            return (0, 0, 0, -1, -1, -1, -1, nullid)
+        return list.__getitem__(self, i)
+
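A short sketch of the behavior this shim preserves: the magic null entry is no longer stored in the index, but index ``-1`` still resolves to it (the entry values below are hypothetical; ``nullid`` is the constant imported above):

    index = revlogoldindex([(0, 12, 12, -1, -1, -1, -1, b'\x11' * 20)])
    assert len(index) == 1                                 # real revisions only
    assert index[-1] == (0, 0, 0, -1, -1, -1, -1, nullid)  # synthesized entry
    assert index[0][7] == b'\x11' * 20                     # regular access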
 class revlogoldio(object):
     def __init__(self):
         self.size = indexformatv0.size
@@ -821,10 +283,7 @@
             nodemap[e[6]] = n
             n += 1
 
-        # add the magic null revision at -1
-        index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
-        return index, nodemap, None
+        return revlogoldindex(index), nodemap, None
 
     def packentry(self, entry, node, version, rev):
         if gettype(entry[0]):
@@ -1021,7 +480,7 @@
             raise RevlogError(_('unknown version (%d) in revlog %s') %
                               (fmt, self.indexfile))
 
-        self.storedeltachains = True
+        self._storedeltachains = True
 
         self._io = revlogio()
         if self.version == REVLOGV0:
@@ -1071,27 +530,33 @@
                 yield fp
 
     def tip(self):
-        return self.node(len(self.index) - 2)
+        return self.node(len(self.index) - 1)
     def __contains__(self, rev):
         return 0 <= rev < len(self)
     def __len__(self):
-        return len(self.index) - 1
+        return len(self.index)
     def __iter__(self):
-        return iter(xrange(len(self)))
+        return iter(pycompat.xrange(len(self)))
     def revs(self, start=0, stop=None):
         """iterate over all rev in this revlog (from start to stop)"""
         step = 1
+        length = len(self)
         if stop is not None:
             if start > stop:
                 step = -1
             stop += step
+            if stop > length:
+                stop = length
         else:
-            stop = len(self)
-        return xrange(start, stop, step)
+            stop = length
+        return pycompat.xrange(start, stop, step)
 
     @util.propertycache
     def nodemap(self):
-        self.rev(self.node(0))
+        if self.index:
+            # populate mapping down to the initial node
+            node0 = self.index[0][7]  # get around changelog filtering
+            self.rev(node0)
         return self._nodecache
 
     def hasnode(self, node):
@@ -1141,10 +606,10 @@
             i = self.index
             p = self._nodepos
             if p is None:
-                p = len(i) - 2
+                p = len(i) - 1
             else:
                 assert p < len(i)
-            for r in xrange(p, -1, -1):
+            for r in pycompat.xrange(p, -1, -1):
                 v = i[r][7]
                 n[v] = r
                 if v == node:
@@ -1711,11 +1176,6 @@
         a, b = self.rev(a), self.rev(b)
         return self.isancestorrev(a, b)
 
-    def descendant(self, a, b):
-        msg = 'revlog.descendant is deprecated, use revlog.isancestorrev'
-        util.nouideprecwarn(msg, '4.7')
-        return self.isancestorrev(a, b)
-
     def isancestorrev(self, a, b):
         """return True if revision a is an ancestor of revision b
 
@@ -1796,8 +1256,8 @@
             # parsers.c radix tree lookup gave multiple matches
             # fast path: for unfiltered changelog, radix tree is accurate
             if not getattr(self, 'filteredrevs', None):
-                raise LookupError(id, self.indexfile,
-                                  _('ambiguous identifier'))
+                raise AmbiguousPrefixLookupError(id, self.indexfile,
+                                                 _('ambiguous identifier'))
             # fall through to slow path that filters hidden revisions
         except (AttributeError, ValueError):
             # we are pure python, or key was too short to search radix tree
@@ -1814,12 +1274,14 @@
                 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                 nl = [n for n in nl if hex(n).startswith(id) and
                       self.hasnode(n)]
+                if nullhex.startswith(id):
+                    nl.append(nullid)
                 if len(nl) > 0:
                     if len(nl) == 1 and not maybewdir:
                         self._pcache[id] = nl[0]
                         return nl[0]
-                    raise LookupError(id, self.indexfile,
-                                      _('ambiguous identifier'))
+                    raise AmbiguousPrefixLookupError(id, self.indexfile,
+                                                     _('ambiguous identifier'))
                 if maybewdir:
                     raise error.WdirUnsupported
                 return None
@@ -2030,7 +1492,8 @@
         if not self._withsparseread:
             slicedchunks = (revs,)
         else:
-            slicedchunks = _slicechunk(self, revs, targetsize=targetsize)
+            slicedchunks = deltautil.slicechunk(self, revs,
+                                                targetsize=targetsize)
 
         for revschunk in slicedchunks:
             firstrev = revschunk[0]
@@ -2070,6 +1533,25 @@
         else:
             return rev - 1
 
+    def issnapshot(self, rev):
+        """tells whether rev is a snapshot
+        """
+        if rev == nullrev:
+            return True
+        deltap = self.deltaparent(rev)
+        if deltap == nullrev:
+            return True
+        p1, p2 = self.parentrevs(rev)
+        if deltap in (p1, p2):
+            return False
+        return self.issnapshot(deltap)
+
+    def snapshotdepth(self, rev):
+        """number of snapshot in the chain before this one"""
+        if not self.issnapshot(rev):
+            raise ProgrammingError('revision %d not a snapshot' % rev)
+        return len(self._deltachain(rev)[0]) - 1
+
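To make the recursion concrete, a standalone sketch of the same logic against a fake store (hypothetical code, not part of the patch; ``nullrev`` is -1):

    def issnapshot(store, rev):
        # mirrors revlog.issnapshot above
        if rev == -1:
            return True
        deltap = store.deltaparent(rev)
        if deltap == -1:
            return True
        if deltap in store.parentrevs(rev):
            return False
        return issnapshot(store, deltap)

    class fakestore(object):
        # rev -> (deltaparent, (p1, p2))
        _d = {0: (-1, (-1, -1)),  # stored as fulltext
              1: (0, (0, -1)),    # delta against its own parent
              2: (0, (1, -1))}    # delta against a non-parent snapshot
        def deltaparent(self, rev):
            return self._d[rev][0]
        def parentrevs(self, rev):
            return self._d[rev][1]

    assert issnapshot(fakestore(), 0)      # full revisions are snapshots
    assert not issnapshot(fakestore(), 1)  # ordinary delta chains are not
    assert issnapshot(fakestore(), 2)      # intermediate snapshot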
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions
 
@@ -2254,7 +1736,9 @@
         revlog has grown too large to be an inline revlog, it will convert it
         to use multiple index and data files.
         """
-        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
+        tiprev = len(self) - 1
+        if (not self._inline or
+            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
             return
 
         trinfo = tr.find(self.indexfile)
@@ -2268,7 +1752,7 @@
         else:
             # revlog was stripped at start of transaction, use all leftover data
             trindex = len(self) - 1
-            dataoff = self.end(-2)
+            dataoff = self.end(tiprev)
 
         tr.add(self.datafile, dataoff)
 
@@ -2307,7 +1791,7 @@
             computed by default as hash(text, p1, p2), however subclasses might
             use different hashing method (and override checkhash() in such case)
         flags - the known flags to set on the revision
-        deltacomputer - an optional _deltacomputer instance shared between
+        deltacomputer - an optional deltacomputer instance shared between
             multiple calls
         """
         if link == nullrev:
@@ -2430,54 +1914,6 @@
 
         return compressor.decompress(data)
 
-    def _isgooddeltainfo(self, deltainfo, revinfo):
-        """Returns True if the given delta is good. Good means that it is within
-        the disk span, disk size, and chain length bounds that we know to be
-        performant."""
-        if deltainfo is None:
-            return False
-
-        # - 'deltainfo.distance' is the distance from the base revision --
-        #   bounding it limits the amount of I/O we need to do.
-        # - 'deltainfo.compresseddeltalen' is the sum of the total size of
-        #   deltas we need to apply -- bounding it limits the amount of CPU
-        #   we consume.
-
-        if self._sparserevlog:
-            # As sparse-read will be used, we can consider that the distance,
-            # instead of being the span of the whole chunk,
-            # is the span of the largest read chunk
-            base = deltainfo.base
-
-            if base != nullrev:
-                deltachain = self._deltachain(base)[0]
-            else:
-                deltachain = []
-
-            chunks = _slicechunk(self, deltachain, deltainfo)
-            all_span = [_segmentspan(self, revs, deltainfo) for revs in chunks]
-            distance = max(all_span)
-        else:
-            distance = deltainfo.distance
-
-        textlen = revinfo.textlen
-        defaultmax = textlen * 4
-        maxdist = self._maxdeltachainspan
-        if not maxdist:
-            maxdist = distance # ensure the conditional pass
-        maxdist = max(maxdist, defaultmax)
-        if self._sparserevlog and maxdist < self._srmingapsize:
-            # In multiple place, we are ignoring irrelevant data range below a
-            # certain size. Be also apply this tradeoff here and relax span
-            # constraint for small enought content.
-            maxdist = self._srmingapsize
-        if (distance > maxdist or deltainfo.deltalen > textlen or
-            deltainfo.compresseddeltalen > textlen * 2 or
-            (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
-            return False
-
-        return True
-
     def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                      cachedelta, ifh, dfh, alwayscache=False,
                      deltacomputer=None):
@@ -2525,43 +1961,29 @@
             textlen = len(rawtext)
 
         if deltacomputer is None:
-            deltacomputer = _deltacomputer(self)
+            deltacomputer = deltautil.deltacomputer(self)
 
         revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
 
-        # no delta for flag processor revision (see "candelta" for why)
-        # not calling candelta since only one revision needs test, also to
-        # avoid overhead fetching flags again.
-        if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
-            deltainfo = None
-        else:
-            deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
+        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
 
-        if deltainfo is not None:
-            base = deltainfo.base
-            chainbase = deltainfo.chainbase
-            data = deltainfo.data
-            l = deltainfo.deltalen
-        else:
-            rawtext = deltacomputer.buildtext(revinfo, fh)
-            data = self.compress(rawtext)
-            l = len(data[1]) + len(data[0])
-            base = chainbase = curr
-
-        e = (offset_type(offset, flags), l, textlen,
-             base, link, p1r, p2r, node)
-        self.index.insert(-1, e)
+        e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
+             deltainfo.base, link, p1r, p2r, node)
+        self.index.append(e)
         self.nodemap[node] = curr
 
         entry = self._io.packentry(e, self.node, self.version, curr)
-        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
+        self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
+                         link, offset)
+
+        rawtext = btext[0]
 
         if alwayscache and rawtext is None:
-            rawtext = deltacomputer._buildtext(revinfo, fh)
+            rawtext = deltacomputer.buildtext(revinfo, fh)
 
         if type(rawtext) == bytes: # only accept immutable objects
             self._cache = (node, curr, rawtext)
-        self._chainbasecache[curr] = chainbase
+        self._chainbasecache[curr] = deltainfo.chainbase
         return node
 
     def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
@@ -2627,7 +2049,7 @@
                 dfh.flush()
             ifh.flush()
         try:
-            deltacomputer = _deltacomputer(self)
+            deltacomputer = deltautil.deltacomputer(self)
             # loop through our set of deltas
             for data in deltas:
                 node, p1, p2, linknode, deltabase, delta, flags = data
@@ -2798,7 +2220,7 @@
         self._cache = None
         self._chaininfocache = {}
         self._chunkclear()
-        for x in xrange(rev, len(self)):
+        for x in pycompat.xrange(rev, len(self)):
             del self.nodemap[self.node(x)]
 
         del self.index[rev:-1]
@@ -2846,6 +2268,87 @@
             res.append(self.datafile)
         return res
 
+    def emitrevisiondeltas(self, requests):
+        frev = self.rev
+
+        prevrev = None
+        for request in requests:
+            node = request.node
+            rev = frev(node)
+
+            if prevrev is None:
+                prevrev = self.index[rev][5]
+
+            # Requesting a full revision.
+            if request.basenode == nullid:
+                baserev = nullrev
+            # Requesting an explicit revision.
+            elif request.basenode is not None:
+                baserev = frev(request.basenode)
+            # Allowing us to choose.
+            else:
+                p1rev, p2rev = self.parentrevs(rev)
+                deltaparentrev = self.deltaparent(rev)
+
+                # Avoid sending full revisions when delta parent is null. Pick
+                # prev in that case. It's tempting to pick p1 in this case, as
+                # p1 will be smaller in the common case. However, computing a
+                # delta against p1 may require resolving the raw text of p1,
+                # which could be expensive. The revlog caches should have prev
+                # cached, meaning less CPU for delta generation. There is
+                # likely room to add a flag and/or config option to control this
+                # behavior.
+                if deltaparentrev == nullrev and self._storedeltachains:
+                    baserev = prevrev
+
+                # Revlog is configured to use full snapshots for a reason.
+                # Stick to a full snapshot.
+                elif deltaparentrev == nullrev:
+                    baserev = nullrev
+
+                # Pick previous when we can't be sure the base is available
+                # on consumer.
+                elif deltaparentrev not in (p1rev, p2rev, prevrev):
+                    baserev = prevrev
+                else:
+                    baserev = deltaparentrev
+
+                if baserev != nullrev and not self.candelta(baserev, rev):
+                    baserev = nullrev
+
+            revision = None
+            delta = None
+            baserevisionsize = None
+
+            if self.iscensored(baserev) or self.iscensored(rev):
+                try:
+                    revision = self.revision(node, raw=True)
+                except error.CensoredNodeError as e:
+                    revision = e.tombstone
+
+                if baserev != nullrev:
+                    baserevisionsize = self.rawsize(baserev)
+
+            elif baserev == nullrev:
+                revision = self.revision(node, raw=True)
+            else:
+                delta = self.revdiff(baserev, rev)
+
+            extraflags = REVIDX_ELLIPSIS if request.ellipsis else 0
+
+            yield revlogrevisiondelta(
+                node=node,
+                p1node=request.p1node,
+                p2node=request.p2node,
+                linknode=request.linknode,
+                basenode=self.node(baserev),
+                flags=self.flags(rev) | extraflags,
+                baserevisionsize=baserevisionsize,
+                revision=revision,
+                delta=delta)
+
+            prevrev = rev
+
     DELTAREUSEALWAYS = 'always'
     DELTAREUSESAMEREVS = 'samerevs'
     DELTAREUSENEVER = 'never'
@@ -2919,7 +2422,7 @@
             populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                 self.DELTAREUSESAMEREVS)
 
-            deltacomputer = _deltacomputer(destrevlog)
+            deltacomputer = deltautil.deltacomputer(destrevlog)
             index = self.index
             for rev in self:
                 entry = index[rev]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/constants.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,43 @@
+# constants.py - constants used by revlog logic
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2018 Octobus <contact@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Helper class to compute deltas stored inside revlogs"""
+
+from __future__ import absolute_import
+
+from .. import (
+    util,
+)
+
+# revlog header flags
+REVLOGV0 = 0
+REVLOGV1 = 1
+# Dummy value until file format is finalized.
+# Reminder: change the bounds check in revlog.__init__ when this is changed.
+REVLOGV2 = 0xDEAD
+FLAG_INLINE_DATA = (1 << 16)
+FLAG_GENERALDELTA = (1 << 17)
+REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
+REVLOG_DEFAULT_FORMAT = REVLOGV1
+REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
+REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
+REVLOGV2_FLAGS = REVLOGV1_FLAGS
+
+# revlog index flags
+REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
+REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
+REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
+REVIDX_DEFAULT_FLAGS = 0
+# stable order in which flags need to be processed and their processors applied
+REVIDX_FLAGS_ORDER = [
+    REVIDX_ISCENSORED,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+]
+REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
+# bitmask of flags that could cause rawdata content to change
+REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
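An illustrative sketch of how these masks compose; the values follow directly from the definitions above:

    # util.bitsfrom ORs the listed flags together:
    # (1 << 15) | (1 << 14) | (1 << 13) == 0xe000
    assert REVIDX_KNOWN_FLAGS == 0xe000

    # Typical test performed before reusing a stored delta:
    flags = REVIDX_ISCENSORED
    if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
        pass  # rawtext may differ from stored data; reusing deltas is unsafe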
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/deltas.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,795 @@
+# deltas.py - logic around delta computation for revlog
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2018 Octobus <contact@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Helper class to compute deltas stored inside revlogs"""
+
+from __future__ import absolute_import
+
+import heapq
+import struct
+
+# import stuff from node for others to import from revlog
+from ..node import (
+    nullrev,
+)
+from ..i18n import _
+
+from .constants import (
+    REVIDX_ISCENSORED,
+    REVIDX_RAWTEXT_CHANGING_FLAGS,
+)
+
+from ..thirdparty import (
+    attr,
+)
+
+from .. import (
+    error,
+    mdiff,
+)
+
+RevlogError = error.RevlogError
+CensoredNodeError = error.CensoredNodeError
+
+# maximum <delta-chain-data>/<revision-text-length> ratio
+LIMIT_DELTA2TEXT = 2
+
+class _testrevlog(object):
+    """minimalist fake revlog to use in doctests"""
+
+    def __init__(self, data, density=0.5, mingap=0):
+        """data is an list of revision payload boundaries"""
+        self._data = data
+        self._srdensitythreshold = density
+        self._srmingapsize = mingap
+
+    def start(self, rev):
+        if rev == 0:
+            return 0
+        return self._data[rev - 1]
+
+    def end(self, rev):
+        return self._data[rev]
+
+    def length(self, rev):
+        return self.end(rev) - self.start(rev)
+
+    def __len__(self):
+        return len(self._data)
+
+def slicechunk(revlog, revs, deltainfo=None, targetsize=None):
+    """slice revs to reduce the amount of unrelated data to be read from disk.
+
+    ``revs`` is sliced into groups that should be read in one go.
+    Assume that revs are sorted.
+
+    The initial chunk is sliced until the overall density (payload/chunks-span
+    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
+    `revlog._srmingapsize` is skipped.
+
+    If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
+    For consistency with other slicing choices, this limit won't go lower than
+    `revlog._srmingapsize`.
+
+    If individual revision chunks are larger than this limit, they will still
+    be yielded individually.
+
+    >>> revlog = _testrevlog([
+    ...  5,  #00 (5)
+    ...  10, #01 (5)
+    ...  12, #02 (2)
+    ...  12, #03 (empty)
+    ...  27, #04 (15)
+    ...  31, #05 (4)
+    ...  31, #06 (empty)
+    ...  42, #07 (11)
+    ...  47, #08 (5)
+    ...  47, #09 (empty)
+    ...  48, #10 (1)
+    ...  51, #11 (3)
+    ...  74, #12 (23)
+    ...  85, #13 (11)
+    ...  86, #14 (1)
+    ...  91, #15 (5)
+    ... ])
+
+    >>> list(slicechunk(revlog, list(range(16))))
+    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
+    >>> list(slicechunk(revlog, [0, 15]))
+    [[0], [15]]
+    >>> list(slicechunk(revlog, [0, 11, 15]))
+    [[0], [11], [15]]
+    >>> list(slicechunk(revlog, [0, 11, 13, 15]))
+    [[0], [11, 13, 15]]
+    >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
+    [[1, 2], [5, 8, 10, 11], [14]]
+
+    Slicing with a maximum chunk size
+    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
+    [[0], [11], [13], [15]]
+    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
+    [[0], [11], [13, 15]]
+    """
+    if targetsize is not None:
+        targetsize = max(targetsize, revlog._srmingapsize)
+    # targetsize should not be specified when evaluating delta candidates:
+    # * targetsize is used to ensure we stay within specification when reading,
+    # * deltainfo is used to pick a good delta chain when writing.
+    if not (deltainfo is None or targetsize is None):
+        msg = 'cannot use `targetsize` with a `deltainfo`'
+        raise error.ProgrammingError(msg)
+    for chunk in _slicechunktodensity(revlog, revs,
+                                      deltainfo,
+                                      revlog._srdensitythreshold,
+                                      revlog._srmingapsize):
+        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
+            yield subchunk
+
+def _slicechunktosize(revlog, revs, targetsize=None):
+    """slice revs to match the target size
+
+    This is intended to be used on chunks that density slicing selected, but
+    that are still too large compared to the read guarantee of revlogs. This
+    might happen when the "minimal gap size" interrupted the slicing, or when
+    chains are built in a way that creates large blocks next to each other.
+
+    >>> revlog = _testrevlog([
+    ...  3,  #0 (3)
+    ...  5,  #1 (2)
+    ...  6,  #2 (1)
+    ...  8,  #3 (2)
+    ...  8,  #4 (empty)
+    ...  11, #5 (3)
+    ...  12, #6 (1)
+    ...  13, #7 (1)
+    ...  14, #8 (1)
+    ... ])
+
+    Cases where chunk is already small enough
+    >>> list(_slicechunktosize(revlog, [0], 3))
+    [[0]]
+    >>> list(_slicechunktosize(revlog, [6, 7], 3))
+    [[6, 7]]
+    >>> list(_slicechunktosize(revlog, [0], None))
+    [[0]]
+    >>> list(_slicechunktosize(revlog, [6, 7], None))
+    [[6, 7]]
+
+    cases where we need actual slicing
+    >>> list(_slicechunktosize(revlog, [0, 1], 3))
+    [[0], [1]]
+    >>> list(_slicechunktosize(revlog, [1, 3], 3))
+    [[1], [3]]
+    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
+    [[1, 2], [3]]
+    >>> list(_slicechunktosize(revlog, [3, 5], 3))
+    [[3], [5]]
+    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
+    [[3], [5]]
+    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
+    [[5], [6, 7, 8]]
+    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
+    [[0], [1, 2], [3], [5], [6, 7, 8]]
+
+    Case with too large individual chunk (must return valid chunk)
+    >>> list(_slicechunktosize(revlog, [0, 1], 2))
+    [[0], [1]]
+    >>> list(_slicechunktosize(revlog, [1, 3], 1))
+    [[1], [3]]
+    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
+    [[3], [5]]
+    """
+    assert targetsize is None or 0 <= targetsize
+    if targetsize is None or segmentspan(revlog, revs) <= targetsize:
+        yield revs
+        return
+
+    startrevidx = 0
+    startdata = revlog.start(revs[0])
+    endrevidx = 0
+    iterrevs = enumerate(revs)
+    next(iterrevs) # skip first rev.
+    for idx, r in iterrevs:
+        span = revlog.end(r) - startdata
+        if span <= targetsize:
+            endrevidx = idx
+        else:
+            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
+            if chunk:
+                yield chunk
+            startrevidx = idx
+            startdata = revlog.start(r)
+            endrevidx = idx
+    yield _trimchunk(revlog, revs, startrevidx)
+
+def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
+                         mingapsize=0):
+    """slice revs to reduce the amount of unrelated data to be read from disk.
+
+    ``revs`` is sliced into groups that should be read in one go.
+    Assume that revs are sorted.
+
+    ``deltainfo`` is a _deltainfo instance of a revision that we would append
+    to the top of the revlog.
+
+    The initial chunk is sliced until the overall density (payload/chunks-span
+    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
+    skipped.
+
+    >>> revlog = _testrevlog([
+    ...  5,  #00 (5)
+    ...  10, #01 (5)
+    ...  12, #02 (2)
+    ...  12, #03 (empty)
+    ...  27, #04 (15)
+    ...  31, #05 (4)
+    ...  31, #06 (empty)
+    ...  42, #07 (11)
+    ...  47, #08 (5)
+    ...  47, #09 (empty)
+    ...  48, #10 (1)
+    ...  51, #11 (3)
+    ...  74, #12 (23)
+    ...  85, #13 (11)
+    ...  86, #14 (1)
+    ...  91, #15 (5)
+    ... ])
+
+    >>> list(_slicechunktodensity(revlog, list(range(16))))
+    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
+    >>> list(_slicechunktodensity(revlog, [0, 15]))
+    [[0], [15]]
+    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
+    [[0], [11], [15]]
+    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
+    [[0], [11, 13, 15]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
+    [[1, 2], [5, 8, 10, 11], [14]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
+    ...                           mingapsize=20))
+    [[1, 2, 3, 5, 8, 10, 11], [14]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
+    ...                           targetdensity=0.95))
+    [[1, 2], [5], [8, 10, 11], [14]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
+    ...                           targetdensity=0.95, mingapsize=12))
+    [[1, 2], [5, 8, 10, 11], [14]]
+    """
+    start = revlog.start
+    length = revlog.length
+
+    if len(revs) <= 1:
+        yield revs
+        return
+
+    nextrev = len(revlog)
+    nextoffset = revlog.end(nextrev - 1)
+
+    if deltainfo is None:
+        deltachainspan = segmentspan(revlog, revs)
+        chainpayload = sum(length(r) for r in revs)
+    else:
+        deltachainspan = deltainfo.distance
+        chainpayload = deltainfo.compresseddeltalen
+
+    if deltachainspan < mingapsize:
+        yield revs
+        return
+
+    readdata = deltachainspan
+
+    if deltachainspan:
+        density = chainpayload / float(deltachainspan)
+    else:
+        density = 1.0
+
+    if density >= targetdensity:
+        yield revs
+        return
+
+    if deltainfo is not None and deltainfo.deltalen:
+        revs = list(revs)
+        revs.append(nextrev)
+
+    # Store the gaps in a heap to have them sorted by decreasing size
+    gapsheap = []
+    heapq.heapify(gapsheap)
+    prevend = None
+    for i, rev in enumerate(revs):
+        if rev < nextrev:
+            revstart = start(rev)
+            revlen = length(rev)
+        else:
+            revstart = nextoffset
+            revlen = deltainfo.deltalen
+
+        # Skip empty revisions to form larger holes
+        if revlen == 0:
+            continue
+
+        if prevend is not None:
+            gapsize = revstart - prevend
+            # only consider holes that are large enough
+            if gapsize > mingapsize:
+                heapq.heappush(gapsheap, (-gapsize, i))
+
+        prevend = revstart + revlen
+
+    # Collect the indices of the largest holes until the density is acceptable
+    indicesheap = []
+    heapq.heapify(indicesheap)
+    while gapsheap and density < targetdensity:
+        oppgapsize, gapidx = heapq.heappop(gapsheap)
+
+        heapq.heappush(indicesheap, gapidx)
+
+        # the gap sizes are stored as negatives to be sorted decreasingly
+        # by the heap
+        readdata -= (-oppgapsize)
+        if readdata > 0:
+            density = chainpayload / float(readdata)
+        else:
+            density = 1.0
+
+    # Cut the revs at collected indices
+    previdx = 0
+    while indicesheap:
+        idx = heapq.heappop(indicesheap)
+
+        chunk = _trimchunk(revlog, revs, previdx, idx)
+        if chunk:
+            yield chunk
+
+        previdx = idx
+
+    chunk = _trimchunk(revlog, revs, previdx)
+    if chunk:
+        yield chunk
+
+def _trimchunk(revlog, revs, startidx, endidx=None):
+    """returns revs[startidx:endidx] without empty trailing revs
+
+    Doctest Setup
+    >>> revlog = _testrevlog([
+    ...  5,  #0
+    ...  10, #1
+    ...  12, #2
+    ...  12, #3 (empty)
+    ...  17, #4
+    ...  21, #5
+    ...  21, #6 (empty)
+    ... ])
+
+    Contiguous cases:
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
+    [0, 1, 2, 3, 4, 5]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
+    [0, 1, 2, 3, 4]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
+    [0, 1, 2]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
+    [2]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
+    [3, 4, 5]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
+    [3, 4]
+
+    Discontiguous cases:
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
+    [1, 3, 5]
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
+    [1]
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
+    [3, 5]
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
+    [3, 5]
+    """
+    length = revlog.length
+
+    if endidx is None:
+        endidx = len(revs)
+
+    # If we have a non-empty delta candidate, there is nothing to trim
+    if revs[endidx - 1] < len(revlog):
+        # Trim empty revs at the end, except the very first revision of a chain
+        while (endidx > 1
+                and endidx > startidx
+                and length(revs[endidx - 1]) == 0):
+            endidx -= 1
+
+    return revs[startidx:endidx]
+
+def segmentspan(revlog, revs, deltainfo=None):
+    """Get the byte span of a segment of revisions
+
+    revs is a sorted array of revision numbers
+
+    >>> revlog = _testrevlog([
+    ...  5,  #0
+    ...  10, #1
+    ...  12, #2
+    ...  12, #3 (empty)
+    ...  17, #4
+    ... ])
+
+    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
+    17
+    >>> segmentspan(revlog, [0, 4])
+    17
+    >>> segmentspan(revlog, [3, 4])
+    5
+    >>> segmentspan(revlog, [1, 2, 3,])
+    7
+    >>> segmentspan(revlog, [1, 3])
+    7
+    """
+    if not revs:
+        return 0
+    if deltainfo is not None and len(revlog) <= revs[-1]:
+        if len(revs) == 1:
+            return deltainfo.deltalen
+        offset = revlog.end(len(revlog) - 1)
+        end = deltainfo.deltalen + offset
+    else:
+        end = revlog.end(revs[-1])
+    return end - revlog.start(revs[0])
+
+def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
+    """build full text from a (base, delta) pair and other metadata"""
+    # special case deltas which replace entire base; no need to decode
+    # base revision. this neatly avoids censored bases, which throw when
+    # they're decoded.
+    hlen = struct.calcsize(">lll")
+    if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
+                                               len(delta) - hlen):
+        fulltext = delta[hlen:]
+    else:
+        # deltabase is the rawtext before it was changed by flag processors,
+        # which is equivalent to non-raw text
+        basetext = revlog.revision(baserev, _df=fh, raw=False)
+        fulltext = mdiff.patch(basetext, delta)
+
+    try:
+        res = revlog._processflags(fulltext, flags, 'read', raw=True)
+        fulltext, validatehash = res
+        if validatehash:
+            revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
+        if flags & REVIDX_ISCENSORED:
+            raise RevlogError(_('node %s is not censored') % expectednode)
+    except CensoredNodeError:
+        # must pass the censored index flag to add censored revisions
+        if not flags & REVIDX_ISCENSORED:
+            raise
+    return fulltext
+
+@attr.s(slots=True, frozen=True)
+class _deltainfo(object):
+    distance = attr.ib()
+    deltalen = attr.ib()
+    data = attr.ib()
+    base = attr.ib()
+    chainbase = attr.ib()
+    chainlen = attr.ib()
+    compresseddeltalen = attr.ib()
+    snapshotdepth = attr.ib()
+
+def isgooddeltainfo(revlog, deltainfo, revinfo):
+    """Returns True if the given delta is good. Good means that it is within
+    the disk span, disk size, and chain length bounds that we know to be
+    performant."""
+    if deltainfo is None:
+        return False
+
+    # - 'deltainfo.distance' is the distance from the base revision --
+    #   bounding it limits the amount of I/O we need to do.
+    # - 'deltainfo.compresseddeltalen' is the sum of the total size of
+    #   deltas we need to apply -- bounding it limits the amount of CPU
+    #   we consume.
+
+    if revlog._sparserevlog:
+        # As sparse-read will be used, we can consider that the distance,
+        # instead of being the span of the whole chunk,
+        # is the span of the largest read chunk
+        base = deltainfo.base
+
+        if base != nullrev:
+            deltachain = revlog._deltachain(base)[0]
+        else:
+            deltachain = []
+
+        # search for the first non-snapshot revision
+        for idx, r in enumerate(deltachain):
+            if not revlog.issnapshot(r):
+                break
+        deltachain = deltachain[idx:]
+        chunks = slicechunk(revlog, deltachain, deltainfo)
+        all_span = [segmentspan(revlog, revs, deltainfo)
+                    for revs in chunks]
+        distance = max(all_span)
+    else:
+        distance = deltainfo.distance
+
+    textlen = revinfo.textlen
+    defaultmax = textlen * 4
+    maxdist = revlog._maxdeltachainspan
+    if not maxdist:
+        maxdist = distance # ensure the conditional passes
+    maxdist = max(maxdist, defaultmax)
+    if revlog._sparserevlog and maxdist < revlog._srmingapsize:
+        # In multiple places, we are ignoring irrelevant data ranges below a
+        # certain size. We also apply this tradeoff here and relax the span
+        # constraint for small enough content.
+        maxdist = revlog._srmingapsize
+
+    # Bad delta from read span:
+    #
+    #   If the span of data read is larger than the maximum allowed.
+    if maxdist < distance:
+        return False
+
+    # Bad delta from new delta size:
+    #
+    #   If the delta size is larger than the target text, storing the
+    #   delta will be inefficient.
+    if textlen < deltainfo.deltalen:
+        return False
+
+    # Bad delta from cumulated payload size:
+    #
+    #   If the sum of the deltas gets larger than K * the target text length.
+    if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
+        return False
+
+    # Bad delta from chain length:
+    #
+    #   If the number of deltas in the chain gets too high.
+    if (revlog._maxchainlen
+            and revlog._maxchainlen < deltainfo.chainlen):
+        return False
+
+    # bad delta from intermediate snapshot size limit
+    #
+    #   If an intermediate snapshot size is higher than the limit.  The
+    #   limit exists to prevent endless chains of intermediate deltas from
+    #   being created.
+    if (deltainfo.snapshotdepth is not None and
+            (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen):
+        return False
+
+    # bad delta if new intermediate snapshot is larger than the previous
+    # snapshot
+    if (deltainfo.snapshotdepth
+            and revlog.length(deltainfo.base) < deltainfo.deltalen):
+        return False
+
+    return True
+
+def _candidategroups(revlog, textlen, p1, p2, cachedelta):
+    """Provides group of revision to be tested as delta base
+
+    This top level function focus on emitting groups with unique and worthwhile
+    content. See _raw_candidate_groups for details about the group order.
+    """
+    # should we try to build a delta?
+    if not (len(revlog) and revlog._storedeltachains):
+        return
+
+    deltalength = revlog.length
+    deltaparent = revlog.deltaparent
+
+    deltas_limit = textlen * LIMIT_DELTA2TEXT
+
+    tested = set([nullrev])
+    for temptative in _rawgroups(revlog, p1, p2, cachedelta):
+        group = []
+        for rev in temptative:
+            # skip over empty deltas (no need to include them in a chain)
+            while not (rev == nullrev or rev in tested or deltalength(rev)):
+                rev = deltaparent(rev)
+                tested.add(rev)
+            # filter out revisions we tested already
+            if rev in tested:
+                continue
+            tested.add(rev)
+            # filter out delta bases that will never produce a good delta
+            if deltas_limit < revlog.length(rev):
+                continue
+            # no need to try a delta against nullrev, this will be done as a
+            # last resort.
+            if rev == nullrev:
+                continue
+            # no delta for rawtext-changing revs (see "candelta" for why)
+            if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
+                continue
+            group.append(rev)
+        if group:
+            yield tuple(group)
+
+def _rawgroups(revlog, p1, p2, cachedelta):
+    """Provides group of revision to be tested as delta base
+
+    This lower level function focus on emitting delta theorically interresting
+    without looking it any practical details.
+
+    The group order aims at providing fast or small candidates first.
+    """
+    gdelta = revlog._generaldelta
+    curr = len(revlog)
+    prev = curr - 1
+
+    # This condition is true most of the time when processing
+    # changegroup data into a generaldelta repo. The only time it
+    # isn't true is if this is the first revision in a delta chain
+    # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
+    if cachedelta and gdelta and revlog._lazydeltabase:
+        # Assume what we received from the server is a good choice
+        # building the delta will reuse the cache
+        yield (cachedelta[0],)
+
+    if gdelta:
+        # exclude already lazy tested base if any
+        parents = [p for p in (p1, p2) if p != nullrev]
+
+        if not revlog._deltabothparents and len(parents) == 2:
+            parents.sort()
+            # To minimize the chance of having to build a fulltext,
+            # pick first whichever parent is closest to us (max rev)
+            yield (parents[1],)
+            # then the other one (min rev) if the first did not fit
+            yield (parents[0],)
+        elif len(parents) > 0:
+            # Test all parents (1 or 2), and keep the best candidate
+            yield parents
+
+    # Other approaches failed; try against prev to hopefully save us a
+    # fulltext.
+    yield (prev,)
+
+class deltacomputer(object):
+    def __init__(self, revlog):
+        self.revlog = revlog
+
+    def buildtext(self, revinfo, fh):
+        """Builds a fulltext version of a revision
+
+        revinfo: _revisioninfo instance that contains all needed info
+        fh:      file handle to either the .i or the .d revlog file,
+                 depending on whether it is inlined or not
+        """
+        btext = revinfo.btext
+        if btext[0] is not None:
+            return btext[0]
+
+        revlog = self.revlog
+        cachedelta = revinfo.cachedelta
+        baserev = cachedelta[0]
+        delta = cachedelta[1]
+
+        fulltext = btext[0] = _textfromdelta(fh, revlog, baserev, delta,
+                                             revinfo.p1, revinfo.p2,
+                                             revinfo.flags, revinfo.node)
+        return fulltext
+
+    def _builddeltadiff(self, base, revinfo, fh):
+        revlog = self.revlog
+        t = self.buildtext(revinfo, fh)
+        if revlog.iscensored(base):
+            # deltas based on a censored revision must replace the
+            # full content in one patch, so delta works everywhere
+            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
+            delta = header + t
+        else:
+            ptext = revlog.revision(base, _df=fh, raw=True)
+            delta = mdiff.textdiff(ptext, t)
+
+        return delta
+
+    def _builddeltainfo(self, revinfo, base, fh):
+        # can we use the cached delta?
+        if revinfo.cachedelta and revinfo.cachedelta[0] == base:
+            delta = revinfo.cachedelta[1]
+        else:
+            delta = self._builddeltadiff(base, revinfo, fh)
+        revlog = self.revlog
+        header, data = revlog.compress(delta)
+        deltalen = len(header) + len(data)
+        chainbase = revlog.chainbase(base)
+        offset = revlog.end(len(revlog) - 1)
+        dist = deltalen + offset - revlog.start(chainbase)
+        if revlog._generaldelta:
+            deltabase = base
+        else:
+            deltabase = chainbase
+        chainlen, compresseddeltalen = revlog._chaininfo(base)
+        chainlen += 1
+        compresseddeltalen += deltalen
+
+        revlog = self.revlog
+        snapshotdepth = None
+        if deltabase == nullrev:
+            snapshotdepth = 0
+        elif revlog._sparserevlog and revlog.issnapshot(deltabase):
+            # A delta chain should always be one full snapshot,
+            # zero or more semi-snapshots, and zero or more deltas
+            p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
+            if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
+                snapshotdepth = len(revlog._deltachain(deltabase)[0])
+
+        return _deltainfo(dist, deltalen, (header, data), deltabase,
+                          chainbase, chainlen, compresseddeltalen,
+                          snapshotdepth)
+
+    def _fullsnapshotinfo(self, fh, revinfo):
+        curr = len(self.revlog)
+        rawtext = self.buildtext(revinfo, fh)
+        data = self.revlog.compress(rawtext)
+        compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
+        deltabase = chainbase = curr
+        snapshotdepth = 0
+        chainlen = 1
+
+        return _deltainfo(dist, deltalen, data, deltabase,
+                          chainbase, chainlen, compresseddeltalen,
+                          snapshotdepth)
+
+    def finddeltainfo(self, revinfo, fh):
+        """Find an acceptable delta against a candidate revision
+
+        revinfo: information about the revision (instance of _revisioninfo)
+        fh:      file handle to either the .i or the .d revlog file,
+                 depending on whether it is inlined or not
+
+        Returns the first acceptable candidate revision, as ordered by
+        _candidategroups
+
+        If no suitable deltabase is found, we return delta info for a full
+        snapshot.
+        """
+        if not revinfo.textlen:
+            return self._fullsnapshotinfo(fh, revinfo)
+
+        # no delta for flag processor revision (see "candelta" for why)
+        # not calling candelta since only one revision needs test, also to
+        # avoid overhead fetching flags again.
+        if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
+            return self._fullsnapshotinfo(fh, revinfo)
+
+        cachedelta = revinfo.cachedelta
+        p1 = revinfo.p1
+        p2 = revinfo.p2
+        revlog = self.revlog
+
+        deltainfo = None
+        p1r, p2r = revlog.rev(p1), revlog.rev(p2)
+        groups = _candidategroups(self.revlog, revinfo.textlen,
+                                             p1r, p2r, cachedelta)
+        for candidaterevs in groups:
+            nominateddeltas = []
+            for candidaterev in candidaterevs:
+                candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
+                if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
+                    nominateddeltas.append(candidatedelta)
+            if nominateddeltas:
+                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
+                break
+
+        if deltainfo is None:
+            deltainfo = self._fullsnapshotinfo(fh, revinfo)
+        return deltainfo
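
To make the density criterion above concrete, here is a small standalone
walkthrough (a sketch, not Mercurial's code) using the same payload
boundaries as the doctests in _slicechunktodensity():

    # boundaries[r] is the end offset of revision r, as in _testrevlog
    boundaries = [5, 10, 12, 12, 27, 31, 31, 42, 47, 47, 48, 51, 74, 85, 86, 91]

    def start(rev):
        return 0 if rev == 0 else boundaries[rev - 1]

    def length(rev):
        return boundaries[rev] - start(rev)

    revs = [1, 2, 3, 5, 8, 10, 11, 14]
    payload = sum(length(r) for r in revs)        # 21 bytes of useful data
    span = boundaries[revs[-1]] - start(revs[0])  # 81 bytes actually read
    print("%d %d %.2f" % (payload, span, payload / float(span)))  # 21 81 0.26

A density of ~0.26 is below the default 0.5 threshold, so the largest gaps
are dropped first (the 34 bytes before rev 14, then the 15 bytes before
rev 5) until what remains is dense enough; cutting at those gaps is how the
doctest arrives at [[1, 2], [5, 8, 10, 11], [14]].
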
--- a/mercurial/revset.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/revset.py	Tue Sep 04 12:16:28 2018 -0400
@@ -242,7 +242,7 @@
 
 def listset(repo, subset, *xs, **opts):
     raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see hg help "revsets.x or y"'))
+                           hint=_('see \'hg help "revsets.x or y"\''))
 
 def keyvaluepair(repo, subset, k, v, order):
     raise error.ParseError(_("can't use a key-value pair in this context"))
@@ -454,6 +454,8 @@
         kind, pattern, matcher = stringutil.stringmatcher(bm)
         bms = set()
         if kind == 'literal':
+            if bm == pattern:
+                pattern = repo._bookmarks.expandname(pattern)
             bmrev = repo._bookmarks.get(pattern, None)
             if not bmrev:
                 raise error.RepoLookupError(_("bookmark '%s' does not exist")
@@ -1566,6 +1568,12 @@
     """helper to select all rev in <targets> phases"""
     return repo._phasecache.getrevset(repo, targets, subset)
 
+@predicate('_phase(idx)', safe=True)
+def phase(repo, subset, x):
+    l = getargs(x, 1, 1, ("_phase requires one argument"))
+    target = getinteger(l[0], ("_phase expects a number"))
+    return _phase(repo, subset, target)
+
 @predicate('draft()', safe=True)
 def draft(repo, subset, x):
     """Changeset in draft phase."""
--- a/mercurial/revsetlang.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/revsetlang.py	Tue Sep 04 12:16:28 2018 -0400
@@ -63,7 +63,7 @@
 _syminitletters = set(pycompat.iterbytestr(
     string.ascii_letters.encode('ascii') +
     string.digits.encode('ascii') +
-    '._@')) | set(map(pycompat.bytechr, xrange(128, 256)))
+    '._@')) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))
 
 # default set of valid characters for non-initial letters of symbols
 _symletters = _syminitletters | set(pycompat.iterbytestr('-/'))
@@ -177,7 +177,7 @@
                         if p: # possible consecutive -
                             yield ('symbol', p, s)
                         s += len(p)
-                        yield ('-', None, pos)
+                        yield ('-', None, s)
                         s += 1
                     if parts[-1]: # possible trailing -
                         yield ('symbol', parts[-1], s)
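
The one-character fix in the tokenizer hunk above matters because each '-'
inside a split symbol must report its own offset s, not the position pos
where the whole symbol started. A self-contained sketch of the corrected
splitting logic (simplified from the real tokenizer):

    def tokenizedashes(text, pos=0):
        s = pos
        parts = text.split('-')
        for p in parts[:-1]:
            if p:  # possible consecutive '-'
                yield ('symbol', p, s)
                s += len(p)
            yield ('-', None, s)  # the fixed line: report s, not pos
            s += 1
        if parts[-1]:  # possible trailing '-'
            yield ('symbol', parts[-1], s)

    print(list(tokenizedashes('a-b-c')))
    # [('symbol', 'a', 0), ('-', None, 1), ('symbol', 'b', 2),
    #  ('-', None, 3), ('symbol', 'c', 4)]
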
--- a/mercurial/scmutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/scmutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -34,6 +34,7 @@
     obsutil,
     pathutil,
     phases,
+    policy,
     pycompat,
     revsetlang,
     similar,
@@ -52,6 +53,8 @@
 else:
     from . import scmposix as scmplatform
 
+parsers = policy.importmod(r'parsers')
+
 termsize = scmplatform.termsize
 
 class status(tuple):
@@ -169,64 +172,64 @@
             reason = _('timed out waiting for lock held by %r') % inst.locker
         else:
             reason = _('lock held by %r') % inst.locker
-        ui.warn(_("abort: %s: %s\n")
-                % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
+        ui.error(_("abort: %s: %s\n") % (
+            inst.desc or stringutil.forcebytestr(inst.filename), reason))
         if not inst.locker:
-            ui.warn(_("(lock might be very busy)\n"))
+            ui.error(_("(lock might be very busy)\n"))
     except error.LockUnavailable as inst:
-        ui.warn(_("abort: could not lock %s: %s\n") %
-                (inst.desc or stringutil.forcebytestr(inst.filename),
-                 encoding.strtolocal(inst.strerror)))
+        ui.error(_("abort: could not lock %s: %s\n") %
+                 (inst.desc or stringutil.forcebytestr(inst.filename),
+                  encoding.strtolocal(inst.strerror)))
     except error.OutOfBandError as inst:
         if inst.args:
             msg = _("abort: remote error:\n")
         else:
             msg = _("abort: remote error\n")
-        ui.warn(msg)
+        ui.error(msg)
         if inst.args:
-            ui.warn(''.join(inst.args))
+            ui.error(''.join(inst.args))
         if inst.hint:
-            ui.warn('(%s)\n' % inst.hint)
+            ui.error('(%s)\n' % inst.hint)
     except error.RepoError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
+        ui.error(_("abort: %s!\n") % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
     except error.ResponseError as inst:
-        ui.warn(_("abort: %s") % inst.args[0])
+        ui.error(_("abort: %s") % inst.args[0])
         msg = inst.args[1]
         if isinstance(msg, type(u'')):
             msg = pycompat.sysbytes(msg)
         if not isinstance(msg, bytes):
-            ui.warn(" %r\n" % (msg,))
+            ui.error(" %r\n" % (msg,))
         elif not msg:
-            ui.warn(_(" empty string\n"))
+            ui.error(_(" empty string\n"))
         else:
-            ui.warn("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
+            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
     except error.CensoredNodeError as inst:
-        ui.warn(_("abort: file censored %s!\n") % inst)
+        ui.error(_("abort: file censored %s!\n") % inst)
     except error.RevlogError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
+        ui.error(_("abort: %s!\n") % inst)
     except error.InterventionRequired as inst:
-        ui.warn("%s\n" % inst)
+        ui.error("%s\n" % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
         return 1
     except error.WdirUnsupported:
-        ui.warn(_("abort: working directory revision cannot be specified\n"))
+        ui.error(_("abort: working directory revision cannot be specified\n"))
     except error.Abort as inst:
-        ui.warn(_("abort: %s\n") % inst)
+        ui.error(_("abort: %s\n") % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
     except ImportError as inst:
-        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
+        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
         m = stringutil.forcebytestr(inst).split()[-1]
         if m in "mpatch bdiff".split():
-            ui.warn(_("(did you forget to compile extensions?)\n"))
+            ui.error(_("(did you forget to compile extensions?)\n"))
         elif m in "zlib".split():
-            ui.warn(_("(is your Python install correct?)\n"))
+            ui.error(_("(is your Python install correct?)\n"))
     except IOError as inst:
         if util.safehasattr(inst, "code"):
-            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
+            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
         elif util.safehasattr(inst, "reason"):
             try: # usually it is in the form (errno, strerror)
                 reason = inst.reason.args[1]
@@ -236,34 +239,34 @@
             if isinstance(reason, pycompat.unicode):
                 # SSLError of Python 2.7.9 contains a unicode
                 reason = encoding.unitolocal(reason)
-            ui.warn(_("abort: error: %s\n") % reason)
+            ui.error(_("abort: error: %s\n") % reason)
         elif (util.safehasattr(inst, "args")
               and inst.args and inst.args[0] == errno.EPIPE):
             pass
         elif getattr(inst, "strerror", None):
             if getattr(inst, "filename", None):
-                ui.warn(_("abort: %s: %s\n") % (
+                ui.error(_("abort: %s: %s\n") % (
                     encoding.strtolocal(inst.strerror),
                     stringutil.forcebytestr(inst.filename)))
             else:
-                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
+                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
         else:
             raise
     except OSError as inst:
         if getattr(inst, "filename", None) is not None:
-            ui.warn(_("abort: %s: '%s'\n") % (
+            ui.error(_("abort: %s: '%s'\n") % (
                 encoding.strtolocal(inst.strerror),
                 stringutil.forcebytestr(inst.filename)))
         else:
-            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
+            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
     except MemoryError:
-        ui.warn(_("abort: out of memory\n"))
+        ui.error(_("abort: out of memory\n"))
     except SystemExit as inst:
         # Commands shouldn't sys.exit directly, but give a return code.
         # Just in case, catch this and pass the exit code to the caller.
         return inst.code
     except socket.error as inst:
-        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
+        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
 
     return -1
 
@@ -437,41 +440,111 @@
     return '%d:%s' % (rev, hexfunc(node))
 
 def resolvehexnodeidprefix(repo, prefix):
-    # Uses unfiltered repo because it's faster when prefix is ambiguous/
-    # This matches the shortesthexnodeidprefix() function below.
-    node = repo.unfiltered().changelog._partialmatch(prefix)
+    if (prefix.startswith('x') and
+        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
+        prefix = prefix[1:]
+    try:
+        # Uses unfiltered repo because it's faster when prefix is ambiguous.
+        # This matches the shortesthexnodeidprefix() function below.
+        node = repo.unfiltered().changelog._partialmatch(prefix)
+    except error.AmbiguousPrefixLookupError:
+        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+        if revset:
+            # Clear config to avoid infinite recursion
+            configoverrides = {('experimental',
+                                'revisions.disambiguatewithin'): None}
+            with repo.ui.configoverride(configoverrides):
+                revs = repo.anyrevs([revset], user=True)
+                matches = []
+                for rev in revs:
+                    node = repo.changelog.node(rev)
+                    if hex(node).startswith(prefix):
+                        matches.append(node)
+                if len(matches) == 1:
+                    return matches[0]
+        raise
     if node is None:
         return
     repo.changelog.rev(node)  # make sure node isn't filtered
     return node
 
-def shortesthexnodeidprefix(repo, node, minlength=1):
-    """Find the shortest unambiguous prefix that matches hexnode."""
+def mayberevnum(repo, prefix):
+    """Checks if the given prefix may be mistaken for a revision number"""
+    try:
+        i = int(prefix)
+        # if we are a pure int, then starting with zero will not be
+        # confused as a rev; or, obviously, if the int is larger
+        # than the value of the tip rev
+        if prefix[0:1] == b'0' or i >= len(repo):
+            return False
+        return True
+    except ValueError:
+        return False
+
+def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
+    """Find the shortest unambiguous prefix that matches hexnode.
+
+    If "cache" is not None, it must be a dictionary that can be used for
+    caching between calls to this method.
+    """
     # _partialmatch() of filtered changelog could take O(len(repo)) time,
     # which would be unacceptably slow. so we look for hash collision in
     # unfiltered space, which means some hashes may be slightly longer.
-    cl = repo.unfiltered().changelog
-
-    def isrev(prefix):
-        try:
-            i = int(prefix)
-            # if we are a pure int, then starting with zero will not be
-            # confused as a rev; or, obviously, if the int is larger
-            # than the value of the tip rev
-            if prefix[0:1] == b'0' or i > len(cl):
-                return False
-            return True
-        except ValueError:
-            return False
 
     def disambiguate(prefix):
         """Disambiguate against revnums."""
+        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
+            if mayberevnum(repo, prefix):
+                return 'x' + prefix
+            else:
+                return prefix
+
         hexnode = hex(node)
         for length in range(len(prefix), len(hexnode) + 1):
             prefix = hexnode[:length]
-            if not isrev(prefix):
+            if not mayberevnum(repo, prefix):
                 return prefix
 
+    cl = repo.unfiltered().changelog
+    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+    if revset:
+        revs = None
+        if cache is not None:
+            revs = cache.get('disambiguationrevset')
+        if revs is None:
+            revs = repo.anyrevs([revset], user=True)
+            if cache is not None:
+                cache['disambiguationrevset'] = revs
+        if cl.rev(node) in revs:
+            hexnode = hex(node)
+            nodetree = None
+            if cache is not None:
+                nodetree = cache.get('disambiguationnodetree')
+            if not nodetree:
+                try:
+                    nodetree = parsers.nodetree(cl.index, len(revs))
+                except AttributeError:
+                    # no native nodetree
+                    pass
+                else:
+                    for r in revs:
+                        nodetree.insert(r)
+                    if cache is not None:
+                        cache['disambiguationnodetree'] = nodetree
+            if nodetree is not None:
+                length = max(nodetree.shortest(node), minlength)
+                prefix = hexnode[:length]
+                return disambiguate(prefix)
+            for length in range(minlength, len(hexnode) + 1):
+                matches = []
+                prefix = hexnode[:length]
+                for rev in revs:
+                    otherhexnode = repo[rev].hex()
+                    if prefix == otherhexnode[:length]:
+                        matches.append(otherhexnode)
+                if len(matches) == 1:
+                    return disambiguate(prefix)
+
     try:
         return disambiguate(cl.shortest(node, minlength))
     except error.LookupError:
@@ -480,8 +553,8 @@
 def isrevsymbol(repo, symbol):
     """Checks if a symbol exists in the repo.
 
-    See revsymbol() for details. Raises error.LookupError if the symbol is an
-    ambiguous nodeid prefix.
+    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
+    symbol is an ambiguous nodeid prefix.
     """
     try:
         revsymbol(repo, symbol)
@@ -780,7 +853,7 @@
         return self._revcontains(self._torev(node))
 
 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
-                 fixphase=False, targetphase=None):
+                 fixphase=False, targetphase=None, backup=True):
     """do common cleanups when old nodes are replaced by new nodes
 
     That includes writing obsmarkers or stripping nodes, and moving bookmarks.
@@ -905,7 +978,8 @@
             from . import repair # avoid import cycle
             tostrip = list(replacements)
             if tostrip:
-                repair.delayedstrip(repo.ui, repo, tostrip, operation)
+                repair.delayedstrip(repo.ui, repo, tostrip, operation,
+                                    backup=backup)
 
 def addremove(repo, matcher, prefix, opts=None):
     if opts is None:
@@ -952,9 +1026,11 @@
         if repo.ui.verbose or not m.exact(abs):
             if abs in unknownset:
                 status = _('adding %s\n') % m.uipath(abs)
+                label = 'addremove.added'
             else:
                 status = _('removing %s\n') % m.uipath(abs)
-            repo.ui.status(status)
+                label = 'addremove.removed'
+            repo.ui.status(status, label=label)
 
     renames = _findrenames(repo, m, added + unknown, removed + deleted,
                            similarity)
@@ -1542,13 +1618,13 @@
         @reportsummary
         def reportnewcs(repo, tr):
             """Report the range of new revisions pulled/unbundled."""
-            newrevs = tr.changes.get('revs', xrange(0, 0))
-            if not newrevs:
+            origrepolen = tr.changes.get('origrepolen', len(repo))
+            if origrepolen >= len(repo):
                 return
 
             # Compute the bounds of new revisions' range, excluding obsoletes.
             unfi = repo.unfiltered()
-            revs = unfi.revs('%ld and not obsolete()', newrevs)
+            revs = unfi.revs('%d: and not obsolete()', origrepolen)
             if not revs:
                 # Got only obsoletes.
                 return
@@ -1565,16 +1641,13 @@
             """Report statistics of phase changes for changesets pre-existing
             pull/unbundle.
             """
-            # TODO set() is only appropriate for 4.7 since revs post
-            # 45e05d39d9ce is a pycompat.membershiprange, which has O(n)
-            # membership testing.
-            newrevs = set(tr.changes.get('revs', xrange(0, 0)))
+            origrepolen = tr.changes.get('origrepolen', len(repo))
             phasetracking = tr.changes.get('phases', {})
             if not phasetracking:
                 return
             published = [
                 rev for rev, (old, new) in phasetracking.iteritems()
-                if new == phases.public and rev not in newrevs
+                if new == phases.public and rev < origrepolen
             ]
             if not published:
                 return
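
The mayberevnum() helper extracted above encodes a simple rule; here is an
equivalent standalone version with a few worked cases (a sketch; the real
function takes a repo object instead of a length):

    def mayberevnum(repolen, prefix):
        """Could `prefix` be mistaken for a revision number?"""
        try:
            i = int(prefix)
        except ValueError:
            return False  # not a pure integer, e.g. 'ab3'
        # a leading zero is never printed for a revnum, and numbers at or
        # beyond the tip cannot name an existing revision
        if prefix[:1] == '0' or i >= repolen:
            return False
        return True

    assert mayberevnum(100, '12')        # plausible revision number
    assert not mayberevnum(100, '012')   # leading zero
    assert not mayberevnum(100, '450')   # beyond the tip
    assert not mayberevnum(100, 'ab3')   # not an integer
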
--- a/mercurial/server.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/server.py	Tue Sep 04 12:16:28 2018 -0400
@@ -79,7 +79,7 @@
             runargs.append('--daemon-postexec=unlink:%s' % lockpath)
             # Don't pass --cwd to the child process, because we've already
             # changed directory.
-            for i in xrange(1, len(runargs)):
+            for i in pycompat.xrange(1, len(runargs)):
                 if runargs[i].startswith('--cwd='):
                     del runargs[i]
                     break
--- a/mercurial/setdiscovery.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/setdiscovery.py	Tue Sep 04 12:16:28 2018 -0400
@@ -51,30 +51,25 @@
     nullrev,
 )
 from . import (
-    dagutil,
     error,
     util,
 )
 
-def _updatesample(dag, nodes, sample, quicksamplesize=0):
+def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
     """update an existing sample to match the expected size
 
-    The sample is updated with nodes exponentially distant from each head of the
-    <nodes> set. (H~1, H~2, H~4, H~8, etc).
+    The sample is updated with revs exponentially distant from each head of the
+    <revs> set. (H~1, H~2, H~4, H~8, etc).
 
     If a target size is specified, the sampling will stop once this size is
-    reached. Otherwise sampling will happen until roots of the <nodes> set are
+    reached. Otherwise sampling will happen until roots of the <revs> set are
     reached.
 
-    :dag: a dag object from dagutil
-    :nodes:  set of nodes we want to discover (if None, assume the whole dag)
+    :revs:  set of revs we want to discover (if None, assume the whole dag)
+    :heads: set of DAG head revs
     :sample: a sample to update
+    :parentfn: a callable to resolve parents for a revision
     :quicksamplesize: optional target size of the sample"""
-    # if nodes is empty we scan the entire graph
-    if nodes:
-        heads = dag.headsetofconnecteds(nodes)
-    else:
-        heads = dag.heads()
     dist = {}
     visit = collections.deque(heads)
     seen = set()
@@ -91,37 +86,69 @@
             if quicksamplesize and (len(sample) >= quicksamplesize):
                 return
         seen.add(curr)
-        for p in dag.parents(curr):
-            if not nodes or p in nodes:
+
+        for p in parentfn(curr):
+            if p != nullrev and (not revs or p in revs):
                 dist.setdefault(p, d + 1)
                 visit.append(p)
 
-def _takequicksample(dag, nodes, size):
+def _takequicksample(repo, headrevs, revs, size):
     """takes a quick sample of size <size>
 
     It is meant for initial sampling and focuses on querying heads and close
     ancestors of heads.
 
-    :dag: a dag object
-    :nodes: set of nodes to discover
+    :headrevs: set of head revisions in local DAG to consider
+    :revs: set of revs to discover
     :size: the maximum size of the sample"""
-    sample = dag.headsetofconnecteds(nodes)
+    sample = set(repo.revs('heads(%ld)', revs))
+
     if len(sample) >= size:
         return _limitsample(sample, size)
-    _updatesample(dag, None, sample, quicksamplesize=size)
+
+    _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
+                  quicksamplesize=size)
     return sample
 
-def _takefullsample(dag, nodes, size):
-    sample = dag.headsetofconnecteds(nodes)
+def _takefullsample(repo, headrevs, revs, size):
+    sample = set(repo.revs('heads(%ld)', revs))
+
     # update from heads
-    _updatesample(dag, nodes, sample)
+    revsheads = set(repo.revs('heads(%ld)', revs))
+    _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)
+
     # update from roots
-    _updatesample(dag.inverse(), nodes, sample)
+    revsroots = set(repo.revs('roots(%ld)', revs))
+
+    # _updatesample() essentially does iteration over revisions to look up
+    # their children. This lookup is expensive and doing it in a loop is
+    # quadratic. We precompute the children for all relevant revisions and
+    # make the lookup in _updatesample() a simple dict lookup.
+    #
+    # Because this function can be called multiple times during discovery, we
+    # may still perform redundant work and there is room to optimize this by
+    # keeping a persistent cache of children across invocations.
+    children = {}
+
+    parentrevs = repo.changelog.parentrevs
+    for rev in repo.changelog.revs(start=min(revsroots)):
+        # Always ensure revision has an entry so we don't need to worry about
+        # missing keys.
+        children.setdefault(rev, [])
+
+        for prev in parentrevs(rev):
+            if prev == nullrev:
+                continue
+
+            children.setdefault(prev, []).append(rev)
+
+    _updatesample(revs, revsroots, sample, children.__getitem__)
     assert sample
     sample = _limitsample(sample, size)
     if len(sample) < size:
         more = size - len(sample)
-        sample.update(random.sample(list(nodes - sample), more))
+        sample.update(random.sample(list(revs - sample), more))
     return sample
 
 def _limitsample(sample, desiredlen):
@@ -142,16 +169,17 @@
 
     roundtrips = 0
     cl = local.changelog
-    localsubset = None
+    clnode = cl.node
+    clrev = cl.rev
+
     if ancestorsof is not None:
-        rev = local.changelog.rev
-        localsubset = [rev(n) for n in ancestorsof]
-    dag = dagutil.revlogdag(cl, localsubset=localsubset)
+        ownheads = [clrev(n) for n in ancestorsof]
+    else:
+        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
 
     # early exit if we know all the specified remote heads already
     ui.debug("query 1; heads\n")
     roundtrips += 1
-    ownheads = dag.heads()
     sample = _limitsample(ownheads, initialsamplesize)
     # indices between sample and externalized version must match
     sample = list(sample)
@@ -159,7 +187,7 @@
     with remote.commandexecutor() as e:
         fheads = e.callcommand('heads', {})
         fknown = e.callcommand('known', {
-            'nodes': dag.externalizeall(sample),
+            'nodes': [clnode(r) for r in sample],
         })
 
     srvheadhashes, yesno = fheads.result(), fknown.result()
@@ -173,15 +201,25 @@
     # compatibility reasons)
     ui.status(_("searching for changes\n"))
 
-    srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
+    srvheads = []
+    for node in srvheadhashes:
+        if node == nullid:
+            continue
+
+        try:
+            srvheads.append(clrev(node))
+        # Catches unknown and filtered nodes.
+        except error.LookupError:
+            continue
+
     if len(srvheads) == len(srvheadhashes):
         ui.debug("all remote heads known locally\n")
-        return (srvheadhashes, False, srvheadhashes,)
+        return srvheadhashes, False, srvheadhashes
 
     if len(sample) == len(ownheads) and all(yesno):
         ui.note(_("all local heads known remotely\n"))
-        ownheadhashes = dag.externalizeall(ownheads)
-        return (ownheadhashes, True, srvheadhashes,)
+        ownheadhashes = [clnode(r) for r in ownheads]
+        return ownheadhashes, True, srvheadhashes
 
     # full blown discovery
 
@@ -202,7 +240,12 @@
 
         if sample:
             missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
-            missing.update(dag.descendantset(missinginsample, missing))
+
+            if missing:
+                missing.update(local.revs('descendants(%ld) - descendants(%ld)',
+                                          missinginsample, missing))
+            else:
+                missing.update(local.revs('descendants(%ld)', missinginsample))
 
             undecided.difference_update(missing)
 
@@ -224,7 +267,7 @@
         if len(undecided) < targetsize:
             sample = list(undecided)
         else:
-            sample = samplefunc(dag, undecided, targetsize)
+            sample = samplefunc(local, ownheads, undecided, targetsize)
 
         roundtrips += 1
         progress.update(roundtrips)
@@ -235,7 +278,7 @@
 
         with remote.commandexecutor() as e:
             yesno = e.callcommand('known', {
-                'nodes': dag.externalizeall(sample),
+                'nodes': [clnode(r) for r in sample],
             }).result()
 
         full = True
@@ -247,10 +290,8 @@
 
     # heads(common) == heads(common.bases) since common represents common.bases
     # and all its ancestors
-    result = dag.headsetofconnecteds(common.bases)
-    # common.bases can include nullrev, but our contract requires us to not
-    # return any heads in that case, so discard that
-    result.discard(nullrev)
+    # The presence of nullrev will confuse heads(). So filter it out.
+    result = set(local.revs('heads(%ld)', common.bases - {nullrev}))
     elapsed = util.timer() - start
     progress.complete()
     ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
@@ -268,4 +309,5 @@
         return ({nullid}, True, srvheadhashes,)
 
     anyincoming = (srvheadhashes != [nullid])
-    return dag.externalizeall(result), anyincoming, srvheadhashes
+    result = {clnode(r) for r in result}
+    return result, anyincoming, srvheadhashes
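
The children pre-computation described in the comments above turns the
repeated child lookups into plain dict accesses; a minimal standalone
sketch on a toy DAG:

    nullrev = -1

    def buildchildren(parentrevs, revs):
        children = {}
        for rev in revs:
            # always ensure the revision has an entry so lookups can't miss
            children.setdefault(rev, [])
            for prev in parentrevs(rev):
                if prev == nullrev:
                    continue
                children.setdefault(prev, []).append(rev)
        return children

    # toy linear history: 0 <- 1 <- 2
    parents = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (1, nullrev)}
    print(buildchildren(parents.__getitem__, range(3)))
    # {0: [1], 1: [2], 2: []}
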
--- a/mercurial/simplemerge.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/simplemerge.py	Tue Sep 04 12:16:28 2018 -0400
@@ -58,7 +58,8 @@
     """
     if (aend - astart) != (bend - bstart):
         return False
-    for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
+    for ia, ib in zip(pycompat.xrange(astart, aend),
+                      pycompat.xrange(bstart, bend)):
         if a[ia] != b[ib]:
             return False
     else:
--- a/mercurial/smartset.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/smartset.py	Tue Sep 04 12:16:28 2018 -0400
@@ -152,11 +152,11 @@
         # but start > stop is allowed, which should be an empty set.
         ys = []
         it = iter(self)
-        for x in xrange(start):
+        for x in pycompat.xrange(start):
             y = next(it, None)
             if y is None:
                 break
-        for x in xrange(stop - start):
+        for x in pycompat.xrange(stop - start):
             y = next(it, None)
             if y is None:
                 break
@@ -1005,13 +1005,13 @@
             return self.fastdesc()
 
     def fastasc(self):
-        iterrange = xrange(self._start, self._end)
+        iterrange = pycompat.xrange(self._start, self._end)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)
 
     def fastdesc(self):
-        iterrange = xrange(self._end - 1, self._start - 1, -1)
+        iterrange = pycompat.xrange(self._end - 1, self._start - 1, -1)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)
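
For reference, the slicing loops ported above consume and discard the first
`start` items of the lazily computed revision stream, then collect at most
`stop - start` more; this is the same pattern as itertools.islice. A sketch:

    from itertools import islice

    def sliceiter(it, start, stop):
        # skip `start` items, then keep up to `stop - start`, stopping
        # early if the iterator runs dry
        return list(islice(it, start, stop))

    print(sliceiter(iter(range(100)), 5, 8))  # [5, 6, 7]
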
--- a/mercurial/sparse.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/sparse.py	Tue Sep 04 12:16:28 2018 -0400
@@ -31,9 +31,11 @@
 # a per-repo option, possibly a repo requirement.
 enabled = False
 
-def parseconfig(ui, raw):
+def parseconfig(ui, raw, action):
     """Parse sparse config file content.
 
+    action is the command triggering this read; it can be 'narrow' or 'sparse'
+
     Returns a tuple of includes, excludes, and profiles.
     """
     includes = set()
@@ -54,8 +56,8 @@
         elif line == '[include]':
             if havesection and current != includes:
                 # TODO pass filename into this API so we can report it.
-                raise error.Abort(_('sparse config cannot have includes ' +
-                                    'after excludes'))
+                raise error.Abort(_('%(action)s config cannot have includes '
+                                    'after excludes') % {'action': action})
             havesection = True
             current = includes
             continue
@@ -64,14 +66,16 @@
             current = excludes
         elif line:
             if current is None:
-                raise error.Abort(_('sparse config entry outside of '
-                                    'section: %s') % line,
+                raise error.Abort(_('%(action)s config entry outside of '
+                                    'section: %(line)s')
+                                  % {'action': action, 'line': line},
                                   hint=_('add an [include] or [exclude] line '
                                          'to declare the entry type'))
 
             if line.strip().startswith('/'):
-                ui.warn(_('warning: sparse profile cannot use' +
-                          ' paths starting with /, ignoring %s\n') % line)
+                ui.warn(_('warning: %(action)s profile cannot use'
+                          ' paths starting with /, ignoring %(line)s\n')
+                        % {'action': action, 'line': line})
                 continue
             current.add(line)
 
@@ -102,7 +106,7 @@
         raise error.Abort(_('cannot parse sparse patterns from working '
                             'directory'))
 
-    includes, excludes, profiles = parseconfig(repo.ui, raw)
+    includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
     ctx = repo[rev]
 
     if profiles:
@@ -128,7 +132,7 @@
                     repo.ui.debug(msg)
                 continue
 
-            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw)
+            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw, 'sparse')
             includes.update(pincludes)
             excludes.update(pexcludes)
             profiles.update(subprofs)
@@ -516,7 +520,7 @@
                                 force=False, removing=False):
     """Update the sparse config and working directory state."""
     raw = repo.vfs.tryread('sparse')
-    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw)
+    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, 'sparse')
 
     oldstatus = repo.status()
     oldmatch = matcher(repo)
@@ -556,7 +560,7 @@
     """
     with repo.wlock():
         raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw)
+        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
 
         if not includes and not excludes:
             return
@@ -572,7 +576,7 @@
     with repo.wlock():
         # read current configuration
         raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw)
+        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
         aincludes, aexcludes, aprofiles = activeconfig(repo)
 
         # Import rules on top; only take in rules that are not yet
@@ -582,7 +586,8 @@
             with util.posixfile(util.expandpath(p), mode='rb') as fh:
                 raw = fh.read()
 
-            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw)
+            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw,
+                                                          'sparse')
             oldsize = len(includes) + len(excludes) + len(profiles)
             includes.update(iincludes - aincludes)
             excludes.update(iexcludes - aexcludes)
@@ -615,7 +620,8 @@
     """
     with repo.wlock():
         raw = repo.vfs.tryread('sparse')
-        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw)
+        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw,
+                                                          'sparse')
 
         if reset:
             newinclude = set()
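
A rough usage sketch of the parameterized parser; the 'narrow' action string
below is only illustrative and not taken from this change:

    includes, excludes, profiles = parseconfig(repo.ui, raw, 'narrow')
    # error/warning strings then render with the action substituted, e.g.
    # "narrow config cannot have includes after excludes"
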
--- a/mercurial/statprof.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/statprof.py	Tue Sep 04 12:16:28 2018 -0400
@@ -356,7 +356,7 @@
             stack = sample.stack
             sites = ['\1'.join([s.path, str(s.lineno), s.function])
                      for s in stack]
-            file.write(time + '\0' + '\0'.join(sites) + '\n')
+            file.write("%s\0%s\n" % (time, '\0'.join(sites)))
 
 def load_data(path):
     lines = open(path, 'r').read().splitlines()
--- a/mercurial/store.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/store.py	Tue Sep 04 12:16:28 2018 -0400
@@ -118,7 +118,7 @@
     def decode(s):
         i = 0
         while i < len(s):
-            for l in xrange(1, 4):
+            for l in pycompat.xrange(1, 4):
                 try:
                     yield dmap[s[i:i + l]]
                     i += l
@@ -127,7 +127,8 @@
                     pass
             else:
                 raise KeyError
-    return (lambda s: ''.join([cmap[s[c:c + 1]] for c in xrange(len(s))]),
+    return (lambda s: ''.join([cmap[s[c:c + 1]]
+                               for c in pycompat.xrange(len(s))]),
             lambda s: ''.join(list(decode(s))))
 
 _encodefname, _decodefname = _buildencodefun()
@@ -159,7 +160,7 @@
     'the~07quick~adshot'
     '''
     xchr = pycompat.bytechr
-    cmap = dict([(xchr(x), xchr(x)) for x in xrange(127)])
+    cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
     for x in _reserved():
         cmap[xchr(x)] = "~%02x" % x
     for x in range(ord("A"), ord("Z") + 1):
@@ -316,8 +317,8 @@
         mode = None
     return mode
 
-_data = ('data meta 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
-         ' phaseroots obsstore')
+_data = ('narrowspec data meta 00manifest.d 00manifest.i'
+         ' 00changelog.d 00changelog.i phaseroots obsstore')
 
 def isrevlog(f, kind, st):
     return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
@@ -545,7 +546,7 @@
                     raise
 
     def copylist(self):
-        d = ('data meta dh fncache phaseroots obsstore'
+        d = ('narrowspec data meta dh fncache phaseroots obsstore'
              ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
         return (['requires', '00changelog.i'] +
                 ['store/' + f for f in d.split()])
--- a/mercurial/streamclone.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/streamclone.py	Tue Sep 04 12:16:28 2018 -0400
@@ -358,7 +358,7 @@
 
         with repo.transaction('clone'):
             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
-                for i in xrange(filecount):
+                for i in pycompat.xrange(filecount):
                     # XXX doesn't support '\n' or '\r' in filenames
                     l = fp.readline()
                     try:
--- a/mercurial/templatefilters.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templatefilters.py	Tue Sep 04 12:16:28 2018 -0400
@@ -119,7 +119,7 @@
             b = b[:len(a)]
         if a == b:
             return a
-        for i in xrange(len(a)):
+        for i in pycompat.xrange(len(a)):
             if a[i] != b[i]:
                 return a[:i]
         return a
@@ -266,7 +266,7 @@
     num_lines = len(lines)
     endswithnewline = text[-1:] == '\n'
     def indenter():
-        for i in xrange(num_lines):
+        for i in pycompat.xrange(num_lines):
             l = lines[i]
             if i and l.strip():
                 yield prefix
--- a/mercurial/templatefuncs.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templatefuncs.py	Tue Sep 04 12:16:28 2018 -0400
@@ -140,7 +140,7 @@
     ctx = context.resource(mapping, 'ctx')
     m = ctx.match([raw])
     files = list(ctx.matches(m))
-    return templateutil.compatlist(context, mapping, "file", files)
+    return templateutil.compatfileslist(context, mapping, "file", files)
 
 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
 def fill(context, mapping, args):
@@ -575,7 +575,7 @@
     text = evalstring(context, mapping, args[0])
     style = evalstring(context, mapping, args[1])
 
-    return minirst.format(text, style=style, keep=['verbose'])[0]
+    return minirst.format(text, style=style, keep=['verbose'])
 
 @templatefunc('separate(sep, args...)', argspec='sep *args')
 def separate(context, mapping, args):
@@ -596,7 +596,7 @@
             yield sep
         yield argstr
 
-@templatefunc('shortest(node, minlength=4)', requires={'repo'})
+@templatefunc('shortest(node, minlength=4)', requires={'repo', 'cache'})
 def shortest(context, mapping, args):
     """Obtain the shortest representation of
     a node."""
@@ -629,8 +629,9 @@
             return hexnode
         if not node:
             return hexnode
+    cache = context.resource(mapping, 'cache')
     try:
-        return scmutil.shortesthexnodeidprefix(repo, node, minlength)
+        return scmutil.shortesthexnodeidprefix(repo, node, minlength, cache)
     except error.RepoLookupError:
         return hexnode
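
A minimal template usage sketch; with the 'cache' resource now required,
repeated {shortest} calls within one rendering can share lookup state:

    hg log -r . -T '{shortest(node, 5)}\n'
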
 
--- a/mercurial/templatekw.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templatekw.py	Tue Sep 04 12:16:28 2018 -0400
@@ -168,9 +168,8 @@
 
 @templatekeyword('author', requires={'ctx'})
 def showauthor(context, mapping):
-    """String. The unmodified author of the changeset."""
-    ctx = context.resource(mapping, 'ctx')
-    return ctx.user()
+    """Alias for ``{user}``"""
+    return showuser(context, mapping)
 
 @templatekeyword('bisect', requires={'repo', 'ctx'})
 def showbisect(context, mapping):
@@ -293,15 +292,14 @@
                    lambda k: '%s=%s' % (k, stringutil.escapestr(extras[k])))
 
 def _showfilesbystat(context, mapping, name, index):
-    repo = context.resource(mapping, 'repo')
     ctx = context.resource(mapping, 'ctx')
     revcache = context.resource(mapping, 'revcache')
     if 'files' not in revcache:
-        revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
+        revcache['files'] = ctx.p1().status(ctx)[:3]
     files = revcache['files'][index]
-    return compatlist(context, mapping, name, files, element='file')
+    return templateutil.compatfileslist(context, mapping, name, files)
 
-@templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_adds', requires={'ctx', 'revcache'})
 def showfileadds(context, mapping):
     """List of strings. Files added by this changeset."""
     return _showfilesbystat(context, mapping, 'file_add', 1)
@@ -325,11 +323,8 @@
             rename = getrenamed(fn, ctx.rev())
             if rename:
                 copies.append((fn, rename))
-
-    copies = util.sortdict(copies)
-    return compatdict(context, mapping, 'file_copy', copies,
-                      key='name', value='source', fmt='%s (%s)',
-                      plural='file_copies')
+    return templateutil.compatfilecopiesdict(context, mapping, 'file_copy',
+                                             copies)
 
 # showfilecopiesswitch() displays file copies only if copy records are
 # provided before calling the templater, usually with a --copies
@@ -340,17 +335,15 @@
     only if the --copied switch is set.
     """
     copies = context.resource(mapping, 'revcache').get('copies') or []
-    copies = util.sortdict(copies)
-    return compatdict(context, mapping, 'file_copy', copies,
-                      key='name', value='source', fmt='%s (%s)',
-                      plural='file_copies')
+    return templateutil.compatfilecopiesdict(context, mapping, 'file_copy',
+                                             copies)
 
-@templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_dels', requires={'ctx', 'revcache'})
 def showfiledels(context, mapping):
     """List of strings. Files removed by this changeset."""
     return _showfilesbystat(context, mapping, 'file_del', 2)
 
-@templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_mods', requires={'ctx', 'revcache'})
 def showfilemods(context, mapping):
     """List of strings. Files modified by this changeset."""
     return _showfilesbystat(context, mapping, 'file_mod', 0)
@@ -361,7 +354,7 @@
     changeset.
     """
     ctx = context.resource(mapping, 'ctx')
-    return compatlist(context, mapping, 'file', ctx.files())
+    return templateutil.compatfileslist(context, mapping, 'file', ctx.files())
 
 @templatekeyword('graphnode', requires={'repo', 'ctx'})
 def showgraphnode(context, mapping):
@@ -550,6 +543,12 @@
         return 'obsolete'
     return ''
 
+@templatekeyword('path', requires={'fctx'})
+def showpath(context, mapping):
+    """String. Repository-absolute path of the current file. (EXPERIMENTAL)"""
+    fctx = context.resource(mapping, 'fctx')
+    return fctx.path()
+
 @templatekeyword('peerurls', requires={'repo'})
 def showpeerurls(context, mapping):
     """A dictionary of repository locations defined in the [paths] section
@@ -758,6 +757,12 @@
     ui = context.resource(mapping, 'ui')
     return ui.termwidth()
 
+@templatekeyword('user', requires={'ctx'})
+def showuser(context, mapping):
+    """String. The unmodified author of the changeset."""
+    ctx = context.resource(mapping, 'ctx')
+    return ctx.user()
+
 @templatekeyword('instabilities', requires={'ctx'})
 def showinstabilities(context, mapping):
     """List of strings. Evolution instabilities affecting the changeset.
--- a/mercurial/templates/map-cmdline.bisect	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templates/map-cmdline.bisect	Tue Sep 04 12:16:28 2018 -0400
@@ -1,10 +1,10 @@
 %include map-cmdline.default
 
 [templates]
-changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}\n'
+changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}\n'
 changeset_quiet = '{lshortbisect} {rev}:{node|short}\n'
-changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
-changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
+changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
 
 # We take the zeroth word in order to omit "(implicit)" in the label
 bisectlabel = ' bisect.{word('0', bisect)}'
--- a/mercurial/templates/map-cmdline.default	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templates/map-cmdline.default	Tue Sep 04 12:16:28 2018 -0400
@@ -2,10 +2,10 @@
 # to replace some keywords with 'lkeyword', for 'labelled keyword'
 
 [templates]
-changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{summary}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{summary}\n'
 changeset_quiet = '{lnode}'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n'
-changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
 
 # File templates
 lfiles = '{if(files,
@@ -54,8 +54,8 @@
 bookmark = '{label("log.bookmark",
                    "bookmark:    {bookmark}")}\n'
 
-user = '{label("log.user",
-               "user:        {author}")}\n'
+luser = '{label("log.user",
+                "user:        {author}")}\n'
 
 summary = '{if(desc|strip, "{label('log.summary',
                                    'summary:     {desc|firstline}')}\n")}'
@@ -74,7 +74,7 @@
                                 {label('ui.note log.description',
                                        '{desc|strip}')}\n\n")}'
 
-status = '{status} {path}\n{if(copy, "  {copy}\n")}'
+status = '{status} {path|relpath}\n{if(source, "  {source|relpath}\n")}'
 
 # Obsfate templates; these will be removed once we introduce the obsfate
 # template fragment
--- a/mercurial/templates/map-cmdline.phases	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templates/map-cmdline.phases	Tue Sep 04 12:16:28 2018 -0400
@@ -1,5 +1,5 @@
 %include map-cmdline.default
 
 [templates]
-changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{summary}\n'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{summary}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
--- a/mercurial/templates/map-cmdline.status	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templates/map-cmdline.status	Tue Sep 04 12:16:28 2018 -0400
@@ -2,9 +2,9 @@
 
 [templates]
 # Override base templates
-changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}{lfiles}\n'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{description}{lfiles}\n'
-changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{extras}{description}{lfiles}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}{lfiles}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{description}{lfiles}\n'
+changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{extras}{description}{lfiles}\n'
 
 # Override the file templates
 lfiles = '{if(files,
--- a/mercurial/templateutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/templateutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -570,6 +570,32 @@
     f = _showcompatlist(context, mapping, name, data, plural, separator)
     return hybridlist(data, name=element or name, fmt=fmt, gen=f)
 
+def compatfilecopiesdict(context, mapping, name, copies):
+    """Wrap list of (dest, source) file names to support old-style list
+    template and field names
+
+    This exists for backward compatibility. Use hybriddict for new template
+    keywords.
+    """
+    # no need to provide {path} to old-style list template
+    c = [{'name': k, 'source': v} for k, v in copies]
+    f = _showcompatlist(context, mapping, name, c, plural='file_copies')
+    copies = util.sortdict(copies)
+    return hybrid(f, copies,
+                  lambda k: {'name': k, 'path': k, 'source': copies[k]},
+                  lambda k: '%s (%s)' % (k, copies[k]))
+
+def compatfileslist(context, mapping, name, files):
+    """Wrap list of file names to support old-style list template and field
+    names
+
+    This exists for backward compatibility. Use hybridlist for new template
+    keywords.
+    """
+    f = _showcompatlist(context, mapping, name, files)
+    return hybrid(f, files, lambda x: {'file': x, 'path': x},
+                  pycompat.identity)
+
 def _showcompatlist(context, mapping, name, values, plural=None, separator=' '):
     """Return a generator that renders old-style list template
 
@@ -810,8 +836,9 @@
     return data
 
 def _recursivesymbolblocker(key):
-    def showrecursion(**args):
+    def showrecursion(context, mapping):
         raise error.Abort(_("recursive reference '%s' in template") % key)
+    showrecursion._requires = ()  # mark as new-style templatekw
     return showrecursion
 
 def runsymbol(context, mapping, key, default=''):
@@ -827,12 +854,16 @@
             v = default
     if callable(v) and getattr(v, '_requires', None) is None:
         # old templatekw: expand all keywords and resources
-        # (TODO: deprecate this after porting web template keywords to new API)
+        # (TODO: drop support for old-style functions. 'f._requires = ()'
+        #  can be removed.)
         props = {k: context._resources.lookup(context, mapping, k)
                  for k in context._resources.knownkeys()}
         # pass context to _showcompatlist() through templatekw._showlist()
         props['templ'] = context
         props.update(mapping)
+        ui = props.get('ui')
+        if ui:
+            ui.deprecwarn("old-style template keyword '%s'" % key, '4.8')
         return v(**pycompat.strkwargs(props))
     if callable(v):
         # new templatekw
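
For illustration, a minimal new-style keyword that declares its resources and
so avoids the deprecated expand-everything path above (the keyword name is
hypothetical):

    @templatekeyword('shortid', requires={'ctx'})
    def showshortid(context, mapping):
        ctx = context.resource(mapping, 'ctx')
        return ctx.hex()[:12]
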
--- a/mercurial/treediscovery.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/treediscovery.py	Tue Sep 04 12:16:28 2018 -0400
@@ -16,6 +16,7 @@
 )
 from . import (
     error,
+    pycompat,
 )
 
 def findcommonincoming(repo, remote, heads=None, force=False):
@@ -111,7 +112,7 @@
             progress.increment()
             repo.ui.debug("request %d: %s\n" %
                         (reqcnt, " ".join(map(short, r))))
-            for p in xrange(0, len(r), 10):
+            for p in pycompat.xrange(0, len(r), 10):
                 with remote.commandexecutor() as e:
                     branches = e.callcommand('branches', {
                         'nodes': r[p:p + 10],
--- a/mercurial/ui.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/ui.py	Tue Sep 04 12:16:28 2018 -0400
@@ -67,6 +67,9 @@
 update.check = noconflict
 # Show conflicts information in `hg status`
 status.verbose = True
+# Refuse to perform `hg resolve --mark` on files that still have conflict
+# markers
+resolve.mark-check = abort
 
 [diff]
 git = 1
@@ -392,7 +395,7 @@
     def readconfig(self, filename, root=None, trust=False,
                    sections=None, remap=None):
         try:
-            fp = open(filename, u'rb')
+            fp = open(filename, r'rb')
         except IOError:
             if not sections: # ignore unless we were looking for something
                 return
@@ -1049,6 +1052,7 @@
             command in self.configlist('pager', 'ignore')
             or not self.configbool('ui', 'paginate')
             or not self.configbool('pager', 'attend-' + command, True)
+            or encoding.environ.get('TERM') == 'dumb'
             # TODO: if we want to allow HGPLAINEXCEPT=pager,
             # formatted() will need some adjustment.
             or not self.formatted()
@@ -1420,6 +1424,7 @@
                     return getpass.getpass('')
         except EOFError:
             raise error.ResponseExpected()
+
     def status(self, *msg, **opts):
         '''write status message to output (if ui.quiet is False)
 
@@ -1428,6 +1433,7 @@
         if not self.quiet:
             opts[r'label'] = opts.get(r'label', '') + ' ui.status'
             self.write(*msg, **opts)
+
     def warn(self, *msg, **opts):
         '''write warning message to output (stderr)
 
@@ -1435,6 +1441,15 @@
         '''
         opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
         self.write_err(*msg, **opts)
+
+    def error(self, *msg, **opts):
+        '''write error message to output (stderr)
+
+        This adds an output label of "ui.error".
+        '''
+        opts[r'label'] = opts.get(r'label', '') + ' ui.error'
+        self.write_err(*msg, **opts)
+
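
A rough usage sketch of the new error() helper; the message text and the
'inst' variable are illustrative only:

    ui.error(_("abort: %s\n") % inst)
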
     def note(self, *msg, **opts):
         '''write note to output (if ui.verbose is True)
 
@@ -1443,6 +1458,7 @@
         if self.verbose:
             opts[r'label'] = opts.get(r'label', '') + ' ui.note'
             self.write(*msg, **opts)
+
     def debug(self, *msg, **opts):
         '''write debug message to output (if ui.debugflag is True)
 
--- a/mercurial/unionrepo.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/unionrepo.py	Tue Sep 04 12:16:28 2018 -0400
@@ -73,7 +73,7 @@
             # I have no idea if csize is valid in the base revlog context.
             e = (flags, None, rsize, base,
                  link, self.rev(p1node), self.rev(p2node), node)
-            self.index.insert(-1, e)
+            self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
--- a/mercurial/upgrade.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/upgrade.py	Tue Sep 04 12:16:28 2018 -0400
@@ -450,7 +450,7 @@
         return changelog.changelog(repo.svfs)
     elif path.endswith('00manifest.i'):
         mandir = path[:-len('00manifest.i')]
-        return manifest.manifestrevlog(repo.svfs, dir=mandir)
+        return manifest.manifestrevlog(repo.svfs, tree=mandir)
     else:
         #reverse of "/".join(("data", path + ".i"))
         return filelog.filelog(repo.svfs, path[5:-2])
--- a/mercurial/util.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/util.py	Tue Sep 04 12:16:28 2018 -0400
@@ -36,6 +36,10 @@
 import warnings
 import zlib
 
+from .thirdparty import (
+    attr,
+)
+from hgdemandimport import tracing
 from . import (
     encoding,
     error,
@@ -945,12 +949,12 @@
 
         self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
 
-    def setsockopt(self, level, optname, value):
+    def setsockopt(self, res, level, optname, value):
         if not self.states:
             return
 
         self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
-            self.name, level, optname, value))
+            self.name, level, optname, value, res))
 
 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                       logdata=False, logdataapis=True):
@@ -2874,7 +2878,44 @@
     (1, 0.000000001, _('%.3f ns')),
     )
 
-_timenesting = [0]
+@attr.s
+class timedcmstats(object):
+    """Stats information produced by the timedcm context manager on entering."""
+
+    # the starting value of the timer as a float (meaning and resolution is
+    # platform dependent, see util.timer)
+    start = attr.ib(default=attr.Factory(lambda: timer()))
+    # the number of seconds as a floating point value; starts at 0, updated when
+    # the context is exited.
+    elapsed = attr.ib(default=0)
+    # the number of nested timedcm context managers.
+    level = attr.ib(default=1)
+
+    def __bytes__(self):
+        return timecount(self.elapsed) if self.elapsed else '<unknown>'
+
+    __str__ = encoding.strmethod(__bytes__)
+
+@contextlib.contextmanager
+def timedcm(whencefmt, *whenceargs):
+    """A context manager that produces timing information for a given context.
+
+    On entering, a timedcmstats instance is produced.
+
+    This context manager is reentrant.
+    """
+    # track nested context managers
+    timedcm._nested += 1
+    timing_stats = timedcmstats(level=timedcm._nested)
+    try:
+        with tracing.log(whencefmt, *whenceargs):
+            yield timing_stats
+    finally:
+        timing_stats.elapsed = timer() - timing_stats.start
+        timedcm._nested -= 1
+
+timedcm._nested = 0
 
 def timed(func):
     '''Report the execution time of a function call to stderr.
@@ -2888,18 +2929,13 @@
     '''
 
     def wrapper(*args, **kwargs):
-        start = timer()
-        indent = 2
-        _timenesting[0] += indent
-        try:
-            return func(*args, **kwargs)
-        finally:
-            elapsed = timer() - start
-            _timenesting[0] -= indent
-            stderr = procutil.stderr
-            stderr.write('%s%s: %s\n' %
-                         (' ' * _timenesting[0], func.__name__,
-                          timecount(elapsed)))
+        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
+            result = func(*args, **kwargs)
+        stderr = procutil.stderr
+        stderr.write('%s%s: %s\n' % (
+            ' ' * time_stats.level * 2, pycompat.bytestr(func.__name__),
+            time_stats))
+        return result
     return wrapper
 
 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
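
A minimal usage sketch of timedcm(), which the timed() decorator above now
builds on; the names here are illustrative:

    with util.timedcm('load %s', 'changelog') as stats:
        loadchangelog()
    repo.ui.debug('loaded in %s\n' % stats)  # renders via timecount()
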
@@ -3343,6 +3379,9 @@
                 return ''.join(buf)
             chunk = self._reader(65536)
             self._decompress(chunk)
+            if not chunk and not self._pending and not self._eof:
+                # No progress and no new data, bail out
+                return ''.join(buf)
 
 class _GzipCompressedStreamReader(_CompressedStreamReader):
     def __init__(self, fh):
--- a/mercurial/utils/cborutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/utils/cborutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -8,10 +8,7 @@
 from __future__ import absolute_import
 
 import struct
-
-from ..thirdparty.cbor.cbor2 import (
-    decoder as decodermod,
-)
+import sys
 
 # Very short version of RFC 7049...
 #
@@ -35,11 +32,16 @@
 
 SUBTYPE_MASK = 0b00011111
 
+SUBTYPE_FALSE = 20
+SUBTYPE_TRUE = 21
+SUBTYPE_NULL = 22
 SUBTYPE_HALF_FLOAT = 25
 SUBTYPE_SINGLE_FLOAT = 26
 SUBTYPE_DOUBLE_FLOAT = 27
 SUBTYPE_INDEFINITE = 31
 
+SEMANTIC_TAG_FINITE_SET = 258
+
 # Indefinite types begin with their major type ORd with information value 31.
 BEGIN_INDEFINITE_BYTESTRING = struct.pack(
     r'>B', MAJOR_TYPE_BYTESTRING << 5 | SUBTYPE_INDEFINITE)
@@ -146,7 +148,7 @@
 def streamencodeset(s):
     # https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml defines
     # semantic tag 258 for finite sets.
-    yield encodelength(MAJOR_TYPE_SEMANTIC, 258)
+    yield encodelength(MAJOR_TYPE_SEMANTIC, SEMANTIC_TAG_FINITE_SET)
 
     for chunk in streamencodearray(sorted(s, key=_mixedtypesortkey)):
         yield chunk
@@ -213,50 +215,751 @@
 
     return fn(v)
 
-def readindefinitebytestringtoiter(fh, expectheader=True):
-    """Read an indefinite bytestring to a generator.
+class CBORDecodeError(Exception):
+    """Represents an error decoding CBOR."""
+
+if sys.version_info.major >= 3:
+    def _elementtointeger(b, i):
+        return b[i]
+else:
+    def _elementtointeger(b, i):
+        return ord(b[i])
+
+STRUCT_BIG_UBYTE = struct.Struct(r'>B')
+STRUCT_BIG_USHORT = struct.Struct(r'>H')
+STRUCT_BIG_ULONG = struct.Struct(r'>L')
+STRUCT_BIG_ULONGLONG = struct.Struct(r'>Q')
+
+SPECIAL_NONE = 0
+SPECIAL_START_INDEFINITE_BYTESTRING = 1
+SPECIAL_START_ARRAY = 2
+SPECIAL_START_MAP = 3
+SPECIAL_START_SET = 4
+SPECIAL_INDEFINITE_BREAK = 5
 
-    Receives an object with a ``read(X)`` method to read N bytes.
+def decodeitem(b, offset=0):
+    """Decode a new CBOR value from a buffer at offset.
+
+    This function attempts to decode up to one complete CBOR value
+    from ``b`` starting at offset ``offset``.
+
+    The beginning of a collection (such as an array, map, set, or
+    indefinite length bytestring) counts as a single value. For these
+    special cases, a state flag will indicate that a special value was seen.
 
-    If ``expectheader`` is True, it is expected that the first byte read
-    will represent an indefinite length bytestring. Otherwise, we
-    expect the first byte to be part of the first bytestring chunk.
+    When called, the function either returns a decoded value or gives
+    a hint as to how many more bytes are needed to do so. By calling
+    the function repeatedly given a stream of bytes, the caller can
+    build up the original values.
+
+    Returns a tuple with the following elements:
+
+    * Bool indicating whether a complete value was decoded.
+    * A decoded value if the first element is True, otherwise None.
+    * Integer number of bytes. If positive, the number of bytes
+      read. If negative, the number of bytes we need to read to
+      decode this value or the next chunk in this value.
+    * One of the ``SPECIAL_*`` constants indicating special treatment
+      for this value. ``SPECIAL_NONE`` means this is a fully decoded
+      simple value (such as an integer or bool).
     """
-    read = fh.read
-    decodeuint = decodermod.decode_uint
-    byteasinteger = decodermod.byte_as_integer
+
+    initial = _elementtointeger(b, offset)
+    offset += 1
+
+    majortype = initial >> 5
+    subtype = initial & SUBTYPE_MASK
+
+    if majortype == MAJOR_TYPE_UINT:
+        complete, value, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, value, readcount + 1, SPECIAL_NONE
+        else:
+            return False, None, readcount, SPECIAL_NONE
+
+    elif majortype == MAJOR_TYPE_NEGINT:
+        # Negative integers are the same as UINT except inverted minus 1.
+        complete, value, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, -value - 1, readcount + 1, SPECIAL_NONE
+        else:
+            return False, None, readcount, SPECIAL_NONE
+
+    elif majortype == MAJOR_TYPE_BYTESTRING:
+        # Beginnings of bytestrings are treated as uints in order to
+        # decode their length, which may be indefinite.
+        complete, size, readcount = decodeuint(subtype, b, offset,
+                                               allowindefinite=True)
+
+        # We don't know the size of the bytestring. It must be a definite
+        # length since the indefinite subtype would be encoded in the initial
+        # byte.
+        if not complete:
+            return False, None, readcount, SPECIAL_NONE
+
+        # We know the length of the bytestring.
+        if size is not None:
+            # And the data is available in the buffer.
+            if offset + readcount + size <= len(b):
+                value = b[offset + readcount:offset + readcount + size]
+                return True, value, readcount + size + 1, SPECIAL_NONE
+
+            # And we need more data in order to return the bytestring.
+            else:
+                wanted = len(b) - offset - readcount - size
+                return False, None, wanted, SPECIAL_NONE
+
+        # It is an indefinite length bytestring.
+        else:
+            return True, None, 1, SPECIAL_START_INDEFINITE_BYTESTRING
+
+    elif majortype == MAJOR_TYPE_STRING:
+        raise CBORDecodeError('string major type not supported')
+
+    elif majortype == MAJOR_TYPE_ARRAY:
+        # Beginnings of arrays are treated as uints in order to decode their
+        # length. We don't allow indefinite length arrays.
+        complete, size, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, size, readcount + 1, SPECIAL_START_ARRAY
+        else:
+            return False, None, readcount, SPECIAL_NONE
+
+    elif majortype == MAJOR_TYPE_MAP:
+        # Beginnings of maps are treated as uints in order to decode their
+        # number of elements. We don't allow indefinite length maps.
+        complete, size, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, size, readcount + 1, SPECIAL_START_MAP
+        else:
+            return False, None, readcount, SPECIAL_NONE
 
-    if expectheader:
-        initial = decodermod.byte_as_integer(read(1))
+    elif majortype == MAJOR_TYPE_SEMANTIC:
+        # Semantic tag value is read the same as a uint.
+        complete, tagvalue, readcount = decodeuint(subtype, b, offset)
+
+        if not complete:
+            return False, None, readcount, SPECIAL_NONE
+
+        # The behavior here is a little wonky. The main type being "decorated"
+        # by this semantic tag follows. A more robust parser would probably emit
+        # a special flag indicating this as a semantic tag and let the caller
+        # deal with the types that follow. But since we don't support many
+        # semantic tags, it is easier to deal with the special cases here and
+        # hide complexity from the caller. If we add support for more semantic
+        # tags, we should probably move semantic tag handling into the caller.
+        if tagvalue == SEMANTIC_TAG_FINITE_SET:
+            if offset + readcount >= len(b):
+                return False, None, -1, SPECIAL_NONE
+
+            complete, size, readcount2, special = decodeitem(b,
+                                                             offset + readcount)
+
+            if not complete:
+                return False, None, readcount2, SPECIAL_NONE
+
+            if special != SPECIAL_START_ARRAY:
+                raise CBORDecodeError('expected array after finite set '
+                                      'semantic tag')
+
+            return True, size, readcount + readcount2 + 1, SPECIAL_START_SET
+
+        else:
+            raise CBORDecodeError('semantic tag %d not allowed' % tagvalue)
+
+    elif majortype == MAJOR_TYPE_SPECIAL:
+        # Only specific values for the information field are allowed.
+        if subtype == SUBTYPE_FALSE:
+            return True, False, 1, SPECIAL_NONE
+        elif subtype == SUBTYPE_TRUE:
+            return True, True, 1, SPECIAL_NONE
+        elif subtype == SUBTYPE_NULL:
+            return True, None, 1, SPECIAL_NONE
+        elif subtype == SUBTYPE_INDEFINITE:
+            return True, None, 1, SPECIAL_INDEFINITE_BREAK
+        # If subtype is 24, the simple value is in the next byte (unsupported).
+        else:
+            raise CBORDecodeError('special type %d not allowed' % subtype)
+    else:
+        assert False
+
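
For example (0x18 introduces a one-byte uint; values worked out from the
code above):

    decodeitem(b'\x18\x2a')  # -> (True, 42, 2, SPECIAL_NONE)
    decodeitem(b'\x18')      # -> (False, None, -1, SPECIAL_NONE); 1 byte short
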
+def decodeuint(subtype, b, offset=0, allowindefinite=False):
+    """Decode an unsigned integer.
+
+    ``subtype`` is the lower 5 bits from the initial byte CBOR item
+    "header." ``b`` is a buffer containing bytes. ``offset`` points to
+    the index of the first byte after the byte that ``subtype`` was
+    derived from.
+
+    ``allowindefinite`` allows the special indefinite length value
+    indicator.
+
+    Returns a 3-tuple of (successful, value, count).
+
+    The first element is a bool indicating if decoding completed. The 2nd
+    is the decoded integer value or None if not fully decoded or the subtype
+    is 31 and ``allowindefinite`` is True. The 3rd value is the count of bytes.
+    If positive, it is the number of additional bytes decoded. If negative,
+    it is the number of additional bytes needed to decode this value.
+    """
+
+    # Small values are inline.
+    if subtype < 24:
+        return True, subtype, 0
+    # Indefinite length specifier.
+    elif subtype == 31:
+        if allowindefinite:
+            return True, None, 0
+        else:
+            raise CBORDecodeError('indefinite length uint not allowed here')
+    elif subtype >= 28:
+        raise CBORDecodeError('unsupported subtype on integer type: %d' %
+                              subtype)
 
-        majortype = initial >> 5
-        subtype = initial & SUBTYPE_MASK
+    if subtype == 24:
+        s = STRUCT_BIG_UBYTE
+    elif subtype == 25:
+        s = STRUCT_BIG_USHORT
+    elif subtype == 26:
+        s = STRUCT_BIG_ULONG
+    elif subtype == 27:
+        s = STRUCT_BIG_ULONGLONG
+    else:
+        raise CBORDecodeError('bounds condition checking violation')
+
+    if len(b) - offset >= s.size:
+        return True, s.unpack_from(b, offset)[0], s.size
+    else:
+        return False, None, len(b) - offset - s.size
+
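
For example:

    decodeuint(5, b'')           # -> (True, 5, 0); small values are inline
    decodeuint(25, b'\x01\x00')  # -> (True, 256, 2); 16-bit big-endian uint
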
+class bytestringchunk(bytes):
+    """Represents a chunk/segment in an indefinite length bytestring.
+
+    This behaves like a ``bytes`` but in addition has the ``isfirst``
+    and ``islast`` attributes indicating whether this chunk is the first
+    or last in an indefinite length bytestring.
+    """
+
+    def __new__(cls, v, first=False, last=False):
+        self = bytes.__new__(cls, v)
+        self.isfirst = first
+        self.islast = last
+
+        return self
+
+class sansiodecoder(object):
+    """A CBOR decoder that doesn't perform its own I/O.
+
+    To use, construct an instance and feed it segments containing
+    CBOR-encoded bytes via ``decode()``. The return value from ``decode()``
+    indicates whether a fully-decoded value is available, how many bytes
+    were consumed, and offers a hint as to how many bytes should be fed
+    in next time to decode the next value.
+
+    The decoder assumes it will decode N discrete CBOR values, not just
+    a single value. i.e. if the bytestream contains uints packed one after
+    the other, the decoder will decode them all, rather than just the initial
+    one.
+
+    When ``decode()`` indicates a value is available, call ``getavailable()``
+    to return all fully decoded values.
+
+    ``decode()`` can partially decode input. It is up to the caller to keep
+    track of what data was consumed and to pass unconsumed data in on the
+    next invocation.
+
+    The decoder decodes atomically at the *item* level. See ``decodeitem()``.
+    If an *item* cannot be fully decoded, the decoder won't record it as
+    partially consumed. Instead, the caller will be instructed to pass in
+    the initial bytes of this item on the next invocation. This does result
+    in some redundant parsing. But the overhead should be minimal.
+
+    This decoder only supports a subset of CBOR as required by Mercurial.
+    It lacks support for:
+
+    * Indefinite length arrays
+    * Indefinite length maps
+    * Use of indefinite length bytestrings as keys or values within
+      arrays, maps, or sets.
+    * Nested arrays, maps, or sets within sets
+    * Any semantic tag that isn't a mathematical finite set
+    * Floating point numbers
+    * Undefined special value
+
+    CBOR types are decoded to Python types as follows:
+
+    uint -> int
+    negint -> int
+    bytestring -> bytes
+    map -> dict
+    array -> list
+    True -> bool
+    False -> bool
+    null -> None
+    indefinite length bytestring chunk -> [bytestringchunk]
 
-        if majortype != MAJOR_TYPE_BYTESTRING:
-            raise decodermod.CBORDecodeError(
-                'expected major type %d; got %d' % (MAJOR_TYPE_BYTESTRING,
-                                                    majortype))
+    The only non-obvious mapping here is an indefinite length bytestring
+    to the ``bytestringchunk`` type. This is to facilitate streaming
+    indefinite length bytestrings out of the decoder and to differentiate
+    a regular bytestring from an indefinite length bytestring.
+    """
+
+    _STATE_NONE = 0
+    _STATE_WANT_MAP_KEY = 1
+    _STATE_WANT_MAP_VALUE = 2
+    _STATE_WANT_ARRAY_VALUE = 3
+    _STATE_WANT_SET_VALUE = 4
+    _STATE_WANT_BYTESTRING_CHUNK_FIRST = 5
+    _STATE_WANT_BYTESTRING_CHUNK_SUBSEQUENT = 6
+
+    def __init__(self):
+        # TODO add support for limiting size of bytestrings
+        # TODO add support for limiting number of keys / values in collections
+        # TODO add support for limiting size of buffered partial values
+
+        self.decodedbytecount = 0
+
+        self._state = self._STATE_NONE
+
+        # Stack of active nested collections. Each entry is a dict describing
+        # the collection.
+        self._collectionstack = []
+
+        # Fully decoded key to use for the current map.
+        self._currentmapkey = None
+
+        # Fully decoded values available for retrieval.
+        self._decodedvalues = []
+
+    @property
+    def inprogress(self):
+        """Whether the decoder has partially decoded a value."""
+        return self._state != self._STATE_NONE
+
+    def decode(self, b, offset=0):
+        """Attempt to decode bytes from an input buffer.
+
+        ``b`` is a collection of bytes and ``offset`` is the byte
+        offset within that buffer from which to begin reading data.
+
+        ``b`` must support ``len()`` and accessing bytes slices via
+        ``__getitem__`` with slices. Typically ``bytes`` instances are used.
+
+        Returns a tuple with the following fields:
+
+        * Bool indicating whether values are available for retrieval.
+        * Integer indicating the number of bytes that were fully consumed,
+          starting from ``offset``.
+        * Integer indicating the number of bytes that are desired for the
+          next call in order to decode an item.
+        """
+        if not b:
+            return bool(self._decodedvalues), 0, 0
+
+        initialoffset = offset
+
+        # We could easily split the body of this loop into a function. But
+        # Python performance is sensitive to function calls and collections
+        # are composed of many items. So leaving it as a while loop could help
+        # with performance. One thing that may not help is the use of
+        # if..elif versus a lookup/dispatch table. There may be value
+        # in switching that.
+        while offset < len(b):
+            # Attempt to decode an item. This could be a whole value or a
+            # special value indicating an event, such as start or end of a
+            # collection or indefinite length type.
+            complete, value, readcount, special = decodeitem(b, offset)
+
+            if readcount > 0:
+                self.decodedbytecount += readcount
+
+            if not complete:
+                assert readcount < 0
+                return (
+                    bool(self._decodedvalues),
+                    offset - initialoffset,
+                    -readcount,
+                )
+
+            offset += readcount
 
-        if subtype != SUBTYPE_INDEFINITE:
-            raise decodermod.CBORDecodeError(
-                'expected indefinite subtype; got %d' % subtype)
+            # No nested state. We either have a full value or beginning of a
+            # complex value to deal with.
+            if self._state == self._STATE_NONE:
+                # A normal value.
+                if special == SPECIAL_NONE:
+                    self._decodedvalues.append(value)
+
+                elif special == SPECIAL_START_ARRAY:
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': [],
+                    })
+                    self._state = self._STATE_WANT_ARRAY_VALUE
+
+                elif special == SPECIAL_START_MAP:
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': {},
+                    })
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                elif special == SPECIAL_START_SET:
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': set(),
+                    })
+                    self._state = self._STATE_WANT_SET_VALUE
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    self._state = self._STATE_WANT_BYTESTRING_CHUNK_FIRST
+
+                else:
+                    raise CBORDecodeError('unhandled special state: %d' %
+                                          special)
+
+            # This value becomes an element of the current array.
+            elif self._state == self._STATE_WANT_ARRAY_VALUE:
+                # Simple values get appended.
+                if special == SPECIAL_NONE:
+                    c = self._collectionstack[-1]
+                    c['v'].append(value)
+                    c['remaining'] -= 1
+
+                    # self._state doesn't need to change.
+
+                # An array nested within an array.
+                elif special == SPECIAL_START_ARRAY:
+                    lastc = self._collectionstack[-1]
+                    newvalue = []
+
+                    lastc['v'].append(newvalue)
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    # self._state doesn't need to change.
+
+                # A map nested within an array.
+                elif special == SPECIAL_START_MAP:
+                    lastc = self._collectionstack[-1]
+                    newvalue = {}
+
+                    lastc['v'].append(newvalue)
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue
+                    })
+
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                elif special == SPECIAL_START_SET:
+                    lastc = self._collectionstack[-1]
+                    newvalue = set()
+
+                    lastc['v'].append(newvalue)
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    self._state = self._STATE_WANT_SET_VALUE
 
-    # The indefinite bytestring is composed of chunks of normal bytestrings.
-    # Read chunks until we hit a BREAK byte.
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings '
+                                          'not allowed as array values')
+
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting array value: %d' % special)
+
+            # This value becomes the key of the current map instance.
+            elif self._state == self._STATE_WANT_MAP_KEY:
+                if special == SPECIAL_NONE:
+                    self._currentmapkey = value
+                    self._state = self._STATE_WANT_MAP_VALUE
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings '
+                                          'not allowed as map keys')
+
+                elif special in (SPECIAL_START_ARRAY, SPECIAL_START_MAP,
+                                 SPECIAL_START_SET):
+                    raise CBORDecodeError('collections not supported as map '
+                                          'keys')
+
+                # We do not allow special values to be used as map keys.
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting map key: %d' % special)
+
+            # This value becomes the value of the current map key.
+            elif self._state == self._STATE_WANT_MAP_VALUE:
+                # Simple values simply get inserted into the map.
+                if special == SPECIAL_NONE:
+                    lastc = self._collectionstack[-1]
+                    lastc['v'][self._currentmapkey] = value
+                    lastc['remaining'] -= 1
+
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                # A new array is used as the map value.
+                elif special == SPECIAL_START_ARRAY:
+                    lastc = self._collectionstack[-1]
+                    newvalue = []
+
+                    lastc['v'][self._currentmapkey] = newvalue
+                    lastc['remaining'] -= 1
 
-    while True:
-        # We need to sniff for the BREAK byte.
-        initial = byteasinteger(read(1))
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    self._state = self._STATE_WANT_ARRAY_VALUE
+
+                # A new map is used as the map value.
+                elif special == SPECIAL_START_MAP:
+                    lastc = self._collectionstack[-1]
+                    newvalue = {}
+
+                    lastc['v'][self._currentmapkey] = newvalue
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                # A new set is used as the map value.
+                elif special == SPECIAL_START_SET:
+                    lastc = self._collectionstack[-1]
+                    newvalue = set()
+
+                    lastc['v'][self._currentmapkey] = newvalue
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    self._state = self._STATE_WANT_SET_VALUE
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings not '
+                                          'allowed as map values')
+
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting map value: %d' % special)
+
+                self._currentmapkey = None
 
-        if initial == BREAK_INT:
-            break
+            # This value is added to the current set.
+            elif self._state == self._STATE_WANT_SET_VALUE:
+                if special == SPECIAL_NONE:
+                    lastc = self._collectionstack[-1]
+                    lastc['v'].add(value)
+                    lastc['remaining'] -= 1
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings not '
+                                          'allowed as set values')
+
+                elif special in (SPECIAL_START_ARRAY,
+                                 SPECIAL_START_MAP,
+                                 SPECIAL_START_SET):
+                    raise CBORDecodeError('collections not allowed as set '
+                                          'values')
+
+                # We don't allow non-trivial types to exist as set values.
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting set value: %d' % special)
+
+            # This value represents the first chunk in an indefinite length
+            # bytestring.
+            elif self._state == self._STATE_WANT_BYTESTRING_CHUNK_FIRST:
+                # We received a full chunk.
+                if special == SPECIAL_NONE:
+                    self._decodedvalues.append(bytestringchunk(value,
+                                                               first=True))
+
+                    self._state = self._STATE_WANT_BYTESTRING_CHUNK_SUBSEQUENT
+
+                # The end of stream marker. This means it is an empty
+                # indefinite length bytestring.
+                elif special == SPECIAL_INDEFINITE_BREAK:
+                    # We /could/ convert this to a b''. But we want to preserve
+                    # the nature of the underlying data so consumers expecting
+                    # an indefinite length bytestring get one.
+                    self._decodedvalues.append(bytestringchunk(b'',
+                                                               first=True,
+                                                               last=True))
+
+                    # Since indefinite length bytestrings can't be used in
+                    # collections, we must be at the root level.
+                    assert not self._collectionstack
+                    self._state = self._STATE_NONE
 
-        length = decodeuint(fh, initial & SUBTYPE_MASK)
-        chunk = read(length)
+                else:
+                    raise CBORDecodeError('unexpected special value when '
+                                          'expecting bytestring chunk: %d' %
+                                          special)
+
+            # This value represents the non-initial chunk in an indefinite
+            # length bytestring.
+            elif self._state == self._STATE_WANT_BYTESTRING_CHUNK_SUBSEQUENT:
+                # We received a full chunk.
+                if special == SPECIAL_NONE:
+                    self._decodedvalues.append(bytestringchunk(value))
+
+                # The end of stream marker.
+                elif special == SPECIAL_INDEFINITE_BREAK:
+                    self._decodedvalues.append(bytestringchunk(b'', last=True))
+
+                    # Since indefinite length bytestrings can't be used in
+                    # collections, we must be at the root level.
+                    assert not self._collectionstack
+                    self._state = self._STATE_NONE
+
+                else:
+                    raise CBORDecodeError('unexpected special value when '
+                                          'expecting bytestring chunk: %d' %
+                                          special)
+
+            else:
+                raise CBORDecodeError('unhandled decoder state: %d' %
+                                      self._state)
+
+            # We could have just added the final value in a collection. End
+            # all complete collections at the top of the stack.
+            while True:
+                # Bail if we're not waiting on a new collection item.
+                if self._state not in (self._STATE_WANT_ARRAY_VALUE,
+                                       self._STATE_WANT_MAP_KEY,
+                                       self._STATE_WANT_SET_VALUE):
+                    break
+
+                # Or we are expecting more items for this collection.
+                lastc = self._collectionstack[-1]
+
+                if lastc['remaining']:
+                    break
+
+                # The collection at the top of the stack is complete.
+
+                # Discard it, as it isn't needed for future items.
+                self._collectionstack.pop()
 
-        if len(chunk) != length:
-            raise decodermod.CBORDecodeError(
-                'failed to read bytestring chunk: got %d bytes; expected %d' % (
-                    len(chunk), length))
+                # If this is a nested collection, we don't emit it, since it
+                # will be emitted by its parent collection. But we do need to
+                # update state to reflect what the new top-most collection
+                # on the stack is.
+                if self._collectionstack:
+                    self._state = {
+                        list: self._STATE_WANT_ARRAY_VALUE,
+                        dict: self._STATE_WANT_MAP_KEY,
+                        set: self._STATE_WANT_SET_VALUE,
+                    }[type(self._collectionstack[-1]['v'])]
+
+                # If this is the root collection, emit it.
+                else:
+                    self._decodedvalues.append(lastc['v'])
+                    self._state = self._STATE_NONE
+
+        return (
+            bool(self._decodedvalues),
+            offset - initialoffset,
+            0,
+        )
+
+    def getavailable(self):
+        """Returns an iterator over fully decoded values.
+
+        Once values are retrieved, they won't be available on the next call.
+        """
+
+        l = list(self._decodedvalues)
+        self._decodedvalues = []
+        return l
+
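
A minimal usage sketch (0x82 starts a two-element array):

    decoder = sansiodecoder()
    available, readcount, wanted = decoder.decode(b'\x82\x01\x02')
    if available:
        values = decoder.getavailable()  # [[1, 2]]
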
+class bufferingdecoder(object):
+    """A CBOR decoder that buffers undecoded input.
+
+    This is a glorified wrapper around ``sansiodecoder`` that adds a buffering
+    layer. All input that isn't consumed by ``sansiodecoder`` will be buffered
+    and concatenated with any new input that arrives later.
+
+    TODO consider adding limits as to the maximum amount of data that can
+    be buffered.
+    """
+    def __init__(self):
+        self._decoder = sansiodecoder()
+        self._leftover = None
+
+    def decode(self, b):
+        """Attempt to decode bytes to CBOR values.
 
-        yield chunk
+        Returns a tuple with the following fields:
+
+        * Bool indicating whether new values are available for retrieval.
+        * Integer number of bytes decoded from the new input.
+        * Integer number of bytes wanted to decode the next value.
+        """
+
+        if self._leftover:
+            oldlen = len(self._leftover)
+            b = self._leftover + b
+            self._leftover = None
+        else:
+            oldlen = 0
+
+        available, readcount, wanted = self._decoder.decode(b)
+
+        if readcount < len(b):
+            self._leftover = b[readcount:]
+
+        return available, readcount - oldlen, wanted
+
+    def getavailable(self):
+        return self._decoder.getavailable()
+
+def decodeall(b):
+    """Decode all CBOR items present in an iterable of bytes.
+
+    In addition to regular decode errors, raises CBORDecodeError if the
+    entirety of the passed buffer does not fully decode to complete CBOR
+    values. This includes failure to decode any value, incomplete collection
+    types, incomplete indefinite length items, and extra data at the end of
+    the buffer.
+    """
+    if not b:
+        return []
+
+    decoder = sansiodecoder()
+
+    havevalues, readcount, wantbytes = decoder.decode(b)
+
+    if readcount != len(b):
+        raise CBORDecodeError('input data not fully consumed')
+
+    if decoder.inprogress:
+        raise CBORDecodeError('input data not complete')
+
+    return decoder.getavailable()
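
For orientation, a minimal usage sketch of the decoding entry points added above. The CBOR byte strings are hand-encoded standard values, not data taken from this patch (0x18 0x2a is the unsigned integer 42), and it assumes the decoder's basic unsigned-integer support:

    decoder = bufferingdecoder()

    # The first chunk is an incomplete item; it is buffered, not decoded.
    havevalues, readcount, wanted = decoder.decode(b'\x18')
    assert not havevalues

    # The second chunk completes the buffered item.
    havevalues, readcount, wanted = decoder.decode(b'\x2a')
    assert havevalues
    assert decoder.getavailable() == [42]

    # decodeall() insists the buffer holds only complete values.
    assert decodeall(b'\x01\x02') == [1, 2]
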
--- a/mercurial/utils/stringutil.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/utils/stringutil.py	Tue Sep 04 12:16:28 2018 -0400
@@ -13,6 +13,7 @@
 import codecs
 import re as remod
 import textwrap
+import types
 
 from ..i18n import _
 from ..thirdparty import attr
@@ -42,27 +43,190 @@
         return pat
     return pat.encode('latin1')
 
-def pprint(o, bprefix=False):
+def pprint(o, bprefix=False, indent=0):
     """Pretty print an object."""
+    return b''.join(pprintgen(o, bprefix=bprefix, indent=indent))
+
+def pprintgen(o, bprefix=False, indent=0, _level=1):
+    """Pretty print an object to a generator of atoms.
+
+    ``bprefix`` is a flag influencing whether bytestrings are emitted with
+    a ``b''`` prefix.
+
+    ``indent`` controls whether collections and nested data structures
+    span multiple lines: when non-zero, nested items are placed on their
+    own lines, indented by that many spaces per nesting level. By default,
+    no newlines are emitted.
+    """
+
     if isinstance(o, bytes):
         if bprefix:
-            return "b'%s'" % escapestr(o)
-        return "'%s'" % escapestr(o)
+            yield "b'%s'" % escapestr(o)
+        else:
+            yield "'%s'" % escapestr(o)
     elif isinstance(o, bytearray):
         # codecs.escape_encode() can't handle bytearray, so escapestr fails
         # without coercion.
-        return "bytearray['%s']" % escapestr(bytes(o))
+        yield "bytearray['%s']" % escapestr(bytes(o))
     elif isinstance(o, list):
-        return '[%s]' % (b', '.join(pprint(a, bprefix=bprefix) for a in o))
+        if not o:
+            yield '[]'
+            return
+
+        yield '['
+
+        if indent:
+            yield '\n'
+            yield ' ' * (_level * indent)
+
+        for i, a in enumerate(o):
+            for chunk in pprintgen(a, bprefix=bprefix, indent=indent,
+                                   _level=_level + 1):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (_level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            yield '\n'
+            yield ' ' * ((_level - 1) * indent)
+
+        yield ']'
     elif isinstance(o, dict):
-        return '{%s}' % (b', '.join(
-            '%s: %s' % (pprint(k, bprefix=bprefix),
-                        pprint(v, bprefix=bprefix))
-            for k, v in sorted(o.items())))
+        if not o:
+            yield '{}'
+            return
+
+        yield '{'
+
+        if indent:
+            yield '\n'
+            yield ' ' * (_level * indent)
+
+        for i, (k, v) in enumerate(sorted(o.items())):
+            for chunk in pprintgen(k, bprefix=bprefix, indent=indent,
+                                   _level=_level + 1):
+                yield chunk
+
+            yield ': '
+
+            for chunk in pprintgen(v, bprefix=bprefix, indent=indent,
+                                   _level=_level + 1):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (_level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            yield '\n'
+            yield ' ' * ((_level - 1) * indent)
+
+        yield '}'
+    elif isinstance(o, set):
+        if not o:
+            yield 'set([])'
+            return
+
+        yield 'set(['
+
+        if indent:
+            yield '\n'
+            yield ' ' * (_level * indent)
+
+        for i, k in enumerate(sorted(o)):
+            for chunk in pprintgen(k, bprefix=bprefix, indent=indent,
+                                   _level=_level + 1):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (_level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            yield '\n'
+            yield ' ' * ((_level - 1) * indent)
+
+        yield '])'
     elif isinstance(o, tuple):
-        return '(%s)' % (b', '.join(pprint(a, bprefix=bprefix) for a in o))
+        if not o:
+            yield '()'
+            return
+
+        yield '('
+
+        if indent:
+            yield '\n'
+            yield ' ' * (_level * indent)
+
+        for i, a in enumerate(o):
+            for chunk in pprintgen(a, bprefix=bprefix, indent=indent,
+                                   _level=_level + 1):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (_level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            yield '\n'
+            yield ' ' * ((_level - 1) * indent)
+
+        yield ')'
+    elif isinstance(o, types.GeneratorType):
+        # Special case of empty generator.
+        try:
+            nextitem = next(o)
+        except StopIteration:
+            yield 'gen[]'
+            return
+
+        yield 'gen['
+
+        if indent:
+            yield '\n'
+            yield ' ' * (_level * indent)
+
+        last = False
+
+        while not last:
+            current = nextitem
+
+            try:
+                nextitem = next(o)
+            except StopIteration:
+                last = True
+
+            for chunk in pprintgen(current, bprefix=bprefix, indent=indent,
+                                   _level=_level + 1):
+                yield chunk
+
+            if not last:
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (_level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            yield '\n'
+            yield ' ' * ((_level - 1) * indent)
+
+        yield ']'
     else:
-        return pycompat.byterepr(o)
+        yield pycompat.byterepr(o)
 
 def prettyrepr(o):
     """Pretty print a representation of a possibly-nested object"""
@@ -111,7 +275,7 @@
     elif callable(r):
         return r()
     else:
-        return pycompat.byterepr(r)
+        return pprint(r)
 
 def binary(s):
     """return true if a string is binary data"""
@@ -424,6 +588,8 @@
     return encoding.trim(text, maxlength, ellipsis='...')
 
 def escapestr(s):
+    if isinstance(s, memoryview):
+        s = bytes(s)
     # call underlying function of s.encode('string_escape') directly for
     # Python 3 compatibility
     return codecs.escape_encode(s)[0]
@@ -464,7 +630,7 @@
         def _cutdown(self, ucstr, space_left):
             l = 0
             colwidth = encoding.ucolwidth
-            for i in xrange(len(ucstr)):
+            for i in pycompat.xrange(len(ucstr)):
                 l += colwidth(ucstr[i])
                 if space_left < l:
                     return (ucstr[:i], ucstr[i:])
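
To illustrate the ``indent`` behavior documented in the new ``pprintgen()``, here is output traced by hand from the generator logic above (a sketch, not captured from a test run):

    from mercurial.utils import stringutil

    stringutil.pprint({b'a': [1, 2]}, indent=2)
    # The joined atoms produce:
    # {
    #   'a': [
    #     1,
    #     2
    #   ]
    # }
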
--- a/mercurial/verify.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/verify.py	Tue Sep 04 12:16:28 2018 -0400
@@ -45,7 +45,7 @@
         self.errors = 0
         self.warnings = 0
         self.havecl = len(repo.changelog) > 0
-        self.havemf = len(repo.manifestlog._revlog) > 0
+        self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
         self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
         self.lrugetctx = util.lrucachefunc(repo.__getitem__)
         self.refersmf = False
@@ -205,7 +205,7 @@
         ui = self.ui
         match = self.match
         mfl = self.repo.manifestlog
-        mf = mfl._revlog.dirlog(dir)
+        mf = mfl.getstorage(dir)
 
         if not dir:
             self.ui.status(_("checking manifests\n"))
--- a/mercurial/win32.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/win32.py	Tue Sep 04 12:16:28 2018 -0400
@@ -615,7 +615,7 @@
     # callers to recreate f immediately while having other readers do their
     # implicit zombie filename blocking on a temporary name.
 
-    for tries in xrange(10):
+    for tries in pycompat.xrange(10):
         temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
         try:
             os.rename(f, temp)  # raises OSError EEXIST if temp exists
--- a/mercurial/wireprotoserver.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/wireprotoserver.py	Tue Sep 04 12:16:28 2018 -0400
@@ -502,14 +502,14 @@
     def getargs(self, args):
         data = {}
         keys = args.split()
-        for n in xrange(len(keys)):
+        for n in pycompat.xrange(len(keys)):
             argline = self._fin.readline()[:-1]
             arg, l = argline.split()
             if arg not in keys:
                 raise error.Abort(_("unexpected parameter %r") % arg)
             if arg == '*':
                 star = {}
-                for k in xrange(int(l)):
+                for k in pycompat.xrange(int(l)):
                     argline = self._fin.readline()[:-1]
                     arg, l = argline.split()
                     val = self._fin.read(int(l))
--- a/mercurial/wireprotov1peer.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/mercurial/wireprotov1peer.py	Tue Sep 04 12:16:28 2018 -0400
@@ -497,7 +497,7 @@
     def between(self, pairs):
         batch = 8 # avoid giant requests
         r = []
-        for i in xrange(0, len(pairs), batch):
+        for i in pycompat.xrange(0, len(pairs), batch):
             n = " ".join([wireprototypes.encodelist(p, '-')
                           for p in pairs[i:i + batch]])
             d = self._call("between", pairs=n)
--- a/setup.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/setup.py	Tue Sep 04 12:16:28 2018 -0400
@@ -817,7 +817,9 @@
             'mercurial.thirdparty.zope',
             'mercurial.thirdparty.zope.interface',
             'mercurial.utils',
+            'mercurial.revlogutils',
             'hgext', 'hgext.convert', 'hgext.fsmonitor',
+            'hgext.fastannotate',
             'hgext.fsmonitor.pywatchman',
             'hgext.infinitepush',
             'hgext.highlight',
--- a/tests/bzr-definitions	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/bzr-definitions	Tue Sep 04 12:16:28 2018 -0400
@@ -6,7 +6,7 @@
 
 glog()
 {
-    hg log -G --template '{rev}@{branch} "{desc|firstline}" files: {files}\n' "$@"
+    hg log -G --template '{rev}@{branch} "{desc|firstline}" files+: [{file_adds}], files-: [{file_dels}], files: [{file_mods}]\n' "$@"
 }
 
 manifest()
--- a/tests/dummysmtpd.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/dummysmtpd.py	Tue Sep 04 12:16:28 2018 -0400
@@ -26,7 +26,7 @@
     def __init__(self, localaddr):
         smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None)
 
-    def process_message(self, peer, mailfrom, rcpttos, data):
+    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
         log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos)))
 
     def handle_error(self):
--- a/tests/hghave.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/hghave.py	Tue Sep 04 12:16:28 2018 -0400
@@ -631,17 +631,7 @@
 
 @check("py3exe", "a Python 3.x interpreter is available")
 def has_python3exe():
-    return 'PYTHON3' in os.environ
-
-@check("py3pygments", "Pygments available on Python 3.x")
-def has_py3pygments():
-    if has_py3k():
-        return has_pygments()
-    elif has_python3exe():
-        # just check exit status (ignoring output)
-        py3 = os.environ['PYTHON3']
-        return matchoutput('%s -c "import pygments"' % py3, br'')
-    return False
+    return matchoutput('python3 -V', br'^Python 3.(5|6|7|8|9)')
 
 @check("pure", "running with pure Python code")
 def has_pure():
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/printrevset.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,41 @@
+from __future__ import absolute_import
+from mercurial import (
+  cmdutil,
+  commands,
+  extensions,
+  logcmdutil,
+  revsetlang,
+  smartset,
+)
+
+from mercurial.utils import (
+  stringutil,
+)
+
+def logrevset(repo, pats, opts):
+    revs = logcmdutil._initialrevs(repo, opts)
+    if not revs:
+        return None
+    match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
+    return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
+
+def uisetup(ui):
+    def printrevset(orig, repo, pats, opts):
+        revs, filematcher = orig(repo, pats, opts)
+        if opts.get(b'print_revset'):
+            expr = logrevset(repo, pats, opts)
+            if expr:
+                tree = revsetlang.parse(expr)
+                tree = revsetlang.analyze(tree)
+            else:
+                tree = []
+            ui = repo.ui
+            ui.write(b'%s\n' % stringutil.pprint(opts.get(b'rev', [])))
+            ui.write(revsetlang.prettyformat(tree) + b'\n')
+            ui.write(stringutil.prettyrepr(revs) + b'\n')
+            revs = smartset.baseset()  # display no revisions
+        return revs, filematcher
+    extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
+    aliases, entry = cmdutil.findcmd(b'log', commands.table)
+    entry[1].append((b'', b'print-revset', False,
+                     b'print generated revset and exit (DEPRECATED)'))
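
The extension above hinges on ``extensions.wrapfunction()`` to interpose on ``logcmdutil.getrevs``. A minimal sketch of that wrapping pattern, with the target function and its signature taken from the code above (the wrapper name ``_wrapped`` is hypothetical):

    from mercurial import extensions, logcmdutil

    def _wrapped(orig, repo, pats, opts):
        # ``orig`` is the unwrapped logcmdutil.getrevs; call it first,
        # then inspect or replace its return value before handing it back.
        revs, filematcher = orig(repo, pats, opts)
        return revs, filematcher

    extensions.wrapfunction(logcmdutil, 'getrevs', _wrapped)
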
--- a/tests/run-tests.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/run-tests.py	Tue Sep 04 12:16:28 2018 -0400
@@ -64,6 +64,7 @@
 import threading
 import time
 import unittest
+import uuid
 import xml.dom.minidom as minidom
 
 try:
@@ -285,12 +286,12 @@
 
-    If path does not exist, return an empty set.
+    If path does not exist, return an empty list.
     """
-    cases = set()
+    cases = []
     try:
         with open(path, 'rb') as f:
             for l in f:
                 if l.startswith(b'#testcases '):
-                    cases.update(l[11:].split())
+                    cases.append(sorted(l[11:].split()))
     except IOError as ex:
         if ex.errno != errno.ENOENT:
             raise
@@ -394,11 +395,6 @@
         metavar="HG",
         help="test using specified hg script rather than a "
              "temporary installation")
-    # This option should be deleted once test-check-py3-compat.t and other
-    # Python 3 tests run with Python 3.
-    hgconf.add_argument("--with-python3", metavar="PYTHON3",
-        help="Python 3 interpreter (if running under Python 2)"
-             " (TEMPORARY)")
 
     reporting = parser.add_argument_group('Results Reporting')
     reporting.add_argument("-C", "--annotate", action="store_true",
@@ -532,27 +528,6 @@
         if PYTHON3:
             parser.error(
                 '--py3k-warnings can only be used on Python 2.7')
-    if options.with_python3:
-        if PYTHON3:
-            parser.error('--with-python3 cannot be used when executing with '
-                         'Python 3')
-
-        options.with_python3 = canonpath(options.with_python3)
-        # Verify Python3 executable is acceptable.
-        proc = subprocess.Popen([options.with_python3, b'--version'],
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT)
-        out, _err = proc.communicate()
-        ret = proc.wait()
-        if ret != 0:
-            parser.error('could not determine version of python 3')
-        if not out.startswith('Python '):
-            parser.error('unexpected output from python3 --version: %s' %
-                         out)
-        vers = version.LooseVersion(out[len('Python '):])
-        if vers < version.LooseVersion('3.5.0'):
-            parser.error('--with-python3 version must be 3.5.0 or greater; '
-                         'got %s' % out)
 
     if options.blacklist:
         options.blacklist = parselistfiles(options.blacklist, 'blacklist')
@@ -1068,7 +1043,10 @@
         env["HGUSER"]   = "test"
         env["HGENCODING"] = "ascii"
         env["HGENCODINGMODE"] = "strict"
+        env["HGHOSTNAME"] = "test-hostname"
         env['HGIPV6'] = str(int(self._useipv6))
+        if 'HGCATAPULTSERVERPIPE' not in env:
+            env['HGCATAPULTSERVERPIPE'] = os.devnull
 
         extraextensions = []
         for opt in self._extraconfigopts:
@@ -1242,14 +1220,15 @@
 
     def __init__(self, path, *args, **kwds):
         # accept an extra "case" parameter
-        case = kwds.pop('case', None)
+        case = kwds.pop('case', [])
         self._case = case
-        self._allcases = parsettestcases(path)
+        self._allcases = {x for y in parsettestcases(path) for x in y}
         super(TTest, self).__init__(path, *args, **kwds)
         if case:
-            self.name = '%s#%s' % (self.name, _strpath(case))
-            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
-            self._tmpname += b'-%s' % case
+            casepath = b'#'.join(case)
+            self.name = '%s#%s' % (self.name, _strpath(casepath))
+            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
+            self._tmpname += b'-%s' % casepath
         self._have = {}
 
     @property
@@ -1323,10 +1302,10 @@
         reqs = []
         for arg in args:
             if arg.startswith(b'no-') and arg[3:] in self._allcases:
-                if arg[3:] == self._case:
+                if arg[3:] in self._case:
                     return False
             elif arg in self._allcases:
-                if arg != self._case:
+                if arg not in self._case:
                     return False
             else:
                 reqs.append(arg)
@@ -1342,6 +1321,24 @@
                 script.append(b'%s %d 0\n' % (salt, line))
             else:
                 script.append(b'echo %s %d $?\n' % (salt, line))
+        active = []
+        session = str(uuid.uuid4())
+        if PYTHON3:
+            session = session.encode('ascii')
+        def toggletrace(cmd):
+            if isinstance(cmd, str):
+                quoted = shellquote(cmd.strip())
+            else:
+                quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
+            quoted = quoted.replace(b'\\', b'\\\\')
+            if active:
+                script.append(
+                    b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
+                        session, active[0]))
+                script.append(
+                    b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
+                        session, quoted))
+                active[0:] = [quoted]
 
         script = []
 
@@ -1369,11 +1366,36 @@
             script.append(b'alias hg="%s"\n' % self._hgcommand)
         if os.getenv('MSYSTEM'):
             script.append(b'alias pwd="pwd -W"\n')
+
+        hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
+        if hgcatapult and hgcatapult != os.devnull:
+            # Kludge: use a while loop to keep the pipe from getting
+            # closed by our echo commands. The still-running file gets
+            # reaped at the end of the script, which causes the while
+            # loop to exit and closes the pipe. Sigh.
+            script.append(
+                b'rtendtracing() {\n'
+                b'  echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+                b'  rm -f "$TESTTMP/.still-running"\n'
+                b'}\n'
+                b'trap "rtendtracing" 0\n'
+                b'touch "$TESTTMP/.still-running"\n'
+                b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
+                b'> $HGCATAPULTSERVERPIPE &\n'
+                b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
+                b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+                % {
+                    'name': self.name,
+                    'session': session,
+                }
+            )
+
         if self._case:
+            casestr = b'#'.join(self._case)
             if isinstance(self._case, str):
-                quoted = shellquote(self._case)
+                quoted = shellquote(casestr)
             else:
-                quoted = shellquote(self._case.decode('utf8')).encode('utf8')
+                quoted = shellquote(casestr.decode('utf8')).encode('utf8')
             script.append(b'TESTCASE=%s\n' % quoted)
             script.append(b'export TESTCASE\n')
 
@@ -1433,10 +1455,12 @@
                 prepos = pos
                 pos = n
                 addsalt(n, False)
-                cmd = l[4:].split()
+                rawcmd = l[4:]
+                cmd = rawcmd.split()
+                toggletrace(rawcmd)
                 if len(cmd) == 2 and cmd[0] == b'cd':
                     l = b'  $ cd %s || exit 1\n' % cmd[1]
-                script.append(l[4:])
+                script.append(rawcmd)
             elif l.startswith(b'  > '): # continuations
                 after.setdefault(prepos, []).append(l)
                 script.append(l[4:])
@@ -1455,7 +1479,6 @@
         if skipping is not None:
             after.setdefault(pos, []).append('  !!! missing #endif\n')
         addsalt(n + 1, False)
-
         return salt, script, after, expected
 
     def _processoutput(self, exitcode, output, salt, after, expected):
@@ -2562,9 +2585,6 @@
         osenvironb[b"BINDIR"] = self._bindir
         osenvironb[b"PYTHON"] = PYTHON
 
-        if self.options.with_python3:
-            osenvironb[b'PYTHON3'] = self.options.with_python3
-
         fileb = _bytespath(__file__)
         runtestdir = os.path.abspath(os.path.dirname(fileb))
         osenvironb[b'RUNTESTDIR'] = runtestdir
@@ -2666,31 +2686,42 @@
                 expanded_args.append(arg)
         args = expanded_args
 
-        testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.]+))')
+        testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
         tests = []
         for t in args:
-            case = None
+            case = []
 
             if not (os.path.basename(t).startswith(b'test-')
                     and (t.endswith(b'.py') or t.endswith(b'.t'))):
 
                 m = testcasepattern.match(t)
                 if m is not None:
-                    t, _, case = m.groups()
+                    t, _, casestr = m.groups()
+                    if casestr:
+                        case = casestr.split(b'#')
                 else:
                     continue
 
             if t.endswith(b'.t'):
                 # .t file may contain multiple test cases
-                cases = sorted(parsettestcases(t))
-                if cases:
-                    if case is not None and case in cases:
-                        tests += [{'path': t, 'case': case}]
-                    elif case is not None and case not in cases:
+                casedimensions = parsettestcases(t)
+                if casedimensions:
+                    cases = []
+                    def addcases(case, casedimensions):
+                        if not casedimensions:
+                            cases.append(case)
+                        else:
+                            for c in casedimensions[0]:
+                                addcases(case + [c], casedimensions[1:])
+                    addcases([], casedimensions)
+                    if case and case in cases:
+                        cases = [case]
+                    elif case:
                         # Ignore invalid cases
-                        pass
+                        cases = []
                     else:
-                        tests += [{'path': t, 'case': c} for c in sorted(cases)]
+                        pass
+                    tests += [{'path': t, 'case': c} for c in sorted(cases)]
                 else:
                     tests.append({'path': t})
             else:
@@ -2701,7 +2732,7 @@
         def _reloadtest(test, i):
             # convert a test back to its description dict
             desc = {'path': test.path}
-            case = getattr(test, '_case', None)
+            case = getattr(test, '_case', [])
             if case:
                 desc['case'] = case
             return self._gettest(desc, i)
@@ -2713,7 +2744,8 @@
                     desc = testdescs[0]
                     # desc['path'] is a relative path
                     if 'case' in desc:
-                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
+                        casestr = b'#'.join(desc['case'])
+                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                     else:
                         errpath = b'%s.err' % desc['path']
                     errpath = os.path.join(self._outputdir, errpath)
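
The recursive ``addcases()`` helper above expands the per-``#testcases``-line dimensions returned by ``parsettestcases()`` into their cartesian product. A hand-traced sketch of that expansion (the dimension values are illustrative only, and ``cases`` is passed explicitly here instead of being closed over):

    def addcases(case, casedimensions, cases):
        # Same recursion as above: peel off one dimension per level.
        if not casedimensions:
            cases.append(case)
        else:
            for c in casedimensions[0]:
                addcases(case + [c], casedimensions[1:], cases)

    cases = []
    addcases([], [[b'obsstore-off', b'obsstore-on'], [b'v1', b'v2']], cases)
    assert cases == [
        [b'obsstore-off', b'v1'],
        [b'obsstore-off', b'v2'],
        [b'obsstore-on', b'v1'],
        [b'obsstore-on', b'v2'],
    ]
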
--- a/tests/simplestorerepo.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/simplestorerepo.py	Tue Sep 04 12:16:28 2018 -0400
@@ -22,6 +22,7 @@
     nullrev,
 )
 from mercurial.thirdparty import (
+    attr,
     cbor,
 )
 from mercurial import (
@@ -60,6 +61,19 @@
     if not isinstance(rev, int):
         raise ValueError('expected int')
 
+@interfaceutil.implementer(repository.irevisiondelta)
+@attr.s(slots=True, frozen=True)
+class simplestorerevisiondelta(object):
+    node = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    linknode = attr.ib()
+    flags = attr.ib()
+    baserevisionsize = attr.ib()
+    revision = attr.ib()
+    delta = attr.ib()
+
 @interfaceutil.implementer(repository.ifilestorage)
 class filestorage(object):
     """Implements storage for a tracked path.
@@ -91,7 +105,6 @@
 
         # This is used by changegroup code :/
         self._generaldelta = True
-        self.storedeltachains = False
 
         self.version = 1
 
@@ -228,7 +241,7 @@
         p1node = self.parents(self.node(rev))[0]
         return self.rev(p1node)
 
-    def candelta(self, baserev, rev):
+    def _candelta(self, baserev, rev):
         validaterev(baserev)
         validaterev(rev)
 
@@ -500,6 +513,54 @@
         return mdiff.textdiff(self.revision(node1, raw=True),
                               self.revision(node2, raw=True))
 
+    def emitrevisiondeltas(self, requests):
+        for request in requests:
+            node = request.node
+            rev = self.rev(node)
+
+            if request.basenode == nullid:
+                baserev = nullrev
+            elif request.basenode is not None:
+                baserev = self.rev(request.basenode)
+            else:
+                # This is a test extension and we can do simple things
+                # for choosing a delta parent.
+                baserev = self.deltaparent(rev)
+
+                if baserev != nullrev and not self._candelta(baserev, rev):
+                    baserev = nullrev
+
+            revision = None
+            delta = None
+            baserevisionsize = None
+
+            if self.iscensored(baserev) or self.iscensored(rev):
+                try:
+                    revision = self.revision(node, raw=True)
+                except error.CensoredNodeError as e:
+                    revision = e.tombstone
+
+                if baserev != nullrev:
+                    baserevisionsize = self.rawsize(baserev)
+
+            elif baserev == nullrev:
+                revision = self.revision(node, raw=True)
+            else:
+                delta = self.revdiff(baserev, rev)
+
+            extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
+
+            yield simplestorerevisiondelta(
+                node=node,
+                p1node=request.p1node,
+                p2node=request.p2node,
+                linknode=request.linknode,
+                basenode=self.node(baserev),
+                flags=self.flags(rev) | extraflags,
+                baserevisionsize=baserevisionsize,
+                revision=revision,
+                delta=delta)
+
     def headrevs(self):
         # Assume all revisions are heads by default.
         revishead = {rev: True for rev in self._indexbyrev}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-edit-lines.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,61 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ hg init repo1
+  $ cd repo1
+
+Make some commits:
+
+  $ for i in 1 2 3; do
+  >   echo $i >> a
+  >   hg commit -A a -m "commit $i" -q
+  > done
+
+absorb --edit-lines will run the editor if a filename is provided:
+
+  $ hg absorb --edit-lines
+  nothing applied
+  [1]
+  $ HGEDITOR=cat hg absorb --edit-lines a
+  HG: editing a
+  HG: "y" means the line to the right exists in the changeset to the top
+  HG:
+  HG: /---- 4ec16f85269a commit 1
+  HG: |/--- 5c5f95224a50 commit 2
+  HG: ||/-- 43f0a75bede7 commit 3
+  HG: |||
+      yyy : 1
+       yy : 2
+        y : 3
+  nothing applied
+  [1]
+
+Edit the file using --edit-lines:
+
+  $ cat > editortext << EOF
+  >       y : a
+  >      yy :  b
+  >      y  : c
+  >     yy  : d  
+  >     y y : e
+  >     y   : f
+  >     yyy : g
+  > EOF
+  $ HGEDITOR='cat editortext >' hg absorb -q --edit-lines a
+  $ hg cat -r 0 a
+  d  
+  e
+  f
+  g
+  $ hg cat -r 1 a
+   b
+  c
+  d  
+  g
+  $ hg cat -r 2 a
+  a
+   b
+  e
+  g
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-filefixupstate.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,207 @@
+from __future__ import absolute_import, print_function
+
+import itertools
+from mercurial import pycompat
+from hgext import absorb
+
+class simplefctx(object):
+    def __init__(self, content):
+        self.content = content
+
+    def data(self):
+        return self.content
+
+def insertreturns(x):
+    # insert "\n"s after each single char
+    if isinstance(x, bytes):
+        return b''.join(ch + b'\n' for ch in pycompat.bytestr(x))
+    else:
+        return pycompat.maplist(insertreturns, x)
+
+def removereturns(x):
+    # the revert of "insertreturns"
+    if isinstance(x, bytes):
+        return x.replace(b'\n', b'')
+    else:
+        return pycompat.maplist(removereturns, x)
+
+def assertlistequal(lhs, rhs, decorator=lambda x: x):
+    if lhs != rhs:
+        raise RuntimeError('mismatch:\n actual:   %r\n expected: %r'
+                           % tuple(map(decorator, [lhs, rhs])))
+
+def testfilefixup(oldcontents, workingcopy, expectedcontents, fixups=None):
+    """([str], str, [str], [(rev, a1, a2, b1, b2)]?) -> None
+
+    workingcopy is a string, of which every character denotes a single line.
+
+    oldcontents, expectedcontents are lists of strings, every character of
+    every string denots a single line.
+
+    if fixups is not None, it's the expected fixups list and will be checked.
+    """
+    expectedcontents = insertreturns(expectedcontents)
+    oldcontents = insertreturns(oldcontents)
+    workingcopy = insertreturns(workingcopy)
+    state = absorb.filefixupstate(pycompat.maplist(simplefctx, oldcontents))
+    state.diffwith(simplefctx(workingcopy))
+    if fixups is not None:
+        assertlistequal(state.fixups, fixups)
+    state.apply()
+    assertlistequal(state.finalcontents, expectedcontents, removereturns)
+
+def buildcontents(linesrevs):
+    # linesrevs: [(linecontent : str, revs : [int])]
+    revs = set(itertools.chain(*[revs for line, revs in linesrevs]))
+    return [b''] + [
+        b''.join([l for l, rs in linesrevs if r in rs])
+        for r in sorted(revs)
+    ]
+
+# input case 0: one single commit
+case0 = [b'', b'11']
+
+# replace a single chunk
+testfilefixup(case0, b'', [b'', b''])
+testfilefixup(case0, b'2', [b'', b'2'])
+testfilefixup(case0, b'22', [b'', b'22'])
+testfilefixup(case0, b'222', [b'', b'222'])
+
+# input case 1: 3 lines, each commit adds one line
+case1 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2, 3]),
+    (b'3', [      3]),
+])
+
+# 1:1 line mapping
+testfilefixup(case1, b'123', case1)
+testfilefixup(case1, b'12c', [b'', b'1', b'12', b'12c'])
+testfilefixup(case1, b'1b3', [b'', b'1', b'1b', b'1b3'])
+testfilefixup(case1, b'1bc', [b'', b'1', b'1b', b'1bc'])
+testfilefixup(case1, b'a23', [b'', b'a', b'a2', b'a23'])
+testfilefixup(case1, b'a2c', [b'', b'a', b'a2', b'a2c'])
+testfilefixup(case1, b'ab3', [b'', b'a', b'ab', b'ab3'])
+testfilefixup(case1, b'abc', [b'', b'a', b'ab', b'abc'])
+
+# non 1:1 edits
+testfilefixup(case1, b'abcd', case1)
+testfilefixup(case1, b'ab', case1)
+
+# deletion
+testfilefixup(case1, b'',   [b'', b'', b'', b''])
+testfilefixup(case1, b'1',  [b'', b'1', b'1', b'1'])
+testfilefixup(case1, b'2',  [b'', b'', b'2', b'2'])
+testfilefixup(case1, b'3',  [b'', b'', b'', b'3'])
+testfilefixup(case1, b'13', [b'', b'1', b'1', b'13'])
+
+# replaces
+testfilefixup(case1, b'1bb3', [b'', b'1', b'1bb', b'1bb3'])
+
+# (confusing) replaces
+testfilefixup(case1, b'1bbb', case1)
+testfilefixup(case1, b'bbbb', case1)
+testfilefixup(case1, b'bbb3', case1)
+testfilefixup(case1, b'1b', case1)
+testfilefixup(case1, b'bb', case1)
+testfilefixup(case1, b'b3', case1)
+
+# insertions at the beginning and the end
+testfilefixup(case1, b'123c', [b'', b'1', b'12', b'123c'])
+testfilefixup(case1, b'a123', [b'', b'a1', b'a12', b'a123'])
+
+# (confusing) insertions
+testfilefixup(case1, b'1a23', case1)
+testfilefixup(case1, b'12b3', case1)
+
+# input case 2: delete in the middle
+case2 = buildcontents([
+    (b'11', [1, 2]),
+    (b'22', [1   ]),
+    (b'33', [1, 2]),
+])
+
+# deletion (the optimization code should make it 2 chunks)
+testfilefixup(case2, b'', [b'', b'22', b''],
+              fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)])
+
+# 1:1 line mapping
+testfilefixup(case2, b'aaaa', [b'', b'aa22aa', b'aaaa'])
+
+# non 1:1 edits
+# note: unlike case0, the chunk is not "continuous" and no edit is allowed
+testfilefixup(case2, b'aaa', case2)
+
+# input case 3: rev 3 reverts rev 2
+case3 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2   ]),
+    (b'3', [1, 2, 3]),
+])
+
+# 1:1 line mapping
+testfilefixup(case3, b'13', case3)
+testfilefixup(case3, b'1b', [b'', b'1b', b'12b', b'1b'])
+testfilefixup(case3, b'a3', [b'', b'a3', b'a23', b'a3'])
+testfilefixup(case3, b'ab', [b'', b'ab', b'a2b', b'ab'])
+
+# non 1:1 edits
+testfilefixup(case3, b'a', case3)
+testfilefixup(case3, b'abc', case3)
+
+# deletion
+testfilefixup(case3, b'', [b'', b'', b'2', b''])
+
+# insertion
+testfilefixup(case3, b'a13c', [b'', b'a13c', b'a123c', b'a13c'])
+
+# input case 4: a slightly complex case
+case4 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2, 3]),
+    (b'3', [1, 2,  ]),
+    (b'4', [1,    3]),
+    (b'5', [      3]),
+    (b'6', [   2, 3]),
+    (b'7', [   2   ]),
+    (b'8', [   2, 3]),
+    (b'9', [      3]),
+])
+
+testfilefixup(case4, b'1245689', case4)
+testfilefixup(case4, b'1a2456bbb', case4)
+testfilefixup(case4, b'1abc5689', case4)
+testfilefixup(case4, b'1ab5689', [b'', b'134', b'1a3678', b'1ab5689'])
+testfilefixup(case4, b'aa2bcd8ee', [b'', b'aa34', b'aa23d78', b'aa2bcd8ee'])
+testfilefixup(case4, b'aa2bcdd8ee',[b'', b'aa34', b'aa23678', b'aa24568ee'])
+testfilefixup(case4, b'aaaaaa', case4)
+testfilefixup(case4, b'aa258b', [b'', b'aa34', b'aa2378', b'aa258b'])
+testfilefixup(case4, b'25bb', [b'', b'34', b'23678', b'25689'])
+testfilefixup(case4, b'27', [b'', b'34', b'23678', b'245689'])
+testfilefixup(case4, b'28', [b'', b'34', b'2378', b'28'])
+testfilefixup(case4, b'', [b'', b'34', b'37', b''])
+
+# input case 5: replace a small chunk which is near a deleted line
+case5 = buildcontents([
+    (b'12', [1, 2]),
+    (b'3',  [1]),
+    (b'4',  [1, 2]),
+])
+
+testfilefixup(case5, b'1cd4', [b'', b'1cd34', b'1cd4'])
+
+# input case 6: base "changeset" is immutable
+case6 = [b'1357', b'0125678']
+
+testfilefixup(case6, b'0125678', case6)
+testfilefixup(case6, b'0a25678', case6)
+testfilefixup(case6, b'0a256b8', case6)
+testfilefixup(case6, b'abcdefg', [b'1357', b'a1c5e7g'])
+testfilefixup(case6, b'abcdef', case6)
+testfilefixup(case6, b'', [b'1357', b'157'])
+testfilefixup(case6, b'0123456789', [b'1357', b'0123456789'])
+
+# input case 7: change an empty file
+case7 = [b'']
+
+testfilefixup(case7, b'1', case7)
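
For reference, a hand-traced call of the ``buildcontents()`` helper defined above (the expected value follows from the code, not from a test run): the leading empty string is the synthetic empty base revision, and each following string holds the lines present in that revision.

    assert buildcontents([
        (b'1', [1, 2]),
        (b'2', [   2]),
    ]) == [b'', b'1', b'12']
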
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-phase.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,30 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > drawdag=$RUNTESTDIR/drawdag.py
+  > EOF
+
+  $ hg init
+  $ hg debugdrawdag <<'EOS'
+  > C
+  > |
+  > B
+  > |
+  > A
+  > EOS
+
+  $ hg phase -r A --public -q
+  $ hg phase -r C --secret --force -q
+
+  $ hg update C -q
+  $ printf B1 > B
+
+  $ hg absorb -q
+
+  $ hg log -G -T '{desc} {phase}'
+  @  C secret
+  |
+  o  B draft
+  |
+  o  A public
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-rename.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,359 @@
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=1
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ sedi() { # workaround check-code
+  > pattern="$1"
+  > shift
+  > for i in "$@"; do
+  >     sed "$pattern" "$i" > "$i".tmp
+  >     mv "$i".tmp "$i"
+  > done
+  > }
+
+rename a to b, then b to a
+
+  $ hg init repo1
+  $ cd repo1
+
+  $ echo 1 > a
+  $ hg ci -A a -m 1
+  $ hg mv a b
+  $ echo 2 >> b
+  $ hg ci -m 2
+  $ hg mv b a
+  $ echo 3 >> a
+  $ hg ci -m 3
+
+  $ hg annotate -ncf a
+  0 eff892de26ec a: 1
+  1 bf56e1f4f857 b: 2
+  2 0b888b00216c a: 3
+
+  $ sedi 's/$/a/' a
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,3 +0,3 @@
+  eff892d -1
+  bf56e1f -2
+  0b888b0 -3
+  eff892d +1a
+  bf56e1f +2a
+  0b888b0 +3a
+
+  $ hg status
+
+  $ hg annotate -ncf a
+  0 5d1c5620e6f2 a: 1a
+  1 9a14ffe67ae9 b: 2a
+  2 9191d121a268 a: 3a
+
+when the first changeset is public
+
+  $ hg phase --public -r 0
+
+  $ sedi 's/a/A/' a
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,3 +0,3 @@
+          -1a
+  9a14ffe -2a
+  9191d12 -3a
+          +1A
+  9a14ffe +2A
+  9191d12 +3A
+
+  $ hg diff
+  diff --git a/a b/a
+  --- a/a
+  +++ b/a
+  @@ -1,3 +1,3 @@
+  -1a
+  +1A
+   2A
+   3A
+
+copy a to b
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ echo 1 > a
+  $ hg ci -A a -m 1
+  $ hg cp a b
+  $ echo 2 >> b
+  $ hg ci -m 2
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:17b72129ab68 2
+  0:eff892de26ec 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/b/' b
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,1 +0,1 @@
+  eff892d -1
+  eff892d +1a
+  showing changes for b
+          @@ -0,2 +0,2 @@
+          -1
+  17b7212 -2
+          +1b
+  17b7212 +2b
+
+  $ hg diff
+  diff --git a/b b/b
+  --- a/b
+  +++ b/b
+  @@ -1,2 +1,2 @@
+  -1
+  +1b
+   2b
+
+copy b to a
+
+  $ cd ..
+  $ hg init repo3
+  $ cd repo3
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ echo 2 >> a
+  $ hg ci -m 2
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:e62c256d8b24 2
+  0:55105f940d5c 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/a/' b
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,2 +0,2 @@
+          -1
+  e62c256 -2
+          +1a
+  e62c256 +2a
+  showing changes for b
+          @@ -0,1 +0,1 @@
+  55105f9 -1
+  55105f9 +1a
+
+  $ hg diff
+  diff --git a/a b/a
+  --- a/a
+  +++ b/a
+  @@ -1,2 +1,2 @@
+  -1
+  +1a
+   2a
+
+"move" b to both a and c, follow a - sorted alphabetically
+
+  $ cd ..
+  $ hg init repo4
+  $ cd repo4
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ hg cp b c
+  $ hg rm b
+  $ echo 2 >> a
+  $ echo 3 >> c
+  $ hg commit -m cp
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:366daad8e679 cp
+  0:55105f940d5c 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/c/' c
+
+  $ hg absorb -pq
+  showing changes for a
+          @@ -0,2 +0,2 @@
+  55105f9 -1
+  366daad -2
+  55105f9 +1a
+  366daad +2a
+  showing changes for c
+          @@ -0,2 +0,2 @@
+          -1
+  366daad -3
+          +1c
+  366daad +3c
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  1:70606019f91b cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |   1a
+  |  +2a
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1a
+  |  +1
+  |  +3c
+  |
+  o  0:bfb67c3539c1 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1a
+  
+running absorb again applies the change to c
+
+  $ hg absorb -pq
+  showing changes for c
+          @@ -0,1 +0,1 @@
+  7060601 -1
+  7060601 +1c
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  1:8bd536cce368 cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |   1a
+  |  +2a
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1a
+  |  +1c
+  |  +3c
+  |
+  o  0:bfb67c3539c1 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1a
+  
+"move" b to a, c and d, follow d if a gets renamed to e, and c is deleted
+
+  $ cd ..
+  $ hg init repo5
+  $ cd repo5
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ hg cp b c
+  $ hg cp b d
+  $ hg rm b
+  $ echo 2 >> a
+  $ echo 3 >> c
+  $ echo 4 >> d
+  $ hg commit -m cp
+  $ hg mv a e
+  $ hg rm c
+  $ hg commit -m mv
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  2:49911557c471 mv
+  1:7bc3d43ede83 cp
+  0:55105f940d5c 1
+
+  $ sedi 's/$/e/' e
+  $ sedi 's/$/d/' d
+
+  $ hg absorb -pq
+  showing changes for d
+          @@ -0,2 +0,2 @@
+  55105f9 -1
+  7bc3d43 -4
+  55105f9 +1d
+  7bc3d43 +4d
+  showing changes for e
+          @@ -0,2 +0,2 @@
+          -1
+  7bc3d43 -2
+          +1e
+  7bc3d43 +2e
+
+  $ hg diff
+  diff --git a/e b/e
+  --- a/e
+  +++ b/e
+  @@ -1,2 +1,2 @@
+  -1
+  +1e
+   2e
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  2:34be9b0c786e mv
+  |  diff --git a/c b/c
+  |  deleted file mode 100644
+  |  --- a/c
+  |  +++ /dev/null
+  |  @@ -1,2 +0,0 @@
+  |  -1
+  |  -3
+  |  diff --git a/a b/e
+  |  rename from a
+  |  rename to e
+  |
+  o  1:13e56db5948d cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |  -1d
+  |  +1
+  |  +2e
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1d
+  |  +1
+  |  +3
+  |  diff --git a/b b/d
+  |  copy from b
+  |  copy to d
+  |  --- a/b
+  |  +++ b/d
+  |  @@ -1,1 +1,2 @@
+  |   1d
+  |  +4d
+  |
+  o  0:0037613a5dc6 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1d
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-strip.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,45 @@
+Do not strip innocent children. See https://bitbucket.org/facebook/hg-experimental/issues/6/hg-absorb-merges-diverged-commits
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > drawdag=$RUNTESTDIR/drawdag.py
+  > EOF
+
+  $ hg init
+  $ hg debugdrawdag << EOF
+  > E
+  > |
+  > D F
+  > |/
+  > C
+  > |
+  > B
+  > |
+  > A
+  > EOF
+
+  $ hg up E -q
+  $ echo 1 >> B
+  $ echo 2 >> D
+  $ hg absorb
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+
+  $ hg log -G -T '{desc}'
+  @  E
+  |
+  o  D
+  |
+  o  C
+  |
+  o  B
+  |
+  | o  F
+  | |
+  | o  C
+  | |
+  | o  B
+  |/
+  o  A
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,451 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ sedi() { # workaround check-code
+  > pattern="$1"
+  > shift
+  > for i in "$@"; do
+  >     sed "$pattern" "$i" > "$i".tmp
+  >     mv "$i".tmp "$i"
+  > done
+  > }
+
+  $ hg init repo1
+  $ cd repo1
+
+Do not crash with an empty repo:
+
+  $ hg absorb
+  abort: no changeset to change
+  [255]
+
+Make some commits:
+
+  $ for i in 1 2 3 4 5; do
+  >   echo $i >> a
+  >   hg commit -A a -m "commit $i" -q
+  > done
+
+  $ hg annotate a
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+Change a few lines:
+
+  $ cat > a <<EOF
+  > 1a
+  > 2b
+  > 3
+  > 4d
+  > 5e
+  > EOF
+
+Preview absorb changes:
+
+  $ hg absorb --print-changes --dry-run
+  showing changes for a
+          @@ -0,2 +0,2 @@
+  4ec16f8 -1
+  5c5f952 -2
+  4ec16f8 +1a
+  5c5f952 +2b
+          @@ -3,2 +3,2 @@
+  ad8b8b7 -4
+  4f55fa6 -5
+  ad8b8b7 +4d
+  4f55fa6 +5e
+
+Run absorb:
+
+  $ hg absorb
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+  $ hg annotate a
+  0: 1a
+  1: 2b
+  2: 3
+  3: 4d
+  4: 5e
+
+Delete a few lines; related commits will be removed if they become empty:
+
+  $ cat > a <<EOF
+  > 2b
+  > 4d
+  > EOF
+  $ hg absorb
+  saved backup bundle to * (glob)
+  3 of 3 chunk(s) applied
+  $ hg annotate a
+  1: 2b
+  2: 4d
+  $ hg log -T '{rev} {desc}\n' -Gp
+  @  2 commit 4
+  |  diff -r 1cae118c7ed8 -r 58a62bade1c6 a
+  |  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  |  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  |  @@ -1,1 +1,2 @@
+  |   2b
+  |  +4d
+  |
+  o  1 commit 2
+  |  diff -r 84add69aeac0 -r 1cae118c7ed8 a
+  |  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  |  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  |  @@ -0,0 +1,1 @@
+  |  +2b
+  |
+  o  0 commit 1
+  
+
+Non 1:1 map changes will be ignored:
+
+  $ echo 1 > a
+  $ hg absorb
+  nothing applied
+  [1]
+
+Insertions:
+
+  $ cat > a << EOF
+  > insert before 2b
+  > 2b
+  > 4d
+  > insert aftert 4d
+  > EOF
+  $ hg absorb -q
+  $ hg status
+  $ hg annotate a
+  1: insert before 2b
+  1: 2b
+  2: 4d
+  2: insert aftert 4d
+
+Bookmarks are moved:
+
+  $ hg bookmark -r 1 b1
+  $ hg bookmark -r 2 b2
+  $ hg bookmark ba
+  $ hg bookmarks
+     b1                        1:b35060a57a50
+     b2                        2:946e4bc87915
+   * ba                        2:946e4bc87915
+  $ sedi 's/insert/INSERT/' a
+  $ hg absorb -q
+  $ hg status
+  $ hg bookmarks
+     b1                        1:a4183e9b3d31
+     b2                        2:c9b20c925790
+   * ba                        2:c9b20c925790
+
+Non-modified files are ignored:
+
+  $ touch b
+  $ hg commit -A b -m b
+  $ touch c
+  $ hg add c
+  $ hg rm b
+  $ hg absorb
+  nothing applied
+  [1]
+  $ sedi 's/INSERT/Insert/' a
+  $ hg absorb
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+  $ hg status
+  A c
+  R b
+
+Public commits will not be changed:
+
+  $ hg phase -p 1
+  $ sedi 's/Insert/insert/' a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -0,1 +0,1 @@
+          -Insert before 2b
+          +insert before 2b
+          @@ -3,1 +3,1 @@
+  85b4e0e -Insert aftert 4d
+  85b4e0e +insert aftert 4d
+  $ hg absorb
+  saved backup bundle to * (glob)
+  1 of 2 chunk(s) applied
+  $ hg diff -U 0
+  diff -r 1c8eadede62a a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	* (glob)
+  @@ -1,1 +1,1 @@
+  -Insert before 2b
+  +insert before 2b
+  $ hg annotate a
+  1: Insert before 2b
+  1: 2b
+  2: 4d
+  2: insert aftert 4d
+
+Make working copy clean:
+
+  $ hg revert -q -C a b
+  $ hg forget c
+  $ rm c
+  $ hg status
+
+Merge commit will not be changed:
+
+  $ echo 1 > m1
+  $ hg commit -A m1 -m m1
+  $ hg bookmark -q -i m1
+  $ hg update -q '.^'
+  $ echo 2 > m2
+  $ hg commit -q -A m2 -m m2
+  $ hg merge -q m1
+  $ hg commit -m merge
+  $ hg bookmark -d m1
+  $ hg log -G -T '{rev} {desc} {phase}\n'
+  @    6 merge draft
+  |\
+  | o  5 m2 draft
+  | |
+  o |  4 m1 draft
+  |/
+  o  3 b draft
+  |
+  o  2 commit 4 draft
+  |
+  o  1 commit 2 public
+  |
+  o  0 commit 1 public
+  
+  $ echo 2 >> m1
+  $ echo 2 >> m2
+  $ hg absorb
+  abort: no changeset to change
+  [255]
+  $ hg revert -q -C m1 m2
+
+Use a new repo:
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+Make some commits to multiple files:
+
+  $ for f in a b; do
+  >   for i in 1 2; do
+  >     echo $f line $i >> $f
+  >     hg commit -A $f -m "commit $f $i" -q
+  >   done
+  > done
+
+Use a pattern to select files to be fixed up:
+
+  $ sedi 's/line/Line/' a b
+  $ hg status
+  M a
+  M b
+  $ hg absorb a
+  saved backup bundle to * (glob)
+  1 of 1 chunk(s) applied
+  $ hg status
+  M b
+  $ hg absorb --exclude b
+  nothing applied
+  [1]
+  $ hg absorb b
+  saved backup bundle to * (glob)
+  1 of 1 chunk(s) applied
+  $ hg status
+  $ cat a b
+  a Line 1
+  a Line 2
+  b Line 1
+  b Line 2
+
+Test config option absorb.max-stack-size:
+
+  $ sedi 's/Line/line/' a b
+  $ hg log -T '{rev}:{node} {desc}\n'
+  3:712d16a8f445834e36145408eabc1d29df05ec09 commit b 2
+  2:74cfa6294160149d60adbf7582b99ce37a4597ec commit b 1
+  1:28f10dcf96158f84985358a2e5d5b3505ca69c22 commit a 2
+  0:f9a81da8dc53380ed91902e5b82c1b36255a4bd0 commit a 1
+  $ hg --config absorb.max-stack-size=1 absorb -pn
+  absorb: only the recent 1 changesets will be analysed
+  showing changes for a
+          @@ -0,2 +0,2 @@
+          -a Line 1
+          -a Line 2
+          +a line 1
+          +a line 2
+  showing changes for b
+          @@ -0,2 +0,2 @@
+          -b Line 1
+  712d16a -b Line 2
+          +b line 1
+  712d16a +b line 2
+
+Test obsolete marker creation:
+
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution=createmarkers
+  > [absorb]
+  > add-noise=1
+  > EOF
+
+  $ hg --config absorb.max-stack-size=3 absorb
+  absorb: only the recent 3 changesets will be analysed
+  2 of 2 chunk(s) applied
+  $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n'
+  6:3dfde4199b46 commit b 2 712d16a8f445834e36145408eabc1d29df05ec09
+  5:99cfab7da5ff commit b 1 74cfa6294160149d60adbf7582b99ce37a4597ec
+  4:fec2b3bd9e08 commit a 2 28f10dcf96158f84985358a2e5d5b3505ca69c22
+  0:f9a81da8dc53 commit a 1 
+  $ hg absorb
+  1 of 1 chunk(s) applied
+  $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n'
+  10:e1c8c1e030a4 commit b 2 3dfde4199b4610ea6e3c6fa9f5bdad8939d69524
+  9:816c30955758 commit b 1 99cfab7da5ffdaf3b9fc6643b14333e194d87f46
+  8:5867d584106b commit a 2 fec2b3bd9e0834b7cb6a564348a0058171aed811
+  7:8c76602baf10 commit a 1 f9a81da8dc53380ed91902e5b82c1b36255a4bd0
+
+Executable files:
+
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=True
+  > EOF
+  $ cd ..
+  $ hg init repo3
+  $ cd repo3
+
+#if execbit
+  $ echo > foo.py
+  $ chmod +x foo.py
+  $ hg add foo.py
+  $ hg commit -mfoo
+#else
+  $ hg import -q --bypass - <<EOF
+  > # HG changeset patch
+  > foo
+  > 
+  > diff --git a/foo.py b/foo.py
+  > new file mode 100755
+  > --- /dev/null
+  > +++ b/foo.py
+  > @@ -0,0 +1,1 @@
+  > +
+  > EOF
+  $ hg up -q
+#endif
+
+  $ echo bla > foo.py
+  $ hg absorb --dry-run --print-changes
+  showing changes for foo.py
+          @@ -0,1 +0,1 @@
+  99b4ae7 -
+  99b4ae7 +bla
+  $ hg absorb
+  1 of 1 chunk(s) applied
+  $ hg diff -c .
+  diff --git a/foo.py b/foo.py
+  new file mode 100755
+  --- /dev/null
+  +++ b/foo.py
+  @@ -0,0 +1,1 @@
+  +bla
+  $ hg diff
+
+Removing lines may delete changesets:
+
+  $ cd ..
+  $ hg init repo4
+  $ cd repo4
+  $ cat > a <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m a12 -A a
+  $ cat > b <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m b12 -A b
+  $ echo 3 >> b
+  $ hg commit -m b3
+  $ echo 4 >> b
+  $ hg commit -m b4
+  $ echo 1 > b
+  $ echo 3 >> a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -2,0 +2,1 @@
+  bfafb49 +3
+  showing changes for b
+          @@ -1,3 +1,0 @@
+  1154859 -2
+  30970db -3
+  a393a58 -4
+  $ hg absorb -v | grep became
+  bfafb49242db: 1 file(s) changed, became 1a2de97fc652
+  115485984805: 2 file(s) changed, became 0c930dfab74c
+  30970dbf7b40: became empty and was dropped
+  a393a58b9a85: became empty and was dropped
+  $ hg log -T '{rev} {desc}\n' -Gp
+  @  5 b12
+  |  diff --git a/b b/b
+  |  new file mode 100644
+  |  --- /dev/null
+  |  +++ b/b
+  |  @@ -0,0 +1,1 @@
+  |  +1
+  |
+  o  4 a12
+     diff --git a/a b/a
+     new file mode 100644
+     --- /dev/null
+     +++ b/a
+     @@ -0,0 +1,3 @@
+     +1
+     +2
+     +3
+  
+
+Use revert to make the current change and its parent disappear.
+This should move us to the non-obsolete ancestor.
+
+  $ cd ..
+  $ hg init repo5
+  $ cd repo5
+  $ cat > a <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m a12 -A a
+  $ hg id
+  bfafb49242db tip
+  $ echo 3 >> a
+  $ hg commit -m a123 a
+  $ echo 4 >> a
+  $ hg commit -m a1234 a
+  $ hg id
+  82dbe7fd19f0 tip
+  $ hg revert -r 0 a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -2,2 +2,0 @@
+  f1c23dd -3
+  82dbe7f -4
+  $ hg absorb --verbose
+  f1c23dd5d08d: became empty and was dropped
+  82dbe7fd19f0: became empty and was dropped
+  a: 1 of 1 chunk(s) applied
+  $ hg id
+  bfafb49242db tip
--- a/tests/test-add.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-add.t	Tue Sep 04 12:16:28 2018 -0400
@@ -12,6 +12,9 @@
   $ hg forget a
   $ hg add
   adding a
+  $ hg forget a
+  $ hg add --color debug
+  [addremove.added ui.status|adding a]
   $ hg st
   A a
   $ mkdir dir
--- a/tests/test-addremove.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-addremove.t	Tue Sep 04 12:16:28 2018 -0400
@@ -69,6 +69,12 @@
   removing c
   adding d
   recording removal of a as rename to b (100% similar)
+  $ hg addremove -ns 50 --color debug
+  [addremove.removed ui.status|removing a]
+  [addremove.added ui.status|adding b]
+  [addremove.removed ui.status|removing c]
+  [addremove.added ui.status|adding d]
+  [ ui.status|recording removal of a as rename to b (100% similar)]
   $ hg addremove -s 50
   removing a
   adding b
--- a/tests/test-alias.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-alias.t	Tue Sep 04 12:16:28 2018 -0400
@@ -651,81 +651,15 @@
 
   $ hg --invalid root
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
   $ hg --invalid mylog
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
   $ hg --invalid blank
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
 
 environment variable changes in alias commands
--- a/tests/test-amend.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-amend.t	Tue Sep 04 12:16:28 2018 -0400
@@ -331,3 +331,37 @@
   ? missing_content2_content2-untracked
   ? missing_content2_content3-untracked
   ? missing_missing_content3-untracked
+
+==========================================
+Test history-editing-backup config option
+==========================================
+  $ hg init $TESTTMP/repo4
+  $ cd $TESTTMP/repo4
+  $ echo a>a
+  $ hg ci -Aqma
+  $ echo oops>b
+  $ hg ci -Aqm "b"
+  $ echo partiallyfixed > b
+
+#if obsstore-off
+  $ hg amend
+  saved backup bundle to $TESTTMP/repo4/.hg/strip-backup/95e899acf2ce-f11cb050-amend.hg
+When history-editing-backup config option is set to False:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ echo fixed > b
+  $ hg amend
+
+#else
+  $ hg amend
+When history-editing-backup config option is set to False:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ echo fixed > b
+  $ hg amend
+
+#endif
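
The new block exercises the ui.history-editing-backup option: when it is
false, amend stops writing the strip-backup bundle that the obsstore-off
branch otherwise reports. A sketch of reading the knob through the normal
config API, for script or extension authors (assumes a Mercurial checkout on
sys.path; the explicit True default mirrors the behaviour the test shows
before the option is set):

  from mercurial import ui as uimod

  u = uimod.ui.load()   # loads system and user hgrc files
  backup = u.configbool(b'ui', b'history-editing-backup', True)
  print('backup bundles enabled' if backup else 'backup bundles disabled')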
--- a/tests/test-annotate.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-annotate.t	Tue Sep 04 12:16:28 2018 -0400
@@ -56,7 +56,6 @@
   $ hg annotate -Tjson a
   [
    {
-    "abspath": "a",
     "lines": [{"line": "a\n", "rev": 0}],
     "path": "a"
    }
@@ -65,8 +64,7 @@
   $ hg annotate -Tjson -cdfnul a
   [
    {
-    "abspath": "a",
-    "lines": [{"date": [1.0, 0], "file": "a", "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "rev": 0, "user": "nobody"}],
+    "lines": [{"date": [1.0, 0], "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
     "path": "a"
    }
   ]
@@ -127,12 +125,10 @@
   $ hg annotate -Tjson a b
   [
    {
-    "abspath": "a",
     "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
     "path": "a"
    },
    {
-    "abspath": "b",
     "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
     "path": "b"
    }
@@ -140,7 +136,7 @@
 
 annotate multiple files (template)
 
-  $ hg annotate -T'== {abspath} ==\n{lines % "{rev}: {line}"}' a b
+  $ hg annotate -T'== {path} ==\n{lines % "{rev}: {line}"}' a b
   == a ==
   0: a
   1: a
@@ -568,7 +564,6 @@
   $ hg annotate -ncr "wdir()" -Tjson foo
   [
    {
-    "abspath": "foo",
     "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
     "path": "foo"
    }
@@ -870,11 +865,9 @@
   $ hg annotate -Tjson binary empty
   [
    {
-    "abspath": "binary",
     "path": "binary"
    },
    {
-    "abspath": "empty",
     "lines": [],
     "path": "empty"
    }
--- a/tests/test-backout.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-backout.t	Tue Sep 04 12:16:28 2018 -0400
@@ -147,8 +147,8 @@
   $ hg debugstate --nodates
   n 644         12 set                 c
   $ hg backout -d '6 0' -m 'to be rollback-ed soon' -r .
+  removing c
   adding b
-  removing c
   changeset 6:4bfec048029d backs out changeset 5:fac0b729a654
   $ hg rollback -q
   $ hg status -A
--- a/tests/test-bad-extension.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-bad-extension.t	Tue Sep 04 12:16:28 2018 -0400
@@ -72,23 +72,56 @@
   $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS'
   BADEXTS: badext badext2
 
+#if no-extraextensions
 show traceback for ImportError of hgext.name if devel.debug.extensions is set
 
   $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
   > | grep -v '^ ' \
   > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import'
+  debug.extensions: loading extensions
+  debug.extensions: - processing 5 entries
+  debug.extensions:   - loading extension: 'gpg'
+  debug.extensions:   > 'gpg' extension loaded in * (glob)
+  debug.extensions:     - validating extension tables: 'gpg'
+  debug.extensions:     - invoking registered callbacks: 'gpg'
+  debug.extensions:     > callbacks completed in * (glob)
+  debug.extensions:   - loading extension: 'badext'
   *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
   Traceback (most recent call last):
   Exception: bit bucket overflow
-  could not import hgext.badext2 (No module named *badext2): trying hgext3rd.badext2 (glob)
+  debug.extensions:   - loading extension: 'baddocext'
+  debug.extensions:   > 'baddocext' extension loaded in * (glob)
+  debug.extensions:     - validating extension tables: 'baddocext'
+  debug.extensions:     - invoking registered callbacks: 'baddocext'
+  debug.extensions:     > callbacks completed in * (glob)
+  debug.extensions:   - loading extension: 'badext2'
+  debug.extensions:     - could not import hgext.badext2 (No module named badext2): trying hgext3rd.badext2
   Traceback (most recent call last):
   ImportError: No module named *badext2 (glob)
-  could not import hgext3rd.badext2 (No module named *badext2): trying badext2 (glob)
+  debug.extensions:     - could not import hgext3rd.badext2 (No module named badext2): trying badext2
   Traceback (most recent call last):
   ImportError: No module named *badext2 (glob)
   *** failed to import extension badext2: No module named badext2
   Traceback (most recent call last):
   ImportError: No module named badext2
+  debug.extensions: > loaded 2 extensions, total time * (glob)
+  debug.extensions: - loading configtable attributes
+  debug.extensions: - executing uisetup hooks
+  debug.extensions:   - running uisetup for 'gpg'
+  debug.extensions:   > uisetup for 'gpg' took * (glob)
+  debug.extensions:   - running uisetup for 'baddocext'
+  debug.extensions:   > uisetup for 'baddocext' took * (glob)
+  debug.extensions: - executing extsetup hooks
+  debug.extensions:   - running extsetup for 'gpg'
+  debug.extensions:   > extsetup for 'gpg' took * (glob)
+  debug.extensions:   - running extsetup for 'baddocext'
+  debug.extensions:   > extsetup for 'baddocext' took * (glob)
+  debug.extensions: - executing remaining aftercallbacks
+  debug.extensions: > remaining aftercallbacks completed in * (glob)
+  debug.extensions: - loading extension registration objects
+  debug.extensions: > extension registration object loading took * (glob)
+  debug.extensions: extension loading complete
+#endif
 
 confirm that there's no crash when an extension's documentation is bad
 
--- a/tests/test-bookmarks-current.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-bookmarks-current.t	Tue Sep 04 12:16:28 2018 -0400
@@ -222,3 +222,26 @@
      Z                         0:719295282060
   $ hg parents -q
   4:8fa964221e8e
+
+Check commands to retrieve the active bookmark
+----------------------------------------------
+
+show how the "{activebookmark}" template is unsuitable for the task
+
+  $ hg book -T '- {activebookmark}\n'
+  - 
+  - Y
+  - 
+
+  $ hg book -r . W
+  $ hg book -T '- {activebookmark}\n'
+  - Y
+  - 
+  - Y
+  - 
+
+  $ hg bookmarks --active
+  Y
+  $ hg bookmarks --inactive
+  $ hg bookmarks --active
+  [1]
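
The point of the test above: `{activebookmark}` is evaluated once per
bookmark line, so its output changes shape as bookmarks are added, while the
new --active flag answers the question directly and exits 1 (printing
nothing) when no bookmark is active. A small Python 3 sketch of consuming it
from a script (assumes hg is on PATH and the working directory is a
repository):

  import subprocess

  def activebookmark():
      """Return the active bookmark name, or None if nothing is active."""
      res = subprocess.run(['hg', 'bookmarks', '--active'],
                           capture_output=True, text=True)
      if res.returncode != 0:   # exits 1 with no output when inactive
          return None
      return res.stdout.strip()

  print(activebookmark())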
--- a/tests/test-bookmarks-pushpull.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-bookmarks-pushpull.t	Tue Sep 04 12:16:28 2018 -0400
@@ -345,7 +345,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (leaving bookmark V)
   $ hg push -B . ../a
-  abort: no active bookmark
+  abort: no active bookmark!
   [255]
   $ hg update -r V
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-bookmarks.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-bookmarks.t	Tue Sep 04 12:16:28 2018 -0400
@@ -151,6 +151,31 @@
   summary:     0
   
 
+"." is expanded to the active bookmark:
+
+  $ hg log -r 'bookmark(.)'
+  changeset:   1:925d80f479bb
+  bookmark:    X2
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     1
+  
+
+but "literal:." is not since "." seems not a literal bookmark:
+
+  $ hg log -r 'bookmark("literal:.")'
+  abort: bookmark '.' does not exist!
+  [255]
+
+"." should fail if there's no active bookmark:
+
+  $ hg bookmark --inactive
+  $ hg log -r 'bookmark(.)'
+  abort: no active bookmark!
+  [255]
+  $ hg log -r 'present(bookmark(.))'
+
   $ hg log -r 'bookmark(unknown)'
   abort: bookmark 'unknown' does not exist!
   [255]
@@ -166,6 +191,12 @@
   $ hg help revsets | grep 'bookmark('
       "bookmark([name])"
 
+reactivate "X2"
+
+  $ hg update X2
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (activating bookmark X2)
+
 bookmarks X and X2 moved to rev 1, Y at rev -1
 
   $ hg bookmarks
@@ -229,7 +260,7 @@
   $ hg book rename-me
   $ hg book -i rename-me
   $ hg book -m . renamed
-  abort: no active bookmark
+  abort: no active bookmark!
   [255]
   $ hg up -q Y
   $ hg book -d rename-me
@@ -249,7 +280,7 @@
   $ hg book delete-me
   $ hg book -i delete-me
   $ hg book -d .
-  abort: no active bookmark
+  abort: no active bookmark!
   [255]
   $ hg up -q Y
   $ hg book -d delete-me
--- a/tests/test-bundle.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-bundle.t	Tue Sep 04 12:16:28 2018 -0400
@@ -796,13 +796,13 @@
   057f4db07f61970e1c11e83be79e9d08adc4dc31
   bundle2-output-bundle: "HG20", (1 params) 2 parts total
   bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
-  bundling: 1/2 changesets (50.00%)
-  bundling: 2/2 changesets (100.00%)
-  bundling: 1/2 manifests (50.00%)
-  bundling: 2/2 manifests (100.00%)
-  bundling: b 1/3 files (33.33%)
-  bundling: b1 2/3 files (66.67%)
-  bundling: x 3/3 files (100.00%)
+  changesets: 1/2 chunks (50.00%)
+  changesets: 2/2 chunks (100.00%)
+  manifests: 1/2 chunks (50.00%)
+  manifests: 2/2 chunks (100.00%)
+  files: b 1/3 files (33.33%)
+  files: b1 2/3 files (66.67%)
+  files: x 3/3 files (100.00%)
   bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
 
 #if repobundlerepo
--- a/tests/test-bundle2-format.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-bundle2-format.t	Tue Sep 04 12:16:28 2018 -0400
@@ -873,17 +873,17 @@
   bundle2-output-part: "changegroup" (advisory) streamed payload
   bundle2-output: part 0: "changegroup"
   bundle2-output: header chunk size: 18
-  bundling: 1/4 changesets (25.00%)
-  bundling: 2/4 changesets (50.00%)
-  bundling: 3/4 changesets (75.00%)
-  bundling: 4/4 changesets (100.00%)
-  bundling: 1/4 manifests (25.00%)
-  bundling: 2/4 manifests (50.00%)
-  bundling: 3/4 manifests (75.00%)
-  bundling: 4/4 manifests (100.00%)
-  bundling: D 1/3 files (33.33%)
-  bundling: E 2/3 files (66.67%)
-  bundling: H 3/3 files (100.00%)
+  changesets: 1/4 chunks (25.00%)
+  changesets: 2/4 chunks (50.00%)
+  changesets: 3/4 chunks (75.00%)
+  changesets: 4/4 chunks (100.00%)
+  manifests: 1/4 chunks (25.00%)
+  manifests: 2/4 chunks (50.00%)
+  manifests: 3/4 chunks (75.00%)
+  manifests: 4/4 chunks (100.00%)
+  files: D 1/3 files (33.33%)
+  files: E 2/3 files (66.67%)
+  files: H 3/3 files (100.00%)
   bundle2-output: payload chunk size: 1555
   bundle2-output: closing payload chunk
   bundle2-output: end of bundle
--- a/tests/test-casefolding.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-casefolding.t	Tue Sep 04 12:16:28 2018 -0400
@@ -52,7 +52,8 @@
   $ hg ci -Am addb D/b
   $ hg mv D/b d/b
   D/b: not overwriting - file already committed
-  (hg rename --force to replace the file by recording a rename)
+  ('hg rename --force' to replace the file by recording a rename)
+  [1]
   $ hg mv D/b d/c
   $ hg st
   A D/c
--- a/tests/test-cat.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-cat.t	Tue Sep 04 12:16:28 2018 -0400
@@ -65,7 +65,7 @@
 
 Test template output
 
-  $ hg --cwd tmp cat ../b ../c -T '== {path} ({abspath}) r{rev} ==\n{data}'
+  $ hg --cwd tmp cat ../b ../c -T '== {path|relpath} ({path}) r{rev} ==\n{data}'
   == ../b (b) r2 ==
   1
   == ../c (c) r2 ==
@@ -74,12 +74,10 @@
   $ hg cat b c -Tjson --output -
   [
    {
-    "abspath": "b",
     "data": "1\n",
     "path": "b"
    },
    {
-    "abspath": "c",
     "data": "3\n",
     "path": "c"
    }
@@ -89,7 +87,6 @@
   $ cat tmp/b.json
   [
    {
-    "abspath": "b",
     "data": "1\n",
     "path": "b"
    }
@@ -97,7 +94,6 @@
   $ cat tmp/c.json
   [
    {
-    "abspath": "c",
     "data": "3\n",
     "path": "c"
    }
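
The cat and annotate hunks reflect one templater change: the abspath keyword
is gone, {path} is now the repository-relative name, and the old cwd-relative
spelling becomes {path|relpath}. Scripts consuming the JSON output need the
same one-line migration; a Python 3 sketch (assumes hg on PATH, run from a
repository containing the files):

  import json
  import subprocess

  out = subprocess.check_output(['hg', 'cat', 'b', 'c', '-Tjson'])
  for entry in json.loads(out):
      # 'abspath' no longer exists; 'path' is repo-relative.
      print(entry['path'], repr(entry['data']))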
--- a/tests/test-cbor.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-cbor.py	Tue Sep 04 12:16:28 2018 -0400
@@ -1,6 +1,5 @@
 from __future__ import absolute_import
 
-import io
 import unittest
 
 from mercurial.thirdparty import (
@@ -10,10 +9,17 @@
     cborutil,
 )
 
+class TestCase(unittest.TestCase):
+    if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
+        # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
+        # the regex version.
+        assertRaisesRegex = (# camelcase-required
+            unittest.TestCase.assertRaisesRegexp)
+
 def loadit(it):
     return cbor.loads(b''.join(it))
 
-class BytestringTests(unittest.TestCase):
+class BytestringTests(TestCase):
     def testsimple(self):
         self.assertEqual(
             list(cborutil.streamencode(b'foobar')),
@@ -23,11 +29,20 @@
             loadit(cborutil.streamencode(b'foobar')),
             b'foobar')
 
+        self.assertEqual(cborutil.decodeall(b'\x46foobar'),
+                         [b'foobar'])
+
+        self.assertEqual(cborutil.decodeall(b'\x46foobar\x45fizbi'),
+                         [b'foobar', b'fizbi'])
+
     def testlong(self):
         source = b'x' * 1048576
 
         self.assertEqual(loadit(cborutil.streamencode(source)), source)
 
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
     def testfromiter(self):
         # This is the example from RFC 7049 Section 2.2.2.
         source = [b'\xaa\xbb\xcc\xdd', b'\xee\xff\x99']
@@ -47,6 +62,25 @@
             loadit(cborutil.streamencodebytestringfromiter(source)),
             b''.join(source))
 
+        self.assertEqual(cborutil.decodeall(b'\x5f\x44\xaa\xbb\xcc\xdd'
+                                            b'\x43\xee\xff\x99\xff'),
+                         [b'\xaa\xbb\xcc\xdd', b'\xee\xff\x99', b''])
+
+        for i, chunk in enumerate(
+            cborutil.decodeall(b'\x5f\x44\xaa\xbb\xcc\xdd'
+                               b'\x43\xee\xff\x99\xff')):
+            self.assertIsInstance(chunk, cborutil.bytestringchunk)
+
+            if i == 0:
+                self.assertTrue(chunk.isfirst)
+            else:
+                self.assertFalse(chunk.isfirst)
+
+            if i == 2:
+                self.assertTrue(chunk.islast)
+            else:
+                self.assertFalse(chunk.islast)
+
     def testfromiterlarge(self):
         source = [b'a' * 16, b'b' * 128, b'c' * 1024, b'd' * 1048576]
 
@@ -71,52 +105,417 @@
             source, chunksize=42))
         self.assertEqual(cbor.loads(dest), source)
 
-    def testreadtoiter(self):
-        source = io.BytesIO(b'\x5f\x44\xaa\xbb\xcc\xdd\x43\xee\xff\x99\xff')
+        self.assertEqual(b''.join(cborutil.decodeall(dest)), source)
+
+        for chunk in cborutil.decodeall(dest):
+            self.assertIsInstance(chunk, cborutil.bytestringchunk)
+            self.assertIn(len(chunk), (0, 8, 42))
+
+        encoded = b'\x5f\xff'
+        b = cborutil.decodeall(encoded)
+        self.assertEqual(b, [b''])
+        self.assertTrue(b[0].isfirst)
+        self.assertTrue(b[0].islast)
+
+    def testdecodevariouslengths(self):
+        for i in (0, 1, 22, 23, 24, 25, 254, 255, 256, 65534, 65535, 65536):
+            source = b'x' * i
+            encoded = b''.join(cborutil.streamencode(source))
+
+            if len(source) < 24:
+                hlen = 1
+            elif len(source) < 256:
+                hlen = 2
+            elif len(source) < 65536:
+                hlen = 3
+            elif len(source) < 1048576:
+                hlen = 5
+
+            self.assertEqual(cborutil.decodeitem(encoded),
+                             (True, source, hlen + len(source),
+                              cborutil.SPECIAL_NONE))
+
+    def testpartialdecode(self):
+        encoded = b''.join(cborutil.streamencode(b'foobar'))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
+                         (True, b'foobar', 7, cborutil.SPECIAL_NONE))
+
+    def testpartialdecodevariouslengths(self):
+        lens = [
+            2,
+            3,
+            10,
+            23,
+            24,
+            25,
+            31,
+            100,
+            254,
+            255,
+            256,
+            257,
+            16384,
+            65534,
+            65535,
+            65536,
+            65537,
+            131071,
+            131072,
+            131073,
+            1048575,
+            1048576,
+            1048577,
+        ]
+
+        for size in lens:
+            if size < 24:
+                hlen = 1
+            elif size < 2**8:
+                hlen = 2
+            elif size < 2**16:
+                hlen = 3
+            elif size < 2**32:
+                hlen = 5
+            else:
+                assert False
+
+            source = b'x' * size
+            encoded = b''.join(cborutil.streamencode(source))
+
+            res = cborutil.decodeitem(encoded[0:1])
+
+            if hlen > 1:
+                self.assertEqual(res, (False, None, -(hlen - 1),
+                                       cborutil.SPECIAL_NONE))
+            else:
+                self.assertEqual(res, (False, None, -(size + hlen - 1),
+                                       cborutil.SPECIAL_NONE))
+
+            # Decoding partial header reports remaining header size.
+            for i in range(hlen - 1):
+                self.assertEqual(cborutil.decodeitem(encoded[0:i + 1]),
+                                 (False, None, -(hlen - i - 1),
+                                  cborutil.SPECIAL_NONE))
+
+            # Decoding complete header reports item size.
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen]),
+                             (False, None, -size, cborutil.SPECIAL_NONE))
 
-        it = cborutil.readindefinitebytestringtoiter(source)
-        self.assertEqual(next(it), b'\xaa\xbb\xcc\xdd')
-        self.assertEqual(next(it), b'\xee\xff\x99')
+            # Decoding single byte after header reports item size - 1
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + 1]),
+                             (False, None, -(size - 1), cborutil.SPECIAL_NONE))
+
+            # Decoding all but the last byte reports -1 needed.
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + size - 1]),
+                             (False, None, -1, cborutil.SPECIAL_NONE))
+
+            # Decoding last byte retrieves value.
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + size]),
+                             (True, source, hlen + size, cborutil.SPECIAL_NONE))
+
+    def testindefinitepartialdecode(self):
+        encoded = b''.join(cborutil.streamencodebytestringfromiter(
+            [b'foobar', b'biz']))
+
+        # First item should be the start-of-indefinite-bytestring special.
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, None, 1,
+                          cborutil.SPECIAL_START_INDEFINITE_BYTESTRING))
+
+        # Second item should be the first chunk. But only available when
+        # we give it 7 bytes (1 byte header + 6 byte chunk).
+        self.assertEqual(cborutil.decodeitem(encoded[1:2]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:3]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:4]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:5]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:6]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:7]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+
+        self.assertEqual(cborutil.decodeitem(encoded[1:8]),
+                         (True, b'foobar', 7, cborutil.SPECIAL_NONE))
+
+        # Third item should be second chunk. But only available when
+        # we give it 4 bytes (1 byte header + 3 byte chunk).
+        self.assertEqual(cborutil.decodeitem(encoded[8:9]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[8:10]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[8:11]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+
+        self.assertEqual(cborutil.decodeitem(encoded[8:12]),
+                         (True, b'biz', 4, cborutil.SPECIAL_NONE))
+
+        # Fourth item should be end of indefinite stream marker.
+        self.assertEqual(cborutil.decodeitem(encoded[12:13]),
+                         (True, None, 1, cborutil.SPECIAL_INDEFINITE_BREAK))
+
+        # Now test the behavior when going through the decoder.
 
-        with self.assertRaises(StopIteration):
-            next(it)
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:1]),
+                         (False, 1, 0))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:2]),
+                         (False, 1, 6))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:3]),
+                         (False, 1, 5))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:4]),
+                         (False, 1, 4))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:5]),
+                         (False, 1, 3))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:6]),
+                         (False, 1, 2))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:7]),
+                         (False, 1, 1))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:8]),
+                         (True, 8, 0))
+
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:9]),
+                         (True, 8, 3))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:10]),
+                         (True, 8, 2))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:11]),
+                         (True, 8, 1))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:12]),
+                         (True, 12, 0))
+
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:13]),
+                         (True, 13, 0))
 
-class IntTests(unittest.TestCase):
+        decoder = cborutil.sansiodecoder()
+        decoder.decode(encoded[0:8])
+        values = decoder.getavailable()
+        self.assertEqual(values, [b'foobar'])
+        self.assertTrue(values[0].isfirst)
+        self.assertFalse(values[0].islast)
+
+        self.assertEqual(decoder.decode(encoded[8:12]),
+                         (True, 4, 0))
+        values = decoder.getavailable()
+        self.assertEqual(values, [b'biz'])
+        self.assertFalse(values[0].isfirst)
+        self.assertFalse(values[0].islast)
+
+        self.assertEqual(decoder.decode(encoded[12:]),
+                         (True, 1, 0))
+        values = decoder.getavailable()
+        self.assertEqual(values, [b''])
+        self.assertFalse(values[0].isfirst)
+        self.assertTrue(values[0].islast)
+
+class StringTests(TestCase):
+    def testdecodeforbidden(self):
+        encoded = b'\x63foo'
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'string major type not supported'):
+            cborutil.decodeall(encoded)
+
+class IntTests(TestCase):
     def testsmall(self):
         self.assertEqual(list(cborutil.streamencode(0)), [b'\x00'])
+        self.assertEqual(cborutil.decodeall(b'\x00'), [0])
+
         self.assertEqual(list(cborutil.streamencode(1)), [b'\x01'])
+        self.assertEqual(cborutil.decodeall(b'\x01'), [1])
+
         self.assertEqual(list(cborutil.streamencode(2)), [b'\x02'])
+        self.assertEqual(cborutil.decodeall(b'\x02'), [2])
+
         self.assertEqual(list(cborutil.streamencode(3)), [b'\x03'])
+        self.assertEqual(cborutil.decodeall(b'\x03'), [3])
+
         self.assertEqual(list(cborutil.streamencode(4)), [b'\x04'])
+        self.assertEqual(cborutil.decodeall(b'\x04'), [4])
+
+        # Multiple value decode works.
+        self.assertEqual(cborutil.decodeall(b'\x00\x01\x02\x03\x04'),
+                         [0, 1, 2, 3, 4])
 
     def testnegativesmall(self):
         self.assertEqual(list(cborutil.streamencode(-1)), [b'\x20'])
+        self.assertEqual(cborutil.decodeall(b'\x20'), [-1])
+
         self.assertEqual(list(cborutil.streamencode(-2)), [b'\x21'])
+        self.assertEqual(cborutil.decodeall(b'\x21'), [-2])
+
         self.assertEqual(list(cborutil.streamencode(-3)), [b'\x22'])
+        self.assertEqual(cborutil.decodeall(b'\x22'), [-3])
+
         self.assertEqual(list(cborutil.streamencode(-4)), [b'\x23'])
+        self.assertEqual(cborutil.decodeall(b'\x23'), [-4])
+
         self.assertEqual(list(cborutil.streamencode(-5)), [b'\x24'])
+        self.assertEqual(cborutil.decodeall(b'\x24'), [-5])
+
+        # Multiple value decode works.
+        self.assertEqual(cborutil.decodeall(b'\x20\x21\x22\x23\x24'),
+                         [-1, -2, -3, -4, -5])
 
     def testrange(self):
         for i in range(-70000, 70000, 10):
-            self.assertEqual(
-                b''.join(cborutil.streamencode(i)),
-                cbor.dumps(i))
+            encoded = b''.join(cborutil.streamencode(i))
+
+            self.assertEqual(encoded, cbor.dumps(i))
+            self.assertEqual(cborutil.decodeall(encoded), [i])
+
+    def testdecodepartialubyte(self):
+        encoded = b''.join(cborutil.streamencode(250))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 250, 2, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialbyte(self):
+        encoded = b''.join(cborutil.streamencode(-42))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, -42, 2, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialushort(self):
+        encoded = b''.join(cborutil.streamencode(2**15))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 2**15, 3, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialshort(self):
+        encoded = b''.join(cborutil.streamencode(-1024))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, -1024, 3, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialulong(self):
+        encoded = b''.join(cborutil.streamencode(2**28))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 2**28, 5, cborutil.SPECIAL_NONE))
+
+    def testdecodepartiallong(self):
+        encoded = b''.join(cborutil.streamencode(-1048580))
 
-class ArrayTests(unittest.TestCase):
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, -1048580, 5, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialulonglong(self):
+        encoded = b''.join(cborutil.streamencode(2**32))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -8, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -7, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:8]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:9]),
+                         (True, 2**32, 9, cborutil.SPECIAL_NONE))
+
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'input data not fully consumed'):
+            cborutil.decodeall(encoded[0:1])
+
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'input data not fully consumed'):
+            cborutil.decodeall(encoded[0:2])
+
+    def testdecodepartiallonglong(self):
+        encoded = b''.join(cborutil.streamencode(-7000000000))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -8, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -7, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:8]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:9]),
+                         (True, -7000000000, 9, cborutil.SPECIAL_NONE))
+
+class ArrayTests(TestCase):
     def testempty(self):
         self.assertEqual(list(cborutil.streamencode([])), [b'\x80'])
         self.assertEqual(loadit(cborutil.streamencode([])), [])
 
+        self.assertEqual(cborutil.decodeall(b'\x80'), [[]])
+
     def testbasic(self):
         source = [b'foo', b'bar', 1, -10]
 
-        self.assertEqual(list(cborutil.streamencode(source)), [
-            b'\x84', b'\x43', b'foo', b'\x43', b'bar', b'\x01', b'\x29'])
+        chunks = [
+            b'\x84', b'\x43', b'foo', b'\x43', b'bar', b'\x01', b'\x29']
+
+        self.assertEqual(list(cborutil.streamencode(source)), chunks)
+
+        self.assertEqual(cborutil.decodeall(b''.join(chunks)), [source])
 
     def testemptyfromiter(self):
         self.assertEqual(b''.join(cborutil.streamencodearrayfromiter([])),
                          b'\x9f\xff')
 
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length uint not allowed'):
+            cborutil.decodeall(b'\x9f\xff')
+
     def testfromiter1(self):
         source = [b'foo']
 
@@ -129,26 +528,193 @@
         dest = b''.join(cborutil.streamencodearrayfromiter(source))
         self.assertEqual(cbor.loads(dest), source)
 
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length uint not allowed'):
+            cborutil.decodeall(dest)
+
     def testtuple(self):
         source = (b'foo', None, 42)
+        encoded = b''.join(cborutil.streamencode(source))
 
-        self.assertEqual(cbor.loads(b''.join(cborutil.streamencode(source))),
-                         list(source))
+        self.assertEqual(cbor.loads(encoded), list(source))
+
+        self.assertEqual(cborutil.decodeall(encoded), [list(source)])
+
+    def testpartialdecode(self):
+        source = list(range(4))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 4, 1, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 4, 1, cborutil.SPECIAL_START_ARRAY))
+
+        source = list(range(23))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 23, 1, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 23, 1, cborutil.SPECIAL_START_ARRAY))
+
+        source = list(range(24))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 24, 2, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 24, 2, cborutil.SPECIAL_START_ARRAY))
 
-class SetTests(unittest.TestCase):
+        source = list(range(256))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 256, 3, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (True, 256, 3, cborutil.SPECIAL_START_ARRAY))
+
+    def testnested(self):
+        source = [[], [], [[], [], []]]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        source = [True, None, [True, 0, 2], [None], [], [[[]], -87]]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        # A set within an array.
+        source = [None, {b'foo', b'bar', None, False}, set()]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        # A map within an array.
+        source = [None, {}, {b'foo': b'bar', True: False}, [{}]]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testindefinitebytestringvalues(self):
+        # Single value array whose value is an empty indefinite bytestring.
+        encoded = b'\x81\x5f\x40\xff'
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as array values'):
+            cborutil.decodeall(encoded)
+
+class SetTests(TestCase):
     def testempty(self):
         self.assertEqual(list(cborutil.streamencode(set())), [
             b'\xd9\x01\x02',
             b'\x80',
         ])
 
+        self.assertEqual(cborutil.decodeall(b'\xd9\x01\x02\x80'), [set()])
+
     def testset(self):
         source = {b'foo', None, 42}
+        encoded = b''.join(cborutil.streamencode(source))
 
-        self.assertEqual(cbor.loads(b''.join(cborutil.streamencode(source))),
-                         source)
+        self.assertEqual(cbor.loads(encoded), source)
+
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testinvalidtag(self):
+        # Must use array to encode sets.
+        encoded = b'\xd9\x01\x02\xa0'
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'expected array after finite set '
+                                    'semantic tag'):
+            cborutil.decodeall(encoded)
+
+    def testpartialdecode(self):
+        # Semantic tag item will be 3 bytes. Set header will be variable
+        # depending on length.
+        encoded = b''.join(cborutil.streamencode({i for i in range(23)}))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (True, 23, 4, cborutil.SPECIAL_START_SET))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 23, 4, cborutil.SPECIAL_START_SET))
+
+        encoded = b''.join(cborutil.streamencode({i for i in range(24)}))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 24, 5, cborutil.SPECIAL_START_SET))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (True, 24, 5, cborutil.SPECIAL_START_SET))
 
-class BoolTests(unittest.TestCase):
+        encoded = b''.join(cborutil.streamencode({i for i in range(256)}))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (True, 256, 6, cborutil.SPECIAL_START_SET))
+
+    def testinvalidvalue(self):
+        encoded = b''.join([
+            b'\xd9\x01\x02', # semantic tag
+            b'\x81', # array of size 1
+            b'\x5f\x43foo\xff', # indefinite length bytestring "foo"
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as set values'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xd9\x01\x02',
+            b'\x81',
+            b'\x80', # empty array
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not allowed as set values'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xd9\x01\x02',
+            b'\x81',
+            b'\xa0', # empty map
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not allowed as set values'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xd9\x01\x02',
+            b'\x81',
+            b'\xd9\x01\x02\x81\x01', # set with integer 1
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not allowed as set values'):
+            cborutil.decodeall(encoded)
+
+class BoolTests(TestCase):
     def testbasic(self):
         self.assertEqual(list(cborutil.streamencode(True)),  [b'\xf5'])
         self.assertEqual(list(cborutil.streamencode(False)), [b'\xf4'])
@@ -156,23 +722,38 @@
         self.assertIs(loadit(cborutil.streamencode(True)), True)
         self.assertIs(loadit(cborutil.streamencode(False)), False)
 
-class NoneTests(unittest.TestCase):
+        self.assertEqual(cborutil.decodeall(b'\xf4'), [False])
+        self.assertEqual(cborutil.decodeall(b'\xf5'), [True])
+
+        self.assertEqual(cborutil.decodeall(b'\xf4\xf5\xf5\xf4'),
+                         [False, True, True, False])
+
+class NoneTests(TestCase):
     def testbasic(self):
         self.assertEqual(list(cborutil.streamencode(None)), [b'\xf6'])
 
         self.assertIs(loadit(cborutil.streamencode(None)), None)
 
-class MapTests(unittest.TestCase):
+        self.assertEqual(cborutil.decodeall(b'\xf6'), [None])
+        self.assertEqual(cborutil.decodeall(b'\xf6\xf6'), [None, None])
+
+class MapTests(TestCase):
     def testempty(self):
         self.assertEqual(list(cborutil.streamencode({})), [b'\xa0'])
         self.assertEqual(loadit(cborutil.streamencode({})), {})
 
+        self.assertEqual(cborutil.decodeall(b'\xa0'), [{}])
+
     def testemptyindefinite(self):
         self.assertEqual(list(cborutil.streamencodemapfromiter([])), [
             b'\xbf', b'\xff'])
 
         self.assertEqual(loadit(cborutil.streamencodemapfromiter([])), {})
 
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length uint not allowed'):
+            cborutil.decodeall(b'\xbf\xff')
+
     def testone(self):
         source = {b'foo': b'bar'}
         self.assertEqual(list(cborutil.streamencode(source)), [
@@ -180,6 +761,8 @@
 
         self.assertEqual(loadit(cborutil.streamencode(source)), source)
 
+        self.assertEqual(cborutil.decodeall(b'\xa1\x43foo\x43bar'), [source])
+
     def testmultiple(self):
         source = {
             b'foo': b'bar',
@@ -192,6 +775,9 @@
             loadit(cborutil.streamencodemapfromiter(source.items())),
             source)
 
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
     def testcomplex(self):
         source = {
             b'key': 1,
@@ -205,6 +791,194 @@
             loadit(cborutil.streamencodemapfromiter(source.items())),
             source)
 
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testnested(self):
+        source = {b'key1': None, b'key2': {b'sub1': b'sub2'}, b'sub2': {}}
+        encoded = b''.join(cborutil.streamencode(source))
+
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        source = {
+            b'key1': [],
+            b'key2': [None, False],
+            b'key3': {b'foo', b'bar'},
+            b'key4': {},
+        }
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testillegalkey(self):
+        encoded = b''.join([
+            # map header + len 1
+            b'\xa1',
+            # indefinite length bytestring "foo" in key position
+            b'\x5f\x43foo\xff'
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as map keys'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xa1',
+            b'\x80', # empty array
+            b'\x43foo',
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not supported as map keys'):
+            cborutil.decodeall(encoded)
+
+    def testillegalvalue(self):
+        encoded = b''.join([
+            b'\xa1', # map header
+            b'\x43foo', # key
+            b'\x5f\x43bar\xff', # indefinite length value
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as map values'):
+            cborutil.decodeall(encoded)
+
+    def testpartialdecode(self):
+        source = {b'key1': b'value1'}
+        encoded = b''.join(cborutil.streamencode(source))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 1, 1, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 1, 1, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(23)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 23, 1, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(24)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 24, 2, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 24, 2, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(256)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 256, 3, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (True, 256, 3, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(65536)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 65536, 5, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (True, 65536, 5, cborutil.SPECIAL_START_MAP))
+
+class SemanticTagTests(TestCase):
+    def testdecodeforbidden(self):
+        for i in range(500):
+            if i == cborutil.SEMANTIC_TAG_FINITE_SET:
+                continue
+
+            tag = cborutil.encodelength(cborutil.MAJOR_TYPE_SEMANTIC, i)
+
+            encoded = tag + cborutil.encodelength(cborutil.MAJOR_TYPE_UINT, 42)
+
+            # Partial decode is incomplete.
+            if i < 24:
+                pass
+            elif i < 256:
+                self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                                 (False, None, -1, cborutil.SPECIAL_NONE))
+            elif i < 65536:
+                self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                                 (False, None, -2, cborutil.SPECIAL_NONE))
+                self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                                 (False, None, -1, cborutil.SPECIAL_NONE))
+
+            with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                        r'semantic tag \d+ not allowed'):
+                cborutil.decodeitem(encoded)
+
+class SpecialTypesTests(TestCase):
+    def testforbiddentypes(self):
+        for i in range(256):
+            if i == cborutil.SUBTYPE_FALSE:
+                continue
+            elif i == cborutil.SUBTYPE_TRUE:
+                continue
+            elif i == cborutil.SUBTYPE_NULL:
+                continue
+
+            encoded = cborutil.encodelength(cborutil.MAJOR_TYPE_SPECIAL, i)
+
+            with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                        r'special type \d+ not allowed'):
+                cborutil.decodeitem(encoded)
+
+class SansIODecoderTests(TestCase):
+    def testemptyinput(self):
+        decoder = cborutil.sansiodecoder()
+        self.assertEqual(decoder.decode(b''), (False, 0, 0))
+
+class BufferingDecoderTests(TestCase):
+    def testsimple(self):
+        source = [
+            b'foobar',
+            b'x' * 128,
+            {b'foo': b'bar'},
+            True,
+            False,
+            None,
+            [None for i in range(128)],
+        ]
+
+        encoded = b''.join(cborutil.streamencode(source))
+
+        for step in range(1, 32):
+            decoder = cborutil.bufferingdecoder()
+            start = 0
+
+            while start < len(encoded):
+                decoder.decode(encoded[start:start + step])
+                start += step
+
+            self.assertEqual(decoder.getavailable(), [source])
+
+class DecodeallTests(TestCase):
+    def testemptyinput(self):
+        self.assertEqual(cborutil.decodeall(b''), [])
+
+    def testpartialinput(self):
+        encoded = b''.join([
+            b'\x82', # array of 2 elements
+            b'\x01', # integer 1
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'input data not complete'):
+            cborutil.decodeall(encoded)
+
 if __name__ == '__main__':
     import silenttestrunner
     silenttestrunner.main(__name__)
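
Taken together, the new assertions pin down the low-level decoding contract
these tests rely on: decodeitem(data) returns a 4-tuple
(complete, value, readcount, special); an incomplete item reports a negative
readcount whose magnitude is the number of additional bytes required, and
special distinguishes plain values from structure starts (arrays, maps, sets,
indefinite-length bytestrings), which need the stateful sansiodecoder or
bufferingdecoder instead. A minimal sketch of driving that interface over
back-to-back simple top-level values (assumes mercurial.utils.cborutil from
this checkout is importable):

  from mercurial.utils import cborutil

  def decodestream(data):
      """Decode consecutive simple top-level CBOR items from ``data``."""
      offset, values = 0, []
      while offset < len(data):
          complete, value, readcount, special = cborutil.decodeitem(
              data[offset:])
          if not complete:
              # readcount is negative: that many more bytes are needed.
              raise EOFError('need %d more bytes' % -readcount)
          if special != cborutil.SPECIAL_NONE:
              raise ValueError('structured value; use cborutil.sansiodecoder')
          values.append(value)
          offset += readcount
      return values

  print(decodestream(b'\x00\x01\x20\x43foo'))   # [0, 1, -1, b'foo']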
--- a/tests/test-check-code.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-check-code.t	Tue Sep 04 12:16:28 2018 -0400
@@ -22,7 +22,7 @@
   >>> commands = []
   >>> with open('mercurial/debugcommands.py', 'rb') as fh:
   ...     for line in fh:
-  ...         m = re.match("^@command\('([a-z]+)", line)
+  ...         m = re.match(b"^@command\('([a-z]+)", line)
   ...         if m:
   ...             commands.append(m.group(1))
   >>> scommands = list(sorted(commands))
--- a/tests/test-check-interfaces.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-check-interfaces.py	Tue Sep 04 12:16:28 2018 -0400
@@ -21,6 +21,7 @@
     verify as ziverify,
 )
 from mercurial import (
     bundlerepo,
+    changegroup,
     filelog,
     httppeer,
@@ -28,6 +29,7 @@
     manifest,
     pycompat,
     repository,
+    revlog,
     sshpeer,
     statichttprepo,
     ui as uimod,
@@ -175,6 +177,7 @@
     ziverify.verifyClass(repository.imanifestrevisionwritable,
                          manifest.memtreemanifestctx)
     ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
+    ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
 
     vfs = vfsmod.vfs(b'.')
     fl = filelog.filelog(vfs, b'dummy.i')
@@ -196,4 +199,32 @@
     # Conforms to imanifestdict.
     checkzobject(mctx.read())
 
+    mrl = manifest.manifestrevlog(vfs)
+    checkzobject(mrl)
+
+    ziverify.verifyClass(repository.irevisiondelta,
+                         revlog.revlogrevisiondelta)
+    ziverify.verifyClass(repository.irevisiondeltarequest,
+                         changegroup.revisiondeltarequest)
+
+    rd = revlog.revlogrevisiondelta(
+        node=b'',
+        p1node=b'',
+        p2node=b'',
+        basenode=b'',
+        linknode=b'',
+        flags=b'',
+        baserevisionsize=None,
+        revision=b'',
+        delta=None)
+    checkzobject(rd)
+
+    rdr = changegroup.revisiondeltarequest(
+        node=b'',
+        linknode=b'',
+        p1node=b'',
+        p2node=b'',
+        basenode=b'')
+    checkzobject(rdr)
+
 main()
--- a/tests/test-check-py3-compat.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-check-py3-compat.t	Tue Sep 04 12:16:28 2018 -0400
@@ -3,6 +3,7 @@
   $ . "$TESTDIR/helpers-testrepo.sh"
   $ cd "$TESTDIR"/..
 
+#if no-py3k
   $ testrepohg files 'set:(**.py)' \
   > -X hgdemandimport/demandimportpy2.py \
   > -X mercurial/thirdparty/cbor \
@@ -21,27 +22,26 @@
   contrib/python-zstandard/tests/test_module_attributes.py not using absolute_import
   contrib/python-zstandard/tests/test_train_dictionary.py not using absolute_import
   setup.py not using absolute_import
+#endif
 
-#if py3exe
+#if py3k
   $ testrepohg files 'set:(**.py) - grep(pygments)' \
   > -X hgdemandimport/demandimportpy2.py \
   > -X hgext/fsmonitor/pywatchman \
-  > | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py \
+  > -X mercurial/cffi \
+  > -X mercurial/thirdparty \
+  > | sed 's|\\|/|g' | xargs $PYTHON contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
-  hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob)
-  mercurial/cffi/bdiff.py: error importing: <ImportError> cannot import name '_bdiff' (error at bdiff.py:*)
-  mercurial/cffi/bdiffbuild.py: error importing: <ImportError> No module named 'cffi' (error at bdiffbuild.py:*)
-  mercurial/cffi/mpatch.py: error importing: <ImportError> cannot import name '_mpatch' (error at mpatch.py:*)
-  mercurial/cffi/mpatchbuild.py: error importing: <ImportError> No module named 'cffi' (error at mpatchbuild.py:*)
-  mercurial/cffi/osutilbuild.py: error importing: <ImportError> No module named 'cffi' (error at osutilbuild.py:*)
-  mercurial/scmwindows.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
-  mercurial/win32.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
-  mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob)
+  hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob) (?)
+  hgext/infinitepush/sqlindexapi.py: error importing: <*Error> No module named 'mysql' (error at sqlindexapi.py:*) (glob) (?)
+  mercurial/scmwindows.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
+  mercurial/win32.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
+  mercurial/windows.py: error importing: <ModuleNotFoundError> No module named 'msvcrt' (error at windows.py:*) (no-windows !)
 
 #endif
 
-#if py3exe py3pygments
+#if py3k pygments
   $ testrepohg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
-  > | xargs $PYTHON3 contrib/check-py3-compat.py \
+  > | xargs $PYTHON contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
 #endif
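Both runs above pipe the file list into contrib/check-py3-compat.py, which
imports each file and reports the exception class and location on failure. A
rough standalone sketch of that idea (not the actual script's logic):

  import importlib.util
  import traceback

  def checkfile(path):
      spec = importlib.util.spec_from_file_location('checked', path)
      mod = importlib.util.module_from_spec(spec)
      try:
          spec.loader.exec_module(mod)
      except Exception as e:
          frame = traceback.extract_tb(e.__traceback__)[-1]
          print('%s: error importing: <%s> %s (error at %s:%d)'
                % (path, type(e).__name__, e, frame.filename, frame.lineno))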
--- a/tests/test-clone-r.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-clone-r.t	Tue Sep 04 12:16:28 2018 -0400
@@ -37,7 +37,7 @@
   $ hg mv afile anotherfile
   $ hg commit -m "0.3m"
 
-  $ hg debugindex -f 1 afile
+  $ hg debugrevlogindex -f 1 afile
      rev flag     size   link     p1     p2       nodeid
        0 0000        2      0     -1     -1 362fef284ce2
        1 0000        4      1      0     -1 125144f7e028
--- a/tests/test-clone-uncompressed.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-clone-uncompressed.t	Tue Sep 04 12:16:28 2018 -0400
@@ -247,6 +247,7 @@
   sending stream_out command
   1027 files to transfer, 96.3 KB of data
   starting 4 threads for background file closing
+  updating the branch cache
   transferred 96.3 KB in * seconds (*/sec) (glob)
   query 1; heads
   sending batch command
@@ -275,6 +276,7 @@
   1030 files to transfer, 96.4 KB of data
   starting 4 threads for background file closing
   starting 4 threads for background file closing
+  updating the branch cache
   transferred 96.4 KB in * seconds (* */sec) (glob)
   bundle2-input-part: total payload size 112077
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
--- a/tests/test-clone.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-clone.t	Tue Sep 04 12:16:28 2018 -0400
@@ -47,6 +47,7 @@
   checklink (symlink !)
   checklink-target (symlink !)
   checknoexec (execbit !)
+  manifestfulltextcache (reporevlogstore !)
   rbc-names-v1
   rbc-revs-v1
 
@@ -641,7 +642,7 @@
   $ mkdir a
   $ chmod 000 a
   $ hg clone a b
-  abort: repository a not found!
+  abort: Permission denied: '$TESTTMP/fail/a/.hg'
   [255]
 
 Inaccessible destination
@@ -664,7 +665,7 @@
 
   $ mkfifo a
   $ hg clone a b
-  abort: repository a not found!
+  abort: $ENOTDIR$: '$TESTTMP/fail/a/.hg'
   [255]
   $ rm a
 
--- a/tests/test-commit-amend.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-commit-amend.t	Tue Sep 04 12:16:28 2018 -0400
@@ -824,7 +824,8 @@
   $ hg merge -q bar --config ui.interactive=True << EOF
   > c
   > EOF
-  local [working copy] changed aa which other [merge rev] deleted
+  file 'aa' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? c
   $ hg ci -m 'merge bar (with conflicts)'
   $ hg log --config diff.git=1 -pr .
--- a/tests/test-completion.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-completion.t	Tue Sep 04 12:16:28 2018 -0400
@@ -98,6 +98,7 @@
   debugknown
   debuglabelcomplete
   debuglocks
+  debugmanifestfulltextcache
   debugmergestate
   debugnamecomplete
   debugobsolete
@@ -110,6 +111,7 @@
   debugrebuildfncache
   debugrename
   debugrevlog
+  debugrevlogindex
   debugrevspec
   debugserve
   debugsetparents
@@ -248,7 +250,7 @@
   archive: no-decode, prefix, rev, type, subrepos, include, exclude
   backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
   bisect: reset, good, bad, skip, extend, command, noupdate
-  bookmarks: force, rev, delete, rename, inactive, template
+  bookmarks: force, rev, delete, rename, inactive, active, template
   branch: force, clean, rev
   branches: active, closed, template
   bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
@@ -273,17 +275,18 @@
   debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure
   debugdownload: output
   debugextensions: template
-  debugfileset: rev, all-files
+  debugfileset: rev, all-files, show-matcher, show-stage
   debugformat: template
   debugfsinfo: 
   debuggetbundle: head, common, type
   debugignore: 
-  debugindex: changelog, manifest, dir, format
+  debugindex: changelog, manifest, dir, template
   debugindexdot: changelog, manifest, dir
   debuginstall: template
   debugknown: 
   debuglabelcomplete: 
   debuglocks: force-lock, force-wlock, set-lock, set-wlock
+  debugmanifestfulltextcache: clear, add
   debugmergestate: 
   debugnamecomplete: 
   debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
@@ -296,6 +299,7 @@
   debugrebuildfncache: 
   debugrename: rev
   debugrevlog: changelog, manifest, dir, dump
+  debugrevlogindex: changelog, manifest, dir, format
   debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
   debugserve: sshstdio, logiofd, logiofile
   debugsetparents: 
@@ -327,7 +331,7 @@
   phase: public, draft, secret, force, rev
   recover: 
   rename: after, force, include, exclude, dry-run
-  resolve: all, list, mark, unmark, no-status, tool, include, exclude, template
+  resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
   revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
   rollback: dry-run, force
   root: 
--- a/tests/test-conflict.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-conflict.t	Tue Sep 04 12:16:28 2018 -0400
@@ -58,7 +58,7 @@
   # To mark files as resolved:  hg resolve --mark FILE
   
   # To continue:    hg commit
-  # To abort:       hg update --clean . (warning: this will discard uncommitted changes)
+  # To abort:       hg merge --abort
   
 
   $ cat a
--- a/tests/test-confused-revert.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-confused-revert.t	Tue Sep 04 12:16:28 2018 -0400
@@ -14,8 +14,8 @@
   R a
 
   $ hg revert --all
+  forgetting b
   undeleting a
-  forgetting b
 
 Should show b unknown and a back to normal:
 
@@ -66,8 +66,8 @@
 Revert should be ok now:
 
   $ hg revert -r2 --all
+  forgetting b
   undeleting a
-  forgetting b
 
 Should show b unknown and a marked modified (merged):
 
--- a/tests/test-contrib-perf.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-contrib-perf.t	Tue Sep 04 12:16:28 2018 -0400
@@ -55,6 +55,8 @@
                  benchmark parsing bookmarks from disk to memory
    perfbranchmap
                  benchmark the update of a branchmap
+   perfbranchmapload
+                 benchmark reading the branchmap
    perfbundleread
                  Benchmark reading of bundle files.
    perfcca       (no help text available)
@@ -82,6 +84,8 @@
                  (no help text available)
    perfheads     (no help text available)
    perfindex     (no help text available)
+   perflinelogedits
+                 (no help text available)
    perfloadmarkers
                  benchmark the time to parse the on-disk markers for a repo
    perflog       (no help text available)
@@ -156,11 +160,16 @@
 #endif
   $ hg perfheads
   $ hg perfindex
+  $ hg perflinelogedits -n 1
   $ hg perfloadmarkers
   $ hg perflog
   $ hg perflookup 2
   $ hg perflrucache
   $ hg perfmanifest 2
+  $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
+  $ hg perfmanifest -m 44fe2c8352bb
+  abort: manifest revision must be integer or full node
+  [255]
   $ hg perfmergecalculate -r 3
   $ hg perfmoonwalk
   $ hg perfnodelookup 2
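The perfmanifest abort above reflects a deliberately strict lookup: the
argument must be an integer revision or a full 40-hex node, never a short
prefix (presumably so the benchmark does not also measure prefix resolution).
A hedged sketch of that rule with a hypothetical helper, not the perf.py
source:

  from binascii import unhexlify

  def parsemanifestrev(s):
      try:
          return int(s)
      except ValueError:
          if len(s) == 40:
              return unhexlify(s)
          raise ValueError('manifest revision must be integer or full node')

  parsemanifestrev('44fe2c8352bb3a478ffd7d8350bbc721920134d1')  # full node: ok
  # parsemanifestrev('44fe2c8352bb') raises, matching the abort above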
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-contrib-relnotes.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,290 @@
+#require test-repo py3exe
+  $ . "$TESTDIR/helpers-testrepo.sh"
+
+  $ cd $TESTDIR/..
+  $ python3 contrib/relnotes 4.4 --stoprev 4.5
+  New Features
+  ============
+  
+  revert --interactive
+  --------------------
+  
+  The revert command now accepts the flag --interactive to allow reverting only
+  some of the changes to the specified files.
+  
+  Rebase with different destination per source revision
+  -----------------------------------------------------
+  
+  Previously, rebase only supported one unique destination. Now "SRC" and
+  "ALLSRC" can be used in the rebase destination revset to precisely define the
+  destination for each individual source revision.
+  
+  For example, the following command could move some orphaned changesets to
+  reasonable new places so that they are no longer orphaned:
+  
+  hg rebase   -r 'orphan()-obsolete()'   -d 'max((successors(max(roots(ALLSRC) &
+  ::SRC)^)-obsolete())::)'
+  
+  Accessing hidden changesets
+  ---------------------------
+  
+  Set config option 'experimental.directaccess = True' to access hidden
+  changesets from read-only commands.
+  
+  githelp extension
+  -----------------
+  
+  The "githelp" extension provides the "hg githelp" command. This command
+  attempts to convert a "git" command to its Mercurial equivalent. The extension
+  can be useful to Git users new to Mercurial.
+  
+  Other Changes
+  -------------
+  
+  * When interactive revert is run against a revision other than the working
+    directory parent, the diff shown is the diff to *apply* to the working
+    directory, rather than the diff to *discard* from the working copy. This is
+    in line with related user experiences with 'git' and appears to be less
+    confusing with 'ui.interface=curses'.
+  
+  * Let 'hg rebase' avoid content-divergence by skipping obsolete changesets
+    (and their descendants) when they are present in the rebase set along with
+    one of their successors but none of their successors is in destination.
+  
+  * hgweb now displays phases of non-public changesets
+  
+  * The "HGPLAINEXCEPT" environment variable can now include "color" to allow
+    automatic output colorization in otherwise automated environments.
+  
+  * A new unamend command in the uncommit extension undoes the effect of the
+    amend command by creating a new changeset matching the one that existed
+    before the amend and moving the changes that were amended to the working
+    directory.
+  
+  * A '--abort' flag to merge command to abort the ongoing merge.
+  
+  * An experimental flag '--rev' to 'hg branch' which can be used to change
+    the branch of changesets.
+  
+  Backwards Compatibility Changes
+  ===============================
+  
+  * "log --follow-first -rREV", which is deprecated, now follows the first
+    parent of merge revisions from the specified "REV" just like "log --follow
+    -rREV".
+  
+  * "log --follow -rREV FILE.." now follows file history across copies and
+    renames.
+  
+  Bug Fixes
+  =========
+  
+  Issue 5165
+  ----------
+  
+  Bookmarks whose names are longer than 255 can again be exchanged between
+  4.4+ clients and servers.
+  
+  Performance Improvements
+  ========================
+  
+  * bundle2 read I/O throughput significantly increased.
+  
+  * Significant memory use reductions when reading from bundle2 bundles.
+  
+    On the BSD repository, peak RSS during changegroup application decreased by
+    ~185 MB from ~752 MB to ~567 MB.
+  
+  API Changes
+  ===========
+  
+  * bundlerepo.bundlerepository.bundle and
+    bundlerepo.bundlerepository.bundlefile are now prefixed with an underscore.
+  
+  * Rename bundlerepo.bundlerepository.bundlefilespos to _cgfilespos.
+  
+  * dirstate no longer provides a 'dirs()' method.  To test for the existence of
+    a directory in the dirstate, use 'dirstate.hasdir(dirname)'.
+  
+  * bundle2 parts are no longer seekable by default.
+  
+  * mapping does not contain all template resources. Use context.resource() in
+    template functions.
+  
+  * "text=False|True" option is dropped from the vfs interface because of Python
+    3 compatibility issue. Use "util.tonativeeol/fromnativeeol()" to convert EOL
+    manually.
+  
+  * wireproto.streamres.__init__ no longer accepts a "reader" argument. Use the
+    "gen" argument instead.
+  
+  * exchange.getbundlechunks() now returns a 2-tuple instead of just an
+    iterator.
+  
+  
+  === commands ===
+   * amend: do not drop missing files (Bts:issue5732)
+   * amend: do not take untracked files as modified or clean (Bts:issue5732)
+   * amend: update .hgsubstate before committing a memctx (Bts:issue5677)
+   * annotate: add support to specify hidden revs if directaccess config is set
+   * bookmark: add methods to binary encode and decode bookmark values
+   * bookmark: deprecate direct update of a bookmark value
+   * bookmark: introduce a 'bookmarks' part
+   * bookmark: introduce in advance a variant of the exchange test
+   * bookmark: run 'pushkey' hooks after bookmark move, not 'prepushkey'
+   * bookmarks: add bookmarks to hidden revs if directaccess config is set
+   * bookmarks: calculate visibility exceptions only once
+   * bookmarks: display the obsfate of hidden revision we create a bookmark on
+   * bookmarks: fix pushkey compatibility mode (Bts:issue5777)
+   * bookmarks: use context managers for lock and transaction in update()
+   * bookmarks: use context managers for locks and transaction in pushbookmark()
+   * branch: allow changing branch name to existing name if possible
+   * clone: add support for storing remotenames while cloning
+   * clone: use utility function to write hgrc
+   * clonebundle: make it possible to retrieve the initial bundle through largefile
+   * commandserver: restore cwd in case of exception
+   * commandserver: unblock SIGCHLD
+   * help: deprecate ui.slash in favor of slashpath template filter (Bts:issue5572)
+   * log: allow matchfn to be non-null even if both --patch/--stat are off
+   * log: build follow-log filematcher at once
+   * log: don't expand aliases in revset built from command options
+   * log: make "slowpath" condition slightly more readable
+   * log: make opt2revset table a module constant
+   * log: merge getlogrevs() and getgraphlogrevs()
+   * log: remove temporary variable 'date' used only once
+   * log: resolve --follow thoroughly in getlogrevs()
+   * log: resolve --follow with -rREV in cmdutil.getlogrevs()
+   * log: simplify 'x or ancestors(x)' expression
+   * log: translate column labels at once (Bts:issue5750)
+   * log: use revsetlang.formatspec() thoroughly
+   * log: use revsetlang.formatspec() to concatenate list expression
+   * log: use smartset.slice() to limit number of revisions to be displayed
+   * merge: cache unknown dir checks (Bts:issue5716)
+   * merge: check created file dirs for path conflicts only once (Bts:issue5716)
+   * patch: add within-line color diff capacity
+   * patch: catch unexpected case in _inlinediff
+   * patch: do not break up multibyte character when highlighting word
+   * patch: improve heuristics to not take the word "diff" as header (Bts:issue1879)
+   * patch: reverse _inlinediff output for consistency
+   * pull: clarify that -u only updates linearly
+   * pull: hold wlock for the full operation when --update is used
+   * pull: retrieve bookmarks through the binary part when possible
+   * pull: store binary node in pullop.remotebookmarks
+   * push: include a 'check:bookmarks' part when possible
+   * push: restrict common discovery to the pushed set
+   * revert: support reverting to hidden cset if directaccess config is set
+  
+  === core ===
+   * filelog: add the ability to report the user facing name
+   * revlog: choose between ifh and dfh once for all
+   * revlog: don't use slicing to return parents
+   * revlog: group delta computation methods under _deltacomputer object
+   * revlog: group revision info into a dedicated structure
+   * revlog: introduce 'deltainfo' to distinguish from 'delta'
+   * revlog: rename 'rev' to 'base', as it is the base revision
+   * revlog: separate diff computation from the collection of other info
+   * revset: evaluate filesets against each revision for 'file()' (Bts:issue5778)
+   * revset: parse x^:: as (x^):: (Bts:issue5764)
+   * templater: look up symbols/resources as if they were separated (Bts:issue5699)
+   * transaction: register summary callbacks only at start of transaction (BC)
+   * util: whitelist NTFS for hardlink creation (Bts:issue4580)
+  
+  === extensions ===
+   * convert: restore the ability to use bzr < 2.6.0 (Bts:issue5733)
+   * histedit: add support to output nodechanges using formatter
+   * largefiles: add a 'debuglfput' command to put largefile into the store
+   * largefiles: add support for 'largefiles://' url scheme
+   * largefiles: allow to run 'debugupgraderepo' on repo with largefiles
+   * largefiles: convert EOL of hgrc before appending to bytes IO
+   * largefiles: explicitly set the source and sink types to 'hg' for lfconvert
+   * largefiles: modernize how capabilities are added to the wire protocol
+   * largefiles: pay attention to dropped standin files when updating largefiles
+   * rebase: add concludememorynode(), and call it when rebasing in-memory
+   * rebase: add the --inmemory option flag; assign a wctx object for the rebase
+   * rebase: add ui.log calls for whether IMM used, whether rebasing WCP
+   * rebase: disable 'inmemory' if the rebaseset contains the working copy
+   * rebase: do not bail on uncomitted changes if rebasing in-memory
+   * rebase: do not update if IMM; instead, set the overlaywctx's parents
+   * rebase: don't run IMM if running rebase in a transaction
+   * rebase: don't take out a dirstate guard for in-memory rebase
+   * rebase: drop --style option
+   * rebase: fix for hgsubversion
+   * rebase: pass the wctx object (IMM or on-disk) to merge.update
+   * rebase: pass wctx to rebasenode()
+   * rebase: rerun a rebase on-disk if IMM merge conflicts arise
+   * rebase: switch ui.log calls to common style
+   * rebase: use fm.formatlist() and fm.formatdict() to support user template
+  
+  === hgweb ===
+   * hgweb: disable diff.noprefix option for diffstat
+   * hgweb: drop support of browsers that don't understand <canvas> (BC)
+   * hgweb: only include graph-related data in jsdata variable on /graph pages (BC)
+   * hgweb: stop adding strings to innerHTML of #graphnodes and #nodebgs (BC)
+  
+  === unsorted ===
+   * archive: add support to specify hidden revs if directaccess config is set
+   * atomicupdate: add an experimental option to use atomictemp when updating
+   * bundle: allow bundlerepo to support alternative manifest implementations
+   * changelog: introduce a 'tiprev' method
+   * changelog: use 'tiprev()' in 'tip()'
+   * completion: add support for new "amend" command
+   * debugssl: convert port number to int (Bts:issue5757)
+   * diff: disable diff.noprefix option for diffstat (Bts:issue5759)
+   * dispatch: abort if early boolean options can't be parsed
+   * dispatch: add HGPLAIN=+strictflags to restrict early parsing of global options
+   * dispatch: add option to not strip command args parsed by _earlygetopt()
+   * dispatch: alias --repo to --repository while parsing early options
+   * dispatch: convert non-list option parsed by _earlygetopt() to string
+   * dispatch: fix early parsing of short option with value like -R=foo
+   * dispatch: handle IOError when writing to stderr
+   * dispatch: stop parsing of early boolean option at "--"
+   * dispatch: verify result of early command parsing
+   * evolution: make reporting of new unstable changesets optional
+   * extdata: abort if external command exits with non-zero status (BC)
+   * fancyopts: add early-options parser compatible with getopt()
+   * graphlog: add another graph node type, unstable, using character "*" (BC)
+   * hgdemandimport: use correct hyperlink to python-bug in comments (Bts:issue5765)
+   * httppeer: add support for tracing all http request made by the peer
+   * identify: document -r. explicitly how to disable wdir scanning (Bts:issue5622)
+   * lfs: register config options
+   * localrepo: specify optional callback parameter to pathauditor as a keyword
+   * match: do not weirdly include explicit files excluded by -X option
+   * memfilectx: make changectx argument mandatory in constructor (API)
+   * morestatus: don't crash with different drive letters for repo.root and CWD
+   * outgoing: respect ":pushurl" paths (Bts:issue5365)
+   * remove: print message for each file in verbose mode only while using '-A' (BC)
+   * rewriteutil: use precheck() in uncommit and amend commands
+   * scmutil: don't try to delete origbackup symlinks to directories (Bts:issue5731)
+   * sshpeer: add support for request tracing
+   * streamclone: add support for bundle2 based stream clone
+   * streamclone: add support for cloning non append-only file
+   * streamclone: also stream caches to the client
+   * streamclone: define first iteration of version 2 of stream format
+   * streamclone: move wire protocol status code from wireproto command
+   * streamclone: rework canperformstreamclone
+   * streamclone: tests phase exchange during stream clone
+   * streamclone: use readexactly when reading stream v2
+   * subrepo: add config option to reject any subrepo operations (SEC)
+   * subrepo: disable git and svn subrepos by default (BC) (SEC)
+   * subrepo: extend config option to disable subrepos by type (SEC)
+   * subrepo: handle 'C:' style paths on the command line (Bts:issue5770)
+   * subrepo: use per-type config options to enable subrepos
+   * svnsubrepo: check if subrepo is missing when checking dirty state (Bts:issue5657)
+   * tr-summary: keep a weakref to the unfiltered repository
+   * unamend: fix command summary line
+   * uncommit: unify functions _uncommitdirstate and _unamenddirstate to one
+   * update: support updating to hidden cset if directaccess config is set
+  
+  === BC ===
+  
+   * extdata: abort if external command exits with non-zero status (BC)
+   * graphlog: add another graph node type, unstable, using character "*" (BC)
+   * hgweb: drop support of browsers that don't understand <canvas> (BC)
+   * hgweb: only include graph-related data in jsdata variable on /graph pages (BC)
+   * hgweb: stop adding strings to innerHTML of #graphnodes and #nodebgs (BC)
+   * remove: print message for each file in verbose mode only while using '-A' (BC)
+   * subrepo: disable git and svn subrepos by default (BC) (SEC)
+   * transaction: register summary callbacks only at start of transaction (BC)
+  
+  === API Changes ===
+  
+   * memfilectx: make changectx argument mandatory in constructor (API)
--- a/tests/test-convert-bzr-ghosts.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-convert-bzr-ghosts.t	Tue Sep 04 12:16:28 2018 -0400
@@ -31,9 +31,9 @@
   1 Initial layout setup
   0 Commit with ghost revision
   $ glog -R source-hg
-  o  1@source "Commit with ghost revision" files: somefile
+  o  1@source "Commit with ghost revision" files+: [], files-: [], files: [somefile]
   |
-  o  0@source "Initial layout setup" files: somefile
+  o  0@source "Initial layout setup" files+: [somefile], files-: [], files: []
   
 
   $ cd ..
--- a/tests/test-convert-bzr-merges.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-convert-bzr-merges.t	Tue Sep 04 12:16:28 2018 -0400
@@ -13,7 +13,8 @@
   $ bzr init -q source
   $ cd source
   $ echo content > file
-  $ bzr add -q file
+  $ echo text > rename_me
+  $ bzr add -q file rename_me
   $ bzr commit -q -m 'Initial add' '--commit-time=2009-10-10 08:00:00 +0100'
   $ cd ..
   $ bzr branch -q source source-branch1
@@ -32,6 +33,8 @@
   $ cd source-branch2
   $ echo somecontent > file-branch2
   $ bzr add -q file-branch2
+  $ bzr mv -q rename_me renamed
+  $ echo change > renamed
   $ bzr commit -q -m 'Added brach2 file' '--commit-time=2009-10-10 08:00:03 +0100'
   $ sleep 1
   $ cd ../source
@@ -39,6 +42,9 @@
   $ bzr merge -q --force ../source-branch2
   $ bzr commit -q -m 'Merged branches' '--commit-time=2009-10-10 08:00:04 +0100'
   $ cd ..
+
+BUG: file-branch2 should not be added in rev 4, and the rename_me -> renamed
+move should be recorded in the fixup merge.
   $ hg convert --datesort --config convert.bzr.saverev=False source source-hg
   initializing destination source-hg repository
   scanning source...
@@ -49,18 +55,19 @@
   2 Added parent file
   1 Added brach2 file
   0 Merged branches
+  warning: can't find ancestor for 'renamed' copied from 'rename_me'!
   $ glog -R source-hg
-  o    5@source "(octopus merge fixup)" files:
+  o    5@source "(octopus merge fixup)" files+: [], files-: [], files: [renamed]
   |\
-  | o    4@source "Merged branches" files: file-branch2
+  | o    4@source "Merged branches" files+: [file-branch1 file-branch2 renamed], files-: [rename_me], files: [file]
   | |\
-  o---+  3@source-branch2 "Added brach2 file" files: file-branch2
+  o---+  3@source-branch2 "Added brach2 file" files+: [file-branch2 renamed], files-: [rename_me], files: []
    / /
-  | o  2@source "Added parent file" files: file-parent
+  | o  2@source "Added parent file" files+: [file-parent], files-: [], files: []
   | |
-  o |  1@source-branch1 "Added branch1 file" files: file file-branch1
+  o |  1@source-branch1 "Added branch1 file" files+: [file-branch1], files-: [], files: [file]
   |/
-  o  0@source "Initial add" files: file
+  o  0@source "Initial add" files+: [file rename_me], files-: [], files: []
   
   $ manifest source-hg tip
   % manifest of tip
@@ -68,6 +75,7 @@
   644   file-branch1
   644   file-branch2
   644   file-parent
+  644   renamed
 
   $ hg convert source-hg hg2hg
   initializing destination hg2hg repository
@@ -80,38 +88,107 @@
   2 Added brach2 file
   1 Merged branches
   0 (octopus merge fixup)
+
+BUG: The manifest entries should be the same for matching revisions, and
+nothing should be outgoing
+
+  $ hg -R source-hg manifest --debug -r tip | grep renamed
+  67109fdebf6c556eb0a9d5696dd98c8420520405 644   renamed
+  $ hg -R hg2hg manifest --debug -r tip | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+  $ hg -R source-hg manifest --debug -r 'tip^' | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+  $ hg -R hg2hg manifest --debug -r 'tip^' | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+
+BUG: The revisions found should be the same in both repos
+
+  $ hg --cwd source-hg log -r 'file("renamed")' -G -Tcompact
+  o    5[tip]:4,3   6652429c300a   2009-10-10 08:00 +0100   foo
+  |\     (octopus merge fixup)
+  | |
+  | o    4:2,1   e0ae8af3503a   2009-10-10 08:00 +0100   foo
+  | |\     Merged branches
+  | ~ ~
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  |    Added brach2 file
+  ~
+  $ hg --cwd hg2hg log -r 'file("renamed")' -G -Tcompact
+  o    4:2,1   e0ae8af3503a   2009-10-10 08:00 +0100   foo
+  |\     Merged branches
+  ~ ~
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  |    Added brach2 file
+  ~
+
+BUG(?): The move seems to be recorded in rev 4, so it should probably show up
+there.  It's not recorded as a move in rev 5, even in source-hg.
+
+  $ hg -R source-hg up -q tip
+  $ hg -R hg2hg up -q tip
+  $ hg --cwd source-hg log -r 'follow("renamed")' -G -Tcompact
+  @    5[tip]:4,3   6652429c300a   2009-10-10 08:00 +0100   foo
+  |\     (octopus merge fixup)
+  | :
+  o :  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  :/     Added brach2 file
+  :
+  o  0   18b86f5df51b   2009-10-10 08:00 +0100   foo
+       Initial add
+  
+  $ hg --cwd hg2hg log -r 'follow("renamed")' -G -Tcompact
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  :    Added brach2 file
+  :
+  o  0   18b86f5df51b   2009-10-10 08:00 +0100   foo
+       Initial add
+  
+
   $ hg -R hg2hg out source-hg -T compact
   comparing with source-hg
   searching for changes
-  5[tip]:4,3   6bd55e826939   2009-10-10 08:00 +0100   foo
+  5[tip]:4,3   3be2299ccd31   2009-10-10 08:00 +0100   foo
     (octopus merge fixup)
   
-XXX: The manifest lines should probably agree, to avoid changing the hash when
-converting hg -> hg
+
+  $ glog -R hg2hg
+  @    5@source "(octopus merge fixup)" files+: [], files-: [], files: []
+  |\
+  | o    4@source "Merged branches" files+: [file-branch1 file-branch2 renamed], files-: [rename_me], files: [file]
+  | |\
+  o---+  3@source-branch2 "Added brach2 file" files+: [file-branch2 renamed], files-: [rename_me], files: []
+   / /
+  | o  2@source "Added parent file" files+: [file-parent], files-: [], files: []
+  | |
+  o |  1@source-branch1 "Added branch1 file" files+: [file-branch1], files-: [], files: [file]
+  |/
+  o  0@source "Initial add" files+: [file rename_me], files-: [], files: []
+  
 
   $ hg -R source-hg log --debug -r tip
-  changeset:   5:b209510f11b2c987f920749cd8e352aa4b3230f2
+  changeset:   5:6652429c300ab66fdeaf2e730945676a00b53231
   branch:      source
   tag:         tip
   phase:       draft
-  parent:      4:1dc38c377bb35eeea4fa955056fbe4440d54a743
-  parent:      3:4aaba1bfb426b8941bbf63f9dd52301152695164
-  manifest:    5:1109e42bdcbd1f51baa69bc91079011d77057dbb
+  parent:      4:e0ae8af3503af9bbffb0b29268a02744cc61a561
+  parent:      3:138bed2e14be415a2692b02e41405b2864f758b4
+  manifest:    5:1eabd5f5d4b985784cf2c45c717ff053eca14b0d
   user:        Foo Bar <foo.bar@example.com>
   date:        Sat Oct 10 08:00:04 2009 +0100
+  files:       renamed
   extra:       branch=source
   description:
   (octopus merge fixup)
   
   
   $ hg -R hg2hg log --debug -r tip
-  changeset:   5:6bd55e8269392769783345686faf7ff7b3b0215d
+  changeset:   5:3be2299ccd315ff9aab2b49bdb0d14e3244435e8
   branch:      source
   tag:         tip
   phase:       draft
-  parent:      4:1dc38c377bb35eeea4fa955056fbe4440d54a743
-  parent:      3:4aaba1bfb426b8941bbf63f9dd52301152695164
-  manifest:    4:daa315d56a98ba20811fdd0d9d575861f65cfa8c
+  parent:      4:e0ae8af3503af9bbffb0b29268a02744cc61a561
+  parent:      3:138bed2e14be415a2692b02e41405b2864f758b4
+  manifest:    4:3ece3c7f2cc6df15b3cbbf3273c69869fc7c3ab0
   user:        Foo Bar <foo.bar@example.com>
   date:        Sat Oct 10 08:00:04 2009 +0100
   extra:       branch=source
@@ -124,21 +201,25 @@
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  67109fdebf6c556eb0a9d5696dd98c8420520405 644   renamed
   $ hg -R source-hg manifest --debug -r 'tip^'
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
 
   $ hg -R hg2hg manifest --debug -r tip
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
   $ hg -R hg2hg manifest --debug -r 'tip^'
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
 
   $ cd ..
--- a/tests/test-convert-bzr.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-convert-bzr.t	Tue Sep 04 12:16:28 2018 -0400
@@ -42,9 +42,9 @@
   1 Initial add: a, c, e
   0 rename a into b, create a, rename c into d
   $ glog -R source-hg
-  o  1@source "rename a into b, create a, rename c into d" files: a b c d e f
+  o  1@source "rename a into b, create a, rename c into d" files+: [b d f], files-: [c e], files: [a]
   |
-  o  0@source "Initial add: a, c, e" files: a c e
+  o  0@source "Initial add: a, c, e" files+: [a c e], files-: [], files: []
   
 
 manifest
@@ -64,7 +64,7 @@
   converting...
   0 Initial add: a, c, e
   $ glog -R source-1-hg
-  o  0@source "Initial add: a, c, e" files: a c e
+  o  0@source "Initial add: a, c, e" files+: [a c e], files-: [], files: []
   
 
 test with filemap
@@ -147,13 +147,13 @@
   1 Editing b
   0 Merged improve branch
   $ glog -R source-hg
-  o    3@source "Merged improve branch" files:
+  o    3@source "Merged improve branch" files+: [], files-: [], files: [b]
   |\
-  | o  2@source-improve "Editing b" files: b
+  | o  2@source-improve "Editing b" files+: [], files-: [], files: [b]
   | |
-  o |  1@source "Editing a" files: a
+  o |  1@source "Editing a" files+: [], files-: [], files: [a]
   |/
-  o  0@source "Initial add" files: a b
+  o  0@source "Initial add" files+: [a b], files-: [], files: []
   
   $ cd ..
 
@@ -250,13 +250,13 @@
   0 changea
   updating tags
   $ (cd repo-bzr; glog)
-  o  3@default "update tags" files: .hgtags
+  o  3@default "update tags" files+: [.hgtags], files-: [], files: []
   |
-  o  2@default "changea" files: a
+  o  2@default "changea" files+: [], files-: [], files: [a]
   |
-  | o  1@branch "addb" files: b
+  | o  1@branch "addb" files+: [b], files-: [], files: []
   |/
-  o  0@default "adda" files: a
+  o  0@default "adda" files+: [a], files-: [], files: []
   
 
 Test tags (converted identifiers are not stable because bzr ones are
--- a/tests/test-convert-filemap.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-convert-filemap.t	Tue Sep 04 12:16:28 2018 -0400
@@ -780,7 +780,7 @@
   converting...
   0 3
   $ hg -R .-hg log -G -T '{shortest(node)} {desc}\n{files % "- {file}\n"}\n'
-  o    e9ed 3
+  o    bbfe 3
   |\
   | o  33a0 2
   | |  - f
--- a/tests/test-convert-svn-branches.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-convert-svn-branches.t	Tue Sep 04 12:16:28 2018 -0400
@@ -85,8 +85,8 @@
   $ hg branches
   newbranch                     11:a6d7cc050ad1
   default                       10:6e2b33404495
-  old                            9:93c4b0f99529
-  old2                           8:b52884d7bead (inactive)
+  old                            9:1b494af68c0b
+  old2                           8:5be40b8dcbf6 (inactive)
   $ hg tags -q
   tip
   $ cd ..
--- a/tests/test-convert-svn-encoding.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-convert-svn-encoding.t	Tue Sep 04 12:16:28 2018 -0400
@@ -52,6 +52,7 @@
   5 init projA
   source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@1
   converting: 0/6 revisions (0.00%)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   4 hello
@@ -118,6 +119,7 @@
   converting: 4/6 revisions (66.67%)
   reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
   scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   0 branch to branch?e
@@ -125,6 +127,7 @@
   converting: 5/6 revisions (83.33%)
   reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
   scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   reparent to file:/*/$TESTTMP/svn-repo (glob)
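The new "reusing manifest from p1 (no file change)" lines report an
optimization: a converted changeset that touches no files can point at its
parent's manifest instead of writing an identical new manifest revision. A
conceptual sketch with hypothetical names (the real code lives in the commit
path):

  def manifestnodeforcommit(p1manifestnode, changedfiles, writemanifest):
      if not changedfiles:
          # no file change: reuse p1's manifest node, saving one
          # manifest revlog revision
          return p1manifestnode
      return writemanifest(changedfiles)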
--- a/tests/test-convert.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-convert.t	Tue Sep 04 12:16:28 2018 -0400
@@ -533,9 +533,11 @@
 
 test bogus URL
 
+#if no-msys
   $ hg convert -q bzr+ssh://foobar@selenic.com/baz baz
   abort: bzr+ssh://foobar@selenic.com/baz: missing or unsupported repository
   [255]
+#endif
 
 test revset converted() lookup
 
--- a/tests/test-copy-move-merge.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-copy-move-merge.t	Tue Sep 04 12:16:28 2018 -0400
@@ -88,7 +88,8 @@
   > c
   > EOF
   rebasing 2:add3f11052fa "other" (tip)
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
 
   $ cat b
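The reworded prompt above separates the explanation from the question, and the
"(d)elete" choice is phrased differently depending on which side deleted the
file. A minimal sketch of the two wordings (hypothetical helper; the real
prompt lives in Mercurial's merge code):

  def deleteprompt(f, changedside, deletedside, localdeleted):
      keep = 'leave (d)eleted' if localdeleted else '(d)elete'
      return ("file '%s' was deleted in %s but was modified in %s.\n"
              "What do you want to do?\n"
              "use (c)hanged version, %s, or leave (u)nresolved? "
              % (f, deletedside, changedside, keep))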
--- a/tests/test-copy.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-copy.t	Tue Sep 04 12:16:28 2018 -0400
@@ -148,6 +148,7 @@
 copy --after to a nonexistent target filename
   $ hg cp -A foo dummy
   foo: not recording copy - dummy does not exist
+  [1]
 
 dry-run; should show that foo is clean
   $ hg copy --dry-run foo bar
@@ -224,12 +225,14 @@
 Trying to copy on top of an existing file fails,
   $ hg copy -A bar foo
   foo: not overwriting - file already committed
-  (hg copy --after --force to replace the file by recording a copy)
+  ('hg copy --after --force' to replace the file by recording a copy)
+  [1]
 same error without the --after, so the user doesn't have to go through
 two hints:
   $ hg copy bar foo
   foo: not overwriting - file already committed
-  (hg copy --force to replace the file by recording a copy)
+  ('hg copy --force' to replace the file by recording a copy)
+  [1]
 but it's considered modified after a copy --after --force
   $ hg copy -Af bar foo
   $ hg st -AC foo
@@ -240,6 +243,7 @@
   $ touch xyzzy
   $ hg cp bar xyzzy
   xyzzy: not overwriting - file exists
-  (hg copy --after to record the copy)
+  ('hg copy --after' to record the copy)
+  [1]
 
   $ cd ..
--- a/tests/test-copytrace-heuristics.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-copytrace-heuristics.t	Tue Sep 04 12:16:28 2018 -0400
@@ -86,7 +86,8 @@
 
   $ hg rebase -s . -d 1
   rebasing 2:d526312210b9 "mode a" (tip)
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
@@ -242,7 +243,8 @@
   $ hg rebase -s 2 -d 1 --config experimental.copytrace.movecandidateslimit=0
   rebasing 2:ef716627c70b "mod a" (tip)
   skipping copytracing for 'a', more candidates than the limit: 7
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
@@ -697,7 +699,8 @@
 
   $ hg rebase -s 8b6e13696 -d .
   rebasing 1:8b6e13696c38 "added more things to a"
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
--- a/tests/test-debugcommands.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-debugcommands.t	Tue Sep 04 12:16:28 2018 -0400
@@ -15,6 +15,39 @@
   adding a
   $ hg ci -Am make-it-full
 #if reporevlogstore
+  $ hg debugrevlog -c
+  format : 1
+  flags  : inline
+  
+  revisions     :   3
+      merges    :   0 ( 0.00%)
+      normal    :   3 (100.00%)
+  revisions     :   3
+      empty     :   0 ( 0.00%)
+                     text  :   0 (100.00%)
+                     delta :   0 (100.00%)
+      snapshot  :   3 (100.00%)
+        lvl-0   :         3 (100.00%)
+      deltas    :   0 ( 0.00%)
+  revision size : 191
+      snapshot  : 191 (100.00%)
+        lvl-0   :       191 (100.00%)
+      deltas    :   0 ( 0.00%)
+  
+  chunks        :   3
+      0x75 (u)  :   3 (100.00%)
+  chunks size   : 191
+      0x75 (u)  : 191 (100.00%)
+  
+  avg chain length  :  0
+  max chain length  :  0
+  max chain reach   : 67
+  compression ratio :  0
+  
+  uncompressed data size (min/max/avg) : 57 / 66 / 62
+  full revision size (min/max/avg)     : 58 / 67 / 63
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
+  delta size (min/max/avg)             : 0 / 0 / 0
   $ hg debugrevlog -m
   format : 1
   flags  : inline, generaldelta
@@ -23,10 +56,15 @@
       merges    :  0 ( 0.00%)
       normal    :  3 (100.00%)
   revisions     :  3
-      full      :  3 (100.00%)
+      empty     :  1 (33.33%)
+                     text  :  1 (100.00%)
+                     delta :  0 ( 0.00%)
+      snapshot  :  2 (66.67%)
+        lvl-0   :        2 (66.67%)
       deltas    :  0 ( 0.00%)
   revision size : 88
-      full      : 88 (100.00%)
+      snapshot  : 88 (100.00%)
+        lvl-0   :       88 (100.00%)
       deltas    :  0 ( 0.00%)
   
   chunks        :  3
@@ -42,39 +80,100 @@
   compression ratio :  0
   
   uncompressed data size (min/max/avg) : 0 / 43 / 28
-  full revision size (min/max/avg)     : 0 / 44 / 29
+  full revision size (min/max/avg)     : 44 / 44 / 44
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
+  delta size (min/max/avg)             : 0 / 0 / 0
+  $ hg debugrevlog a
+  format : 1
+  flags  : inline, generaldelta
+  
+  revisions     : 1
+      merges    : 0 ( 0.00%)
+      normal    : 1 (100.00%)
+  revisions     : 1
+      empty     : 0 ( 0.00%)
+                     text  : 0 (100.00%)
+                     delta : 0 (100.00%)
+      snapshot  : 1 (100.00%)
+        lvl-0   :       1 (100.00%)
+      deltas    : 0 ( 0.00%)
+  revision size : 3
+      snapshot  : 3 (100.00%)
+        lvl-0   :       3 (100.00%)
+      deltas    : 0 ( 0.00%)
+  
+  chunks        : 1
+      0x75 (u)  : 1 (100.00%)
+  chunks size   : 3
+      0x75 (u)  : 3 (100.00%)
+  
+  avg chain length  : 0
+  max chain length  : 0
+  max chain reach   : 3
+  compression ratio : 0
+  
+  uncompressed data size (min/max/avg) : 2 / 2 / 2
+  full revision size (min/max/avg)     : 3 / 3 / 3
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
   delta size (min/max/avg)             : 0 / 0 / 0
 #endif
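The reworked debugrevlog output above splits revisions into empty revisions,
full snapshots ("lvl-0" means a fulltext snapshot), and deltas. A rough
sketch of that classification against a revlog-like object, assuming only the
iteration, rawsize() and deltaparent() APIs that mercurial's revlog exposes:

  from mercurial.node import nullrev

  def classify(rl):
      empty = snapshots = deltas = 0
      for rev in rl:
          if rl.rawsize(rev) == 0:
              empty += 1
          elif rl.deltaparent(rev) == nullrev:
              snapshots += 1  # stored as fulltext: a level-0 snapshot
          else:
              deltas += 1
      return empty, snapshots, deltas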
 
 Test debugindex, with and without the --verbose/--debug flag
-  $ hg debugindex a
+  $ hg debugrevlogindex a
      rev linkrev nodeid       p1           p2
        0       0 b789fdd96dc2 000000000000 000000000000
 
 #if no-reposimplestore
-  $ hg --verbose debugindex a
+  $ hg --verbose debugrevlogindex a
      rev    offset  length linkrev nodeid       p1           p2
        0         0       3       0 b789fdd96dc2 000000000000 000000000000
 
-  $ hg --debug debugindex a
+  $ hg --debug debugrevlogindex a
      rev    offset  length linkrev nodeid                                   p1                                       p2
        0         0       3       0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
 #endif
 
-  $ hg debugindex -f 1 a
+  $ hg debugrevlogindex -f 1 a
      rev flag     size   link     p1     p2       nodeid
        0 0000        2      0     -1     -1 b789fdd96dc2
 
 #if no-reposimplestore
-  $ hg --verbose debugindex -f 1 a
+  $ hg --verbose debugrevlogindex -f 1 a
      rev flag   offset   length     size   link     p1     p2       nodeid
        0 0000        0        3        2      0     -1     -1 b789fdd96dc2
 
-  $ hg --debug debugindex -f 1 a
+  $ hg --debug debugrevlogindex -f 1 a
      rev flag   offset   length     size   link     p1     p2                                   nodeid
        0 0000        0        3        2      0     -1     -1 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
 #endif
 
+  $ hg debugindex -c
+     rev linkrev nodeid       p1           p2
+       0       0 07f494440405 000000000000 000000000000
+       1       1 8cccb4b5fec2 07f494440405 000000000000
+       2       2 b1e228c512c5 8cccb4b5fec2 000000000000
+  $ hg debugindex -c --debug
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 07f4944404050f47db2e5c5071e0e84e7a27bba9 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a 07f4944404050f47db2e5c5071e0e84e7a27bba9 0000000000000000000000000000000000000000
+       2       2 b1e228c512c5d7066d70562ed839c3323a62d6d2 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a 0000000000000000000000000000000000000000
+  $ hg debugindex -m
+     rev linkrev nodeid       p1           p2
+       0       0 a0c8bcbbb45c 000000000000 000000000000
+       1       1 57faf8a737ae a0c8bcbbb45c 000000000000
+       2       2 a35b10320954 57faf8a737ae 000000000000
+  $ hg debugindex -m --debug
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 57faf8a737ae7faf490582941a82319ba6529dca a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 0000000000000000000000000000000000000000
+       2       2 a35b103209548032201c16c7688cb2657f037a38 57faf8a737ae7faf490582941a82319ba6529dca 0000000000000000000000000000000000000000
+  $ hg debugindex a
+     rev linkrev nodeid       p1           p2
+       0       0 b789fdd96dc2 000000000000 000000000000
+  $ hg debugindex --debug a
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+
 debugdelta chain basic output
 
 #if reporevlogstore
@@ -411,6 +510,7 @@
   $ ls -r .hg/cache/*
   .hg/cache/rbc-revs-v1
   .hg/cache/rbc-names-v1
+  .hg/cache/manifestfulltextcache (reporevlogstore !)
   .hg/cache/branch2-served
 
 Test debugcolor
--- a/tests/test-debugindexdot.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-debugindexdot.t	Tue Sep 04 12:16:28 2018 -0400
@@ -13,6 +13,24 @@
   $ HGMERGE=true hg merge -q
   $ hg ci -m merge -d '3 0'
 
+  $ hg debugindexdot -c
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	0 -> 2
+  	2 -> 3
+  	1 -> 3
+  }
+
+  $ hg debugindexdot -m
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	0 -> 2
+  	2 -> 3
+  	1 -> 3
+  }
+
   $ hg debugindexdot a
   digraph G {
   	-1 -> 0
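The new -c/-m flags render the changelog or manifest DAG instead of a file's.
The dot output itself is easy to produce: one edge per parent, with -1
standing in for the null parent of root revisions. A sketch (hypothetical
helper) that reproduces the digraph shown above:

  def todot(parents):
      # parents: one (p1, p2) pair per revision, -1 meaning "no parent"
      lines = ['digraph G {']
      for rev, (p1, p2) in enumerate(parents):
          lines.append('\t%d -> %d' % (p1, rev))
          if p2 != -1:
              lines.append('\t%d -> %d' % (p2, rev))
      lines.append('}')
      return '\n'.join(lines)

  print(todot([(-1, -1), (0, -1), (0, -1), (2, 1)]))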
--- a/tests/test-diff-color.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-diff-color.t	Tue Sep 04 12:16:28 2018 -0400
@@ -22,7 +22,7 @@
   > c
   > EOF
   $ hg ci -Am adda
-  adding a
+  \x1b[0;32madding a\x1b[0m (esc)
   $ cat > a <<EOF
   > c
   > c
@@ -218,7 +218,7 @@
   $ hg init sub
   $ echo b > sub/b
   $ hg -R sub commit -Am 'create sub'
-  adding b
+  \x1b[0;32madding b\x1b[0m (esc)
   $ echo 'sub = sub' > .hgsub
   $ hg add .hgsub
   $ hg commit -m 'add subrepo sub'
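The "(esc)" lines above are the test runner's escaped rendering of the raw
ANSI color codes now emitted for "adding ..." messages. A tiny sketch of the
green label (0;32 is the green seen in the output):

  def green(text):
      # ESC[0;32m sets green, ESC[0m resets attributes
      return '\x1b[0;32m%s\x1b[0m' % text

  print(green('adding a'))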
--- a/tests/test-extension.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-extension.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1254,7 +1254,7 @@
   > def g():
   >     pass
   > EOF
-  $ hg --config extensions.path=./path.py help foo > /dev/null
+  $ hg --config extensions.path=./path.py help foo
   abort: no such help topic: foo
   (try 'hg help --keyword foo')
   [255]
@@ -1540,6 +1540,7 @@
   reposetup() for $TESTTMP/reposetup-test/src
   reposetup() for $TESTTMP/reposetup-test/src (chg !)
 
+#if no-extraextensions
   $ hg --cwd src debugextensions
   reposetup() for $TESTTMP/reposetup-test/src
   dodo (untested!)
@@ -1547,6 +1548,7 @@
   mq
   reposetuptest (untested!)
   strip
+#endif
 
   $ hg clone -U src clone-dst1
   reposetup() for $TESTTMP/reposetup-test/src
@@ -1683,6 +1685,7 @@
   *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo
   *** (use @command decorator to register 'deprecatedcmd')
   hg: unknown command 'deprecatedcmd'
+  (use 'hg help' for a list of commands)
   [255]
 
  the extension shouldn't be loaded at all so the mq works:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-corrupt.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,83 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+  $ for i in 0 1 2 3 4; do
+  >   echo $i >> a
+  >   echo $i >> b
+  >   hg commit -A -m $i a b
+  > done
+
+use the "debugbuildannotatecache" command to build annotate cache at rev 0
+
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=0
+  fastannotate: a: 1 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+"debugbuildannotatecache" should work with broken cache (and other files would
+be built without being affected). note: linelog being broken is only noticed
+when we try to append to it.
+
+  $ echo 'CORRUPT!' >> .hg/fastannotate/default/a.m
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=1
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 2 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ echo 'CANNOT REUSE!' > .hg/fastannotate/default/a.l
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=2
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 3 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ rm .hg/fastannotate/default/a.m
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=3
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 4 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ rm .hg/fastannotate/default/a.l
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=3
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=4
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 5 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+"fastannotate" should deal with file corruption as well
+
+  $ rm -rf .hg/fastannotate
+  $ hg fastannotate --debug -r 0 a
+  fastannotate: a: 1 new changesets in the main branch
+  0: 0
+
+  $ echo 'CORRUPT!' >> .hg/fastannotate/default/a.m
+  $ hg fastannotate --debug -r 0 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 1 new changesets in the main branch
+  0: 0
+
+  $ echo 'CORRUPT!' > .hg/fastannotate/default/a.l
+  $ hg fastannotate --debug -r 1 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 2 new changesets in the main branch
+  0: 0
+  1: 1
+
+  $ rm .hg/fastannotate/default/a.l
+  $ hg fastannotate --debug -r 1 a
+  fastannotate: a: using fast path (resolved fctx: True)
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 2 new changesets in the main branch
+  0: 0
+  1: 1
+
+  $ rm .hg/fastannotate/default/a.m
+  $ hg fastannotate --debug -r 2 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 0
+  1: 1
+  2: 2
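The messages above ("rebuilding broken cache", "cache broken and deleted")
show the cache being treated as disposable: on any load or append failure it
is thrown away and recomputed. A conceptual sketch with hypothetical helpers
(the real logic lives in hgext/fastannotate):

  import os

  def annotatewithrecovery(cachepath, loadcache, rebuild):
      try:
          return loadcache(cachepath)
      except Exception:
          # cache broken and deleted: discard it and recompute
          if os.path.exists(cachepath):
              os.unlink(cachepath)
          return rebuild(cachepath)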
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-diffopts.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,33 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+changes to whitespace
+
+  $ cat >> a << EOF
+  > 1
+  > 
+  >  
+  >  2
+  > EOF
+  $ hg commit -qAm '1'
+  $ cat > a << EOF
+  >  1
+  > 
+  > 2
+  > 
+  > 
+  > 3
+  > EOF
+  $ hg commit -m 2
+  $ hg fastannotate -wB a
+  0:  1
+  0: 
+  1: 2
+  0: 
+  1: 
+  1: 3
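The -w flag used above ignores all whitespace when deciding whether a line
changed, which is why " 1" in the second revision is still attributed to rev
0; -B similarly ignores changes that only touch blank lines. A one-line
sketch of the -w comparison:

  def eqignoringws(a, b):
      # -w: lines compare equal once all whitespace is removed
      return ''.join(a.split()) == ''.join(b.split())

  assert eqignoringws(' 1', '1')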
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-hg.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,764 @@
+(this file is backported from core hg tests/test-annotate.t)
+
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=1
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > modes=fctx
+  > forcefollow=False
+  > mainbranch=.
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+init
+
+  $ hg init repo
+  $ cd repo
+
+commit
+
+  $ echo 'a' > a
+  $ hg ci -A -m test -u nobody -d '1 0'
+  adding a
+
+annotate -c
+
+  $ hg annotate -c a
+  8435f90966e4: a
+
+annotate -cl
+
+  $ hg annotate -cl a
+  8435f90966e4:1: a
+
+annotate -d
+
+  $ hg annotate -d a
+  Thu Jan 01 00:00:01 1970 +0000: a
+
+annotate -n
+
+  $ hg annotate -n a
+  0: a
+
+annotate -nl
+
+  $ hg annotate -nl a
+  0:1: a
+
+annotate -u
+
+  $ hg annotate -u a
+  nobody: a
+
+annotate -cdnu
+
+  $ hg annotate -cdnu a
+  nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
+
+annotate -cdnul
+
+  $ hg annotate -cdnul a
+  nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
+
+annotate (JSON)
+
+  $ hg annotate -Tjson a
+  [
+   {
+    "lines": [{"line": "a\n", "rev": 0}],
+    "path": "a"
+   }
+  ]
+
+  $ hg annotate -Tjson -cdfnul a
+  [
+   {
+    "lines": [{"date": [1.0, 0], "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
+    "path": "a"
+   }
+  ]
+
+  $ cat <<EOF >>a
+  > a
+  > a
+  > EOF
+  $ hg ci -ma1 -d '1 0'
+  $ hg cp a b
+  $ hg ci -mb -d '1 0'
+  $ cat <<EOF >> b
+  > b4
+  > b5
+  > b6
+  > EOF
+  $ hg ci -mb2 -d '2 0'
+
+annotate -n b
+
+  $ hg annotate -n b
+  0: a
+  1: a
+  1: a
+  3: b4
+  3: b5
+  3: b6
+
+annotate --no-follow b
+
+  $ hg annotate --no-follow b
+  2: a
+  2: a
+  2: a
+  3: b4
+  3: b5
+  3: b6
+
+annotate -nl b
+
+  $ hg annotate -nl b
+  0:1: a
+  1:2: a
+  1:3: a
+  3:4: b4
+  3:5: b5
+  3:6: b6
+
+annotate -nf b
+
+  $ hg annotate -nf b
+  0 a: a
+  1 a: a
+  1 a: a
+  3 b: b4
+  3 b: b5
+  3 b: b6
+
+annotate -nlf b
+
+  $ hg annotate -nlf b
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  3 b:4: b4
+  3 b:5: b5
+  3 b:6: b6
+
+  $ hg up -C 2
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat <<EOF >> b
+  > b4
+  > c
+  > b5
+  > EOF
+  $ hg ci -mb2.1 -d '2 0'
+  created new head
+  $ hg merge
+  merging b
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -mmergeb -d '3 0'
+
+annotate after merge
+(note: the first one falls back to the vanilla annotate which does not use linelog)
+
+  $ hg annotate -nf b --debug
+  fastannotate: b: rebuilding broken cache
+  fastannotate: b: 5 new changesets in the main branch
+  0 a: a
+  1 a: a
+  1 a: a
+  3 b: b4
+  4 b: c
+  3 b: b5
+
+(difference explained below)
+
+  $ hg annotate -nf b --debug
+  fastannotate: b: using fast path (resolved fctx: False)
+  0 a: a
+  1 a: a
+  1 a: a
+  4 b: b4
+  4 b: c
+  4 b: b5
+
+annotate after merge with -l
+(fastannotate differs from annotate)
+
+  $ hg log -Gp -T '{rev}:{node}' -r '2..5'
+  @    5:64afcdf8e29e063c635be123d8d2fb160af00f7e
+  |\
+  | o  4:5fbdc1152d97597717021ad9e063061b200f146bdiff --git a/b b/b
+  | |  --- a/b
+  | |  +++ b/b
+  | |  @@ -1,3 +1,6 @@
+  | |   a
+  | |   a
+  | |   a
+  | |  +b4
+  | |  +c
+  | |  +b5
+  | |
+  o |  3:37ec9f5c3d1f99572d7075971cb4876e2139b52fdiff --git a/b b/b
+  |/   --- a/b
+  |    +++ b/b
+  |    @@ -1,3 +1,6 @@
+  |     a
+  |     a
+  |     a
+  |    +b4
+  |    +b5
+  |    +b6
+  |
+  o  2:3086dbafde1ce745abfc8d2d367847280aabae9ddiff --git a/a b/b
+  |  copy from a
+  ~  copy to b
+  
+
+  (in this case, "b4" and "b5" could be considered introduced by either rev 3
+   or rev 4, and that causes the rev number difference)
+
+  $ hg annotate -nlf b --config fastannotate.modes=
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+
+  $ hg annotate -nlf b
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  4 b:4: b4
+  4 b:5: c
+  4 b:6: b5
+
+  $ hg up -C 1
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg cp a b
+  $ cat <<EOF > b
+  > a
+  > z
+  > a
+  > EOF
+  $ hg ci -mc -d '3 0'
+  created new head
+  $ hg merge
+  merging b
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ cat <<EOF >> b
+  > b4
+  > c
+  > b5
+  > EOF
+  $ echo d >> b
+  $ hg ci -mmerge2 -d '4 0'
+
+annotate after rename merge
+
+  $ hg annotate -nf b
+  0 a: a
+  6 b: z
+  1 a: a
+  3 b: b4
+  4 b: c
+  3 b: b5
+  7 b: d
+
+annotate after rename merge with -l
+(fastannotate differs from annotate)
+
+  $ hg log -Gp -T '{rev}:{node}' -r '0+1+6+7'
+  @    7:6284bb6c38fef984a929862a53bbc71ce9eafa81diff --git a/b b/b
+  |\   --- a/b
+  | :  +++ b/b
+  | :  @@ -1,3 +1,7 @@
+  | :   a
+  | :   z
+  | :   a
+  | :  +b4
+  | :  +c
+  | :  +b5
+  | :  +d
+  | :
+  o :  6:b80e3e32f75a6a67cd4ac85496a11511e9112816diff --git a/a b/b
+  :/   copy from a
+  :    copy to b
+  :    --- a/a
+  :    +++ b/b
+  :    @@ -1,3 +1,3 @@
+  :    -a (?)
+  :     a
+  :    +z
+  :     a
+  :    -a (?)
+  :
+  o  1:762f04898e6684ff713415f7b8a8d53d33f96c92diff --git a/a b/a
+  |  --- a/a
+  |  +++ b/a
+  |  @@ -1,1 +1,3 @@
+  |   a
+  |  +a
+  |  +a
+  |
+  o  0:8435f90966e442695d2ded29fdade2bac5ad8065diff --git a/a b/a
+     new file mode 100644
+     --- /dev/null
+     +++ b/a
+     @@ -0,0 +1,1 @@
+     +a
+  
+
+(note on question marks:
+ the upstream bdiff change (96f2f50d923f+3633403888ae+8c0c75aa3ff4+5c4e2636c1a9
+ +38ed54888617) alters the output so deletion is not always at the end of the
+ output. for example:
+ | a | b | old | new | # old: e1d6aa0e4c3a, new: 8836f13e3c5b
+ |-------------------|
+ | a | a |  a  | -a  |
+ | a | z | +z  |  a  |
+ | a | a |  a  | +z  |
+ |   |   | -a  |  a  |
+ |-------------------|
+ | a | a |     a     |
+ | a | a |     a     |
+ | a |   |    -a     |
+ this leads to more question marks below)
+
+(rev 1 adds two "a"s and rev 6 deletes one "a".
+ the "a" that rev 6 deletes could be either the first or the second of the two
+ "a"s added by rev 1, and that causes the line number difference)
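+
+(the same effect in miniature, using plain difflib as a stand-in for bdiff:
+
+   import difflib
+   sm = difflib.SequenceMatcher(a=['a', 'a', 'a'], b=['a', 'z', 'a'])
+   for tag, i1, i2, j1, j2 in sm.get_opcodes():
+       print(tag, i1, i2, j1, j2)
+   # equal  0 1 0 1    keep the first "a"
+   # insert 1 1 1 2    +z
+   # equal  1 2 2 3    keep the second "a"
+   # delete 2 3 3 3    -a at the end, like the "old" column above
+
+ a diff algorithm may equally well delete the first "a" and keep the later
+ ones, which is what the newer bdiff does, moving the line numbers around)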
+
+  $ hg annotate -nlf b --config fastannotate.modes=
+  0 a:1: a
+  6 b:2: z
+  1 a:3: a
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+  7 b:7: d
+
+  $ hg annotate -nlf b
+  0 a:1: a (?)
+  1 a:2: a (?)
+  6 b:2: z
+  1 a:2: a (?)
+  1 a:3: a (?)
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+  7 b:7: d
+
+Issue2807: alignment of line numbers with -l
+(fastannotate differs from annotate, same reason as above)
+
+  $ echo more >> b
+  $ hg ci -mmore -d '5 0'
+  $ echo more >> b
+  $ hg ci -mmore -d '6 0'
+  $ echo more >> b
+  $ hg ci -mmore -d '7 0'
+  $ hg annotate -nlf b
+   0 a: 1: a (?)
+   1 a: 2: a (?)
+   6 b: 2: z
+   1 a: 2: a (?)
+   1 a: 3: a (?)
+   3 b: 4: b4
+   4 b: 5: c
+   3 b: 5: b5
+   7 b: 7: d
+   8 b: 8: more
+   9 b: 9: more
+  10 b:10: more
+
+linkrev vs rev
+
+  $ hg annotate -r tip -n a
+  0: a
+  1: a
+  1: a
+
+linkrev vs rev with -l
+
+  $ hg annotate -r tip -nl a
+  0:1: a
+  1:2: a
+  1:3: a
+
+Issue589: "undelete" sequence leads to crash
+
+annotate was crashing when trying to --follow something like A -> B -> A
+
+generate ABA rename configuration
+
+  $ echo foo > foo
+  $ hg add foo
+  $ hg ci -m addfoo
+  $ hg rename foo bar
+  $ hg ci -m renamefoo
+  $ hg rename bar foo
+  $ hg ci -m renamebar
+
+annotate after ABA with follow
+
+  $ hg annotate --follow foo
+  foo: foo
+
+missing file
+
+  $ hg ann nosuchfile
+  abort: nosuchfile: no such file in rev e9e6b4fa872f
+  [255]
+
+annotate file without '\n' on last line
+
+  $ printf "" > c
+  $ hg ci -A -m test -u nobody -d '1 0'
+  adding c
+  $ hg annotate c
+  $ printf "a\nb" > c
+  $ hg ci -m test
+  $ hg annotate c
+  [0-9]+: a (re)
+  [0-9]+: b (re)
+
+Issue3841: check annotation of a file whose filelog includes merging between
+a revision and its ancestor
+
+to reproduce the situation with recent Mercurial, this script uses (1)
+"hg debugsetparents" to merge without the ancestor check done by "hg merge",
+and (2) an extension that allows filelog merging between the revision and its
+ancestor by overriding "repo._filecommit".
+
+  $ cat > ../legacyrepo.py <<EOF
+  > from mercurial import node, error
+  > def reposetup(ui, repo):
+  >     class legacyrepo(repo.__class__):
+  >         def _filecommit(self, fctx, manifest1, manifest2,
+  >                         linkrev, tr, changelist):
+  >             fname = fctx.path()
+  >             text = fctx.data()
+  >             flog = self.file(fname)
+  >             fparent1 = manifest1.get(fname, node.nullid)
+  >             fparent2 = manifest2.get(fname, node.nullid)
+  >             meta = {}
+  >             copy = fctx.renamed()
+  >             if copy and copy[0] != fname:
+  >                 raise error.Abort('copying is not supported')
+  >             if fparent2 != node.nullid:
+  >                 changelist.append(fname)
+  >                 return flog.add(text, meta, tr, linkrev,
+  >                                 fparent1, fparent2)
+  >             raise error.Abort('only merging is supported')
+  >     repo.__class__ = legacyrepo
+  > EOF
+
+  $ cat > baz <<EOF
+  > 1
+  > 2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg add baz
+  $ hg commit -m "baz:0"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg commit -m "baz:1"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg debugsetparents 17 17
+  $ hg --config extensions.legacyrepo=../legacyrepo.py  commit -m "baz:2"
+  $ hg debugindexdot baz
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	1 -> 2
+  	1 -> 2
+  }
+  $ hg annotate baz
+  17: 1 baz:1
+  18: 2 baz:2
+  16: 3
+  16: 4
+  16: 5
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3 baz:3
+  > 4
+  > 5
+  > EOF
+  $ hg commit -m "baz:3"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3 baz:3
+  > 4 baz:4
+  > 5
+  > EOF
+  $ hg debugsetparents 19 18
+  $ hg --config extensions.legacyrepo=../legacyrepo.py  commit -m "baz:4"
+  $ hg debugindexdot baz
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	1 -> 2
+  	1 -> 2
+  	2 -> 3
+  	3 -> 4
+  	2 -> 4
+  }
+  $ hg annotate baz
+  17: 1 baz:1
+  18: 2 baz:2
+  19: 3 baz:3
+  20: 4 baz:4
+  16: 5
+
+annotate clean file
+
+  $ hg annotate -ncr "wdir()" foo
+  11 472b18db256d : foo
+
+annotate modified file
+
+  $ echo foofoo >> foo
+  $ hg annotate -r "wdir()" foo
+  11 : foo
+  20+: foofoo
+
+  $ hg annotate -cr "wdir()" foo
+  472b18db256d : foo
+  b6bedd5477e7+: foofoo
+
+  $ hg annotate -ncr "wdir()" foo
+  11 472b18db256d : foo
+  20 b6bedd5477e7+: foofoo
+
+  $ hg annotate --debug -ncr "wdir()" foo
+  11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
+  20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
+
+  $ hg annotate -udr "wdir()" foo
+  test Thu Jan 01 00:00:00 1970 +0000: foo
+  test [A-Za-z0-9:+ ]+: foofoo (re)
+
+  $ hg annotate -ncr "wdir()" -Tjson foo
+  [
+   {
+    "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
+    "path": "foo"
+   }
+  ]
+
+annotate added file
+
+  $ echo bar > bar
+  $ hg add bar
+  $ hg annotate -ncr "wdir()" bar
+  20 b6bedd5477e7+: bar
+
+annotate renamed file
+
+  $ hg rename foo renamefoo2
+  $ hg annotate -ncr "wdir()" renamefoo2
+  11 472b18db256d : foo
+  20 b6bedd5477e7+: foofoo
+
+annotate missing file
+
+  $ rm baz
+  $ hg annotate -ncr "wdir()" baz
+  abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
+  abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
+  [255]
+
+annotate removed file
+
+  $ hg rm baz
+  $ hg annotate -ncr "wdir()" baz
+  abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
+  abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
+  [255]
+
+Test annotate with whitespace options
+
+  $ cd ..
+  $ hg init repo-ws
+  $ cd repo-ws
+  $ cat > a <<EOF
+  > aa
+  > 
+  > b b
+  > EOF
+  $ hg ci -Am "adda"
+  adding a
+  $ sed 's/EOL$//g' > a <<EOF
+  > a  a
+  > 
+  >  EOL
+  > b  b
+  > EOF
+  $ hg ci -m "changea"
+
+Annotate with no option
+
+  $ hg annotate a
+  1: a  a
+  0: 
+  1:  
+  1: b  b
+
+Annotate with --ignore-space-change
+
+  $ hg annotate --ignore-space-change a
+  1: a  a
+  1: 
+  0:  
+  0: b  b
+
+Annotate with --ignore-all-space
+
+  $ hg annotate --ignore-all-space a
+  0: a  a
+  0: 
+  1:  
+  0: b  b
+
+Annotate with --ignore-blank-lines (similar to no options case)
+
+  $ hg annotate --ignore-blank-lines a
+  1: a  a
+  0: 
+  1:  
+  1: b  b
+
+  $ cd ..
+
+Annotate with linkrev pointing to another branch
+------------------------------------------------
+
+create history with a filerev whose linkrev points to another branch
+
+  $ hg init branchedlinkrev
+  $ cd branchedlinkrev
+  $ echo A > a
+  $ hg commit -Am 'contentA'
+  adding a
+  $ echo B >> a
+  $ hg commit -m 'contentB'
+  $ hg up --rev 'desc(contentA)'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo unrelated > unrelated
+  $ hg commit -Am 'unrelated'
+  adding unrelated
+  created new head
+  $ hg graft -r 'desc(contentB)'
+  grafting 1:fd27c222e3e6 "contentB"
+  $ echo C >> a
+  $ hg commit -m 'contentC'
+  $ echo W >> a
+  $ hg log -G
+  @  changeset:   4:072f1e8df249
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     contentC
+  |
+  o  changeset:   3:ff38df03cc4b
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     contentB
+  |
+  o  changeset:   2:62aaf3f6fc06
+  |  parent:      0:f0932f74827e
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     unrelated
+  |
+  | o  changeset:   1:fd27c222e3e6
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     contentB
+  |
+  o  changeset:   0:f0932f74827e
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     contentA
+  
+
+Annotate should list ancestors of the starting revision only
+
+  $ hg annotate a
+  0: A
+  3: B
+  4: C
+
+  $ hg annotate a -r 'wdir()'
+  0 : A
+  3 : B
+  4 : C
+  4+: W
+
+Even when the starting revision is the linkrev-shadowed one:
+
+  $ hg annotate a -r 3
+  0: A
+  3: B
+
+  $ cd ..
+
+Issue5360: Deleted chunk in p1 of a merge changeset
+
+  $ hg init repo-5360
+  $ cd repo-5360
+  $ echo 1 > a
+  $ hg commit -A a -m 1
+  $ echo 2 >> a
+  $ hg commit -m 2
+  $ echo a > a
+  $ hg commit -m a
+  $ hg update '.^' -q
+  $ echo 3 >> a
+  $ hg commit -m 3 -q
+  $ hg merge 2 -q
+  $ cat > a << EOF
+  > b
+  > 1
+  > 2
+  > 3
+  > a
+  > EOF
+  $ hg resolve --mark -q
+  $ hg commit -m m
+  $ hg annotate a
+  4: b
+  0: 1
+  1: 2
+  3: 3
+  2: a
+
+  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-perfhack.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,182 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > perfhack=1
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+  $ hg init repo
+  $ cd repo
+
+a simple merge case
+
+  $ echo 1 > a
+  $ hg commit -qAm 'append 1'
+  $ echo 2 >> a
+  $ hg commit -m 'append 2'
+  $ echo 3 >> a
+  $ hg commit -m 'append 3'
+  $ hg up 1 -q
+  $ cat > a << EOF
+  > 0
+  > 1
+  > 2
+  > EOF
+  $ hg commit -qm 'insert 0'
+  $ hg merge 2 -q
+  $ echo 4 >> a
+  $ hg commit -m merge
+  $ hg log -G -T '{rev}: {desc}'
+  @    4: merge
+  |\
+  | o  3: insert 0
+  | |
+  o |  2: append 3
+  |/
+  o  1: append 2
+  |
+  o  0: append 1
+  
+  $ hg fastannotate a
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 0 a
+  0: 1
+  $ hg fastannotate -r 1 a
+  0: 1
+  1: 2
+  $ hg fastannotate -udnclf a
+  test 3 d641cb51f61e Thu Jan 01 00:00:00 1970 +0000 a:1: 0
+  test 0 4994017376d3 Thu Jan 01 00:00:00 1970 +0000 a:1: 1
+  test 1 e940cb6d9a06 Thu Jan 01 00:00:00 1970 +0000 a:2: 2
+  test 2 26162a884ba6 Thu Jan 01 00:00:00 1970 +0000 a:3: 3
+  test 4 3ad7bcd2815f Thu Jan 01 00:00:00 1970 +0000 a:5: 4
+  $ hg fastannotate --linear a
+  3: 0
+  0: 1
+  1: 2
+  4: 3
+  4: 4
+
+incrementally updating
+
+  $ hg fastannotate -r 0 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  $ hg fastannotate -r 0 a --debug --rebuild
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 3 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+
+rebuild happens automatically if unable to update
+
+  $ hg fastannotate -r 2 a --debug
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+
+config option "fastannotate.mainbranch"
+
+  $ hg fastannotate -r 1 --rebuild --config fastannotate.mainbranch=tip a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+
+rename
+
+  $ hg mv a b
+  $ cat > b << EOF
+  > 0
+  > 11
+  > 3
+  > 44
+  > EOF
+  $ hg commit -m b -q
+  $ hg fastannotate -ncf --long-hash b
+  3 d641cb51f61e331c44654104301f8154d7865c89 a: 0
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 11
+  2 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a: 3
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 44
+  $ hg fastannotate -r 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a
+  0: 1
+  1: 2
+  2: 3
+
+fastannotate --deleted
+
+  $ hg fastannotate --deleted -nf b
+  3 a:  0
+  5 b:  11
+  0 a: -1
+  1 a: -2
+  2 a:  3
+  5 b:  44
+  4 a: -4
+  $ hg fastannotate --deleted -r 3 -nf a
+  3 a:  0
+  0 a:  1
+  1 a:  2
+
+files and directories with ".l", ".m" suffixes
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ mkdir a.l b.m c.lock a.l.hg b.hg
+  $ for i in a b c d d.l d.m a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a; do
+  >   echo $i > $i
+  > done
+  $ hg add . -q
+  $ hg commit -m init
+  $ hg fastannotate a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a d.l d.m a b c d
+  0: a
+  0: a.l.hg/a
+  0: a.l/a
+  0: b
+  0: b.hg/a
+  0: b.m/a
+  0: c
+  0: c.lock/a
+  0: d
+  0: d.l
+  0: d.m
+
+empty file
+
+  $ touch empty
+  $ hg commit -A empty -m empty
+  $ hg fastannotate empty
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-protocol.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,215 @@
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > ssh = $PYTHON "$TESTDIR/dummyssh"
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > mainbranch=@
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+setup the server repo
+
+  $ hg init repo-server
+  $ cd repo-server
+  $ cat >> .hg/hgrc << EOF
+  > [fastannotate]
+  > server=1
+  > EOF
+  $ for i in 1 2 3 4; do
+  >   echo $i >> a
+  >   hg commit -A -m $i a
+  > done
+  $ [ -d .hg/fastannotate ]
+  [1]
+  $ hg bookmark @
+  $ cd ..
+
+setup the local repo
+
+  $ hg clone 'ssh://user@dummy/repo-server' repo-local -q
+  $ cd repo-local
+  $ cat >> .hg/hgrc << EOF
+  > [fastannotate]
+  > client=1
+  > clientfetchthreshold=0
+  > EOF
+  $ [ -d .hg/fastannotate ]
+  [1]
+  $ hg fastannotate a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: server returned
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+
+the cache can be reused and no download is necessary
+
+  $ hg fastannotate a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+
+if the client agrees with the server about where the head of the master branch
+is, no re-download happens even if the client has more commits
+
+  $ echo 5 >> a
+  $ hg commit -m 5
+  $ hg bookmark -r 3 @ -f
+  $ hg fastannotate a --debug
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the client has a different "@" (head of the master branch) and "@" is ahead
+of the server, the server can detect things are unchanged and does not return
+full contents (not that there is no "writing ... to fastannotate"), but the
+client can also build things up on its own (causing diverge)
+
+  $ hg bookmark -r 4 @ -f
+  $ hg fastannotate a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: server returned
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the client has a different "@" which is behind the server. no download is
+necessary
+
+  $ hg fastannotate a --debug --config fastannotate.mainbranch=2
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+define fastannotate on-disk paths
+
+  $ p1=.hg/fastannotate/default
+  $ p2=../repo-server/.hg/fastannotate/default
+
+revert bookmark change so the client is behind the server
+
+  $ hg bookmark -r 2 @ -f
+
+in the "fctx" mode with the "annotate" command, the client also downloads the
+cache. but not in the (default) "fastannotate" mode.
+
+  $ rm $p1/a.l $p1/a.m
+  $ hg annotate a --debug | grep 'fastannotate: writing'
+  [1]
+  $ hg annotate a --config fastannotate.modes=fctx --debug | grep 'fastannotate: writing' | sort
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+
+the fastannotate caches (built server-side, downloaded client-side) in the two
+repos have the same content (because the client downloads from the server)
+
+  $ diff $p1/a.l $p2/a.l
+  $ diff $p1/a.m $p2/a.m
+
+in the "fctx" mode, the client could also build the cache locally
+
+  $ hg annotate a --config fastannotate.modes=fctx --debug --config fastannotate.mainbranch=4 | grep fastannotate
+  fastannotate: requesting 1 files
+  fastannotate: server returned
+  fastannotate: a: 1 new changesets in the main branch
+
+the server rebuilds a broken cache automatically
+
+  $ cp $p2/a.m $p2/a.m.bak
+  $ echo BROKEN1 > $p1/a.m
+  $ echo BROKEN2 > $p2/a.m
+  $ hg fastannotate a --debug | grep 'fastannotate: writing' | sort
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+  $ diff $p1/a.m $p2/a.m
+  $ diff $p2/a.m $p2/a.m.bak
+
+use the "debugbuildannotatecache" command to build annotate cache
+
+  $ rm -rf $p1 $p2
+  $ hg --cwd ../repo-server debugbuildannotatecache a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  $ hg --cwd ../repo-local debugbuildannotatecache a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: server returned
+  fastannotate: writing * (glob)
+  fastannotate: writing * (glob)
+  $ diff $p1/a.l $p2/a.l
+  $ diff $p1/a.m $p2/a.m
+
+with the clientfetchthreshold config option, the client can build up the cache
+without downloading from the server
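+
+(a rough sketch of the decision, with hypothetical names -- the real check
+ lives in the fastannotate client code:
+
+   def shouldfetch(nmissing, threshold):
+       # only ask the server when enough revisions are missing locally;
+       # below the threshold it is cheaper to build the linelog ourselves
+       return nmissing >= threshold
+
+ here fewer than 10 revisions are missing, so the client rebuilds locally
+ instead of contacting the server)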
+
+  $ rm -rf $p1
+  $ hg fastannotate a --debug --config fastannotate.clientfetchthreshold=10
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the fastannotate directory is not writable, the fctx mode still works
+
+  $ rm -rf $p1
+  $ touch $p1
+  $ hg annotate a --debug --traceback --config fastannotate.modes=fctx
+  fastannotate: a: cache broken and deleted
+  fastannotate: prefetch failed: * (glob)
+  fastannotate: a: cache broken and deleted
+  fastannotate: falling back to the vanilla annotate: * (glob)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+with serverbuildondemand=False, the server will not build anything
+
+  $ cat >> ../repo-server/.hg/hgrc <<EOF
+  > [fastannotate]
+  > serverbuildondemand=False
+  > EOF
+  $ rm -rf $p1 $p2
+  $ hg fastannotate a --debug | grep 'fastannotate: writing'
+  [1]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-renames.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,168 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > mainbranch=main
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+add or rename files on top of the master branch
+
+  $ echo a1 > a
+  $ echo b1 > b
+  $ hg commit -qAm 1
+  $ hg bookmark -i main
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: 1 new changesets in the main branch
+  0 b: b1
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: 1 new changesets in the main branch
+  0 a: a1
+  $ echo a2 >> a
+  $ cat > b << EOF
+  > b0
+  > b1
+  > EOF
+  $ hg mv a t
+  $ hg mv b a
+  $ hg mv t b
+  $ hg commit -m 'swap names'
+
+existing linelogs are not helpful with such renames in side branches
+
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: linelog cannot help in annotating this revision
+  0 a: a1
+  1 b: a2
+
+move main branch forward, rebuild should happen
+
+  $ hg bookmark -i main -r . -q
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: cache broken and deleted
+  fastannotate: b: 2 new changesets in the main branch
+  0 a: a1
+  1 b: a2
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+
+for rev 0, the existing linelog is still useful for a, but not for b
+
+  $ hg fastannotate --debug -nf a -r 0
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  $ hg fastannotate --debug -nf b -r 0
+  fastannotate: b: linelog cannot help in annotating this revision
+  0 b: b1
+
+a rebuild can also be triggered if the "main branch" recorded last time mismatches
+
+  $ echo a3 >> a
+  $ hg commit -m a3
+  $ cat >> b << EOF
+  > b3
+  > b4
+  > EOF
+  $ hg commit -m b4
+  $ hg bookmark -i main -q
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: using fast path (resolved fctx: True)
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+linelog can be updated without being helpful
+
+  $ hg mv a t
+  $ hg mv b a
+  $ hg mv t b
+  $ hg commit -m 'swap names again'
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: 1 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+move main branch forward again, rebuilds are one-time
+
+  $ hg bookmark -i main -q
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 4 new changesets in the main branch
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: cache broken and deleted
+  fastannotate: b: 4 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: using fast path (resolved fctx: True)
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+list changeset hashes to improve readability
+
+  $ hg log -T '{rev}:{node}\n'
+  4:980e1ab8c516350172928fba95b49ede3b643dca
+  3:14e123fedad9f491f5dde0beca2a767625a0a93a
+  2:96495c41e4c12218766f78cdf244e768d7718b0f
+  1:35c2b781234c994896aba36bd3245d3104e023df
+  0:653e95416ebb5dbcc25bbc7f75568c9e01f7bd2f
+
+annotate a revision not in the linelog. linelog cannot be used, but does not get rebuilt either
+
+  $ hg fastannotate --debug -nf a -r 96495c41e4c12218766f78cdf244e768d7718b0f
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a -r 2
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a -r .
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+
+annotate an ancient revision where the path matches. linelog can be used
+
+  $ hg fastannotate --debug -nf a -r 0
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  $ hg fastannotate --debug -nf a -r 653e95416ebb5dbcc25bbc7f75568c9e01f7bd2f
+  fastannotate: a: using fast path (resolved fctx: False)
+  0 a: a1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-revmap.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,198 @@
+from __future__ import absolute_import, print_function
+
+import os
+import tempfile
+
+from mercurial import (
+    pycompat,
+    util,
+)
+
+from hgext.fastannotate import error, revmap
+
+if pycompat.ispy3:
+    xrange = range
+
+def genhsh(i):
+    # build a fake 20-byte hash; pycompat.bytechr works on Python 2 and 3,
+    # whereas chr() would return a non-bytes str on Python 3
+    return pycompat.bytechr(i) + b'\0' * 19
+
+def gettemppath():
+    fd, path = tempfile.mkstemp()
+    os.close(fd)
+    os.unlink(path)
+    return path
+
+def ensure(condition):
+    if not condition:
+        raise RuntimeError('Unexpected')
+
+def testbasicreadwrite():
+    path = gettemppath()
+
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 0)
+    for i in xrange(5):
+        ensure(rm.rev2hsh(i) is None)
+    ensure(rm.hsh2rev(b'\0' * 20) is None)
+
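+    # one path per appended revision; a None entry is expected to reuse the
+    # previous revision's path (see the "paths[i] or paths[i - 1]" check below)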
+    paths = ['', 'a', None, 'b', 'b', 'c', 'c', None, 'a', 'b', 'a', 'a']
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i]) == i)
+
+    ensure(rm.maxrev == 4)
+    for i in xrange(1, 5):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+
+    # re-load and verify
+    rm.flush()
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 4)
+    for i in xrange(1, 5):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+        ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
+
+    # append without calling save() explicitly
+    for i in xrange(5, 12):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i],
+                         flush=True) == i)
+
+    # re-load and verify
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 11)
+    for i in xrange(1, 12):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+        ensure(rm.rev2path(i) == (paths[i] or paths[i - 1]))
+        ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
+
+    os.unlink(path)
+
+    # missing keys
+    ensure(rm.rev2hsh(12) is None)
+    ensure(rm.rev2hsh(0) is None)
+    ensure(rm.rev2hsh(-1) is None)
+    ensure(rm.rev2flag(12) is None)
+    ensure(rm.rev2path(12) is None)
+    ensure(rm.hsh2rev(b'\1' * 20) is None)
+
+    # illformed hash (not 20 bytes)
+    try:
+        rm.append(b'\0')
+        ensure(False)
+    except Exception:
+        pass
+
+def testcorruptformat():
+    path = gettemppath()
+
+    # incorrect header
+    with open(path, 'wb') as f:  # binary mode, the header is bytes
+        f.write(b'NOT A VALID HEADER')
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    # rewrite the file
+    os.unlink(path)
+    rm = revmap.revmap(path)
+    rm.append(genhsh(0), flush=True)
+
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 1)
+
+    # corrupt the file by appending a byte
+    size = os.stat(path).st_size
+    with open(path, 'ab') as f:  # append in binary mode
+        f.write(b'\xff')
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    # corrupt the file by removing the last byte
+    ensure(size > 0)
+    with open(path, 'r+b') as f:  # 'w' would empty the file before truncating
+        f.truncate(size - 1)
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    os.unlink(path)
+
+def testcopyfrom():
+    path = gettemppath()
+    rm = revmap.revmap(path)
+    for i in xrange(1, 10):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=str(i // 3)) == i)
+    rm.flush()
+
+    # copy rm to rm2
+    rm2 = revmap.revmap()
+    rm2.copyfrom(rm)
+    path2 = gettemppath()
+    rm2.path = path2
+    rm2.flush()
+
+    # two files should be the same
+    ensure(len(set(util.readfile(p) for p in [path, path2])) == 1)
+
+    os.unlink(path)
+    os.unlink(path2)
+
+class fakefctx(object):
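+    """minimal stand-in for a filectx: only node() and path() are consulted"""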
+    def __init__(self, node, path=None):
+        self._node = node
+        self._path = path
+
+    def node(self):
+        return self._node
+
+    def path(self):
+        return self._path
+
+def testcontains():
+    path = gettemppath()
+
+    rm = revmap.revmap(path)
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1)) == i)
+
+    for i in xrange(1, 5):
+        ensure(((genhsh(i), None) in rm) == ((i & 1) == 0))
+        ensure((fakefctx(genhsh(i)) in rm) == ((i & 1) == 0))
+    for i in xrange(5, 10):
+        ensure(fakefctx(genhsh(i)) not in rm)
+        ensure((genhsh(i), None) not in rm)
+
+    # "contains" checks paths
+    rm = revmap.revmap()
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), path=str(i // 2)) == i)
+    for i in xrange(1, 5):
+        ensure(fakefctx(genhsh(i), path=str(i // 2)) in rm)
+        ensure(fakefctx(genhsh(i), path='a') not in rm)
+
+def testlastnode():
+    path = gettemppath()
+    ensure(revmap.getlastnode(path) is None)
+    rm = revmap.revmap(path)
+    ensure(revmap.getlastnode(path) is None)
+    for i in xrange(1, 10):
+        hsh = genhsh(i)
+        rm.append(hsh, path=str(i // 2), flush=True)
+        ensure(revmap.getlastnode(path) == hsh)
+        rm2 = revmap.revmap(path)
+        ensure(rm2.rev2hsh(rm2.maxrev) == hsh)
+
+testbasicreadwrite()
+testcorruptformat()
+testcopyfrom()
+testcontains()
+testlastnode()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,263 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+  $ hg init repo
+  $ cd repo
+
+a simple merge case
+
+  $ echo 1 > a
+  $ hg commit -qAm 'append 1'
+  $ echo 2 >> a
+  $ hg commit -m 'append 2'
+  $ echo 3 >> a
+  $ hg commit -m 'append 3'
+  $ hg up 1 -q
+  $ cat > a << EOF
+  > 0
+  > 1
+  > 2
+  > EOF
+  $ hg commit -qm 'insert 0'
+  $ hg merge 2 -q
+  $ echo 4 >> a
+  $ hg commit -m merge
+  $ hg log -G -T '{rev}: {desc}'
+  @    4: merge
+  |\
+  | o  3: insert 0
+  | |
+  o |  2: append 3
+  |/
+  o  1: append 2
+  |
+  o  0: append 1
+  
+  $ hg fastannotate a
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 0 a
+  0: 1
+  $ hg fastannotate -r 1 a
+  0: 1
+  1: 2
+  $ hg fastannotate -udnclf a
+  test 3 d641cb51f61e Thu Jan 01 00:00:00 1970 +0000 a:1: 0
+  test 0 4994017376d3 Thu Jan 01 00:00:00 1970 +0000 a:1: 1
+  test 1 e940cb6d9a06 Thu Jan 01 00:00:00 1970 +0000 a:2: 2
+  test 2 26162a884ba6 Thu Jan 01 00:00:00 1970 +0000 a:3: 3
+  test 4 3ad7bcd2815f Thu Jan 01 00:00:00 1970 +0000 a:5: 4
+  $ hg fastannotate --linear a
+  3: 0
+  0: 1
+  1: 2
+  4: 3
+  4: 4
+
+incrementally updating
+
+  $ hg fastannotate -r 0 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  $ hg fastannotate -r 0 a --debug --rebuild
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 3 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+
+rebuild happens automatically if unable to update
+
+  $ hg fastannotate -r 2 a --debug
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+
+config option "fastannotate.mainbranch"
+
+  $ hg fastannotate -r 1 --rebuild --config fastannotate.mainbranch=tip a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+
+config option "fastannotate.modes"
+
+  $ hg annotate -r 1 --debug a
+  0: 1
+  1: 2
+  $ hg annotate --config fastannotate.modes=fctx -r 1 --debug a
+  fastannotate: a: using fast path (resolved fctx: False)
+  0: 1
+  1: 2
+  $ hg fastannotate --config fastannotate.modes=fctx -h -q
+  hg: unknown command 'fastannotate'
+  (did you mean *) (glob)
+  [255]
+
+rename
+
+  $ hg mv a b
+  $ cat > b << EOF
+  > 0
+  > 11
+  > 3
+  > 44
+  > EOF
+  $ hg commit -m b -q
+  $ hg fastannotate -ncf --long-hash b
+  3 d641cb51f61e331c44654104301f8154d7865c89 a: 0
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 11
+  2 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a: 3
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 44
+  $ hg fastannotate -r 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a
+  0: 1
+  1: 2
+  2: 3
+
+fastannotate --deleted
+
+  $ hg fastannotate --deleted -nf b
+  3 a:  0
+  5 b:  11
+  0 a: -1
+  1 a: -2
+  2 a:  3
+  5 b:  44
+  4 a: -4
+  $ hg fastannotate --deleted -r 3 -nf a
+  3 a:  0
+  0 a:  1
+  1 a:  2
+
+files and directories with ".l", ".m" suffixes
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ mkdir a.l b.m c.lock a.l.hg b.hg
+  $ for i in a b c d d.l d.m a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a; do
+  >   echo $i > $i
+  > done
+  $ hg add . -q
+  $ hg commit -m init
+  $ hg fastannotate a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a d.l d.m a b c d
+  0: a
+  0: a.l.hg/a
+  0: a.l/a
+  0: b
+  0: b.hg/a
+  0: b.m/a
+  0: c
+  0: c.lock/a
+  0: d
+  0: d.l
+  0: d.m
+
+empty file
+
+  $ touch empty
+  $ hg commit -A empty -m empty
+  $ hg fastannotate empty
+
+json format
+
+  $ hg fastannotate -Tjson -cludn b a empty
+  [
+   {
+    "date": [0.0, 0],
+    "line": "a\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   },
+   {
+    "date": [0.0, 0],
+    "line": "b\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   }
+  ]
+
+  $ hg fastannotate -Tjson -cludn empty
+  [
+  ]
+  $ hg fastannotate -Tjson --no-content -n a
+  [
+   {
+    "rev": 0
+   }
+  ]
+
+working copy
+
+  $ echo a >> a
+  $ hg fastannotate -r 'wdir()' a
+  abort: cannot update linelog to wdir()
+  (set fastannotate.mainbranch)
+  [255]
+  $ cat >> $HGRCPATH << EOF
+  > [fastannotate]
+  > mainbranch = .
+  > EOF
+  $ hg fastannotate -r 'wdir()' a
+  0 : a
+  1+: a
+  $ hg fastannotate -cludn -r 'wdir()' a
+  test 0 1fd620b16252  Thu Jan 01 00:00:00 1970 +0000:1: a
+  test 1 720582f5bdb6+ *:2: a (glob)
+  $ hg fastannotate -cludn -r 'wdir()' -Tjson a
+  [
+   {
+    "date": [0.0, 0],
+    "line": "a\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   },
+   {
+    "date": [*, 0], (glob)
+    "line": "a\n",
+    "line_number": 2,
+    "node": null,
+    "rev": null,
+    "user": "test"
+   }
+  ]
--- a/tests/test-fileset-generated.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-fileset-generated.t	Tue Sep 04 12:16:28 2018 -0400
@@ -187,11 +187,11 @@
   undeleting missing_content2_missing-untracked
 
   $ hg revert 'set:deleted()'
+  forgetting content1_missing_missing-tracked
+  forgetting missing_missing_missing-tracked
   reverting content1_content1_missing-tracked
   reverting content1_content2_missing-tracked
-  forgetting content1_missing_missing-tracked
   reverting missing_content2_missing-tracked
-  forgetting missing_missing_missing-tracked
 
   $ hg revert 'set:unknown()'
 
--- a/tests/test-fileset.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-fileset.t	Tue Sep 04 12:16:28 2018 -0400
@@ -18,13 +18,19 @@
 
   $ fileset -v a1
   (symbol 'a1')
+  * matcher:
+  <patternmatcher patterns='(?:a1$)'>
   a1
   $ fileset -v 'a*'
   (symbol 'a*')
+  * matcher:
+  <patternmatcher patterns='(?:a[^/]*$)'>
   a1
   a2
   $ fileset -v '"re:a\d"'
   (string 're:a\\d')
+  * matcher:
+  <patternmatcher patterns='(?:a\\d)'>
   a1
   a2
   $ fileset -v '!re:"a\d"'
@@ -32,6 +38,10 @@
     (kindpat
       (symbol 're')
       (string 'a\\d')))
+  * matcher:
+  <predicatenmatcher
+    pred=<not
+      <patternmatcher patterns='(?:a\\d)'>>>
   b1
   b2
   $ fileset -v 'path:a1 or glob:b?'
@@ -42,10 +52,12 @@
     (kindpat
       (symbol 'glob')
       (symbol 'b?')))
+  * matcher:
+  <patternmatcher patterns='(?:a1(?:/|$)|b.$)'>
   a1
   b1
   b2
-  $ fileset -v 'a1 or a2'
+  $ fileset -v --no-show-matcher 'a1 or a2'
   (or
     (symbol 'a1')
     (symbol 'a2'))
@@ -97,6 +109,15 @@
       None))
   hg: parse error: can't use negate operator in this context
   [255]
+  $ fileset -p parsed 'a, b, c'
+  * parsed:
+  (list
+    (symbol 'a')
+    (symbol 'b')
+    (symbol 'c'))
+  hg: parse error: can't use a list in this context
+  (see 'hg help "filesets.x or y"')
+  [255]
 
   $ fileset '"path":.'
   hg: parse error: not a symbol
@@ -114,6 +135,183 @@
   hg: parse error: invalid pattern kind: foo
   [255]
 
+Show parsed tree at stages:
+
+  $ fileset -p unknown a
+  abort: invalid stage name: unknown
+  [255]
+
+  $ fileset -p parsed 'path:a1 or glob:b?'
+  * parsed:
+  (or
+    (kindpat
+      (symbol 'path')
+      (symbol 'a1'))
+    (kindpat
+      (symbol 'glob')
+      (symbol 'b?')))
+  a1
+  b1
+  b2
+
+  $ fileset -p all -s 'a1 or a2 or (grep("b") & clean())'
+  * parsed:
+  (or
+    (symbol 'a1')
+    (symbol 'a2')
+    (group
+      (and
+        (func
+          (symbol 'grep')
+          (string 'b'))
+        (func
+          (symbol 'clean')
+          None))))
+  * analyzed:
+  (or
+    (symbol 'a1')
+    (symbol 'a2')
+    (and
+      (func
+        (symbol 'grep')
+        (string 'b'))
+      (withstatus
+        (func
+          (symbol 'clean')
+          None)
+        (string 'clean'))))
+  * optimized:
+  (or
+    (patterns
+      (symbol 'a1')
+      (symbol 'a2'))
+    (and
+      (withstatus
+        (func
+          (symbol 'clean')
+          None)
+        (string 'clean'))
+      (func
+        (symbol 'grep')
+        (string 'b'))))
+  * matcher:
+  <unionmatcher matchers=[
+    <patternmatcher patterns='(?:a1$|a2$)'>,
+    <intersectionmatcher
+      m1=<predicatenmatcher pred=clean>,
+      m2=<predicatenmatcher pred=grep('b')>>]>
+  a1
+  a2
+  b1
+  b2
+
+Union of basic patterns:
+
+  $ fileset -p optimized -s -r. 'a1 or a2 or path:b1'
+  * optimized:
+  (patterns
+    (symbol 'a1')
+    (symbol 'a2')
+    (kindpat
+      (symbol 'path')
+      (symbol 'b1')))
+  * matcher:
+  <patternmatcher patterns='(?:a1$|a2$|b1(?:/|$))'>
+  a1
+  a2
+  b1
+
+OR expression should be reordered by weight:
+
+  $ fileset -p optimized -s -r. 'grep("a") or a1 or grep("b") or b2'
+  * optimized:
+  (or
+    (patterns
+      (symbol 'a1')
+      (symbol 'b2'))
+    (func
+      (symbol 'grep')
+      (string 'a'))
+    (func
+      (symbol 'grep')
+      (string 'b')))
+  * matcher:
+  <unionmatcher matchers=[
+    <patternmatcher patterns='(?:a1$|b2$)'>,
+    <predicatenmatcher pred=grep('a')>,
+    <predicatenmatcher pred=grep('b')>]>
+  a1
+  a2
+  b1
+  b2
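+
+(a toy version of the reordering, with made-up weights; cheap pattern
+ matchers sort before expensive predicates such as grep():
+
+   def weight(term):
+       return 0.5 if term[0] == 'patterns' else 10.0
+   terms = [('func', 'grep', 'a'), ('patterns', 'a1', 'b2'),
+            ('func', 'grep', 'b')]
+   print(sorted(terms, key=weight))
+   # patterns first; the stable sort keeps grep('a') ahead of grep('b')
+
+ which matches the "optimized" tree above)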
+
+Use differencematcher for 'x and not y':
+
+  $ fileset -p optimized -s 'a* and not a1'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (symbol 'a1'))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<patternmatcher patterns='(?:a1$)'>>
+  a2
+
+  $ fileset -p optimized -s '!binary() and a*'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<predicatenmatcher pred=binary>>
+  a1
+  a2
+
+'x - y' is rewritten to 'x and not y' first so the operands can be reordered:
+
+  $ fileset -p analyzed -p optimized -s 'a* - a1'
+  * analyzed:
+  (and
+    (symbol 'a*')
+    (not
+      (symbol 'a1')))
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (symbol 'a1'))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<patternmatcher patterns='(?:a1$)'>>
+  a2
+
+  $ fileset -p analyzed -p optimized -s 'binary() - a*'
+  * analyzed:
+  (and
+    (func
+      (symbol 'binary')
+      None)
+    (not
+      (symbol 'a*')))
+  * optimized:
+  (and
+    (not
+      (symbol 'a*'))
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <intersectionmatcher
+    m1=<predicatenmatcher
+      pred=<not
+        <patternmatcher patterns='(?:a[^/]*$)'>>>,
+    m2=<predicatenmatcher pred=binary>>
+
 Test files status
 
   $ rm a1
@@ -180,6 +378,156 @@
   b2
   c1
 
+Test insertion of status hints
+
+  $ fileset -p optimized 'added()'
+  * optimized:
+  (withstatus
+    (func
+      (symbol 'added')
+      None)
+    (string 'added'))
+  c1
+
+  $ fileset -p optimized 'a* & removed()'
+  * optimized:
+  (and
+    (symbol 'a*')
+    (withstatus
+      (func
+        (symbol 'removed')
+        None)
+      (string 'removed')))
+  a2
+
+  $ fileset -p optimized 'a* - removed()'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (withstatus
+      (func
+        (symbol 'removed')
+        None)
+      (string 'removed')))
+  a1
+
+  $ fileset -p analyzed -p optimized '(added() + removed()) - a*'
+  * analyzed:
+  (and
+    (withstatus
+      (or
+        (func
+          (symbol 'added')
+          None)
+        (func
+          (symbol 'removed')
+          None))
+      (string 'added removed'))
+    (not
+      (symbol 'a*')))
+  * optimized:
+  (and
+    (not
+      (symbol 'a*'))
+    (withstatus
+      (or
+        (func
+          (symbol 'added')
+          None)
+        (func
+          (symbol 'removed')
+          None))
+      (string 'added removed')))
+  c1
+
+  $ fileset -p optimized 'a* + b* + added() + unknown()'
+  * optimized:
+  (withstatus
+    (or
+      (patterns
+        (symbol 'a*')
+        (symbol 'b*'))
+      (func
+        (symbol 'added')
+        None)
+      (func
+        (symbol 'unknown')
+        None))
+    (string 'added unknown'))
+  a1
+  a2
+  b1
+  b2
+  c1
+  c3
+
+  $ fileset -p analyzed -p optimized 'removed() & missing() & a*'
+  * analyzed:
+  (and
+    (withstatus
+      (and
+        (func
+          (symbol 'removed')
+          None)
+        (func
+          (symbol 'missing')
+          None))
+      (string 'removed missing'))
+    (symbol 'a*'))
+  * optimized:
+  (and
+    (symbol 'a*')
+    (withstatus
+      (and
+        (func
+          (symbol 'removed')
+          None)
+        (func
+          (symbol 'missing')
+          None))
+      (string 'removed missing')))
+
+  $ fileset -p optimized 'clean() & revs(0, added())'
+  * optimized:
+  (and
+    (withstatus
+      (func
+        (symbol 'clean')
+        None)
+      (string 'clean'))
+    (func
+      (symbol 'revs')
+      (list
+        (symbol '0')
+        (withstatus
+          (func
+            (symbol 'added')
+            None)
+          (string 'added')))))
+  b1
+
+  $ fileset -p optimized 'clean() & status(null, 0, b* & added())'
+  * optimized:
+  (and
+    (withstatus
+      (func
+        (symbol 'clean')
+        None)
+      (string 'clean'))
+    (func
+      (symbol 'status')
+      (list
+        (symbol 'null')
+        (symbol '0')
+        (and
+          (symbol 'b*')
+          (withstatus
+            (func
+              (symbol 'added')
+              None)
+            (string 'added'))))))
+  b1
+
 Test files properties
 
   >>> open('bin', 'wb').write(b'\0a') and None
@@ -194,6 +542,19 @@
   $ fileset 'binary()'
   bin
 
+  $ fileset -p optimized -s 'binary() and b*'
+  * optimized:
+  (and
+    (symbol 'b*')
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <intersectionmatcher
+    m1=<patternmatcher patterns='(?:b[^/]*$)'>,
+    m2=<predicatenmatcher pred=binary>>
+  bin
+
   $ fileset 'grep("b{1}")'
   .hgignore
   b1
@@ -231,7 +592,7 @@
   [255]
   $ fileset '(1k, 2k)'
   hg: parse error: can't use a list in this context
-  (see hg help "filesets.x or y")
+  (see 'hg help "filesets.x or y"')
   [255]
   $ fileset 'size(1k)'
   1k
--- a/tests/test-fix.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-fix.t	Tue Sep 04 12:16:28 2018 -0400
@@ -502,12 +502,13 @@
 
   $ cd ..
 
-When a fixer prints to stderr, we assume that it has failed. We should show the
-error messages to the user, and we should not let the failing fixer affect the
-file it was fixing (many code formatters might emit error messages on stderr
-and nothing on stdout, which would cause us the clear the file). We show the
-user which fixer failed and which revision, but we assume that the fixer will
-print the filename if it is relevant.
+When a fixer prints to stderr, we don't assume that it has failed. We show the
+error messages to the user, and we still let the fixer affect the file it was
+fixing if its exit code is zero. Some code formatters might emit error messages
+on stderr and nothing on stdout, which would cause us to clear the file,
+except that they also exit with a non-zero code. We show the user which fixer
+emitted the stderr, and which revision, but we assume that the fixer will print
+the filename if it is relevant (since the issue may be non-specific).
 
   $ hg init showstderr
   $ cd showstderr
@@ -515,17 +516,37 @@
   $ printf "hello\n" > hello.txt
   $ hg add
   adding hello.txt
-  $ cat >> $TESTTMP/cmd.sh <<'EOF'
+  $ cat > $TESTTMP/fail.sh <<'EOF'
   > printf 'HELLO\n'
   > printf "$@: some\nerror" >&2
+  > exit 0 # success despite the stderr output
   > EOF
-  $ hg --config "fix.fail:command=sh $TESTTMP/cmd.sh {rootpath}" \
+  $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
   >    --config "fix.fail:fileset=hello.txt" \
   >    fix --working-dir
   [wdir] fail: hello.txt: some
   [wdir] fail: error
   $ cat hello.txt
-  hello
+  HELLO
+
+  $ printf "goodbye\n" > hello.txt
+  $ cat > $TESTTMP/work.sh <<'EOF'
+  > printf 'GOODBYE\n'
+  > printf "$@: some\nerror\n" >&2
+  > exit 42 # failure despite the stdout output
+  > EOF
+  $ hg --config "fix.fail:command=sh $TESTTMP/work.sh {rootpath}" \
+  >    --config "fix.fail:fileset=hello.txt" \
+  >    fix --working-dir
+  [wdir] fail: hello.txt: some
+  [wdir] fail: error
+  $ cat hello.txt
+  goodbye
+
+  $ hg --config "fix.fail:command=exit 42" \
+  >    --config "fix.fail:fileset=hello.txt" \
+  >    fix --working-dir
+  [wdir] fail: exited with status 42
 
   $ cd ..
 
@@ -830,9 +851,9 @@
   
   $ hg fix -r 0:2
   $ hg log --graph --template '{node|shortest} {files}'
-  o  3801 bar.whole
+  o  b4e2 bar.whole
   |
-  o  38cc
+  o  59f4
   |
   | @  bc05 bar.whole
   | |
--- a/tests/test-fncache.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-fncache.t	Tue Sep 04 12:16:28 2018 -0400
@@ -88,6 +88,7 @@
   .hg/00manifest.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache (reporevlogstore !)
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/data
@@ -121,6 +122,7 @@
   .hg/00changelog.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache (reporevlogstore !)
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/dirstate
--- a/tests/test-generaldelta.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-generaldelta.t	Tue Sep 04 12:16:28 2018 -0400
@@ -267,7 +267,7 @@
        51       4        3       50    prev        356        594        611   1.02862       611         0    0.00000
        52       4        4       51      p1         58        640        669   1.04531       669         0    0.00000
        53       5        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       5        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       6        1       -1    base        369        640        369   0.57656       369         0    0.00000
   $ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes
   requesting all changes
   adding changesets
@@ -333,7 +333,7 @@
        51       2       13       17      p1         58        594        739   1.24411      2781      2042    2.76319
        52       5        1       -1    base        369        640        369   0.57656       369         0    0.00000
        53       6        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       6        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       7        1       -1    base        369        640        369   0.57656       369         0    0.00000
   $ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.generaldelta=yes
   requesting all changes
   adding changesets
@@ -399,4 +399,4 @@
        51       2       13       17      p1         58        594        739   1.24411      2642      1903    2.57510
        52       2       14       51      p1         58        640        797   1.24531      2700      1903    2.38770
        53       4        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       4        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       5        1       -1    base        369        640        369   0.57656       369         0    0.00000
--- a/tests/test-glog-beautifygraph.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-glog-beautifygraph.t	Tue Sep 04 12:16:28 2018 -0400
@@ -80,52 +80,8 @@
   >   hg commit -Aqd "$rev 0" -m "($rev) $msg"
   > }
 
-  $ cat > printrevset.py <<EOF
-  > from __future__ import absolute_import
-  > from mercurial import (
-  >   cmdutil,
-  >   commands,
-  >   extensions,
-  >   logcmdutil,
-  >   revsetlang,
-  >   smartset,
-  > )
-  > 
-  > from mercurial.utils import (
-  >   stringutil,
-  > )
-  > 
-  > def logrevset(repo, pats, opts):
-  >     revs = logcmdutil._initialrevs(repo, opts)
-  >     if not revs:
-  >         return None
-  >     match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
-  >     return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
-  > 
-  > def uisetup(ui):
-  >     def printrevset(orig, repo, pats, opts):
-  >         revs, filematcher = orig(repo, pats, opts)
-  >         if opts.get(b'print_revset'):
-  >             expr = logrevset(repo, pats, opts)
-  >             if expr:
-  >                 tree = revsetlang.parse(expr)
-  >                 tree = revsetlang.analyze(tree)
-  >             else:
-  >                 tree = []
-  >             ui = repo.ui
-  >             ui.write(b'%r\n' % (opts.get(b'rev', []),))
-  >             ui.write(revsetlang.prettyformat(tree) + b'\n')
-  >             ui.write(stringutil.prettyrepr(revs) + b'\n')
-  >             revs = smartset.baseset()  # display no revisions
-  >         return revs, filematcher
-  >     extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
-  >     aliases, entry = cmdutil.findcmd(b'log', commands.table)
-  >     entry[1].append((b'', b'print-revset', False,
-  >                      b'print generated revset and exit (DEPRECATED)'))
-  > EOF
-
   $ echo "[extensions]" >> $HGRCPATH
-  $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH
+  $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH
   $ echo "beautifygraph=" >> $HGRCPATH
 
 Set a default of narrow-text UTF-8.
@@ -2043,7 +1999,7 @@
     <spanset- 0:7>,
     <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>>
   $ testlog -r "sort(file('set:copied()'), -rev)"
-  ["sort(file('set:copied()'), -rev)"]
+  ['sort(file(\'set:copied()\'), -rev)']
   []
   <filteredset
     <fullreposet- 0:7>,
--- a/tests/test-glog.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-glog.t	Tue Sep 04 12:16:28 2018 -0400
@@ -81,49 +81,8 @@
   >   hg commit -Aqd "$rev 0" -m "($rev) $msg"
   > }
 
-  $ cat > printrevset.py <<EOF
-  > from __future__ import absolute_import
-  > from mercurial import (
-  >   cmdutil,
-  >   commands,
-  >   extensions,
-  >   logcmdutil,
-  >   revsetlang,
-  >   smartset,
-  > )
-  > from mercurial.utils import stringutil
-  > 
-  > def logrevset(repo, pats, opts):
-  >     revs = logcmdutil._initialrevs(repo, opts)
-  >     if not revs:
-  >         return None
-  >     match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
-  >     return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
-  > 
-  > def uisetup(ui):
-  >     def printrevset(orig, repo, pats, opts):
-  >         revs, filematcher = orig(repo, pats, opts)
-  >         if opts.get(b'print_revset'):
-  >             expr = logrevset(repo, pats, opts)
-  >             if expr:
-  >                 tree = revsetlang.parse(expr)
-  >                 tree = revsetlang.analyze(tree)
-  >             else:
-  >                 tree = []
-  >             ui = repo.ui
-  >             ui.write(b'%r\n' % (opts.get(b'rev', []),))
-  >             ui.write(revsetlang.prettyformat(tree) + b'\n')
-  >             ui.write(stringutil.prettyrepr(revs) + b'\n')
-  >             revs = smartset.baseset()  # display no revisions
-  >         return revs, filematcher
-  >     extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
-  >     aliases, entry = cmdutil.findcmd(b'log', commands.table)
-  >     entry[1].append((b'', b'print-revset', False,
-  >                      b'print generated revset and exit (DEPRECATED)'))
-  > EOF
-
   $ echo "[extensions]" >> $HGRCPATH
-  $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH
+  $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH
 
   $ hg init repo
   $ cd repo
@@ -1890,7 +1849,7 @@
     <spanset- 0:7>,
     <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>>
   $ testlog -r "sort(file('set:copied()'), -rev)"
-  ["sort(file('set:copied()'), -rev)"]
+  ['sort(file(\'set:copied()\'), -rev)']
   []
   <filteredset
     <fullreposet- 0:7>,
--- a/tests/test-graft.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-graft.t	Tue Sep 04 12:16:28 2018 -0400
@@ -237,7 +237,7 @@
   # To mark files as resolved:  hg resolve --mark FILE
   
   # To continue:    hg graft --continue
-  # To abort:       hg update --clean . (warning: this will discard uncommitted changes)
+  # To abort:       hg graft --abort
   
 
 Commit while interrupted should fail:
@@ -699,8 +699,24 @@
   summary:     2
   
 ... grafts of grafts unfortunately can't
-  $ hg graft -q 13
+  $ hg graft -q 13 --debug
+  scanning for duplicate grafts
+  grafting 13:7a4785234d87 "2"
+    searching for copies back to rev 12
+    unmatched files in other (from topological common ancestor):
+     g
+    unmatched files new in both:
+     b
+  resolving manifests
+   branchmerge: True, force: True, partial: False
+   ancestor: b592ea63bb0c, local: 7e61b508e709+, remote: 7a4785234d87
+  starting 4 threads for background file closing (?)
+  committing files:
+  b
   warning: can't find ancestor for 'b' copied from 'a'!
+  reusing manifest form p1 (listed files actually unchanged)
+  committing changelog
+  updating the branch cache
   $ hg log -r 'destination(13)'
 All copies of a cset
   $ hg log -r 'origin(13) or destination(origin(13))'
@@ -731,7 +747,7 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     2
   
-  changeset:   22:d1cb6591fa4b
+  changeset:   22:3a4e92d81b97
   branch:      dev
   tag:         tip
   user:        foo
@@ -743,8 +759,8 @@
 
   $ hg graft 'origin(13) or destination(origin(13))'
   skipping ancestor revision 21:7e61b508e709
-  skipping ancestor revision 22:d1cb6591fa4b
-  skipping revision 2:5c095ad7e90f (already grafted to 22:d1cb6591fa4b)
+  skipping ancestor revision 22:3a4e92d81b97
+  skipping revision 2:5c095ad7e90f (already grafted to 22:3a4e92d81b97)
   grafting 7:ef0ef43d49e7 "2"
   warning: can't find ancestor for 'b' copied from 'a'!
   grafting 13:7a4785234d87 "2"
@@ -758,7 +774,7 @@
   $ hg graft 19 0 6
   skipping ungraftable merge revision 6
   skipping ancestor revision 0:68795b066622
-  skipping already grafted revision 19:9627f653b421 (22:d1cb6591fa4b also has origin 2:5c095ad7e90f)
+  skipping already grafted revision 19:9627f653b421 (22:3a4e92d81b97 also has origin 2:5c095ad7e90f)
   [255]
   $ hg graft 19 0 6 --force
   skipping ungraftable merge revision 6
@@ -773,12 +789,12 @@
   $ hg ci -m 28
   $ hg backout 28
   reverting a
-  changeset 29:53177ba928f6 backs out changeset 28:50a516bb8b57
+  changeset 29:9d95e865b00c backs out changeset 28:cc20d29aec8d
   $ hg graft 28
-  skipping ancestor revision 28:50a516bb8b57
+  skipping ancestor revision 28:cc20d29aec8d
   [255]
   $ hg graft 28 --force
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   merging a
   $ cat a
   abc
@@ -788,7 +804,7 @@
   $ echo def > a
   $ hg ci -m 31
   $ hg graft 28 --force --tool internal:fail
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   abort: unresolved conflicts, can't continue
   (use 'hg resolve' and 'hg graft --continue')
   [255]
@@ -801,7 +817,7 @@
   (no more unresolved files)
   continue: hg graft --continue
   $ hg graft -c
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   $ cat a
   abc
 
@@ -822,8 +838,8 @@
   $ hg tag -f something
   $ hg graft -qr 27
   $ hg graft -f 27
-  grafting 27:ed6c7e54e319 "28"
-  note: graft of 27:ed6c7e54e319 created no changes to commit
+  grafting 27:17d42b8f5d50 "28"
+  note: graft of 27:17d42b8f5d50 created no changes to commit
 
   $ cd ..
 
--- a/tests/test-grep.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-grep.t	Tue Sep 04 12:16:28 2018 -0400
@@ -18,7 +18,7 @@
 pattern error
 
   $ hg grep '**test**'
-  grep: invalid match pattern: nothing to repeat
+  grep: invalid match pattern: nothing to repeat* (glob)
   [1]
 
 simple
@@ -43,17 +43,17 @@
 simple templated
 
   $ hg grep port -r tip:0 \
-  > -T '{file}:{rev}:{node|short}:{texts % "{if(matched, text|upper, text)}"}\n'
+  > -T '{path}:{rev}:{node|short}:{texts % "{if(matched, text|upper, text)}"}\n'
   port:4:914fa752cdea:exPORT
   port:4:914fa752cdea:vaPORTight
   port:4:914fa752cdea:imPORT/exPORT
 
-  $ hg grep port -r tip:0 -T '{file}:{rev}:{texts}\n'
+  $ hg grep port -r tip:0 -T '{path}:{rev}:{texts}\n'
   port:4:export
   port:4:vaportight
   port:4:import/export
 
-  $ hg grep port -r tip:0 -T '{file}:{tags}:{texts}\n'
+  $ hg grep port -r tip:0 -T '{path}:{tags}:{texts}\n'
   port:tip:export
   port:tip:vaportight
   port:tip:import/export
@@ -64,27 +64,27 @@
   [
    {
     "date": [4, 0],
-    "file": "port",
     "line_number": 1,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
    },
    {
     "date": [4, 0],
-    "file": "port",
     "line_number": 2,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
     "user": "spam"
    },
    {
     "date": [4, 0],
-    "file": "port",
     "line_number": 3,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -97,9 +97,9 @@
   [
    {
     "date": [4, 0],
-    "file": "port",
     "line_number": 1,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "user": "spam"
    }
@@ -125,9 +125,9 @@
    {
     "change": "-",
     "date": [4, 0],
-    "file": "port",
     "line_number": 4,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -135,9 +135,9 @@
    {
     "change": "+",
     "date": [3, 0],
-    "file": "port",
     "line_number": 4,
     "node": "95040cfd017d658c536071c6290230a613c4c2a6",
+    "path": "port",
     "rev": 3,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "eggs"
@@ -145,9 +145,9 @@
    {
     "change": "-",
     "date": [2, 0],
-    "file": "port",
     "line_number": 1,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -155,9 +155,9 @@
    {
     "change": "-",
     "date": [2, 0],
-    "file": "port",
     "line_number": 2,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -165,9 +165,9 @@
    {
     "change": "+",
     "date": [2, 0],
-    "file": "port",
     "line_number": 1,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -175,9 +175,9 @@
    {
     "change": "+",
     "date": [2, 0],
-    "file": "port",
     "line_number": 2,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
     "user": "spam"
@@ -185,9 +185,9 @@
    {
     "change": "+",
     "date": [2, 0],
-    "file": "port",
     "line_number": 3,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -195,9 +195,9 @@
    {
     "change": "+",
     "date": [1, 0],
-    "file": "port",
     "line_number": 2,
     "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
+    "path": "port",
     "rev": 1,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "eggs"
@@ -205,9 +205,9 @@
    {
     "change": "+",
     "date": [0, 0],
-    "file": "port",
     "line_number": 1,
     "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
+    "path": "port",
     "rev": 0,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -481,9 +481,9 @@
   [
    {
     "date": [0, 0],
-    "file": "file2",
     "line_number": 1,
     "node": "ffffffffffffffffffffffffffffffffffffffff",
+    "path": "file2",
     "rev": 2147483647,
     "texts": [{"matched": true, "text": "some"}, {"matched": false, "text": " text"}],
     "user": "test"
@@ -491,3 +491,17 @@
   ]
 
   $ cd ..
+
+test -rMULTIREV with --all-files
+
+  $ cd sng
+  $ hg rm um
+  $ hg commit -m "deletes um"
+  $ hg grep -r "0:2" "unmod" --all-files
+  um:0:unmod
+  um:1:unmod
+  $ hg grep -r "0:2" "unmod" --all-files um
+  um:0:unmod
+  um:1:unmod
+  $ cd ..
+
--- a/tests/test-hardlinks.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-hardlinks.t	Tue Sep 04 12:16:28 2018 -0400
@@ -241,6 +241,7 @@
   2 r4/.hg/cache/checkisexec (execbit !)
   ? r4/.hg/cache/checklink-target (glob) (symlink !)
   2 r4/.hg/cache/checknoexec (execbit !)
+  2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/dirstate
@@ -291,6 +292,7 @@
   2 r4/.hg/cache/checkisexec (execbit !)
   2 r4/.hg/cache/checklink-target (symlink !)
   2 r4/.hg/cache/checknoexec (execbit !)
+  2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   1 r4/.hg/dirstate
--- a/tests/test-help.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-help.t	Tue Sep 04 12:16:28 2018 -0400
@@ -652,29 +652,7 @@
 
   $ hg skjdfks
   hg: unknown command 'skjdfks'
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help' for a list of commands)
   [255]
 
 Typoed command gives suggestion
@@ -960,12 +938,15 @@
                  retrieves a bundle from a repo
    debugignore   display the combined ignore pattern and information about
                  ignored files
-   debugindex    dump the contents of an index file
+   debugindex    dump index data for a storage primitive
    debugindexdot
                  dump an index DAG as a graphviz dot file
    debuginstall  test Mercurial installation
    debugknown    test whether node ids are known to a repo
    debuglocks    show or modify state of locks
+   debugmanifestfulltextcache
+                 show, clear or amend the contents of the manifest fulltext
+                 cache
    debugmergestate
                  print merge state
    debugnamecomplete
@@ -989,6 +970,8 @@
                  rebuild the fncache file
    debugrename   dump rename information
    debugrevlog   show data and statistics about a revlog
+   debugrevlogindex
+                 dump the contents of a revlog index
    debugrevspec  parse and apply a revision specification
    debugserve    run a server with advanced settings
    debugsetparents
@@ -1027,6 +1010,7 @@
   
        bundle2       Bundle2
        bundles       Bundles
+       cbor          CBOR
        censor        Censor
        changegroups  Changegroups
        config        Config Registrar
@@ -1357,6 +1341,55 @@
       "smtp.host"
           Host name of mail server, e.g. "mail.example.com".
   
+
+Test section name with dot
+
+  $ hg help config.ui.username
+      "ui.username"
+          The committer of a changeset created when running "commit". Typically
+          a person's name and email address, e.g. "Fred Widget
+          <fred@example.com>". Environment variables in the username are
+          expanded.
+  
+          (default: "$EMAIL" or "username@hostname". If the username in hgrc is
+          empty, e.g. if the system admin set "username =" in the system hgrc,
+          it has to be specified manually or in a different hgrc file)
+  
+
+  $ hg help config.annotate.git
+  abort: help section not found: config.annotate.git
+  [255]
+
+  $ hg help config.update.check
+      "commands.update.check"
+          Determines what level of checking 'hg update' will perform before
+          moving to a destination revision. Valid values are "abort", "none",
+          "linear", and "noconflict". "abort" always fails if the working
+          directory has uncommitted changes. "none" performs no checking, and
+          may result in a merge with uncommitted changes. "linear" allows any
+          update as long as it follows a straight line in the revision history,
+          and may trigger a merge with uncommitted changes. "noconflict" will
+          allow any update which would not trigger a merge with uncommitted
+          changes, if any are present. (default: "linear")
+  
+
+  $ hg help config.commands.update.check
+      "commands.update.check"
+          Determines what level of checking 'hg update' will perform before
+          moving to a destination revision. Valid values are "abort", "none",
+          "linear", and "noconflict". "abort" always fails if the working
+          directory has uncommitted changes. "none" performs no checking, and
+          may result in a merge with uncommitted changes. "linear" allows any
+          update as long as it follows a straight line in the revision history,
+          and may trigger a merge with uncommitted changes. "noconflict" will
+          allow any update which would not trigger a merge with uncommitted
+          changes, if any are present. (default: "linear")
+  
+
+  $ hg help config.ommands.update.check
+  abort: help section not found: config.ommands.update.check
+  [255]
+
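
Since the help text above enumerates the legal values of commands.update.check, here is a
minimal sketch of reading the setting programmatically (ui.config and ui.load are real
Mercurial API; the surrounding scaffolding is illustrative only):

    from mercurial import ui as uimod

    ui = uimod.ui.load()
    # one of b'abort', b'none', b'linear', or b'noconflict'
    check = ui.config(b'commands', b'update.check', default=b'linear')
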
 Unrelated trailing paragraphs shouldn't be included
 
   $ hg help config.extramsg | grep '^$'
@@ -1377,6 +1410,14 @@
   $ hg help config.type | egrep '^$'|wc -l
   \s*3 (re)
 
+  $ hg help config.profiling.type.ls
+          "profiling.type.ls"
+            Use Python's built-in instrumenting profiler. This profiler works on
+            all platforms, but each line number it reports is the first line of
+            a function. This restriction makes it difficult to identify the
+            expensive parts of a non-trivial function.
+  
+
 Separate sections from subsections
 
   $ hg help config.format | egrep '^    ("|-)|^\s*$' | uniq
@@ -1499,7 +1540,7 @@
   Commands:
   $ hg help -c commit > /dev/null
   $ hg help -e -c commit > /dev/null
-  $ hg help -e commit > /dev/null
+  $ hg help -e commit
   abort: no such help topic: commit
   (try 'hg help --keyword commit')
   [255]
@@ -1848,18 +1889,26 @@
         This implies premerge. Therefore, files aren't dumped, if premerge runs
         successfully. Use :forcedump to forcibly write files out.
   
+        (actual capabilities: binary, symlink)
+  
       ":fail"
         Rather than attempting to merge files that were modified on both
         branches, it marks them as unresolved. The resolve command must be used
         to resolve these conflicts.
   
+        (actual capabilities: binary, symlink)
+  
       ":forcedump"
        Creates three versions of the files, the same as :dump, but omits
         premerge.
   
+        (actual capabilities: binary, symlink)
+  
       ":local"
         Uses the local 'p1()' version of files as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":merge"
         Uses the internal non-interactive simple merge algorithm for merging
         files. It will fail if there are any conflicts and leave markers in the
@@ -1883,10 +1932,14 @@
       ":other"
         Uses the other 'p2()' version of files as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":prompt"
         Asks the user which of the local 'p1()' or the other 'p2()' version to
         keep as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":tagmerge"
         Uses the internal tag merge algorithm (experimental).
   
@@ -1896,7 +1949,8 @@
         markers are inserted.
   
       Internal tools are always available and do not require a GUI but will by
-      default not handle symlinks or binary files.
+      default not handle symlinks or binary files. See the next section for
+      details about the "actual capabilities" described above.
   
       Choosing a merge tool
       =====================
@@ -1911,8 +1965,7 @@
          must be executable by the shell.
       3. If the filename of the file to be merged matches any of the patterns in
          the merge-patterns configuration section, the first usable merge tool
-         corresponding to a matching pattern is used. Here, binary capabilities
-         of the merge tool are not considered.
+         corresponding to a matching pattern is used.
       4. If ui.merge is set it will be considered next. If the value is not the
          name of a configured tool, the specified value is used and must be
          executable by the shell. Otherwise the named tool is used if it is
@@ -1925,6 +1978,27 @@
          internal ":merge" is used.
       8. Otherwise, ":prompt" is used.
   
+      For historical reasons, Mercurial treats merge tools as below while
+      examining the rules above.
+  
+      step specified via  binary symlink
+      ----------------------------------
+      1.   --tool         o/o    o/o
+      2.   HGMERGE        o/o    o/o
+      3.   merge-patterns o/o(*) x/?(*)
+      4.   ui.merge       x/?(*) x/?(*)
+  
+      Each capability column indicates Mercurial's behavior for internal/external
+      merge tools when examining each rule.
+  
+      - "o": "assume that a tool has capability"
+      - "x": "assume that a tool does not have capability"
+      - "?": "check actual capability of a tool"
+  
+      If "merge.strict-capability-check" configuration is true, Mercurial checks
+      capabilities of merge tools strictly in (*) cases above (= each capability
+      column becomes "?/?"). It is false by default for backward compatibility.
+  
       Note:
          After selecting a merge program, Mercurial will by default attempt to
          merge the files using a simple merge algorithm first. Only if it
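
The capability table above compresses a lot, so here is a minimal Python sketch of the
policy it describes (illustrative only; the helper and its names are made up, not
Mercurial's actual filemerge code):

    # 'o' = assume capable, 'x' = assume not capable, '?' = query the tool.
    # Each cell is (internal tool, external tool), matching the table.
    CAPTABLE = {
        #                  binary        symlink
        b'--tool':         (('o', 'o'), ('o', 'o')),
        b'HGMERGE':        (('o', 'o'), ('o', 'o')),
        b'merge-patterns': (('o', 'o'), ('x', '?')),  # (*) cells
        b'ui.merge':       (('x', '?'), ('x', '?')),  # (*) cells
    }

    def assumedcapability(step, capability, internal, actualcheck,
                          strict=False):
        bincell, lnkcell = CAPTABLE[step]
        cell = bincell if capability == b'binary' else lnkcell
        mark = cell[0] if internal else cell[1]
        if strict and step in (b'merge-patterns', b'ui.merge'):
            mark = '?'  # strict-capability-check: (*) columns become "?/?"
        if mark == 'o':
            return True   # assume the tool is capable
        if mark == 'x':
            return False  # assume the tool is not capable
        return actualcheck()  # '?': probe the tool's real capability

For example, assumedcapability(b'ui.merge', b'symlink', internal=False,
actualcheck=lambda: True) reproduces the external "?" cell by deferring to the probe,
while the same call with internal=True returns False, matching the "x" cell.
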
@@ -3221,6 +3295,13 @@
   Bundles
   </td></tr>
   <tr><td>
+  <a href="/help/internals.cbor">
+  cbor
+  </a>
+  </td><td>
+  CBOR
+  </td></tr>
+  <tr><td>
   <a href="/help/internals.censor">
   censor
   </a>
--- a/tests/test-http-api-httpv2.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-http-api-httpv2.t	Tue Sep 04 12:16:28 2018 -0400
@@ -210,7 +210,12 @@
   received frame(size=42; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: [{b'status': b'ok'}, b'customreadonly bytes response']
+  response: [
+    {
+      b'status': b'ok'
+    },
+    b'customreadonly bytes response'
+  ]
 
 Request to read-write command fails because server is read-only by default
 
--- a/tests/test-http-protocol.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-http-protocol.t	Tue Sep 04 12:16:28 2018 -0400
@@ -213,7 +213,11 @@
   s>     bookmarks\t\n
   s>     namespaces\t\n
   s>     phases\t
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
 
 Same thing, but with "httprequest" command
 
@@ -279,7 +283,9 @@
   s>     Content-Length: 41\r\n
   s>     \r\n
   s>     0000000000000000000000000000000000000000\n
-  response: [b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00']
+  response: [
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+  ]
 
   $ killdaemons.py
   $ enablehttpv2 empty
@@ -332,7 +338,9 @@
   received frame(size=33; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: [b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00']
+  response: [
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+  ]
 
   $ killdaemons.py
 
@@ -463,7 +471,9 @@
   s>     Content-Length: 41\r\n
   s>     \r\n
   s>     96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
-  response: [b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL']
+  response: [
+    b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
+  ]
 
   $ killdaemons.py
 
@@ -725,4 +735,6 @@
   s>     Content-Length: 41\r\n
   s>     \r\n
   s>     96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
-  response: [b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL']
+  response: [
+    b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
+  ]
--- a/tests/test-http.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-http.t	Tue Sep 04 12:16:28 2018 -0400
@@ -476,7 +476,7 @@
 #endif
 
 ... and also keep partial clones and pulls working
-  $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
+  $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone
   adding changesets
   adding manifests
   adding file changes
@@ -484,7 +484,7 @@
   new changesets 8b6053c928fe
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg pull -R test-partial-clone
+  $ hg pull -R test/partial/clone
   pulling from http://localhost:$HGPORT1/
   searching for changes
   adding changesets
@@ -494,6 +494,13 @@
   new changesets 5fed3813f7f5:56f9bc90cce6
   (run 'hg update' to get a working copy)
 
+  $ hg clone -U -r 0 test/partial/clone test/another/clone
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 4 changes to 4 files
+  new changesets 8b6053c928fe
+
 corrupt cookies file should yield a warning
 
   $ cat > $TESTTMP/cookies.txt << EOF
--- a/tests/test-import-git.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-import-git.t	Tue Sep 04 12:16:28 2018 -0400
@@ -615,8 +615,8 @@
 Prefix with strip, renames, creates etc
 
   $ hg revert -aC
+  forgetting b
   undeleting a
-  forgetting b
   $ rm b
   $ mkdir -p dir/dir2
   $ echo b > dir/dir2/b
@@ -715,10 +715,10 @@
 
   $ hg revert -aC
   forgetting dir/a
+  forgetting dir/dir2/b2
+  reverting dir/dir2/c
   undeleting dir/d
   undeleting dir/dir2/b
-  forgetting dir/dir2/b2
-  reverting dir/dir2/c
   $ rm dir/a dir/dir2/b2
   $ hg import --similarity 90 --no-commit - <<EOF
   > diff --git a/a b/b
--- a/tests/test-import.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-import.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1014,8 +1014,8 @@
     a
   R a
   $ hg revert -a
+  forgetting b
   undeleting a
-  forgetting b
   $ cat b
   mod b
   $ rm b
--- a/tests/test-inherit-mode.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-inherit-mode.t	Tue Sep 04 12:16:28 2018 -0400
@@ -69,6 +69,7 @@
   00600 ./.hg/00changelog.i
   00770 ./.hg/cache/
   00660 ./.hg/cache/branch2-served
+  00660 ./.hg/cache/manifestfulltextcache (reporevlogstore !)
   00660 ./.hg/cache/rbc-names-v1
   00660 ./.hg/cache/rbc-revs-v1
   00660 ./.hg/dirstate
--- a/tests/test-issue660.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-issue660.t	Tue Sep 04 12:16:28 2018 -0400
@@ -66,9 +66,9 @@
 Revert all - should succeed:
 
   $ hg revert --all
-  undeleting a
   forgetting a/a
   forgetting b
+  undeleting a
   undeleting b/b
 
   $ hg st
--- a/tests/test-largefiles-misc.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-largefiles-misc.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1135,8 +1135,8 @@
   ? large.orig
 
   $ hg revert --all
+  forgetting .hglf/dir/subdir2/large.bin
   undeleting .hglf/dir/subdir/large.bin
-  forgetting .hglf/dir/subdir2/large.bin
   reverting subrepo no-largefiles
 
   $ hg status -C
@@ -1214,8 +1214,8 @@
   large
 
   $ hg revert --all
+  forgetting .hglf/dir2/subdir/large.bin
   undeleting .hglf/dir/subdir/large.bin
-  forgetting .hglf/dir2/subdir/large.bin
   reverting subrepo no-largefiles
 
   $ hg status -C
--- a/tests/test-largefiles-update.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-largefiles-update.t	Tue Sep 04 12:16:28 2018 -0400
@@ -611,7 +611,8 @@
   > EOF
   rebasing 1:72518492caa6 "#1"
   rebasing 4:07d6153b5c04 "#4"
-  local [dest] changed .hglf/large1 which other [source] deleted
+  file '.hglf/large1' was deleted in other [source] but was modified in local [dest].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? c
 
   $ hg diff -c "tip~1" --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
--- a/tests/test-largefiles.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-largefiles.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1513,9 +1513,9 @@
   $ cat sub/large4
   large4-modified
   $ hg revert -a --no-backup
-  undeleting .hglf/sub2/large6
   forgetting .hglf/sub2/large8
   reverting normal3
+  undeleting .hglf/sub2/large6
   $ hg status
   ? sub/large4.orig
   ? sub/normal4.orig
--- a/tests/test-lfconvert.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-lfconvert.t	Tue Sep 04 12:16:28 2018 -0400
@@ -101,6 +101,7 @@
   largefiles
   revlogv1
   store
+  testonly-simplestore (reposimplestore !)
 
 "lfconvert" includes a newline at the end of the standin files.
   $ cat .hglf/large .hglf/sub/maybelarge.dat
--- a/tests/test-lfs-test-server.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-lfs-test-server.t	Tue Sep 04 12:16:28 2018 -0400
@@ -694,10 +694,6 @@
   $ rm *
   $ hg revert --all -r 1 --debug
   http auth: user foo, password ***
-  adding a
-  reverting b
-  reverting c
-  reverting d
   http auth: user foo, password ***
   Status: 200
   Content-Length: 905 (git-server !)
@@ -778,9 +774,13 @@
   lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
   lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
   lfs: downloaded 3 files (51 bytes)
+  reverting b
   lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+  reverting c
   lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+  reverting d
   lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
+  adding a
   lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
 
 Check error message when the remote missed a blob:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-linelog.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,193 @@
+from __future__ import absolute_import, print_function
+
+import difflib
+import random
+import unittest
+
+from mercurial import linelog
+
+vecratio = 3 # number of replacelines / number of replacelines_vec
+maxlinenum = 0xffffff
+maxb1 = 0xffffff
+maxdeltaa = 10
+maxdeltab = 10
+
+def _genedits(seed, endrev):
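+    # Produce a reproducible pseudo-random stream of edits. Each item is
+    # (lines, rev, a1, a2, b1, b2, blines, usevec): replace lines[a1:a2]
+    # with blines (the b-range b1:b2) at revision rev. 'lines' is the
+    # expected annotate state after the edit; usevec selects whether the
+    # caller should use replacelines_vec() instead of replacelines().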
+    lines = []
+    random.seed(seed)
+    rev = 0
+    for rev in range(0, endrev):
+        n = len(lines)
+        a1 = random.randint(0, n)
+        a2 = random.randint(a1, min(n, a1 + maxdeltaa))
+        b1 = random.randint(0, maxb1)
+        b2 = random.randint(b1, b1 + maxdeltab)
+        usevec = not bool(random.randint(0, vecratio))
+        if usevec:
+            blines = [(random.randint(0, rev), random.randint(0, maxlinenum))
+                      for _ in range(b1, b2)]
+        else:
+            blines = [(rev, bidx) for bidx in range(b1, b2)]
+        lines[a1:a2] = blines
+        yield lines, rev, a1, a2, b1, b2, blines, usevec
+
+class linelogtests(unittest.TestCase):
+    def testlinelogencodedecode(self):
+        program = [linelog._eof(0, 0),
+                   linelog._jge(41, 42),
+                   linelog._jump(0, 43),
+                   linelog._eof(0, 0),
+                   linelog._jl(44, 45),
+                   linelog._line(46, 47),
+                   ]
+        ll = linelog.linelog(program, maxrev=100)
+        enc = ll.encode()
+        # round-trips okay
+        self.assertEqual(linelog.linelog.fromdata(enc)._program, ll._program)
+        self.assertEqual(linelog.linelog.fromdata(enc), ll)
+        # This encoding matches (or at least is supposed to match) the
+        # encoding used by hg-experimental's linelog file.
+        self.assertEqual(enc, (b'\x00\x00\x01\x90\x00\x00\x00\x06'
+                               b'\x00\x00\x00\xa4\x00\x00\x00*'
+                               b'\x00\x00\x00\x00\x00\x00\x00+'
+                               b'\x00\x00\x00\x00\x00\x00\x00\x00'
+                               b'\x00\x00\x00\xb1\x00\x00\x00-'
+                               b'\x00\x00\x00\xba\x00\x00\x00/'))
+
+    def testsimpleedits(self):
+        ll = linelog.linelog()
+        # Initial revision: add lines 0, 1, and 2
+        ll.replacelines(1, 0, 0, 0, 3)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        # Replace line 1 with a new line
+        ll.replacelines(2, 1, 2, 1, 2)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
+                         [(1, 0),
+                          (2, 1),
+                          (1, 2),
+                         ])
+        # rev 3: delete the line introduced in rev 2
+        ll.replacelines(3, 1, 2, 0, 0)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
+                         [(1, 0),
+                          (1, 2),
+                         ])
+        # annotation of 1 is unchanged
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        ll.annotate(3) # set internal state to revision 3
+        start = ll.getoffset(0)
+        end = ll.getoffset(1)
+        self.assertEqual(ll.getalllines(start, end), [
+            (1, 0),
+            (2, 1),
+            (1, 1),
+        ])
+        self.assertEqual(ll.getalllines(), [
+            (1, 0),
+            (2, 1),
+            (1, 1),
+            (1, 2),
+        ])
+
+    def testparseclinelogfile(self):
+        # This data is what the replacements in testsimpleedits
+        # produce when fed to the original linelog.c implementation.
+        data = (b'\x00\x00\x00\x0c\x00\x00\x00\x0f'
+                b'\x00\x00\x00\x00\x00\x00\x00\x02'
+                b'\x00\x00\x00\x05\x00\x00\x00\x06'
+                b'\x00\x00\x00\x06\x00\x00\x00\x00'
+                b'\x00\x00\x00\x00\x00\x00\x00\x07'
+                b'\x00\x00\x00\x06\x00\x00\x00\x02'
+                b'\x00\x00\x00\x00\x00\x00\x00\x00'
+                b'\x00\x00\x00\t\x00\x00\x00\t'
+                b'\x00\x00\x00\x00\x00\x00\x00\x0c'
+                b'\x00\x00\x00\x08\x00\x00\x00\x05'
+                b'\x00\x00\x00\x06\x00\x00\x00\x01'
+                b'\x00\x00\x00\x00\x00\x00\x00\x05'
+                b'\x00\x00\x00\x0c\x00\x00\x00\x05'
+                b'\x00\x00\x00\n\x00\x00\x00\x01'
+                b'\x00\x00\x00\x00\x00\x00\x00\t')
+        llc = linelog.linelog.fromdata(data)
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(2)],
+                         [(1, 0),
+                          (2, 1),
+                          (1, 2),
+                         ])
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(3)],
+                         [(1, 0),
+                          (1, 2),
+                         ])
+        # Check we emit the same bytecode.
+        ll = linelog.linelog()
+        # Initial revision: add lines 0, 1, and 2
+        ll.replacelines(1, 0, 0, 0, 3)
+        # Replace line 1 with a new line
+        ll.replacelines(2, 1, 2, 1, 2)
+        # rev 3: delete the line introduced in rev 2
+        ll.replacelines(3, 1, 2, 0, 0)
+        diff = '\n   ' + '\n   '.join(difflib.unified_diff(
+            ll.debugstr().splitlines(), llc.debugstr().splitlines(),
+            'python', 'c', lineterm=''))
+        self.assertEqual(ll._program, llc._program, 'Program mismatch: ' + diff)
+        # Done as a secondary step so we get a better result if the
+        # program is where the mismatch is.
+        self.assertEqual(ll, llc)
+        self.assertEqual(ll.encode(), data)
+
+    def testanothersimplecase(self):
+        ll = linelog.linelog()
+        ll.replacelines(3, 0, 0, 0, 2)
+        ll.replacelines(4, 0, 2, 0, 0)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(4)],
+                         [])
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
+                         [(3, 0), (3, 1)])
+        # rev 2 is empty because contents were only ever introduced in rev 3
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
+                         [])
+
+    def testrandomedits(self):
+        # Inspired by original linelog tests.
+        seed = random.random()
+        numrevs = 2000
+        ll = linelog.linelog()
+        # Populate linelog
+        for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
+                seed, numrevs):
+            if usevec:
+                ll.replacelines_vec(rev, a1, a2, blines)
+            else:
+                ll.replacelines(rev, a1, a2, b1, b2)
+            ar = ll.annotate(rev)
+            self.assertEqual(ll.annotateresult, lines)
+        # Verify we can get back these states by annotating each rev
+        for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
+                seed, numrevs):
+            ar = ll.annotate(rev)
+            self.assertEqual([(l.rev, l.linenum) for l in ar], lines)
+
+    def testinfinitebadprogram(self):
+        ll = linelog.linelog.fromdata(
+            b'\x00\x00\x00\x00\x00\x00\x00\x02'  # header
+            b'\x00\x00\x00\x00\x00\x00\x00\x01'  # JUMP to self
+        )
+        with self.assertRaises(linelog.LineLogError):
+            # should not be an infinite loop and raise
+            ll.annotate(1)
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
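
A compact view of the core linelog API the new test exercises (every call below also
appears in the test above, so this is a summary rather than new API):

    from mercurial import linelog

    ll = linelog.linelog()
    ll.replacelines(1, 0, 0, 0, 2)  # rev 1: insert two lines at the top
    ll.replacelines(2, 1, 2, 1, 2)  # rev 2: replace line 1 with a new line
    ar = ll.annotate(2)             # lines visible as of rev 2
    assert [(l.rev, l.linenum) for l in ar] == [(1, 0), (2, 1)]
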
--- a/tests/test-locate.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-locate.t	Tue Sep 04 12:16:28 2018 -0400
@@ -158,7 +158,7 @@
 
 Convert native path separator to slash (issue5572)
 
-  $ hg files -T '{path|slashpath}\n'
+  $ hg files -T '{path|relpath|slashpath}\n'
   ../b
   ../dir.h/foo
   ../t.h
--- a/tests/test-logexchange.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-logexchange.t	Tue Sep 04 12:16:28 2018 -0400
@@ -77,10 +77,8 @@
 
   $ hg show work
   o  3e14 (wat) (default/wat) added bar
-  |
   ~
   @  ec24 (default/default) Added h
-  |
   ~
 
   $ hg update "default/wat"
--- a/tests/test-match.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-match.py	Tue Sep 04 12:16:28 2018 -0400
@@ -6,14 +6,832 @@
 
 from mercurial import (
     match as matchmod,
+    util,
 )
 
+class BaseMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.basematcher(b'', b'')
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.basematcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+
+class AlwaysMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.alwaysmatcher(b'', b'')
+        self.assertEqual(m.visitdir(b'.'), b'all')
+        self.assertEqual(m.visitdir(b'dir'), b'all')
+
+    def testVisitchildrenset(self):
+        m = matchmod.alwaysmatcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), b'all')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'all')
+
 class NeverMatcherTests(unittest.TestCase):
 
     def testVisitdir(self):
-        m = matchmod.nevermatcher('', '')
-        self.assertFalse(m.visitdir('.'))
-        self.assertFalse(m.visitdir('dir'))
+        m = matchmod.nevermatcher(b'', b'')
+        self.assertFalse(m.visitdir(b'.'))
+        self.assertFalse(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.nevermatcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), set())
+
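+# The contract these tests pin down: visitchildrenset() returns set() to
+# prune a subtree, b'all' to take everything below a directory, b'this' to
+# visit a directory with no hint about its children, or a set of child
+# names worth recursing into. The walker below is a hypothetical consumer
+# of that protocol, included for illustration only; it is not used by the
+# test cases in this file.
+def _walkexample(listing, m, dirpath=b'.'):
+    # 'listing' maps a directory path to [(name, isdir), ...] entries.
+    vcs = m.visitchildrenset(dirpath)
+    if vcs == set():
+        return  # prune: nothing under dirpath can match
+    for name, isdir in listing.get(dirpath, ()):
+        if isinstance(vcs, set) and name not in vcs:
+            continue  # skip children outside the hinted set
+        path = name if dirpath == b'.' else dirpath + b'/' + name
+        if isdir:
+            for p in _walkexample(listing, m, path):
+                yield p
+        elif vcs == b'all' or m(path):
+            yield path
+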
+class PredicateMatcherTests(unittest.TestCase):
+    # predicatematcher does not currently define either of these methods, so
+    # this is equivalent to BaseMatcherTests.
+
+    def testVisitdir(self):
+        m = matchmod.predicatematcher(b'', b'', lambda *a: False)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.predicatematcher(b'', b'', lambda *a: False)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+
+class PatternMatcherTests(unittest.TestCase):
+
+    def testVisitdirPrefix(self):
+        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertEqual(m.visitdir(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetPrefix(self):
+        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirRootfilesin(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # FIXME: These should probably be True.
+        self.assertFalse(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'dir/subdir'))
+
+    def testVisitchildrensetRootfilesin(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        # FIXME: These should probably be {'subdir'} and 'this', respectively,
+        # or at least 'this' and 'this'.
+        self.assertEqual(m.visitchildrenset(b'dir'), set())
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), set())
+
+    def testVisitdirGlob(self):
+        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # OPT: these should probably be False.
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetGlob(self):
+        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        # OPT: these should probably be set().
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class IncludeMatcherTests(unittest.TestCase):
+
+    def testVisitdirPrefix(self):
+        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertEqual(m.visitdir(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetPrefix(self):
+        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirRootfilesin(self):
+        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetRootfilesin(self):
+        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirGlob(self):
+        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # OPT: these should probably be False.
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetGlob(self):
+        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        # OPT: these should probably be set().
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class ExactMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertFalse(m.visitdir(b'dir/subdir/foo.txt'))
+        self.assertFalse(m.visitdir(b'dir/foo'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), {b'foo.txt'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/foo.txt'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitchildrensetFilesAndDirs(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfile.txt',
+                                                b'a/file1.txt',
+                                                b'a/b/file2.txt',
+                                                # no file in a/b/c
+                                                b'a/b/c/d/file4.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'a', b'rootfile.txt'})
+        self.assertEqual(m.visitchildrenset(b'a'), {b'b', b'file1.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b'), {b'c', b'file2.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c'), {b'd'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c/d'), {b'file4.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c/d/e'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+class DifferenceMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a nevermatcher.
+        self.assertFalse(dm.visitdir(b'.'))
+        self.assertFalse(dm.visitdir(b'dir'))
+        self.assertFalse(dm.visitdir(b'dir/subdir'))
+        self.assertFalse(dm.visitdir(b'dir/subdir/z'))
+        self.assertFalse(dm.visitdir(b'dir/foo'))
+        self.assertFalse(dm.visitdir(b'dir/subdir/x'))
+        self.assertFalse(dm.visitdir(b'folder'))
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a nevermatcher.
+        self.assertEqual(dm.visitchildrenset(b'.'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(dm.visitchildrenset(b'folder'), set())
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to an alwaysmatcher. OPT: if m2 is a
+        # nevermatcher, we could return 'all' for these.
+        #
+        # We're testing Equal-to-True instead of just 'assertTrue' since
+        # assertTrue does NOT verify that it's a bool, just that it's truthy.
+        # While we may want to eventually make these return 'all', they should
+        # not currently do so.
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/foo'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+        self.assertEqual(dm.visitdir(b'folder'), True)
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to an alwaysmatcher.
+        self.assertEqual(dm.visitchildrenset(b'.'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertFalse(dm.visitdir(b'dir/subdir'))
+        # OPT: We should probably return False for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+        # OPT: We could return 'all' for these.
+        self.assertEqual(dm.visitdir(b'dir/foo'), True)
+        self.assertEqual(dm.visitdir(b'folder'), True)
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitchildrenset(b'.'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'folder'), b'all')
+        # OPT: We should probably return set() for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just 'this'.
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir'), True)
+        self.assertFalse(dm.visitdir(b'dir/foo'))
+        self.assertFalse(dm.visitdir(b'folder'))
+        # OPT: We should probably return False for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(dm.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(dm.visitchildrenset(b'folder'), set())
+        # OPT: We should probably return set() for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just 'this'.
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class IntersectionMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to an alwaysmatcher.
+        self.assertEqual(im.visitdir(b'.'), b'all')
+        self.assertEqual(im.visitdir(b'dir'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(im.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(im.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to an alwaysmatcher.
+        self.assertEqual(im.visitchildrenset(b'.'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(im.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a nevermatcher.
+        self.assertFalse(im.visitdir(b'.'))
+        self.assertFalse(im.visitdir(b'dir'))
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+        self.assertFalse(im.visitdir(b'folder'))
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a nevermatcher.
+        self.assertEqual(im.visitchildrenset(b'.'), set())
+        self.assertEqual(im.visitchildrenset(b'dir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        # OPT: We should probably return 'all' for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(im.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        # OPT: We should probably return 'all' for these
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # FIXME: is True correct here?
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertFalse(im.visitdir(b'dir'))
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # FIXME: is set() correct here?
+        self.assertEqual(im.visitchildrenset(b'.'), set())
+        self.assertEqual(im.visitchildrenset(b'dir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), True)
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        # OPT: this should probably be 'all' not True.
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), {b'x'})
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        # OPT: this should probably be 'all' not 'this'.
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # OPT: these next three could probably be False as well.
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), True)
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # OPT: these next two could probably be set() as well.
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
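+
+    # Editorial sketch, not part of the upstream suite: intersecting with an
+    # alwaysmatcher yields the other matcher's answers verbatim (both the
+    # shortcut in intersectmatchers() and intersectionmatcher.visitdir()
+    # behave this way), which is the identity the M2* tests above rely on.
+    def testVisitdirSketchAlwaysIdentity(self):
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(matchmod.alwaysmatcher(b'', b''), m2)
+        for d in [b'.', b'dir', b'dir/subdir', b'dir/subdir/x', b'folder']:
+            self.assertEqual(im.visitdir(d), m2.visitdir(d))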
+
+class UnionMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM1never(self):
+        m1 = matchmod.nevermatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM1never(self):
+        m1 = matchmod.nevermatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        # OPT: These two should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        # OPT: These next two could be 'all' instead of 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+        # OPT: These should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'folder', b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+        # OPT: These next two could be 'all' instead of 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        # OPT: this should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+
+    def testVisitchildrensetIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        # OPT: this should probably be 'all' not 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        um = matchmod.unionmatcher([m1, m2])
+        # OPT: these next three could probably be False as well.
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), True)
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+
+    def testVisitchildrensetIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), {b'x', b'z'})
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
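+
+    # Editorial sketch, not part of the upstream suite: unionmatcher returns
+    # the widest of its children's answers, so adding a nevermatcher (which
+    # always answers False) leaves the other matcher's answers unchanged.
+    def testVisitdirSketchNeverIdentity(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, matchmod.nevermatcher(b'', b'')])
+        for d in [b'.', b'dir', b'dir/subdir', b'dir/subdir/x', b'folder']:
+            self.assertEqual(um.visitdir(d), m1.visitdir(d))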
+
+class SubdirMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        sm = matchmod.subdirmatcher(b'dir', m)
+
+        self.assertEqual(sm.visitdir(b'.'), True)
+        self.assertEqual(sm.visitdir(b'subdir'), b'all')
+        # OPT: These next two should probably be 'all' not True.
+        self.assertEqual(sm.visitdir(b'subdir/x'), True)
+        self.assertEqual(sm.visitdir(b'subdir/z'), True)
+        self.assertFalse(sm.visitdir(b'foo'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        sm = matchmod.subdirmatcher(b'dir', m)
+
+        self.assertEqual(sm.visitchildrenset(b'.'), {b'subdir'})
+        self.assertEqual(sm.visitchildrenset(b'subdir'), b'all')
+        # OPT: These next two should probably be 'all' not 'this'.
+        self.assertEqual(sm.visitchildrenset(b'subdir/x'), b'this')
+        self.assertEqual(sm.visitchildrenset(b'subdir/z'), b'this')
+        self.assertEqual(sm.visitchildrenset(b'foo'), set())
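+
+    # Editorial sketch, not part of the upstream suite: subdirmatcher remaps
+    # queries to be relative to the given directory, so 'dir/subdir/f.txt'
+    # in the outer repo is asked about as 'subdir/f.txt' here.
+    def testMatchRemapsPaths(self):
+        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        sm = matchmod.subdirmatcher(b'dir', m)
+        self.assertTrue(sm(b'subdir/f.txt'))
+        self.assertFalse(sm(b'f.txt'))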
+
+class PrefixdirMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
+                [b'../a.txt', b'b.txt'])
+        pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m)
+
+        # `m` elides 'd' because it's part of the root, and the rest of the
+        # patterns are relative.
+        self.assertEqual(bool(m(b'a.txt')), False)
+        self.assertEqual(bool(m(b'b.txt')), False)
+        self.assertEqual(bool(m(b'e/a.txt')), True)
+        self.assertEqual(bool(m(b'e/b.txt')), False)
+        self.assertEqual(bool(m(b'e/f/b.txt')), True)
+
+        # The prefix matcher re-adds 'd' to the paths, so they need to be
+        # specified when using the prefixdirmatcher.
+        self.assertEqual(bool(pm(b'a.txt')), False)
+        self.assertEqual(bool(pm(b'b.txt')), False)
+        self.assertEqual(bool(pm(b'd/e/a.txt')), True)
+        self.assertEqual(bool(pm(b'd/e/b.txt')), False)
+        self.assertEqual(bool(pm(b'd/e/f/b.txt')), True)
+
+        self.assertEqual(m.visitdir(b'.'), True)
+        self.assertEqual(m.visitdir(b'e'), True)
+        self.assertEqual(m.visitdir(b'e/f'), True)
+        self.assertEqual(m.visitdir(b'e/f/g'), False)
+
+        self.assertEqual(pm.visitdir(b'.'), True)
+        self.assertEqual(pm.visitdir(b'd'), True)
+        self.assertEqual(pm.visitdir(b'd/e'), True)
+        self.assertEqual(pm.visitdir(b'd/e/f'), True)
+        self.assertEqual(pm.visitdir(b'd/e/f/g'), False)
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
+                [b'../a.txt', b'b.txt'])
+        pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m)
+
+        # OPT: visitchildrenset could possibly return {'e'} and {'f'} for these
+        # next two, respectively; patternmatcher does not have this
+        # optimization.
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e/f'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e/f/g'), set())
+
+        # OPT: visitchildrenset could possibly return {'d'}, {'e'}, and {'f'}
+        # for these next three, respectively; patternmatcher does not have this
+        # optimization.
+        self.assertEqual(pm.visitchildrenset(b'.'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e/f'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e/f/g'), set())
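+
+    # Editorial sketch, not part of the upstream suite: per the elision
+    # behavior shown in testVisitdir, pm(f) agrees with m(f) once the
+    # re-added 'd/' prefix is stripped again.
+    def testMatchAgreesModuloPrefix(self):
+        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
+                [b'../a.txt', b'b.txt'])
+        pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m)
+        for f in [b'd/e/a.txt', b'd/e/b.txt', b'd/e/f/b.txt']:
+            self.assertEqual(bool(pm(f)), bool(m(f[2:])))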
 
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
--- a/tests/test-merge-changedelete.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-merge-changedelete.t	Tue Sep 04 12:16:28 2018 -0400
@@ -54,9 +54,11 @@
 Non-interactive merge:
 
   $ hg merge -y
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -121,9 +123,11 @@
   > c
   > d
   > EOF
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? c
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? d
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -189,18 +193,23 @@
   > baz
   > c
   > EOF
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? foo
   unrecognized response
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? bar
   unrecognized response
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? d
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? baz
   unrecognized response
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -262,9 +271,11 @@
   $ hg merge --config ui.interactive=true <<EOF
   > d
   > EOF
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? d
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -473,9 +484,11 @@
   1 other heads for branch "default"
 
   $ hg merge --config ui.interactive=True --tool :prompt
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   0 files updated, 0 files merged, 0 files removed, 3 files unresolved
@@ -532,9 +545,11 @@
   1 other heads for branch "default"
 
   $ hg merge --tool :prompt
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? u
   0 files updated, 0 files merged, 0 files removed, 3 files unresolved
@@ -589,9 +604,11 @@
   1 other heads for branch "default"
 
   $ hg merge --tool :merge3
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -679,9 +696,11 @@
   (status identical)
   
   === :other -> :prompt ===
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   --- diff of status ---
@@ -707,9 +726,11 @@
   (status identical)
   
   === :local -> :prompt ===
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   --- diff of status ---
@@ -725,9 +746,11 @@
   (status identical)
   
   === :fail -> :prompt ===
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   --- diff of status ---
@@ -751,9 +774,11 @@
   $ echo changed >> file1
   $ hg rm file2
   $ hg update 1 -y
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   1 files updated, 0 files merged, 0 files removed, 2 files unresolved
   use 'hg resolve' to retry unresolved file merges
@@ -927,9 +952,11 @@
   $ echo changed >> file1
   $ hg rm file2
   $ hg update 1 --config ui.interactive=True --tool :prompt
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   1 files updated, 0 files merged, 0 files removed, 2 files unresolved
   use 'hg resolve' to retry unresolved file merges
@@ -977,9 +1004,11 @@
   $ echo changed >> file1
   $ hg rm file2
   $ hg update 1 --tool :merge3
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   1 files updated, 0 files merged, 0 files removed, 2 files unresolved
   use 'hg resolve' to retry unresolved file merges
@@ -1033,9 +1062,11 @@
   (status identical)
   
   === :other -> :prompt ===
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   --- diff of status ---
   (status identical)
@@ -1060,9 +1091,11 @@
   (status identical)
   
   === :local -> :prompt ===
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   --- diff of status ---
   (status identical)
@@ -1077,9 +1110,11 @@
   (status identical)
   
   === :fail -> :prompt ===
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   --- diff of status ---
   (status identical)
--- a/tests/test-merge-force.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-merge-force.t	Tue Sep 04 12:16:28 2018 -0400
@@ -142,55 +142,80 @@
 #   in the same way, so it could potentially be left alone
 
   $ hg merge -f --tool internal:merge3 'desc("remote")' 2>&1 | tee $TESTTMP/merge-output-1
-  local [working copy] changed content1_missing_content1_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content1_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content3-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content3-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_missing_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_missing_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content3-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content3-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content1-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content2-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content4-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-tracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-untracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content1_content4-tracked
   merging content1_content2_content2_content1-tracked
@@ -703,63 +728,88 @@
   (no more unresolved files)
   $ hg resolve --unmark --all
   $ hg resolve --all --tool internal:merge3
-  other [merge rev] changed content1_content2_content1_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content1_content4-tracked
-  other [merge rev] changed content1_content2_content1_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content2_content1-tracked
-  other [merge rev] changed content1_content2_content2_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content2_content4-tracked
-  other [merge rev] changed content1_content2_content2_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content3_content1-tracked
-  other [merge rev] changed content1_content2_content3_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content3_content3-tracked
-  other [merge rev] changed content1_content2_content3_content3-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content3-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content3_content4-tracked
-  other [merge rev] changed content1_content2_content3_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_missing_content1-tracked
-  other [merge rev] changed content1_content2_missing_content1-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content2-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_missing_content4-tracked
-  other [merge rev] changed content1_content2_missing_content4-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-tracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-untracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content1_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content1_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content3-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content3-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_missing_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_missing_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
   merging missing_content2_content2_content4-tracked
   merging missing_content2_content3_content3-tracked
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-merge-no-file-change.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,379 @@
+  $ cat <<'EOF' >> "$HGRCPATH"
+  > [extensions]
+  > convert =
+  > [templates]
+  > l = '{rev}:{node|short} p={p1rev},{p2rev} m={manifest} f={files|json}'
+  > EOF
+
+  $ check_convert_identity () {
+  >     hg convert -q "$1" "$1.converted"
+  >     hg outgoing -q -R "$1.converted" "$1"
+  >     if [ "$?" != 1 ]; then
+  >         echo '*** BUG: hash changes on convert ***'
+  >         hg log -R "$1.converted" -GTl
+  >     fi
+  > }
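+
+(For reference: "hg outgoing" exits with status 1 when there are no outgoing
+changesets, so the helper prints the BUG banner only when the conversion
+changed some hash.)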
+
+Files added at both parents:
+
+  $ hg init added-both
+  $ cd added-both
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg ci -qAm2 c
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:7aa8a293f5d97377037afc21e871e036e718d659
+  $ hg log -GTl
+  @    3:7aa8a293f5d9 p=2,1 m=3:8667461869a1 f=[]
+  |\
+  | o  2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
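+(Here "b" enters the working copy from p2, so the changelog lists no files,
+yet the merged manifest differs from p1's and has to be committed anew; hence
+the "not reusing manifest" message above.)
+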
+  $ cd ..
+  $ check_convert_identity added-both
+
+Files added at both parents, but the other removed at the merge:
+(In this case, ctx.files() after the commit contains the removed file "b", but
+its manifest does not differ from p1.)
+
+  $ hg init added-both-removed-at-merge
+  $ cd added-both-removed-at-merge
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg ci -qAm2 c
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg rm -f b
+  $ hg ci --debug -m merge
+  committing files:
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:915745f3ca3d9d699925269474c2d0a9526e8dfa
+  $ hg log -GTl
+  @    3:915745f3ca3d p=2,1 m=3:8e9cf3456921 f=["b"]
+  |\
+  | o  2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-both-removed-at-merge
+
+An identical file added at both parents:
+
+  $ hg init added-identical
+  $ cd added-identical
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:de26182cd210f0c3fb175ca7616704ab963d3024
+  $ hg log -GTl
+  @    3:de26182cd210 p=2,1 m=1:686dbf0aeca4 f=[]
+  |\
+  | o  2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
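+(Both parents carry the identical manifest 1:686dbf0aeca4 here, so the merge
+commit reuses that node outright, as the log above shows.)
+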
+  $ cd ..
+  $ check_convert_identity added-identical
+
+#if execbit
+
+An identical file added at both parents, but the flag differs. Take local:
+
+  $ hg init flag-change-take-p1
+  $ cd flag-change-take-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ chmod +x b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  warning: cannot merge flags for b without common ancestor - keeping local flags
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ chmod +x b
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  reusing manifest from p1 (listed files actually unchanged)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:c8d50407916ef8a5a97cb6e36ca9bc844a6ee13e
+  $ hg log -GTl
+  @    3:c8d50407916e p=2,1 m=2:36b69ba4b24b f=[]
+  |\
+  | o  2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+  $ hg files -vr3
+           0   a
+           0 x b
+
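+("b" is listed for commit, but taking the local flag leaves its content and
+flags identical to p1's copy, so the p1 manifest node 2:36b69ba4b24b is
+reused, as the log above shows.)
+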
+  $ cd ..
+  $ check_convert_identity flag-change-take-p1
+
+An identical file added at both parents, but the flag differs. Take other:
+
+  $ hg init flag-change-take-p2
+  $ cd flag-change-take-p2
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ chmod +x b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  warning: cannot merge flags for b without common ancestor - keeping local flags
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ chmod -x b
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:06a62a687d87c7d8944743dee1ee9d8c66b3f6e3
+  $ hg log -GTl
+  @    3:06a62a687d87 p=2,1 m=3:2a315ba1aa45 f=["b"]
+  |\
+  | o  2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+  $ hg files -vr3
+           0   a
+           0   b
+
+  $ cd ..
+  $ check_convert_identity flag-change-take-p2
+
+#endif
+
+An identical file added at both parents, one more file added at p2:
+
+  $ hg init added-some-p2
+  $ cd added-some-p2
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg ci -qAm2 c
+  $ hg up -q 0
+  $ touch b
+  $ hg ci -qAm3 b
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  c
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 4:f7fbc4e4d9a8fde03ba475adad675578c8bf472d
+  $ hg log -GTl
+  @    4:f7fbc4e4d9a8 p=3,2 m=3:92acd5bfd716 f=[]
+  |\
+  | o  3:e9d9f3cc981f p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  2:93c5529a4ec7 p=1,-1 m=2:ae25a31b30b3 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-some-p2
+
+An identical file added at both parents, one more file added at p1:
+(In this case, p1 manifest is reused at the merge commit, which means the
+manifest DAG does not have the same shape as the changelog.)
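+(The l template output below shows this: merge rev 4 carries m=2:ae25a31b30b3,
+the same manifest revision as its first parent rev 3, so the manifest revlog
+gains no new node even though the changelog does.)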
+
+  $ hg init added-some-p1
+  $ cd added-some-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b c
+  $ hg ci -qAm2 b
+  $ hg ci -qAm3 c
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 4:a9f0f589a913f5a149dc10dfbd5af726977c36c4
+  $ hg log -GTl
+  @    4:a9f0f589a913 p=3,1 m=2:ae25a31b30b3 f=[]
+  |\
+  | o  3:b8dc385241b5 p=2,-1 m=2:ae25a31b30b3 f=["c"]
+  | |
+  | o  2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-some-p1
+
+A file added at p2, a named branch created at p1:
+
+  $ hg init named-branch-p1
+  $ cd named-branch-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg branch -q foo
+  $ hg ci -m2
+
+  $ hg merge default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:fb97d83b02fd072295cfc2171f21b7d38509bfd7
+  $ hg log -GT'{l} branch={branch}'
+  @    3:fb97d83b02fd p=2,1 m=2:9091c64f4ea1 f=[] branch=foo
+  |\
+  | o  2:a3a9fa6587e5 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity named-branch-p1
+
+A file added at p1, a named branch created at p2:
+(In this case, p1 manifest is reused at the merge commit, which means the
+manifest DAG does not have the same shape as the changelog.)
+
+  $ hg init named-branch-p2
+  $ cd named-branch-p2
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg branch -q foo
+  $ hg ci -m1
+  $ hg up -q 0
+  $ hg ci -qAm1 b
+
+  $ hg merge foo
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:036823e24692218324d4af43b07ff89f8a000096
+  $ hg log -GT'{l} branch={branch}'
+  @    3:036823e24692 p=2,1 m=1:686dbf0aeca4 f=[] branch=default
+  |\
+  | o  2:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default
+  | |
+  o |  1:da38c8e00727 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity named-branch-p2
+
+A file changed once at both parents, but amended to have identical content:
+
+  $ hg init amend-p1
+  $ cd amend-p1
+  $ touch a
+  $ hg ci -qAm0 a
+  $ echo foo > a
+  $ hg ci -m1
+  $ hg up -q 0
+  $ echo bar > a
+  $ hg ci -qm2
+  $ echo foo > a
+  $ hg ci -qm3 --amend
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:314e5bc5adf5c58ea571efabe33eedba20a201aa
+  $ hg log -GT'{l} branch={branch}'
+  @    3:314e5bc5adf5 p=2,1 m=1:d33ea248bd73 f=[] branch=default
+  |\
+  | o  2:de9c64f226a3 p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default
+  | |
+  o |  1:6a74aec01b3c p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity amend-p1
--- a/tests/test-merge-remove.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-merge-remove.t	Tue Sep 04 12:16:28 2018 -0400
@@ -69,8 +69,8 @@
 
   $ hg revert -vr . foo1 bar
   saving current version of bar as bar.orig
+  saving current version of foo1 as foo1.orig
   reverting bar
-  saving current version of foo1 as foo1.orig
   reverting foo1
 
   $ hg debugstate --nodates
@@ -102,7 +102,8 @@
 Those who use force will lose
 
   $ hg merge -f
-  other [merge rev] changed bar which local [working copy] deleted
+  file 'bar' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging foo1 and foo to foo1
   0 files updated, 1 files merged, 0 files removed, 1 files unresolved
--- a/tests/test-merge-subrepos.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-merge-subrepos.t	Tue Sep 04 12:16:28 2018 -0400
@@ -110,7 +110,8 @@
   $ hg up -r '.^' --config ui.interactive=True << EOF
   > d
   > EOF
-  other [destination] changed b which local [working copy] deleted
+  file 'b' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? d
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
--- a/tests/test-merge-tools.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-merge-tools.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1701,6 +1701,35 @@
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg update -C 1 > /dev/null
+
+#else
+
+Match the non-portable filename commits above for test stability
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > revision 5
+  > 
+  > diff --git a/"; exit 1; echo " b/"; exit 1; echo "
+  > new file mode 100644
+  > --- /dev/null
+  > +++ b/"; exit 1; echo "
+  > @@ -0,0 +1,1 @@
+  > +revision 5
+  > EOF
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > revision 6
+  > 
+  > diff --git a/"; exit 1; echo " b/"; exit 1; echo "
+  > new file mode 100644
+  > --- /dev/null
+  > +++ b/"; exit 1; echo "
+  > @@ -0,0 +1,1 @@
+  > +revision 6
+  > EOF
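+
+(--bypass applies these patches straight to history without touching the
+working directory, so the shell-metacharacter filename never needs to exist
+on disk; it keeps revisions 5 and 6 aligned with the #if branch above.)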
+
 #endif
 
 Merge post-processing
@@ -1737,14 +1766,64 @@
   # hg resolve --list
   U f
 
-#if symlink
+missingbinary is a merge-tool that doesn't exist:
+
+  $ echo "missingbinary.executable=doesnotexist" >> .hg/hgrc
+  $ beforemerge
+  [merge-tools]
+  false.whatever=
+  true.priority=1
+  true.executable=cat
+  missingbinary.executable=doesnotexist
+  # hg update -C 1
+  $ hg merge -y -r 2 --config ui.merge=missingbinary
+  couldn't find merge tool missingbinary (for pattern f)
+  merging f
+  couldn't find merge tool missingbinary (for pattern f)
+  revision 1
+  space
+  revision 0
+  space
+  revision 2
+  space
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
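+
+(After the failed lookup, tool selection falls back to the ordinary search,
+which picks true (executable=cat) here; the three revisions printed above are
+cat's output for local, base, and other.)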
+
+  $ hg update -q -C 1
+  $ rm f
 
 internal merge cannot handle symlinks and shouldn't try:
 
-  $ hg update -q -C 1
-  $ rm f
+#if symlink
+
   $ ln -s symlink f
   $ hg commit -qm 'f is symlink'
+
+#else
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > f is symlink
+  > 
+  > diff --git a/f b/f
+  > old mode 100644
+  > new mode 120000
+  > --- a/f
+  > +++ b/f
+  > @@ -1,2 +1,1 @@
+  > -revision 1
+  > -space
+  > +symlink
+  > \ No newline at end of file
+  > EOF
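+
+(The git-style mode change to 120000 marks f as a symlink in the manifest;
+combined with --bypass it exercises the symlink code path even on platforms
+where the filesystem cannot create one.)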
+
+Resolve the "file 'f' was deleted in local [working copy] but was modified in other [destination]." prompt
+  $ hg up -q -C --config ui.interactive=True << EOF
+  > c
+  > EOF
+
+#endif
+
   $ hg merge -r 2 --tool internal:merge
   merging f
   warning: internal :merge cannot merge symlinks for f
@@ -1753,8 +1832,6 @@
   use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
   [1]
 
-#endif
-
 Verify naming of temporary files and that extension is preserved:
 
   $ hg update -q -C 1
@@ -1782,6 +1859,89 @@
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
 
+Binary files capability checking
+
+  $ hg update -q -C 0
+  $ $PYTHON <<EOF
+  > with open('b', 'wb') as fp:
+  >     fp.write(b'\x00\x01\x02\x03')
+  > EOF
+  $ hg add b
+  $ hg commit -qm "add binary file (#1)"
+
+  $ hg update -q -C 0
+  $ $PYTHON <<EOF
+  > with open('b', 'wb') as fp:
+  >     fp.write(b'\x03\x02\x01\x00')
+  > EOF
+  $ hg add b
+  $ hg commit -qm "add binary file (#2)"
+
+By default, the binary files capability of internal merge tools is not
+checked strictly.
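+
+(For reference, the capability is an ordinary per-tool knob; a minimal sketch
+for a hypothetical external tool named mymerge, written to a scratch file so
+it has no effect on the runs below:)
+
+  $ cat > binary-tool-example.rc <<EOF
+  > [merge-tools]
+  > mymerge.executable = mymerge
+  > mymerge.binary = true
+  > EOF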
+
+(for merge-patterns, chosen unintentionally)
+
+  $ hg merge 9 \
+  > --config merge-patterns.b=:merge-other \
+  > --config merge-patterns.re:[a-z]=:other
+  warning: check merge-patterns configurations, if ':merge-other' for binary file 'b' is unintentional
+  (see 'hg help merge-tools' for binary files capability)
+  merging b
+  warning: b looks like a binary file.
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+(for ui.merge, ignored unintentionally)
+
+  $ hg merge 9 \
+  > --config merge-tools.:other.binary=true \
+  > --config ui.merge=:other
+  tool :other (for pattern b) can't handle binary
+  tool true can't handle binary
+  tool :other can't handle binary
+  tool false can't handle binary
+  no tool found to merge b
+  keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for b? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+With merge.strict-capability-check=true, the binary files capability of
+internal merge tools is checked strictly.
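+
+(Under the strict check an internal tool's actual capabilities win over
+merge-tools.*.binary overrides, so selection falls through to a capable tool,
+as the :other fallback below shows. The switch can also be made persistent; a
+minimal sketch, kept in a scratch file so the --config runs below stay
+authoritative:)
+
+  $ cat > strict-check-example.rc <<EOF
+  > [merge]
+  > strict-capability-check = true
+  > EOF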
+
+  $ f --hexdump b
+  b:
+  0000: 03 02 01 00                                     |....|
+
+(for merge-patterns)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config merge-tools.:merge-other.binary=true \
+  > --config merge-patterns.b=:merge-other \
+  > --config merge-patterns.re:[a-z]=:other
+  tool :merge-other (for pattern b) can't handle binary
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03                                     |....|
+  $ hg merge --abort -q
+
+(for ui.merge)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config ui.merge=:other
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03                                     |....|
+  $ hg merge --abort -q
+
 Check that debugpickmergetool examines which merge tool is chosen for the
 specified file, as expected
 
@@ -1790,6 +1950,7 @@
   false.whatever=
   true.priority=1
   true.executable=cat
+  missingbinary.executable=doesnotexist
   # hg update -C 1
 
 (default behavior: checking files in the working parent context)
@@ -1812,9 +1973,9 @@
 
 (-r REV causes checking files in specified revision)
 
-  $ hg manifest -r tip
+  $ hg manifest -r 8
   f.txt
-  $ hg debugpickmergetool -r tip
+  $ hg debugpickmergetool -r 8
   f.txt = true
 
 #if symlink
@@ -1824,6 +1985,45 @@
   $ hg debugpickmergetool -r 6d00b3726f6e
   f = :prompt
 
+(by default, it is assumed that no internal merge tool has symlink
+capability)
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:merge-other.symlink=true \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :prompt
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:other.symlink=true \
+  > --config ui.merge=:other
+  f = :prompt
+
+(with strict-capability-check=true, actual symlink capabilities are
+checked strictly)
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:merge-other.symlink=true \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :other
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config ui.merge=:other
+  f = :other
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:merge-other.symlink=true \
+  > --config ui.merge=:merge-other
+  f = :prompt
+
 #endif
 
 (--verbose shows some configurations)
--- a/tests/test-minirst.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-minirst.py	Tue Sep 04 12:16:28 2018 -0400
@@ -7,6 +7,7 @@
 )
 
 def debugformat(text, form, **kwargs):
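+    # minirst.format() no longer returns the pruned containers alongside the
+    # formatted text (see the tuple handling removed below), so collect them
+    # via parse() instead.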
+    blocks, pruned = minirst.parse(text, **kwargs)
     if form == b'html':
         print("html format:")
         out = minirst.format(text, style=form, **kwargs)
@@ -15,12 +16,10 @@
         out = minirst.format(text, width=form, **kwargs)
 
     print("-" * 70)
-    if type(out) == tuple:
-        print(out[0][:-1].decode('utf8'))
+    print(out[:-1].decode('utf8'))
+    if kwargs.get('keep'):
         print("-" * 70)
-        print(stringutil.pprint(out[1]).decode('utf8'))
-    else:
-        print(out[:-1].decode('utf8'))
+        print(stringutil.pprint(pruned).decode('utf8'))
     print("-" * 70)
     print()
 
--- a/tests/test-narrow-clone-no-ellipsis.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow-clone-no-ellipsis.t	Tue Sep 04 12:16:28 2018 -0400
@@ -30,10 +30,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg update
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -55,11 +53,9 @@
   added 40 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg update
   19 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -97,11 +93,9 @@
   added 40 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg update
   20 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -129,3 +123,39 @@
   dir/src/f9
 
   $ cd ..
+
+Testing the --narrowspec flag to clone
+
+  $ cat >> narrowspecs <<EOF
+  > %include foo
+  > [include]
+  > path:dir/tests/
+  > file:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  abort: cannot specify other files using '%include' in narrowspec
+  [255]
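+
+(A narrowspec passed to clone must be self-contained; '%include' directives,
+which would pull in other files, are rejected before anything is transferred.)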
+
+  $ cat > narrowspecs <<EOF
+  > [include]
+  > path:dir/tests/
+  > file:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 40 changesets with 20 changes to 20 files
+  new changesets 681085829a73:26ce255d5b5d
+  updating to branch default
+  20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd specfile
+  $ hg tracked
+  I path:dir/tests
+  I path:file:dir/src/f12
+  $ cd ..
--- a/tests/test-narrow-clone.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow-clone.t	Tue Sep 04 12:16:28 2018 -0400
@@ -34,10 +34,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg tracked
   I path:dir/src/f10
   $ hg update
@@ -69,11 +67,9 @@
   added 21 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg tracked
   I path:dir/tests
   X path:dir/tests/t19
@@ -114,11 +110,9 @@
   added 21 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg tracked
   I path:.
   X path:dir/tests
@@ -224,3 +218,39 @@
   dir/tests/t9
 
   $ cd ..
+
+Testing the --narrowspec flag to clone
+
+  $ cat >> narrowspecs <<EOF
+  > %include foo
+  > [include]
+  > path:dir/tests/
+  > dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  abort: cannot specify other files using '%include' in narrowspec
+  [255]
+
+  $ cat > narrowspecs <<EOF
+  > [include]
+  > path:dir/tests/
+  > file:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 21 changesets with 20 changes to 20 files
+  new changesets f93383bb3e99:26ce255d5b5d
+  updating to branch default
+  20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd specfile
+  $ hg tracked
+  I path:dir/tests
+  I path:file:dir/src/f12
+  $ cd ..
--- a/tests/test-narrow-debugcommands.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow-debugcommands.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1,10 +1,10 @@
   $ . "$TESTDIR/narrow-library.sh"
   $ hg init repo
   $ cd repo
-  $ cat << EOF > .hg/narrowspec
-  > [includes]
+  $ cat << EOF > .hg/store/narrowspec
+  > [include]
   > path:foo
-  > [excludes]
+  > [exclude]
   > EOF
   $ echo treemanifest >> .hg/requires
   $ echo narrowhg-experimental >> .hg/requires
--- a/tests/test-narrow-expanddirstate.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow-expanddirstate.t	Tue Sep 04 12:16:28 2018 -0400
@@ -27,16 +27,16 @@
 
   $ mkdir outside
   $ echo other_contents > outside/f2
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
   $ hg status
 
 `hg status` did not add outside.
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
 
 Unfortunately this is not really a candidate for adding to narrowhg proper,
@@ -115,12 +115,12 @@
 `hg status` will now add outside, but not patchdir.
   $ DIRSTATEINCLUDES=path:outside hg status
   M outside/f2
-  $ grep outside .hg/narrowspec
-  path:outside
-  $ grep outside .hg/dirstate > /dev/null
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep outside
+  I path:outside
+  $ hg files | grep outside > /dev/null
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate
+  $ hg files | grep patchdir
   [1]
 
 Get rid of the modification to outside/f2.
@@ -142,9 +142,9 @@
   1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej
   abort: patch failed to apply
   [255]
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg files | grep patchdir > /dev/null
   [1]
 
 Let's make it apply cleanly and see that it *did* expand properly
@@ -159,6 +159,6 @@
   applying $TESTTMP/foo.patch
   $ cat patchdir/f3
   patched_this
-  $ grep patchdir .hg/narrowspec
-  path:patchdir
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg tracked | grep patchdir
+  I path:patchdir
+  $ hg files | grep patchdir > /dev/null
--- a/tests/test-narrow-patterns.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow-patterns.t	Tue Sep 04 12:16:28 2018 -0400
@@ -88,15 +88,13 @@
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ cd narrow
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ hg manifest -r tip
   dir1/bar
   dir1/dirA/bar
@@ -144,14 +142,12 @@
   adding file changes
   added 9 changesets with 6 changes to 6 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -206,14 +202,12 @@
   adding file changes
   added 11 changesets with 7 changes to 7 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
-  path:dir2/dirA
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
+  X path:dir2/dirA
   $ find * | sort
   dir1
   dir1/bar
@@ -266,14 +260,12 @@
   adding file changes
   added 13 changesets with 8 changes to 8 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -327,13 +319,11 @@
   adding file changes
   added 13 changesets with 9 changes to 9 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
--- a/tests/test-narrow-pull.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow-pull.t	Tue Sep 04 12:16:28 2018 -0400
@@ -166,7 +166,6 @@
 
 We should also be able to unshare without breaking everything:
   $ hg unshare
-  devel-warn: write with no wlock: "narrowspec" at: */hgext/narrow/narrowrepo.py:* (unsharenarrowspec) (glob)
   $ hg verify
   checking changesets
   checking manifests
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-widen-no-ellipsis.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,385 @@
+#testcases tree flat
+  $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+  $ cat << EOF >> $HGRCPATH
+  > [experimental]
+  > treemanifest = 1
+  > EOF
+#endif
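+
+(#testcases makes the runner execute this file once per listed case: the tree
+case enables experimental.treemanifest via the #if block above, while flat
+leaves it at the default.)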
+
+  $ hg init master
+  $ cd master
+
+  $ mkdir inside
+  $ echo 'inside' > inside/f
+  $ hg add inside/f
+  $ hg commit -m 'add inside'
+
+  $ mkdir widest
+  $ echo 'widest' > widest/f
+  $ hg add widest/f
+  $ hg commit -m 'add widest'
+
+  $ mkdir outside
+  $ echo 'outside' > outside/f
+  $ hg add outside/f
+  $ hg commit -m 'add outside'
+
+  $ cd ..
+
+narrow clone the inside file
+
+  $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 1 changes to 1 files
+  new changesets *:* (glob)
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd narrow
+  $ hg tracked
+  I path:inside
+  $ ls
+  inside
+  $ cat inside/f
+  inside
+  $ cd ..
+
+add more upstream files which we will include in a wider narrow spec
+
+  $ cd master
+
+  $ mkdir wider
+  $ echo 'wider' > wider/f
+  $ hg add wider/f
+  $ echo 'widest v2' > widest/f
+  $ hg commit -m 'add wider, update widest'
+
+  $ echo 'widest v3' > widest/f
+  $ hg commit -m 'update widest v3'
+
+  $ echo 'inside v2' > inside/f
+  $ hg commit -m 'update inside'
+
+  $ mkdir outside2
+  $ echo 'outside2' > outside2/f
+  $ hg add outside2/f
+  $ hg commit -m 'add outside2'
+
+  $ echo 'widest v4' > widest/f
+  $ hg commit -m 'update widest v4'
+
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
+
+  $ cd ..
+
+Widen the narrow spec to see the wider file. This should not get the newly
+added upstream revisions.
+
+  $ cd narrow
+  $ hg tracked --addinclude wider/f
+  comparing with ssh://user@dummy/master
+  searching for changes
+  no changes found
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 0 changes to 1 files
+  3 local changesets published
+  $ hg tracked
+  I path:inside
+  I path:wider/f
+
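+(Widening only fetches data for the newly tracked paths within changesets the
+clone already knows about, hence "added 0 changesets" above; the genuinely
+new upstream revisions arrive with the pull below.)
+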
+Pull down the newly added upstream revision.
+
+  $ hg pull
+  pulling from ssh://user@dummy/master
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 2 changes to 2 files
+  new changesets *:* (glob)
+  (run 'hg update' to get a working copy)
+  $ hg update -r 'desc("add wider")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat wider/f
+  wider
+
+  $ hg update -r 'desc("update inside")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat wider/f
+  wider
+  $ cat inside/f
+  inside v2
+
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
+
+Check that widening with a newline fails
+
+  $ hg tracked --addinclude 'widest
+  > '
+  abort: newlines are not allowed in narrowspec paths
+  [255]
+
+widen the narrow spec to include the widest file
+
+  $ hg tracked --addinclude widest
+  comparing with ssh://user@dummy/master
+  searching for changes
+  no changes found
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 4 changes to 3 files
+  5 local changesets published
+  $ hg tracked
+  I path:inside
+  I path:wider/f
+  I path:widest
+  $ hg update 'desc("add widest")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ cat widest/f
+  widest
+  $ hg update 'desc("add wider, update widest")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat wider/f
+  wider
+  $ cat widest/f
+  widest v2
+  $ hg update 'desc("update widest v3")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat widest/f
+  widest v3
+  $ hg update 'desc("update widest v4")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat widest/f
+  widest v4
+
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
+
+separate suite of tests: files from 0-10 modified in changes 0-10. This allows
+more obvious, precise tests tickling particular corner cases.
+
+  $ cd ..
+  $ hg init upstream
+  $ cd upstream
+  $ for x in `$TESTDIR/seq.py 0 10`
+  > do
+  >   mkdir d$x
+  >   echo $x > d$x/f
+  >   hg add d$x/f
+  >   hg commit -m "add d$x/f"
+  > done
+  $ hg log -T "{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+
+make a narrow clone with every third node.
+
+  $ cd ..
+  $ hg clone --narrow ssh://user@dummy/upstream narrow2 --include d0 --include d3 --include d6 --include d9
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 11 changesets with 4 changes to 4 files
+  new changesets *:* (glob)
+  updating to branch default
+  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd narrow2
+  $ hg tracked
+  I path:d0
+  I path:d3
+  I path:d6
+  I path:d9
+  $ hg verify
+  checking changesets
+  checking manifests
+  checking directory manifests (tree !)
+  crosschecking files in changesets and manifests
+  checking files
+  4 files, 11 changesets, 4 total revisions
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg tracked --addinclude d1
+  comparing with ssh://user@dummy/upstream
+  searching for changes
+  no changes found
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 1 changes to 5 files
+  11 local changesets published
+  $ hg tracked
+  I path:d0
+  I path:d1
+  I path:d3
+  I path:d6
+  I path:d9
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+
+Verify shouldn't claim the repo is corrupt after a widen.
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  checking directory manifests (tree !)
+  crosschecking files in changesets and manifests
+  checking files
+  5 files, 11 changesets, 5 total revisions
+
+Widening preserves parent of local commit
+
+  $ cd ..
+  $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2
+  $ cd narrow3
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg pull -q -r 3
+  $ hg co -q tip
+  $ hg pull -q -r 4
+  $ echo local > d2/f
+  $ hg ci -m local
+  created new head
+  $ hg tracked -q --addinclude d0 --addinclude d9
+
+Widening preserves bookmarks
+
+  $ cd ..
+  $ hg clone -q --narrow ssh://user@dummy/upstream narrow-bookmarks --include d4
+  $ cd narrow-bookmarks
+  $ echo local > d4/f
+  $ hg ci -m local
+  $ hg bookmarks bookmark
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
+  $ hg -q tracked --addinclude d2
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
+  $ hg log -r bookmark -T '{desc}\n'
+  local
+
+Widening that fails can be recovered from
+
+  $ cd ..
+  $ hg clone -q --narrow ssh://user@dummy/upstream interrupted --include d0
+  $ cd interrupted
+  $ echo local > d0/f
+  $ hg ci -m local
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  11: local
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg bookmarks bookmark
+  $ hg --config hooks.pretxnchangegroup.bad=false tracked --addinclude d1
+  comparing with ssh://user@dummy/upstream
+  searching for changes
+  no changes found
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 1 changes to 2 files
+  11 local changesets published
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  11: local
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
+  $ hg unbundle .hg/strip-backup/*-widen.hg
+  abort: .hg/strip-backup/*-widen.hg: $ENOTDIR$ (windows !)
+  abort: $ENOENT$: .hg/strip-backup/*-widen.hg (no-windows !)
+  [255]
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  11: local
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
--- a/tests/test-narrow-widen.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow-widen.t	Tue Sep 04 12:16:28 2018 -0400
@@ -76,15 +76,15 @@
   $ echo 'widest v4' > widest/f
   $ hg commit -m 'update widest v4'
 
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  *: update widest v4 (glob)
-  *: add outside2 (glob)
-  *: update inside (glob)
-  *: update widest v3 (glob)
-  *: add wider, update widest (glob)
-  *: add outside (glob)
-  *: add widest (glob)
-  *: add inside (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
 
   $ cd ..
 
@@ -129,13 +129,13 @@
   $ cat inside/f
   inside v2
 
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  ...*: update widest v4 (glob)
-  *: update inside (glob)
-  ...*: update widest v3 (glob)
-  *: add wider, update widest (glob)
-  ...*: add outside (glob)
-  *: add inside (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  ...5: update widest v4
+  4: update inside
+  ...3: update widest v3
+  2: add wider, update widest
+  ...1: add outside
+  0: add inside
 
 Check that widening with a newline fails
 
@@ -179,15 +179,15 @@
   $ cat widest/f
   widest v4
 
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  *: update widest v4 (glob)
-  ...*: add outside2 (glob)
-  *: update inside (glob)
-  *: update widest v3 (glob)
-  *: add wider, update widest (glob)
-  ...*: add outside (glob)
-  *: add widest (glob)
-  *: add inside (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  ...6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  ...2: add outside
+  1: add widest
+  0: add inside
 
 separate suite of tests: files from 0-10 modified in changes 0-10. This allows
 more obvious, precise tests tickling particular corner cases.
@@ -206,18 +206,18 @@
   >   hg add d$x/f
   >   hg commit -m "add d$x/f"
   > done
-  $ hg log -T "{node|short}: {desc}\n"
-  *: add d10/f (glob)
-  *: add d9/f (glob)
-  *: add d8/f (glob)
-  *: add d7/f (glob)
-  *: add d6/f (glob)
-  *: add d5/f (glob)
-  *: add d4/f (glob)
-  *: add d3/f (glob)
-  *: add d2/f (glob)
-  *: add d1/f (glob)
-  *: add d0/f (glob)
+  $ hg log -T "{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
 
 make a narrow clone with every third node.
 
@@ -244,15 +244,15 @@
   crosschecking files in changesets and manifests
   checking files
   4 files, 8 changesets, 4 total revisions
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  ...*: add d10/f (glob)
-  *: add d9/f (glob)
-  ...*: add d8/f (glob)
-  *: add d6/f (glob)
-  ...*: add d5/f (glob)
-  *: add d3/f (glob)
-  ...*: add d2/f (glob)
-  *: add d0/f (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  ...7: add d10/f
+  6: add d9/f
+  ...5: add d8/f
+  4: add d6/f
+  ...3: add d5/f
+  2: add d3/f
+  ...1: add d2/f
+  0: add d0/f
   $ hg tracked --addinclude d1
   comparing with ssh://user@dummy/upstream
   searching for changes
@@ -269,16 +269,16 @@
   I path:d3
   I path:d6
   I path:d9
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  ...*: add d10/f (glob)
-  *: add d9/f (glob)
-  ...*: add d8/f (glob)
-  *: add d6/f (glob)
-  ...*: add d5/f (glob)
-  *: add d3/f (glob)
-  ...*: add d2/f (glob)
-  *: add d1/f (glob)
-  *: add d0/f (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  ...8: add d10/f
+  7: add d9/f
+  ...6: add d8/f
+  5: add d6/f
+  ...4: add d5/f
+  3: add d3/f
+  ...2: add d2/f
+  1: add d1/f
+  0: add d0/f
 
 Verify shouldn't claim the repo is corrupt after a widen.
 
@@ -295,9 +295,9 @@
   $ cd ..
   $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2
   $ cd narrow3
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  *: add d2/f (glob)
-  ...*: add d1/f (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  1: add d2/f
+  ...0: add d1/f
   $ hg pull -q -r 3
   $ hg co -q tip
   $ hg pull -q -r 4
--- a/tests/test-narrow.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-narrow.t	Tue Sep 04 12:16:28 2018 -0400
@@ -22,18 +22,18 @@
   >   hg add d$x/f
   >   hg commit -m "add d$x/f"
   > done
-  $ hg log -T "{node|short}: {desc}\n"
-  *: add d10/f (glob)
-  *: add d9/f (glob)
-  *: add d8/f (glob)
-  *: add d7/f (glob)
-  *: add d6/f (glob)
-  *: add d5/f (glob)
-  *: add d4/f (glob)
-  *: add d3/f (glob)
-  *: add d2/f (glob)
-  *: add d1/f (glob)
-  *: add d0/f (glob)
+  $ hg log -T "{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
   $ cd ..
 
 Error if '.' or '..' is in the directory to track.
@@ -111,15 +111,15 @@
   d6/f
   $ hg verify -q
 Force deletion of local changes
-  $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
-  *: local change to d3  (glob)
-  *: local change to d0  (glob)
-  *: add d10/f outsidenarrow (glob)
-  *: add d6/f  (glob)
-  *: add d5/f outsidenarrow (glob)
-  *: add d3/f  (glob)
-  *: add d2/f outsidenarrow (glob)
-  *: add d0/f  (glob)
+  $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
+  8: local change to d3 
+  6: local change to d0 
+  5: add d10/f outsidenarrow
+  4: add d6/f 
+  3: add d5/f outsidenarrow
+  2: add d3/f 
+  1: add d2/f outsidenarrow
+  0: add d0/f 
   $ hg tracked --removeinclude d0 --force-delete-local-changes
   comparing with ssh://user@dummy/master
   searching for changes
@@ -133,14 +133,14 @@
   deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
 
-  $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
-  *: local change to d3  (glob)
-  *: add d10/f outsidenarrow (glob)
-  *: add d6/f  (glob)
-  *: add d5/f outsidenarrow (glob)
-  *: add d3/f  (glob)
-  *: add d2/f outsidenarrow (glob)
-  *: add d0/f outsidenarrow (glob)
+  $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
+  7: local change to d3 
+  5: add d10/f outsidenarrow
+  4: add d6/f 
+  3: add d5/f outsidenarrow
+  2: add d3/f 
+  1: add d2/f outsidenarrow
+  0: add d0/f outsidenarrow
 Can restore stripped local changes after widening
   $ hg tracked --addinclude d0 -q
   $ hg unbundle .hg/strip-backup/*-narrow.hg -q
--- a/tests/test-parseindex.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-parseindex.t	Tue Sep 04 12:16:28 2018 -0400
@@ -145,7 +145,7 @@
   >     open(n + b"/.hg/store/00changelog.i", "wb").write(d)
   > EOF
 
-  $ hg -R limit debugindex -f1 -c
+  $ hg -R limit debugrevlogindex -f1 -c
      rev flag     size   link     p1     p2       nodeid
        0 0000       62      0      2     -1 7c31755bf9b5
        1 0000       65      1      0      2 26333235a41c
@@ -155,7 +155,7 @@
         0       1        1       -1    base         63         62         63   1.01613        63         0    0.00000
         1       2        1       -1    base         66         65         66   1.01538        66         0    0.00000
 
-  $ hg -R segv debugindex -f1 -c
+  $ hg -R segv debugrevlogindex -f1 -c
      rev flag     size   link     p1     p2       nodeid
        0 0000       62      0  65536     -1 7c31755bf9b5
        1 0000       65      1      0  65536 26333235a41c
--- a/tests/test-parseindex2.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-parseindex2.py	Tue Sep 04 12:16:28 2018 -0400
@@ -8,12 +8,14 @@
 import struct
 import subprocess
 import sys
+import unittest
 
 from mercurial.node import (
     nullid,
     nullrev,
 )
 from mercurial import (
+    node as nodemod,
     policy,
     pycompat,
 )
@@ -61,9 +63,6 @@
     e[0] = offset_type(0, type)
     index[0] = tuple(e)
 
-    # add the magic null revision at -1
-    index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
     return index, cache
 
 data_inlined = (
@@ -132,88 +131,92 @@
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     return p.communicate()  # returns stdout, stderr
 
-def printhexfail(testnumber, hexversion, stdout, expected):
+def hexfailmsg(testnumber, hexversion, stdout, expected):
     try:
         hexstring = hex(hexversion)
     except TypeError:
         hexstring = None
-    print("FAILED: version test #%s with Python %s and patched "
-          "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
-          (testnumber, sys.version_info, hexversion, hexstring, expected,
-           stdout))
-
-def testversionokay(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    if stdout:
-        printhexfail(testnumber, hexversion, stdout, expected="no stdout")
-
-def testversionfail(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    # We include versionerrortext to distinguish from other ImportErrors.
-    errtext = b"ImportError: %s" % pycompat.sysbytes(parsers.versionerrortext)
-    if errtext not in stdout:
-        printhexfail(testnumber, hexversion, stdout,
-                     expected="stdout to contain %r" % errtext)
+    return ("FAILED: version test #%s with Python %s and patched "
+            "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
+            (testnumber, sys.version_info, hexversion, hexstring, expected,
+             stdout))
 
 def makehex(major, minor, micro):
     return int("%x%02x%02x00" % (major, minor, micro), 16)
 
-def runversiontests():
-    """Check the version-detection logic when importing parsers."""
-    info = sys.version_info
-    major, minor, micro = info[0], info[1], info[2]
-    # Test same major-minor versions.
-    testversionokay(1, makehex(major, minor, micro))
-    testversionokay(2, makehex(major, minor, micro + 1))
-    # Test different major-minor versions.
-    testversionfail(3, makehex(major + 1, minor, micro))
-    testversionfail(4, makehex(major, minor + 1, micro))
-    testversionfail(5, "'foo'")
+class parseindex2tests(unittest.TestCase):
+
+    def assertversionokay(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        self.assertFalse(
+            stdout, hexfailmsg(testnumber, hexversion, stdout, 'no stdout'))
+
+    def assertversionfail(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        # We include versionerrortext to distinguish from other ImportErrors.
+        errtext = b"ImportError: %s" % pycompat.sysbytes(
+            parsers.versionerrortext)
+        self.assertIn(errtext, stdout,
+                      hexfailmsg(testnumber, hexversion, stdout,
+                                 expected="stdout to contain %r" % errtext))
 
-def runtest() :
-    # Only test the version-detection logic if it is present.
-    try:
-        parsers.versionerrortext
-    except AttributeError:
-        pass
-    else:
-        runversiontests()
+    def testversiondetection(self):
+        """Check the version-detection logic when importing parsers."""
+        # Only test the version-detection logic if it is present.
+        try:
+            parsers.versionerrortext
+        except AttributeError:
+            return
+        info = sys.version_info
+        major, minor, micro = info[0], info[1], info[2]
+        # Test same major-minor versions.
+        self.assertversionokay(1, makehex(major, minor, micro))
+        self.assertversionokay(2, makehex(major, minor, micro + 1))
+        # Test different major-minor versions.
+        self.assertversionfail(3, makehex(major + 1, minor, micro))
+        self.assertversionfail(4, makehex(major, minor + 1, micro))
+        self.assertversionfail(5, "'foo'")
 
-    # Check that parse_index2() raises TypeError on bad arguments.
-    try:
-        parse_index2(0, True)
-    except TypeError:
-        pass
-    else:
-        print("Expected to get TypeError.")
+    def testbadargs(self):
+        # Check that parse_index2() raises TypeError on bad arguments.
+        with self.assertRaises(TypeError):
+            parse_index2(0, True)
 
-   # Check parsers.parse_index2() on an index file against the original
-   # Python implementation of parseindex, both with and without inlined data.
-
-    py_res_1 = py_parseindex(data_inlined, True)
-    c_res_1 = parse_index2(data_inlined, True)
+    def testparseindexfile(self):
+        # Check parsers.parse_index2() on an index file against the
+        # original Python implementation of parseindex, both with and
+        # without inlined data.
 
-    py_res_2 = py_parseindex(data_non_inlined, False)
-    c_res_2 = parse_index2(data_non_inlined, False)
+        want = py_parseindex(data_inlined, True)
+        got = parse_index2(data_inlined, True)
+        self.assertEqual(want, got) # inline data
 
-    if py_res_1 != c_res_1:
-        print("Parse index result (with inlined data) differs!")
-
-    if py_res_2 != c_res_2:
-        print("Parse index result (no inlined data) differs!")
+        want = py_parseindex(data_non_inlined, False)
+        got = parse_index2(data_non_inlined, False)
+        self.assertEqual(want, got) # no inline data
 
-    ix = parsers.parse_index2(data_inlined, True)[0]
-    for i, r in enumerate(ix):
-        if r[7] == nullid:
-            i = -1
-        try:
-            if ix[r[7]] != i:
-                print('Reverse lookup inconsistent for %r'
-                    % r[7].encode('hex'))
-        except TypeError:
-            # pure version doesn't support this
-            break
+        ix = parsers.parse_index2(data_inlined, True)[0]
+        for i, r in enumerate(ix):
+            if r[7] == nullid:
+                i = -1
+            try:
+                self.assertEqual(
+                    ix[r[7]], i,
+                    'Reverse lookup inconsistent for %r' % nodemod.hex(r[7]))
+            except TypeError:
+                # pure version doesn't support this
+                break
 
-    print("done")
+    def testminusone(self):
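+        # The magic null revision is no longer appended by the helper above;
+        # parse_index2() itself is expected to expose it at index[-1], for
+        # both the inlined and non-inlined layouts.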
+        want = (0, 0, 0, -1, -1, -1, -1, nullid)
+        index, junk = parsers.parse_index2(data_inlined, True)
+        got = index[-1]
+        self.assertEqual(want, got) # inline data
 
-runtest()
+        index, junk = parsers.parse_index2(data_non_inlined, False)
+        got = index[-1]
+        self.assertEqual(want, got) # no inline data
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/test-parseindex2.py.out	Tue Sep 04 11:59:12 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-done
--- a/tests/test-patchbomb-bookmark.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-patchbomb-bookmark.t	Tue Sep 04 12:16:28 2018 -0400
@@ -35,7 +35,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] bookmark
-  Message-Id: <patchbomb.347155260@*> (glob)
+  Message-Id: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:00 +0000
   From: test
@@ -50,10 +50,10 @@
   X-Mercurial-Node: accde9b8b6dce861c185d0825c1affc09a79cb26
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  In-Reply-To: <patchbomb.347155260@*> (glob)
-  References: <patchbomb.347155260@*> (glob)
+  Message-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  In-Reply-To: <patchbomb.347155260@test-hostname>
+  References: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:01 +0000
   From: test
@@ -81,10 +81,10 @@
   X-Mercurial-Node: 417defd1559c396ba06a44dce8dc1c2d2d653f3f
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <417defd1559c396ba06a.347155262@*> (glob)
-  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  In-Reply-To: <patchbomb.347155260@*> (glob)
-  References: <patchbomb.347155260@*> (glob)
+  Message-Id: <417defd1559c396ba06a.347155262@test-hostname>
+  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  In-Reply-To: <patchbomb.347155260@test-hostname>
+  References: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:02 +0000
   From: test
@@ -145,8 +145,8 @@
   X-Mercurial-Node: 8dab2639fd35f1e337ad866c372a5c44f1064e3c
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8dab2639fd35f1e337ad.378691260@*> (glob)
-  X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@*> (glob)
+  Message-Id: <8dab2639fd35f1e337ad.378691260@test-hostname>
+  X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Fri, 01 Jan 1982 00:01:00 +0000
   From: test
--- a/tests/test-patchbomb.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-patchbomb.t	Tue Sep 04 12:16:28 2018 -0400
@@ -2,7 +2,6 @@
 wildcards in test expectations due to how many things like hostnames
 tend to make it into outputs. As a result, you may need to perform the
 following regular expression substitutions:
-@$HOSTNAME> -> @*> (glob)
 Mercurial-patchbomb/.* -> Mercurial-patchbomb/* (glob)
 /mixed; boundary="===+[0-9]+==" -> /mixed; boundary="===*== (glob)"
 --===+[0-9]+=+--$ -> --===*=-- (glob)
@@ -45,8 +44,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -84,8 +83,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <*@*> (glob)
-  X-Mercurial-Series-Id: <*@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -159,8 +158,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -197,8 +196,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -236,7 +235,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.120@*> (glob)
+  Message-Id: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:00 +0000
   From: quux
@@ -252,10 +251,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.121@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob)
-  In-Reply-To: <patchbomb.120@*> (glob)
-  References: <patchbomb.120@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.121@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname>
+  In-Reply-To: <patchbomb.120@test-hostname>
+  References: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:01 +0000
   From: quux
@@ -284,10 +283,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.122@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob)
-  In-Reply-To: <patchbomb.120@*> (glob)
-  References: <patchbomb.120@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.122@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname>
+  In-Reply-To: <patchbomb.120@test-hostname>
+  References: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:02 +0000
   From: quux
@@ -366,7 +365,7 @@
   Content-Type: multipart/mixed; boundary="===*==" (glob)
   MIME-Version: 1.0
   Subject: test
-  Message-Id: <patchbomb.180@*> (glob)
+  Message-Id: <patchbomb.180@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:03:00 +0000
   From: quux
@@ -412,7 +411,7 @@
   Content-Type: multipart/mixed; boundary="===*==" (glob)
   MIME-Version: 1.0
   Subject: test
-  Message-Id: <patchbomb.180@*> (glob)
+  Message-Id: <patchbomb.180@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:03:00 +0000
   From: quux
@@ -439,10 +438,11 @@
   CgZcySARUyA2A2LGZKiZ3Y+Lu786z4z4MWXmsrAZCsqrl1az5y21PMcjpbThzWeXGT+/nutbmvvz
   zXYS3BoGxdrJDIYmlimJJiZpRokmqYYmaSYWFknmSSkmhqbmliamiZYWxuYmBhbJBgZcUBNZQe5K
   Epm7xF/LT+RLx/a9juFTomaYO/Rgsx4rwBN+IMCUDLOKAQBrsmti
+   (?)
   --===============*==-- (glob)
 
 utf-8 patch:
-  $ $PYTHON -c 'fp = open("utf", "wb"); fp.write("h\xC3\xB6mma!\n"); fp.close();'
+  $ $PYTHON -c 'fp = open("utf", "wb"); fp.write(b"h\xC3\xB6mma!\n"); fp.close();'
   $ hg commit -A -d '4 0' -m 'utf-8 content'
   adding description
   adding utf
@@ -454,14 +454,14 @@
   
   displaying [PATCH] utf-8 content ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH] utf-8 content
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <909a00e13e9d78b575ae.240@*> (glob)
-  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.240@test-hostname>
+  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -487,7 +487,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/utf	Thu Jan 01 00:00:04 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xc3\xb6mma! (esc)
+  +h=C3=B6mma!
   
 
 mime encoded mbox (base64):
@@ -506,8 +506,8 @@
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <909a00e13e9d78b575ae.240@*> (glob)
-  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.240@test-hostname>
+  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: Q <quux>
@@ -526,7 +526,14 @@
   QEAgLTAsMCArMSwxIEBACitow7ZtbWEhCg==
   
   
-  $ $PYTHON -c 'print open("mbox").read().split("\n\n")[1].decode("base64")'
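+(Rewritten as an inline doctest so it behaves on both Python 2 and 3:
+b64decode returns bytes on Python 3, which must be written via
+sys.stdout.buffer rather than print().)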
+  >>> import base64
+  >>> patch = base64.b64decode(open("mbox").read().split("\n\n")[1])
+  >>> if not isinstance(patch, str):
+  ...     import sys
+  ...     sys.stdout.flush()
+  ...     junk = sys.stdout.buffer.write(patch + b"\n")
+  ... else:
+  ...     print(patch)
   # HG changeset patch
   # User test
   # Date 4 0
@@ -551,7 +558,7 @@
   $ rm mbox
 
 mime encoded mbox (quoted-printable):
-  $ $PYTHON -c 'fp = open("long", "wb"); fp.write("%s\nfoo\n\nbar\n" % ("x" * 1024)); fp.close();'
+  $ $PYTHON -c 'fp = open("long", "wb"); fp.write(b"%s\nfoo\n\nbar\n" % (b"x" * 1024)); fp.close();'
   $ hg commit -A -d '4 0' -m 'long line'
   adding long
 
@@ -568,8 +575,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -622,8 +629,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -665,7 +672,7 @@
   $ rm mbox
 
 iso-8859-1 patch:
-  $ $PYTHON -c 'fp = open("isolatin", "wb"); fp.write("h\xF6mma!\n"); fp.close();'
+  $ $PYTHON -c 'fp = open("isolatin", "wb"); fp.write(b"h\xF6mma!\n"); fp.close();'
   $ hg commit -A -d '5 0' -m 'isolatin 8-bit encoding'
   adding isolatin
 
@@ -684,8 +691,8 @@
   X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <240fb913fc1b7ff15ddb.300@*> (glob)
-  X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@*> (glob)
+  Message-Id: <240fb913fc1b7ff15ddb.300@test-hostname>
+  X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:05:00 +0000
   From: quux
@@ -732,8 +739,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -791,7 +798,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -811,10 +818,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -847,10 +854,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -888,8 +895,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -931,8 +938,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -991,7 +998,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 3] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1006,10 +1013,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 3
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1044,10 +1051,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 3
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1082,10 +1089,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 3
-  Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:03 +0000
   From: quux
@@ -1142,8 +1149,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1193,8 +1200,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1260,8 +1267,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1323,7 +1330,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 3] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1338,10 +1345,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 3
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1385,10 +1392,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 3
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1432,10 +1439,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 3
-  Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:03 +0000
   From: quux
@@ -1503,7 +1510,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1519,10 +1526,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1556,7 +1563,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1573,10 +1580,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1612,7 +1619,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1628,10 +1635,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1660,10 +1667,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1699,8 +1706,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1737,8 +1744,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1779,8 +1786,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1823,7 +1830,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1838,10 +1845,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1876,10 +1883,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1923,8 +1930,8 @@
   X-Mercurial-Node: 7aead2484924c445ad8ce2613df91f52f9e502ed
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <7aead2484924c445ad8c.60@*> (glob)
-  X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@*> (glob)
+  Message-Id: <7aead2484924c445ad8c.60@test-hostname>
+  X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -1966,8 +1973,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -1998,8 +2005,8 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -2038,7 +2045,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -2056,10 +2063,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2088,10 +2095,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2129,8 +2136,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2167,7 +2174,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 fooFlag] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2183,10 +2190,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2215,10 +2222,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2256,8 +2263,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2293,7 +2300,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 fooFlag barFlag] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2309,10 +2316,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2341,10 +2348,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2383,8 +2390,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.315532860@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.315532860@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: quux
@@ -2422,7 +2429,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 R1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2438,10 +2445,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2469,10 +2476,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2508,8 +2515,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2531,10 +2538,11 @@
   
 
 test multi-byte domain parsing:
-  $ UUML=`$PYTHON -c 'import sys; sys.stdout.write("\374")'`
+  >>> with open('toaddress.txt', 'wb') as f:
+  ...  f.write(b'bar@\xfcnicode.com') and None
   $ HGENCODING=iso-8859-1
   $ export HGENCODING
-  $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "bar@${UUML}nicode.com" -s test -r 0
+  $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "`cat toaddress.txt`" -s test -r 0
   this patch series consists of 1 patches.
   
   Cc: 
@@ -2550,8 +2558,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.315532860@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.315532860@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: quux
@@ -2625,7 +2633,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 6] test
-  Message-Id: <patchbomb.315532860@*> (glob)
+  Message-Id: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: test
@@ -2640,10 +2648,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 6
-  Message-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:01 +0000
   From: test
@@ -2665,16 +2673,16 @@
   
   displaying [PATCH 2 of 6] utf-8 content ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH 2 of 6] utf-8 content
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 6
-  Message-Id: <909a00e13e9d78b575ae.315532862@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.315532862@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:02 +0000
   From: test
@@ -2699,7 +2707,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/utf	Thu Jan 01 00:00:04 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xc3\xb6mma! (esc)
+  +h=C3=B6mma!
   
   displaying [PATCH 3 of 6] long line ...
   MIME-Version: 1.0
@@ -2709,10 +2717,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 6
-  Message-Id: <a2ea8fc83dd8b93cfd86.315532863@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.315532863@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:03 +0000
   From: test
@@ -2750,16 +2758,16 @@
   
   displaying [PATCH 4 of 6] isolatin 8-bit encoding ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH 4 of 6] isolatin 8-bit encoding
   X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720
   X-Mercurial-Series-Index: 4
   X-Mercurial-Series-Total: 6
-  Message-Id: <240fb913fc1b7ff15ddb.315532864@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <240fb913fc1b7ff15ddb.315532864@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:04 +0000
   From: test
@@ -2777,7 +2785,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/isolatin	Thu Jan 01 00:00:05 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xf6mma! (esc)
+  +h=F6mma!
   
   displaying [PATCH 5 of 6] Added tag zero, zero.foo for changeset 8580ff50825a ...
   MIME-Version: 1.0
@@ -2787,10 +2795,10 @@
   X-Mercurial-Node: 5d5ef15dfe5e7bd3a4ee154b5fff76c7945ec433
   X-Mercurial-Series-Index: 5
   X-Mercurial-Series-Total: 6
-  Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:05 +0000
   From: test
@@ -2819,10 +2827,10 @@
   X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
   X-Mercurial-Series-Index: 6
   X-Mercurial-Series-Total: 6
-  Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:06 +0000
   From: test
@@ -2864,8 +2872,8 @@
   X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob)
-  X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob)
+  Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname>
+  X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: test
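
The patchbomb test updates above replace Python 2-only idioms (implicit str writes, str.decode("base64")) with code that runs under both major Python versions. A minimal standalone sketch of the same pattern, assuming a file named "mbox" laid out like the one generated in this test:

  import base64
  import sys

  # Split off the base64 body: in this test it is the second
  # "\n\n"-separated chunk of the mbox file.
  with open("mbox", "rb") as f:
      payload = f.read().split(b"\n\n")[1]

  patch = base64.b64decode(payload)
  if hasattr(sys.stdout, "buffer"):
      # Python 3: text-mode stdout rejects bytes; write to the raw buffer.
      sys.stdout.flush()
      sys.stdout.buffer.write(patch + b"\n")
  else:
      # Python 2: stdout accepts bytes directly.
      print(patch)
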
--- a/tests/test-phases.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-phases.t	Tue Sep 04 12:16:28 2018 -0400
@@ -826,3 +826,81 @@
   rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
   [255]
+
+  $ cd ..
+
+Test for the "internal" phase
+=============================
+
+Check that we deny its usage on older repositories
+
+  $ hg init no-internal-phase --config format.internal-phase=no
+  $ cd no-internal-phase
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  revlogv1
+  store
+  $ echo X > X
+  $ hg add X
+  $ hg status
+  A X
+  $ hg --config "phases.new-commit=internal" commit -m "my test internal commit" 2>&1 | grep ProgrammingError
+  ** ProgrammingError: this repository does not support the internal phase
+      raise error.ProgrammingError(msg)
+  mercurial.error.ProgrammingError: this repository does not support the internal phase
+
+  $ cd ..
+
+Check that it works fine with a repository that supports it.
+
+  $ hg init internal-phase --config format.internal-phase=yes
+  $ cd internal-phase
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  internal-phase
+  revlogv1
+  store
+  $ mkcommit A
+  test-debug-phase: new rev 0:  x -> 1
+  test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256:   -> draft
+
+Commit an internal changeset
+
+  $ echo B > B
+  $ hg add B
+  $ hg status
+  A B
+  $ hg --config "phases.new-commit=internal" commit -m "my test internal commit"
+  test-debug-phase: new rev 1:  x -> 96
+  test-hook-close-phase: c01c42dffc7f81223397e99652a0703f83e1c5ea:   -> internal
+
+Usual visibility rules apply when internal changesets are working directory parents
+
+  $ hg log -G -l 3
+  @  changeset:   1:c01c42dffc7f
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     my test internal commit
+  |
+  o  changeset:   0:4a2df7238c3b
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A
+  
+
+Commit is hidden as expected
+
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg log -G
+  @  changeset:   0:4a2df7238c3b
+     tag:         tip
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A
+  
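
The internal-phase tests above gate the feature on an `internal-phase` entry in `.hg/requires`. A hedged sketch (not Mercurial's own API) that checks for the requirement the same way the `cat .hg/requires` steps do, using the repository names from this test:

  import os

  def supports_internal_phase(repo_root):
      # True when the repo was created with format.internal-phase=yes and
      # therefore accepts phases.new-commit=internal without raising a
      # ProgrammingError.
      requires = os.path.join(repo_root, ".hg", "requires")
      try:
          with open(requires) as f:
              return "internal-phase" in {line.strip() for line in f}
      except IOError:
          return False

  print(supports_internal_phase("internal-phase"))     # True in this test
  print(supports_internal_phase("no-internal-phase"))  # False
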
--- a/tests/test-py3-commands.t	Tue Sep 04 11:59:12 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-#require py3exe
-
-This test helps in keeping track of which commands we can run on
-Python 3 and seeing what kinds of errors come up.
-The full traceback is hidden to keep the output stable.
-  $ HGBIN=`which hg`
-
-  $ for cmd in version debuginstall ; do
-  >   echo $cmd
-  >   $PYTHON3 $HGBIN $cmd 2>&1 2>&1 | tail -1
-  > done
-  version
-  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-  debuginstall
-  no problems detected
-
-#if test-repo
-Make a clone so that any features in the developer's .hg/hgrc that
-might confuse Python 3 don't break this test. When we can do commit in
-Python 3, we'll stop doing this. We use e76ed1e480ef for the clone
-because it has different files than 273ce12ad8f1, so we can test both
-`files` from dirstate and `files` loaded from a specific revision.
-
-  $ hg clone -r e76ed1e480ef "`dirname "$TESTDIR"`" testrepo 2>&1 | tail -1
-  15 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Test using -R, which exercises some URL code:
-  $ $PYTHON3 $HGBIN -R testrepo files -r 273ce12ad8f1 | tail -1
-  testrepo/tkmerge
-
-Now prove `hg files` is reading the whole manifest. For now we have to
-grep out some potential warnings that come from hgrc.
-  $ cd testrepo
-  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1
-  .hgignore
-  PKG-INFO
-  README
-  hg
-  mercurial/__init__.py
-  mercurial/byterange.py
-  mercurial/fancyopts.py
-  mercurial/hg.py
-  mercurial/mdiff.py
-  mercurial/revlog.py
-  mercurial/transaction.py
-  notes.txt
-  setup.py
-  tkmerge
-
-  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1 | wc -l
-  \s*14 (re)
-  $ $PYTHON3 $HGBIN files | wc -l
-  \s*15 (re)
-
-Test if log-like commands work:
-
-  $ $PYTHON3 $HGBIN tip
-  changeset:   10:e76ed1e480ef
-  tag:         tip
-  user:        oxymoron@cinder.waste.org
-  date:        Tue May 03 23:37:43 2005 -0800
-  summary:     Fix linking of changeset revs when merging
-  
-
-  $ $PYTHON3 $HGBIN log -r0
-  changeset:   0:9117c6561b0b
-  user:        mpm@selenic.com
-  date:        Tue May 03 13:16:10 2005 -0800
-  summary:     Add back links from file revisions to changeset revisions
-  
-
-  $ cd ..
-#endif
-
-Test if `hg config` works:
-
-  $ $PYTHON3 $HGBIN config
-  devel.all-warnings=true
-  devel.default-date=0 0
-  largefiles.usercache=$TESTTMP/.cache/largefiles
-  ui.slash=True
-  ui.interactive=False
-  ui.mergemarkers=detailed
-  ui.promptecho=True
-  web.address=localhost
-  web.ipv6=False
-
-  $ cat > included-hgrc <<EOF
-  > [extensions]
-  > babar = imaginary_elephant
-  > EOF
-  $ cat >> $HGRCPATH <<EOF
-  > %include $TESTTMP/included-hgrc
-  > EOF
-  $ $PYTHON3 $HGBIN version | tail -1
-  *** failed to import extension babar from imaginary_elephant: *: 'imaginary_elephant' (glob)
-  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-  $ rm included-hgrc
-  $ touch included-hgrc
-
-Test bytes-ness of policy.policy with HGMODULEPOLICY
-
-  $ HGMODULEPOLICY=py
-  $ export HGMODULEPOLICY
-  $ $PYTHON3 `which hg` debuginstall 2>&1 2>&1 | tail -1
-  no problems detected
-
-`hg init` can create empty repos
-`hg status` works fine
-`hg summary` also works!
-
-  $ $PYTHON3 `which hg` init py3repo
-  $ cd py3repo
-  $ echo "This is the file 'iota'." > iota
-  $ $PYTHON3 $HGBIN status
-  ? iota
-  $ $PYTHON3 $HGBIN add iota
-  $ $PYTHON3 $HGBIN status
-  A iota
-  $ hg diff --nodates --git
-  diff --git a/iota b/iota
-  new file mode 100644
-  --- /dev/null
-  +++ b/iota
-  @@ -0,0 +1,1 @@
-  +This is the file 'iota'.
-  $ $PYTHON3 $HGBIN commit --message 'commit performed in Python 3'
-  $ $PYTHON3 $HGBIN status
-
-  $ mkdir A
-  $ echo "This is the file 'mu'." > A/mu
-  $ $PYTHON3 $HGBIN addremove
-  adding A/mu
-  $ $PYTHON3 $HGBIN status
-  A A/mu
-  $ HGEDITOR='echo message > ' $PYTHON3 $HGBIN commit
-  $ $PYTHON3 $HGBIN status
-  $ $PYTHON3 $HGBIN summary
-  parent: 1:e1e9167203d4 tip
-   message
-  branch: default
-  commit: (clean)
-  update: (current)
-  phases: 2 draft
-
-Test weird unicode-vs-bytes stuff
-
-  $ $PYTHON3 $HGBIN help | egrep -v '^ |^$'
-  Mercurial Distributed SCM
-  list of commands:
-  additional help topics:
-  (use 'hg help -v' to show built-in aliases and global options)
-
-  $ $PYTHON3 $HGBIN help help | egrep -v '^ |^$'
-  hg help [-ecks] [TOPIC]
-  show help for a given topic or a help overview
-  options ([+] can be repeated):
-  (some details hidden, use --verbose to show complete help)
-
-  $ $PYTHON3 $HGBIN help -k notopic
-  abort: no matches
-  (try 'hg help' for a list of topics)
-  [255]
-
-Prove the repo is valid using the Python 2 `hg`:
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  2 files, 2 changesets, 2 total revisions
-  $ hg log
-  changeset:   1:e1e9167203d4
-  tag:         tip
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     message
-  
-  changeset:   0:71c96e924262
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     commit performed in Python 3
-  
-
-  $ $PYTHON3 $HGBIN log -G
-  @  changeset:   1:e1e9167203d4
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     message
-  |
-  o  changeset:   0:71c96e924262
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     commit performed in Python 3
-  
-  $ $PYTHON3 $HGBIN log -Tjson
-  [
-   {
-    "bookmarks": [],
-    "branch": "default",
-    "date": [0, 0],
-    "desc": "message",
-    "node": "e1e9167203d450ca2f558af628955b5f5afd4489",
-    "parents": ["71c96e924262969ff0d8d3d695b0f75412ccc3d8"],
-    "phase": "draft",
-    "rev": 1,
-    "tags": ["tip"],
-    "user": "test"
-   },
-   {
-    "bookmarks": [],
-    "branch": "default",
-    "date": [0, 0],
-    "desc": "commit performed in Python 3",
-    "node": "71c96e924262969ff0d8d3d695b0f75412ccc3d8",
-    "parents": ["0000000000000000000000000000000000000000"],
-    "phase": "draft",
-    "rev": 0,
-    "tags": [],
-    "user": "test"
-   }
-  ]
-
-Show that update works now!
-
-  $ $PYTHON3 $HGBIN up 0
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ $PYTHON3 $HGBIN identify
-  71c96e924262
-
-branches and bookmarks also work!
-
-  $ $PYTHON3 $HGBIN branches
-  default                        1:e1e9167203d4
-  $ $PYTHON3 $HGBIN bookmark book
-  $ $PYTHON3 $HGBIN bookmarks
-   * book                      0:71c96e924262
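
Although the Python 3 smoke-test file is removed, the `hg log -Tjson` invocation it exercised remains a stable way to consume history programmatically. A small sketch, assuming `hg` is on PATH and the current directory is a repository:

  import json
  import subprocess

  out = subprocess.check_output(["hg", "log", "-Tjson"])
  for cs in json.loads(out.decode("utf-8")):
      # Each entry carries "node", "desc", "parents", "phase", etc.,
      # as shown in the expected output above.
      summary = (cs["desc"].splitlines() or [""])[0]
      print("%s %s" % (cs["node"][:12], summary))
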
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-backup.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,150 @@
+  $ cat << EOF >> $HGRCPATH
+  > [extensions]
+  > rebase=
+  > EOF
+
+==========================================
+Test history-editing-backup config option
+==========================================
+Test with pre-obsmarker rebase:
+1) When config option is not set:
+  $ hg init repo1
+  $ cd repo1
+  $ echo a>a
+  $ hg ci -qAma
+  $ echo b>b
+  $ hg ci -qAmb
+  $ echo c>c
+  $ hg ci -qAmc
+  $ hg up 0 -q
+  $ echo d>d
+  $ hg ci -qAmd
+  $ echo e>e
+  $ hg ci -qAme
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  4: e
+  |
+  o  3: d
+  |
+  | o  2: c
+  | |
+  | o  1: b
+  |/
+  o  0: a
+  
+  $ hg rebase -s 1 -d .
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/d2ae7f538514-c7ed7a78-rebase.hg
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  o  4: c
+  |
+  o  3: b
+  |
+  @  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+
+2) When config option is set:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+
+  $ echo f>f
+  $ hg ci -Aqmf
+  $ echo g>g
+  $ hg ci -Aqmg
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  6: g
+  |
+  o  5: f
+  |
+  | o  4: c
+  | |
+  | o  3: b
+  |/
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+  $ hg rebase -s 3 -d .
+  rebasing 3:05bff2a95b12 "b"
+  rebasing 4:1762bde4404d "c"
+
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  o  6: c
+  |
+  o  5: b
+  |
+  @  4: g
+  |
+  o  3: f
+  |
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+Test when rebased revisions are stripped during abort:
+======================================================
+
+  $ echo conflict > c
+  $ hg ci -Am "conflict with c"
+  adding c
+  created new head
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  7: conflict with c
+  |
+  | o  6: c
+  | |
+  | o  5: b
+  |/
+  o  4: g
+  |
+  o  3: f
+  |
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+When history-editing-backup = True:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = True
+  > EOF
+  $ hg rebase -s 5 -d .
+  rebasing 5:1f8148a544ee "b"
+  rebasing 6:f8bc7d28e573 "c"
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --abort
+  saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/818c1a43c916-2b644d96-backup.hg
+  rebase aborted
+
+When history-editing-backup = False:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ hg rebase -s 5 -d .
+  rebasing 5:1f8148a544ee "b"
+  rebasing 6:f8bc7d28e573 "c"
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --abort
+  rebase aborted
+  $ cd ..
+
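
The new test drives `ui.history-editing-backup` through `$HGRCPATH`; the same switch can also be supplied per invocation with `--config`, so a single rebase skips writing a `.hg/strip-backup` bundle without editing any hgrc. A sketch of that usage (the helper name and subprocess wrapper are illustrative, not part of Mercurial):

  import subprocess

  def rebase_without_backup(source, dest):
      # One-off rebase with backup bundles disabled; run inside a repo.
      return subprocess.call([
          "hg", "rebase", "-s", str(source), "-d", str(dest),
          "--config", "ui.history-editing-backup=False",
      ])
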
--- a/tests/test-rebase-inmemory.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-rebase-inmemory.t	Tue Sep 04 12:16:28 2018 -0400
@@ -156,7 +156,33 @@
   |/
   o  0: b173517d0057 'a'
   
+
+Test reporting of path conflicts
+
+  $ hg rm a
+  $ mkdir a
+  $ touch a/a
+  $ hg ci -Am "a/a"
+  adding a/a
+  $ hg tglog
+  @  4: daf7dfc139cb 'a/a'
+  |
+  o  3: 844a7de3e617 'c'
+  |
+  | o  2: 09c044d2cb43 'd'
+  | |
+  | o  1: fc055c3b4d33 'b'
+  |/
+  o  0: b173517d0057 'a'
+  
+  $ hg rebase -r . -d 2
+  rebasing 4:daf7dfc139cb "a/a" (tip)
+  saved backup bundle to $TESTTMP/repo1/repo2/.hg/strip-backup/daf7dfc139cb-fdbfcf4f-rebase.hg
+
+  $ cd ..
+
 Test dry-run rebasing
+
   $ hg init repo3
   $ cd repo3
   $ echo a>a
@@ -325,6 +351,25 @@
   hit a merge conflict
   [1]
 
+In-memory rebase that fails due to merge conflicts
+
+  $ hg rebase -s 2 -d 7
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  rebasing 4:e860deea161a "e"
+  merging e
+  transaction abort!
+  rollback completed
+  hit merge conflicts; re-running rebase without in-memory merge
+  rebase aborted
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  rebasing 4:e860deea161a "e"
+  merging e
+  warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+
 ==========================
 Test for --confirm option
 ==========================
@@ -509,3 +554,31 @@
   o  0:cb9a9f314b8b test
      a
   
+#if execbit
+
+Test a metadata-only in-memory merge
+  $ cd $TESTTMP
+  $ hg init no_exception
+  $ cd no_exception
+# Produce the following graph:
+#   o  'add +x to foo.txt'
+#   | o  r1  (adds bar.txt, just for something to rebase to)
+#   |/
+#   o  r0   (adds foo.txt, no +x)
+  $ echo hi > foo.txt
+  $ hg ci -qAm r0
+  $ echo hi > bar.txt
+  $ hg ci -qAm r1
+  $ hg co -qr ".^"
+  $ chmod +x foo.txt
+  $ hg ci -qAm 'add +x to foo.txt'
+issue5960: this was raising an AttributeError exception
+  $ hg rebase -r . -d 1
+  rebasing 2:539b93e77479 "add +x to foo.txt" (tip)
+  saved backup bundle to $TESTTMP/no_exception/.hg/strip-backup/*.hg (glob)
+  $ hg diff -c tip
+  diff --git a/foo.txt b/foo.txt
+  old mode 100644
+  new mode 100755
+
+#endif
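
The conflict case above documents that Mercurial itself retries a failed in-memory rebase on disk ("re-running rebase without in-memory merge"). A deliberately schematic sketch of that control flow, using a stand-in exception type; the real implementation lives inside the rebase extension:

  class MergeConflict(Exception):
      """Stand-in for the internal signal that triggers the fallback."""

  def run_with_fallback(fast, slow):
      # Attempt the in-memory (fast) path first; on a merge conflict,
      # abort it and redo the work on disk (slow), where conflicts can
      # be resolved with 'hg resolve' as usual.
      try:
          return fast()
      except MergeConflict:
          return slow()

  # e.g. run_with_fallback(lambda: rebase_in_memory(...),
  #                        lambda: rebase_on_disk(...))
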
--- a/tests/test-rebase-newancestor.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-rebase-newancestor.t	Tue Sep 04 12:16:28 2018 -0400
@@ -133,7 +133,8 @@
   note: rebase of 1:1d1a643d390e created no changes to commit
   rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
   rebasing 4:4b019212aaf6 "dev: merge default"
-  other [source] changed f-default which local [dest] deleted
+  file 'f-default' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
   rebasing 6:9455ee510502 "dev: merge default"
   saved backup bundle to $TESTTMP/ancestor-merge/.hg/strip-backup/1d1a643d390e-43e9e04b-rebase.hg
@@ -162,7 +163,8 @@
   > EOF
   rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
   rebasing 4:4b019212aaf6 "dev: merge default"
-  other [source] changed f-default which local [dest] deleted
+  file 'f-default' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
   rebasing 6:9455ee510502 "dev: merge default"
   saved backup bundle to $TESTTMP/ancestor-merge-2/.hg/strip-backup/ec2c14fb2984-62d0b222-rebase.hg
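
Only the prompt wording changes here; the reply is still a single character. The test feeds it through a heredoc, and the same thing can be scripted by piping the answer into hg's stdin. A sketch (the revision arguments are placeholders, not taken from this test):

  import subprocess

  # Pipe the "c" (use changed version) answer into the reworded
  # change/delete prompt, like the heredoc in the test above.
  subprocess.run(
      ["hg", "rebase", "-s", "2", "-d", "default"],
      input=b"c\n",
  )
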
--- a/tests/test-rebase-obsolete.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-rebase-obsolete.t	Tue Sep 04 12:16:28 2018 -0400
@@ -15,6 +15,7 @@
   > [extensions]
   > rebase=
   > drawdag=$TESTDIR/drawdag.py
+  > strip=
   > EOF
 
 Setup rebase canonical repo
@@ -1122,6 +1123,23 @@
   o  0:b173517d0057 a
   
   $ hg strip -r 8:
+  $ hg log -G -r 'a'::
+  *  7:1143e9adc121 f
+  |
+  | o  6:d60ebfa0f1cb e
+  | |
+  | o  5:027ad6c5830d d'
+  | |
+  x |  4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
+  |/
+  o  3:a82ac2b38757 c
+  |
+  | o  2:630d7c95eff7 x
+  | |
+  o |  1:488e1b7e7341 b
+  |/
+  o  0:b173517d0057 a
+  
 
 If the rebase set has an obsolete (d) with a successor (d') outside the rebase
 set and none in destination, we still get the divergence warning.
@@ -1493,6 +1511,26 @@
   
   $ cd ..
 
+Rebase merge where extinct node has successor that is not an ancestor of
+destination
+
+  $ hg init extinct-with-succ-not-in-dest
+  $ cd extinct-with-succ-not-in-dest
+
+  $ hg debugdrawdag <<EOF
+  > E C # replace: C -> E
+  > | |
+  > D B
+  > |/
+  > A
+  > EOF
+
+  $ hg rebase -d D -s B
+  rebasing 1:112478962961 "B" (B)
+  note: not rebasing 3:26805aba1e60 "C" (C) and its descendants as this would cause divergence
+
+  $ cd ..
+
   $ hg init p2-succ-in-dest-c
   $ cd p2-succ-in-dest-c
 
@@ -1788,3 +1826,312 @@
   |
   o  0:426bada5c675 A
   
+====================
+Test --stop option
+====================
+  $ cd ..
+  $ hg init rbstop
+  $ cd rbstop
+  $ echo a>a
+  $ hg ci -Aqma
+  $ echo b>b
+  $ hg ci -Aqmb
+  $ echo c>c
+  $ hg ci -Aqmc
+  $ echo d>d
+  $ hg ci -Aqmd
+  $ hg up 0 -q
+  $ echo f>f
+  $ hg ci -Aqmf
+  $ echo D>d
+  $ hg ci -Aqm "conflict with d"
+  $ hg up 3 -q
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  1 new orphan changesets
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  7:7fffad344617 test
+  |  c
+  |
+  o  6:b15528633407 test
+  |  b
+  |
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | x  2:177f92b77385 test
+  | |  c
+  | |
+  | x  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test that it aborts if unstable csets are not allowed:
+===============================================
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution.allowunstable=False
+  > EOF
+
+  $ hg strip 6 --no-backup -q
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  abort: cannot remove original changesets with unrebased descendants
+  (either enable obsmarkers to allow unstable revisions or use --keep to keep original changesets)
+  [255]
+  $ hg rebase --abort
+  saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+  rebase aborted
+
+Test --stop when --keep is passed:
+==================================
+  $ hg rebase -s 1 -d 5 --keep
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  7:7fffad344617 test
+  |  c
+  |
+  o  6:b15528633407 test
+  |  b
+  |
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test --stop aborts when --collapse was passed:
+=============================================
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution.allowunstable=True
+  > EOF
+
+  $ hg strip 6
+  saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5 --collapse -m "collapsed b c d"
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  abort: cannot stop in --collapse session
+  [255]
+  $ hg rebase --abort
+  rebase aborted
+  $ hg diff
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test that --stop raises errors with conflicting options:
+=================================================
+  $ hg rebase -s 3 -d 5
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop --dry-run
+  abort: cannot specify both --dry-run and --stop
+  [255]
+
+  $ hg rebase -s 3 -d 5
+  abort: rebase in progress
+  (use 'hg rebase --continue' or 'hg rebase --abort')
+  [255]
+  $ hg rebase --stop --continue
+  abort: cannot use --stop with --continue
+  [255]
+
+Test --stop moves bookmarks of original revisions to new rebased nodes:
+======================================================================
+  $ cd ..
+  $ hg init repo
+  $ cd repo
+
+  $ echo a > a
+  $ hg ci -Am A
+  adding a
+
+  $ echo b > b
+  $ hg ci -Am B
+  adding b
+  $ hg book X
+  $ hg book Y
+
+  $ echo c > c
+  $ hg ci -Am C
+  adding c
+  $ hg book Z
+
+  $ echo d > d
+  $ hg ci -Am D
+  adding d
+
+  $ hg up 0 -q
+  $ echo e > e
+  $ hg ci -Am E
+  adding e
+  created new head
+
+  $ echo doubt > d
+  $ hg ci -Am "conflict with d"
+  adding d
+
+  $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+  @  5: 39adf30bc1be 'conflict with d' bookmarks:
+  |
+  o  4: 9c1e55f411b6 'E' bookmarks:
+  |
+  | o  3: 67a385d4e6f2 'D' bookmarks: Z
+  | |
+  | o  2: 49cb3485fa0c 'C' bookmarks: Y
+  | |
+  | o  1: 6c81ed0049f8 'B' bookmarks: X
+  |/
+  o  0: 1994f17a630e 'A' bookmarks:
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:6c81ed0049f8 "B" (X)
+  rebasing 2:49cb3485fa0c "C" (Y)
+  rebasing 3:67a385d4e6f2 "D" (Z)
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  1 new orphan changesets
+  $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+  o  7: 9c86c650b686 'C' bookmarks: Y
+  |
+  o  6: 9b87b54e5fd8 'B' bookmarks: X
+  |
+  @  5: 39adf30bc1be 'conflict with d' bookmarks:
+  |
+  o  4: 9c1e55f411b6 'E' bookmarks:
+  |
+  | *  3: 67a385d4e6f2 'D' bookmarks: Z
+  | |
+  | x  2: 49cb3485fa0c 'C' bookmarks:
+  | |
+  | x  1: 6c81ed0049f8 'B' bookmarks:
+  |/
+  o  0: 1994f17a630e 'A' bookmarks:
+  
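A note on the bookmark movement demonstrated above: rebase --stop keeps the
already-rebased copies and re-points bookmarks at them, while a bookmark whose
revision was never rebased (Z here) stays where it was. The sketch below is a
hypothetical illustration of that idea, not rebase's internal code; the
dictionary names are assumptions.

  # Hypothetical sketch of re-pointing bookmarks after --stop, given a
  # replacements mapping of old node -> new node like the one rebase
  # records internally. Not Mercurial's actual API.
  def movebookmarks(bookmarks, replacements):
      """Return bookmarks with targets moved to their rebased copies."""
      return {name: replacements.get(node, node)
              for name, node in bookmarks.items()}

  # Mirrors the test: X and Y move; Z's source was not rebased, so it stays.
  marks = {'X': '6c81ed0049f8', 'Y': '49cb3485fa0c', 'Z': '67a385d4e6f2'}
  repl = {'6c81ed0049f8': '9b87b54e5fd8', '49cb3485fa0c': '9c86c650b686'}
  assert movebookmarks(marks, repl)['X'] == '9b87b54e5fd8'
  assert movebookmarks(marks, repl)['Z'] == '67a385d4e6f2'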
--- a/tests/test-rebase-parameters.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-rebase-parameters.t	Tue Sep 04 12:16:28 2018 -0400
@@ -61,7 +61,7 @@
   [1]
 
   $ hg rebase --continue --abort
-  abort: cannot use both abort and continue
+  abort: cannot use --abort with --continue
   [255]
 
   $ hg rebase --continue --collapse
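The reworded message above ("cannot use --abort with --continue") follows a
general pattern of rejecting mutually exclusive action flags. A minimal sketch
of such a check, with assumed names rather than Mercurial's cmdutil helper:

  # Minimal sketch of rejecting mutually exclusive action flags in the
  # message style the tests above expect; illustrative only.
  def checkexclusive(opts, actions=('abort', 'continue', 'stop')):
      enabled = [a for a in actions if opts.get(a)]
      if len(enabled) > 1:
          raise ValueError('cannot use --%s with --%s'
                           % (enabled[0], enabled[1]))

  try:
      checkexclusive({'abort': True, 'continue': True})
  except ValueError as err:
      print(err)  # cannot use --abort with --continue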
--- a/tests/test-remove.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-remove.t	Tue Sep 04 12:16:28 2018 -0400
@@ -520,6 +520,14 @@
   deleting [===========================================>] 1/1\r (no-eol) (esc)
                                                               \r (no-eol) (esc)
   removing a
+  $ hg remove a -nv --color debug
+  \r (no-eol) (esc)
+  deleting [===========================================>] 1/1\r (no-eol) (esc)
+                                                              \r (no-eol) (esc)
+  \r (no-eol) (esc)
+  deleting [===========================================>] 1/1\r (no-eol) (esc)
+                                                              \r (no-eol) (esc)
+  [addremove.removed ui.status|removing a]
   $ hg diff
 
   $ cat >> .hg/hgrc <<EOF
--- a/tests/test-rename-merge2.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-rename-merge2.t	Tue Sep 04 12:16:28 2018 -0400
@@ -692,7 +692,8 @@
   starting 4 threads for background file closing (?)
    a: prompt deleted/changed -> m (premerge)
   picked tool ':prompt' for a (binary False symlink False changedelete True)
-  other [merge rev] changed a which local [working copy] deleted
+  file 'a' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
    b: both created -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
@@ -737,7 +738,8 @@
   starting 4 threads for background file closing (?)
    a: prompt changed/deleted -> m (premerge)
   picked tool ':prompt' for a (binary False symlink False changedelete True)
-  local [working copy] changed a which other [merge rev] deleted
+  file 'a' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
    b: both created -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
--- a/tests/test-rename.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-rename.t	Tue Sep 04 12:16:28 2018 -0400
@@ -71,6 +71,7 @@
 
   $ hg rename --after d1/a dummy
   d1/a: not recording move - dummy does not exist
+  [1]
 
 move a single file to an existing directory
 
@@ -266,8 +267,9 @@
 
   $ hg rename d1/* d2
   d2/b: not overwriting - file already committed
-  (hg rename --force to replace the file by recording a rename)
+  ('hg rename --force' to replace the file by recording a rename)
   moving d1/d11/a1 to d2/d11/a1
+  [1]
   $ hg status -C
   A d2/a
     d1/a
@@ -338,6 +340,7 @@
   d1/b: not recording move - d2/d21/b does not exist
   d1/ba: not recording move - d2/d21/ba does not exist
   moving d1/d11/a1 to d2/d21/a1
+  [1]
   $ hg status -C
   A d2/d21/a
     d1/a
@@ -371,7 +374,8 @@
   $ echo "ca" > d1/ca
   $ hg rename d1/ba d1/ca
   d1/ca: not overwriting - file exists
-  (hg rename --after to record the rename)
+  ('hg rename --after' to record the rename)
+  [1]
   $ hg status -C
   ? d1/ca
   $ hg update -C
@@ -395,7 +399,8 @@
   $ ln -s ba d1/ca
   $ hg rename --traceback d1/ba d1/ca
   d1/ca: not overwriting - file exists
-  (hg rename --after to record the rename)
+  ('hg rename --after' to record the rename)
+  [1]
   $ hg status -C
   ? d1/ca
   $ hg update -C
@@ -421,6 +426,7 @@
   $ hg rename d1/* d2/* d3
   moving d1/d11/a1 to d3/d11/a1
   d3/b: not overwriting - d2/b collides with d1/b
+  [1]
   $ hg status -C
   A d3/a
     d1/a
--- a/tests/test-resolve.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-resolve.t	Tue Sep 04 12:16:28 2018 -0400
@@ -67,6 +67,9 @@
   $ hg resolve -l
   R file1
   U file2
+  $ hg resolve --re-merge filez file2
+  arguments do not match paths that need resolving
+  (try: hg resolve --re-merge path:filez path:file2)
   $ hg resolve -m filez file2
   arguments do not match paths that need resolving
   (try: hg resolve -m path:filez path:file2)
@@ -373,4 +376,241 @@
 
   $ hg resolve -l
 
+resolve -m can be configured to look for remaining conflict markers
+  $ hg up -qC 2
+  $ hg merge -q --tool=internal:merge 1
+  warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
+  [1]
+  $ hg resolve -l
+  U file1
+  U file2
+  $ echo 'remove markers' > file1
+  $ hg --config commands.resolve.mark-check=abort resolve -m
+  warning: the following files still have conflict markers:
+    file2
+  abort: conflict markers detected
+  (use --all to mark anyway)
+  [255]
+  $ hg resolve -l
+  U file1
+  U file2
+Try with --all from the hint
+  $ hg --config commands.resolve.mark-check=abort resolve -m --all
+  warning: the following files still have conflict markers:
+    file2
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+Test option value 'warn'
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=warn resolve -m
+  warning: the following files still have conflict markers:
+    file2
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+If the file is already marked as resolved, we don't warn about it
+  $ hg resolve --unmark file1
+  $ hg resolve -l
+  U file1
+  R file2
+  $ hg --config commands.resolve.mark-check=warn resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+If the user passes an invalid value, we treat it as 'none'.
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=nope resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+Test explicitly setting the option to 'none'
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=none resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
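The mark-check behavior exercised above boils down to scanning each file about
to be marked resolved for leftover conflict markers. A hedged sketch of that
scan follows; the regex and function name are illustrative, not the actual
implementation behind commands.resolve.mark-check.

  # Illustrative marker scan, assuming the standard markers written by
  # the :merge tool.
  import re

  _MARKER = re.compile(br'^(<<<<<<<|=======|>>>>>>>)', re.MULTILINE)

  def fileswithmarkers(paths):
      """Return the subset of paths still containing conflict markers."""
      remaining = []
      for path in paths:
          with open(path, 'rb') as fp:
              if _MARKER.search(fp.read()):
                  remaining.append(path)
      return remaining

  # With the test's state, file1 was overwritten ('remove markers') and
  # file2 still has markers, so only file2 would be reported.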
+Testing the --re-merge flag
+  $ hg resolve --unmark file1
+  $ hg resolve -l
+  U file1
+  R file2
+  $ hg resolve --mark --re-merge
+  abort: too many actions specified
+  [255]
+  $ hg resolve --re-merge --all
+  merging file1
+  warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+  [1]
+Explicit re-merge
+  $ hg resolve --unmark file1
+  $ hg resolve --config commands.resolve.explicit-re-merge=1 --all
+  abort: no action specified
+  (use --mark, --unmark, --list or --re-merge)
+  [255]
+  $ hg resolve --config commands.resolve.explicit-re-merge=1 --re-merge --all
+  merging file1
+  warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+  [1]
+
   $ cd ..
+
+======================================================
+Test 'hg resolve' confirm config option functionality |
+======================================================
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > rebase=
+  > EOF
+
+  $ hg init repo2
+  $ cd repo2
+
+  $ echo boss > boss
+  $ hg ci -Am "add boss"
+  adding boss
+
+  $ for emp in emp1 emp2 emp3; do echo work > $emp; done;
+  $ hg ci -Aqm "added emp1 emp2 emp3"
+
+  $ hg up 0
+  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+
+  $ for emp in emp1 emp2 emp3; do echo nowork > $emp; done;
+  $ hg ci -Aqm "added lazy emp1 emp2 emp3"
+
+  $ hg log -GT "{rev} {node|short} {firstline(desc)}\n"
+  @  2 0acfd4a49af0 added lazy emp1 emp2 emp3
+  |
+  | o  1 f30f98a8181f added emp1 emp2 emp3
+  |/
+  o  0 88660038d466 add boss
+  
+  $ hg rebase -s 1 -d 2
+  rebasing 1:f30f98a8181f "added emp1 emp2 emp3"
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+
+Test when commands.resolve.confirm config option is not set:
+===========================================================
+  $ hg resolve --all
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  [1]
+
+Test when config option is set:
+==============================
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > interactive = True
+  > [commands]
+  > resolve.confirm = True
+  > EOF
+
+  $ hg resolve
+  abort: no files or directories specified
+  (use --all to re-merge all unresolved files)
+  [255]
+  $ hg resolve --all << EOF
+  > n
+  > EOF
+  re-merge all unresolved files (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve --all << EOF
+  > y
+  > EOF
+  re-merge all unresolved files (yn)? y
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  [1]
+
+Test that commands.resolve.confirm respects --mark option (only when no pattern args are given):
+===============================================================================================
+
+  $ hg resolve -m emp1
+  $ hg resolve -l
+  R emp1
+  U emp2
+  U emp3
+
+  $ hg resolve -m << EOF
+  > n
+  > EOF
+  mark all unresolved files as resolved (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve -m << EOF
+  > y
+  > EOF
+  mark all unresolved files as resolved (yn)? y
+  (no more unresolved files)
+  continue: hg rebase --continue
+  $ hg resolve -l
+  R emp1
+  R emp2
+  R emp3
+
+Test that commands.resolve.confirm respects --unmark option (only when no pattern args are given):
+===============================================================================================
+
+  $ hg resolve -u emp1
+
+  $ hg resolve -l
+  U emp1
+  R emp2
+  R emp3
+
+  $ hg resolve -u << EOF
+  > n
+  > EOF
+  mark all resolved files as unresolved (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve -m << EOF
+  > y
+  > EOF
+  mark all unresolved files as resolved (yn)? y
+  (no more unresolved files)
+  continue: hg rebase --continue
+
+  $ hg resolve -l
+  R emp1
+  R emp2
+  R emp3
+
+  $ hg rebase --abort
+  rebase aborted
+  $ cd ..
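The confirm prompts above only fire when commands.resolve.confirm is set and no
patterns were given. A toy gate showing that control flow; promptfn stands in
for an interactive prompt such as ui.promptchoice, and its signature is an
assumption.

  # Toy confirm gate; the prompt wording copies the test output above.
  def confirmaction(promptfn, action, patterns, confirm):
      if not confirm or patterns:
          return True
      prompts = {
          're-merge': 're-merge all unresolved files (yn)?',
          'mark': 'mark all unresolved files as resolved (yn)?',
          'unmark': 'mark all resolved files as unresolved (yn)?',
      }
      return promptfn(prompts[action]) == 'y'

  if not confirmaction(lambda q: 'n', 'mark', [], True):
      print('abort: user quit')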
--- a/tests/test-revert-interactive.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-revert-interactive.t	Tue Sep 04 12:16:28 2018 -0400
@@ -51,11 +51,8 @@
   > n
   > n
   > EOF
-  reverting f
-  reverting folder1/g
+  remove added file folder1/i (Yn)? y
   removing folder1/i
-  reverting folder2/h
-  remove added file folder1/i (Yn)? y
   diff --git a/f b/f
   2 hunks, 2 lines changed
   examine changes to 'f'? [Ynesfdaq?] y
@@ -115,6 +112,8 @@
   2 hunks, 2 lines changed
   examine changes to 'folder2/h'? [Ynesfdaq?] n
   
+  reverting f
+  reverting folder1/g
   $ cat f
   1
   2
@@ -140,8 +139,6 @@
 Test that --interactive lifts the need for --all
 
   $ echo q | hg revert -i -r 2
-  reverting folder1/g
-  reverting folder2/h
   diff --git a/folder1/g b/folder1/g
   1 hunks, 1 lines changed
   examine changes to 'folder1/g'? [Ynesfdaq?] q
@@ -197,10 +194,6 @@
   > n
   > n
   > EOF
-  reverting f
-  reverting folder1/g
-  removing folder1/i
-  reverting folder2/h
   remove added file folder1/i (Yn)? n
   diff --git a/f b/f
   2 hunks, 2 lines changed
@@ -250,6 +243,8 @@
   2 hunks, 2 lines changed
   examine changes to 'folder2/h'? [Ynesfdaq?] n
   
+  reverting f
+  reverting folder1/g
   $ cat f
   1
   2
@@ -354,7 +349,6 @@
   > y
   > e
   > EOF
-  reverting k
   diff --git a/k b/k
   1 hunks, 2 lines changed
   examine changes to 'k'? [Ynesfdaq?] y
@@ -365,6 +359,7 @@
   +2
   discard this change to 'k'? [Ynesfdaq?] e
   
+  reverting k
   $ cat k
   42
 
@@ -378,15 +373,14 @@
   $ hg revert -i <<EOF
   > n
   > EOF
-  forgetting newfile
   forget added file newfile (Yn)? n
   $ hg status
   A newfile
   $ hg revert -i <<EOF
   > y
   > EOF
+  forget added file newfile (Yn)? y
   forgetting newfile
-  forget added file newfile (Yn)? y
   $ hg status
   ? newfile
 
@@ -406,7 +400,6 @@
   > y
   > y
   > EOF
-  reverting a
   diff --git a/a b/a
   1 hunks, 1 lines changed
   examine changes to 'a'? [Ynesfdaq?] y
@@ -417,6 +410,7 @@
   \ No newline at end of file
   apply this change to 'a'? [Ynesfdaq?] y
   
+  reverting a
   $ cat a
   0
 
--- a/tests/test-revert.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-revert.t	Tue Sep 04 12:16:28 2018 -0400
@@ -129,9 +129,9 @@
 ----------------------------------
 
   $ hg revert --all -r0
-  adding a
+  forgetting z
   removing d
-  forgetting z
+  adding a
 
 revert explicitly to parent (--rev)
 -----------------------------------
@@ -283,8 +283,8 @@
   $ echo foo > newdir/newfile
   $ hg add newdir/newfile
   $ hg revert b newdir
+  forgetting newdir/newfile
   reverting b/b
-  forgetting newdir/newfile
   $ echo foobar > b/b
   $ hg revert .
   reverting b/b
@@ -368,9 +368,9 @@
   $ hg update '.^'
   1 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ hg revert -rtip -a
+  removing ignored
   adding allyour
   adding base
-  removing ignored
   $ hg status -C
   A allyour
     ignored
@@ -790,28 +790,28 @@
 check revert output
 
   $ hg revert --all
-  undeleting content1_content1_content1-untracked
-  reverting content1_content1_content3-tracked
-  undeleting content1_content1_content3-untracked
-  reverting content1_content1_missing-tracked
-  undeleting content1_content1_missing-untracked
-  reverting content1_content2_content1-tracked
-  undeleting content1_content2_content1-untracked
-  undeleting content1_content2_content2-untracked
-  reverting content1_content2_content3-tracked
-  undeleting content1_content2_content3-untracked
-  reverting content1_content2_missing-tracked
-  undeleting content1_content2_missing-untracked
   forgetting content1_missing_content1-tracked
   forgetting content1_missing_content3-tracked
   forgetting content1_missing_missing-tracked
-  undeleting missing_content2_content2-untracked
-  reverting missing_content2_content3-tracked
-  undeleting missing_content2_content3-untracked
-  reverting missing_content2_missing-tracked
-  undeleting missing_content2_missing-untracked
   forgetting missing_missing_content3-tracked
   forgetting missing_missing_missing-tracked
+  reverting content1_content1_content3-tracked
+  reverting content1_content1_missing-tracked
+  reverting content1_content2_content1-tracked
+  reverting content1_content2_content3-tracked
+  reverting content1_content2_missing-tracked
+  reverting missing_content2_content3-tracked
+  reverting missing_content2_missing-tracked
+  undeleting content1_content1_content1-untracked
+  undeleting content1_content1_content3-untracked
+  undeleting content1_content1_missing-untracked
+  undeleting content1_content2_content1-untracked
+  undeleting content1_content2_content2-untracked
+  undeleting content1_content2_content3-untracked
+  undeleting content1_content2_missing-untracked
+  undeleting missing_content2_content2-untracked
+  undeleting missing_content2_content3-untracked
+  undeleting missing_content2_missing-untracked
 
 Compare resulting directory with revert target.
 
@@ -847,28 +847,28 @@
 check revert output
 
   $ hg revert --all --rev 'desc(base)'
-  undeleting content1_content1_content1-untracked
-  reverting content1_content1_content3-tracked
-  undeleting content1_content1_content3-untracked
-  reverting content1_content1_missing-tracked
-  undeleting content1_content1_missing-untracked
-  undeleting content1_content2_content1-untracked
-  reverting content1_content2_content2-tracked
-  undeleting content1_content2_content2-untracked
-  reverting content1_content2_content3-tracked
-  undeleting content1_content2_content3-untracked
-  reverting content1_content2_missing-tracked
-  undeleting content1_content2_missing-untracked
-  adding content1_missing_content1-untracked
-  reverting content1_missing_content3-tracked
-  adding content1_missing_content3-untracked
-  reverting content1_missing_missing-tracked
-  adding content1_missing_missing-untracked
+  forgetting missing_missing_content3-tracked
+  forgetting missing_missing_missing-tracked
   removing missing_content2_content2-tracked
   removing missing_content2_content3-tracked
   removing missing_content2_missing-tracked
-  forgetting missing_missing_content3-tracked
-  forgetting missing_missing_missing-tracked
+  reverting content1_content1_content3-tracked
+  reverting content1_content1_missing-tracked
+  reverting content1_content2_content2-tracked
+  reverting content1_content2_content3-tracked
+  reverting content1_content2_missing-tracked
+  reverting content1_missing_content3-tracked
+  reverting content1_missing_missing-tracked
+  adding content1_missing_content1-untracked
+  adding content1_missing_content3-untracked
+  adding content1_missing_missing-untracked
+  undeleting content1_content1_content1-untracked
+  undeleting content1_content1_content3-untracked
+  undeleting content1_content1_missing-untracked
+  undeleting content1_content2_content1-untracked
+  undeleting content1_content2_content2-untracked
+  undeleting content1_content2_content3-untracked
+  undeleting content1_content2_missing-untracked
 
 Compare resulting directory with revert target.
 
@@ -1120,8 +1120,8 @@
   M A
   A B
   $ hg revert --rev 1 --all
+  removing B
   reverting A
-  removing B
   $ hg status --rev 1
 
 From the other parents
@@ -1140,8 +1140,8 @@
   M A
   A B
   $ hg revert --rev 1 --all
+  removing B
   reverting A
-  removing B
   $ hg status --rev 1
 
   $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-revisions.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,45 @@
+  $ hg init repo
+  $ cd repo
+
+  $ echo 0 > a
+  $ hg ci -qAm 0
+  $ for i in 5 8 14 43 167; do
+  >   hg up -q 0
+  >   echo $i > a
+  >   hg ci -qm $i
+  > done
+  $ cat <<EOF >> .hg/hgrc
+  > [alias]
+  > l = log -T '{rev}:{shortest(node,1)}\n'
+  > EOF
+
+  $ hg l
+  5:00f
+  4:7ba5d
+  3:7ba57
+  2:72
+  1:9
+  0:b
+  $ cat <<EOF >> .hg/hgrc
+  > [experimental]
+  > revisions.disambiguatewithin=not 4
+  > EOF
+  $ hg l
+  5:0
+  4:7ba5d
+  3:7b
+  2:72
+  1:9
+  0:b
+9 was unambiguous and still is
+  $ hg l -r 9
+  1:9
+7 was ambiguous and still is
+  $ hg l -r 7
+  abort: 00changelog.i@7: ambiguous identifier!
+  [255]
+7b is no longer ambiguous
+  $ hg l -r 7b
+  3:7b
+
+  $ cd ..
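The disambiguatewithin behavior above restricts prefix collision checks to a
configured revset, which is why '7b' becomes valid for rev 3 once rev 4 is
excluded. A small stand-alone sketch of the idea over plain hex strings; real
Mercurial consults the changelog index instead.

  # Stand-alone sketch: shortest prefix of 'node' unique among the
  # 'within' set only.
  def shortestprefix(node, within):
      others = [n for n in within if n != node]
      for length in range(1, len(node) + 1):
          prefix = node[:length]
          if not any(n.startswith(prefix) for n in others):
              return prefix
      return node

  # Rev 4 ('7ba5d...') is excluded by 'not 4', so rev 3 needs only '7b'.
  print(shortestprefix('7ba57', ['00f', '7ba57', '72', '9f', 'bb']))  # 7b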
--- a/tests/test-revlog-raw.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-revlog-raw.py	Tue Sep 04 12:16:28 2018 -0400
@@ -20,7 +20,7 @@
 
 # The test wants to control whether to use delta explicitly, based on
 # "storedeltachains".
-revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self.storedeltachains
+revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
 
 def abort(msg):
     print('abort: %s' % msg)
@@ -78,7 +78,7 @@
     else:
         flags = revlog.REVIDX_DEFAULT_FLAGS
     # Change storedeltachains temporarily, to override revlog's delta decision
-    rlog.storedeltachains = isdelta
+    rlog._storedeltachains = isdelta
     try:
         rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
         return nextrev
@@ -86,7 +86,7 @@
         abort('rev %d: failed to append: %s' % (nextrev, ex))
     finally:
         # Restore storedeltachains. It is always True, see revlog.__init__
-        rlog.storedeltachains = True
+        rlog._storedeltachains = True
 
 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
     '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
--- a/tests/test-revlog.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-revlog.t	Tue Sep 04 12:16:28 2018 -0400
@@ -39,9 +39,14 @@
   ... Joa3dYtcYYYBAQ8Qr4OqZAYRICPTSr5WKd/42rV36d+8/VmrNpv7NP1jQAXrQE4BqQUARngwVA=="""
   ... .decode("base64").decode("zlib"))
 
-  $ hg debugindex a.i
+  $ hg debugrevlogindex a.i
      rev linkrev nodeid       p1           p2
        0       2 99e0332bd498 000000000000 000000000000
        1       3 6674f57a23d8 99e0332bd498 000000000000
-  $ hg debugdata a.i 1 2>&1 | egrep 'Error:.*decoded'
-  (mercurial\.\w+\.mpatch\.)?mpatchError: patch cannot be decoded (re)
+
+  >>> from mercurial import revlog, vfs
+  >>> tvfs = vfs.vfs(b'.')
+  >>> tvfs.options = {b'revlogv1': True}
+  >>> rl = revlog.revlog(tvfs, b'a.i')
+  >>> rl.revision(1)
+  mpatchError('patch cannot be decoded',)
--- a/tests/test-revset.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-revset.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1773,6 +1773,16 @@
 
 Test hexadecimal revision
   $ log 'id(2)'
+  $ log 'id(5)'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x5)'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x5'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x)'
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x'
+  abort: 00changelog.i@: ambiguous identifier!
+  [255]
   $ log 'id(23268)'
   4
   $ log 'id(2785f51eece)'
--- a/tests/test-revset2.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-revset2.t	Tue Sep 04 12:16:28 2018 -0400
@@ -346,7 +346,7 @@
 test ',' in `_list`
   $ log '0,1'
   hg: parse error: can't use a list in this context
-  (see hg help "revsets.x or y")
+  (see 'hg help "revsets.x or y"')
   [255]
   $ try '0,1,2'
   (list
@@ -354,7 +354,7 @@
     (symbol '1')
     (symbol '2'))
   hg: parse error: can't use a list in this context
-  (see hg help "revsets.x or y")
+  (see 'hg help "revsets.x or y"')
   [255]
 
 test that chained `or` operations make balanced addsets
--- a/tests/test-run-tests.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-run-tests.t	Tue Sep 04 12:16:28 2018 -0400
@@ -850,7 +850,7 @@
   > EOF
   
   --- $TESTTMP/test-cases.t
-  +++ $TESTTMP/test-cases.t.a.err
+  +++ $TESTTMP/test-cases.t#a.err
   @@ -1,6 +1,7 @@
    #testcases a b
    #if a
@@ -861,7 +861,7 @@
      $ echo 2
   Accept this change? [n] .
   --- $TESTTMP/test-cases.t
-  +++ $TESTTMP/test-cases.t.b.err
+  +++ $TESTTMP/test-cases.t#b.err
   @@ -5,4 +5,5 @@
    #endif
    #if b
@@ -896,6 +896,40 @@
   ..
   # Ran 2 tests, 0 skipped, 0 failed.
 
+When using multiple dimensions of "#testcases" in .t files
+
+  $ cat > test-cases.t <<'EOF'
+  > #testcases a b
+  > #testcases c d
+  > #if a d
+  >   $ echo $TESTCASE
+  >   a#d
+  > #endif
+  > #if b c
+  >   $ echo yes
+  >   no
+  > #endif
+  > EOF
+  $ rt test-cases.t
+  ..
+  --- $TESTTMP/test-cases.t
+  +++ $TESTTMP/test-cases.t#b#c.err
+  @@ -6,5 +6,5 @@
+   #endif
+   #if b c
+     $ echo yes
+  -  no
+  +  yes
+   #endif
+  
+  ERROR: test-cases.t#b#c output changed
+  !.
+  Failed test-cases.t#b#c: output changed
+  # Ran 4 tests, 0 skipped, 1 failed.
+  python hash seed: * (glob)
+  [1]
+
+  $ rm test-cases.t#b#c.err
   $ rm test-cases.t
 
 (reinstall)
@@ -1540,7 +1574,7 @@
   $ rt
   .
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1563,7 +1597,7 @@
   $ rt --restart
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1584,11 +1618,11 @@
 --restart works with outputdir
 
   $ mkdir output
-  $ mv test-cases-abc.t.B.err output
+  $ mv test-cases-abc.t#B.err output
   $ rt --restart --outputdir output
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1631,7 +1665,7 @@
   $ rt "test-cases-abc.t#B"
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1654,7 +1688,7 @@
   $ rt test-cases-abc.t#B test-cases-abc.t#C
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1677,7 +1711,7 @@
   $ rt test-cases-abc.t#B test-cases-abc.t#D
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1711,7 +1745,7 @@
   $ rt test-cases-advanced-cases.t
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1721,7 +1755,7 @@
   ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
   !
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1739,7 +1773,7 @@
   $ rt "test-cases-advanced-cases.t#case-with-dashes"
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1756,7 +1790,7 @@
   $ rt "test-cases-advanced-cases.t#casewith_-.chars"
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
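With two #testcases lines, the runner effectively takes the cross product of
the dimensions and joins the chosen case names with '#', which is also how the
.err files above are named. A sketch of that expansion, illustrative rather
than run-tests.py's actual code:

  # Illustrative expansion of multi-dimensional "#testcases" lines into
  # the combined case names seen above (a#c, a#d, b#c, b#d).
  import itertools

  def expandcases(dimensions):
      return ['#'.join(combo) for combo in itertools.product(*dimensions)]

  print(expandcases([['a', 'b'], ['c', 'd']]))
  # ['a#c', 'a#d', 'b#c', 'b#d']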
--- a/tests/test-share.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-share.t	Tue Sep 04 12:16:28 2018 -0400
@@ -32,6 +32,7 @@
   [1]
   $ ls -1 ../repo1/.hg/cache
   branch2-served
+  manifestfulltextcache (reporevlogstore !)
   rbc-names-v1
   rbc-revs-v1
   tags2-visible
@@ -297,15 +298,15 @@
 
 test behavior when sharing a shared repo
 
-  $ hg share -B repo3 repo5
+  $ hg share -B repo3 missingdir/repo5
   updating working directory
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cd repo5
+  $ cd missingdir/repo5
   $ hg book
      bm1                       3:b87954705719
      bm3                       4:62f4ded848e4
      bm4                       5:92793bfc8cad
-  $ cd ..
+  $ cd ../..
 
 test what happens when an active bookmark is deleted
 
--- a/tests/test-shelve.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-shelve.t	Tue Sep 04 12:16:28 2018 -0400
@@ -102,6 +102,7 @@
   $ ls .hg/shelve-backup
   default.hg
   default.patch
+  default.shelve
 
 checks to make sure we don't create a directory or
 hidden file while choosing a new shelve name
@@ -206,8 +207,10 @@
   $ ls .hg/shelve-backup/
   default-1.hg
   default-1.patch
+  default-1.shelve
   default.hg
   default.patch
+  default.shelve
 
 local edits should not prevent a shelved change from applying
 
@@ -250,10 +253,13 @@
   $ ls .hg/shelve-backup/
   default-01.hg
   default-01.patch
+  default-01.shelve
   default-1.hg
   default-1.patch
+  default-1.shelve
   default.hg
   default.patch
+  default.shelve
 
   $ hg unshelve
   abort: no shelved changes to apply!
@@ -314,8 +320,10 @@
   $ ls .hg/shelve-backup/
   default-01.hg
   default-01.patch
+  default-01.shelve
   wibble.hg
   wibble.patch
+  wibble.shelve
 
 cause unshelving to result in a merge with 'a' conflicting
 
@@ -379,11 +387,11 @@
   +++ b/a/a
   @@ -1,2 +1,6 @@
    a
-  +<<<<<<< shelve:       562f7831e574 - shelve: pending changes temporary commit
+  +<<<<<<< shelve:       2377350b6337 - shelve: pending changes temporary commit
    c
   +=======
   +a
-  +>>>>>>> working-copy: 32c69314e062 - shelve: changes to: [mq]: second.patch
+  +>>>>>>> working-copy: a68ec3400638 - shelve: changes to: [mq]: second.patch
   diff --git a/b/b b/b.rename/b
   rename from b/b
   rename to b.rename/b
@@ -801,11 +809,11 @@
   M f
   ? f.orig
   $ cat f
-  <<<<<<< shelve:       5f6b880e719b - shelve: pending changes temporary commit
+  <<<<<<< shelve:       d44eae5c3d33 - shelve: pending changes temporary commit
   g
   =======
   f
-  >>>>>>> working-copy: 81152db69da7 - shelve: changes to: commit stuff
+  >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
   $ cat f.orig
   g
   $ hg unshelve --abort -t false
@@ -847,7 +855,7 @@
   g
   =======
   f
-  >>>>>>> working-copy: 81152db69da7 - shelve: changes to: commit stuff
+  >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
   $ cat f.orig
   g
   $ hg unshelve --abort
@@ -1109,7 +1117,7 @@
   shelved as default
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugbundle .hg/shelved/*.hg
-  45993d65fe9dc3c6d8764b9c3b07fa831ee7d92d
+  330882a04d2ce8487636b1fb292e5beea77fa1e3
   $ cd ..
 
 with general delta
@@ -1132,7 +1140,7 @@
   $ hg debugbundle .hg/shelved/*.hg
   Stream params: {Compression: BZ}
   changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
-      45993d65fe9dc3c6d8764b9c3b07fa831ee7d92d
+      330882a04d2ce8487636b1fb292e5beea77fa1e3
   $ cd ..
 
 Test visibility of in-memory changes inside transaction to external hook
@@ -1187,15 +1195,15 @@
   temporarily committing pending changes (restore with 'hg unshelve --abort')
   rebasing shelved changes
   ==== preupdate:
-  VISIBLE 6:66b86db80ee4
+  VISIBLE 6:54c00d20fb3f
   ACTUAL  5:703117a2acfb
   ====
   ==== preupdate:
-  VISIBLE 8:92fdbb7b4de7
+  VISIBLE 8:8efe6f7537dc
   ACTUAL  5:703117a2acfb
   ====
   ==== preupdate:
-  VISIBLE 6:66b86db80ee4
+  VISIBLE 6:54c00d20fb3f
   ACTUAL  5:703117a2acfb
   ====
 
@@ -1231,12 +1239,12 @@
   temporarily committing pending changes (restore with 'hg unshelve --abort')
   rebasing shelved changes
   ==== update:
-  VISIBLE 6:66b86db80ee4
-  VISIBLE 7:206bf5d4f922
+  VISIBLE 6:54c00d20fb3f
+  VISIBLE 7:492ed9d705e5
   ACTUAL  5:703117a2acfb
   ====
   ==== update:
-  VISIBLE 6:66b86db80ee4
+  VISIBLE 6:54c00d20fb3f
   ACTUAL  5:703117a2acfb
   ====
   ==== update:
@@ -1772,8 +1780,8 @@
   > ashelve
   > 8b058dae057a5a78f393f4535d9e363dd5efac9d
   > 8b058dae057a5a78f393f4535d9e363dd5efac9d
-  > 8b058dae057a5a78f393f4535d9e363dd5efac9d 003d2d94241cc7aff0c3a148e966d6a4a377f3a7
-  > 003d2d94241cc7aff0c3a148e966d6a4a377f3a7
+  > 8b058dae057a5a78f393f4535d9e363dd5efac9d f543b27db2cdb41737e2e0008dc524c471da1446
+  > f543b27db2cdb41737e2e0008dc524c471da1446
   > 
   > nokeep
   > :no-active-bookmark
@@ -1785,5 +1793,23 @@
 mercurial does not crash
   $ hg unshelve --continue
   unshelve of 'ashelve' complete
+
+Unshelve without .shelve metadata:
+
+  $ hg shelve
+  shelved as default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ rm .hg/shelved/default.shelve
+  $ echo 3 > a
+  $ hg unshelve
+  unshelving change 'default'
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+  [1]
+  $ cat .hg/shelved/default.shelve
+  node=82e0cb9893247d12667017593ce1e5655860f1ac
+
   $ cd ..
-
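The new .shelve files shown above hold simple key=value metadata; the test
prints node=<hex> after the file is rebuilt. A hedged reader for that format
follows; only the 'node' key is confirmed by the output above, so any other
field would be an assumption.

  # Hedged reader for the key=value shelve metadata format.
  def readshelvemeta(path):
      meta = {}
      with open(path) as fp:
          for line in fp:
              if '=' in line:
                  key, value = line.rstrip('\n').split('=', 1)
                  meta[key] = value
      return meta

  # readshelvemeta('.hg/shelved/default.shelve')
  # -> {'node': '82e0cb9893247d12667017593ce1e5655860f1ac'}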
--- a/tests/test-show-work.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-show-work.t	Tue Sep 04 12:16:28 2018 -0400
@@ -57,7 +57,6 @@
   $ hg show work
   @  128c commit 2
   o  181c commit 1
-  |
   ~
 
 Multiple DAG heads will be shown
@@ -72,7 +71,6 @@
   | o  128c commit 2
   |/
   o  181c commit 1
-  |
   ~
 
 Even when wdir is something else
@@ -84,7 +82,6 @@
   | o  128c commit 2
   |/
   o  181c commit 1
-  |
   ~
 
 Draft child shows public head (multiple heads)
@@ -131,7 +128,6 @@
   | o  128c commit 2
   |/
   o  181c commit 1
-  |
   ~
 
   $ cd ..
@@ -162,7 +158,6 @@
   | o  128c (@) commit 2
   |/
   o  181c commit 1
-  |
   ~
 
   $ cd ..
@@ -185,7 +180,6 @@
   @  3758 Added tag 0.2 for changeset 6379c25b76f1
   o  6379 (0.2) commit 3
   o  a2ad Added tag 0.1 for changeset 6a75536ea0b1
-  |
   ~
 
   $ cd ..
@@ -246,7 +240,6 @@
   $ hg show work --color=debug
   @  [log.changeset changeset.draft changeset.unstable instability.orphan|32f3] [log.description|commit 3]
   x  [log.changeset changeset.draft changeset.obsolete|6a75] [log.description|commit 2]
-  |
   ~
 
   $ cd ..
--- a/tests/test-sparse-merges.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-sparse-merges.t	Tue Sep 04 12:16:28 2018 -0400
@@ -113,7 +113,8 @@
 
   $ hg merge
   temporarily included 1 file(s) in the sparse checkout for merging
-  local [working copy] changed d which other [merge rev] deleted
+  file 'd' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
   use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
--- a/tests/test-ssh-bundle1.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-ssh-bundle1.t	Tue Sep 04 12:16:28 2018 -0400
@@ -59,10 +59,12 @@
 
 non-existent absolute path
 
+#if no-msys
   $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
   remote: abort: repository /$TESTTMP/nonexistent not found!
   abort: no suitable response from remote hg!
   [255]
+#endif
 
 clone remote via stream
 
@@ -502,7 +504,7 @@
 
   $ cat dummylog
   Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
-  Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio
+  Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio (no-msys !)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
--- a/tests/test-ssh-proto.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-ssh-proto.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1357,7 +1357,11 @@
   o>     bookmarks\t\n
   o>     namespaces\t\n
   o>     phases\t
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
   
   testing ssh2
   creating ssh peer from handshake results
@@ -1388,7 +1392,11 @@
   o>     bookmarks\t\n
   o>     namespaces\t\n
   o>     phases\t
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
 
   $ cd ..
 
@@ -1495,7 +1503,9 @@
   o> bufferedreadline() -> 3:
   o>     46\n
   o> bufferedread(46) -> 46: bookA\t68986213bd4485ea51533535e3fc9e78007a711f
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
@@ -1523,7 +1533,9 @@
   o> bufferedreadline() -> 3:
   o>     46\n
   o> bufferedread(46) -> 46: bookA\t68986213bd4485ea51533535e3fc9e78007a711f
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'
+  }
 
 With multiple bookmarks set
 
@@ -1560,7 +1572,10 @@
   o> bufferedread(93) -> 93:
   o>     bookA\t68986213bd4485ea51533535e3fc9e78007a711f\n
   o>     bookB\t1880f3755e2e52e3199e0ee5638128b08642f34d
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f', b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f',
+    b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
@@ -1590,7 +1605,10 @@
   o> bufferedread(93) -> 93:
   o>     bookA\t68986213bd4485ea51533535e3fc9e78007a711f\n
   o>     bookB\t1880f3755e2e52e3199e0ee5638128b08642f34d
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f', b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f',
+    b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'
+  }
 
 Test pushkey for bookmarks
 
@@ -1719,7 +1737,9 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
@@ -1747,7 +1767,9 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
 
 Create some commits
 
@@ -1801,7 +1823,11 @@
   o>     20b8a89289d80036e6c4e87c2083e3bea1586637\t1\n
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1', b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1',
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
@@ -1832,7 +1858,11 @@
   o>     20b8a89289d80036e6c4e87c2083e3bea1586637\t1\n
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1', b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1',
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
 
 Single draft head
 
@@ -1869,7 +1899,10 @@
   o> bufferedread(58) -> 58:
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
@@ -1899,7 +1932,10 @@
   o> bufferedread(58) -> 58:
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
 
 All public heads
 
@@ -1934,7 +1970,9 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
@@ -1962,7 +2000,9 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
 
 Setting public phase via pushkey
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ssh-repoerror.t	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,68 @@
+#require unix-permissions no-root
+
+initial setup
+
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > ssh=$PYTHON "$TESTDIR/dummyssh"
+  > EOF
+
+repository itself is non-readable
+---------------------------------
+
+  $ hg init no-read
+  $ hg id ssh://user@dummy/no-read
+  000000000000
+  $ chmod a-rx no-read
+
+  $ hg id ssh://user@dummy/no-read
+  remote: abort: Permission denied: '$TESTTMP/no-read/.hg'
+  abort: no suitable response from remote hg!
+  [255]
+
+special case files are visible, but unreadable
+----------------------------------------------
+
+This is "similar" to the test above, but the directory is "traversable". This
+seems an unlikely case in real life, but we test it anyway.
+
+  $ hg init other
+  $ hg id ssh://user@dummy/other
+  000000000000
+  $ for item in `find other | sort -r` ; do
+  >     chmod a-r $item
+  > done
+
+  $ hg id ssh://user@dummy/other
+  remote: abort: Permission denied: $TESTTMP/other/.hg/requires
+  abort: no suitable response from remote hg!
+  [255]
+
+directory toward the repository is read only
+--------------------------------------------
+
+  $ mkdir deep
+  $ hg init deep/nested
+
+  $ hg id ssh://user@dummy/deep/nested
+  000000000000
+
+  $ chmod a-rx deep
+
+  $ hg id ssh://user@dummy/deep/nested
+  remote: abort: Permission denied: '$TESTTMP/deep/nested/.hg'
+  abort: no suitable response from remote hg!
+  [255]
+
+repository has wrong requirement
+--------------------------------
+
+  $ hg init repo-future
+  $ hg id ssh://user@dummy/repo-future
+  000000000000
+  $ echo flying-car >> repo-future/.hg/requires
+  $ hg id ssh://user@dummy/repo-future
+  remote: abort: repository requires features unknown to this Mercurial: flying-car!
+  remote: (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  abort: no suitable response from remote hg!
+  [255]
--- a/tests/test-status-color.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-status-color.t	Tue Sep 04 12:16:28 2018 -0400
@@ -168,10 +168,10 @@
   $ touch modified removed deleted ignored
   $ echo "^ignored$" > .hgignore
   $ hg ci -A -m 'initial checkin'
-  adding .hgignore
-  adding deleted
-  adding modified
-  adding removed
+  \x1b[0;32madding .hgignore\x1b[0m (esc)
+  \x1b[0;32madding deleted\x1b[0m (esc)
+  \x1b[0;32madding modified\x1b[0m (esc)
+  \x1b[0;32madding removed\x1b[0m (esc)
   $ hg log --color=debug
   [log.changeset changeset.draft|changeset:   0:389aef86a55e]
   [log.tag|tag:         tip]
@@ -296,10 +296,10 @@
   $ touch modified removed deleted ignored
   $ echo "^ignored$" > .hgignore
   $ hg commit -A -m 'initial checkin'
-  adding .hgignore
-  adding deleted
-  adding modified
-  adding removed
+  \x1b[0;32madding .hgignore\x1b[0m (esc)
+  \x1b[0;32madding deleted\x1b[0m (esc)
+  \x1b[0;32madding modified\x1b[0m (esc)
+  \x1b[0;32madding removed\x1b[0m (esc)
   $ touch added unknown ignored
   $ hg add added
   $ echo "test" >> modified
@@ -393,6 +393,7 @@
 
   $ hg unknowncommand > /dev/null
   hg: unknown command 'unknowncommand'
+  (use 'hg help' for a list of commands)
   [255]
 
 color coding of error message without curses
@@ -400,6 +401,7 @@
   $ echo 'raise ImportError' > curses.py
   $ PYTHONPATH=`pwd`:$PYTHONPATH hg unknowncommand > /dev/null
   hg: unknown command 'unknowncommand'
+  (use 'hg help' for a list of commands)
   [255]
 
   $ cd ..
--- a/tests/test-status.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-status.t	Tue Sep 04 12:16:28 2018 -0400
@@ -239,8 +239,8 @@
     "status": "A"
    },
    {
-    "copy": "modified",
     "path": "copied",
+    "source": "modified",
     "status": "A"
    },
    {
@@ -282,7 +282,7 @@
 
 Test templater support:
 
-  $ hg status -AT "[{status}]\t{if(copy, '{copy} -> ')}{path}\n"
+  $ hg status -AT "[{status}]\t{if(source, '{source} -> ')}{path}\n"
   [M]	.hgignore
   [A]	added
   [A]	modified -> copied
--- a/tests/test-stream-bundle-v2.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-stream-bundle-v2.t	Tue Sep 04 12:16:28 2018 -0400
@@ -88,6 +88,7 @@
   transferred 1.65 KB in \d\.\d seconds \(.*/sec\) (re)
   bundle2-input-part: total payload size 1840
   bundle2-input-bundle: 0 parts total
+  updating the branch cache
   finished applying clone bundle
   query 1; heads
   sending batch command
@@ -142,6 +143,7 @@
   transferred 1.65 KB in *.* seconds (*/sec) (glob)
   bundle2-input-part: total payload size 1840
   bundle2-input-bundle: 0 parts total
+  updating the branch cache
   finished applying clone bundle
   query 1; heads
   sending batch command
--- a/tests/test-strict.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-strict.t	Tue Sep 04 12:16:28 2018 -0400
@@ -15,29 +15,7 @@
 
   $ hg an a
   hg: unknown command 'an'
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help' for a list of commands)
   [255]
   $ hg annotate a
   0: a
--- a/tests/test-subrepo.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-subrepo.t	Tue Sep 04 12:16:28 2018 -0400
@@ -1066,19 +1066,18 @@
   $ hg cat sub/repo/foo -Tjson | sed 's|\\\\|/|g'
   [
    {
-    "abspath": "foo",
     "data": "test\ntest\n",
-    "path": "sub/repo/foo"
+    "path": "foo"
    }
   ]
 
  non-exact match:
 
-  $ hg cat -T '{path}\n' 'glob:**'
+  $ hg cat -T '{path|relpath}\n' 'glob:**'
   .hgsub
   .hgsubstate
   sub/repo/foo
-  $ hg cat -T '{path}\n' 're:^sub'
+  $ hg cat -T '{path|relpath}\n' 're:^sub'
   sub/repo/foo
 
  missing subrepos in working directory:
--- a/tests/test-template-functions.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-template-functions.t	Tue Sep 04 12:16:28 2018 -0400
@@ -892,6 +892,11 @@
   $ hg log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden
   4:107
 
+  $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n'
+  4:x10
+  $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden
+  4:x10
+
  node 'c562' should be unique if the other 'c562' nodes are hidden
  (but we don't try the slow path to filter out hidden nodes for now)
 
@@ -1193,6 +1198,12 @@
   
   0
   
+
+  $ hg log -l1 -T "{files('aa') % '{file}\n'}"
+  aa
+  $ hg log -l1 -T "{files('aa') % '{path}\n'}"
+  aa
+
   $ hg rm a
   $ hg log -r "wdir()" -T "{rev}\n{join(files('*'), '\n')}\n"
   2147483647
--- a/tests/test-template-keywords.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-template-keywords.t	Tue Sep 04 12:16:28 2018 -0400
@@ -91,7 +91,7 @@
   $ for key in author branch branches date desc file_adds file_dels file_mods \
   >         file_copies file_copies_switch files \
   >         manifest node parents rev tags diffstat extras \
-  >         p1rev p2rev p1node p2node; do
+  >         p1rev p2rev p1node p2node user; do
   >     for mode in '' --verbose --debug; do
   >         hg log $mode --template "$key$mode: {$key}\n"
   >     done
@@ -702,6 +702,33 @@
   p2node--debug: 0000000000000000000000000000000000000000
   p2node--debug: 0000000000000000000000000000000000000000
   p2node--debug: 0000000000000000000000000000000000000000
+  user: test
+  user: User Name <user@hostname>
+  user: person
+  user: person
+  user: person
+  user: person
+  user: other@place
+  user: A. N. Other <other@place>
+  user: User Name <user@hostname>
+  user--verbose: test
+  user--verbose: User Name <user@hostname>
+  user--verbose: person
+  user--verbose: person
+  user--verbose: person
+  user--verbose: person
+  user--verbose: other@place
+  user--verbose: A. N. Other <other@place>
+  user--verbose: User Name <user@hostname>
+  user--debug: test
+  user--debug: User Name <user@hostname>
+  user--debug: person
+  user--debug: person
+  user--debug: person
+  user--debug: person
+  user--debug: other@place
+  user--debug: A. N. Other <other@place>
+  user--debug: User Name <user@hostname>
 
 Add a dummy commit to make up for the instability of the above:
 
@@ -718,6 +745,48 @@
   $ hg rm a
   $ hg ci -m "Modify, add, remove, rename"
 
+Test files list:
+
+  $ hg log -l1 -T '{join(file_mods, " ")}\n'
+  third
+  $ hg log -l1 -T '{file_mods % "{file}\n"}'
+  third
+  $ hg log -l1 -T '{file_mods % "{path}\n"}'
+  third
+
+  $ hg log -l1 -T '{join(files, " ")}\n'
+  a b fifth fourth third
+  $ hg log -l1 -T '{files % "{file}\n"}'
+  a
+  b
+  fifth
+  fourth
+  third
+  $ hg log -l1 -T '{files % "{path}\n"}'
+  a
+  b
+  fifth
+  fourth
+  third
+
+Test file copies dict:
+
+  $ hg log -r8 -T '{join(file_copies, " ")}\n'
+  fourth (second)
+  $ hg log -r8 -T '{file_copies % "{name} <- {source}\n"}'
+  fourth <- second
+  $ hg log -r8 -T '{file_copies % "{path} <- {source}\n"}'
+  fourth <- second
+
+  $ hg log -r8 -T '{join(file_copies_switch, " ")}\n'
+  
+  $ hg log -r8 -C -T '{join(file_copies_switch, " ")}\n'
+  fourth (second)
+  $ hg log -r8 -C -T '{file_copies_switch % "{name} <- {source}\n"}'
+  fourth <- second
+  $ hg log -r8 -C -T '{file_copies_switch % "{path} <- {source}\n"}'
+  fourth <- second
+
 Test index keyword:
 
   $ hg log -l 2 -T '{index + 10}{files % " {index}:{file}"}\n'
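The template tests above show each member of a files list answering to both the
legacy '{file}' name and the newer '{path}' name. A toy mapping that exposes
one value under both keys illustrates the aliasing; it is not the templater's
real data model.

  # Toy aliasing: the same value is reachable as 'file' and 'path',
  # mirroring the template keyword behavior above.
  def filemapping(path):
      return {'file': path, 'path': path}

  for name in ['a', 'b', 'fifth', 'fourth', 'third']:
      print('{path}'.format(**filemapping(name)))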
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-util.py	Tue Sep 04 12:16:28 2018 -0400
@@ -0,0 +1,137 @@
+# unit tests for mercurial.util utilities
+from __future__ import absolute_import
+
+import contextlib
+import itertools
+import unittest
+
+from mercurial import pycompat, util, utils
+
+@contextlib.contextmanager
+def mocktimer(incr=0.1, *additional_targets):
+    """Replaces util.timer and additional_targets with a mock
+
+    The timer starts at 0. On each call the time is incremented by the value
+    of incr. If incr is an iterable, then the time is incremented by the
+    next value from that iterable, looping in a cycle when reaching the end.
+
+    additional_targets must be a sequence of (object, attribute_name) tuples;
+    the mock is set with setattr(object, attribute_name, mock).
+
+    """
+    time = [0]
+    try:
+        incr = itertools.cycle(incr)
+    except TypeError:
+        incr = itertools.repeat(incr)
+
+    def timer():
+        time[0] += next(incr)
+        return time[0]
+
+    # record original values
+    orig = util.timer
+    additional_origs = [(o, a, getattr(o, a)) for o, a in additional_targets]
+
+    # mock out targets
+    util.timer = timer
+    for obj, attr in additional_targets:
+        setattr(obj, attr, timer)
+
+    try:
+        yield
+    finally:
+        # restore originals
+        util.timer = orig
+        for args in additional_origs:
+            setattr(*args)
+
+# attr.s default factory for util.timedcmstats.start binds the timer we
+# need to mock out.
+_start_default = (util.timedcmstats.start.default, 'factory')
+
+@contextlib.contextmanager
+def capturestderr():
+    """Replace utils.procutil.stderr with a pycompat.bytesio instance
+
+    The instance is made available as the return value of __enter__.
+
+    This contextmanager is reentrant.
+
+    """
+    orig = utils.procutil.stderr
+    utils.procutil.stderr = pycompat.bytesio()
+    try:
+        yield utils.procutil.stderr
+    finally:
+        utils.procutil.stderr = orig
+
+class timedtests(unittest.TestCase):
+    def testtimedcmstatsstr(self):
+        stats = util.timedcmstats()
+        self.assertEqual(str(stats), '<unknown>')
+        self.assertEqual(bytes(stats), b'<unknown>')
+        stats.elapsed = 12.34
+        self.assertEqual(str(stats), pycompat.sysstr(util.timecount(12.34)))
+        self.assertEqual(bytes(stats), util.timecount(12.34))
+
+    def testtimedcmcleanexit(self):
+        # timestamps 1, 4, elapsed time of 4 - 1 = 3
+        with mocktimer([1, 3], _start_default):
+            with util.timedcm('pass') as stats:
+                # actual context doesn't matter
+                pass
+
+        self.assertEqual(stats.start, 1)
+        self.assertEqual(stats.elapsed, 3)
+        self.assertEqual(stats.level, 1)
+
+    def testtimedcmnested(self):
+        # timestamps 1, 3, 6, 10, elapsed times of 6 - 3 = 3 and 10 - 1 = 9
+        with mocktimer([1, 2, 3, 4], _start_default):
+            with util.timedcm('outer') as outer_stats:
+                with util.timedcm('inner') as inner_stats:
+                    # actual context doesn't matter
+                    pass
+
+        self.assertEqual(outer_stats.start, 1)
+        self.assertEqual(outer_stats.elapsed, 9)
+        self.assertEqual(outer_stats.level, 1)
+
+        self.assertEqual(inner_stats.start, 3)
+        self.assertEqual(inner_stats.elapsed, 3)
+        self.assertEqual(inner_stats.level, 2)
+
+    def testtimedcmexception(self):
+        # timestamps 1, 4, elapsed time of 4 - 1 = 3
+        with mocktimer([1, 3], _start_default):
+            try:
+                with util.timedcm('exceptional') as stats:
+                    raise ValueError()
+            except ValueError:
+                pass
+
+        self.assertEqual(stats.start, 1)
+        self.assertEqual(stats.elapsed, 3)
+        self.assertEqual(stats.level, 1)
+
+    def testtimeddecorator(self):
+        @util.timed
+        def testfunc(callcount=1):
+            callcount -= 1
+            if callcount:
+                testfunc(callcount)
+
+        # timestamps 1, 2, 3, 4, elapsed time of 3 - 2 = 1 and 4 - 1 = 3
+        with mocktimer(1, _start_default):
+            with capturestderr() as out:
+                testfunc(2)
+
+        self.assertEqual(out.getvalue(), (
+            b'    testfunc: 1.000 s\n'
+            b'  testfunc: 3.000 s\n'
+        ))
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/test-wireproto-command-branchmap.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-wireproto-command-branchmap.t	Tue Sep 04 12:16:28 2018 -0400
@@ -67,6 +67,17 @@
   received frame(size=123; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: {b'branch1': [b'\xb5\xfa\xac\xdf\xd2c7h\xcb1R3l\xc0\x953\x81&f\x88'], b'branch2': [b'"Aa\xc7X\x9a\xa4\x8f\xa8:H\xfe\xff^\x95\xb5j\xe3\'\xfc'], b'default': [b'&\x80Z\xba\x1e`\n\x82\xe96a\x14\x9f#\x13\x86j"\x1a{', b'\xbe\x0e\xf7<\x17\xad\xe3\xfc\x89\xdcAp\x1e\xb9\xfc:\x91\xb5\x82\x82']}
+  response: {
+    b'branch1': [
+      b'\xb5\xfa\xac\xdf\xd2c7h\xcb1R3l\xc0\x953\x81&f\x88'
+    ],
+    b'branch2': [
+      b'"Aa\xc7X\x9a\xa4\x8f\xa8:H\xfe\xff^\x95\xb5j\xe3\'\xfc'
+    ],
+    b'default': [
+      b'&\x80Z\xba\x1e`\n\x82\xe96a\x14\x9f#\x13\x86j"\x1a{',
+      b'\xbe\x0e\xf7<\x17\xad\xe3\xfc\x89\xdcAp\x1e\xb9\xfc:\x91\xb5\x82\x82'
+    ]
+  }
 
   $ cat error.log
--- a/tests/test-wireproto-command-capabilities.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-wireproto-command-capabilities.t	Tue Sep 04 12:16:28 2018 -0400
@@ -146,7 +146,11 @@
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
   s>     \xa3Dapis\xa0GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-  cbor> {b'apibase': b'api/', b'apis': {}, b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'}
+  cbor> {
+    b'apibase': b'api/',
+    b'apis': {},
+    b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+  }
 
 Restart server to enable HTTPv2
 
@@ -179,7 +183,11 @@
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
   s>     \xa3Dapis\xa0GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-  cbor> {b'apibase': b'api/', b'apis': {}, b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'}
+  cbor> {
+    b'apibase': b'api/',
+    b'apis': {},
+    b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+  }
 
 Request for HTTPv2 service returns information about it
 
@@ -205,7 +213,85 @@
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
   s>     \xa3Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Eheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyCnewCnewColdColdInamespaceBnsKpermissions\x81DpushHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullKcompression\x81\xa1DnameDzlibNrawrepoformats\x82LgeneraldeltaHrevlogv1Qframingmediatypes\x81X&application/mercurial-exp-framing-0005GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-  cbor> {b'apibase': b'api/', b'apis': {b'exp-http-v2-0001': {b'commands': {b'branchmap': {b'args': {}, b'permissions': [b'pull']}, b'capabilities': {b'args': {}, b'permissions': [b'pull']}, b'heads': {b'args': {b'publiconly': False}, b'permissions': [b'pull']}, b'known': {b'args': {b'nodes': [b'deadbeef']}, b'permissions': [b'pull']}, b'listkeys': {b'args': {b'namespace': b'ns'}, b'permissions': [b'pull']}, b'lookup': {b'args': {b'key': b'foo'}, b'permissions': [b'pull']}, b'pushkey': {b'args': {b'key': b'key', b'namespace': b'ns', b'new': b'new', b'old': b'old'}, b'permissions': [b'push']}}, b'compression': [{b'name': b'zlib'}], b'framingmediatypes': [b'application/mercurial-exp-framing-0005'], b'rawrepoformats': [b'generaldelta', b'revlogv1']}}, b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'}
+  cbor> {
+    b'apibase': b'api/',
+    b'apis': {
+      b'exp-http-v2-0001': {
+        b'commands': {
+          b'branchmap': {
+            b'args': {},
+            b'permissions': [
+              b'pull'
+            ]
+          },
+          b'capabilities': {
+            b'args': {},
+            b'permissions': [
+              b'pull'
+            ]
+          },
+          b'heads': {
+            b'args': {
+              b'publiconly': False
+            },
+            b'permissions': [
+              b'pull'
+            ]
+          },
+          b'known': {
+            b'args': {
+              b'nodes': [
+                b'deadbeef'
+              ]
+            },
+            b'permissions': [
+              b'pull'
+            ]
+          },
+          b'listkeys': {
+            b'args': {
+              b'namespace': b'ns'
+            },
+            b'permissions': [
+              b'pull'
+            ]
+          },
+          b'lookup': {
+            b'args': {
+              b'key': b'foo'
+            },
+            b'permissions': [
+              b'pull'
+            ]
+          },
+          b'pushkey': {
+            b'args': {
+              b'key': b'key',
+              b'namespace': b'ns',
+              b'new': b'new',
+              b'old': b'old'
+            },
+            b'permissions': [
+              b'push'
+            ]
+          }
+        },
+        b'compression': [
+          {
+            b'name': b'zlib'
+          }
+        ],
+        b'framingmediatypes': [
+          b'application/mercurial-exp-framing-0005'
+        ],
+        b'rawrepoformats': [
+          b'generaldelta',
+          b'revlogv1'
+        ]
+      }
+    },
+    b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+  }
 
 capabilities command returns expected info
 
@@ -254,6 +340,83 @@
   received frame(size=463; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: [{b'status': b'ok'}, {b'commands': {b'branchmap': {b'args': {}, b'permissions': [b'pull']}, b'capabilities': {b'args': {}, b'permissions': [b'pull']}, b'heads': {b'args': {b'publiconly': False}, b'permissions': [b'pull']}, b'known': {b'args': {b'nodes': [b'deadbeef']}, b'permissions': [b'pull']}, b'listkeys': {b'args': {b'namespace': b'ns'}, b'permissions': [b'pull']}, b'lookup': {b'args': {b'key': b'foo'}, b'permissions': [b'pull']}, b'pushkey': {b'args': {b'key': b'key', b'namespace': b'ns', b'new': b'new', b'old': b'old'}, b'permissions': [b'push']}}, b'compression': [{b'name': b'zlib'}], b'framingmediatypes': [b'application/mercurial-exp-framing-0005'], b'rawrepoformats': [b'generaldelta', b'revlogv1']}]
+  response: [
+    {
+      b'status': b'ok'
+    },
+    {
+      b'commands': {
+        b'branchmap': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'capabilities': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'heads': {
+          b'args': {
+            b'publiconly': False
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'known': {
+          b'args': {
+            b'nodes': [
+              b'deadbeef'
+            ]
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'listkeys': {
+          b'args': {
+            b'namespace': b'ns'
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'lookup': {
+          b'args': {
+            b'key': b'foo'
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'pushkey': {
+          b'args': {
+            b'key': b'key',
+            b'namespace': b'ns',
+            b'new': b'new',
+            b'old': b'old'
+          },
+          b'permissions': [
+            b'push'
+          ]
+        }
+      },
+      b'compression': [
+        {
+          b'name': b'zlib'
+        }
+      ],
+      b'framingmediatypes': [
+        b'application/mercurial-exp-framing-0005'
+      ],
+      b'rawrepoformats': [
+        b'generaldelta',
+        b'revlogv1'
+      ]
+    }
+  ]
 
   $ cat error.log
--- a/tests/test-wireproto-command-heads.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-wireproto-command-heads.t	Tue Sep 04 12:16:28 2018 -0400
@@ -58,7 +58,11 @@
   received frame(size=75; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: [b'\x1dok\x91\xd4J\xab\xa6\xd5\xe5\x80\xbc0\xa9\x94\x850\xdb\xe0\x0b', b'\xaeI.6\xb0\xc83\x9f\xfa\xf3(\xd0\x0b\x85\xb4R]\xe1\x16^', b')Dm-\xc5A\x9c_\x97Dz\x8b\xc0b\xe4\xcc2\x8b\xf2A']
+  response: [
+    b'\x1dok\x91\xd4J\xab\xa6\xd5\xe5\x80\xbc0\xa9\x94\x850\xdb\xe0\x0b',
+    b'\xaeI.6\xb0\xc83\x9f\xfa\xf3(\xd0\x0b\x85\xb4R]\xe1\x16^',
+    b')Dm-\xc5A\x9c_\x97Dz\x8b\xc0b\xe4\xcc2\x8b\xf2A'
+  ]
 
 Requesting just the public heads works
 
@@ -91,6 +95,8 @@
   received frame(size=33; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: [b'x\xd2\xdc\xa46\xb2\xf5\xb1\x88\xac&~)\xb8\x1e\x07&m8\xfc']
+  response: [
+    b'x\xd2\xdc\xa46\xb2\xf5\xb1\x88\xac&~)\xb8\x1e\x07&m8\xfc'
+  ]
 
   $ cat error.log
--- a/tests/test-wireproto-command-known.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-wireproto-command-known.t	Tue Sep 04 12:16:28 2018 -0400
@@ -83,7 +83,9 @@
   received frame(size=13; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: [True]
+  response: [
+    True
+  ]
 
 Requesting multiple nodes works
 
@@ -116,6 +118,10 @@
   received frame(size=15; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: [True, False, True]
+  response: [
+    True,
+    False,
+    True
+  ]
 
   $ cat error.log
--- a/tests/test-wireproto-command-listkeys.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-wireproto-command-listkeys.t	Tue Sep 04 12:16:28 2018 -0400
@@ -54,7 +54,11 @@
   received frame(size=43; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
 
 Request for phases works
 
@@ -87,7 +91,10 @@
   received frame(size=72; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: {b'be0ef73c17ade3fc89dc41701eb9fc3a91b58282': b'1', b'publishing': b'True'}
+  response: {
+    b'be0ef73c17ade3fc89dc41701eb9fc3a91b58282': b'1',
+    b'publishing': b'True'
+  }
 
 Request for bookmarks works
 
@@ -120,6 +127,8 @@
   received frame(size=56; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: {b'@': b'26805aba1e600a82e93661149f2313866a221a7b'}
+  response: {
+    b'@': b'26805aba1e600a82e93661149f2313866a221a7b'
+  }
 
   $ cat error.log
--- a/tests/test-wireproto-command-pushkey.t	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-wireproto-command-pushkey.t	Tue Sep 04 12:16:28 2018 -0400
@@ -84,6 +84,8 @@
   received frame(size=56; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
   s>     0\r\n
   s>     \r\n
-  response: {b'@': b'426bada5c67598ca65036d57d9e4b64b0c1ce7a0'}
+  response: {
+    b'@': b'426bada5c67598ca65036d57d9e4b64b0c1ce7a0'
+  }
 
   $ cat error.log
--- a/tests/test-wireproto-framing.py	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/test-wireproto-framing.py	Tue Sep 04 12:16:28 2018 -0400
@@ -44,9 +44,6 @@
         self.assertEqual(ffs(b"1 1 0 1 0 cbor:b'foo'"),
                          b'\x04\x00\x00\x01\x00\x01\x00\x10Cfoo')
 
-        self.assertEqual(ffs(b"1 1 0 1 0 cbor:u'foo'"),
-                         b'\x04\x00\x00\x01\x00\x01\x00\x10cfoo')
-
     def testcborlists(self):
         self.assertEqual(ffs(b"1 1 0 1 0 cbor:[None, True, False, 42, b'foo']"),
                          b'\n\x00\x00\x01\x00\x01\x00\x10\x85\xf6\xf5\xf4'
--- a/tests/wireprotohelpers.sh	Tue Sep 04 11:59:12 2018 -0400
+++ b/tests/wireprotohelpers.sh	Tue Sep 04 12:16:28 2018 -0400
@@ -20,19 +20,19 @@
     wireprotov2server,
 )
 
-@wireprotov1server.wireprotocommand('customreadonly', permission='pull')
+@wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull')
 def customreadonlyv1(repo, proto):
     return wireprototypes.bytesresponse(b'customreadonly bytes response')
 
-@wireprotov2server.wireprotocommand('customreadonly', permission='pull')
+@wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull')
 def customreadonlyv2(repo, proto):
     return wireprototypes.cborresponse(b'customreadonly bytes response')
 
-@wireprotov1server.wireprotocommand('customreadwrite', permission='push')
+@wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push')
 def customreadwrite(repo, proto):
     return wireprototypes.bytesresponse(b'customreadwrite bytes response')
 
-@wireprotov2server.wireprotocommand('customreadwrite', permission='push')
+@wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push')
 def customreadwritev2(repo, proto):
     return wireprototypes.cborresponse(b'customreadwrite bytes response')
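+
+# NB: the wireprotocommand registrations above pass command names and
+# permission values as bytes literals, matching Mercurial's
+# bytes-everywhere internal convention so they behave identically on
+# Python 2 and 3.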
 EOF