changeset 44470:a08bbdf839ae

merge with stable
author Martin von Zweigbergk <martinvonz@google.com>
date Mon, 09 Mar 2020 10:18:40 -0700
parents 69ae64637be5 (diff) 9803b374389a (current diff)
children ad718271a9eb
files mercurial/discovery.py tests/run-tests.py
diffstat 269 files changed, 11368 insertions(+), 3630 deletions(-)
--- a/Makefile	Mon Mar 09 01:11:59 2020 +0100
+++ b/Makefile	Mon Mar 09 10:18:40 2020 -0700
@@ -64,6 +64,7 @@
 	$(MAKE) -C doc
 
 cleanbutpackages:
+	rm -f hg.exe
 	-$(PYTHON) setup.py clean --all # ignore errors from this command
 	find contrib doc hgext hgext3rd i18n mercurial tests hgdemandimport \
 		\( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';'
--- a/black.toml	Mon Mar 09 01:11:59 2020 +0100
+++ b/black.toml	Mon Mar 09 10:18:40 2020 -0700
@@ -9,7 +9,6 @@
 | \.mypy_cache/
 | \.venv/
 | mercurial/thirdparty/
-| contrib/python-zstandard/
 '''
 skip-string-normalization = true
 quiet = true
--- a/contrib/benchmarks/__init__.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/benchmarks/__init__.py	Mon Mar 09 10:18:40 2020 -0700
@@ -81,7 +81,7 @@
     output = ui.popbuffer()
     match = outputre.search(output)
     if not match:
-        raise ValueError("Invalid output {0}".format(output))
+        raise ValueError("Invalid output {}".format(output))
     return float(match.group(1))
 
 
--- a/contrib/check-py3-compat.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/check-py3-compat.py	Mon Mar 09 10:18:40 2020 -0700
@@ -32,7 +32,7 @@
     for node in ast.walk(root):
         if isinstance(node, ast.ImportFrom):
             if node.module == '__future__':
-                futures |= set(n.name for n in node.names)
+                futures |= {n.name for n in node.names}
         elif isinstance(node, ast.Print):
             haveprint = True
 
--- a/contrib/chg/chg.c	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/chg/chg.c	Mon Mar 09 10:18:40 2020 -0700
@@ -226,6 +226,16 @@
 	}
 	argv[argsize - 1] = NULL;
 
+	const char *lc_ctype_env = getenv("LC_CTYPE");
+	if (lc_ctype_env == NULL) {
+		if (putenv("CHG_CLEAR_LC_CTYPE=") != 0)
+			abortmsgerrno("failed to putenv CHG_CLEAR_LC_CTYPE");
+	} else {
+		if (setenv("CHGORIG_LC_CTYPE", lc_ctype_env, 1) != 0) {
+			abortmsgerrno("failed to setenv CHGORIG_LC_CTYPE");
+		}
+	}
+
 	if (putenv("CHGINTERNALMARK=") != 0)
 		abortmsgerrno("failed to putenv");
 	if (execvp(hgcmd, (char **)argv) < 0)
--- a/contrib/examples/fix.hgrc	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/examples/fix.hgrc	Mon Mar 09 10:18:40 2020 -0700
@@ -6,7 +6,7 @@
 rustfmt:pattern = set:**.rs
 
 black:command = black --config=black.toml -
-black:pattern = set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"
+black:pattern = set:**.py - mercurial/thirdparty/**
 
 # Mercurial doesn't have any Go code, but if we did this is how we
 # would configure `hg fix` for Go:
--- a/contrib/import-checker.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/import-checker.py	Mon Mar 09 10:18:40 2020 -0700
@@ -392,9 +392,10 @@
                     modnotfound = True
                     continue
                 yield found[1]
-            if modnotfound:
+            if modnotfound and dottedpath != modulename:
                 # "dottedpath" is a package, but imported because of non-module
                 # lookup
+                # specifically allow "from . import foo" from __init__.py
                 yield dottedpath
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/pyoxidizer.bzl	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,60 @@
+# Instructions:
+#
+# 1. cargo install --version 0.5.0 pyoxidizer
+# 2. cd /path/to/hg
+# 3. pyoxidizer build --path contrib/packaging [--release]
+# 4. Run build/pyoxidizer/<arch>/<debug|release>/app/hg
+#
+# If you need to build again, you need to remove the build/lib.* and
+# build/temp.* directories, otherwise PyOxidizer fails to pick up C
+# extensions. This is a bug in PyOxidizer.
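+#
+# A minimal cleanup sketch for that situation (assuming a POSIX shell, run
+# from the repository root) would be:
+#
+#   rm -rf build/lib.* build/temp.*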
+
+ROOT = CWD + "/../.."
+
+set_build_path(ROOT + "/build/pyoxidizer")
+
+def make_exe():
+    dist = default_python_distribution()
+
+    code = "import hgdemandimport; hgdemandimport.enable(); from mercurial import dispatch; dispatch.run()"
+
+    config = PythonInterpreterConfig(
+        raw_allocator = "system",
+        run_eval = code,
+        # We want to let the user load extensions from the file system
+        filesystem_importer = True,
+        # We need this to make resourceutil happy, since it looks for sys.frozen.
+        sys_frozen = True,
+        legacy_windows_stdio = True,
+    )
+
+    exe = dist.to_python_executable(
+        name = "hg",
+        config = config,
+    )
+
+    # Use setup.py install to build Mercurial and collect Python resources to
+    # embed in the executable.
+    resources = dist.setup_py_install(ROOT)
+    exe.add_python_resources(resources)
+
+    return exe
+
+def make_install(exe):
+    m = FileManifest()
+
+    # `hg` goes in root directory.
+    m.add_python_resource(".", exe)
+
+    templates = glob(
+        include=[ROOT + "/mercurial/templates/**/*"],
+        strip_prefix = ROOT + "/mercurial/",
+    )
+    m.add_manifest(templates)
+
+    return m
+
+register_target("exe", make_exe)
+register_target("app", make_install, depends = ["exe"], default = True)
+
+resolve_targets()
--- a/contrib/perf.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/perf.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1536,6 +1536,7 @@
     matters.
 
     Example of useful set to test:
+
     * tip
     * 0
     * -10:
@@ -2522,7 +2523,7 @@
     }
 
     for diffopt in ('', 'w', 'b', 'B', 'wB'):
-        opts = dict((options[c], b'1') for c in diffopt)
+        opts = {options[c]: b'1' for c in diffopt}
 
         def d():
             ui.pushbuffer()
@@ -3047,7 +3048,7 @@
 
     # Verify engines argument.
     if engines:
-        engines = set(e.strip() for e in engines.split(b','))
+        engines = {e.strip() for e in engines.split(b',')}
         for engine in engines:
             try:
                 util.compressionengines[engine]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/phab-clean.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+#
+# A small script to automatically reject idle Diffs
+#
+# you need to set the PHABBOT_USER and PHABBOT_TOKEN environment variables for authentication
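+#
+# Hypothetical invocation (the token value below is a placeholder, not a real
+# credential):
+#
+#   PHABBOT_USER=baymax PHABBOT_TOKEN=<api-token> python contrib/phab-clean.py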
+from __future__ import absolute_import, print_function
+
+import datetime
+import os
+import sys
+
+import phabricator
+
+MESSAGE = """There seems to have been no activity on this Diff for the past 3 months.
+
+By policy, we are automatically moving it out of the `need-review` state.
+
+Please move it back to `need-review` without hesitation if this diff should still be discussed.
+
+:baymax:need-review-idle:
+"""
+
+
+PHAB_URL = "https://phab.mercurial-scm.org/api/"
+USER = os.environ.get("PHABBOT_USER", "baymax")
+TOKEN = os.environ.get("PHABBOT_TOKEN")
+
+
+NOW = datetime.datetime.now()
+
+# 3 months in seconds
+DELAY = 60 * 60 * 24 * 30 * 3
+
+
+def get_all_diff(phab):
+    """Fetch all the diffs that need review"""
+    return phab.differential.query(
+        status="status-needs-review",
+        order="order-modified",
+        paths=[('HG', None)],
+    )
+
+
+def filter_diffs(diffs, older_than):
+    """Filter diffs to only keep the ones unmodified for <older_than> seconds"""
+    olds = []
+    for d in diffs:
+        modified = int(d['dateModified'])
+        modified = datetime.datetime.fromtimestamp(modified)
+        d["idleFor"] = idle_for = NOW - modified
+        if idle_for.total_seconds() > older_than:
+            olds.append(d)
+    return olds
+
+
+def nudge_diff(phab, diff):
+    """Comment on the idle diff and reject it"""
+    diff_id = int(diff['id'])
+    phab.differential.createcomment(
+        revision_id=diff_id, message=MESSAGE, action="reject"
+    )
+
+
+if not USER:
+    print(
+        "no user specified; please set PHABBOT_USER and PHABBOT_TOKEN",
+        file=sys.stderr,
+    )
+    sys.exit(1)
+elif not TOKEN:
+    print(
+        "no api-token specified; please set PHABBOT_USER and PHABBOT_TOKEN",
+        file=sys.stderr,
+    )
+    sys.exit(1)
+
+phab = phabricator.Phabricator(USER, host=PHAB_URL, token=TOKEN)
+phab.connect()
+phab.update_interfaces()
+print('Hello "%s".' % phab.user.whoami()['realName'])
+
+diffs = get_all_diff(phab)
+print("Found %d Diffs" % len(diffs))
+olds = filter_diffs(diffs, DELAY)
+print("Found %d old Diffs" % len(olds))
+for d in olds:
+    diff_id = d['id']
+    status = d['statusName']
+    modified = int(d['dateModified'])
+    idle_for = d["idleFor"]
+    msg = 'nudging D%s in "%s" state for %s'
+    print(msg % (diff_id, status, idle_for))
+    # uncomment to actually affect phab
+    # nudge_diff(phab, d)
--- a/contrib/python-zstandard/make_cffi.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/make_cffi.py	Mon Mar 09 10:18:40 2020 -0700
@@ -52,7 +52,8 @@
 
 # Headers whose preprocessed output will be fed into cdef().
 HEADERS = [
-    os.path.join(HERE, "zstd", *p) for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
+    os.path.join(HERE, "zstd", *p)
+    for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
 ]
 
 INCLUDE_DIRS = [
@@ -139,7 +140,9 @@
         env = dict(os.environ)
         if getattr(compiler, "_paths", None):
             env["PATH"] = compiler._paths
-        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE, env=env)
+        process = subprocess.Popen(
+            args + [input_file], stdout=subprocess.PIPE, env=env
+        )
         output = process.communicate()[0]
         ret = process.poll()
         if ret:
--- a/contrib/python-zstandard/setup.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/setup.py	Mon Mar 09 10:18:40 2020 -0700
@@ -87,7 +87,9 @@
         break
 
 if not version:
-    raise Exception("could not resolve package version; " "this should never happen")
+    raise Exception(
+        "could not resolve package version; " "this should never happen"
+    )
 
 setup(
     name="zstandard",
--- a/contrib/python-zstandard/setup_zstd.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/setup_zstd.py	Mon Mar 09 10:18:40 2020 -0700
@@ -138,12 +138,16 @@
     if not system_zstd:
         sources.update([os.path.join(actual_root, p) for p in zstd_sources])
         if support_legacy:
-            sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy])
+            sources.update(
+                [os.path.join(actual_root, p) for p in zstd_sources_legacy]
+            )
     sources = list(sources)
 
     include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
     if not system_zstd:
-        include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes])
+        include_dirs.update(
+            [os.path.join(actual_root, d) for d in zstd_includes]
+        )
         if support_legacy:
             include_dirs.update(
                 [os.path.join(actual_root, d) for d in zstd_includes_legacy]
--- a/contrib/python-zstandard/tests/common.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/common.py	Mon Mar 09 10:18:40 2020 -0700
@@ -50,7 +50,9 @@
         os.environ.update(old_env)
 
     if mod.backend != "cffi":
-        raise Exception("got the zstandard %s backend instead of cffi" % mod.backend)
+        raise Exception(
+            "got the zstandard %s backend instead of cffi" % mod.backend
+        )
 
     # If CFFI version is available, dynamically construct test methods
     # that use it.
@@ -84,7 +86,9 @@
                 fn.__func__.func_defaults,
                 fn.__func__.func_closure,
             )
-            new_method = types.UnboundMethodType(new_fn, fn.im_self, fn.im_class)
+            new_method = types.UnboundMethodType(
+                new_fn, fn.im_self, fn.im_class
+            )
 
         setattr(cls, name, new_method)
 
@@ -194,4 +198,6 @@
     expensive_settings = hypothesis.settings(deadline=None, max_examples=10000)
     hypothesis.settings.register_profile("expensive", expensive_settings)
 
-    hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))
+    hypothesis.settings.load_profile(
+        os.environ.get("HYPOTHESIS_PROFILE", "default")
+    )
--- a/contrib/python-zstandard/tests/test_buffer_util.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_buffer_util.py	Mon Mar 09 10:18:40 2020 -0700
@@ -67,7 +67,8 @@
             self.skipTest("BufferWithSegments not available")
 
         b = zstd.BufferWithSegments(
-            b"foofooxfooxy", b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)])
+            b"foofooxfooxy",
+            b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)]),
         )
         self.assertEqual(len(b), 3)
         self.assertEqual(b.size, 12)
@@ -83,17 +84,23 @@
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegex(ValueError, "must pass at least 1 argument"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass at least 1 argument"
+        ):
             zstd.BufferWithSegmentsCollection()
 
     def test_argument_validation(self):
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(None)
 
-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(
                 zstd.BufferWithSegments(b"foo", ss.pack(0, 3)), None
             )
--- a/contrib/python-zstandard/tests/test_compressor.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_compressor.py	Mon Mar 09 10:18:40 2020 -0700
@@ -24,7 +24,9 @@
 
 
 def multithreaded_chunk_size(level, source_size=0):
-    params = zstd.ZstdCompressionParameters.from_level(level, source_size=source_size)
+    params = zstd.ZstdCompressionParameters.from_level(
+        level, source_size=source_size
+    )
 
     return 1 << (params.window_log + 2)
 
@@ -86,7 +88,9 @@
 
         # This matches the test for read_to_iter() below.
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        result = cctx.compress(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o")
+        result = cctx.compress(
+            b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o"
+        )
         self.assertEqual(
             result,
             b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00"
@@ -99,7 +103,9 @@
         result = cctx.compress(b"foo" * 256)
 
     def test_no_magic(self):
-        params = zstd.ZstdCompressionParameters.from_level(1, format=zstd.FORMAT_ZSTD1)
+        params = zstd.ZstdCompressionParameters.from_level(
+            1, format=zstd.FORMAT_ZSTD1
+        )
         cctx = zstd.ZstdCompressor(compression_params=params)
         magic = cctx.compress(b"foobar")
 
@@ -223,7 +229,8 @@
 
         self.assertEqual(
             result,
-            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00" b"\x66\x6f\x6f",
+            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00"
+            b"\x66\x6f\x6f",
         )
 
     def test_multithreaded_compression_params(self):
@@ -234,7 +241,9 @@
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, 3)
 
-        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f")
+        self.assertEqual(
+            result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f"
+        )
 
 
 @make_cffi
@@ -347,7 +356,9 @@
         )
         self.assertEqual(cobj.compress(b"bar"), b"")
         # 3 byte header plus content.
-        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar")
+        self.assertEqual(
+            cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar"
+        )
         self.assertEqual(cobj.flush(), b"\x01\x00\x00")
 
     def test_flush_empty_block(self):
@@ -445,7 +456,9 @@
         self.assertEqual(int(r), 0)
         self.assertEqual(w, 9)
 
-        self.assertEqual(dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
+        self.assertEqual(
+            dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00"
+        )
 
     def test_large_data(self):
         source = io.BytesIO()
@@ -478,7 +491,9 @@
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         cctx.copy_stream(source, with_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )
 
         no_params = zstd.get_frame_parameters(no_checksum.getvalue())
         with_params = zstd.get_frame_parameters(with_checksum.getvalue())
@@ -585,7 +600,9 @@
         cctx = zstd.ZstdCompressor()
 
         with cctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass
 
@@ -744,7 +761,9 @@
         source = io.BytesIO(b"foobar")
 
         with cctx.stream_reader(source, size=2) as reader:
-            with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+            with self.assertRaisesRegex(
+                zstd.ZstdError, "Src size is incorrect"
+            ):
                 reader.read(10)
 
         # Try another compression operation.
@@ -1126,7 +1145,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertTrue(with_params.has_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )
 
     def test_write_content_size(self):
         no_size = NonClosingBytesIO()
@@ -1145,7 +1166,9 @@
 
         # Declaring size will write the header.
         with_size = NonClosingBytesIO()
-        with cctx.stream_writer(with_size, size=len(b"foobar" * 256)) as compressor:
+        with cctx.stream_writer(
+            with_size, size=len(b"foobar" * 256)
+        ) as compressor:
             self.assertEqual(compressor.write(b"foobar" * 256), 0)
 
         no_params = zstd.get_frame_parameters(no_size.getvalue())
@@ -1191,7 +1214,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertFalse(with_params.has_checksum)
 
-        self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4)
+        self.assertEqual(
+            len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4
+        )
 
     def test_memory_size(self):
         cctx = zstd.ZstdCompressor(level=3)
@@ -1337,7 +1362,9 @@
         for chunk in cctx.read_to_iter(b"foobar"):
             pass
 
-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             for chunk in cctx.read_to_iter(True):
                 pass
 
@@ -1513,7 +1540,9 @@
 
         dctx = zstd.ZstdDecompressor()
 
-        self.assertEqual(dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24))
+        self.assertEqual(
+            dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24)
+        )
 
     def test_small_chunk_size(self):
         cctx = zstd.ZstdCompressor()
@@ -1533,7 +1562,8 @@
 
         dctx = zstd.ZstdDecompressor()
         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=10000), b"foo" * 1024
+            dctx.decompress(b"".join(chunks), max_output_size=10000),
+            b"foo" * 1024,
         )
 
     def test_input_types(self):
@@ -1602,7 +1632,8 @@
         list(chunker.finish())
 
         with self.assertRaisesRegex(
-            zstd.ZstdError, r"cannot call compress\(\) after compression finished"
+            zstd.ZstdError,
+            r"cannot call compress\(\) after compression finished",
         ):
             list(chunker.compress(b"foo"))
 
@@ -1644,7 +1675,9 @@
         with self.assertRaises(TypeError):
             cctx.multi_compress_to_buffer((1, 2))
 
-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             cctx.multi_compress_to_buffer([u"foo"])
 
     def test_empty_input(self):
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Mon Mar 09 10:18:40 2020 -0700
@@ -28,9 +28,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read(self, original, level, source_read_size, read_size):
+    def test_stream_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -58,9 +62,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -155,9 +163,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_readinto(self, original, level, source_read_size, read_size):
+    def test_stream_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
@@ -184,9 +196,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_readinto(self, original, level, source_read_size, read_size):
+    def test_buffer_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
@@ -285,9 +301,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read1(self, original, level, source_read_size, read_size):
+    def test_stream_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -315,9 +335,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read1(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -412,7 +436,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_stream_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -446,7 +472,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_buffer_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -576,7 +604,9 @@
         read_size=strategies.integers(min_value=1, max_value=1048576),
         write_size=strategies.integers(min_value=1, max_value=1048576),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
@@ -585,7 +615,11 @@
         dest = io.BytesIO()
 
         cctx.copy_stream(
-            source, dest, size=len(original), read_size=read_size, write_size=write_size
+            source,
+            dest,
+            size=len(original),
+            read_size=read_size,
+            write_size=write_size,
         )
 
         self.assertEqual(dest.getvalue(), ref_frame)
@@ -675,7 +709,9 @@
         decompressed_chunks.append(dobj.decompress(chunk))
 
         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
@@ -690,7 +726,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refcctx = zstd.ZstdCompressor(level=level)
         ref_frame = refcctx.compress(original)
 
@@ -699,7 +737,10 @@
         cctx = zstd.ZstdCompressor(level=level)
         chunks = list(
             cctx.read_to_iter(
-                source, size=len(original), read_size=read_size, write_size=write_size
+                source,
+                size=len(original),
+                read_size=read_size,
+                write_size=write_size,
             )
         )
 
@@ -710,7 +751,9 @@
 class TestCompressor_multi_compress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
@@ -776,7 +819,8 @@
         dctx = zstd.ZstdDecompressor()
 
         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=len(original)), original
+            dctx.decompress(b"".join(chunks), max_output_size=len(original)),
+            original,
         )
 
         self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1]))
@@ -794,7 +838,9 @@
         input_sizes=strategies.data(),
         flushes=strategies.data(),
     )
-    def test_flush_block(self, original, level, chunk_size, input_sizes, flushes):
+    def test_flush_block(
+        self, original, level, chunk_size, input_sizes, flushes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         chunker = cctx.chunker(chunk_size=chunk_size)
 
@@ -830,7 +876,9 @@
         decompressed_chunks.append(dobj.decompress(b"".join(chunks)))
 
         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
--- a/contrib/python-zstandard/tests/test_data_structures.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Mon Mar 09 10:18:40 2020 -0700
@@ -65,7 +65,9 @@
         p = zstd.ZstdCompressionParameters(threads=4)
         self.assertEqual(p.threads, 4)
 
-        p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576, overlap_log=6)
+        p = zstd.ZstdCompressionParameters(
+            threads=2, job_size=1048576, overlap_log=6
+        )
         self.assertEqual(p.threads, 2)
         self.assertEqual(p.job_size, 1048576)
         self.assertEqual(p.overlap_log, 6)
@@ -128,7 +130,9 @@
         with self.assertRaisesRegex(
             ValueError, "cannot specify both ldm_hash_rate_log"
         ):
-            zstd.ZstdCompressionParameters(ldm_hash_rate_log=8, ldm_hash_every_log=4)
+            zstd.ZstdCompressionParameters(
+                ldm_hash_rate_log=8, ldm_hash_every_log=4
+            )
 
         p = zstd.ZstdCompressionParameters(ldm_hash_rate_log=8)
         self.assertEqual(p.ldm_hash_every_log, 8)
@@ -137,7 +141,9 @@
         self.assertEqual(p.ldm_hash_every_log, 16)
 
     def test_overlap_log(self):
-        with self.assertRaisesRegex(ValueError, "cannot specify both overlap_log"):
+        with self.assertRaisesRegex(
+            ValueError, "cannot specify both overlap_log"
+        ):
             zstd.ZstdCompressionParameters(overlap_log=1, overlap_size_log=9)
 
         p = zstd.ZstdCompressionParameters(overlap_log=2)
@@ -169,10 +175,14 @@
                     zstd.get_frame_parameters(u"foobarbaz")
 
     def test_invalid_input_sizes(self):
-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(b"")
 
-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(zstd.FRAME_HEADER)
 
     def test_invalid_frame(self):
@@ -201,7 +211,9 @@
         self.assertTrue(params.has_checksum)
 
         # Upper 2 bits indicate content size.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x40\x00\xff\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x40\x00\xff\x00"
+        )
         self.assertEqual(params.content_size, 511)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 0)
@@ -215,7 +227,9 @@
         self.assertFalse(params.has_checksum)
 
         # Set multiple things.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00"
+        )
         self.assertEqual(params.content_size, 272)
         self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 15)
--- a/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Mon Mar 09 10:18:40 2020 -0700
@@ -23,7 +23,9 @@
 s_chainlog = strategies.integers(
     min_value=zstd.CHAINLOG_MIN, max_value=zstd.CHAINLOG_MAX
 )
-s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX)
+s_hashlog = strategies.integers(
+    min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX
+)
 s_searchlog = strategies.integers(
     min_value=zstd.SEARCHLOG_MIN, max_value=zstd.SEARCHLOG_MAX
 )
@@ -61,7 +63,14 @@
         s_strategy,
     )
     def test_valid_init(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         zstd.ZstdCompressionParameters(
             window_log=windowlog,
@@ -83,7 +92,14 @@
         s_strategy,
     )
     def test_estimated_compression_context_size(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         if minmatch == zstd.MINMATCH_MIN and strategy in (
             zstd.STRATEGY_FAST,
--- a/contrib/python-zstandard/tests/test_decompressor.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Mon Mar 09 10:18:40 2020 -0700
@@ -170,11 +170,15 @@
             dctx.decompress(compressed, max_output_size=len(source) - 1)
 
         # Input size + 1 works
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) + 1)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) + 1
+        )
         self.assertEqual(decompressed, source)
 
         # A much larger buffer works.
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) * 64)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) * 64
+        )
         self.assertEqual(decompressed, source)
 
     def test_stupidly_large_output_buffer(self):
@@ -237,7 +241,8 @@
         dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
 
         with self.assertRaisesRegex(
-            zstd.ZstdError, "decompression error: Frame requires too much memory"
+            zstd.ZstdError,
+            "decompression error: Frame requires too much memory",
         ):
             dctx.decompress(frame, max_output_size=len(source))
 
@@ -291,7 +296,9 @@
         self.assertEqual(w, len(source.getvalue()))
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
 
         dest = OpCountingBytesIO()
         dctx = zstd.ZstdDecompressor()
@@ -309,7 +316,9 @@
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass
 
@@ -474,7 +483,9 @@
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(frame) as reader:
-            with self.assertRaisesRegex(ValueError, "cannot seek to negative position"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot seek to negative position"
+            ):
                 reader.seek(-1, os.SEEK_SET)
 
             reader.read(1)
@@ -490,7 +501,8 @@
                 reader.seek(-1, os.SEEK_CUR)
 
             with self.assertRaisesRegex(
-                ValueError, "zstd decompression streams cannot be seeked with SEEK_END"
+                ValueError,
+                "zstd decompression streams cannot be seeked with SEEK_END",
             ):
                 reader.seek(0, os.SEEK_END)
 
@@ -743,7 +755,9 @@
 
     def test_read_lines(self):
         cctx = zstd.ZstdCompressor()
-        source = b"\n".join(("line %d" % i).encode("ascii") for i in range(1024))
+        source = b"\n".join(
+            ("line %d" % i).encode("ascii") for i in range(1024)
+        )
 
         frame = cctx.compress(source)
 
@@ -821,7 +835,9 @@
         dobj = dctx.decompressobj()
         dobj.decompress(data)
 
-        with self.assertRaisesRegex(zstd.ZstdError, "cannot use a decompressobj"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "cannot use a decompressobj"
+        ):
             dobj.decompress(data)
             self.assertIsNone(dobj.flush())
 
@@ -1124,7 +1140,9 @@
         # Buffer protocol works.
         dctx.read_to_iter(b"foobar")
 
-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             b"".join(dctx.read_to_iter(True))
 
     def test_empty_input(self):
@@ -1226,7 +1244,9 @@
         decompressed = b"".join(chunks)
         self.assertEqual(decompressed, source.getvalue())
 
-    @unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+    @unittest.skipUnless(
+        "ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set"
+    )
     def test_large_input(self):
         bytes = list(struct.Struct(">B").pack(i) for i in range(256))
         compressed = NonClosingBytesIO()
@@ -1241,13 +1261,16 @@
                     len(compressed.getvalue())
                     > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
                 )
-                have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+                have_raw = (
+                    input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+                )
                 if have_compressed and have_raw:
                     break
 
         compressed = io.BytesIO(compressed.getvalue())
         self.assertGreater(
-            len(compressed.getvalue()), zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+            len(compressed.getvalue()),
+            zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
         )
 
         dctx = zstd.ZstdDecompressor()
@@ -1303,7 +1326,9 @@
         self.assertEqual(streamed, source.getvalue())
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
         dctx = zstd.ZstdDecompressor()
         for chunk in dctx.read_to_iter(source, read_size=1, write_size=1):
             self.assertEqual(len(chunk), 1)
@@ -1355,10 +1380,14 @@
         ):
             dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegex(ValueError, "chunk 0 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 0 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )
 
         with self.assertRaisesRegex(
             ValueError, "chunk 0 missing content size in frame"
@@ -1389,10 +1418,14 @@
         ):
             dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegex(ValueError, "chunk 1 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 1 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([initial, b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )
 
         with self.assertRaisesRegex(
             ValueError, "chunk 1 missing content size in frame"
@@ -1400,7 +1433,9 @@
             dctx.decompress_content_dict_chain([initial, no_size])
 
         # Corrupt second frame.
-        cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b"foo" * 64))
+        cctx = zstd.ZstdCompressor(
+            dict_data=zstd.ZstdCompressionDict(b"foo" * 64)
+        )
         frame = cctx.compress(b"bar" * 64)
         frame = frame[0:12] + frame[15:]
 
@@ -1447,7 +1482,9 @@
         with self.assertRaises(TypeError):
             dctx.multi_decompress_to_buffer((1, 2))
 
-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             dctx.multi_decompress_to_buffer([u"foo"])
 
         with self.assertRaisesRegex(
@@ -1491,7 +1528,9 @@
         if not hasattr(dctx, "multi_decompress_to_buffer"):
             self.skipTest("multi_decompress_to_buffer not available")
 
-        result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+        result = dctx.multi_decompress_to_buffer(
+            frames, decompressed_sizes=sizes
+        )
 
         self.assertEqual(len(result), len(frames))
         self.assertEqual(result.size(), sum(map(len, original)))
@@ -1582,10 +1621,15 @@
         # And a manual mode.
         b = b"".join([frames[0].tobytes(), frames[1].tobytes()])
         b1 = zstd.BufferWithSegments(
-            b, struct.pack("=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1]))
+            b,
+            struct.pack(
+                "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
+            ),
         )
 
-        b = b"".join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
+        b = b"".join(
+            [frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()]
+        )
         b2 = zstd.BufferWithSegments(
             b,
             struct.pack(
--- a/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Mon Mar 09 10:18:40 2020 -0700
@@ -196,7 +196,9 @@
         streaming=strategies.booleans(),
         source_read_size=strategies.integers(1, 1048576),
     )
-    def test_stream_source_readall(self, original, level, streaming, source_read_size):
+    def test_stream_source_readall(
+        self, original, level, streaming, source_read_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -398,7 +400,9 @@
         write_size=strategies.integers(min_value=1, max_value=8192),
         input_sizes=strategies.data(),
     )
-    def test_write_size_variance(self, original, level, write_size, input_sizes):
+    def test_write_size_variance(
+        self, original, level, write_size, input_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -433,7 +437,9 @@
         read_size=strategies.integers(min_value=1, max_value=8192),
         write_size=strategies.integers(min_value=1, max_value=8192),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -441,7 +447,9 @@
         dest = io.BytesIO()
 
         dctx = zstd.ZstdDecompressor()
-        dctx.copy_stream(source, dest, read_size=read_size, write_size=write_size)
+        dctx.copy_stream(
+            source, dest, read_size=read_size, write_size=write_size
+        )
 
         self.assertEqual(dest.getvalue(), original)
 
@@ -490,11 +498,14 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         write_size=strategies.integers(
-            min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+            min_value=1,
+            max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
         ),
         chunk_sizes=strategies.data(),
     )
-    def test_random_output_sizes(self, original, level, write_size, chunk_sizes):
+    def test_random_output_sizes(
+        self, original, level, write_size, chunk_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -524,7 +535,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -532,7 +545,9 @@
 
         dctx = zstd.ZstdDecompressor()
         chunks = list(
-            dctx.read_to_iter(source, read_size=read_size, write_size=write_size)
+            dctx.read_to_iter(
+                source, read_size=read_size, write_size=write_size
+            )
         )
 
         self.assertEqual(b"".join(chunks), original)
@@ -542,7 +557,9 @@
 class TestDecompressor_multi_decompress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Mon Mar 09 10:18:40 2020 -0700
@@ -51,11 +51,15 @@
         self.assertEqual(d.d, 16)
 
     def test_set_dict_id(self):
-        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), k=64, d=16, dict_id=42
+        )
         self.assertEqual(d.dict_id(), 42)
 
     def test_optimize(self):
-        d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1, d=16)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), threads=-1, steps=1, d=16
+        )
 
         # This varies by platform.
         self.assertIn(d.k, (50, 2000))
@@ -71,10 +75,14 @@
     def test_bad_precompute_compress(self):
         d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)
 
-        with self.assertRaisesRegex(ValueError, "must specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must specify one of level or "
+        ):
             d.precompute_compress()
 
-        with self.assertRaisesRegex(ValueError, "must only specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must only specify one of level or "
+        ):
             d.precompute_compress(
                 level=3, compression_params=zstd.CompressionParameters()
             )
@@ -88,5 +96,7 @@
         d = zstd.ZstdCompressionDict(
             b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_FULLDICT
         )
-        with self.assertRaisesRegex(zstd.ZstdError, "unable to precompute dictionary"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "unable to precompute dictionary"
+        ):
             d.precompute_compress(level=1)
--- a/contrib/python-zstandard/zstandard/cffi.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/contrib/python-zstandard/zstandard/cffi.py	Mon Mar 09 10:18:40 2020 -0700
@@ -299,10 +299,14 @@
         _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
         _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log)
         _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match)
-        _set_compression_parameter(params, lib.ZSTD_c_targetLength, target_length)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_targetLength, target_length
+        )
 
         if strategy != -1 and compression_strategy != -1:
-            raise ValueError("cannot specify both compression_strategy and strategy")
+            raise ValueError(
+                "cannot specify both compression_strategy and strategy"
+            )
 
         if compression_strategy != -1:
             strategy = compression_strategy
@@ -313,12 +317,16 @@
         _set_compression_parameter(
             params, lib.ZSTD_c_contentSizeFlag, write_content_size
         )
-        _set_compression_parameter(params, lib.ZSTD_c_checksumFlag, write_checksum)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_checksumFlag, write_checksum
+        )
         _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
         _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)
 
         if overlap_log != -1 and overlap_size_log != -1:
-            raise ValueError("cannot specify both overlap_log and overlap_size_log")
+            raise ValueError(
+                "cannot specify both overlap_log and overlap_size_log"
+            )
 
         if overlap_size_log != -1:
             overlap_log = overlap_size_log
@@ -326,12 +334,16 @@
             overlap_log = 0
 
         _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
-        _set_compression_parameter(params, lib.ZSTD_c_forceMaxWindow, force_max_window)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_forceMaxWindow, force_max_window
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
         )
         _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
-        _set_compression_parameter(params, lib.ZSTD_c_ldmMinMatch, ldm_min_match)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmMinMatch, ldm_min_match
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
         )
@@ -346,7 +358,9 @@
         elif ldm_hash_rate_log == -1:
             ldm_hash_rate_log = 0
 
-        _set_compression_parameter(params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log
+        )
 
     @property
     def format(self):
@@ -354,7 +368,9 @@
 
     @property
     def compression_level(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_compressionLevel)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_compressionLevel
+        )
 
     @property
     def window_log(self):
@@ -386,7 +402,9 @@
 
     @property
     def write_content_size(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_contentSizeFlag)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_contentSizeFlag
+        )
 
     @property
     def write_checksum(self):
@@ -410,7 +428,9 @@
 
     @property
     def force_max_window(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_forceMaxWindow)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_forceMaxWindow
+        )
 
     @property
     def enable_ldm(self):
@@ -428,11 +448,15 @@
 
     @property
     def ldm_bucket_size_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmBucketSizeLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmBucketSizeLog
+        )
 
     @property
     def ldm_hash_rate_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashRateLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmHashRateLog
+        )
 
     @property
     def ldm_hash_every_log(self):
@@ -457,7 +481,8 @@
     zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to set compression context parameter: %s" % _zstd_error(zresult)
+            "unable to set compression context parameter: %s"
+            % _zstd_error(zresult)
         )
 
 
@@ -467,14 +492,17 @@
     zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to get compression context parameter: %s" % _zstd_error(zresult)
+            "unable to get compression context parameter: %s"
+            % _zstd_error(zresult)
         )
 
     return result[0]
 
 
 class ZstdCompressionWriter(object):
-    def __init__(self, compressor, writer, source_size, write_size, write_return_read):
+    def __init__(
+        self, compressor, writer, source_size, write_size, write_return_read
+    ):
         self._compressor = compressor
         self._writer = writer
         self._write_size = write_size
@@ -491,7 +519,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
     def __enter__(self):
         if self._closed:
@@ -595,13 +625,20 @@
 
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_compressStream2(
-                self._compressor._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                self._compressor._cctx,
+                out_buffer,
+                in_buffer,
+                lib.ZSTD_e_continue,
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -637,10 +674,14 @@
                 self._compressor._cctx, out_buffer, in_buffer, flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -672,7 +713,9 @@
                 self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
@@ -681,7 +724,10 @@
         return b"".join(chunks)
 
     def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
-        if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+        if flush_mode not in (
+            COMPRESSOBJ_FLUSH_FINISH,
+            COMPRESSOBJ_FLUSH_BLOCK,
+        ):
             raise ValueError("flush mode not recognized")
 
         if self._finished:
@@ -768,7 +814,9 @@
                 self._in.pos = 0
 
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos == self._out.size:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -780,7 +828,8 @@
 
         if self._in.src != ffi.NULL:
             raise ZstdError(
-                "cannot call flush() before consuming output from " "previous operation"
+                "cannot call flush() before consuming output from "
+                "previous operation"
             )
 
         while True:
@@ -788,7 +837,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -812,7 +863,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -939,7 +992,10 @@
         old_pos = out_buffer.pos
 
         zresult = lib.ZSTD_compressStream2(
-            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_continue
+            self._compressor._cctx,
+            out_buffer,
+            self._in_buffer,
+            lib.ZSTD_e_continue,
         )
 
         self._bytes_compressed += out_buffer.pos - old_pos
@@ -997,7 +1053,9 @@
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s", _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
@@ -1102,7 +1160,9 @@
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s", _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
@@ -1170,13 +1230,17 @@
         threads=0,
     ):
         if level > lib.ZSTD_maxCLevel():
-            raise ValueError("level must be less than %d" % lib.ZSTD_maxCLevel())
+            raise ValueError(
+                "level must be less than %d" % lib.ZSTD_maxCLevel()
+            )
 
         if threads < 0:
             threads = _cpu_count()
 
         if compression_params and write_checksum is not None:
-            raise ValueError("cannot define compression_params and " "write_checksum")
+            raise ValueError(
+                "cannot define compression_params and " "write_checksum"
+            )
 
         if compression_params and write_content_size is not None:
             raise ValueError(
@@ -1184,7 +1248,9 @@
             )
 
         if compression_params and write_dict_id is not None:
-            raise ValueError("cannot define compression_params and " "write_dict_id")
+            raise ValueError(
+                "cannot define compression_params and " "write_dict_id"
+            )
 
         if compression_params and threads:
             raise ValueError("cannot define compression_params and threads")
@@ -1201,7 +1267,9 @@
 
             self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
 
-            _set_compression_parameter(self._params, lib.ZSTD_c_compressionLevel, level)
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_compressionLevel, level
+            )
 
             _set_compression_parameter(
                 self._params,
@@ -1210,7 +1278,9 @@
             )
 
             _set_compression_parameter(
-                self._params, lib.ZSTD_c_checksumFlag, 1 if write_checksum else 0
+                self._params,
+                lib.ZSTD_c_checksumFlag,
+                1 if write_checksum else 0,
             )
 
             _set_compression_parameter(
@@ -1218,7 +1288,9 @@
             )
 
             if threads:
-                _set_compression_parameter(self._params, lib.ZSTD_c_nbWorkers, threads)
+                _set_compression_parameter(
+                    self._params, lib.ZSTD_c_nbWorkers, threads
+                )
 
         cctx = lib.ZSTD_createCCtx()
         if cctx == ffi.NULL:
@@ -1237,10 +1309,13 @@
             )
 
     def _setup_cctx(self):
-        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx, self._params)
+        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(
+            self._cctx, self._params
+        )
         if lib.ZSTD_isError(zresult):
             raise ZstdError(
-                "could not set compression parameters: %s" % _zstd_error(zresult)
+                "could not set compression parameters: %s"
+                % _zstd_error(zresult)
             )
 
         dict_data = self._dict_data
@@ -1259,7 +1334,8 @@
 
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "could not load compression dictionary: %s" % _zstd_error(zresult)
+                    "could not load compression dictionary: %s"
+                    % _zstd_error(zresult)
                 )
 
     def memory_size(self):
@@ -1275,7 +1351,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         out_buffer = ffi.new("ZSTD_outBuffer *")
         in_buffer = ffi.new("ZSTD_inBuffer *")
@@ -1307,11 +1385,15 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         cobj = ZstdCompressionObj()
         cobj._out = ffi.new("ZSTD_outBuffer *")
-        cobj._dst_buffer = ffi.new("char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        cobj._dst_buffer = ffi.new(
+            "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        )
         cobj._out.dst = cobj._dst_buffer
         cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
         cobj._out.pos = 0
@@ -1328,7 +1410,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         return ZstdCompressionChunker(self, chunk_size=chunk_size)
 
@@ -1353,7 +1437,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1381,7 +1467,9 @@
                     self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
                 )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -1423,7 +1511,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         return ZstdCompressionReader(self, source, read_size)
 
@@ -1443,7 +1533,9 @@
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
 
-        return ZstdCompressionWriter(self, writer, size, write_size, write_return_read)
+        return ZstdCompressionWriter(
+            self, writer, size, write_size, write_return_read
+        )
 
     write_to = stream_writer
 
@@ -1473,7 +1565,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1517,7 +1611,9 @@
                     self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
                 )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -1596,10 +1692,14 @@
     data_buffer = ffi.from_buffer(data)
     zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
     if lib.ZSTD_isError(zresult):
-        raise ZstdError("cannot get frame parameters: %s" % _zstd_error(zresult))
+        raise ZstdError(
+            "cannot get frame parameters: %s" % _zstd_error(zresult)
+        )
 
     if zresult:
-        raise ZstdError("not enough data for frame parameters; need %d bytes" % zresult)
+        raise ZstdError(
+            "not enough data for frame parameters; need %d bytes" % zresult
+        )
 
     return FrameParameters(params[0])
 
@@ -1611,9 +1711,14 @@
         self.k = k
         self.d = d
 
-        if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT, DICT_TYPE_FULLDICT):
+        if dict_type not in (
+            DICT_TYPE_AUTO,
+            DICT_TYPE_RAWCONTENT,
+            DICT_TYPE_FULLDICT,
+        ):
             raise ValueError(
-                "invalid dictionary load mode: %d; must use " "DICT_TYPE_* constants"
+                "invalid dictionary load mode: %d; must use "
+                "DICT_TYPE_* constants"
             )
 
         self._dict_type = dict_type
@@ -1630,7 +1735,9 @@
 
     def precompute_compress(self, level=0, compression_params=None):
         if level and compression_params:
-            raise ValueError("must only specify one of level or " "compression_params")
+            raise ValueError(
+                "must only specify one of level or " "compression_params"
+            )
 
         if not level and not compression_params:
             raise ValueError("must specify one of level or compression_params")
@@ -1675,7 +1782,9 @@
         if ddict == ffi.NULL:
             raise ZstdError("could not create decompression dict")
 
-        ddict = ffi.gc(ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict))
+        ddict = ffi.gc(
+            ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)
+        )
         self.__dict__["_ddict"] = ddict
 
         return ddict
@@ -1805,7 +1914,9 @@
                 self._decompressor._dctx, out_buffer, in_buffer
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompressor error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompressor error: %s" % _zstd_error(zresult)
+                )
 
             if zresult == 0:
                 self._finished = True
@@ -2105,16 +2216,22 @@
 
         if whence == os.SEEK_SET:
             if pos < 0:
-                raise ValueError("cannot seek to negative position with SEEK_SET")
+                raise ValueError(
+                    "cannot seek to negative position with SEEK_SET"
+                )
 
             if pos < self._bytes_decompressed:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )
 
             read_amount = pos - self._bytes_decompressed
 
         elif whence == os.SEEK_CUR:
             if pos < 0:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )
 
             read_amount = pos
         elif whence == os.SEEK_END:
@@ -2123,7 +2240,9 @@
             )
 
         while read_amount:
-            result = self.read(min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE))
+            result = self.read(
+                min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+            )
 
             if not result:
                 break
@@ -2257,10 +2376,14 @@
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 out_buffer.pos = 0
 
@@ -2299,7 +2422,9 @@
 
         data_buffer = ffi.from_buffer(data)
 
-        output_size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
+        output_size = lib.ZSTD_getFrameContentSize(
+            data_buffer, len(data_buffer)
+        )
 
         if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
             raise ZstdError("error determining content size from frame header")
@@ -2307,7 +2432,9 @@
             return b""
         elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
             if not max_output_size:
-                raise ZstdError("could not determine content size in frame header")
+                raise ZstdError(
+                    "could not determine content size in frame header"
+                )
 
             result_buffer = ffi.new("char[]", max_output_size)
             result_size = max_output_size
@@ -2330,7 +2457,9 @@
         if lib.ZSTD_isError(zresult):
             raise ZstdError("decompression error: %s" % _zstd_error(zresult))
         elif zresult:
-            raise ZstdError("decompression error: did not decompress full frame")
+            raise ZstdError(
+                "decompression error: did not decompress full frame"
+            )
         elif output_size and out_buffer.pos != output_size:
             raise ZstdError(
                 "decompression error: decompressed %d bytes; expected %d"
@@ -2346,7 +2475,9 @@
         read_across_frames=False,
     ):
         self._ensure_dctx()
-        return ZstdDecompressionReader(self, source, read_size, read_across_frames)
+        return ZstdDecompressionReader(
+            self, source, read_size, read_across_frames
+        )
 
     def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
         if write_size < 1:
@@ -2421,9 +2552,13 @@
             while in_buffer.pos < in_buffer.size:
                 assert out_buffer.pos == 0
 
-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd decompress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -2449,7 +2584,9 @@
         if not hasattr(writer, "write"):
             raise ValueError("must pass an object with a write() method")
 
-        return ZstdDecompressionWriter(self, writer, write_size, write_return_read)
+        return ZstdDecompressionWriter(
+            self, writer, write_size, write_return_read
+        )
 
     write_to = stream_writer
 
@@ -2491,7 +2628,9 @@
 
             # Flush all read data to output.
             while in_buffer.pos < in_buffer.size:
-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
                     raise ZstdError(
                         "zstd decompressor error: %s" % _zstd_error(zresult)
@@ -2521,7 +2660,9 @@
         # All chunks should be zstd frames and should have content size set.
         chunk_buffer = ffi.from_buffer(chunk)
         params = ffi.new("ZSTD_frameHeader *")
-        zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+        zresult = lib.ZSTD_getFrameHeader(
+            params, chunk_buffer, len(chunk_buffer)
+        )
         if lib.ZSTD_isError(zresult):
             raise ValueError("chunk 0 is not a valid zstd frame")
         elif zresult:
@@ -2546,7 +2687,9 @@
 
         zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("could not decompress chunk 0: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "could not decompress chunk 0: %s" % _zstd_error(zresult)
+            )
         elif zresult:
             raise ZstdError("chunk 0 did not decompress full frame")
 
@@ -2561,11 +2704,15 @@
                 raise ValueError("chunk %d must be bytes" % i)
 
             chunk_buffer = ffi.from_buffer(chunk)
-            zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+            zresult = lib.ZSTD_getFrameHeader(
+                params, chunk_buffer, len(chunk_buffer)
+            )
             if lib.ZSTD_isError(zresult):
                 raise ValueError("chunk %d is not a valid zstd frame" % i)
             elif zresult:
-                raise ValueError("chunk %d is too small to contain a zstd frame" % i)
+                raise ValueError(
+                    "chunk %d is too small to contain a zstd frame" % i
+                )
 
             if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
                 raise ValueError("chunk %d missing content size in frame" % i)
@@ -2580,7 +2727,9 @@
             in_buffer.size = len(chunk_buffer)
             in_buffer.pos = 0
 
-            zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+            zresult = lib.ZSTD_decompressStream(
+                self._dctx, out_buffer, in_buffer
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "could not decompress chunk %d: %s" % _zstd_error(zresult)
@@ -2597,7 +2746,9 @@
         lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)
 
         if self._max_window_size:
-            zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx, self._max_window_size)
+            zresult = lib.ZSTD_DCtx_setMaxWindowSize(
+                self._dctx, self._max_window_size
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "unable to set max window size: %s" % _zstd_error(zresult)
@@ -2605,11 +2756,14 @@
 
         zresult = lib.ZSTD_DCtx_setFormat(self._dctx, self._format)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("unable to set decoding format: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "unable to set decoding format: %s" % _zstd_error(zresult)
+            )
 
         if self._dict_data and load_dict:
             zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "unable to reference prepared dictionary: %s" % _zstd_error(zresult)
+                    "unable to reference prepared dictionary: %s"
+                    % _zstd_error(zresult)
                 )
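
The hunks above only re-wrap the vendored python-zstandard CFFI backend to the
new line-length rules; the public compression API is unchanged. As a minimal
round-trip sketch against the standalone zstandard package this tree vendors
(class and method names as they appear in the code above):

    import zstandard as zstd

    data = b"hello world" * 1000

    # One-shot compression; the frame records the content size by default.
    cctx = zstd.ZstdCompressor(level=3)
    frame = cctx.compress(data)

    # Decompression can therefore size its output from the frame header.
    dctx = zstd.ZstdDecompressor()
    assert dctx.decompress(frame) == data
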
--- a/hgext/absorb.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/absorb.py	Mon Mar 09 10:18:40 2020 -0700
@@ -407,7 +407,7 @@
             involved = [
                 annotated[i] for i in nearbylinenums if annotated[i][0] != 1
             ]
-        involvedrevs = list(set(r for r, l in involved))
+        involvedrevs = list({r for r, l in involved})
         newfixups = []
         if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
             # chunk belongs to a single revision
@@ -734,10 +734,10 @@
     @property
     def chunkstats(self):
         """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
-        return dict(
-            (path, state.chunkstats)
+        return {
+            path: state.chunkstats
             for path, state in pycompat.iteritems(self.fixupmap)
-        )
+        }
 
     def commit(self):
         """commit changes. update self.finalnode, self.replacemap"""
@@ -1077,7 +1077,7 @@
             b'i',
             b'interactive',
             None,
-            _(b'interactively select which chunks to apply (EXPERIMENTAL)'),
+            _(b'interactively select which chunks to apply'),
         ),
         (
             b'e',
--- a/hgext/beautifygraph.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/beautifygraph.py	Mon Mar 09 10:18:40 2020 -0700
@@ -71,6 +71,8 @@
         return b'\xE2\x97\x8B'  # U+25CB ○
     if node == b'@':
         return b'\xE2\x97\x8D'  # U+25CD ◍
+    if node == b'%':
+        return b'\xE2\x97\x8E'  # U+25CE ◎
     if node == b'*':
         return b'\xE2\x88\x97'  # U+2217 ∗
     if node == b'x':
--- a/hgext/closehead.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/closehead.py	Mon Mar 09 10:18:40 2020 -0700
@@ -76,7 +76,7 @@
     heads = []
     for branch in repo.branchmap():
         heads.extend(repo.branchheads(branch))
-    heads = set(repo[h].rev() for h in heads)
+    heads = {repo[h].rev() for h in heads}
     for rev in revs:
         if rev not in heads:
             raise error.Abort(_(b'revision is not an open head: %d') % rev)
--- a/hgext/convert/hg.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/convert/hg.py	Mon Mar 09 10:18:40 2020 -0700
@@ -677,13 +677,9 @@
             for t in self.repo.tagslist()
             if self.repo.tagtype(t[0]) == b'global'
         ]
-        return dict(
-            [
-                (name, nodemod.hex(node))
-                for name, node in tags
-                if self.keep(node)
-            ]
-        )
+        return {
+            name: nodemod.hex(node) for name, node in tags if self.keep(node)
+        }
 
     def getchangedfiles(self, rev, i):
         ctx = self._changectx(rev)
--- a/hgext/convert/subversion.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/convert/subversion.py	Mon Mar 09 10:18:40 2020 -0700
@@ -710,11 +710,11 @@
                 # Here/tags/tag.1 discarded as well as its children.
                 # It happens with tools like cvs2svn. Such tags cannot
                 # be represented in mercurial.
-                addeds = dict(
-                    (p, e.copyfrom_path)
+                addeds = {
+                    p: e.copyfrom_path
                     for p, e in pycompat.iteritems(origpaths)
                     if e.action == b'A' and e.copyfrom_path
-                )
+                }
                 badroots = set()
                 for destroot in addeds:
                     for source, sourcerev, dest in pendings:
--- a/hgext/eol.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/eol.py	Mon Mar 09 10:18:40 2020 -0700
@@ -221,7 +221,7 @@
         self.match = match.match(root, b'', [], include, exclude)
 
     def copytoui(self, ui):
-        newpatterns = set(pattern for pattern, key, m in self.patterns)
+        newpatterns = {pattern for pattern, key, m in self.patterns}
         for section in (b'decode', b'encode'):
             for oldpattern, _filter in ui.configitems(section):
                 if oldpattern not in newpatterns:
--- a/hgext/fastannotate/commands.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/fastannotate/commands.py	Mon Mar 09 10:18:40 2020 -0700
@@ -233,7 +233,7 @@
                         showlines=(showlines and not showdeleted),
                     )
                     if showdeleted:
-                        existinglines = set((l[0], l[1]) for l in result)
+                        existinglines = {(l[0], l[1]) for l in result}
                         result = a.annotatealllines(
                             rev, showpath=showpath, showlines=showlines
                         )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastexport.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,218 @@
+# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""export repositories as git fast-import stream"""
+
+# The format specification for fast-import streams can be found at
+# https://git-scm.com/docs/git-fast-import#_input_format
+
+from __future__ import absolute_import
+import re
+
+from mercurial.i18n import _
+from mercurial.node import hex, nullrev
+from mercurial.utils import stringutil
+from mercurial import (
+    error,
+    pycompat,
+    registrar,
+    scmutil,
+)
+from .convert import convcmd
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = b"ships-with-hg-core"
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+GIT_PERSON_PROHIBITED = re.compile(b'[<>\n"]')
+GIT_EMAIL_PROHIBITED = re.compile(b"[<> \n]")
+
+
+def convert_to_git_user(authormap, user, rev):
+    mapped_user = authormap.get(user, user)
+    user_person = stringutil.person(mapped_user)
+    user_email = stringutil.email(mapped_user)
+    if GIT_EMAIL_PROHIBITED.match(user_email) or GIT_PERSON_PROHIBITED.match(
+        user_person
+    ):
+        raise error.Abort(
+            _(b"Unable to parse user into person and email for revision " + rev)
+        )
+    if user_person:
+        return b'"' + user_person + b'" <' + user_email + b'>'
+    else:
+        return b"<" + user_email + b">"
+
+
+def convert_to_git_date(date):
+    timestamp, utcoff = date
+    tzsign = b"+" if utcoff < 0 else b"-"
+    if utcoff % 60 != 0:
+        raise error.Abort(
+            _(b"UTC offset in %b is not an integer number of seconds") % (date,)
+        )
+    utcoff = abs(utcoff) // 60
+    tzh = utcoff // 60
+    tzmin = utcoff % 60
+    return b"%d " % int(timestamp) + tzsign + b"%02d%02d" % (tzh, tzmin)
+
+
+def convert_to_git_ref(branch):
+    # XXX filter/map depending on git restrictions
+    return b"refs/heads/" + branch
+
+
+def write_data(buf, data, skip_newline):
+    buf.append(b"data %d\n" % len(data))
+    buf.append(data)
+    if not skip_newline or data[-1:] != b"\n":
+        buf.append(b"\n")
+
+
+def export_commit(ui, repo, rev, marks, authormap):
+    ctx = repo[rev]
+    revid = ctx.hex()
+    if revid in marks:
+        ui.warn(_(b"warning: revision %s already exported, skipped\n") % revid)
+        return
+    parents = [p for p in ctx.parents() if p.rev() != nullrev]
+    for p in parents:
+        if p.hex() not in marks:
+            ui.warn(
+                _(b"warning: parent %s of %s has not been exported, skipped\n")
+                % (p, revid)
+            )
+            return
+
+    # For all files modified by the commit, check if they have already
+    # been exported and otherwise dump the blob with the new mark.
+    for fname in ctx.files():
+        if fname not in ctx:
+            continue
+        filectx = ctx.filectx(fname)
+        filerev = hex(filectx.filenode())
+        if filerev not in marks:
+            mark = len(marks) + 1
+            marks[filerev] = mark
+            data = filectx.data()
+            buf = [b"blob\n", b"mark :%d\n" % mark]
+            write_data(buf, data, False)
+            ui.write(*buf, keepprogressbar=True)
+            del buf
+
+    # Assign a mark for the current revision for references by
+    # later merge commits.
+    mark = len(marks) + 1
+    marks[revid] = mark
+
+    ref = convert_to_git_ref(ctx.branch())
+    buf = [
+        b"commit %s\n" % ref,
+        b"mark :%d\n" % mark,
+        b"committer %s %s\n"
+        % (
+            convert_to_git_user(authormap, ctx.user(), revid),
+            convert_to_git_date(ctx.date()),
+        ),
+    ]
+    write_data(buf, ctx.description(), True)
+    if parents:
+        buf.append(b"from :%d\n" % marks[parents[0].hex()])
+    if len(parents) == 2:
+        buf.append(b"merge :%d\n" % marks[parents[1].hex()])
+        p0ctx = repo[parents[0]]
+        files = ctx.manifest().diff(p0ctx.manifest())
+    else:
+        files = ctx.files()
+    filebuf = []
+    for fname in files:
+        if fname not in ctx:
+            filebuf.append((fname, b"D %s\n" % fname))
+        else:
+            filectx = ctx.filectx(fname)
+            filerev = filectx.filenode()
+            fileperm = b"755" if filectx.isexec() else b"644"
+            changed = b"M %s :%d %s\n" % (fileperm, marks[hex(filerev)], fname)
+            filebuf.append((fname, changed))
+    filebuf.sort()
+    buf.extend(changed for (fname, changed) in filebuf)
+    del filebuf
+    buf.append(b"\n")
+    ui.write(*buf, keepprogressbar=True)
+    del buf
+
+
+isrev = re.compile(b"^[0-9a-f]{40}$")
+
+
+@command(
+    b"fastexport",
+    [
+        (b"r", b"rev", [], _(b"revisions to export"), _(b"REV")),
+        (b"i", b"import-marks", b"", _(b"old marks file to read"), _(b"FILE")),
+        (b"e", b"export-marks", b"", _(b"new marks file to write"), _(b"FILE")),
+        (
+            b"A",
+            b"authormap",
+            b"",
+            _(b"remap usernames using this file"),
+            _(b"FILE"),
+        ),
+    ],
+    _(b"[OPTION]... [REV]..."),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
+def fastexport(ui, repo, *revs, **opts):
+    """export repository as git fast-import stream
+
+    This command lets you dump a repository as a human-readable text stream.
+    It can be piped into corresponding import routines like "git fast-import".
+    Incremental dumps can be created by using marks files.
+    """
+    opts = pycompat.byteskwargs(opts)
+
+    revs += tuple(opts.get(b"rev", []))
+    if not revs:
+        revs = scmutil.revrange(repo, [b":"])
+    else:
+        revs = scmutil.revrange(repo, revs)
+    if not revs:
+        raise error.Abort(_(b"no revisions matched"))
+    authorfile = opts.get(b"authormap")
+    if authorfile:
+        authormap = convcmd.readauthormap(ui, authorfile)
+    else:
+        authormap = {}
+
+    import_marks = opts.get(b"import_marks")
+    marks = {}
+    if import_marks:
+        with open(import_marks, "rb") as import_marks_file:
+            for line in import_marks_file:
+                line = line.strip()
+                if not isrev.match(line) or line in marks:
+                    raise error.Abort(_(b"Corrupted marks file"))
+                marks[line] = len(marks) + 1
+
+    revs.sort()
+    with ui.makeprogress(
+        _(b"exporting"), unit=_(b"revisions"), total=len(revs)
+    ) as progress:
+        for rev in revs:
+            export_commit(ui, repo, rev, marks, authormap)
+            progress.increment()
+
+    export_marks = opts.get(b"export_marks")
+    if export_marks:
+        with open(export_marks, "wb") as export_marks_file:
+            output_marks = [None] * len(marks)
+            for k, v in marks.items():
+                output_marks[v - 1] = k
+            for k in output_marks:
+                export_marks_file.write(k + b"\n")
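
The marks files read and written above are plain lists of 40-character hashes,
one per line, in mark order; feeding the same file back in via --import-marks
makes a later run skip everything already exported.  A hypothetical first run
(paths and the receiving git repository are illustrative only; incremental
re-runs would also pass --import-marks on both sides of the pipe):

    $ hg fastexport --export-marks ../hg.marks \
          | git -C ../mirror.git fast-import --export-marks=hg.git-marks
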
--- a/hgext/fetch.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/fetch.py	Mon Mar 09 10:18:40 2020 -0700
@@ -171,11 +171,11 @@
                     % (repo.changelog.rev(firstparent), short(firstparent))
                 )
             hg.clean(repo, firstparent)
+            p2ctx = repo[secondparent]
             ui.status(
-                _(b'merging with %d:%s\n')
-                % (repo.changelog.rev(secondparent), short(secondparent))
+                _(b'merging with %d:%s\n') % (p2ctx.rev(), short(secondparent))
             )
-            err = hg.merge(repo, secondparent, remind=False)
+            err = hg.merge(p2ctx, remind=False)
 
         if not err:
             # we don't translate commit messages
--- a/hgext/fix.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/fix.py	Mon Mar 09 10:18:40 2020 -0700
@@ -735,15 +735,7 @@
 
     wctx = context.overlayworkingctx(repo)
     wctx.setbase(repo[newp1node])
-    merge.update(
-        repo,
-        ctx.rev(),
-        branchmerge=False,
-        force=True,
-        ancestor=p1rev,
-        mergeancestor=False,
-        wc=wctx,
-    )
+    merge.revert_to(ctx, wc=wctx)
     copies.graftcopies(wctx, ctx, ctx.p1())
 
     for path in filedata.keys():
--- a/hgext/fsmonitor/__init__.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/fsmonitor/__init__.py	Mon Mar 09 10:18:40 2020 -0700
@@ -397,7 +397,7 @@
     # for file paths which require normalization and we encounter a case
     # collision, we store our own foldmap
     if normalize:
-        foldmap = dict((normcase(k), k) for k in results)
+        foldmap = {normcase(k): k for k in results}
 
     switch_slashes = pycompat.ossep == b'\\'
     # The order of the results is, strictly speaking, undefined.
@@ -459,22 +459,16 @@
     if normalize:
         # any notable files that have changed case will already be handled
         # above, so just check membership in the foldmap
-        notefiles = set(
-            (
-                normalize(f, True, True)
-                for f in notefiles
-                if normcase(f) not in foldmap
-            )
-        )
-    visit = set(
-        (
-            f
+        notefiles = {
+            normalize(f, True, True)
             for f in notefiles
-            if (
-                f not in results and matchfn(f) and (f in dmap or not ignore(f))
-            )
-        )
-    )
+            if normcase(f) not in foldmap
+        }
+    visit = {
+        f
+        for f in notefiles
+        if (f not in results and matchfn(f) and (f in dmap or not ignore(f)))
+    }
 
     if not fresh_instance:
         if matchalways:
--- a/hgext/histedit.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/histedit.py	Mon Mar 09 10:18:40 2020 -0700
@@ -649,7 +649,7 @@
             repo.ui.setconfig(
                 b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
             )
-            stats = mergemod.graft(repo, ctx, ctx.p1(), [b'local', b'histedit'])
+            stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit'])
         finally:
             repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
     return stats
@@ -835,10 +835,10 @@
             return ctx, [(self.node, (parentctxnode,))]
 
         parentctx = repo[parentctxnode]
-        newcommits = set(
+        newcommits = {
             c.node()
             for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
-        )
+        }
         if not newcommits:
             repo.ui.warn(
                 _(
@@ -945,7 +945,7 @@
 class base(histeditaction):
     def run(self):
         if self.repo[b'.'].node() != self.node:
-            mergemod.update(self.repo, self.node, branchmerge=False, force=True)
+            mergemod.clean_update(self.repo[self.node])
         return self.continueclean()
 
     def continuedirty(self):
@@ -2412,7 +2412,7 @@
     Will abort if there are too many or too few rules, a malformed rule,
     or a rule on a changeset outside of the user-given range.
     """
-    expected = set(c.node() for c in ctxs)
+    expected = {c.node() for c in ctxs}
     seen = set()
     prev = None
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/hooklib/__init__.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,26 @@
+"""collection of simple hooks for common tasks (EXPERIMENTAL)
+
+This extension provides a number of simple hooks to handle issues
+commonly found in repositories with many contributors:
+- email notification when changesets move from draft to public phase
+- email notification when changesets are obsoleted
+- enforcement of draft phase for all incoming changesets
+- enforcement of a no-branch-merge policy
+- enforcement of a no-multiple-heads policy
+
+The implementation of the hooks is subject to change, e.g. whether to
+implement them as individual hooks or merge them into the notify
+extension as options. The functionality itself is planned to be supported
+long-term.
+"""
+from __future__ import absolute_import
+from . import (
+    changeset_obsoleted,
+    changeset_published,
+)
+
+# configtable is only picked up from the "top-level" module of the extension,
+# so expand it here to ensure all items are properly loaded
+configtable = {}
+configtable.update(changeset_published.configtable)
+configtable.update(changeset_obsoleted.configtable)
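
Wiring the new hooks up needs nothing beyond hgrc configuration; a minimal
sketch combining two of the policy hooks added below (hook types and dotted
paths as given in their module docstrings) could look like:

    [hooks]
    pretxnclose-phase.enforce_draft_commits = \
      python:hgext.hooklib.enforce_draft_commits.hook
    pretxnclose.reject_new_heads = \
      python:hgext.hooklib.reject_new_heads.hook
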
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/hooklib/changeset_obsoleted.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,131 @@
+# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""changeset_obsoleted is a hook to send a mail when an
+existing draft changeset is obsoleted by an obsmarker without successor.
+
+Correct message threading requires the same messageidseed to be used for both
+the original notification and the new mail.
+
+Usage:
+  [notify]
+  messageidseed = myseed
+
+  [hooks]
+  pretxnclose.changeset_obsoleted = \
+    python:hgext.hooklib.changeset_obsoleted.hook
+"""
+
+from __future__ import absolute_import
+
+import email.errors as emailerrors
+import email.utils as emailutils
+
+from mercurial.i18n import _
+from mercurial import (
+    encoding,
+    error,
+    logcmdutil,
+    mail,
+    obsutil,
+    pycompat,
+    registrar,
+)
+from mercurial.utils import dateutil
+from .. import notify
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(
+    b'notify_obsoleted', b'domain', default=None,
+)
+configitem(
+    b'notify_obsoleted', b'messageidseed', default=None,
+)
+configitem(
+    b'notify_obsoleted',
+    b'template',
+    default=b'''Subject: changeset abandoned
+
+This changeset has been abandoned.
+''',
+)
+
+
+def _report_commit(ui, repo, ctx):
+    domain = ui.config(b'notify_obsoleted', b'domain') or ui.config(
+        b'notify', b'domain'
+    )
+    messageidseed = ui.config(
+        b'notify_obsoleted', b'messageidseed'
+    ) or ui.config(b'notify', b'messageidseed')
+    template = ui.config(b'notify_obsoleted', b'template')
+    spec = logcmdutil.templatespec(template, None)
+    templater = logcmdutil.changesettemplater(ui, repo, spec)
+    ui.pushbuffer()
+    n = notify.notifier(ui, repo, b'incoming')
+
+    subs = set()
+    for sub, spec in n.subs:
+        if spec is None:
+            subs.add(sub)
+            continue
+        revs = repo.revs(b'%r and %d:', spec, ctx.rev())
+        if len(revs):
+            subs.add(sub)
+            continue
+    if len(subs) == 0:
+        ui.debug(
+            b'notify_obsoleted: no subscribers to selected repo and revset\n'
+        )
+        return
+
+    templater.show(
+        ctx,
+        changes=ctx.changeset(),
+        baseurl=ui.config(b'web', b'baseurl'),
+        root=repo.root,
+        webroot=n.root,
+    )
+    data = ui.popbuffer()
+
+    try:
+        msg = mail.parsebytes(data)
+    except emailerrors.MessageParseError as inst:
+        raise error.Abort(inst)
+
+    msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
+    msg['Message-Id'] = notify.messageid(
+        ctx, domain, messageidseed + b'-obsoleted'
+    )
+    msg['Date'] = encoding.strfromlocal(
+        dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2")
+    )
+    if not msg['From']:
+        sender = ui.config(b'email', b'from') or ui.username()
+        if b'@' not in sender or b'@localhost' in sender:
+            sender = n.fixmail(sender)
+        msg['From'] = mail.addressencode(ui, sender, n.charsets, n.test)
+    msg['To'] = ', '.join(sorted(subs))
+
+    msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string()
+    if ui.configbool(b'notify', b'test'):
+        ui.write(msgtext)
+        if not msgtext.endswith(b'\n'):
+            ui.write(b'\n')
+    else:
+        ui.status(_(b'notify_obsoleted: sending mail for %d\n') % ctx.rev())
+        mail.sendmail(
+            ui, emailutils.parseaddr(msg['From'])[1], subs, msgtext, mbox=n.mbox
+        )
+
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+    if hooktype != b"pretxnclose":
+        raise error.Abort(
+            _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
+        )
+    for rev in obsutil.getobsoleted(repo, repo.currenttransaction()):
+        _report_commit(ui, repo, repo.unfiltered()[rev])
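
Because message threading keys off the shared messageidseed, and the domain,
messageidseed and template lookups above all fall back to the base notify
section, only what differs needs its own section.  A hypothetical
configuration (domain value illustrative) might be:

    [notify]
    messageidseed = myseed

    [notify_obsoleted]
    domain = example.com
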
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/hooklib/changeset_published.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,131 @@
+# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""changeset_published is a hook to send a mail when an
+existing draft changeset is moved to the public phase.
+
+Correct message threading requires the same messageidseed to be used for both
+the original notification and the new mail.
+
+Usage:
+  [notify]
+  messageidseed = myseed
+
+  [hooks]
+  txnclose-phase.changeset_published = \
+    python:hgext.hooklib.changeset_published.hook
+"""
+
+from __future__ import absolute_import
+
+import email.errors as emailerrors
+import email.utils as emailutils
+
+from mercurial.i18n import _
+from mercurial import (
+    encoding,
+    error,
+    logcmdutil,
+    mail,
+    pycompat,
+    registrar,
+)
+from mercurial.utils import dateutil
+from .. import notify
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(
+    b'notify_published', b'domain', default=None,
+)
+configitem(
+    b'notify_published', b'messageidseed', default=None,
+)
+configitem(
+    b'notify_published',
+    b'template',
+    default=b'''Subject: changeset published
+
+This changeset has been published.
+''',
+)
+
+
+def _report_commit(ui, repo, ctx):
+    domain = ui.config(b'notify_published', b'domain') or ui.config(
+        b'notify', b'domain'
+    )
+    messageidseed = ui.config(
+        b'notify_published', b'messageidseed'
+    ) or ui.config(b'notify', b'messageidseed')
+    template = ui.config(b'notify_published', b'template')
+    spec = logcmdutil.templatespec(template, None)
+    templater = logcmdutil.changesettemplater(ui, repo, spec)
+    ui.pushbuffer()
+    n = notify.notifier(ui, repo, b'incoming')
+
+    subs = set()
+    for sub, spec in n.subs:
+        if spec is None:
+            subs.add(sub)
+            continue
+        revs = repo.revs(b'%r and %d:', spec, ctx.rev())
+        if len(revs):
+            subs.add(sub)
+            continue
+    if len(subs) == 0:
+        ui.debug(
+            b'notify_published: no subscribers to selected repo and revset\n'
+        )
+        return
+
+    templater.show(
+        ctx,
+        changes=ctx.changeset(),
+        baseurl=ui.config(b'web', b'baseurl'),
+        root=repo.root,
+        webroot=n.root,
+    )
+    data = ui.popbuffer()
+
+    try:
+        msg = mail.parsebytes(data)
+    except emailerrors.MessageParseError as inst:
+        raise error.Abort(inst)
+
+    msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
+    msg['Message-Id'] = notify.messageid(
+        ctx, domain, messageidseed + b'-published'
+    )
+    msg['Date'] = encoding.strfromlocal(
+        dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2")
+    )
+    if not msg['From']:
+        sender = ui.config(b'email', b'from') or ui.username()
+        if b'@' not in sender or b'@localhost' in sender:
+            sender = n.fixmail(sender)
+        msg['From'] = mail.addressencode(ui, sender, n.charsets, n.test)
+    msg['To'] = ', '.join(sorted(subs))
+
+    msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string()
+    if ui.configbool(b'notify', b'test'):
+        ui.write(msgtext)
+        if not msgtext.endswith(b'\n'):
+            ui.write(b'\n')
+    else:
+        ui.status(_(b'notify_published: sending mail for %d\n') % ctx.rev())
+        mail.sendmail(
+            ui, emailutils.parseaddr(msg['From'])[1], subs, msgtext, mbox=n.mbox
+        )
+
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+    if hooktype != b"txnclose-phase":
+        raise error.Abort(
+            _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
+        )
+    ctx = repo.unfiltered()[node]
+    if kwargs['oldphase'] == b'draft' and kwargs['phase'] == b'public':
+        _report_commit(ui, repo, ctx)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/hooklib/enforce_draft_commits.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,45 @@
+# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""enforce_draft_commits us a hook to ensure that all new changesets are
+in the draft phase. This allows enforcing policies for work-in-progress
+changes in overlay repositories, i.e. a shared hidden repositories with
+different views for work-in-progress code and public history.
+
+Usage:
+  [hooks]
+  pretxnclose-phase.enforce_draft_commits = \
+    python:hgext.hooklib.enforce_draft_commits.hook
+"""
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    pycompat,
+)
+
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+    if hooktype != b"pretxnclose-phase":
+        raise error.Abort(
+            _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
+        )
+    ctx = repo.unfiltered()[node]
+    if kwargs['oldphase']:
+        raise error.Abort(
+            _(b'Phase change from %r to %r for %s rejected')
+            % (
+                pycompat.bytestr(kwargs['oldphase']),
+                pycompat.bytestr(kwargs['phase']),
+                ctx,
+            )
+        )
+    elif kwargs['phase'] != b'draft':
+        raise error.Abort(
+            _(b'New changeset %s in phase %r rejected')
+            % (ctx, pycompat.bytestr(kwargs['phase']))
+        )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/hooklib/reject_merge_commits.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,45 @@
+# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""reject_merge_commits is a hook to check new changesets for merge commits.
+Merge commits are allowed only between different branches, i.e. merging
+a feature branch into the main development branch. This can be used to
+enforce policies for linear commit histories.
+
+Usage:
+  [hooks]
+  pretxnchangegroup.reject_merge_commits = \
+    python:hgext.hooklib.reject_merge_commits.hook
+"""
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    pycompat,
+)
+
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+    if hooktype != b"pretxnchangegroup":
+        raise error.Abort(
+            _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
+        )
+
+    ctx = repo.unfiltered()[node]
+    for rev in repo.changelog.revs(start=ctx.rev()):
+        rev = repo[rev]
+        parents = rev.parents()
+        if len(parents) < 2:
+            continue
+        if all(repo[p].branch() == rev.branch() for p in parents):
+            raise error.Abort(
+                _(
+                    b'%s rejected as merge on the same branch. '
+                    b'Please consider rebase.'
+                )
+                % rev
+            )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/hooklib/reject_new_heads.py	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,41 @@
+# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""reject_new_heads is a hook to check that branches touched by new changesets
+have at most one open head. It can be used to enforce policies for
+merge-before-push or rebase-before-push. It does not handle pre-existing
+hydras.
+
+Usage:
+  [hooks]
+  pretxnclose.reject_new_heads = \
+    python:hgext.hooklib.reject_new_heads.hook
+"""
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    pycompat,
+)
+
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+    if hooktype != b"pretxnclose":
+        raise error.Abort(
+            _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
+        )
+    ctx = repo.unfiltered()[node]
+    branches = set()
+    for rev in repo.changelog.revs(start=ctx.rev()):
+        rev = repo[rev]
+        branches.add(rev.branch())
+    for branch in branches:
+        if len(repo.revs("head() and not closed() and branch(%s)", branch)) > 1:
+            raise error.Abort(
+                _(b'Changes on branch %r resulted in multiple heads')
+                % pycompat.bytestr(branch)
+            )
--- a/hgext/largefiles/basestore.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/largefiles/basestore.py	Mon Mar 09 10:18:40 2020 -0700
@@ -67,7 +67,7 @@
         ui = self.ui
 
         at = 0
-        available = self.exists(set(hash for (_filename, hash) in files))
+        available = self.exists({hash for (_filename, hash) in files})
         with ui.makeprogress(
             _(b'getting largefiles'), unit=_(b'files'), total=len(files)
         ) as progress:
--- a/hgext/largefiles/lfutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/largefiles/lfutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -92,16 +92,30 @@
     path = ui.configpath(name, b'usercache')
     if path:
         return path
+
+    hint = None
+
     if pycompat.iswindows:
         appdata = encoding.environ.get(
             b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
         )
         if appdata:
             return os.path.join(appdata, name)
+
+        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
+            b"LOCALAPPDATA",
+            b"APPDATA",
+            name,
+        )
     elif pycompat.isdarwin:
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'Library', b'Caches', name)
+
+        hint = _(b"define %s in the environment, or set %s.usercache") % (
+            b"HOME",
+            name,
+        )
     elif pycompat.isposix:
         path = encoding.environ.get(b'XDG_CACHE_HOME')
         if path:
@@ -109,11 +123,18 @@
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'.cache', name)
+
+        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
+            b"XDG_CACHE_HOME",
+            b"HOME",
+            name,
+        )
     else:
         raise error.Abort(
             _(b'unknown operating system: %s\n') % pycompat.osname
         )
-    raise error.Abort(_(b'unknown %s usercache location') % name)
+
+    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
 
 
 def inusercache(ui, hash):
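
The new hints all point at the same explicit escape hatch: configuring the
cache location directly instead of relying on the environment.  A hypothetical
setting (path illustrative) would be:

    [largefiles]
    usercache = /path/to/shared/largefiles-cache
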
--- a/hgext/largefiles/overrides.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/largefiles/overrides.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1564,11 +1564,11 @@
 def overriderollback(orig, ui, repo, **opts):
     with repo.wlock():
         before = repo.dirstate.parents()
-        orphans = set(
+        orphans = {
             f
             for f in repo.dirstate
             if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
-        )
+        }
         result = orig(ui, repo, **opts)
         after = repo.dirstate.parents()
         if before == after:
--- a/hgext/largefiles/remotestore.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/largefiles/remotestore.py	Mon Mar 09 10:18:40 2020 -0700
@@ -48,12 +48,12 @@
         )
 
     def exists(self, hashes):
-        return dict(
-            (h, s == 0)
+        return {
+            h: s == 0
             for (h, s) in pycompat.iteritems(
                 self._stat(hashes)
             )  # dict-from-generator
-        )
+        }
 
     def sendfile(self, filename, hash):
         self.ui.debug(b'remotestore: sendfile(%s, %s)\n' % (filename, hash))
--- a/hgext/lfs/TODO.rst	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/lfs/TODO.rst	Mon Mar 09 10:18:40 2020 -0700
@@ -38,9 +38,6 @@
 
    * `hg diff` is similar, and probably shouldn't see the pointer file
 
-#. `Fix https multiplexing, and re-enable workers
-   <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-January/109916.html>`_.
-
 #. Show to-be-applied rules with `hg files -r 'wdir()' 'set:lfs()'`
 
    * `debugignore` can show file + line number, so a dedicated command could be
--- a/hgext/lfs/__init__.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/lfs/__init__.py	Mon Mar 09 10:18:40 2020 -0700
@@ -181,7 +181,7 @@
     b'experimental', b'lfs.disableusercache', default=False,
 )
 eh.configitem(
-    b'experimental', b'lfs.worker-enable', default=False,
+    b'experimental', b'lfs.worker-enable', default=True,
 )
 
 eh.configitem(
--- a/hgext/lfs/blobstore.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/lfs/blobstore.py	Mon Mar 09 10:18:40 2020 -0700
@@ -21,6 +21,7 @@
 from mercurial import (
     encoding,
     error,
+    httpconnection as httpconnectionmod,
     node,
     pathutil,
     pycompat,
@@ -94,33 +95,16 @@
         pass
 
 
-class filewithprogress(object):
-    """a file-like object that supports __len__ and read.
-
-    Useful to provide progress information for how many bytes are read.
+class lfsuploadfile(httpconnectionmod.httpsendfile):
+    """a file-like object that supports keepalive.
     """
 
-    def __init__(self, fp, callback):
-        self._fp = fp
-        self._callback = callback  # func(readsize)
-        fp.seek(0, os.SEEK_END)
-        self._len = fp.tell()
-        fp.seek(0)
-
-    def __len__(self):
-        return self._len
+    def __init__(self, ui, filename):
+        super(lfsuploadfile, self).__init__(ui, filename, b'rb')
+        self.read = self._data.read
 
-    def read(self, size):
-        if self._fp is None:
-            return b''
-        data = self._fp.read(size)
-        if data:
-            if self._callback:
-                self._callback(len(data))
-        else:
-            self._fp.close()
-            self._fp = None
-        return data
+    def _makeprogress(self):
+        return None  # progress is handled by the worker client
 
 
 class local(object):
@@ -144,6 +128,17 @@
     def open(self, oid):
         """Open a read-only file descriptor to the named blob, in either the
         usercache or the local store."""
+        return open(self.path(oid), 'rb')
+
+    def path(self, oid):
+        """Build the path for the given blob ``oid``.
+
+        If the blob exists locally, the path may point to either the usercache
+        or the local store.  If it doesn't, it will point to the local store.
+        This is meant for situations where existing code that isn't LFS aware
+        needs to open a blob.  Generally, prefer the ``open`` method on this
+        class.
+        """
         # The usercache is the most likely place to hold the file.  Commit will
         # write to both it and the local store, as will anything that downloads
         # the blobs.  However, things like clone without an update won't
@@ -151,9 +146,9 @@
         # the usercache is the only place it _could_ be.  If not present, the
         # missing file msg here will indicate the local repo, not the usercache.
         if self.cachevfs.exists(oid):
-            return self.cachevfs(oid, b'rb')
+            return self.cachevfs.join(oid)
 
-        return self.vfs(oid, b'rb')
+        return self.vfs.join(oid)
 
     def download(self, oid, src, content_length):
         """Read the blob from the remote source in chunks, verify the content,
@@ -495,15 +490,17 @@
                     _(b'detected corrupt lfs object: %s') % oid,
                     hint=_(b'run hg verify'),
                 )
-            request.data = filewithprogress(localstore.open(oid), None)
-            request.get_method = lambda: r'PUT'
-            request.add_header('Content-Type', 'application/octet-stream')
-            request.add_header('Content-Length', len(request.data))
 
         for k, v in headers:
             request.add_header(pycompat.strurl(k), pycompat.strurl(v))
 
         try:
+            if action == b'upload':
+                request.data = lfsuploadfile(self.ui, localstore.path(oid))
+                request.get_method = lambda: 'PUT'
+                request.add_header('Content-Type', 'application/octet-stream')
+                request.add_header('Content-Length', request.data.length)
+
             with contextlib.closing(self.urlopener.open(request)) as res:
                 contentlength = res.info().get(b"content-length")
                 ui = self.ui  # Shorten debug lines
@@ -545,6 +542,9 @@
             raise LfsRemoteError(
                 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
             )
+        finally:
+            if request.data:
+                request.data.close()
 
     def _batch(self, pointers, localstore, action):
         if action not in [b'upload', b'download']:
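A rough sketch, assuming the ``urlopener`` and the ``lfsuploadfile`` class defined above, of how the new upload path composes: the blob is located with ``localstore.path(oid)``, wrapped in the keepalive-capable upload file, and closed in ``finally`` so the descriptor is released even when the request fails. The helper name and its exact shape are made up for illustration:

    import contextlib

    def _upload_blob_sketch(self, request, localstore, oid):
        # condensed, hypothetical view of the hunks above, not the real method
        request.data = lfsuploadfile(self.ui, localstore.path(oid))
        request.get_method = lambda: 'PUT'
        request.add_header('Content-Type', 'application/octet-stream')
        request.add_header('Content-Length', request.data.length)
        try:
            with contextlib.closing(self.urlopener.open(request)) as res:
                return res.read()
        finally:
            if request.data:
                request.data.close()
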
--- a/hgext/logtoprocess.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/logtoprocess.py	Mon Mar 09 10:18:40 2020 -0700
@@ -59,6 +59,13 @@
 
     def log(self, ui, event, msg, opts):
         script = self._scripts[event]
+        maxmsg = 100000
+        if len(msg) > maxmsg:
+            # Each env var has a 128KiB limit on Linux. msg can be long, in
+            # particular for the command event, where it's the full command
+            # line. Prefer truncating the message to raising an "Argument
+            # list too long" error.
+            msg = msg[:maxmsg] + b' (truncated)'
         env = {
             b'EVENT': event,
             b'HGPID': os.getpid(),
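A tiny sketch of the truncation guard being added here, with a smaller hypothetical limit so the effect is visible; the real hook keeps the first 100000 bytes and appends a marker rather than letting the exec fail with "Argument list too long":

    def _clampmsg(msg, maxmsg=100000):
        # keep a bounded prefix of the message for use as an env var value
        if len(msg) > maxmsg:
            msg = msg[:maxmsg] + b' (truncated)'
        return msg

    assert _clampmsg(b'a' * 10, maxmsg=4) == b'aaaa (truncated)'
    assert _clampmsg(b'short') == b'short'
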
--- a/hgext/mq.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/mq.py	Mon Mar 09 10:18:40 2020 -0700
@@ -858,7 +858,7 @@
         strip(self.ui, repo, [n], update=False, backup=False)
 
         ctx = repo[rev]
-        ret = hg.merge(repo, rev)
+        ret = hg.merge(ctx, remind=False)
         if ret:
             raise error.Abort(_(b"update returned %d") % ret)
         n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
@@ -1162,7 +1162,7 @@
 
         if unknown:
             if numrevs:
-                rev = dict((entry.name, entry.node) for entry in qfinished)
+                rev = {entry.name: entry.node for entry in qfinished}
                 for p in unknown:
                     msg = _(b'revision %s refers to unknown patches: %s\n')
                     self.ui.warn(msg % (short(rev[p]), p))
@@ -3361,7 +3361,7 @@
         ui.write(b'\n')
 
     q = repo.mq
-    applied = set(p.name for p in q.applied)
+    applied = {p.name for p in q.applied}
     patch = None
     args = list(args)
     if opts.get('list'):
--- a/hgext/phabricator.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/phabricator.py	Mon Mar 09 10:18:40 2020 -0700
@@ -257,15 +257,17 @@
                         return fn(*args, **kwargs)
             return fn(*args, **kwargs)
 
-        inner.__name__ = fn.__name__
-        inner.__doc__ = fn.__doc__
+        cmd = util.checksignature(inner, depth=2)
+        cmd.__name__ = fn.__name__
+        cmd.__doc__ = fn.__doc__
+
         return command(
             name,
             fullflags,
             spec,
             helpcategory=helpcategory,
             optionalrepo=optionalrepo,
-        )(inner)
+        )(cmd)
 
     return decorate
 
@@ -481,7 +483,7 @@
             ]
 
             # "precursors" as known by Phabricator
-            phprecset = set(getnode(d) for d in diffs)
+            phprecset = {getnode(d) for d in diffs}
 
             # Ignore if precursors (Phabricator and local repo) do not overlap,
             # and force is not set (when commit message says nothing)
@@ -747,12 +749,14 @@
     return fphid
 
 
-def addoldbinary(pchange, fctx):
+def addoldbinary(pchange, oldfctx, fctx):
     """add the metadata for the previous version of a binary file to the
     phabchange for the new version
+
+    ``oldfctx`` is the previous version of the file; ``fctx`` is the new
+    version of the file, or None if the file is being removed.
     """
-    oldfctx = fctx.p1()
-    if fctx.cmp(oldfctx):
+    if not fctx or fctx.cmp(oldfctx):
         # Files differ, add the old one
         pchange.metadata[b'old:file:size'] = oldfctx.size()
         mimeguess, _enc = mimetypes.guess_type(
@@ -794,8 +798,6 @@
     """
     try:
         fctx.data().decode('utf-8')
-        if fctx.parents():
-            fctx.p1().data().decode('utf-8')
         return False
     except UnicodeDecodeError:
         fctx.repo().ui.write(
@@ -812,8 +814,8 @@
             currentPath=fname, oldPath=fname, type=DiffChangeType.DELETE
         )
         pchange.addoldmode(gitmode[ctx.p1()[fname].flags()])
-        fctx = ctx.p1()[fname]
-        if not (fctx.isbinary() or notutf8(fctx)):
+        oldfctx = ctx.p1()[fname]
+        if not (oldfctx.isbinary() or notutf8(oldfctx)):
             maketext(pchange, ctx, fname)
 
         pdiff.addchange(pchange)
@@ -823,6 +825,7 @@
     """add modified files to the phabdiff"""
     for fname in modified:
         fctx = ctx[fname]
+        oldfctx = fctx.p1()
         pchange = phabchange(currentPath=fname, oldPath=fname)
         filemode = gitmode[ctx[fname].flags()]
         originalmode = gitmode[ctx.p1()[fname].flags()]
@@ -830,9 +833,14 @@
             pchange.addoldmode(originalmode)
             pchange.addnewmode(filemode)
 
-        if fctx.isbinary() or notutf8(fctx):
+        if (
+            fctx.isbinary()
+            or notutf8(fctx)
+            or oldfctx.isbinary()
+            or notutf8(oldfctx)
+        ):
             makebinary(pchange, fctx)
-            addoldbinary(pchange, fctx)
+            addoldbinary(pchange, fctx.p1(), fctx)
         else:
             maketext(pchange, ctx, fname)
 
@@ -847,6 +855,7 @@
     movedchanges = {}
     for fname in added:
         fctx = ctx[fname]
+        oldfctx = None
         pchange = phabchange(currentPath=fname)
 
         filemode = gitmode[ctx[fname].flags()]
@@ -854,7 +863,8 @@
 
         if renamed:
             originalfname = renamed[0]
-            originalmode = gitmode[ctx.p1()[originalfname].flags()]
+            oldfctx = ctx.p1()[originalfname]
+            originalmode = gitmode[oldfctx.flags()]
             pchange.oldPath = originalfname
 
             if originalfname in removed:
@@ -889,10 +899,14 @@
             pchange.addnewmode(gitmode[fctx.flags()])
             pchange.type = DiffChangeType.ADD
 
-        if fctx.isbinary() or notutf8(fctx):
+        if (
+            fctx.isbinary()
+            or notutf8(fctx)
+            or (oldfctx and (oldfctx.isbinary() or notutf8(oldfctx)))
+        ):
             makebinary(pchange, fctx)
             if renamed:
-                addoldbinary(pchange, fctx)
+                addoldbinary(pchange, oldfctx, fctx)
         else:
             maketext(pchange, ctx, fname)
 
@@ -1040,15 +1054,15 @@
     return revision, diff
 
 
-def userphids(repo, names):
+def userphids(ui, names):
     """convert user names to PHIDs"""
     names = [name.lower() for name in names]
     query = {b'constraints': {b'usernames': names}}
-    result = callconduit(repo.ui, b'user.search', query)
+    result = callconduit(ui, b'user.search', query)
     # username not found is not an error of the API. So check if we have missed
     # some names here.
     data = result[b'data']
-    resolved = set(entry[b'fields'][b'username'].lower() for entry in data)
+    resolved = {entry[b'fields'][b'username'].lower() for entry in data}
     unresolved = set(names) - resolved
     if unresolved:
         raise error.Abort(
@@ -1127,10 +1141,13 @@
     blockers = opts.get(b'blocker', [])
     phids = []
     if reviewers:
-        phids.extend(userphids(repo, reviewers))
+        phids.extend(userphids(repo.ui, reviewers))
     if blockers:
         phids.extend(
-            map(lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers))
+            map(
+                lambda phid: b'blocking(%s)' % phid,
+                userphids(repo.ui, blockers),
+            )
         )
     if phids:
         actions.append({b'type': b'reviewers.add', b'value': phids})
@@ -1183,7 +1200,7 @@
         else:
             # Nothing changed. But still set "newrevphid" so the next revision
             # could depend on this one and "newrevid" for the summary line.
-            newrevphid = querydrev(repo, b'%d' % revid)[0][b'phid']
+            newrevphid = querydrev(repo.ui, b'%d' % revid)[0][b'phid']
             newrevid = revid
             action = b'skipped'
 
@@ -1398,7 +1415,7 @@
     return drevs, ancestordrevs
 
 
-def querydrev(repo, spec):
+def querydrev(ui, spec):
     """return a list of "Differential Revision" dicts
 
     spec is a string using a simple query language, see docstring in phabread
@@ -1407,46 +1424,49 @@
     A "Differential Revision dict" looks like:
 
         {
-            "id": "2",
-            "phid": "PHID-DREV-672qvysjcczopag46qty",
-            "title": "example",
-            "uri": "https://phab.example.com/D2",
+            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
+            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
+            "auxiliary": {
+              "phabricator:depends-on": [
+                "PHID-DREV-gbapp366kutjebt7agcd"
+              ]
+              "phabricator:projects": [],
+            },
+            "branch": "default",
+            "ccs": [],
+            "commits": [],
             "dateCreated": "1499181406",
             "dateModified": "1499182103",
-            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
-            "status": "0",
-            "statusName": "Needs Review",
-            "properties": [],
-            "branch": null,
-            "summary": "",
-            "testPlan": "",
-            "lineCount": "2",
-            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
             "diffs": [
               "3",
               "4",
             ],
-            "commits": [],
+            "hashes": [],
+            "id": "2",
+            "lineCount": "2",
+            "phid": "PHID-DREV-672qvysjcczopag46qty",
+            "properties": {},
+            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
             "reviewers": [],
-            "ccs": [],
-            "hashes": [],
-            "auxiliary": {
-              "phabricator:projects": [],
-              "phabricator:depends-on": [
-                "PHID-DREV-gbapp366kutjebt7agcd"
-              ]
-            },
-            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
             "sourcePath": null
+            "status": "0",
+            "statusName": "Needs Review",
+            "summary": "",
+            "testPlan": "",
+            "title": "example",
+            "uri": "https://phab.example.com/D2",
         }
     """
+    # TODO: replace differential.query and differential.querydiffs with
+    # differential.diff.search because the former (and their output) are
+    # frozen, and planned to be deprecated and removed.
 
     def fetch(params):
         """params -> single drev or None"""
         key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
         if key in prefetched:
             return prefetched[key]
-        drevs = callconduit(repo.ui, b'differential.query', params)
+        drevs = callconduit(ui, b'differential.query', params)
         # Fill prefetched with the result
         for drev in drevs:
             prefetched[drev[b'phid']] = drev
@@ -1483,7 +1503,7 @@
     drevs, ancestordrevs = _prefetchdrevs(tree)
 
     # developer config: phabricator.batchsize
-    batchsize = repo.ui.configint(b'phabricator', b'batchsize')
+    batchsize = ui.configint(b'phabricator', b'batchsize')
 
     # Prefetch Differential Revisions in batch
     tofetch = set(drevs)
@@ -1544,6 +1564,7 @@
 
         "properties": {
           "hg:meta": {
+            "branch": "default",
             "date": "1499571514 25200",
             "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
             "user": "Foo Bar <foo@example.com>",
@@ -1557,16 +1578,16 @@
           "local:commits": {
             "98c08acae292b2faf60a279b4189beb6cff1414d": {
               "author": "Foo Bar",
-              "time": 1499546314,
+              "authorEmail": "foo@example.com"
               "branch": "default",
-              "tag": "",
               "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
+              "local": "1000",
+              "message": "...",
+              "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
               "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
-              "local": "1000",
-              "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
               "summary": "...",
-              "message": "...",
-              "authorEmail": "foo@example.com"
+              "tag": "",
+              "time": 1499546314,
             }
           }
         }
@@ -1605,24 +1626,26 @@
     return meta
 
 
-def readpatch(repo, drevs, write):
+def readpatch(ui, drevs, write):
     """generate plain-text patch readable by 'hg import'
 
-    write is usually ui.write. drevs is what "querydrev" returns, results of
+    write takes a list of (DREV, bytes), where DREV is the differential number
+    (as bytes, without the "D" prefix) and the bytes are the text of a patch
+    to be imported. drevs is what "querydrev" returns, results of
     "differential.query".
     """
     # Prefetch hg:meta property for all diffs
-    diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs))
-    diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids})
+    diffids = sorted({max(int(v) for v in drev[b'diffs']) for drev in drevs})
+    diffs = callconduit(ui, b'differential.querydiffs', {b'ids': diffids})
+
+    patches = []
 
     # Generate patch for each drev
     for drev in drevs:
-        repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
+        ui.note(_(b'reading D%s\n') % drev[b'id'])
 
         diffid = max(int(v) for v in drev[b'diffs'])
-        body = callconduit(
-            repo.ui, b'differential.getrawdiff', {b'diffID': diffid}
-        )
+        body = callconduit(ui, b'differential.getrawdiff', {b'diffID': diffid})
         desc = getdescfromdrev(drev)
         header = b'# HG changeset patch\n'
 
@@ -1635,7 +1658,10 @@
                 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
 
         content = b'%s%s\n%s' % (header, desc, body)
-        write(content)
+        patches.append((drev[b'id'], content))
+
+    # Write patches to the supplied callback
+    write(patches)
 
 
 @vcrcommand(
@@ -1643,6 +1669,7 @@
     [(b'', b'stack', False, _(b'read dependencies'))],
     _(b'DREVSPEC [OPTIONS]'),
     helpcategory=command.CATEGORY_IMPORT_EXPORT,
+    optionalrepo=True,
 )
 def phabread(ui, repo, spec, **opts):
     """print patches from Phabricator suitable for importing
@@ -1666,8 +1693,13 @@
     opts = pycompat.byteskwargs(opts)
     if opts.get(b'stack'):
         spec = b':(%s)' % spec
-    drevs = querydrev(repo, spec)
-    readpatch(repo, drevs, ui.write)
+    drevs = querydrev(ui, spec)
+
+    def _write(patches):
+        for drev, content in patches:
+            ui.write(content)
+
+    readpatch(ui, drevs, _write)
 
 
 @vcrcommand(
@@ -1681,6 +1713,7 @@
     ],
     _(b'DREVSPEC [OPTIONS]'),
     helpcategory=command.CATEGORY_IMPORT_EXPORT,
+    optionalrepo=True,
 )
 def phabupdate(ui, repo, spec, **opts):
     """update Differential Revision in batch
@@ -1696,7 +1729,7 @@
     for f in flags:
         actions.append({b'type': f, b'value': True})
 
-    drevs = querydrev(repo, spec)
+    drevs = querydrev(ui, spec)
     for i, drev in enumerate(drevs):
         if i + 1 == len(drevs) and opts.get(b'comment'):
             actions.append({b'type': b'comment', b'value': opts[b'comment']})
@@ -1759,11 +1792,11 @@
     """Phabricator differiential status"""
     revs = repo.revs('sort(_underway(), topo)')
     drevmap = getdrevmap(repo, revs)
-    unknownrevs, drevids, revsbydrevid = [], set([]), {}
+    unknownrevs, drevids, revsbydrevid = [], set(), {}
     for rev, drevid in pycompat.iteritems(drevmap):
         if drevid is not None:
             drevids.add(drevid)
-            revsbydrevid.setdefault(drevid, set([])).add(rev)
+            revsbydrevid.setdefault(drevid, set()).add(rev)
         else:
             unknownrevs.append(rev)
 
--- a/hgext/purge.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/purge.py	Mon Mar 09 10:18:40 2020 -0700
@@ -48,6 +48,7 @@
     [
         (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
         (b'', b'all', None, _(b'purge ignored files too')),
+        (b'i', b'ignored', None, _(b'purge only ignored files')),
         (b'', b'dirs', None, _(b'purge empty directories')),
         (b'', b'files', None, _(b'purge files')),
         (b'p', b'print', None, _(b'print filenames instead of deleting them')),
@@ -80,7 +81,7 @@
     But it will leave untouched:
 
     - Modified and unmodified tracked files
-    - Ignored files (unless --all is specified)
+    - Ignored files (unless -i or --all is specified)
     - New files added to the repository (with :hg:`add`)
 
     The --files and --dirs options can be used to direct purge to delete
@@ -96,12 +97,19 @@
     option.
     '''
     opts = pycompat.byteskwargs(opts)
+    cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
 
     act = not opts.get(b'print')
     eol = b'\n'
     if opts.get(b'print0'):
         eol = b'\0'
         act = False  # --print0 implies --print
+    if opts.get(b'all', False):
+        ignored = True
+        unknown = True
+    else:
+        ignored = opts.get(b'ignored', False)
+        unknown = not ignored
 
     removefiles = opts.get(b'files')
     removedirs = opts.get(b'dirs')
@@ -115,7 +123,8 @@
     paths = mergemod.purge(
         repo,
         match,
-        ignored=opts.get(b'all', False),
+        unknown=unknown,
+        ignored=ignored,
         removeemptydirs=removedirs,
         removefiles=removefiles,
         abortonerror=opts.get(b'abort_on_err'),
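The option handling added here reduces to a small truth table: ``--all`` selects both ignored and unknown files, the new ``-i``/``--ignored`` selects only ignored files, and the default remains unknown files only. A minimal standalone sketch of that mapping, with a hypothetical helper name:

    def _purgetargets(allfiles=False, ignored=False):
        # mirrors the flag handling above; --all and --ignored are exclusive
        if allfiles and ignored:
            raise ValueError('cannot specify both --all and --ignored')
        if allfiles:
            return {'ignored': True, 'unknown': True}
        return {'ignored': ignored, 'unknown': not ignored}

    assert _purgetargets() == {'ignored': False, 'unknown': True}
    assert _purgetargets(ignored=True) == {'ignored': True, 'unknown': False}
    assert _purgetargets(allfiles=True) == {'ignored': True, 'unknown': True}
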
--- a/hgext/rebase.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/rebase.py	Mon Mar 09 10:18:40 2020 -0700
@@ -37,6 +37,7 @@
     hg,
     merge as mergemod,
     mergeutil,
+    node as nodemod,
     obsolete,
     obsutil,
     patch,
@@ -177,6 +178,7 @@
         # --continue or --abort)), the original repo should be used so
         # visibility-dependent revsets are correct.
         self.prepared = False
+        self.resume = False
         self._repo = repo
 
         self.ui = ui
@@ -366,6 +368,7 @@
         _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
 
     def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
+        self.resume = True
         try:
             self.restorestatus()
             self.collapsemsg = restorecollapsemsg(self.repo, isabort)
@@ -503,7 +506,7 @@
         p.complete()
         ui.note(_(b'rebase merging completed\n'))
 
-    def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
+    def _concludenode(self, rev, p1, editor, commitmsg=None):
         '''Commit the wd changes with parents p1 and p2.
 
         Reuse commit info from rev but also store useful information in extra.
@@ -527,8 +530,6 @@
             if self.inmemory:
                 newnode = commitmemorynode(
                     repo,
-                    p1,
-                    p2,
                     wctx=self.wctx,
                     extra=extra,
                     commitmsg=commitmsg,
@@ -540,8 +541,6 @@
             else:
                 newnode = commitnode(
                     repo,
-                    p1,
-                    p2,
                     extra=extra,
                     commitmsg=commitmsg,
                     editor=editor,
@@ -549,11 +548,6 @@
                     date=date,
                 )
 
-            if newnode is None:
-                # If it ended up being a no-op commit, then the normal
-                # merge state clean-up path doesn't happen, so do it
-                # here. Fix issue5494
-                mergemod.mergestate.clean(repo)
             return newnode
 
     def _rebasenode(self, tr, rev, allowdivergence, progressfn):
@@ -605,8 +599,9 @@
                 self.skipped,
                 self.obsoletenotrebased,
             )
-            if not self.inmemory and len(repo[None].parents()) == 2:
+            if self.resume and self.wctx.p1().rev() == p1:
                 repo.ui.debug(b'resuming interrupted rebase\n')
+                self.resume = False
             else:
                 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
                 with ui.configoverride(overrides, b'rebase'):
@@ -614,6 +609,7 @@
                         repo,
                         rev,
                         p1,
+                        p2,
                         base,
                         self.collapsef,
                         dest,
@@ -635,13 +631,9 @@
                 editor = cmdutil.getcommiteditor(
                     editform=editform, **pycompat.strkwargs(opts)
                 )
-                newnode = self._concludenode(rev, p1, p2, editor)
+                newnode = self._concludenode(rev, p1, editor)
             else:
                 # Skip commit if we are collapsing
-                if self.inmemory:
-                    self.wctx.setbase(repo[p1])
-                else:
-                    repo.setparents(repo[p1].node())
                 newnode = None
             # Update the state
             if newnode is not None:
@@ -696,8 +688,9 @@
             editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
             revtoreuse = max(self.state)
 
+            self.wctx.setparents(repo[p1].node(), repo[self.external].node())
             newnode = self._concludenode(
-                revtoreuse, p1, self.external, editor, commitmsg=commitmsg
+                revtoreuse, p1, editor, commitmsg=commitmsg
             )
 
             if newnode is not None:
@@ -799,9 +792,7 @@
 
                 # Update away from the rebase if necessary
                 if shouldupdate:
-                    mergemod.update(
-                        repo, self.originalwd, branchmerge=False, force=True
-                    )
+                    mergemod.clean_update(repo[self.originalwd])
 
                 # Strip from the first rebased revision
                 if rebased:
@@ -1011,10 +1002,10 @@
     action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
     if action:
         cmdutil.check_incompatible_arguments(
-            opts, action, b'confirm', b'dry_run'
+            opts, action, [b'confirm', b'dry_run']
         )
         cmdutil.check_incompatible_arguments(
-            opts, action, b'rev', b'source', b'base', b'dest'
+            opts, action, [b'rev', b'source', b'base', b'dest']
         )
     cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
     cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
@@ -1028,7 +1019,7 @@
     if opts.get(b'auto_orphans'):
         disallowed_opts = set(opts) - {b'auto_orphans'}
         cmdutil.check_incompatible_arguments(
-            opts, b'auto_orphans', *disallowed_opts
+            opts, b'auto_orphans', disallowed_opts
         )
 
         userrevs = list(repo.revs(opts.get(b'auto_orphans')))
@@ -1265,8 +1256,7 @@
         if not src:
             ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
             return None
-        rebaseset = repo.revs(b'(%ld)::', src)
-        assert rebaseset
+        rebaseset = repo.revs(b'(%ld)::', src) or src
     else:
         base = scmutil.revrange(repo, [basef or b'.'])
         if not base:
@@ -1341,6 +1331,8 @@
                 )
             return None
 
+    if nodemod.wdirrev in rebaseset:
+        raise error.Abort(_(b'cannot rebase the working copy'))
     rebasingwcp = repo[b'.'].rev() in rebaseset
     ui.log(
         b"rebase",
@@ -1420,7 +1412,7 @@
     )
 
 
-def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
+def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
     '''Commit the memory changes with parents p1 and p2.
     Return node of committed revision.'''
     # Replicates the empty check in ``repo.commit``.
@@ -1433,7 +1425,6 @@
     if b'branch' in extra:
         branch = extra[b'branch']
 
-    wctx.setparents(repo[p1].node(), repo[p2].node())
     memctx = wctx.tomemctx(
         commitmsg,
         date=date,
@@ -1447,15 +1438,13 @@
     return commitres
 
 
-def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
+def commitnode(repo, editor, extra, user, date, commitmsg):
     '''Commit the wd changes with parents p1 and p2.
     Return node of committed revision.'''
     dsguard = util.nullcontextmanager()
     if not repo.ui.configbool(b'rebase', b'singletransaction'):
         dsguard = dirstateguard.dirstateguard(repo, b'rebase')
     with dsguard:
-        repo.setparents(repo[p1].node(), repo[p2].node())
-
         # Commit might fail if unresolved files exist
         newnode = repo.commit(
             text=commitmsg, user=user, date=date, extra=extra, editor=editor
@@ -1465,7 +1454,7 @@
         return newnode
 
 
-def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
+def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
     """Rebase a single revision rev on top of p1 using base as merge ancestor"""
     # Merge phase
     # Update to destination and merge it with local
@@ -1475,7 +1464,7 @@
     else:
         if repo[b'.'].rev() != p1:
             repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
-            mergemod.update(repo, p1, branchmerge=False, force=True)
+            mergemod.clean_update(p1ctx)
         else:
             repo.ui.debug(b" already in destination\n")
         # This is, alas, necessary to invalidate workingctx's manifest cache,
@@ -1499,6 +1488,7 @@
         labels=[b'dest', b'source'],
         wc=wctx,
     )
+    wctx.setparents(p1ctx.node(), repo[p2].node())
     if collapse:
         copies.graftcopies(wctx, ctx, repo[dest])
     else:
@@ -1678,22 +1668,6 @@
             elif p in state and state[p] > 0:
                 np = state[p]
 
-            # "bases" only record "special" merge bases that cannot be
-            # calculated from changelog DAG (i.e. isancestor(p, np) is False).
-            # For example:
-            #
-            #   B'   # rebase -s B -d D, when B was rebased to B'. dest for C
-            #   | C  # is B', but merge base for C is B, instead of
-            #   D |  # changelog.ancestor(C, B') == A. If changelog DAG and
-            #   | B  # "state" edges are merged (so there will be an edge from
-            #   |/   # B to B'), the merge base is still ancestor(C, B') in
-            #   A    # the merged graph.
-            #
-            # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
-            # which uses "virtual null merge" to explain this situation.
-            if isancestor(p, np):
-                bases[i] = nullrev
-
             # If one parent becomes an ancestor of the other, drop the ancestor
             for j, x in enumerate(newps[:i]):
                 if x == nullrev:
@@ -1739,12 +1713,6 @@
     if any(p != nullrev and isancestor(rev, p) for p in newps):
         raise error.Abort(_(b'source is ancestor of destination'))
 
-    # "rebasenode" updates to new p1, use the corresponding merge base.
-    if bases[0] != nullrev:
-        base = bases[0]
-    else:
-        base = None
-
     # Check if the merge will contain unwanted changes. That may happen if
     # there are multiple special (non-changelog ancestor) merge bases, which
     # cannot be handled well by the 3-way merge algorithm. For example:
@@ -1760,15 +1728,16 @@
     # But our merge base candidates (D and E in above case) could still be
     # better than the default (ancestor(F, Z) == null). Therefore still
     # pick one (so choose p1 above).
-    if sum(1 for b in set(bases) if b != nullrev) > 1:
+    if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
         unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
         for i, base in enumerate(bases):
-            if base == nullrev:
+            if base == nullrev or base in newps:
                 continue
             # Revisions in the side (not chosen as merge base) branch that
             # might contain "surprising" contents
+            other_bases = set(bases) - {base}
             siderevs = list(
-                repo.revs(b'((%ld-%d) %% (%d+%d))', bases, base, base, dest)
+                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
             )
 
             # If those revisions are covered by rebaseset, the result is good.
@@ -1786,35 +1755,40 @@
                     )
                 )
 
-        # Choose a merge base that has a minimal number of unwanted revs.
-        l, i = min(
-            (len(revs), i)
-            for i, revs in enumerate(unwanted)
-            if revs is not None
-        )
-        base = bases[i]
-
-        # newps[0] should match merge base if possible. Currently, if newps[i]
-        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
-        # the other's ancestor. In that case, it's fine to not swap newps here.
-        # (see CASE-1 and CASE-2 above)
-        if i != 0 and newps[i] != nullrev:
-            newps[0], newps[i] = newps[i], newps[0]
+        if any(revs is not None for revs in unwanted):
+            # Choose a merge base that has a minimal number of unwanted revs.
+            l, i = min(
+                (len(revs), i)
+                for i, revs in enumerate(unwanted)
+                if revs is not None
+            )
 
-        # The merge will include unwanted revisions. Abort now. Revisit this if
-        # we have a more advanced merge algorithm that handles multiple bases.
-        if l > 0:
-            unwanteddesc = _(b' or ').join(
-                (
-                    b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
-                    for revs in unwanted
-                    if revs is not None
+            # The merge will include unwanted revisions. Abort now. Revisit this if
+            # we have a more advanced merge algorithm that handles multiple bases.
+            if l > 0:
+                unwanteddesc = _(b' or ').join(
+                    (
+                        b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
+                        for revs in unwanted
+                        if revs is not None
+                    )
                 )
-            )
-            raise error.Abort(
-                _(b'rebasing %d:%s will include unwanted changes from %s')
-                % (rev, repo[rev], unwanteddesc)
-            )
+                raise error.Abort(
+                    _(b'rebasing %d:%s will include unwanted changes from %s')
+                    % (rev, repo[rev], unwanteddesc)
+                )
+
+            # newps[0] should match merge base if possible. Currently, if newps[i]
+            # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
+            # the other's ancestor. In that case, it's fine to not swap newps here.
+            # (see CASE-1 and CASE-2 above)
+            if i != 0:
+                if newps[i] != nullrev:
+                    newps[0], newps[i] = newps[i], newps[0]
+                bases[0], bases[i] = bases[i], bases[0]
+
+    # "rebasenode" updates to new p1, use the corresponding merge base.
+    base = bases[0]
 
     repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))
 
@@ -1962,7 +1936,7 @@
     # applied patch. But it prevents messing up the working directory when
     # a partially completed rebase is blocked by mq.
     if b'qtip' in repo.tags():
-        mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
+        mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
         if set(destmap.values()) & mqapplied:
             raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
 
@@ -2147,7 +2121,7 @@
 
 def _filterobsoleterevs(repo, revs):
     """returns a set of the obsolete revisions in revs"""
-    return set(r for r in revs if repo[r].obsolete())
+    return {r for r in revs if repo[r].obsolete()}
 
 
 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
--- a/hgext/releasenotes.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/releasenotes.py	Mon Mar 09 10:18:40 2020 -0700
@@ -654,7 +654,7 @@
     opts = pycompat.byteskwargs(opts)
     sections = releasenotessections(ui, repo)
 
-    cmdutil.check_incompatible_arguments(opts, b'list', b'rev', b'check')
+    cmdutil.check_incompatible_arguments(opts, b'list', [b'rev', b'check'])
 
     if opts.get(b'list'):
         return _getadmonitionlist(ui, sections)
--- a/hgext/remotefilelog/__init__.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/remotefilelog/__init__.py	Mon Mar 09 10:18:40 2020 -0700
@@ -737,7 +737,7 @@
             # "link" is actually wrong here (it is set to len(changelog))
             # if changelog remains unchanged, skip writing file revisions
             # but still do a sanity check about pending multiple revisions
-            if len(set(x[3] for x in pendingfilecommits)) > 1:
+            if len({x[3] for x in pendingfilecommits}) > 1:
                 raise error.ProgrammingError(
                     b'pending multiple integer revisions are not supported'
                 )
--- a/hgext/remotefilelog/basepack.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/remotefilelog/basepack.py	Mon Mar 09 10:18:40 2020 -0700
@@ -101,7 +101,7 @@
             self._lastpack = pack
             yield pack
 
-        cachedpacks = set(pack for pack in self._lrucache)
+        cachedpacks = {pack for pack in self._lrucache}
         # Yield for paths not in the cache.
         for pack in self._packs - cachedpacks:
             self._lastpack = pack
@@ -259,7 +259,7 @@
         newpacks = []
         if now > self.lastrefresh + REFRESHRATE:
             self.lastrefresh = now
-            previous = set(p.path for p in self.packs)
+            previous = {p.path for p in self.packs}
             for filepath, __, __ in self._getavailablepackfilessorted():
                 if filepath not in previous:
                     newpack = self.getpack(filepath)
--- a/hgext/remotefilelog/contentstore.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/remotefilelog/contentstore.py	Mon Mar 09 10:18:40 2020 -0700
@@ -300,7 +300,7 @@
 
         rl = self._revlog(name)
         ancestors = {}
-        missing = set((node,))
+        missing = {node}
         for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
             ancnode = rl.node(ancrev)
             missing.discard(ancnode)
--- a/hgext/remotefilelog/datapack.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/remotefilelog/datapack.py	Mon Mar 09 10:18:40 2020 -0700
@@ -271,9 +271,9 @@
     def cleanup(self, ledger):
         entries = ledger.sources.get(self, [])
         allkeys = set(self)
-        repackedkeys = set(
+        repackedkeys = {
             (e.filename, e.node) for e in entries if e.datarepacked or e.gced
-        )
+        }
 
         if len(allkeys - repackedkeys) == 0:
             if self.path not in ledger.created:
--- a/hgext/remotefilelog/historypack.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/remotefilelog/historypack.py	Mon Mar 09 10:18:40 2020 -0700
@@ -132,7 +132,7 @@
             known = set()
         section = self._findsection(name)
         filename, offset, size, nodeindexoffset, nodeindexsize = section
-        pending = set((node,))
+        pending = {node}
         o = 0
         while o < size:
             if not pending:
@@ -291,9 +291,9 @@
     def cleanup(self, ledger):
         entries = ledger.sources.get(self, [])
         allkeys = set(self)
-        repackedkeys = set(
+        repackedkeys = {
             (e.filename, e.node) for e in entries if e.historyrepacked
-        )
+        }
 
         if len(allkeys - repackedkeys) == 0:
             if self.path not in ledger.created:
@@ -452,7 +452,7 @@
             sectionstart = self.packfp.tell()
 
             # Write the file section content
-            entrymap = dict((e[0], e) for e in entries)
+            entrymap = {e[0]: e for e in entries}
 
             def parentfunc(node):
                 x, p1, p2, x, x, x = entrymap[node]
--- a/hgext/remotefilelog/remotefilelog.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/remotefilelog/remotefilelog.py	Mon Mar 09 10:18:40 2020 -0700
@@ -429,7 +429,7 @@
             return nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
-        nodemap = dict(((v, k) for (k, v) in pycompat.iteritems(revmap)))
+        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
 
         ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
         if ancs:
@@ -444,7 +444,7 @@
             return nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
-        nodemap = dict(((v, k) for (k, v) in pycompat.iteritems(revmap)))
+        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
 
         ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
         return map(nodemap.__getitem__, ancs)
--- a/hgext/remotefilelog/repack.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/remotefilelog/repack.py	Mon Mar 09 10:18:40 2020 -0700
@@ -321,7 +321,7 @@
 
 def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
     result = []
-    fileset = set(fn for fn, mode, stat in files)
+    fileset = {fn for fn, mode, stat in files}
     for filename, mode, stat in files:
         if not filename.endswith(packsuffix):
             continue
--- a/hgext/schemes.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/schemes.py	Mon Mar 09 10:18:40 2020 -0700
@@ -97,7 +97,7 @@
             parts = parts[:-1]
         else:
             tail = b''
-        context = dict((b'%d' % (i + 1), v) for i, v in enumerate(parts))
+        context = {b'%d' % (i + 1): v for i, v in enumerate(parts)}
         return b''.join(self.templater.process(self.url, context)) + tail
 
 
--- a/hgext/sparse.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/sparse.py	Mon Mar 09 10:18:40 2020 -0700
@@ -246,7 +246,7 @@
             if changedfiles is not None:
                 # In _rebuild, these files will be deleted from the dirstate
                 # when they are not found to be in allfiles
-                dirstatefilestoremove = set(f for f in self if not matcher(f))
+                dirstatefilestoremove = {f for f in self if not matcher(f)}
                 changedfiles = dirstatefilestoremove.union(changedfiles)
 
         return orig(self, parent, allfiles, changedfiles)
--- a/hgext/strip.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/strip.py	Mon Mar 09 10:18:40 2020 -0700
@@ -228,7 +228,7 @@
             for p in repo.dirstate.parents()
         )
 
-        rootnodes = set(cl.node(r) for r in roots)
+        rootnodes = {cl.node(r) for r in roots}
 
         q = getattr(repo, 'mq', None)
         if q is not None and q.applied:
--- a/hgext/transplant.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/transplant.py	Mon Mar 09 10:18:40 2020 -0700
@@ -761,12 +761,12 @@
     def checkopts(opts, revs):
         if opts.get(b'continue'):
             cmdutil.check_incompatible_arguments(
-                opts, b'continue', b'branch', b'all', b'merge'
+                opts, b'continue', [b'branch', b'all', b'merge']
             )
             return
         if opts.get(b'stop'):
             cmdutil.check_incompatible_arguments(
-                opts, b'stop', b'branch', b'all', b'merge'
+                opts, b'stop', [b'branch', b'all', b'merge']
             )
             return
         if not (
@@ -840,10 +840,10 @@
 
         tf = tp.transplantfilter(repo, source, p1)
         if opts.get(b'prune'):
-            prune = set(
+            prune = {
                 source[r].node()
                 for r in scmutil.revrange(source, opts.get(b'prune'))
-            )
+            }
             matchfn = lambda x: tf(x) and x not in prune
         else:
             matchfn = tf
--- a/hgext/uncommit.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/hgext/uncommit.py	Mon Mar 09 10:18:40 2020 -0700
@@ -65,7 +65,7 @@
     base = ctx.p1()
     # ctx
     initialfiles = set(ctx.files())
-    exclude = set(f for f in initialfiles if match(f))
+    exclude = {f for f in initialfiles if match(f)}
 
     # No files matched commit, so nothing excluded
     if not exclude:
@@ -78,9 +78,9 @@
     files = initialfiles - exclude
     # Filter copies
     copied = copiesmod.pathcopies(base, ctx)
-    copied = dict(
-        (dst, src) for dst, src in pycompat.iteritems(copied) if dst in files
-    )
+    copied = {
+        dst: src for dst, src in pycompat.iteritems(copied) if dst in files
+    }
 
     def filectxfn(repo, memctx, path, contentctx=ctx, redirect=()):
         if path not in contentctx:
--- a/i18n/polib.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/i18n/polib.py	Mon Mar 09 10:18:40 2020 -0700
@@ -722,8 +722,8 @@
             object POFile, the reference catalog.
         """
         # Store entries in dict/set for faster access
-        self_entries = dict((entry.msgid, entry) for entry in self)
-        refpot_msgids = set(entry.msgid for entry in refpot)
+        self_entries = {entry.msgid: entry for entry in self}
+        refpot_msgids = {entry.msgid for entry in refpot}
         # Merge entries that are in the refpot
         for entry in refpot:
             e = self_entries.get(entry.msgid)
@@ -1808,9 +1808,9 @@
                 entry = self._build_entry(
                     msgid=msgid_tokens[0],
                     msgid_plural=msgid_tokens[1],
-                    msgstr_plural=dict(
-                        (k, v) for k, v in enumerate(msgstr.split(b('\0')))
-                    ),
+                    msgstr_plural={
+                        k: v for k, v in enumerate(msgstr.split(b('\0')))
+                    },
                 )
             else:
                 entry = self._build_entry(msgid=msgid, msgstr=msgstr)
--- a/mercurial/ancestor.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/ancestor.py	Mon Mar 09 10:18:40 2020 -0700
@@ -138,7 +138,7 @@
         k = 0
         for i in interesting:
             k |= i
-        return set(n for (i, n) in mapping if k & i)
+        return {n for (i, n) in mapping if k & i}
 
     gca = commonancestorsheads(pfunc, *orignodes)
 
--- a/mercurial/archival.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/archival.py	Mon Mar 09 10:18:40 2020 -0700
@@ -355,7 +355,7 @@
         if match(name):
             write(name, 0o644, False, lambda: buildmetadata(ctx))
 
-    files = [f for f in ctx.manifest().matches(match)]
+    files = list(ctx.manifest().walk(match))
     total = len(files)
     if total:
         files.sort()
--- a/mercurial/bookmarks.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/bookmarks.py	Mon Mar 09 10:18:40 2020 -0700
@@ -173,6 +173,8 @@
             nrefs.sort()
 
     def _del(self, mark):
+        if mark not in self._refmap:
+            return
         self._clean = False
         node = self._refmap.pop(mark)
         nrefs = self._nodemap[node]
@@ -461,6 +463,10 @@
     return bool(bmchanges)
 
 
+def isdivergent(b):
+    return b'@' in b and not b.endswith(b'@')
+
+
 def listbinbookmarks(repo):
     # We may try to list bookmarks on a repo type that does not
     # support it (e.g., statichttprepository).
@@ -469,7 +475,7 @@
     hasnode = repo.changelog.hasnode
     for k, v in pycompat.iteritems(marks):
         # don't expose local divergent bookmarks
-        if hasnode(v) and (b'@' not in k or k.endswith(b'@')):
+        if hasnode(v) and not isdivergent(k):
             yield k, v
 
 
@@ -481,6 +487,8 @@
 
 
 def pushbookmark(repo, key, old, new):
+    if isdivergent(key):
+        return False
     if bookmarksinstore(repo):
         wlock = util.nullcontextmanager()
     else:
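``isdivergent`` encodes the existing convention that a bookmark containing ``@`` anywhere other than as a trailing character is a local divergent copy and must not be exchanged; ``pushbookmark`` above and the bundle2 pushkey handler below both reject such names. A few illustrative cases with hypothetical bookmark names:

    def isdivergent(b):
        return b'@' in b and not b.endswith(b'@')

    assert isdivergent(b'feature@remote')   # divergent copy, filtered out
    assert not isdivergent(b'feature')      # plain name, exchanged normally
    assert not isdivergent(b'feature@')     # trailing @ is allowed
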
--- a/mercurial/branchmap.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/branchmap.py	Mon Mar 09 10:18:40 2020 -0700
@@ -291,8 +291,8 @@
                     % (
                         _branchcachedesc(repo),
                         pycompat.bytestr(
-                            inst  # pytype: disable=wrong-arg-types
-                        ),
+                            inst
+                        ),  # pytype: disable=wrong-arg-types
                     )
                 )
             bcache = None
@@ -446,7 +446,7 @@
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newheadrevs in pycompat.iteritems(newbranches):
             bheads = self._entries.setdefault(branch, [])
-            bheadset = set(cl.rev(node) for node in bheads)
+            bheadset = {cl.rev(node) for node in bheads}
 
             # This have been tested True on all internal usage of this function.
             # run it again in case of doubt
@@ -582,7 +582,7 @@
 
     @util.propertycache
     def _namesreverse(self):
-        return dict((b, r) for r, b in enumerate(self._names))
+        return {b: r for r, b in enumerate(self._names)}
 
     def branchinfo(self, rev):
         """Return branch name and close flag for rev, using and updating
--- a/mercurial/bundle2.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/bundle2.py	Mon Mar 09 10:18:40 2020 -0700
@@ -2368,6 +2368,11 @@
                     b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
                 )
 
+        for book, node in changes:
+            if bookmarks.isdivergent(book):
+                msg = _(b'cannot accept divergent bookmark %s!') % book
+                raise error.Abort(msg)
+
         bookstore.applychanges(op.repo, op.gettransaction(), changes)
 
         if pushkeycompat:
--- a/mercurial/cext/revlog.c	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/cext/revlog.c	Mon Mar 09 10:18:40 2020 -0700
@@ -208,7 +208,7 @@
  *
  * Returns 0 on success or -1 on failure.
  */
-int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
+static int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
 {
 	int tiprev;
 	if (!op || !HgRevlogIndex_Check(op) || !ps) {
--- a/mercurial/cext/revlog.h	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/cext/revlog.h	Mon Mar 09 10:18:40 2020 -0700
@@ -14,6 +14,4 @@
 
 #define HgRevlogIndex_Check(op) PyObject_TypeCheck(op, &HgRevlogIndex_Type)
 
-int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps);
-
 #endif /* _HG_REVLOG_H_ */
--- a/mercurial/changegroup.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/changegroup.py	Mon Mar 09 10:18:40 2020 -0700
@@ -993,7 +993,7 @@
             ]
 
         manifests.clear()
-        clrevs = set(cl.rev(x) for x in clnodes)
+        clrevs = {cl.rev(x) for x in clnodes}
 
         it = self.generatefiles(
             changedfiles,
@@ -1149,8 +1149,8 @@
             if fastpathlinkrev:
                 assert not tree
                 return (
-                    manifests.__getitem__  # pytype: disable=unsupported-operands
-                )
+                    manifests.__getitem__
+                )  # pytype: disable=unsupported-operands
 
             def lookupmflinknode(x):
                 """Callback for looking up the linknode for manifests.
@@ -1282,9 +1282,7 @@
                 flinkrev = store.linkrev
                 fnode = store.node
                 revs = ((r, flinkrev(r)) for r in store)
-                return dict(
-                    (fnode(r), cln(lr)) for r, lr in revs if lr in clrevs
-                )
+                return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
 
         clrevtolocalrev = {}
 
--- a/mercurial/changelog.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/changelog.py	Mon Mar 09 10:18:40 2020 -0700
@@ -385,6 +385,9 @@
             datafile=datafile,
             checkambig=True,
             mmaplargeindex=True,
+            persistentnodemap=opener.options.get(
+                b'exp-persistent-nodemap', False
+            ),
         )
 
         if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
--- a/mercurial/chgserver.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/chgserver.py	Mon Mar 09 10:18:40 2020 -0700
@@ -551,40 +551,6 @@
             raise ValueError(b'unexpected value in setenv request')
         self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
 
-        # Python3 has some logic to "coerce" the C locale to a UTF-8 capable
-        # one, and it sets LC_CTYPE in the environment to C.UTF-8 if none of
-        # 'LC_CTYPE', 'LC_ALL' or 'LANG' are set (to any value). This can be
-        # disabled with PYTHONCOERCECLOCALE=0 in the environment.
-        #
-        # When fromui is called via _inithashstate, python has already set
-        # this, so that's in the environment right when we start up the hg
-        # process. Then chg will call us and tell us to set the environment to
-        # the one it has; this might NOT have LC_CTYPE, so we'll need to
-        # carry-forward the LC_CTYPE that was coerced in these situations.
-        #
-        # If this is not handled, we will fail config+env validation and fail
-        # to start chg. If this is just ignored instead of carried forward, we
-        # may have different behavior between chg and non-chg.
-        if pycompat.ispy3:
-            # Rename for wordwrapping purposes
-            oldenv = encoding.environ
-            if not any(
-                e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in [oldenv, newenv]
-            ):
-                keys = [b'LC_CTYPE', b'LC_ALL', b'LANG']
-                old_keys = [k for k, v in oldenv.items() if k in keys and v]
-                new_keys = [k for k, v in newenv.items() if k in keys and v]
-                # If the user's environment (from chg) doesn't have ANY of the
-                # keys that python looks for, and the environment (from
-                # initialization) has ONLY LC_CTYPE and it's set to C.UTF-8,
-                # carry it forward.
-                if (
-                    not new_keys
-                    and old_keys == [b'LC_CTYPE']
-                    and oldenv[b'LC_CTYPE'] == b'C.UTF-8'
-                ):
-                    newenv[b'LC_CTYPE'] = oldenv[b'LC_CTYPE']
-
         encoding.environ.clear()
         encoding.environ.update(newenv)
 
@@ -731,6 +697,16 @@
     # environ cleaner.
     if b'CHGINTERNALMARK' in encoding.environ:
         del encoding.environ[b'CHGINTERNALMARK']
+    # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
+    # it thinks the current value is "C". This breaks the hash computation and
+    # causes chg to restart in a loop.
+    if b'CHGORIG_LC_CTYPE' in encoding.environ:
+        encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
+        del encoding.environ[b'CHGORIG_LC_CTYPE']
+    elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
+        if b'LC_CTYPE' in encoding.environ:
+            del encoding.environ[b'LC_CTYPE']
+        del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
 
     if repo:
         # one chgserver can serve multiple repos. drop repo information
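A compact sketch, assuming the ``CHGORIG_LC_CTYPE``/``CHG_CLEAR_LC_CTYPE`` markers set by the chg client, of what the server-side clean-up above amounts to: either restore the original ``LC_CTYPE`` or drop the value Python coerced in, then remove the marker itself. The function name is hypothetical:

    def _restore_lc_ctype(environ):
        # standalone version of the environment fix-up above
        if b'CHGORIG_LC_CTYPE' in environ:
            environ[b'LC_CTYPE'] = environ.pop(b'CHGORIG_LC_CTYPE')
        elif b'CHG_CLEAR_LC_CTYPE' in environ:
            environ.pop(b'LC_CTYPE', None)
            del environ[b'CHG_CLEAR_LC_CTYPE']
        return environ
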
--- a/mercurial/cmdutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/cmdutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -170,7 +170,12 @@
 
 diffopts = [
     (b'a', b'text', None, _(b'treat all files as text')),
-    (b'g', b'git', None, _(b'use git extended diff format')),
+    (
+        b'g',
+        b'git',
+        None,
+        _(b'use git extended diff format (DEFAULT: diff.git)'),
+    ),
     (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
     (b'', b'nodates', None, _(b'omit dates from diff headers')),
 ]
@@ -209,7 +214,9 @@
             b'p',
             b'show-function',
             None,
-            _(b'show which function each change is in'),
+            _(
+                b'show which function each change is in (DEFAULT: diff.showfunc)'
+            ),
         ),
         (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
     ]
@@ -281,11 +288,11 @@
     return previous
 
 
-def check_incompatible_arguments(opts, first, *others):
+def check_incompatible_arguments(opts, first, others):
     """abort if the first argument is given along with any of the others
 
     Unlike check_at_most_one_arg(), `others` are not mutually exclusive
-    among themselves.
+    among themselves, and they're passed as a single collection.
     """
     for other in others:
         check_at_most_one_arg(opts, first, other)
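The signature change means callers now pass the conflicting options as one collection instead of varargs; the hunks in ``rebase``, ``transplant`` and ``releasenotes`` elsewhere in this changeset are updated to match. A minimal before/after sketch with hypothetical opts values:

    from mercurial import cmdutil

    opts = {b'continue': True, b'branch': [], b'all': False, b'merge': False}

    # old convention (varargs):
    #   cmdutil.check_incompatible_arguments(
    #       opts, b'continue', b'branch', b'all', b'merge')
    # new convention: incompatible options are passed as a single list
    cmdutil.check_incompatible_arguments(
        opts, b'continue', [b'branch', b'all', b'merge'])
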
@@ -584,15 +591,8 @@
             [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
             # 3a. apply filtered patch to clean repo  (clean)
             if backups:
-                # Equivalent to hg.revert
                 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
-                mergemod.update(
-                    repo,
-                    repo.dirstate.p1(),
-                    branchmerge=False,
-                    force=True,
-                    matcher=m,
-                )
+                mergemod.revert_to(repo[b'.'], matcher=m)
 
             # 3b. (apply)
             if dopatch:
@@ -1414,46 +1414,165 @@
 
 
 def copy(ui, repo, pats, opts, rename=False):
+    check_incompatible_arguments(opts, b'forget', [b'dry_run'])
+
     # called with the repo lock held
     #
     # hgsep => pathname that uses "/" to separate directories
     # ossep => pathname that uses os.sep to separate directories
     cwd = repo.getcwd()
     targets = {}
+    forget = opts.get(b"forget")
     after = opts.get(b"after")
     dryrun = opts.get(b"dry_run")
-    wctx = repo[None]
+    rev = opts.get(b'at_rev')
+    if rev:
+        if not forget and not after:
+            # TODO: Remove this restriction and make it also create the copy
+            #       targets (and remove the rename source if rename==True).
+            raise error.Abort(_(b'--at-rev requires --after'))
+        ctx = scmutil.revsingle(repo, rev)
+        if len(ctx.parents()) > 1:
+            raise error.Abort(_(b'cannot mark/unmark copy in merge commit'))
+    else:
+        ctx = repo[None]
+
+    pctx = ctx.p1()
 
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
 
+    if forget:
+        if ctx.rev() is None:
+            new_ctx = ctx
+        else:
+            if len(ctx.parents()) > 1:
+                raise error.Abort(_(b'cannot unmark copy in merge commit'))
+            # avoid cycle context -> subrepo -> cmdutil
+            from . import context
+
+            rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
+            new_ctx = context.overlayworkingctx(repo)
+            new_ctx.setbase(ctx.p1())
+            mergemod.graft(repo, ctx, wctx=new_ctx)
+
+        match = scmutil.match(ctx, pats, opts)
+
+        current_copies = ctx.p1copies()
+        current_copies.update(ctx.p2copies())
+
+        uipathfn = scmutil.getuipathfn(repo)
+        for f in ctx.walk(match):
+            if f in current_copies:
+                new_ctx[f].markcopied(None)
+            elif match.exact(f):
+                ui.warn(
+                    _(
+                        b'%s: not unmarking as copy - file is not marked as copied\n'
+                    )
+                    % uipathfn(f)
+                )
+
+        if ctx.rev() is not None:
+            with repo.lock():
+                mem_ctx = new_ctx.tomemctx_for_amend(ctx)
+                new_node = mem_ctx.commit()
+
+                if repo.dirstate.p1() == ctx.node():
+                    with repo.dirstate.parentchange():
+                        scmutil.movedirstate(repo, repo[new_node])
+                replacements = {ctx.node(): [new_node]}
+                scmutil.cleanupnodes(
+                    repo, replacements, b'uncopy', fixphase=True
+                )
+
+        return
+
+    pats = scmutil.expandpats(pats)
+    if not pats:
+        raise error.Abort(_(b'no source or destination specified'))
+    if len(pats) == 1:
+        raise error.Abort(_(b'no destination specified'))
+    dest = pats.pop()
+
     def walkpat(pat):
         srcs = []
-        if after:
-            badstates = b'?'
-        else:
-            badstates = b'?r'
-        m = scmutil.match(wctx, [pat], opts, globbed=True)
-        for abs in wctx.walk(m):
-            state = repo.dirstate[abs]
+        m = scmutil.match(ctx, [pat], opts, globbed=True)
+        for abs in ctx.walk(m):
             rel = uipathfn(abs)
             exact = m.exact(abs)
-            if state in badstates:
-                if exact and state == b'?':
-                    ui.warn(_(b'%s: not copying - file is not managed\n') % rel)
-                if exact and state == b'r':
-                    ui.warn(
-                        _(
-                            b'%s: not copying - file has been marked for'
-                            b' remove\n'
+            if abs not in ctx:
+                if abs in pctx:
+                    if not after:
+                        if exact:
+                            ui.warn(
+                                _(
+                                    b'%s: not copying - file has been marked '
+                                    b'for remove\n'
+                                )
+                                % rel
+                            )
+                        continue
+                else:
+                    if exact:
+                        ui.warn(
+                            _(b'%s: not copying - file is not managed\n') % rel
                         )
-                        % rel
-                    )
-                continue
+                    continue
+
             # abs: hgsep
             # rel: ossep
             srcs.append((abs, rel, exact))
         return srcs
 
+    if ctx.rev() is not None:
+        rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
+        absdest = pathutil.canonpath(repo.root, cwd, dest)
+        if ctx.hasdir(absdest):
+            raise error.Abort(
+                _(b'%s: --at-rev does not support a directory as destination')
+                % uipathfn(absdest)
+            )
+        if absdest not in ctx:
+            raise error.Abort(
+                _(b'%s: copy destination does not exist in %s')
+                % (uipathfn(absdest), ctx)
+            )
+
+        # avoid cycle context -> subrepo -> cmdutil
+        from . import context
+
+        copylist = []
+        for pat in pats:
+            srcs = walkpat(pat)
+            if not srcs:
+                continue
+            for abs, rel, exact in srcs:
+                copylist.append(abs)
+
+        # TODO: Add support for `hg cp --at-rev . foo bar dir` and
+        # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
+        # existing functions below.
+        if len(copylist) != 1:
+            raise error.Abort(_(b'--at-rev requires a single source'))
+
+        new_ctx = context.overlayworkingctx(repo)
+        new_ctx.setbase(ctx.p1())
+        mergemod.graft(repo, ctx, wctx=new_ctx)
+
+        new_ctx.markcopied(absdest, copylist[0])
+
+        with repo.lock():
+            mem_ctx = new_ctx.tomemctx_for_amend(ctx)
+            new_node = mem_ctx.commit()
+
+            if repo.dirstate.p1() == ctx.node():
+                with repo.dirstate.parentchange():
+                    scmutil.movedirstate(repo, repo[new_node])
+            replacements = {ctx.node(): [new_node]}
+            scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
+
+        return
+
     # abssrc: hgsep
     # relsrc: ossep
     # otarget: ossep
@@ -1583,13 +1702,13 @@
 
         # fix up dirstate
         scmutil.dirstatecopy(
-            ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
+            ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
         )
         if rename and not dryrun:
             if not after and srcexists and not samefile:
                 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
                 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
-            wctx.forget([abssrc])
+            ctx.forget([abssrc])
 
     # pat: ossep
     # dest ossep
@@ -1659,12 +1778,6 @@
                     res = lambda p: dest
         return res
 
-    pats = scmutil.expandpats(pats)
-    if not pats:
-        raise error.Abort(_(b'no source or destination specified'))
-    if len(pats) == 1:
-        raise error.Abort(_(b'no destination specified'))
-    dest = pats.pop()
     destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
     if not destdirexists:
         if len(pats) > 1 or matchmod.patkind(pats[0]):
@@ -3012,7 +3125,7 @@
         ms = mergemod.mergestate.read(repo)
         mergeutil.checkunresolved(ms)
 
-        filestoamend = set(f for f in wctx.files() if matcher(f))
+        filestoamend = {f for f in wctx.files() if matcher(f)}
 
         changes = len(filestoamend) > 0
         if changes:
@@ -3804,7 +3917,7 @@
         # Apply changes
         fp = stringio()
         # chunks are serialized per file, but files aren't sorted
-        for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
+        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
             prntstatusmsg(b'revert', f)
         files = set()
         for c in chunks:
--- a/mercurial/color.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/color.py	Mon Mar 09 10:18:40 2020 -0700
@@ -44,7 +44,7 @@
         b'cyan': (False, curses.COLOR_CYAN, b''),
         b'white': (False, curses.COLOR_WHITE, b''),
     }
-except ImportError:
+except (ImportError, AttributeError):
     curses = None
     _baseterminfoparams = {}
 
--- a/mercurial/commands.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/commands.py	Mon Mar 09 10:18:40 2020 -0700
@@ -876,7 +876,7 @@
         )
         overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
         with ui.configoverride(overrides, b'backout'):
-            return hg.merge(repo, hex(repo.changelog.tip()))
+            return hg.merge(repo[b'tip'])
     return 0
 
 
@@ -1228,7 +1228,7 @@
 
     action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list')
     if action:
-        cmdutil.check_incompatible_arguments(opts, action, b'rev')
+        cmdutil.check_incompatible_arguments(opts, action, [b'rev'])
     elif names or rev:
         action = b'add'
     elif inactive:
@@ -1236,7 +1236,9 @@
     else:
         action = b'list'
 
-    cmdutil.check_incompatible_arguments(opts, b'inactive', b'delete', b'list')
+    cmdutil.check_incompatible_arguments(
+        opts, b'inactive', [b'delete', b'list']
+    )
     if not names and action in {b'add', b'delete'}:
         raise error.Abort(_(b"bookmark name required"))
 
@@ -2307,8 +2309,16 @@
 @command(
     b'copy|cp',
     [
+        (b'', b'forget', None, _(b'unmark a file as copied')),
         (b'A', b'after', None, _(b'record a copy that has already occurred')),
         (
+            b'',
+            b'at-rev',
+            b'',
+            _(b'(un)mark copies in the given revision (EXPERIMENTAL)'),
+            _(b'REV'),
+        ),
+        (
             b'f',
             b'force',
             None,
@@ -2331,8 +2341,11 @@
     exist in the working directory. If invoked with -A/--after, the
     operation is recorded, but no copying is performed.
 
-    This command takes effect with the next commit. To undo a copy
-    before that, see :hg:`revert`.
+    To undo marking a file as copied, use --forget. With that option,
+    all given (positional) arguments are unmarked as copies. The destination
+    file(s) will be left in place (still tracked).
+
+    This command takes effect with the next commit by default.
 
     Returns 0 on success, 1 if errors are encountered.
     """
@@ -3708,9 +3721,9 @@
         heads = [repo[h] for h in heads]
 
     if branchrevs:
-        branches = set(
+        branches = {
             repo[r].branch() for r in scmutil.revrange(repo, branchrevs)
-        )
+        }
         heads = [h for h in heads if h.branch() in branches]
 
     if opts.get(b'active') and branchrevs:
@@ -3718,7 +3731,7 @@
         heads = [h for h in heads if h.node() in dagheads]
 
     if branchrevs:
-        haveheads = set(h.branch() for h in heads)
+        haveheads = {h.branch() for h in heads}
         if branches - haveheads:
             headless = b', '.join(b for b in branches - haveheads)
             msg = _(b'no open branch heads found on branches %s')
@@ -4847,6 +4860,7 @@
     abort = opts.get(b'abort')
     if abort and repo.dirstate.p2() == nullid:
         cmdutil.wrongtooltocontinue(repo, _(b'merge'))
+    cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
     if abort:
         state = cmdutil.getunfinishedstate(repo)
         if state and state._opname != b'merge':
@@ -4856,19 +4870,16 @@
             )
         if node:
             raise error.Abort(_(b"cannot specify a node with --abort"))
-        if opts.get(b'rev'):
-            raise error.Abort(_(b"cannot specify both --rev and --abort"))
-        if opts.get(b'preview'):
-            raise error.Abort(_(b"cannot specify --preview with --abort"))
+        return hg.abortmerge(repo.ui, repo)
+
     if opts.get(b'rev') and node:
         raise error.Abort(_(b"please specify just one revision"))
     if not node:
         node = opts.get(b'rev')
 
     if node:
-        node = scmutil.revsingle(repo, node).node()
-
-    if not node and not abort:
+        ctx = scmutil.revsingle(repo, node)
+    else:
         if ui.configbool(b'commands', b'merge.require-rev'):
             raise error.Abort(
                 _(
@@ -4876,12 +4887,15 @@
                     b'with'
                 )
             )
-        node = repo[destutil.destmerge(repo)].node()
+        ctx = repo[destutil.destmerge(repo)]
+
+    if ctx.node() is None:
+        raise error.Abort(_(b'merging with the working copy has no effect'))
 
     if opts.get(b'preview'):
         # find nodes that are ancestors of p2 but not of p1
-        p1 = repo.lookup(b'.')
-        p2 = node
+        p1 = repo[b'.'].node()
+        p2 = ctx.node()
         nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
 
         displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
@@ -4895,14 +4909,7 @@
     with ui.configoverride(overrides, b'merge'):
         force = opts.get(b'force')
         labels = [b'working copy', b'merge rev']
-        return hg.merge(
-            repo,
-            node,
-            force=force,
-            mergeforce=force,
-            labels=labels,
-            abort=abort,
-        )
+        return hg.merge(ctx, force=force, labels=labels)
 
 
 statemod.addunfinished(
@@ -5671,7 +5678,7 @@
 
 @command(
     b'recover',
-    [(b'', b'verify', True, b"run `hg verify` after successful recover"),],
+    [(b'', b'verify', False, b"run `hg verify` after successful recover"),],
     helpcategory=command.CATEGORY_MAINTENANCE,
 )
 def recover(ui, repo, **opts):
@@ -6648,7 +6655,12 @@
         (b'i', b'ignored', None, _(b'show only ignored files')),
         (b'n', b'no-status', None, _(b'hide status prefix')),
         (b't', b'terse', _NOTTERSE, _(b'show the terse output (EXPERIMENTAL)')),
-        (b'C', b'copies', None, _(b'show source of copied files')),
+        (
+            b'C',
+            b'copies',
+            None,
+            _(b'show source of copied files (DEFAULT: ui.statuscopies)'),
+        ),
         (
             b'0',
             b'print0',
@@ -7653,6 +7665,7 @@
 
     Returns 0 on success, 1 if there are unresolved files.
     """
+    cmdutil.check_at_most_one_arg(opts, 'clean', 'check', 'merge')
     rev = opts.get('rev')
     date = opts.get('date')
     clean = opts.get('clean')
@@ -7674,14 +7687,6 @@
     if date and rev is not None:
         raise error.Abort(_(b"you can't specify a revision and a date"))
 
-    if len([x for x in (clean, check, merge) if x]) > 1:
-        raise error.Abort(
-            _(
-                b"can only specify one of -C/--clean, -c/--check, "
-                b"or -m/--merge"
-            )
-        )
-
     updatecheck = None
     if check:
         updatecheck = b'abort'
--- a/mercurial/commandserver.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/commandserver.py	Mon Mar 09 10:18:40 2020 -0700
@@ -545,6 +545,10 @@
         if maxlen < 0:
             raise error.Abort(_(b'negative max-repo-cache size not allowed'))
         self._repoloader = repocache.repoloader(ui, maxlen)
+        # attempt to avoid crash in CoreFoundation when using chg after fix in
+        # a89381e04c58
+        if pycompat.isdarwin:
+            procutil.gui()
 
     def init(self):
         self._sock = socket.socket(socket.AF_UNIX)
--- a/mercurial/configitems.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/configitems.py	Mon Mar 09 10:18:40 2020 -0700
@@ -406,6 +406,9 @@
     b'devel', b'legacy.exchange', default=list,
 )
 coreconfigitem(
+    b'devel', b'persistent-nodemap', default=False,
+)
+coreconfigitem(
     b'devel', b'servercafile', default=b'',
 )
 coreconfigitem(
@@ -660,6 +663,12 @@
     b'experimental', b'rust.index', default=False,
 )
 coreconfigitem(
+    b'experimental', b'exp-persistent-nodemap', default=False,
+)
+coreconfigitem(
+    b'experimental', b'exp-persistent-nodemap.mmap', default=True,
+)
+coreconfigitem(
     b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
 )
 coreconfigitem(
@@ -750,7 +759,7 @@
 coreconfigitem(
     b'format',
     b'revlog-compression',
-    default=b'zlib',
+    default=lambda: [b'zlib'],
     alias=[(b'experimental', b'format.compression')],
 )
 coreconfigitem(
@@ -1107,7 +1116,7 @@
     b'server', b'compressionengines', default=list,
 )
 coreconfigitem(
-    b'server', b'concurrent-push-mode', default=b'strict',
+    b'server', b'concurrent-push-mode', default=b'check-related',
 )
 coreconfigitem(
     b'server', b'disablefullbundle', default=False,
--- a/mercurial/context.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/context.py	Mon Mar 09 10:18:40 2020 -0700
@@ -267,7 +267,7 @@
     def _fileinfo(self, path):
         if '_manifest' in self.__dict__:
             try:
-                return self._manifest[path], self._manifest.flags(path)
+                return self._manifest.find(path)
             except KeyError:
                 raise error.ManifestLookupError(
                     self._node, path, _(b'not found in manifest')
@@ -2357,8 +2357,7 @@
         # Test the other direction -- that this path from p2 isn't a directory
         # in p1 (test that p1 doesn't have any paths matching `path/*`).
         match = self.match([path], default=b'path')
-        matches = self.p1().manifest().matches(match)
-        mfiles = matches.keys()
+        mfiles = list(self.p1().manifest().walk(match))
         if len(mfiles) > 0:
             if len(mfiles) == 1 and mfiles[0] == path:
                 return
@@ -2488,6 +2487,17 @@
             editor=editor,
         )
 
+    def tomemctx_for_amend(self, precursor):
+        extra = precursor.extra().copy()
+        extra[b'amend_source'] = precursor.hex()
+        return self.tomemctx(
+            text=precursor.description(),
+            branch=precursor.branch(),
+            extra=extra,
+            date=precursor.date(),
+            user=precursor.user(),
+        )
+
     def isdirty(self, path):
         return path in self._cache
 
--- a/mercurial/copies.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/copies.py	Mon Mar 09 10:18:40 2020 -0700
@@ -403,13 +403,15 @@
         )
     if x == y or not x or not y:
         return {}
+    if y.rev() is None and x == y.p1():
+        if debug:
+            repo.ui.debug(b'debug.copies: search mode: dirstate\n')
+        # short-circuit to avoid issues with merge states
+        return _dirstatecopies(repo, match)
     a = y.ancestor(x)
     if a == x:
         if debug:
             repo.ui.debug(b'debug.copies: search mode: forward\n')
-        if y.rev() is None and x == y.p1():
-            # short-circuit to avoid issues with merge states
-            return _dirstatecopies(repo, match)
         copies = _forwardcopies(x, y, match=match)
     elif a == y:
         if debug:
@@ -452,44 +454,34 @@
 
     ```other changed <file> which local deleted```
 
-    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
-    "dirmove".
+    Returns a tuple where:
 
-    "copy" is a mapping from destination name -> source name,
-    where source is in c1 and destination is in c2 or vice-versa.
-
-    "movewithdir" is a mapping from source name -> destination name,
-    where the file at source present in one context but not the other
-    needs to be moved to destination by the merge process, because the
-    other context moved the directory it is in.
+    "branch_copies" an instance of branch_copies.
 
     "diverge" is a mapping of source name -> list of destination names
     for divergent renames.
 
-    "renamedelete" is a mapping of source name -> list of destination
-    names for files deleted in c1 that were renamed in c2 or vice-versa.
-
-    "dirmove" is a mapping of detected source dir -> destination dir renames.
-    This is needed for handling changes to new files previously grafted into
-    renamed directories.
-
     This function calls different copytracing algorithms based on config.
     """
     # avoid silly behavior for update from empty dir
     if not c1 or not c2 or c1 == c2:
-        return {}, {}, {}, {}, {}
+        return branch_copies(), branch_copies(), {}
 
     narrowmatch = c1.repo().narrowmatch()
 
     # avoid silly behavior for parent -> working dir
     if c2.node() is None and c1.node() == repo.dirstate.p1():
-        return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
+        return (
+            branch_copies(_dirstatecopies(repo, narrowmatch)),
+            branch_copies(),
+            {},
+        )
 
     copytracing = repo.ui.config(b'experimental', b'copytrace')
     if stringutil.parsebool(copytracing) is False:
         # stringutil.parsebool() returns None when it is unable to parse the
         # value, so we should rely on making sure copytracing is on such cases
-        return {}, {}, {}, {}, {}
+        return branch_copies(), branch_copies(), {}
 
     if usechangesetcentricalgo(repo):
         # The heuristics don't make sense when we need changeset-centric algos
@@ -537,15 +529,45 @@
         if src not in m1:
             # renamed on side 1, deleted on side 2
             renamedelete[src] = dsts1
+    elif src not in mb:
+        # Work around the "short-circuit to avoid issues with merge states"
+        # thing in pathcopies(): pathcopies(x, y) can return a copy where the
+        # destination doesn't exist in y.
+        pass
     elif m2[src] != mb[src]:
         if not _related(c2[src], base[src]):
             return
         # modified on side 2
         for dst in dsts1:
-            if dst not in m2:
-                # dst not added on side 2 (handle as regular
-                # "both created" case in manifestmerge otherwise)
-                copy[dst] = src
+            copy[dst] = src
+
+
+class branch_copies(object):
+    """Information about copies made on one side of a merge/graft.
+
+    "copy" is a mapping from destination name -> source name,
+    where source is in c1 and destination is in c2 or vice-versa.
+
+    "movewithdir" is a mapping from source name -> destination name,
+    where the file at source present in one context but not the other
+    needs to be moved to destination by the merge process, because the
+    other context moved the directory it is in.
+
+    "renamedelete" is a mapping of source name -> list of destination
+    names for files deleted in c1 that were renamed in c2 or vice-versa.
+
+    "dirmove" is a mapping of detected source dir -> destination dir renames.
+    This is needed for handling changes to new files previously grafted into
+    renamed directories.
+    """
+
+    def __init__(
+        self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
+    ):
+        self.copy = {} if copy is None else copy
+        self.renamedelete = {} if renamedelete is None else renamedelete
+        self.dirmove = {} if dirmove is None else dirmove
+        self.movewithdir = {} if movewithdir is None else movewithdir
 
 
 def _fullcopytracing(repo, c1, c2, base):
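A minimal sketch of the new return shape, assuming this version of Mercurial is importable (file names are made up): mergecopies() now yields one branch_copies instance per side plus the diverge dict, instead of the old five dicts.

    from mercurial.copies import branch_copies

    # construct per-side copy information directly, as mergecopies() now does
    local = branch_copies(copy={b'new.txt': b'old.txt'})
    print(local.copy, local.movewithdir, local.renamedelete, local.dirmove)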
@@ -563,6 +585,9 @@
     copies1 = pathcopies(base, c1)
     copies2 = pathcopies(base, c2)
 
+    if not (copies1 or copies2):
+        return branch_copies(), branch_copies(), {}
+
     inversecopies1 = {}
     inversecopies2 = {}
     for dst, src in copies1.items():
@@ -570,9 +595,11 @@
     for dst, src in copies2.items():
         inversecopies2.setdefault(src, []).append(dst)
 
-    copy = {}
+    copy1 = {}
+    copy2 = {}
     diverge = {}
-    renamedelete = {}
+    renamedelete1 = {}
+    renamedelete2 = {}
     allsources = set(inversecopies1) | set(inversecopies2)
     for src in allsources:
         dsts1 = inversecopies1.get(src)
@@ -589,7 +616,8 @@
                 # and 'd' and deletes 'a'.
                 if dsts1 & dsts2:
                     for dst in dsts1 & dsts2:
-                        copy[dst] = src
+                        copy1[dst] = src
+                        copy2[dst] = src
                 else:
                     diverge[src] = sorted(dsts1 | dsts2)
             elif src in m1 and src in m2:
@@ -597,27 +625,21 @@
                 dsts1 = set(dsts1)
                 dsts2 = set(dsts2)
                 for dst in dsts1 & dsts2:
-                    copy[dst] = src
+                    copy1[dst] = src
+                    copy2[dst] = src
             # TODO: Handle cases where it was renamed on one side and copied
             # on the other side
         elif dsts1:
             # copied/renamed only on side 1
             _checksinglesidecopies(
-                src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
+                src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
             )
         elif dsts2:
             # copied/renamed only on side 2
             _checksinglesidecopies(
-                src, dsts2, m2, m1, mb, c1, base, copy, renamedelete
+                src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
             )
 
-    renamedeleteset = set()
-    divergeset = set()
-    for dsts in diverge.values():
-        divergeset.update(dsts)
-    for dsts in renamedelete.values():
-        renamedeleteset.update(dsts)
-
     # find interesting file sets from manifests
     addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
     addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
@@ -630,33 +652,60 @@
     if u2:
         repo.ui.debug(b"%s:\n   %s\n" % (header % b'other', b"\n   ".join(u2)))
 
-    fullcopy = copies1.copy()
-    fullcopy.update(copies2)
-    if not fullcopy:
-        return copy, {}, diverge, renamedelete, {}
+    if repo.ui.debugflag:
+        renamedeleteset = set()
+        divergeset = set()
+        for dsts in diverge.values():
+            divergeset.update(dsts)
+        for dsts in renamedelete1.values():
+            renamedeleteset.update(dsts)
+        for dsts in renamedelete2.values():
+            renamedeleteset.update(dsts)
 
-    if repo.ui.debugflag:
         repo.ui.debug(
             b"  all copies found (* = to merge, ! = divergent, "
             b"% = renamed and deleted):\n"
         )
-        for f in sorted(fullcopy):
-            note = b""
-            if f in copy:
-                note += b"*"
-            if f in divergeset:
-                note += b"!"
-            if f in renamedeleteset:
-                note += b"%"
-            repo.ui.debug(
-                b"   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note)
-            )
-    del divergeset
+        for side, copies in ((b"local", copies1), (b"remote", copies2)):
+            if not copies:
+                continue
+            repo.ui.debug(b"   on %s side:\n" % side)
+            for f in sorted(copies):
+                note = b""
+                if f in copy1 or f in copy2:
+                    note += b"*"
+                if f in divergeset:
+                    note += b"!"
+                if f in renamedeleteset:
+                    note += b"%"
+                repo.ui.debug(
+                    b"    src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
+                )
+        del renamedeleteset
+        del divergeset
 
     repo.ui.debug(b"  checking for directory renames\n")
 
+    dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2)
+    dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1)
+
+    branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1)
+    branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2)
+
+    return branch_copies1, branch_copies2, diverge
+
+
+def _dir_renames(repo, ctx, copy, fullcopy, addedfiles):
+    """Finds moved directories and files that should move with them.
+
+    ctx: the context for one of the sides
+    copy: files copied on the same side (as ctx)
+    fullcopy: files copied on the same side (as ctx), including those that
+              merge.manifestmerge() won't care about
+    addedfiles: added files on the other side (compared to ctx)
+    """
     # generate a directory move map
-    d1, d2 = c1.dirs(), c2.dirs()
+    d = ctx.dirs()
     invalid = set()
     dirmove = {}
 
@@ -667,12 +716,9 @@
         if dsrc in invalid:
             # already seen to be uninteresting
             continue
-        elif dsrc in d1 and ddst in d1:
+        elif dsrc in d and ddst in d:
             # directory wasn't entirely moved locally
             invalid.add(dsrc)
-        elif dsrc in d2 and ddst in d2:
-            # directory wasn't entirely moved remotely
-            invalid.add(dsrc)
         elif dsrc in dirmove and dirmove[dsrc] != ddst:
             # files from the same directory moved to two different places
             invalid.add(dsrc)
@@ -683,10 +729,10 @@
     for i in invalid:
         if i in dirmove:
             del dirmove[i]
-    del d1, d2, invalid
+    del d, invalid
 
     if not dirmove:
-        return copy, {}, diverge, renamedelete, {}
+        return {}, {}
 
     dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
 
@@ -697,7 +743,7 @@
 
     movewithdir = {}
     # check unaccounted nonoverlapping files against directory moves
-    for f in u1 + u2:
+    for f in addedfiles:
         if f not in fullcopy:
             for d in dirmove:
                 if f.startswith(d):
@@ -711,7 +757,7 @@
                         )
                     break
 
-    return copy, movewithdir, diverge, renamedelete, dirmove
+    return dirmove, movewithdir
 
 
 def _heuristicscopytracing(repo, c1, c2, base):
@@ -744,8 +790,6 @@
     if c2.rev() is None:
         c2 = c2.p1()
 
-    copies = {}
-
     changedfiles = set()
     m1 = c1.manifest()
     if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
@@ -765,10 +809,11 @@
         changedfiles.update(ctx.files())
         ctx = ctx.p1()
 
+    copies2 = {}
     cp = _forwardcopies(base, c2)
     for dst, src in pycompat.iteritems(cp):
         if src in m1:
-            copies[dst] = src
+            copies2[dst] = src
 
     # file is missing if it isn't present in the destination, but is present in
     # the base and present in the source.
@@ -777,6 +822,7 @@
     filt = lambda f: f not in m1 and f in base and f in c2
     missingfiles = [f for f in changedfiles if filt(f)]
 
+    copies1 = {}
     if missingfiles:
         basenametofilename = collections.defaultdict(list)
         dirnametofilename = collections.defaultdict(list)
@@ -818,9 +864,9 @@
                     # if there are a few related copies then we'll merge
                     # changes into all of them. This matches the behaviour
                     # of upstream copytracing
-                    copies[candidate] = f
+                    copies1[candidate] = f
 
-    return copies, {}, {}, {}, {}
+    return branch_copies(copies1), branch_copies(copies2), {}
 
 
 def _related(f1, f2):
--- a/mercurial/crecord.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/crecord.py	Mon Mar 09 10:18:40 2020 -0700
@@ -63,13 +63,13 @@
     import curses.ascii
 
     curses.error
-except ImportError:
+except (ImportError, AttributeError):
     # I have no idea if wcurses works with crecord...
     try:
         import wcurses as curses
 
         curses.error
-    except ImportError:
+    except (ImportError, AttributeError):
         # wcurses is not shipped on Windows by default, or python is not
         # compiled with curses
         curses = False
--- a/mercurial/debugcommands.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/debugcommands.py	Mon Mar 09 10:18:40 2020 -0700
@@ -11,8 +11,10 @@
 import collections
 import difflib
 import errno
+import glob
 import operator
 import os
+import platform
 import random
 import re
 import socket
@@ -27,7 +29,6 @@
 from .node import (
     bin,
     hex,
-    nullhex,
     nullid,
     nullrev,
     short,
@@ -38,6 +39,7 @@
 )
 from . import (
     bundle2,
+    bundlerepo,
     changegroup,
     cmdutil,
     color,
@@ -75,6 +77,7 @@
     sshpeer,
     sslutil,
     streamclone,
+    tags as tagsmod,
     templater,
     treediscovery,
     upgrade,
@@ -93,7 +96,10 @@
     stringutil,
 )
 
-from .revlogutils import deltas as deltautil
+from .revlogutils import (
+    deltas as deltautil,
+    nodemap,
+)
 
 release = lockmod.release
 
@@ -578,7 +584,7 @@
     dots = opts.get('dots')
     if file_:
         rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
-        revs = set((int(r) for r in revs))
+        revs = {int(r) for r in revs}
 
         def events():
             for r in rlog:
@@ -1128,7 +1134,7 @@
         (b'analyzed', filesetlang.analyze),
         (b'optimized', filesetlang.optimize),
     ]
-    stagenames = set(n for n, f in stages)
+    stagenames = {n for n, f in stages}
 
     showalways = set()
     if ui.verbose and not opts[b'show_stage']:
@@ -1487,6 +1493,11 @@
         pycompat.sysexecutable or _(b"unknown"),
     )
     fm.write(
+        b'pythonimplementation',
+        _(b"checking Python implementation (%s)\n"),
+        pycompat.sysbytes(platform.python_implementation()),
+    )
+    fm.write(
         b'pythonver',
         _(b"checking Python version (%s)\n"),
         (b"%d.%d.%d" % sys.version_info[:3]),
@@ -1497,6 +1508,13 @@
         pythonlib or _(b"unknown"),
     )
 
+    try:
+        from . import rustext
+
+        rustext.__doc__  # trigger lazy import
+    except ImportError:
+        rustext = None
+
     security = set(sslutil.supportedprotocols)
     if sslutil.hassni:
         security.add(b'sni')
@@ -1524,6 +1542,13 @@
             )
         )
 
+    fm.plain(
+        _(
+            b"checking Rust extensions (%s)\n"
+            % (b'missing' if rustext is None else b'installed')
+        ),
+    )
+
     # TODO print CA cert info
 
     # hg version
@@ -1934,120 +1959,100 @@
         )
 
 
-@command(b'debugmergestate', [], b'')
-def debugmergestate(ui, repo, *args):
+@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
+def debugmergestate(ui, repo, *args, **opts):
     """print merge state
 
     Use --verbose to print out information about whether v1 or v2 merge state
     was chosen."""
 
-    def _hashornull(h):
-        if h == nullhex:
-            return b'null'
-        else:
-            return h
-
-    def printrecords(version):
-        ui.writenoi18n(b'* version %d records\n' % version)
-        if version == 1:
-            records = v1records
+    if ui.verbose:
+        ms = mergemod.mergestate(repo)
+
+        # sort so that reasonable information is on top
+        v1records = ms._readrecordsv1()
+        v2records = ms._readrecordsv2()
+
+        if not v1records and not v2records:
+            pass
+        elif not v2records:
+            ui.writenoi18n(b'no version 2 merge state\n')
+        elif ms._v1v2match(v1records, v2records):
+            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
         else:
-            records = v2records
-
-        for rtype, record in records:
-            # pretty print some record types
-            if rtype == b'L':
-                ui.writenoi18n(b'local: %s\n' % record)
-            elif rtype == b'O':
-                ui.writenoi18n(b'other: %s\n' % record)
-            elif rtype == b'm':
-                driver, mdstate = record.split(b'\0', 1)
-                ui.writenoi18n(
-                    b'merge driver: %s (state "%s")\n' % (driver, mdstate)
-                )
-            elif rtype in b'FDC':
-                r = record.split(b'\0')
-                f, state, hash, lfile, afile, anode, ofile = r[0:7]
-                if version == 1:
-                    onode = b'not stored in v1 format'
-                    flags = r[7]
-                else:
-                    onode, flags = r[7:9]
-                ui.writenoi18n(
-                    b'file: %s (record type "%s", state "%s", hash %s)\n'
-                    % (f, rtype, state, _hashornull(hash))
-                )
-                ui.writenoi18n(
-                    b'  local path: %s (flags "%s")\n' % (lfile, flags)
-                )
-                ui.writenoi18n(
-                    b'  ancestor path: %s (node %s)\n'
-                    % (afile, _hashornull(anode))
-                )
-                ui.writenoi18n(
-                    b'  other path: %s (node %s)\n'
-                    % (ofile, _hashornull(onode))
-                )
-            elif rtype == b'f':
-                filename, rawextras = record.split(b'\0', 1)
-                extras = rawextras.split(b'\0')
-                i = 0
-                extrastrings = []
-                while i < len(extras):
-                    extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
-                    i += 2
-
-                ui.writenoi18n(
-                    b'file extras: %s (%s)\n'
-                    % (filename, b', '.join(extrastrings))
-                )
-            elif rtype == b'l':
-                labels = record.split(b'\0', 2)
-                labels = [l for l in labels if len(l) > 0]
-                ui.writenoi18n(b'labels:\n')
-                ui.write((b'  local: %s\n' % labels[0]))
-                ui.write((b'  other: %s\n' % labels[1]))
-                if len(labels) > 2:
-                    ui.write((b'  base:  %s\n' % labels[2]))
-            else:
-                ui.writenoi18n(
-                    b'unrecognized entry: %s\t%s\n'
-                    % (rtype, record.replace(b'\0', b'\t'))
-                )
-
-    # Avoid mergestate.read() since it may raise an exception for unsupported
-    # merge state records. We shouldn't be doing this, but this is OK since this
-    # command is pretty low-level.
-    ms = mergemod.mergestate(repo)
-
-    # sort so that reasonable information is on top
-    v1records = ms._readrecordsv1()
-    v2records = ms._readrecordsv2()
-    order = b'LOml'
-
-    def key(r):
-        idx = order.find(r[0])
-        if idx == -1:
-            return (1, r[1])
-        else:
-            return (0, idx)
-
-    v1records.sort(key=key)
-    v2records.sort(key=key)
-
-    if not v1records and not v2records:
-        ui.writenoi18n(b'no merge state found\n')
-    elif not v2records:
-        ui.notenoi18n(b'no version 2 merge state\n')
-        printrecords(1)
-    elif ms._v1v2match(v1records, v2records):
-        ui.notenoi18n(b'v1 and v2 states match: using v2\n')
-        printrecords(2)
-    else:
-        ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
-        printrecords(1)
-        if ui.verbose:
-            printrecords(2)
+            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')
+
+    opts = pycompat.byteskwargs(opts)
+    if not opts[b'template']:
+        opts[b'template'] = (
+            b'{if(commits, "", "no merge state found\n")}'
+            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
+            b'{files % "file: {path} (state \\"{state}\\")\n'
+            b'{if(local_path, "'
+            b'  local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
+            b'  ancestor path: {ancestor_path} (node {ancestor_node})\n'
+            b'  other path: {other_path} (node {other_node})\n'
+            b'")}'
+            b'{if(rename_side, "'
+            b'  rename side: {rename_side}\n'
+            b'  renamed path: {renamed_path}\n'
+            b'")}'
+            b'{extras % "  extra: {key} = {value}\n"}'
+            b'"}'
+        )
+
+    ms = mergemod.mergestate.read(repo)
+
+    fm = ui.formatter(b'debugmergestate', opts)
+    fm.startitem()
+
+    fm_commits = fm.nested(b'commits')
+    if ms.active():
+        for name, node, label_index in (
+            (b'local', ms.local, 0),
+            (b'other', ms.other, 1),
+        ):
+            fm_commits.startitem()
+            fm_commits.data(name=name)
+            fm_commits.data(node=hex(node))
+            if ms._labels and len(ms._labels) > label_index:
+                fm_commits.data(label=ms._labels[label_index])
+    fm_commits.end()
+
+    fm_files = fm.nested(b'files')
+    if ms.active():
+        for f in ms:
+            fm_files.startitem()
+            fm_files.data(path=f)
+            state = ms._state[f]
+            fm_files.data(state=state[0])
+            if state[0] in (
+                mergemod.MERGE_RECORD_UNRESOLVED,
+                mergemod.MERGE_RECORD_RESOLVED,
+            ):
+                fm_files.data(local_key=state[1])
+                fm_files.data(local_path=state[2])
+                fm_files.data(ancestor_path=state[3])
+                fm_files.data(ancestor_node=state[4])
+                fm_files.data(other_path=state[5])
+                fm_files.data(other_node=state[6])
+                fm_files.data(local_flags=state[7])
+            elif state[0] in (
+                mergemod.MERGE_RECORD_UNRESOLVED_PATH,
+                mergemod.MERGE_RECORD_RESOLVED_PATH,
+            ):
+                fm_files.data(renamed_path=state[1])
+                fm_files.data(rename_side=state[2])
+            fm_extras = fm_files.nested(b'extras')
+            for k, v in ms.extras(f).items():
+                fm_extras.startitem()
+                fm_extras.data(key=k)
+                fm_extras.data(value=v)
+            fm_extras.end()
+
+    fm_files.end()
+
+    fm.end()
 
 
 @command(b'debugnamecomplete', [], _(b'NAME...'))
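Since debugmergestate is now formatter-driven, structured output comes for free; a hedged invocation sketch (run inside a repository with an in-progress merge; -T/--template comes from the templateopts added above):

    hg debugmergestate -T json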
@@ -2075,6 +2080,64 @@
 
 
 @command(
+    b'debugnodemap',
+    [
+        (
+            b'',
+            b'dump-new',
+            False,
+            _(b'write a (new) persistent binary nodemap on stdout'),
+        ),
+        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
+        (
+            b'',
+            b'check',
+            False,
+            _(b'check that the data on disk are correct.'),
+        ),
+        (
+            b'',
+            b'metadata',
+            False,
+            _(b'display the on disk meta data for the nodemap'),
+        ),
+    ],
+)
+def debugnodemap(ui, repo, **opts):
+    """write and inspect on disk nodemap
+    """
+    if opts['dump_new']:
+        unfi = repo.unfiltered()
+        cl = unfi.changelog
+        data = nodemap.persistent_data(cl.index)
+        ui.write(data)
+    elif opts['dump_disk']:
+        unfi = repo.unfiltered()
+        cl = unfi.changelog
+        nm_data = nodemap.persisted_data(cl)
+        if nm_data is not None:
+            docket, data = nm_data
+            ui.write(data[:])
+    elif opts['check']:
+        unfi = repo.unfiltered()
+        cl = unfi.changelog
+        nm_data = nodemap.persisted_data(cl)
+        if nm_data is not None:
+            docket, data = nm_data
+            return nodemap.check_data(ui, cl.index, data)
+    elif opts['metadata']:
+        unfi = repo.unfiltered()
+        cl = unfi.changelog
+        nm_data = nodemap.persisted_data(cl)
+        if nm_data is not None:
+            docket, data = nm_data
+            ui.write((b"uid: %s\n") % docket.uid)
+            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
+            ui.write((b"data-length: %d\n") % docket.data_length)
+            ui.write((b"data-unused: %d\n") % docket.data_unused)
+
+
+@command(
     b'debugobsolete',
     [
         (b'', b'flags', 0, _(b'markers flag')),
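A hedged sketch of invoking the new debugnodemap command (flag names as registered above; enabling the experimental persistent-nodemap settings added in configitems.py may be required for useful output):

    hg debugnodemap --metadata               # prints uid, tip-rev, data-length, data-unused
    hg debugnodemap --dump-new > nodemap.bin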
@@ -2549,7 +2612,7 @@
             dirstatefiles = set(dirstate)
             manifestonly = manifestfiles - dirstatefiles
             dsonly = dirstatefiles - manifestfiles
-            dsnotadded = set(f for f in dsonly if dirstate[f] != b'a')
+            dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
             changedfiles = manifestonly | dsnotadded
 
         dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
@@ -3116,7 +3179,7 @@
         raise error.Abort(
             _(b'cannot use --verify-optimized with --no-optimized')
         )
-    stagenames = set(n for n, f in stages)
+    stagenames = {n for n, f in stages}
 
     showalways = set()
     showchanged = set()
@@ -3355,6 +3418,143 @@
 
 
 @command(
+    b"debugbackupbundle",
+    [
+        (
+            b"",
+            b"recover",
+            b"",
+            b"brings the specified changeset back into the repository",
+        )
+    ]
+    + cmdutil.logopts,
+    _(b"hg debugbackupbundle [--recover HASH]"),
+)
+def debugbackupbundle(ui, repo, *pats, **opts):
+    """lists the changesets available in backup bundles
+
+    Without any arguments, this command prints a list of the changesets in each
+    backup bundle.
+
+    --recover takes a changeset hash and unbundles the first bundle that
+    contains that hash, which puts that changeset back in your repository.
+
+    --verbose will print the entire commit message and the bundle path for that
+    backup.
+    """
+    backups = list(
+        filter(
+            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
+        )
+    )
+    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
+
+    opts = pycompat.byteskwargs(opts)
+    opts[b"bundle"] = b""
+    opts[b"force"] = None
+    limit = logcmdutil.getlimit(opts)
+
+    def display(other, chlist, displayer):
+        if opts.get(b"newest_first"):
+            chlist.reverse()
+        count = 0
+        for n in chlist:
+            if limit is not None and count >= limit:
+                break
+            parents = [True for p in other.changelog.parents(n) if p != nullid]
+            if opts.get(b"no_merges") and len(parents) == 2:
+                continue
+            count += 1
+            displayer.show(other[n])
+
+    recovernode = opts.get(b"recover")
+    if recovernode:
+        if scmutil.isrevsymbol(repo, recovernode):
+            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
+            return
+    elif backups:
+        msg = _(
+            b"Recover changesets using: hg debugbackupbundle --recover "
+            b"<changeset hash>\n\nAvailable backup changesets:"
+        )
+        ui.status(msg, label=b"status.removed")
+    else:
+        ui.status(_(b"no backup changesets found\n"))
+        return
+
+    for backup in backups:
+        # Much of this is copied from the hg incoming logic
+        source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
+        source, branches = hg.parseurl(source, opts.get(b"branch"))
+        try:
+            other = hg.peer(repo, opts, source)
+        except error.LookupError as ex:
+            msg = _(b"\nwarning: unable to open bundle %s") % source
+            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
+            ui.warn(msg, hint=hint)
+            continue
+        revs, checkout = hg.addbranchrevs(
+            repo, other, branches, opts.get(b"rev")
+        )
+
+        if revs:
+            revs = [other.lookup(rev) for rev in revs]
+
+        quiet = ui.quiet
+        try:
+            ui.quiet = True
+            other, chlist, cleanupfn = bundlerepo.getremotechanges(
+                ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+            )
+        except error.LookupError:
+            continue
+        finally:
+            ui.quiet = quiet
+
+        try:
+            if not chlist:
+                continue
+            if recovernode:
+                with repo.lock(), repo.transaction(b"unbundle") as tr:
+                    if scmutil.isrevsymbol(other, recovernode):
+                        ui.status(_(b"Unbundling %s\n") % (recovernode))
+                        f = hg.openpath(ui, source)
+                        gen = exchange.readbundle(ui, f, source)
+                        if isinstance(gen, bundle2.unbundle20):
+                            bundle2.applybundle(
+                                repo,
+                                gen,
+                                tr,
+                                source=b"unbundle",
+                                url=b"bundle:" + source,
+                            )
+                        else:
+                            gen.apply(repo, b"unbundle", b"bundle:" + source)
+                        break
+            else:
+                backupdate = encoding.strtolocal(
+                    time.strftime(
+                        "%a %H:%M, %Y-%m-%d",
+                        time.localtime(os.path.getmtime(source)),
+                    )
+                )
+                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
+                if ui.verbose:
+                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
+                else:
+                    opts[
+                        b"template"
+                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
+                displayer = logcmdutil.changesetdisplayer(
+                    ui, other, opts, False
+                )
+                display(other, chlist, displayer)
+                displayer.close()
+        finally:
+            cleanupfn()
+
+
+@command(
     b'debugsub',
     [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
     _(b'[-r REV] [REV]'),
@@ -3423,6 +3623,17 @@
             ui.write(b'\n')
 
 
+@command(b'debugtagscache', [])
+def debugtagscache(ui, repo):
+    """display the contents of .hg/cache/hgtagsfnodes1"""
+    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
+    for r in repo:
+        node = repo[r].node()
+        tagsnode = cache.getfnode(node, computemissing=False)
+        tagsnodedisplay = hex(tagsnode) if tagsnode else b'missing/invalid'
+        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
+
+
 @command(
     b'debugtemplate',
     [
--- a/mercurial/dirstate.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/dirstate.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1262,6 +1262,9 @@
         return files in the dirstate (in whatever state) filtered by match
         '''
         dmap = self._map
+        if rustmod is not None:
+            dmap = self._map._rustmap
+
         if match.always():
             return dmap.keys()
         files = match.files()
--- a/mercurial/discovery.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/discovery.py	Mon Mar 09 10:18:40 2020 -0700
@@ -188,7 +188,7 @@
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
-        missingrevs = set(cl.rev(n) for n in og._missing)
+        missingrevs = {cl.rev(n) for n in og._missing}
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]
@@ -264,8 +264,8 @@
     # If there are no obsstore, no post processing are needed.
     if repo.obsstore:
         torev = repo.changelog.rev
-        futureheads = set(torev(h) for h in outgoing.missingheads)
-        futureheads |= set(torev(h) for h in outgoing.commonheads)
+        futureheads = {torev(h) for h in outgoing.missingheads}
+        futureheads |= {torev(h) for h in outgoing.commonheads}
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(pycompat.iteritems(headssum)):
             remoteheads, newheads, unsyncedheads, placeholder = heads
@@ -448,7 +448,7 @@
                 if branch not in (b'default', None):
                     errormsg = _(
                         b"push creates new remote head %s on branch '%s'!"
-                    ) % (short(dhs[0]), branch)
+                    ) % (short(dhs[0]), branch,)
                 elif repo[dhs[0]].bookmarks():
                     errormsg = _(
                         b"push creates new remote head %s "
--- a/mercurial/dispatch.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/dispatch.py	Mon Mar 09 10:18:40 2020 -0700
@@ -514,7 +514,7 @@
     '''
     # util.interpolate can't deal with "$@" (with quotes) because it's only
     # built to match prefix + patterns.
-    replacemap = dict((b'$%d' % (i + 1), arg) for i, arg in enumerate(args))
+    replacemap = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)}
     replacemap[b'$0'] = name
     replacemap[b'$$'] = b'$'
     replacemap[b'$@'] = b' '.join(args)
@@ -624,7 +624,7 @@
         except error.AmbiguousCommand:
             self.badalias = _(
                 b"alias '%s' resolves to ambiguous command '%s'"
-            ) % (self.name, cmd)
+            ) % (self.name, cmd,)
 
     def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
         # confine strings to be passed to i18n.gettext()
--- a/mercurial/encoding.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/encoding.py	Mon Mar 09 10:18:40 2020 -0700
@@ -86,10 +86,10 @@
 else:
     # preferred encoding isn't known yet; use utf-8 to avoid unicode error
     # and recreate it once encoding is settled
-    environ = dict(
-        (k.encode('utf-8'), v.encode('utf-8'))
+    environ = {
+        k.encode('utf-8'): v.encode('utf-8')
         for k, v in os.environ.items()  # re-exports
-    )
+    }
 
 _encodingrewrites = {
     b'646': b'ascii',
@@ -285,10 +285,10 @@
 if not _nativeenviron:
     # now encoding and helper functions are available, recreate the environ
     # dict to be exported to other modules
-    environ = dict(
-        (tolocal(k.encode('utf-8')), tolocal(v.encode('utf-8')))
+    environ = {
+        tolocal(k.encode('utf-8')): tolocal(v.encode('utf-8'))
         for k, v in os.environ.items()  # re-exports
-    )
+    }
 
 if pycompat.ispy3:
     # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
--- a/mercurial/exchange.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/exchange.py	Mon Mar 09 10:18:40 2020 -0700
@@ -856,7 +856,11 @@
     for b, scid, dcid in addsrc:
         if b in explicit:
             explicit.remove(b)
-            pushop.outbookmarks.append((b, b'', scid))
+            if bookmod.isdivergent(b):
+                pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
+                pushop.bkresult = 2
+            else:
+                pushop.outbookmarks.append((b, b'', scid))
     # search for overwritten bookmark
     for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
         if b in explicit:
@@ -1675,12 +1679,12 @@
     def headsofdiff(h1, h2):
         """Returns heads(h1 % h2)"""
         res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
-        return set(ctx.node() for ctx in res)
+        return {ctx.node() for ctx in res}
 
     def headsofunion(h1, h2):
         """Returns heads((h1 + h2) - null)"""
         res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
-        return set(ctx.node() for ctx in res)
+        return {ctx.node() for ctx in res}
 
     while True:
         old_heads = unficl.heads()
@@ -3068,7 +3072,15 @@
     if not prefers:
         return list(entries)
 
-    prefers = [p.split(b'=', 1) for p in prefers]
+    def _split(p):
+        if b'=' not in p:
+            hint = _(b"each comma separated item should be key=value pairs")
+            raise error.Abort(
+                _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
+            )
+        return p.split(b'=', 1)
+
+    prefers = [_split(p) for p in prefers]
 
     items = sorted(clonebundleentry(v, prefers) for v in entries)
     return [i.value for i in items]
--- a/mercurial/extensions.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/extensions.py	Mon Mar 09 10:18:40 2020 -0700
@@ -787,11 +787,11 @@
     try:
         from hgext import __index__  # pytype: disable=import-error
 
-        return dict(
-            (name, gettext(desc))
+        return {
+            name: gettext(desc)
             for name, desc in pycompat.iteritems(__index__.docs)
             if name not in _order
-        )
+        }
     except (ImportError, AttributeError):
         pass
 
--- a/mercurial/fancyopts.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/fancyopts.py	Mon Mar 09 10:18:40 2020 -0700
@@ -314,7 +314,7 @@
     argmap = {}
     defmap = {}
     negations = {}
-    alllong = set(o[1] for o in options)
+    alllong = {o[1] for o in options}
 
     for option in options:
         if len(option) == 5:
--- a/mercurial/graphmod.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/graphmod.py	Mon Mar 09 10:18:40 2020 -0700
@@ -58,7 +58,7 @@
         # partition into parents in the rev set and missing parents, then
         # augment the lists with markers, to inform graph drawing code about
         # what kind of edge to draw between nodes.
-        pset = set(p.rev() for p in ctx.parents() if p.rev() in revs)
+        pset = {p.rev() for p in ctx.parents() if p.rev() in revs}
         mpars = [
             p.rev()
             for p in ctx.parents()
@@ -95,9 +95,9 @@
     include = set(nodes)
     for node in nodes:
         ctx = repo[node]
-        parents = set(
+        parents = {
             (PARENT, p.rev()) for p in ctx.parents() if p.node() in include
-        )
+        }
         yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
 
 
--- a/mercurial/hbisect.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/hbisect.py	Mon Mar 09 10:18:40 2020 -0700
@@ -137,7 +137,7 @@
             side = state[b'bad']
         else:
             side = state[b'good']
-        num = len(set(i.node() for i in parents) & set(side))
+        num = len({i.node() for i in parents} & set(side))
         if num == 1:
             return parents[0].ancestor(parents[1])
     return None
--- a/mercurial/help.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/help.py	Mon Mar 09 10:18:40 2020 -0700
@@ -153,7 +153,18 @@
     return doc
 
 
-def optrst(header, options, verbose):
+def parsedefaultmarker(text):
+    """given a text 'abc (DEFAULT: def.ghi)',
+    returns (b'abc', [b'def', b'ghi']); otherwise returns None"""
+    if text[-1:] == b')':
+        marker = b' (DEFAULT: '
+        pos = text.find(marker)
+        if pos >= 0:
+            item = text[pos + len(marker) : -1]
+            return text[:pos], item.split(b'.', 2)
+
+
+def optrst(header, options, verbose, ui):
     data = []
     multioccur = False
     for option in options:
@@ -165,7 +176,14 @@
 
         if not verbose and any(w in desc for w in _exclkeywords):
             continue
-
+        defaultstrsuffix = b''
+        if default is None:
+            parseresult = parsedefaultmarker(desc)
+            if parseresult is not None:
+                (desc, (section, name)) = parseresult
+                if ui.configbool(section, name):
+                    default = True
+                    defaultstrsuffix = _(b' from config')
         so = b''
         if shortopt:
             so = b'-' + shortopt
@@ -183,7 +201,7 @@
             defaultstr = pycompat.bytestr(default)
             if default is True:
                 defaultstr = _(b"on")
-            desc += _(b" (default: %s)") % defaultstr
+            desc += _(b" (default: %s)") % (defaultstr + defaultstrsuffix)
 
         if isinstance(default, list):
             lo += b" %s [+]" % optlabel
@@ -714,11 +732,13 @@
 
         # options
         if not ui.quiet and entry[1]:
-            rst.append(optrst(_(b"options"), entry[1], ui.verbose))
+            rst.append(optrst(_(b"options"), entry[1], ui.verbose, ui))
 
         if ui.verbose:
             rst.append(
-                optrst(_(b"global options"), commands.globalopts, ui.verbose)
+                optrst(
+                    _(b"global options"), commands.globalopts, ui.verbose, ui
+                )
             )
 
         if not ui.verbose:
@@ -858,7 +878,9 @@
         elif ui.verbose:
             rst.append(
                 b'\n%s\n'
-                % optrst(_(b"global options"), commands.globalopts, ui.verbose)
+                % optrst(
+                    _(b"global options"), commands.globalopts, ui.verbose, ui
+                )
             )
             if name == b'shortlist':
                 rst.append(
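
For reference, the new `(DEFAULT: section.name)` marker parsing introduced above can be exercised on its own; this is the same logic as `parsedefaultmarker`, fed an invented option description:

    def parsedefaultmarker(text):
        """given b'abc (DEFAULT: def.ghi)', return (b'abc', [b'def', b'ghi'])"""
        if text[-1:] == b')':
            marker = b' (DEFAULT: '
            pos = text.find(marker)
            if pos >= 0:
                item = text[pos + len(marker):-1]
                return text[:pos], item.split(b'.', 2)

    print(parsedefaultmarker(b'use word diff (DEFAULT: diff.word-diff)'))
    # (b'use word diff', [b'diff', b'word-diff'])
    print(parsedefaultmarker(b'no marker here'))  # None

When the named config knob is set to true, optrst() then renders the option as "(default: on from config)".
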
--- a/mercurial/helptext/config.txt	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/helptext/config.txt	Mon Mar 09 10:18:40 2020 -0700
@@ -888,7 +888,8 @@
     Compression algorithm used by revlog. Supported values are `zlib` and
     `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is
     a newer format that is usually a net win over `zlib`, operating faster at
-    better compression rates. Use `zstd` to reduce CPU usage.
+    better compression rates. Use `zstd` to reduce CPU usage. Multiple values
+    can be specified; the first available one will be used.
 
     On some systems, the Mercurial installation may lack `zstd` support.
 
@@ -2005,12 +2006,12 @@
     Level of allowed race condition between two pushing clients.
 
     - 'strict': push is aborted if another client touched the repository
-      while the push was preparing. (default)
+      while the push was preparing.
     - 'check-related': push is only aborted if it affects a head that was also
-      affected while the push was preparing.
-
-    This requires compatible client (version 4.3 and later). Old client will
-    use 'strict'.
+      affected while the push was preparing. (default since 5.4)
+
+    'check-related' only takes effect for compatible clients (version
+    4.3 and later). Older clients will use 'strict'.
 
 ``validate``
     Whether to validate the completeness of pushed changesets by
--- a/mercurial/hg.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/hg.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1040,10 +1040,9 @@
 def clean(repo, node, show_stats=True, quietempty=False):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = updaterepo(repo, node, True)
-    repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
+    assert stats.unresolvedcount == 0
     if show_stats:
         _showstats(repo, stats, quietempty)
-    return stats.unresolvedcount > 0
 
 
 # naming conflict in updatetotally()
@@ -1138,27 +1137,12 @@
 
 
 def merge(
-    repo,
-    node,
-    force=None,
-    remind=True,
-    mergeforce=False,
-    labels=None,
-    abort=False,
+    ctx, force=False, remind=True, labels=None,
 ):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
-    if abort:
-        return abortmerge(repo.ui, repo)
-
-    stats = mergemod.update(
-        repo,
-        node,
-        branchmerge=True,
-        force=force,
-        mergeforce=mergeforce,
-        labels=labels,
-    )
+    repo = ctx.repo()
+    stats = mergemod.merge(ctx, force=force, labels=labels)
     _showstats(repo, stats)
     if stats.unresolvedcount:
         repo.ui.status(
@@ -1182,9 +1166,9 @@
         node = repo[b'.'].hex()
 
     repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
-    stats = mergemod.update(repo, node, branchmerge=False, force=True)
+    stats = mergemod.clean_update(repo[node])
+    assert stats.unresolvedcount == 0
     _showstats(repo, stats)
-    return stats.unresolvedcount > 0
 
 
 def _incoming(
--- a/mercurial/hgweb/webutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/hgweb/webutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -936,5 +936,5 @@
 
 def getgraphnode(repo, ctx):
     return templatekw.getgraphnodecurrent(
-        repo, ctx
+        repo, ctx, {}
     ) + templatekw.getgraphnodesymbol(ctx)
--- a/mercurial/httpconnection.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/httpconnection.py	Mon Mar 09 10:18:40 2020 -0700
@@ -39,12 +39,15 @@
         self.write = self._data.write
         self.length = os.fstat(self._data.fileno()).st_size
         self._pos = 0
+        self._progress = self._makeprogress()
+
+    def _makeprogress(self):
         # We pass double the max for total because we currently have
         # to send the bundle twice in the case of a server that
         # requires authentication. Since we can't know until we try
         # once whether authentication will be required, just lie to
         # the user and maybe the push succeeds suddenly at 50%.
-        self._progress = ui.makeprogress(
+        return self.ui.makeprogress(
             _(b'sending'), unit=_(b'kb'), total=(self.length // 1024 * 2)
         )
 
--- a/mercurial/interfaces/repository.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/interfaces/repository.py	Mon Mar 09 10:18:40 2020 -0700
@@ -985,18 +985,9 @@
     def hasdir(dir):
         """Returns a bool indicating if a directory is in this manifest."""
 
-    def matches(match):
-        """Generate a new manifest filtered through a matcher.
-
-        Returns an object conforming to the ``imanifestdict`` interface.
-        """
-
     def walk(match):
         """Generator of paths in manifest satisfying a matcher.
 
-        This is equivalent to ``self.matches(match).iterkeys()`` except a new
-        manifest object is not created.
-
         If the matcher has explicit files listed and they don't exist in
         the manifest, ``match.bad()`` is called for each missing file.
         """
@@ -1027,8 +1018,8 @@
     def get(path, default=None):
         """Obtain the node value for a path or a default value if missing."""
 
-    def flags(path, default=b''):
-        """Return the flags value for a path or a default value if missing."""
+    def flags(path):
+        """Return the flags value for a path (default: empty bytestring)."""
 
     def copy():
         """Return a copy of this manifest."""
@@ -1071,14 +1062,6 @@
     as part of a larger interface.
     """
 
-    def new():
-        """Obtain a new manifest instance.
-
-        Returns an object conforming to the ``imanifestrevisionwritable``
-        interface. The instance will be associated with the same
-        ``imanifestlog`` collection as this instance.
-        """
-
     def copy():
         """Obtain a copy of this manifest instance.
 
--- a/mercurial/localrepo.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/localrepo.py	Mon Mar 09 10:18:40 2020 -0700
@@ -932,6 +932,12 @@
 
     if ui.configbool(b'experimental', b'rust.index'):
         options[b'rust.index'] = True
+    if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
+        options[b'exp-persistent-nodemap'] = True
+    if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
+        options[b'exp-persistent-nodemap.mmap'] = True
+    if ui.configbool(b'devel', b'persistent-nodemap'):
+        options[b'devel-force-nodemap'] = True
 
     return options
 
@@ -1803,7 +1809,7 @@
         # map tag name to (node, hist)
         alltags = tagsmod.findglobaltags(self.ui, self)
         # map tag name to tag type
-        tagtypes = dict((tag, b'global') for tag in alltags)
+        tagtypes = {tag: b'global' for tag in alltags}
 
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
@@ -1816,12 +1822,10 @@
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags[b'tip'] = self.changelog.tip()
-        tagtypes = dict(
-            [
-                (encoding.tolocal(name), value)
-                for (name, value) in pycompat.iteritems(tagtypes)
-            ]
-        )
+        tagtypes = {
+            encoding.tolocal(name): value
+            for (name, value) in pycompat.iteritems(tagtypes)
+        }
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
@@ -2498,6 +2502,9 @@
 
         if full:
             unfi = self.unfiltered()
+
+            self.changelog.update_caches(transaction=tr)
+
             rbc = unfi.revbranchcache()
             for r in unfi.changelog:
                 rbc.branchinfo(r)
@@ -2938,6 +2945,9 @@
                 self, status, text, user, date, extra
             )
 
+            ms = mergemod.mergestate.read(self)
+            mergeutil.checkunresolved(ms)
+
             # internal config: ui.allowemptycommit
             allowemptycommit = (
                 wctx.branch() != wctx.p1().branch()
@@ -2947,14 +2957,13 @@
                 or self.ui.configbool(b'ui', b'allowemptycommit')
             )
             if not allowemptycommit:
+                self.ui.debug(b'nothing to commit, clearing merge state\n')
+                ms.reset()
                 return None
 
             if merge and cctx.deleted():
                 raise error.Abort(_(b"cannot commit merge with missing files"))
 
-            ms = mergemod.mergestate.read(self)
-            mergeutil.checkunresolved(ms)
-
             if editor:
                 cctx._text = editor(self, cctx, subs)
             edited = text != cctx._text
@@ -3572,14 +3581,17 @@
             if ui.configbool(b'format', b'dotencode'):
                 requirements.add(b'dotencode')
 
-    compengine = ui.config(b'format', b'revlog-compression')
-    if compengine not in util.compengines:
+    compengines = ui.configlist(b'format', b'revlog-compression')
+    for compengine in compengines:
+        if compengine in util.compengines:
+            break
+    else:
         raise error.Abort(
             _(
-                b'compression engine %s defined by '
+                b'compression engines %s defined by '
                 b'format.revlog-compression not available'
             )
-            % compengine,
+            % b', '.join(b'"%s"' % e for e in compengines),
             hint=_(
                 b'run "hg debuginstall" to list available '
                 b'compression engines'
@@ -3587,7 +3599,7 @@
         )
 
     # zlib is the historical default and doesn't need an explicit requirement.
-    elif compengine == b'zstd':
+    if compengine == b'zstd':
         requirements.add(b'revlog-compression-zstd')
     elif compengine != b'zlib':
         requirements.add(b'exp-compression-%s' % compengine)
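
The requirements hunk above lets format.revlog-compression hold a list and keeps the first engine that is actually available, using a for/else to abort when none is. A condensed, self-contained sketch of that selection (the available set below is illustrative, standing in for util.compengines):

    AVAILABLE = {'zlib', 'zstd'}  # stand-in for util.compengines

    def pick_compression(configured):
        for engine in configured:
            if engine in AVAILABLE:
                return engine
        raise LookupError(
            'compression engines %s defined by format.revlog-compression not available'
            % ', '.join('"%s"' % e for e in configured)
        )

    print(pick_compression(['zstd', 'zlib']))    # 'zstd'
    print(pick_compression(['brotli', 'zlib']))  # falls back to 'zlib'
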
--- a/mercurial/logcmdutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/logcmdutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1004,7 +1004,7 @@
         ui, spec, defaults=templatekw.keywords, resources=tres
     )
 
-    def formatnode(repo, ctx):
+    def formatnode(repo, ctx, cache):
         props = {b'ctx': ctx, b'repo': repo}
         return templ.renderdefault(props)
 
@@ -1038,8 +1038,9 @@
         # experimental config: experimental.graphshorten
         state.graphshorten = ui.configbool(b'experimental', b'graphshorten')
 
+    formatnode_cache = {}
     for rev, type, ctx, parents in dag:
-        char = formatnode(repo, ctx)
+        char = formatnode(repo, ctx, formatnode_cache)
         copies = getcopies(ctx) if getcopies else None
         edges = edgefn(type, char, state, rev, parents)
         firstedge = next(edges)
--- a/mercurial/manifest.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/manifest.py	Mon Mar 09 10:18:40 2020 -0700
@@ -23,6 +23,7 @@
 from . import (
     encoding,
     error,
+    match as matchmod,
     mdiff,
     pathutil,
     policy,
@@ -461,7 +462,7 @@
     __bool__ = __nonzero__
 
     def __setitem__(self, key, node):
-        self._lm[key] = node, self.flags(key, b'')
+        self._lm[key] = node, self.flags(key)
 
     def __contains__(self, key):
         if key is None:
@@ -482,17 +483,11 @@
 
     def filesnotin(self, m2, match=None):
         '''Set of files in this manifest that are not in the other'''
-        if match:
-            m1 = self.matches(match)
-            m2 = m2.matches(match)
-            return m1.filesnotin(m2)
-        diff = self.diff(m2)
-        files = set(
-            filepath
-            for filepath, hashflags in pycompat.iteritems(diff)
-            if hashflags[1][0] is None
-        )
-        return files
+        if match is not None:
+            match = matchmod.badmatch(match, lambda path, msg: None)
+            sm2 = set(m2.walk(match))
+            return {f for f in self.walk(match) if f not in sm2}
+        return {f for f in self if f not in m2}
 
     @propertycache
     def _dirs(self):
@@ -531,7 +526,8 @@
         # avoid the entire walk if we're only looking for specific files
         if self._filesfastpath(match):
             for fn in sorted(fset):
-                yield fn
+                if fn in self:
+                    yield fn
             return
 
         for fn in self:
@@ -549,7 +545,7 @@
             if not self.hasdir(fn):
                 match.bad(fn, None)
 
-    def matches(self, match):
+    def _matches(self, match):
         '''generate a new manifest filtered by the match argument'''
         if match.always():
             return self.copy()
@@ -582,8 +578,8 @@
         string.
         '''
         if match:
-            m1 = self.matches(match)
-            m2 = m2.matches(match)
+            m1 = self._matches(match)
+            m2 = m2._matches(match)
             return m1.diff(m2, clean=clean)
         return self._lm.diff(m2._lm, clean)
 
@@ -596,11 +592,11 @@
         except KeyError:
             return default
 
-    def flags(self, key, default=b''):
+    def flags(self, key):
         try:
             return self._lm[key][1]
         except KeyError:
-            return default
+            return b''
 
     def copy(self):
         c = manifestdict()
@@ -1079,8 +1075,8 @@
     def filesnotin(self, m2, match=None):
         '''Set of files in this manifest that are not in the other'''
         if match and not match.always():
-            m1 = self.matches(match)
-            m2 = m2.matches(match)
+            m1 = self._matches(match)
+            m2 = m2._matches(match)
             return m1.filesnotin(m2)
 
         files = set()
@@ -1126,9 +1122,6 @@
     def walk(self, match):
         '''Generates matching file names.
 
-        Equivalent to manifest.matches(match).iterkeys(), but without creating
-        an entirely new manifest.
-
         It also reports nonexistent files by marking them bad with match.bad().
         '''
         if match.always():
@@ -1171,16 +1164,16 @@
                     for f in self._dirs[p]._walk(match):
                         yield f
 
-    def matches(self, match):
-        '''generate a new manifest filtered by the match argument'''
-        if match.always():
-            return self.copy()
-
-        return self._matches(match)
-
     def _matches(self, match):
         '''recursively generate a new manifest filtered by the match argument.
         '''
+        if match.always():
+            return self.copy()
+        return self._matches_inner(match)
+
+    def _matches_inner(self, match):
+        if match.always():
+            return self.copy()
 
         visit = match.visitchildrenset(self._dir[:-1])
         if visit == b'all':
@@ -1211,7 +1204,7 @@
         for dir, subm in pycompat.iteritems(self._dirs):
             if visit and dir[:-1] not in visit:
                 continue
-            m = subm._matches(match)
+            m = subm._matches_inner(match)
             if not m._isempty():
                 ret._dirs[dir] = m
 
@@ -1235,8 +1228,8 @@
         string.
         '''
         if match and not match.always():
-            m1 = self.matches(match)
-            m2 = m2.matches(match)
+            m1 = self._matches(match)
+            m2 = m2._matches(match)
             return m1.diff(m2, clean=clean)
         result = {}
         emptytree = treemanifest()
@@ -1923,9 +1916,6 @@
     def _storage(self):
         return self._manifestlog.getstorage(b'')
 
-    def new(self):
-        return memmanifestctx(self._manifestlog)
-
     def copy(self):
         memmf = memmanifestctx(self._manifestlog)
         memmf._manifestdict = self.read().copy()
@@ -1972,9 +1962,6 @@
     def node(self):
         return self._node
 
-    def new(self):
-        return memmanifestctx(self._manifestlog)
-
     def copy(self):
         memmf = memmanifestctx(self._manifestlog)
         memmf._manifestdict = self.read().copy()
@@ -2039,9 +2026,6 @@
     def _storage(self):
         return self._manifestlog.getstorage(b'')
 
-    def new(self, dir=b''):
-        return memtreemanifestctx(self._manifestlog, dir=dir)
-
     def copy(self):
         memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
         memmf._treemanifest = self._treemanifest.copy()
@@ -2124,9 +2108,6 @@
     def node(self):
         return self._node
 
-    def new(self, dir=b''):
-        return memtreemanifestctx(self._manifestlog, dir=dir)
-
     def copy(self):
         memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
         memmf._treemanifest = self.read().copy()
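
The manifestdict.filesnotin() rewrite above stops building two filtered manifests and instead walks both sides with the matcher (with match.bad() silenced via badmatch) and takes a set difference. A toy model with manifests as plain dicts, only to illustrate the shape of the computation:

    def filesnotin(m1, m2, match=None):
        if match is not None:
            files2 = {f for f in m2 if match(f)}
            return {f for f in m1 if match(f) and f not in files2}
        return {f for f in m1 if f not in m2}

    m1 = {'a.txt': 1, 'src/b.py': 2, 'src/c.py': 3}
    m2 = {'a.txt': 1, 'src/b.py': 2}
    print(filesnotin(m1, m2))                                        # {'src/c.py'}
    print(filesnotin(m1, m2, match=lambda f: f.startswith('src/')))  # {'src/c.py'}
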
--- a/mercurial/match.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/match.py	Mon Mar 09 10:18:40 2020 -0700
@@ -24,7 +24,7 @@
 )
 from .utils import stringutil
 
-rustmod = policy.importrust('filepatterns')
+rustmod = policy.importrust('dirstate')
 
 allpatternkinds = (
     b're',
@@ -772,7 +772,7 @@
         candidates = self._fileset | self._dirs - {b''}
         if dir != b'':
             d = dir + b'/'
-            candidates = set(c[len(d) :] for c in candidates if c.startswith(d))
+            candidates = {c[len(d) :] for c in candidates if c.startswith(d)}
         # self._dirs includes all of the directories, recursively, so if
         # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
         # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
@@ -1273,15 +1273,6 @@
     '''Convert a (normalized) pattern of any kind into a
     regular expression.
     globsuffix is appended to the regexp of globs.'''
-
-    if rustmod is not None:
-        try:
-            return rustmod.build_single_regex(kind, pat, globsuffix)
-        except rustmod.PatternError:
-            raise error.ProgrammingError(
-                b'not a regex pattern: %s:%s' % (kind, pat)
-            )
-
     if not pat and kind in (b'glob', b'relpath'):
         return b''
     if kind == b're':
@@ -1554,18 +1545,6 @@
     This is useful to debug ignore patterns.
     '''
 
-    if rustmod is not None:
-        result, warnings = rustmod.read_pattern_file(
-            filepath, bool(warn), sourceinfo,
-        )
-
-        for warning_params in warnings:
-            # Can't be easily emitted from Rust, because it would require
-            # a mechanism for both gettext and calling the `warn` function.
-            warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params)
-
-        return result
-
     syntaxes = {
         b're': b'relre:',
         b'regexp': b'relre:',
--- a/mercurial/mdiff.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/mdiff.py	Mon Mar 09 10:18:40 2020 -0700
@@ -91,7 +91,7 @@
             )
 
     def copy(self, **kwargs):
-        opts = dict((k, getattr(self, k)) for k in self.defaults)
+        opts = {k: getattr(self, k) for k in self.defaults}
         opts = pycompat.strkwargs(opts)
         opts.update(kwargs)
         return diffopts(**opts)
--- a/mercurial/merge.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/merge.py	Mon Mar 09 10:18:40 2020 -0700
@@ -386,18 +386,26 @@
         return configmergedriver
 
     @util.propertycache
-    def localctx(self):
+    def local(self):
         if self._local is None:
-            msg = b"localctx accessed but self._local isn't set"
+            msg = b"local accessed but self._local isn't set"
             raise error.ProgrammingError(msg)
-        return self._repo[self._local]
+        return self._local
+
+    @util.propertycache
+    def localctx(self):
+        return self._repo[self.local]
+
+    @util.propertycache
+    def other(self):
+        if self._other is None:
+            msg = b"other accessed but self._other isn't set"
+            raise error.ProgrammingError(msg)
+        return self._other
 
     @util.propertycache
     def otherctx(self):
-        if self._other is None:
-            msg = b"otherctx accessed but self._other isn't set"
-            raise error.ProgrammingError(msg)
-        return self._repo[self._other]
+        return self._repo[self.other]
 
     def active(self):
         """Whether mergestate is active.
@@ -405,14 +413,7 @@
         Returns True if there appears to be mergestate. This is a rough proxy
         for "is a merge in progress."
         """
-        # Check local variables before looking at filesystem for performance
-        # reasons.
-        return (
-            bool(self._local)
-            or bool(self._state)
-            or self._repo.vfs.exists(self.statepathv1)
-            or self._repo.vfs.exists(self.statepathv2)
-        )
+        return bool(self._local) or bool(self._state)
 
     def commit(self):
         """Write current state on disk (if necessary)"""
@@ -989,11 +990,10 @@
     """
     Check for case-folding collisions.
     """
-
     # If the repo is narrowed, filter out files outside the narrowspec.
     narrowmatch = repo.narrowmatch()
     if not narrowmatch.always():
-        wmf = wmf.matches(narrowmatch)
+        pmmf = set(wmf.walk(narrowmatch))
         if actions:
             narrowactions = {}
             for m, actionsfortype in pycompat.iteritems(actions):
@@ -1002,9 +1002,9 @@
                     if narrowmatch(f):
                         narrowactions[m].append((f, args, msg))
             actions = narrowactions
-
-    # build provisional merged manifest up
-    pmmf = set(wmf)
+    else:
+        # build provisional merged manifest up
+        pmmf = set(wmf)
 
     if actions:
         # KEEP and EXEC are no-op
@@ -1256,17 +1256,19 @@
     if matcher is not None and matcher.always():
         matcher = None
 
-    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
-
     # manifests fetched in order are going to be faster, so prime the caches
     [
         x.manifest()
         for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
     ]
 
+    branch_copies1 = copies.branch_copies()
+    branch_copies2 = copies.branch_copies()
+    diverge = {}
     if followcopies:
-        ret = copies.mergecopies(repo, wctx, p2, pa)
-        copy, movewithdir, diverge, renamedelete, dirmove = ret
+        branch_copies1, branch_copies2, diverge = copies.mergecopies(
+            repo, wctx, p2, pa
+        )
 
     boolbm = pycompat.bytestr(bool(branchmerge))
     boolf = pycompat.bytestr(bool(force))
@@ -1278,8 +1280,10 @@
     repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
 
     m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
-    copied = set(copy.values())
-    copied.update(movewithdir.values())
+    copied1 = set(branch_copies1.copy.values())
+    copied1.update(branch_copies1.movewithdir.values())
+    copied2 = set(branch_copies2.copy.values())
+    copied2.update(branch_copies2.movewithdir.values())
 
     if b'.hgsubstate' in m1 and wctx.rev() is None:
         # Check whether sub state is modified, and overwrite the manifest
@@ -1299,10 +1303,10 @@
         relevantfiles = set(ma.diff(m2).keys())
 
         # For copied and moved files, we need to add the source file too.
-        for copykey, copyvalue in pycompat.iteritems(copy):
+        for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
             if copyvalue in relevantfiles:
                 relevantfiles.add(copykey)
-        for movedirkey in movewithdir:
+        for movedirkey in branch_copies1.movewithdir:
             relevantfiles.add(movedirkey)
         filesmatcher = scmutil.matchfiles(repo, relevantfiles)
         matcher = matchmod.intersectmatchers(matcher, filesmatcher)
@@ -1313,7 +1317,10 @@
     for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
         if n1 and n2:  # file exists on both local and remote side
             if f not in ma:
-                fa = copy.get(f, None)
+                # TODO: what if they're renamed from different sources?
+                fa = branch_copies1.copy.get(
+                    f, None
+                ) or branch_copies2.copy.get(f, None)
                 if fa is not None:
                     actions[f] = (
                         ACTION_MERGE,
@@ -1356,10 +1363,12 @@
                         b'versions differ',
                     )
         elif n1:  # file exists only on local side
-            if f in copied:
+            if f in copied2:
                 pass  # we'll deal with it on m2 side
-            elif f in movewithdir:  # directory rename, move local
-                f2 = movewithdir[f]
+            elif (
+                f in branch_copies1.movewithdir
+            ):  # directory rename, move local
+                f2 = branch_copies1.movewithdir[f]
                 if f2 in m2:
                     actions[f2] = (
                         ACTION_MERGE,
@@ -1372,8 +1381,8 @@
                         (f, fl1),
                         b'remote directory rename - move from %s' % f,
                     )
-            elif f in copy:
-                f2 = copy[f]
+            elif f in branch_copies1.copy:
+                f2 = branch_copies1.copy[f]
                 actions[f] = (
                     ACTION_MERGE,
                     (f, f2, f2, False, pa.node()),
@@ -1397,10 +1406,10 @@
                 else:
                     actions[f] = (ACTION_REMOVE, None, b'other deleted')
         elif n2:  # file exists only on remote side
-            if f in copied:
+            if f in copied1:
                 pass  # we'll deal with it on m1 side
-            elif f in movewithdir:
-                f2 = movewithdir[f]
+            elif f in branch_copies2.movewithdir:
+                f2 = branch_copies2.movewithdir[f]
                 if f2 in m1:
                     actions[f2] = (
                         ACTION_MERGE,
@@ -1413,8 +1422,8 @@
                         (f, fl2),
                         b'local directory rename - get from %s' % f,
                     )
-            elif f in copy:
-                f2 = copy[f]
+            elif f in branch_copies2.copy:
+                f2 = branch_copies2.copy[f]
                 if f2 in m2:
                     actions[f] = (
                         ACTION_MERGE,
@@ -1451,10 +1460,10 @@
                     )
             elif n2 != ma[f]:
                 df = None
-                for d in dirmove:
+                for d in branch_copies1.dirmove:
                     if f.startswith(d):
                         # new file added in a directory that was moved
-                        df = dirmove[d] + f[len(d) :]
+                        df = branch_copies1.dirmove[d] + f[len(d) :]
                         break
                 if df is not None and df in m1:
                     actions[df] = (
@@ -1481,6 +1490,9 @@
         # Updates "actions" in place
         _filternarrowactions(narrowmatch, branchmerge, actions)
 
+    renamedelete = branch_copies1.renamedelete
+    renamedelete.update(branch_copies2.renamedelete)
+
     return actions, diverge, renamedelete
 
 
@@ -1784,8 +1796,8 @@
 
 def emptyactions():
     """create an actions dict, to be populated and passed to applyupdates()"""
-    return dict(
-        (m, [])
+    return {
+        m: []
         for m in (
             ACTION_ADD,
             ACTION_ADD_MODIFIED,
@@ -1802,7 +1814,7 @@
             ACTION_PATH_CONFLICT,
             ACTION_PATH_CONFLICT_RESOLVE,
         )
-    )
+    }
 
 
 def applyupdates(
@@ -2058,7 +2070,7 @@
 
     extraactions = ms.actions()
     if extraactions:
-        mfiles = set(a[0] for a in actions[ACTION_MERGE])
+        mfiles = {a[0] for a in actions[ACTION_MERGE]}
         for k, acts in pycompat.iteritems(extraactions):
             actions[k].extend(acts)
             if k == ACTION_GET and wantfiledata:
@@ -2205,6 +2217,7 @@
     labels=None,
     matcher=None,
     mergeforce=False,
+    updatedirstate=True,
     updatecheck=None,
     wc=None,
 ):
@@ -2288,13 +2301,6 @@
                     ),
                 )
             )
-    # If we're doing a partial update, we need to skip updating
-    # the dirstate, so make a note of any partial-ness to the
-    # update here.
-    if matcher is None or matcher.always():
-        partial = False
-    else:
-        partial = True
     with repo.wlock():
         if wc is None:
             wc = repo[None]
@@ -2507,7 +2513,11 @@
         ### apply phase
         if not branchmerge:  # just jump to the new rev
             fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
-        if not partial and not wc.isinmemory():
+        # If we're doing a partial update, we need to skip updating
+        # the dirstate.
+        always = matcher is None or matcher.always()
+        updatedirstate = updatedirstate and always and not wc.isinmemory()
+        if updatedirstate:
             repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
             # note that we're in the middle of an update
             repo.vfs.write(b'updatestate', p2.hex())
@@ -2553,7 +2563,6 @@
                 )
             )
 
-        updatedirstate = not partial and not wc.isinmemory()
         wantfiledata = updatedirstate and not branchmerge
         stats, getfiledata = applyupdates(
             repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
@@ -2574,15 +2583,65 @@
     if not branchmerge:
         sparse.prunetemporaryincludes(repo)
 
-    if not partial:
+    if updatedirstate:
         repo.hook(
             b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
         )
     return stats
 
 
+def merge(ctx, labels=None, force=False, wc=None):
+    """Merge another topological branch into the working copy.
+
+    force = whether the merge was run with 'merge --force' (deprecated)
+    """
+
+    return update(
+        ctx.repo(),
+        ctx.rev(),
+        labels=labels,
+        branchmerge=True,
+        force=force,
+        mergeforce=force,
+        wc=wc,
+    )
+
+
+def clean_update(ctx, wc=None):
+    """Do a clean update to the given commit.
+
+    This involves updating to the commit and discarding any changes in the
+    working copy.
+    """
+    return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
+
+
+def revert_to(ctx, matcher=None, wc=None):
+    """Revert the working copy to the given commit.
+
+    The working copy will keep its current parent(s) but its content will
+    be the same as in the given commit.
+    """
+
+    return update(
+        ctx.repo(),
+        ctx.rev(),
+        branchmerge=False,
+        force=True,
+        updatedirstate=False,
+        matcher=matcher,
+        wc=wc,
+    )
+
+
 def graft(
-    repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
+    repo,
+    ctx,
+    base=None,
+    labels=None,
+    keepparent=False,
+    keepconflictparent=False,
+    wctx=None,
 ):
     """Do a graft-like merge.
 
@@ -2593,7 +2652,7 @@
     renames/copies appropriately.
 
     ctx - changeset to rebase
-    base - merge base, usually ctx.p1()
+    base - merge base, or ctx.p1() if not specified
     labels - merge labels eg ['local', 'graft']
     keepparent - keep second parent if any
     keepconflictparent - if unresolved, keep parent used for the merge
@@ -2605,9 +2664,15 @@
     # to copy commits), and 2) informs update that the incoming changes are
     # newer than the destination so it doesn't prompt about "remote changed foo
     # which local deleted".
-    wctx = repo[None]
+    # We also pass mergeancestor=True when base is the same revision as p1. 2)
+    # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
+    wctx = wctx or repo[None]
     pctx = wctx.p1()
-    mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
+    base = base or ctx.p1()
+    mergeancestor = (
+        repo.changelog.isancestor(pctx.node(), ctx.node())
+        or pctx.rev() == base.rev()
+    )
 
     stats = update(
         repo,
@@ -2617,6 +2682,7 @@
         base.node(),
         mergeancestor=mergeancestor,
         labels=labels,
+        wc=wctx,
     )
 
     if keepconflictparent and stats.unresolvedcount:
@@ -2631,17 +2697,23 @@
     if pother == pctx.node():
         pother = nullid
 
-    with repo.dirstate.parentchange():
-        repo.setparents(pctx.node(), pother)
-        repo.dirstate.write(repo.currenttransaction())
+    if wctx.isinmemory():
+        wctx.setparents(pctx.node(), pother)
         # fix up dirstate for copies and renames
         copies.graftcopies(wctx, ctx, base)
+    else:
+        with repo.dirstate.parentchange():
+            repo.setparents(pctx.node(), pother)
+            repo.dirstate.write(repo.currenttransaction())
+            # fix up dirstate for copies and renames
+            copies.graftcopies(wctx, ctx, base)
     return stats
 
 
 def purge(
     repo,
     matcher,
+    unknown=True,
     ignored=False,
     removeemptydirs=True,
     removefiles=True,
@@ -2653,7 +2725,9 @@
     ``matcher`` is a matcher configured to scan the working directory -
     potentially a subset.
 
-    ``ignored`` controls whether ignored files should also be purged.
+    ``unknown`` controls whether unknown files should be purged.
+
+    ``ignored`` controls whether ignored files should be purged.
 
     ``removeemptydirs`` controls whether empty directories should be removed.
 
@@ -2690,7 +2764,7 @@
             directories = []
             matcher.traversedir = directories.append
 
-        status = repo.status(match=matcher, ignored=ignored, unknown=True)
+        status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
 
         if removefiles:
             for f in sorted(status.unknown + status.ignored):
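
The merge.py changes above split the old monolithic callers of update() into three thin entry points: merge() for branch merges, clean_update() for discarding working-copy changes, and revert_to(), which keeps the dirstate parents by passing updatedirstate=False. A condensed sketch of how they relate, with update() stubbed out so the example runs standalone (the dict-based ctx is purely illustrative):

    def update(repo, rev, branchmerge, force, matcher=None,
               updatedirstate=True, labels=None, wc=None, mergeforce=False):
        return ('update', rev, branchmerge, force, updatedirstate)  # stub

    def merge(ctx, labels=None, force=False, wc=None):
        return update(ctx['repo'], ctx['rev'], branchmerge=True, force=force,
                      mergeforce=force, labels=labels, wc=wc)

    def clean_update(ctx, wc=None):
        return update(ctx['repo'], ctx['rev'], branchmerge=False, force=True, wc=wc)

    def revert_to(ctx, matcher=None, wc=None):
        return update(ctx['repo'], ctx['rev'], branchmerge=False, force=True,
                      updatedirstate=False, matcher=matcher, wc=wc)

    ctx = {'repo': None, 'rev': 5}
    print(merge(ctx))         # ('update', 5, True, False, True)
    print(clean_update(ctx))  # ('update', 5, False, True, True)
    print(revert_to(ctx))     # ('update', 5, False, True, False)
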
--- a/mercurial/obsolete.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/obsolete.py	Mon Mar 09 10:18:40 2020 -0700
@@ -939,7 +939,7 @@
     getnode = repo.changelog.node
     notpublic = _mutablerevs(repo)
     isobs = repo.obsstore.successors.__contains__
-    obs = set(r for r in notpublic if isobs(getnode(r)))
+    obs = {r for r in notpublic if isobs(getnode(r))}
     return obs
 
 
@@ -965,7 +965,7 @@
 def _computesuspendedset(repo):
     """the set of obsolete parents with non obsolete descendants"""
     suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
-    return set(r for r in getrevs(repo, b'obsolete') if r in suspended)
+    return {r for r in getrevs(repo, b'obsolete') if r in suspended}
 
 
 @cachefor(b'extinct')
--- a/mercurial/obsutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/obsutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -194,7 +194,7 @@
 
 def _filterprunes(markers):
     """return a set with no prune markers"""
-    return set(m for m in markers if m[1])
+    return {m for m in markers if m[1]}
 
 
 def exclusivemarkers(repo, nodes):
@@ -338,12 +338,12 @@
         # compute the whole set of successors or descendants
         while len(foreground) != plen:
             plen = len(foreground)
-            succs = set(c.node() for c in foreground)
+            succs = {c.node() for c in foreground}
             mutable = [c.node() for c in foreground if c.mutable()]
             succs.update(allsuccessors(repo.obsstore, mutable))
             known = (n for n in succs if has_node(n))
             foreground = set(repo.set(b'%ln::', known))
-    return set(c.node() for c in foreground)
+    return {c.node() for c in foreground}
 
 
 # effectflag field
@@ -855,11 +855,11 @@
     """ Returns a sorted list of markers users without duplicates
     """
     markersmeta = [dict(m[3]) for m in markers]
-    users = set(
+    users = {
         encoding.tolocal(meta[b'user'])
         for meta in markersmeta
         if meta.get(b'user')
-    )
+    }
 
     return sorted(users)
 
@@ -868,9 +868,9 @@
     """ Returns a sorted list of markers operations without duplicates
     """
     markersmeta = [dict(m[3]) for m in markers]
-    operations = set(
+    operations = {
         meta.get(b'operation') for meta in markersmeta if meta.get(b'operation')
-    )
+    }
 
     return sorted(operations)
 
--- a/mercurial/patch.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/patch.py	Mon Mar 09 10:18:40 2020 -0700
@@ -2888,7 +2888,7 @@
     or 'rename' (the latter two only if opts.git is set).'''
     gone = set()
 
-    copyto = dict([(v, k) for k, v in copy.items()])
+    copyto = {v: k for k, v in copy.items()}
 
     addedset, removedset = set(added), set(removed)
 
--- a/mercurial/pathutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/pathutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -84,7 +84,7 @@
                         _(b"path contains illegal component: %s") % path
                     )
         if b'.hg' in _lowerclean(path):
-            lparts = [_lowerclean(p.lower()) for p in parts]
+            lparts = [_lowerclean(p) for p in parts]
             for p in b'.hg', b'.hg.':
                 if p in lparts[1:]:
                     pos = lparts.index(p)
@@ -99,10 +99,11 @@
 
         parts.pop()
         normparts.pop()
-        prefixes = []
         # It's important that we check the path parts starting from the root.
-        # This means we won't accidentally traverse a symlink into some other
-        # filesystem (which is potentially expensive to access).
+        # We don't want to add "foo/bar/baz" to auditeddir before checking if
+        # there's a "foo/.hg" directory. This also means we won't accidentally
+        # traverse a symlink into some other filesystem (which is potentially
+        # expensive to access).
         for i in range(len(parts)):
             prefix = pycompat.ossep.join(parts[: i + 1])
             normprefix = pycompat.ossep.join(normparts[: i + 1])
@@ -110,13 +111,11 @@
                 continue
             if self._realfs:
                 self._checkfs(prefix, path)
-            prefixes.append(normprefix)
+            if self._cached:
+                self.auditeddir.add(normprefix)
 
         if self._cached:
             self.audited.add(normpath)
-            # only add prefixes to the cache after checking everything: we don't
-            # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
-            self.auditeddir.update(prefixes)
 
     def _checkfs(self, prefix, path):
         """raise exception if a file system backed check fails"""
--- a/mercurial/phases.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/phases.py	Mon Mar 09 10:18:40 2020 -0700
@@ -445,10 +445,10 @@
                     phasetracking, r, self.phase(repo, r), targetphase
                 )
 
-            roots = set(
+            roots = {
                 ctx.node()
                 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
-            )
+            }
             if olds != roots:
                 self._updateroots(phase, roots, tr)
                 # some roots may need to be declared for lower phases
@@ -518,9 +518,7 @@
             ]
             updatedroots = repo.set(b'roots(%ln::)', aboveroots)
 
-            finalroots = set(
-                n for n in currentroots if repo[n].rev() < minnewroot
-            )
+            finalroots = {n for n in currentroots if repo[n].rev() < minnewroot}
             finalroots.update(ctx.node() for ctx in updatedroots)
         if finalroots != oldroots:
             self._updateroots(targetphase, finalroots, tr)
@@ -760,7 +758,7 @@
     if not heads or heads == [nullid]:
         return []
     # The logic operated on revisions, convert arguments early for convenience
-    new_heads = set(rev(n) for n in heads if n != nullid)
+    new_heads = {rev(n) for n in heads if n != nullid}
     roots = [rev(n) for n in roots]
     # compute the area we need to remove
     affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
--- a/mercurial/posix.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/posix.py	Mon Mar 09 10:18:40 2020 -0700
@@ -324,9 +324,8 @@
                     open(fullpath, b'w').close()
                 except IOError as inst:
                     if (
-                        inst[0]  # pytype: disable=unsupported-operands
-                        == errno.EACCES
-                    ):
+                        inst[0] == errno.EACCES
+                    ):  # pytype: disable=unsupported-operands
                         # If we can't write to cachedir, just pretend
                         # that the fs is readonly and by association
                         # that the fs won't support symlinks. This
--- a/mercurial/profiling.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/profiling.py	Mon Mar 09 10:18:40 2020 -0700
@@ -186,6 +186,7 @@
         self._output = None
         self._fp = None
         self._fpdoclose = True
+        self._flushfp = None
         self._profiler = None
         self._enabled = enabled
         self._entered = False
@@ -246,6 +247,8 @@
             else:
                 self._fpdoclose = False
                 self._fp = self._ui.ferr
+                # Ensure we've flushed fout before writing to ferr.
+                self._flushfp = self._ui.fout
 
             if proffn is not None:
                 pass
@@ -265,6 +268,7 @@
     def __exit__(self, exception_type, exception_value, traceback):
         propagate = None
         if self._profiler is not None:
+            self._uiflush()
             propagate = self._profiler.__exit__(
                 exception_type, exception_value, traceback
             )
@@ -280,3 +284,7 @@
     def _closefp(self):
         if self._fpdoclose and self._fp is not None:
             self._fp.close()
+
+    def _uiflush(self):
+        if self._flushfp:
+            self._flushfp.flush()
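
The profiler change above remembers ui.fout so it can be flushed before the report is written to ui.ferr. A trivial standalone illustration of the ordering concern when both streams end up in the same file:

    import sys

    sys.stdout.write('command output\n')
    sys.stdout.flush()                 # ensure it lands before the report
    sys.stderr.write('--- profile report ---\n')
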
--- a/mercurial/pure/parsers.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/pure/parsers.py	Mon Mar 09 10:18:40 2020 -0700
@@ -141,6 +141,50 @@
             self._extra = self._extra[: i - self._lgt]
 
 
+class PersistentNodeMapIndexObject(IndexObject):
+    """a Debug oriented class to test persistent nodemap
+
+    We need a simple python object to test API and higher level behavior. See
+    the Rust implementation for  more serious usage. This should be used only
+    through the dedicated `devel.persistent-nodemap` config.
+    """
+
+    def nodemap_data_all(self):
+        """Return bytes containing a full serialization of a nodemap
+
+        The nodemap should be valid for the full set of revisions in the
+        index."""
+        return nodemaputil.persistent_data(self)
+
+    def nodemap_data_incremental(self):
+        """Return bytes containing a incremental update to persistent nodemap
+
+        This containst the data for an append-only update of the data provided
+        in the last call to `update_nodemap_data`.
+        """
+        if self._nm_root is None:
+            return None
+        docket = self._nm_docket
+        changed, data = nodemaputil.update_persistent_data(
+            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
+        )
+
+        self._nm_root = self._nm_max_idx = self._nm_docket = None
+        return docket, changed, data
+
+    def update_nodemap_data(self, docket, nm_data):
+        """provide full block of persisted binary data for a nodemap
+
+        The data are expected to come from disk. See `nodemap_data_all` for a
+        producer of such data."""
+        if nm_data is not None:
+            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
+            if self._nm_root:
+                self._nm_docket = docket
+            else:
+                self._nm_root = self._nm_max_idx = self._nm_docket = None
+
+
 class InlinedIndexObject(BaseIndexObject):
     def __init__(self, data, inline=0):
         self._data = data
@@ -188,6 +232,12 @@
     return InlinedIndexObject(data, inline), (0, data)
 
 
+def parse_index_devel_nodemap(data, inline):
+    """like parse_index2, but alway return a PersistentNodeMapIndexObject
+    """
+    return PersistentNodeMapIndexObject(data), None
+
+
 def parse_dirstate(dmap, copymap, st):
     parents = [st[:20], st[20:40]]
     # dereference fields so they will be local in loop
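
PersistentNodeMapIndexObject above exposes a three-call protocol: nodemap_data_all() for a full serialization, nodemap_data_incremental() for an append-only tail since the last feed, and update_nodemap_data() to feed persisted data back in. A toy model of that protocol with the data reduced to a byte string (not the real index or docket types):

    class ToyNodemapIndex(object):
        def __init__(self):
            self._all = b''        # current full serialization
            self._persisted = b''  # what update_nodemap_data() was last fed

        def add(self, chunk):
            self._all += chunk

        def nodemap_data_all(self):
            return self._all

        def nodemap_data_incremental(self):
            if not self._persisted:
                return None
            tail = self._all[len(self._persisted):]
            self._persisted = b''
            return tail

        def update_nodemap_data(self, data):
            self._persisted = data

    idx = ToyNodemapIndex()
    idx.add(b'abc')
    idx.update_nodemap_data(b'abc')        # pretend this came from disk
    idx.add(b'def')
    print(idx.nodemap_data_incremental())  # b'def' -> append-only update
    print(idx.nodemap_data_all())          # b'abcdef'
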
--- a/mercurial/pycompat.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/pycompat.py	Mon Mar 09 10:18:40 2020 -0700
@@ -334,7 +334,7 @@
         they can be passed as keyword arguments as dictonaries with bytes keys
         can't be passed as keyword arguments to functions on Python 3.
         """
-        dic = dict((k.decode('latin-1'), v) for k, v in dic.items())
+        dic = {k.decode('latin-1'): v for k, v in dic.items()}
         return dic
 
     def byteskwargs(dic):
@@ -342,7 +342,7 @@
         Converts keys of python dictonaries to bytes as they were converted to
         str to pass that dictonary as a keyword argument on Python 3.
         """
-        dic = dict((k.encode('latin-1'), v) for k, v in dic.items())
+        dic = {k.encode('latin-1'): v for k, v in dic.items()}
         return dic
 
     # TODO: handle shlex.shlex().
--- a/mercurial/repair.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/repair.py	Mon Mar 09 10:18:40 2020 -0700
@@ -351,7 +351,7 @@
 def safestriproots(ui, repo, nodes):
     """return list of roots of nodes where descendants are covered by nodes"""
     torev = repo.unfiltered().changelog.rev
-    revs = set(torev(n) for n in nodes)
+    revs = {torev(n) for n in nodes}
     # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
     # orphaned = affected - wanted
     # affected = descendants(roots(wanted))
--- a/mercurial/revlog.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/revlog.py	Mon Mar 09 10:18:40 2020 -0700
@@ -352,6 +352,21 @@
         return p
 
 
+NodemapRevlogIO = None
+
+if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+
+    class NodemapRevlogIO(revlogio):
+        """A debug oriented IO class that return a PersistentNodeMapIndexObject
+
+        The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
+        """
+
+        def parseindex(self, data, inline):
+            index, cache = parsers.parse_index_devel_nodemap(data, inline)
+            return index, cache
+
+
 class rustrevlogio(revlogio):
     def parseindex(self, data, inline):
         index, cache = super(rustrevlogio, self).parseindex(data, inline)
@@ -407,6 +422,7 @@
         mmaplargeindex=False,
         censorable=False,
         upperboundcomp=None,
+        persistentnodemap=False,
     ):
         """
         create a revlog object
@@ -418,6 +434,10 @@
         self.upperboundcomp = upperboundcomp
         self.indexfile = indexfile
         self.datafile = datafile or (indexfile[:-2] + b".d")
+        self.nodemap_file = None
+        if persistentnodemap:
+            self.nodemap_file = indexfile[:-2] + b".n"
+
         self.opener = opener
         #  When True, indexfile is opened with checkambig=True at writing, to
         #  avoid file stat ambiguity.
@@ -435,6 +455,7 @@
         self._maxchainlen = None
         self._deltabothparents = True
         self.index = None
+        self._nodemap_docket = None
         # Mapping of partial identifiers to full nodes.
         self._pcache = {}
         # Mapping of revision integer to full node.
@@ -591,13 +612,32 @@
 
         self._storedeltachains = True
 
+        devel_nodemap = (
+            self.nodemap_file
+            and opts.get(b'devel-force-nodemap', False)
+            and NodemapRevlogIO is not None
+        )
+
         self._io = revlogio()
         if self.version == REVLOGV0:
             self._io = revlogoldio()
+        elif devel_nodemap:
+            self._io = NodemapRevlogIO()
         elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
             self._io = rustrevlogio()
         try:
             d = self._io.parseindex(indexdata, self._inline)
+            index, _chunkcache = d
+            use_nodemap = (
+                not self._inline
+                and self.nodemap_file is not None
+                and util.safehasattr(index, 'update_nodemap_data')
+            )
+            if use_nodemap:
+                nodemap_data = nodemaputil.persisted_data(self)
+                if nodemap_data is not None:
+                    self._nodemap_docket = nodemap_data[0]
+                    index.update_nodemap_data(*nodemap_data)
         except (ValueError, IndexError):
             raise error.RevlogError(
                 _(b"index %s is corrupted") % self.indexfile
@@ -708,6 +748,13 @@
             return False
         return True
 
+    def update_caches(self, transaction):
+        if self.nodemap_file is not None:
+            if transaction is None:
+                nodemaputil.update_persistent_nodemap(self)
+            else:
+                nodemaputil.setup_persistent_nodemap(transaction, self)
+
     def clearcaches(self):
         self._revisioncache = None
         self._chainbasecache.clear()
@@ -1239,7 +1286,7 @@
         else:
             start = self.rev(start)
 
-        stoprevs = set(self.rev(n) for n in stop or [])
+        stoprevs = {self.rev(n) for n in stop or []}
 
         revs = dagop.headrevssubset(
             self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
@@ -1960,6 +2007,7 @@
             # manager
 
         tr.replace(self.indexfile, trindex * self._io.size)
+        nodemaputil.setup_persistent_nodemap(tr, self)
         self._chunkclear()
 
     def _nodeduplicatecallback(self, transaction, node):
@@ -2286,6 +2334,7 @@
             ifh.write(data[0])
             ifh.write(data[1])
             self._enforceinlinesize(transaction, ifh)
+        nodemaputil.setup_persistent_nodemap(transaction, self)
 
     def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
         """
--- a/mercurial/revlogutils/nodemap.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/revlogutils/nodemap.py	Mon Mar 09 10:18:40 2020 -0700
@@ -7,9 +7,548 @@
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
-from .. import error
+
+import errno
+import os
+import re
+import struct
+
+from .. import (
+    error,
+    node as nodemod,
+    util,
+)
 
 
 class NodeMap(dict):
     def __missing__(self, x):
         raise error.RevlogError(b'unknown node: %s' % x)
+
+
+def persisted_data(revlog):
+    """read the nodemap for a revlog from disk"""
+    if revlog.nodemap_file is None:
+        return None
+    pdata = revlog.opener.tryread(revlog.nodemap_file)
+    if not pdata:
+        return None
+    offset = 0
+    (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
+    if version != ONDISK_VERSION:
+        return None
+    offset += S_VERSION.size
+    headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
+    uid_size, tip_rev, data_length, data_unused = headers
+    offset += S_HEADER.size
+    docket = NodeMapDocket(pdata[offset : offset + uid_size])
+    docket.tip_rev = tip_rev
+    docket.data_length = data_length
+    docket.data_unused = data_unused
+
+    filename = _rawdata_filepath(revlog, docket)
+    use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
+    try:
+        with revlog.opener(filename) as fd:
+            if use_mmap:
+                data = util.buffer(util.mmapread(fd, data_length))
+            else:
+                data = fd.read(data_length)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        return None
+    if len(data) < data_length:
+        return None
+    return docket, data
+
+
+def setup_persistent_nodemap(tr, revlog):
+    """Install whatever is needed transaction side to persist a nodemap on disk
+
+    (only actually persist the nodemap if this is relevant for this revlog)
+    """
+    if revlog._inline:
+        return  # inlined revlogs are too small for this to be relevant
+    if revlog.nodemap_file is None:
+        return  # we do not use persistent_nodemap on this revlog
+    callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
+    if tr.hasfinalize(callback_id):
+        return  # no need to register again
+    tr.addfinalize(
+        callback_id, lambda tr: _persist_nodemap(tr.addpostclose, revlog)
+    )
+
+
+def update_persistent_nodemap(revlog):
+    """update the persistent nodemap right now
+
+    To be used for updating the nodemap on disk outside of a normal transaction
+    setup (eg, `debugupdatecache`).
+    """
+    cleanups = []
+    _persist_nodemap((lambda x, y: cleanups.append(y)), revlog)
+    for c in cleanups:
+        c(None)
+
+
+def _persist_nodemap(cleaner, revlog):
+    """Write nodemap data on disk for a given revlog
+    """
+    if getattr(revlog, 'filteredrevs', ()):
+        raise error.ProgrammingError(
+            "cannot persist nodemap of a filtered changelog"
+        )
+    if revlog.nodemap_file is None:
+        msg = "calling persist nodemap on a revlog without the feature enableb"
+        raise error.ProgrammingError(msg)
+
+    can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
+    ondisk_docket = revlog._nodemap_docket
+    feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
+    use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
+
+    data = None
+    # first attempt an incremental update of the data
+    if can_incremental and ondisk_docket is not None:
+        target_docket = revlog._nodemap_docket.copy()
+        (
+            src_docket,
+            data_changed_count,
+            data,
+        ) = revlog.index.nodemap_data_incremental()
+        if src_docket != target_docket:
+            data = None
+        else:
+            datafile = _rawdata_filepath(revlog, target_docket)
+            # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+            # store vfs
+            new_length = target_docket.data_length + len(data)
+            with revlog.opener(datafile, b'r+') as fd:
+                fd.seek(target_docket.data_length)
+                fd.write(data)
+                if feed_data:
+                    if use_mmap:
+                        fd.seek(0)
+                        new_data = fd.read(new_length)
+                    else:
+                        fd.flush()
+                        new_data = util.buffer(util.mmapread(fd, new_length))
+            target_docket.data_length = new_length
+            target_docket.data_unused += data_changed_count
+
+    if data is None:
+        # otherwise fallback to a full new export
+        target_docket = NodeMapDocket()
+        datafile = _rawdata_filepath(revlog, target_docket)
+        if util.safehasattr(revlog.index, "nodemap_data_all"):
+            data = revlog.index.nodemap_data_all()
+        else:
+            data = persistent_data(revlog.index)
+        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+        # store vfs
+        with revlog.opener(datafile, b'w+') as fd:
+            fd.write(data)
+            if feed_data:
+                if use_mmap:
+                    fd.flush()
+                    new_data = util.buffer(util.mmapread(fd, len(data)))
+                else:
+                    new_data = data
+        target_docket.data_length = len(data)
+    target_docket.tip_rev = revlog.tiprev()
+    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+    # store vfs
+    with revlog.opener(revlog.nodemap_file, b'w', atomictemp=True) as fp:
+        fp.write(target_docket.serialize())
+    revlog._nodemap_docket = target_docket
+    if feed_data:
+        revlog.index.update_nodemap_data(target_docket, new_data)
+
+    # EXP-TODO: if the transaction abort, we should remove the new data and
+    # reinstall the old one.
+
+    # search for old raw data files in all cases, some older process might have
+    # left one behind.
+    olds = _other_rawdata_filepath(revlog, target_docket)
+    if olds:
+        realvfs = getattr(revlog, '_realopener', revlog.opener)
+
+        def cleanup(tr):
+            for oldfile in olds:
+                realvfs.tryunlink(oldfile)
+
+        callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
+        cleaner(callback_id, cleanup)
+
+
+### Nodemap docket file
+#
+# The nodemap data are stored on disk using 2 files:
+#
+# * a raw data file containing a persistent nodemap
+#   (see `Nodemap Trie` section)
+#
+# * a small "docket" file containing metadata
+#
+# While the nodemap data can be multiple tens of megabytes, the "docket" is
+# small, so it is easy to update it automatically or to duplicate its content
+# during a transaction.
+#
+# Multiple raw data can exist at the same time (the currently valid one and a
+# new one being used by an in-progress transaction). To accommodate this, the
+# filename hosting the raw data has a variable part. The exact filename is
+# specified inside the "docket" file.
+#
+# The docket file contains information to find, qualify and validate the raw
+# data. Its content is currently very light, but it will expand as the on disk
+# nodemap gains the necessary features to be used in production.
+
+# version 0 is experimental, no BC guarantee, do not use outside of tests.
+ONDISK_VERSION = 0
+S_VERSION = struct.Struct(">B")
+S_HEADER = struct.Struct(">BQQQ")
+
+ID_SIZE = 8
+
+
+def _make_uid():
+    """return a new unique identifier.
+
+    The identifier is random and composed of ascii characters."""
+    return nodemod.hex(os.urandom(ID_SIZE))
+
+
+class NodeMapDocket(object):
+    """metadata associated with persistent nodemap data
+
+    The persistent data may come from disk or be on their way to disk.
+    """
+
+    def __init__(self, uid=None):
+        if uid is None:
+            uid = _make_uid()
+        self.uid = uid
+        self.tip_rev = None
+        self.data_length = None
+        self.data_unused = 0
+
+    def copy(self):
+        new = NodeMapDocket(uid=self.uid)
+        new.tip_rev = self.tip_rev
+        new.data_length = self.data_length
+        new.data_unused = self.data_unused
+        return new
+
+    def __cmp__(self, other):
+        if self.uid < other.uid:
+            return -1
+        if self.uid > other.uid:
+            return 1
+        elif self.data_length < other.data_length:
+            return -1
+        elif self.data_length > other.data_length:
+            return 1
+        return 0
+
+    def __eq__(self, other):
+        return self.uid == other.uid and self.data_length == other.data_length
+
+    def serialize(self):
+        """return the serialized bytes for this docket"""
+        data = []
+        data.append(S_VERSION.pack(ONDISK_VERSION))
+        headers = (
+            len(self.uid),
+            self.tip_rev,
+            self.data_length,
+            self.data_unused,
+        )
+        data.append(S_HEADER.pack(*headers))
+        data.append(self.uid)
+        return b''.join(data)
+
+
+def _rawdata_filepath(revlog, docket):
+    """The (vfs relative) nodemap's rawdata file for a given uid"""
+    prefix = revlog.nodemap_file[:-2]
+    return b"%s-%s.nd" % (prefix, docket.uid)
+
+
+def _other_rawdata_filepath(revlog, docket):
+    prefix = revlog.nodemap_file[:-2]
+    pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
+    new_file_path = _rawdata_filepath(revlog, docket)
+    new_file_name = revlog.opener.basename(new_file_path)
+    dirpath = revlog.opener.dirname(new_file_path)
+    others = []
+    for f in revlog.opener.listdir(dirpath):
+        if pattern.match(f) and f != new_file_name:
+            others.append(f)
+    return others
+
+
+### Nodemap Trie
+#
+# This is a simple reference implementation to compute and persist a nodemap
+# trie. This reference implementation is write only. The python version of this
+# is not expected to be actually used, since it won't provide a performance
+# improvement over the existing non-persistent C implementation.
+#
+# The nodemap is persisted as a Trie using 4-bit addresses and 16-entry blocks.
+# Each revision can be addressed using its node's shortest prefix.
+#
+# The trie is stored as a sequence of blocks. Each block contains 16 entries
+# (signed 32-bit integers, big endian). Each entry can be one of the following:
+#
+#  * value >=  0 -> index of sub-block
+#  * value == -1 -> no value
+#  * value <  -1 -> a revision value: rev = -(value+2)
+#
+# The implementation focuses on simplicity, not on performance. A Rust
+# implementation should provide an efficient version of the same binary
+# persistence. This reference python implementation is never meant to be
+# extensively used in production.
+
+
+def persistent_data(index):
+    """return the persistent binary form for a nodemap for a given index
+    """
+    trie = _build_trie(index)
+    return _persist_trie(trie)
+
+
+def update_persistent_data(index, root, max_idx, last_rev):
+    """return the incremental update for persistent nodemap from a given index
+    """
+    changed_block, trie = _update_trie(index, root, last_rev)
+    return (
+        changed_block * S_BLOCK.size,
+        _persist_trie(trie, existing_idx=max_idx),
+    )
+
+
+S_BLOCK = struct.Struct(">" + ("l" * 16))
+
+NO_ENTRY = -1
+# rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
+REV_OFFSET = 2
+
+
+def _transform_rev(rev):
+    """Return the number used to represent the rev in the tree.
+
+    (or retrieve a rev number from such representation)
+
+    Note that this is an involution, a function equal to its inverse (i.e.
+    which gives the identity when applied to itself).
+    """
+    return -(rev + REV_OFFSET)
+
+
+def _to_int(hex_digit):
+    """turn an hexadecimal digit into a proper integer"""
+    return int(hex_digit, 16)
+
+
+class Block(dict):
+    """represent a block of the Trie
+
+    contains up to 16 entries indexed from 0 to 15"""
+
+    def __init__(self):
+        super(Block, self).__init__()
+        # If this block exists on disk, here is its ID
+        self.ondisk_id = None
+
+    def __iter__(self):
+        return iter(self.get(i) for i in range(16))
+
+
+def _build_trie(index):
+    """build a nodemap trie
+
+    The nodemap stores revision number for each unique prefix.
+
+    Each block is a dictionary with keys in `[0, 15]`. Values are either
+    another block or a revision number.
+    """
+    root = Block()
+    for rev in range(len(index)):
+        hex = nodemod.hex(index[rev][7])
+        _insert_into_block(index, 0, root, rev, hex)
+    return root
+
+
+def _update_trie(index, root, last_rev):
+    """update a trie with the revisions added to the index since last_rev"""
+    changed = 0
+    for rev in range(last_rev + 1, len(index)):
+        hex = nodemod.hex(index[rev][7])
+        changed += _insert_into_block(index, 0, root, rev, hex)
+    return changed, root
+
+
+def _insert_into_block(index, level, block, current_rev, current_hex):
+    """insert a new revision in a block
+
+    index: the index we are adding revision for
+    level: the depth of the current block in the trie
+    block: the block currently being considered
+    current_rev: the revision number we are adding
+    current_hex: the hexadecimal representation of the node of that revision
+    """
+    changed = 1
+    if block.ondisk_id is not None:
+        block.ondisk_id = None
+    hex_digit = _to_int(current_hex[level : level + 1])
+    entry = block.get(hex_digit)
+    if entry is None:
+        # no entry, simply store the revision number
+        block[hex_digit] = current_rev
+    elif isinstance(entry, dict):
+        # need to recurse to an underlying block
+        changed += _insert_into_block(
+            index, level + 1, entry, current_rev, current_hex
+        )
+    else:
+        # collision with a previously unique prefix, inserting new
+        # vertices to fit both entries.
+        other_hex = nodemod.hex(index[entry][7])
+        other_rev = entry
+        new = Block()
+        block[hex_digit] = new
+        _insert_into_block(index, level + 1, new, other_rev, other_hex)
+        _insert_into_block(index, level + 1, new, current_rev, current_hex)
+    return changed
+
+
+def _persist_trie(root, existing_idx=None):
+    """turn a nodemap trie into persistent binary data
+
+    See `_build_trie` for nodemap trie structure"""
+    block_map = {}
+    if existing_idx is not None:
+        base_idx = existing_idx + 1
+    else:
+        base_idx = 0
+    chunks = []
+    for tn in _walk_trie(root):
+        if tn.ondisk_id is not None:
+            block_map[id(tn)] = tn.ondisk_id
+        else:
+            block_map[id(tn)] = len(chunks) + base_idx
+            chunks.append(_persist_block(tn, block_map))
+    return b''.join(chunks)
+
+
+def _walk_trie(block):
+    """yield all the blocks in a trie
+
+    Children blocks are always yielded before their parent block.
+    """
+    for (_, item) in sorted(block.items()):
+        if isinstance(item, dict):
+            for sub_block in _walk_trie(item):
+                yield sub_block
+    yield block
+
+
+def _persist_block(block_node, block_map):
+    """produce persistent binary data for a single block
+
+    Children blocks are assumed to be already persisted and present in
+    block_map.
+    """
+    data = tuple(_to_value(v, block_map) for v in block_node)
+    return S_BLOCK.pack(*data)
+
+
+def _to_value(item, block_map):
+    """persist any value as an integer"""
+    if item is None:
+        return NO_ENTRY
+    elif isinstance(item, dict):
+        return block_map[id(item)]
+    else:
+        return _transform_rev(item)
+
+
+def parse_data(data):
+    """parse nodemap data into a nodemap Trie"""
+    if (len(data) % S_BLOCK.size) != 0:
+        msg = "nodemap data size is not a multiple of block size (%d): %d"
+        raise error.Abort(msg % (S_BLOCK.size, len(data)))
+    if not data:
+        return Block(), None
+    block_map = {}
+    new_blocks = []
+    for i in range(0, len(data), S_BLOCK.size):
+        block = Block()
+        block.ondisk_id = len(block_map)
+        block_map[block.ondisk_id] = block
+        block_data = data[i : i + S_BLOCK.size]
+        values = S_BLOCK.unpack(block_data)
+        new_blocks.append((block, values))
+    for b, values in new_blocks:
+        for idx, v in enumerate(values):
+            if v == NO_ENTRY:
+                continue
+            elif v >= 0:
+                b[idx] = block_map[v]
+            else:
+                b[idx] = _transform_rev(v)
+    return block, i // S_BLOCK.size
+
+
+# debug utility
+
+
+def check_data(ui, index, data):
+    """verify that the provided nodemap data are valid for the given index"""
+    ret = 0
+    ui.status((b"revision in index:   %d\n") % len(index))
+    root, __ = parse_data(data)
+    all_revs = set(_all_revisions(root))
+    ui.status((b"revision in nodemap: %d\n") % len(all_revs))
+    for r in range(len(index)):
+        if r not in all_revs:
+            msg = b"  revision missing from nodemap: %d\n" % r
+            ui.write_err(msg)
+            ret = 1
+        else:
+            all_revs.remove(r)
+        nm_rev = _find_node(root, nodemod.hex(index[r][7]))
+        if nm_rev is None:
+            msg = b"  revision node does not match any entries: %d\n" % r
+            ui.write_err(msg)
+            ret = 1
+        elif nm_rev != r:
+            msg = (
+                b"  revision node does not match the expected revision: "
+                b"%d != %d\n" % (r, nm_rev)
+            )
+            ui.write_err(msg)
+            ret = 1
+
+    if all_revs:
+        for r in sorted(all_revs):
+            msg = b"  extra revision in  nodemap: %d\n" % r
+            ui.write_err(msg)
+        ret = 1
+    return ret
+
+
+def _all_revisions(root):
+    """return all revisions stored in a Trie"""
+    for block in _walk_trie(root):
+        for v in block:
+            if v is None or isinstance(v, Block):
+                continue
+            yield v
+
+
+def _find_node(block, node):
+    """find the revision associated with a given node"""
+    entry = block.get(_to_int(node[0:1]))
+    if isinstance(entry, dict):
+        return _find_node(entry, node[1:])
+    return entry
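
To make the on-disk layout described in the comments above easier to follow, here
is a small, self-contained sketch of a docket header and of a single trie block.
It reuses only the struct formats and the REV_OFFSET involution defined in this
file; the uid, revision numbers and lengths are sample values for illustration,
not anything the module actually writes::

    import struct

    S_VERSION = struct.Struct(">B")
    S_HEADER = struct.Struct(">BQQQ")        # uid size, tip rev, data length, data unused
    S_BLOCK = struct.Struct(">" + "l" * 16)  # one block: 16 signed 32-bit entries
    NO_ENTRY = -1
    REV_OFFSET = 2

    def transform_rev(rev):
        # involution: encode a revision as a value < -1, and decode it back
        return -(rev + REV_OFFSET)

    # a docket for version 0: a 16-byte uid, tip rev 41, 128 bytes of data, none unused
    uid = b"0123456789abcdef"
    docket = S_VERSION.pack(0) + S_HEADER.pack(len(uid), 41, 128, 0) + uid

    # a leaf block where hex digit 0x3 maps to revision 7 and all other slots are empty
    entries = [NO_ENTRY] * 16
    entries[0x3] = transform_rev(7)
    block = S_BLOCK.pack(*entries)

    assert len(block) == 64
    assert transform_rev(transform_rev(7)) == 7
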
--- a/mercurial/revset.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/revset.py	Mon Mar 09 10:18:40 2020 -0700
@@ -769,6 +769,38 @@
     return subset
 
 
+@predicate(b'conflictlocal()', safe=True)
+def conflictlocal(repo, subset, x):
+    """The local side of the merge, if currently in an unresolved merge.
+
+    "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
+    """
+    getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
+    from . import merge
+
+    mergestate = merge.mergestate.read(repo)
+    if mergestate.active() and repo.changelog.hasnode(mergestate.local):
+        return subset & {repo.changelog.rev(mergestate.local)}
+
+    return baseset()
+
+
+@predicate(b'conflictother()', safe=True)
+def conflictother(repo, subset, x):
+    """The other side of the merge, if currently in an unresolved merge.
+
+    "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
+    """
+    getargs(x, 0, 0, _(b"conflictother takes no arguments"))
+    from . import merge
+
+    mergestate = merge.mergestate.read(repo)
+    if mergestate.active() and repo.changelog.hasnode(mergestate.other):
+        return subset & {repo.changelog.rev(mergestate.other)}
+
+    return baseset()
+
+
 @predicate(b'contains(pattern)', weight=100)
 def contains(repo, subset, x):
     """The revision's manifest contains a file matching pattern (but might not
@@ -1843,7 +1875,7 @@
     The set of all parents for all changesets in set, or the working directory.
     """
     if x is None:
-        ps = set(p.rev() for p in repo[x].parents())
+        ps = {p.rev() for p in repo[x].parents()}
     else:
         ps = set()
         cl = repo.changelog
@@ -2405,7 +2437,7 @@
     cl = repo.unfiltered().changelog
     torev = cl.index.get_rev
     tonode = cl.node
-    result = set(torev(n) for n in f(tonode(r) for r in s))
+    result = {torev(n) for n in f(tonode(r) for r in s)}
     result.discard(None)
     return smartset.baseset(result - repo.changelog.filteredrevs)
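
The new predicates are queried like any other revset, for example
`hg log -r 'conflictlocal() or conflictother()'`. A hypothetical extension helper
built on top of them could look like the sketch below (it assumes `repo` is an
already-loaded localrepository; the helper name is made up)::

    def show_merge_parents(repo):
        # print both sides of an unresolved merge, if any
        for rev in repo.revs(b"conflictlocal() or conflictother()"):
            repo.ui.write(b"unresolved merge parent: %d\n" % rev)
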
 
--- a/mercurial/scmutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/scmutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1457,10 +1457,10 @@
     # Merge old parent and old working dir copies
     oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
     oldcopies.update(copies)
-    copies = dict(
-        (dst, oldcopies.get(src, src))
+    copies = {
+        dst: oldcopies.get(src, src)
         for dst, src in pycompat.iteritems(oldcopies)
-    )
+    }
     # Adjust the dirstate copies
     for dst, src in pycompat.iteritems(copies):
         if src not in newctx or dst in newctx or ds[dst] != b'a':
--- a/mercurial/shelve.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/shelve.py	Mon Mar 09 10:18:40 2020 -0700
@@ -745,7 +745,7 @@
         try:
             checkparents(repo, state)
 
-            merge.update(repo, state.pendingctx, branchmerge=False, force=True)
+            merge.clean_update(state.pendingctx)
             if state.activebookmark and state.activebookmark in repo._bookmarks:
                 bookmarks.activate(repo, state.activebookmark)
             mergefiles(ui, repo, state.wctx, state.pendingctx)
@@ -827,10 +827,6 @@
                 )
 
         if newnode is None:
-            # If it ended up being a no-op commit, then the normal
-            # merge state clean-up path doesn't happen, so do it
-            # here. Fix issue5494
-            merge.mergestate.clean(repo)
             shelvectx = state.pendingctx
             msg = _(
                 b'note: unshelved changes already existed '
@@ -996,7 +992,6 @@
         stats = merge.graft(
             repo,
             shelvectx,
-            shelvectx.p1(),
             labels=[b'working-copy', b'shelve'],
             keepconflictparent=True,
         )
@@ -1032,10 +1027,6 @@
             )
 
         if newnode is None:
-            # If it ended up being a no-op commit, then the normal
-            # merge state clean-up path doesn't happen, so do it
-            # here. Fix issue5494
-            merge.mergestate.clean(repo)
             shelvectx = tmpwctx
             msg = _(
                 b'note: unshelved changes already existed '
--- a/mercurial/store.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/store.py	Mon Mar 09 10:18:40 2020 -0700
@@ -137,7 +137,7 @@
     asciistr = list(map(xchr, range(127)))
     capitals = list(range(ord(b"A"), ord(b"Z") + 1))
 
-    cmap = dict((x, x) for x in asciistr)
+    cmap = {x: x for x in asciistr}
     for x in _reserved():
         cmap[xchr(x)] = b"~%02x" % x
     for x in capitals + [ord(e)]:
@@ -200,7 +200,7 @@
     'the~07quick~adshot'
     '''
     xchr = pycompat.bytechr
-    cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
+    cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
     for x in _reserved():
         cmap[xchr(x)] = b"~%02x" % x
     for x in range(ord(b"A"), ord(b"Z") + 1):
--- a/mercurial/subrepo.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/subrepo.py	Mon Mar 09 10:18:40 2020 -0700
@@ -806,7 +806,7 @@
                 self.ui.debug(
                     b'merging subrepository "%s"\n' % subrelpath(self)
                 )
-                hg.merge(self._repo, state[1], remind=False)
+                hg.merge(dst, remind=False)
 
         wctx = self._repo[None]
         if self.dirty():
--- a/mercurial/tags.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/tags.py	Mon Mar 09 10:18:40 2020 -0700
@@ -720,15 +720,20 @@
 
         self._dirtyoffset = None
 
-        if rawlen < wantedlen:
-            self._dirtyoffset = rawlen
-            self._raw.extend(b'\xff' * (wantedlen - rawlen))
-        elif rawlen > wantedlen:
+        rawlentokeep = min(
+            wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize
+        )
+        if rawlen > rawlentokeep:
             # There's no easy way to truncate array instances. This seems
             # slightly less evil than copying a potentially large array slice.
-            for i in range(rawlen - wantedlen):
+            for i in range(rawlen - rawlentokeep):
                 self._raw.pop()
-            self._dirtyoffset = len(self._raw)
+            rawlen = len(self._raw)
+            self._dirtyoffset = rawlen
+        if rawlen < wantedlen:
+            if self._dirtyoffset is None:
+                self._dirtyoffset = rawlen
+            self._raw.extend(b'\xff' * (wantedlen - rawlen))
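
The change above keeps the cache aligned on whole records before padding it back
up to the wanted length. A standalone sketch of that arithmetic, assuming the
usual 24-byte record size of the filenode cache (the sample lengths are made up)::

    _fnodesrecsize = 4 + 20  # changeset prefix + filenode

    def resize(rawlen, wantedlen):
        rawlentokeep = min(wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize)
        dirtyoffset = None
        if rawlen > rawlentokeep:
            rawlen = rawlentokeep          # drop the trailing partial record
            dirtyoffset = rawlen
        if rawlen < wantedlen:
            if dirtyoffset is None:
                dirtyoffset = rawlen
            rawlen = wantedlen             # extended with 0xff padding in the cache
        return rawlen, dirtyoffset

    # one full record plus a truncated one, while two full records are wanted
    assert resize(30, 48) == (48, 24)
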
 
     def getfnode(self, node, computemissing=True):
         """Obtain the filenode of the .hgtags file at a specified revision.
--- a/mercurial/templatekw.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/templatekw.py	Mon Mar 09 10:18:40 2020 -0700
@@ -396,26 +396,38 @@
     return templateutil.compatfileslist(context, mapping, b'file', ctx.files())
 
 
-@templatekeyword(b'graphnode', requires={b'repo', b'ctx'})
+@templatekeyword(b'graphnode', requires={b'repo', b'ctx', b'cache'})
 def showgraphnode(context, mapping):
     """String. The character representing the changeset node in an ASCII
     revision graph."""
     repo = context.resource(mapping, b'repo')
     ctx = context.resource(mapping, b'ctx')
-    return getgraphnode(repo, ctx)
+    cache = context.resource(mapping, b'cache')
+    return getgraphnode(repo, ctx, cache)
 
 
-def getgraphnode(repo, ctx):
-    return getgraphnodecurrent(repo, ctx) or getgraphnodesymbol(ctx)
+def getgraphnode(repo, ctx, cache):
+    return getgraphnodecurrent(repo, ctx, cache) or getgraphnodesymbol(ctx)
 
 
-def getgraphnodecurrent(repo, ctx):
+def getgraphnodecurrent(repo, ctx, cache):
     wpnodes = repo.dirstate.parents()
     if wpnodes[1] == nullid:
         wpnodes = wpnodes[:1]
     if ctx.node() in wpnodes:
         return b'@'
     else:
+        merge_nodes = cache.get(b'merge_nodes', ())
+        if not merge_nodes:
+            from . import merge
+
+            mergestate = merge.mergestate.read(repo)
+            if mergestate.active():
+                merge_nodes = (mergestate.local, mergestate.other)
+            cache[b'merge_nodes'] = merge_nodes
+
+        if ctx.node() in merge_nodes:
+            return b'%'
         return b''
 
 
--- a/mercurial/transaction.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/transaction.py	Mon Mar 09 10:18:40 2020 -0700
@@ -30,9 +30,9 @@
 # the changelog having been written).
 postfinalizegenerators = {b'bookmarks', b'dirstate'}
 
-gengroupall = b'all'
-gengroupprefinalize = b'prefinalize'
-gengrouppostfinalize = b'postfinalize'
+GEN_GROUP_ALL = b'all'
+GEN_GROUP_PRE_FINALIZE = b'prefinalize'
+GEN_GROUP_POST_FINALIZE = b'postfinalize'
 
 
 def active(func):
@@ -352,19 +352,25 @@
         if genid in self._filegenerators:
             del self._filegenerators[genid]
 
-    def _generatefiles(self, suffix=b'', group=gengroupall):
+    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
         # write files registered for generation
         any = False
+
+        if group == GEN_GROUP_ALL:
+            skip_post = skip_pre = False
+        else:
+            skip_pre = group == GEN_GROUP_POST_FINALIZE
+            skip_post = group == GEN_GROUP_PRE_FINALIZE
+
         for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
             any = True
             order, filenames, genfunc, location = entry
 
             # for generation at closing, check if it's before or after finalize
-            postfinalize = group == gengrouppostfinalize
-            if (
-                group != gengroupall
-                and (id in postfinalizegenerators) != postfinalize
-            ):
+            is_post = id in postfinalizegenerators
+            if skip_post and is_post:
+                continue
+            elif skip_pre and not is_post:
                 continue
 
             vfs = self._vfsmap[location]
@@ -505,7 +511,7 @@
         if self._count == 1:
             self._validator(self)  # will raise exception if needed
             self._validator = None  # Help prevent cycles.
-            self._generatefiles(group=gengroupprefinalize)
+            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
             while self._finalizecallback:
                 callbacks = self._finalizecallback
                 self._finalizecallback = {}
@@ -514,7 +520,7 @@
                     callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
-            self._generatefiles(group=gengrouppostfinalize)
+            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
 
         self._count -= 1
         if self._count != 0:
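
The rewrite above replaces the single membership test with two explicit skip
flags. Restated as a tiny standalone predicate (the constant and set names mirror
the diff; this is not the real transaction object)::

    GEN_GROUP_ALL = b'all'
    GEN_GROUP_PRE_FINALIZE = b'prefinalize'
    GEN_GROUP_POST_FINALIZE = b'postfinalize'
    postfinalizegenerators = {b'bookmarks', b'dirstate'}

    def should_generate(genid, group):
        # a generator runs unless the requested group explicitly skips it
        if group == GEN_GROUP_ALL:
            return True
        is_post = genid in postfinalizegenerators
        if group == GEN_GROUP_POST_FINALIZE:
            return is_post
        return not is_post  # GEN_GROUP_PRE_FINALIZE

    assert should_generate(b'bookmarks', GEN_GROUP_POST_FINALIZE)
    assert not should_generate(b'bookmarks', GEN_GROUP_PRE_FINALIZE)
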
--- a/mercurial/ui.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/ui.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1939,30 +1939,6 @@
         if self._progbar is not None and self._progbar.printed:
             self._progbar.clear()
 
-    def progress(self, topic, pos, item=b"", unit=b"", total=None):
-        '''show a progress message
-
-        By default a textual progress bar will be displayed if an operation
-        takes too long. 'topic' is the current operation, 'item' is a
-        non-numeric marker of the current position (i.e. the currently
-        in-process file), 'pos' is the current numeric position (i.e.
-        revision, bytes, etc.), unit is a corresponding unit label,
-        and total is the highest expected pos.
-
-        Multiple nested topics may be active at a time.
-
-        All topics should be marked closed by setting pos to None at
-        termination.
-        '''
-        self.deprecwarn(
-            b"use ui.makeprogress() instead of ui.progress()", b"5.1"
-        )
-        progress = self.makeprogress(topic, unit, total)
-        if pos is not None:
-            progress.update(pos, item=item)
-        else:
-            progress.complete()
-
     def makeprogress(self, topic, unit=b"", total=None):
         """Create a progress helper for the specified topic"""
         if getattr(self._fmsgerr, 'structured', False):
--- a/mercurial/upgrade.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/upgrade.py	Mon Mar 09 10:18:40 2020 -0700
@@ -449,7 +449,14 @@
 
     @classmethod
     def fromconfig(cls, repo):
-        return repo.ui.config(b'format', b'revlog-compression')
+        compengines = repo.ui.configlist(b'format', b'revlog-compression')
+        # return the first valid value as the selection code would do
+        for comp in compengines:
+            if comp in util.compengines:
+                return comp
+
+        # no valid compression found, let's display it all for clarity
+        return b','.join(compengines)
 
 
 @registerformatvariant
@@ -1122,7 +1129,7 @@
     """Upgrade a repository in place."""
     if optimize is None:
         optimize = []
-    optimize = set(legacy_opts_map.get(o, o) for o in optimize)
+    optimize = {legacy_opts_map.get(o, o) for o in optimize}
     repo = repo.unfiltered()
 
     revlogs = set(UPGRADE_ALL_REVLOGS)
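
The new `fromconfig` mirrors the selection rule used when creating repositories:
the first configured engine that is actually available wins, and the whole list
is reported when nothing matches. A standalone sketch of that rule, with
`available` standing in for `util.compengines`::

    def pick_compression(configured, available):
        for comp in configured:
            if comp in available:
                return comp
        # no valid compression found, report the whole list for clarity
        return b','.join(configured)

    assert pick_compression([b'zstd', b'zlib'], {b'zlib', b'none'}) == b'zlib'
    assert pick_compression([b'lz4'], {b'zlib'}) == b'lz4'
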
--- a/mercurial/url.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/url.py	Mon Mar 09 10:18:40 2020 -0700
@@ -224,13 +224,11 @@
 
 
 def _generic_proxytunnel(self):
-    proxyheaders = dict(
-        [
-            (x, self.headers[x])
-            for x in self.headers
-            if x.lower().startswith('proxy-')
-        ]
-    )
+    proxyheaders = {
+        x: self.headers[x]
+        for x in self.headers
+        if x.lower().startswith('proxy-')
+    }
     self.send(b'CONNECT %s HTTP/1.0\r\n' % self.realhostport)
     for header in pycompat.iteritems(proxyheaders):
         self.send(b'%s: %s\r\n' % header)
--- a/mercurial/util.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/util.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1846,14 +1846,14 @@
     return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
 
 
-def checksignature(func):
+def checksignature(func, depth=1):
     '''wrap a function with code to check for calling errors'''
 
     def check(*args, **kwargs):
         try:
             return func(*args, **kwargs)
         except TypeError:
-            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
+            if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
                 raise error.SignatureError
             raise
 
@@ -2213,7 +2213,7 @@
     '''
 
     def _makefspathcacheentry(dir):
-        return dict((normcase(n), n) for n in os.listdir(dir))
+        return {normcase(n): n for n in os.listdir(dir)}
 
     seps = pycompat.ossep
     if pycompat.osaltsep:
--- a/mercurial/utils/storageutil.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/utils/storageutil.py	Mon Mar 09 10:18:40 2020 -0700
@@ -364,7 +364,7 @@
     if nodesorder == b'nodes':
         revs = [frev(n) for n in nodes]
     elif nodesorder == b'linear':
-        revs = set(frev(n) for n in nodes)
+        revs = {frev(n) for n in nodes}
         revs = dagop.linearize(revs, store.parentrevs)
     else:  # storage and default
         revs = sorted(frev(n) for n in nodes)
--- a/mercurial/windows.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/windows.py	Mon Mar 09 10:18:40 2020 -0700
@@ -535,13 +535,11 @@
         cache = dircache.get(dir, None)
         if cache is None:
             try:
-                dmap = dict(
-                    [
-                        (normcase(n), s)
-                        for n, k, s in listdir(dir, True)
-                        if getkind(s.st_mode) in _wantedkinds
-                    ]
-                )
+                dmap = {
+                    normcase(n): s
+                    for n, k, s in listdir(dir, True)
+                    if getkind(s.st_mode) in _wantedkinds
+                }
             except OSError as err:
                 # Python >= 2.5 returns ENOENT and adds winerror field
                 # EINVAL is raised if dir is not a directory.
--- a/mercurial/wireprototypes.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/mercurial/wireprototypes.py	Mon Mar 09 10:18:40 2020 -0700
@@ -383,8 +383,8 @@
     # reason for it (like server operators wanting to achieve specific
     # performance characteristics). So fail fast if the config references
     # unusable compression engines.
-    validnames = set(e.name() for e in compengines)
-    invalidnames = set(e for e in configengines if e not in validnames)
+    validnames = {e.name() for e in compengines}
+    invalidnames = {e for e in configengines if e not in validnames}
     if invalidnames:
         raise error.Abort(
             _(b'invalid compression engine defined in %s: %s')
--- a/relnotes/next	Mon Mar 09 01:11:59 2020 +0100
+++ b/relnotes/next	Mon Mar 09 10:18:40 2020 -0700
@@ -1,14 +1,83 @@
 == New Features ==
 
+ * `hg purge`/`hg clean` can now delete ignored files instead of
+   untracked files, with the new -i flag.
+
+ * `hg log` now defaults to using a '%' symbol for commits involved
+   in unresolved merge conflicts. That includes unresolved conflicts
+   caused by e.g. `hg update --merge` and `hg graft`. '@' still takes
+   precedence, so what used to be marked '@' still is.
+
+ * New `conflictlocal()` and `conflictother()` revsets return the
+   commits that are being merged, when there are conflicts. Also works
+   for conflicts caused by e.g. `hg graft`.
+
+ * `hg copy --forget` can be used to unmark a file as copied.
+
+ * The `format.revlog-compression` configuration entry now accepts a list. The
+   first available option will be used. For example, setting::
+
+     [format]
+     revlog-compression=zstd, zlib
+
+   will use `zstd` compression for new repositories if it is available, and
+   will simply fall back to `zlib` if not.
+
+ * `hg debugmergestate` output is now templated, which may be useful
+   e.g. for IDEs that want to help the user resolve merge conflicts.
+
 
 == New Experimental Features ==
 
+ * `hg copy` now supports a `--at-rev` argument to mark files as
+   copied in the specified commit. It only works with `--after` for
+   now (i.e., it's only useful for marking files copied using non-hg
+   `cp` as copied).
+
+ * Use `hg copy --forget --at-rev REV` to unmark already committed
+   copies.
 
 == Bug Fixes  ==
 
+ * Fix server exception when concurrent pushes delete the same bookmark
+
+ * Prevent pushes of divergent bookmarks (foo@remote)
+
+ * The push error "remote repository changed while pushing - please
+   try again" now only happens when a concurrent push changed related
+   heads (instead of when a concurrent push changed any revision).
+
 
 == Backwards Compatibility Changes ==
 
+ * When `hg rebase` pauses for merge conflict resolution, the working
+   copy will no longer have the rebased node as a second parent. You
+   can use the new `conflictother()` revset for finding the other
+   parent during a conflict.
+
+ * `hg recover` does not verify the validity of the whole repository
+   anymore. You can pass `--verify` or call `hg verify` if necessary.
+
+ * `hg debugmergestate` output format changed. Let us know if that is
+   causing you problems and we'll roll it back.
+
+ * Resolved merge conflicts are now cleared by `hg commit` even if the
+   working copy has no changes.
+
 
 == Internal API Changes ==
 
+ * The deprecated `ui.progress()` has now been deleted. Please use
+   `ui.makeprogress()` instead.
+
+ * `hg.merge()` now takes a `ctx` instead of the previous `repo` and
+   `node` arguments.
+
+ * `hg.merge()` has lost its `abort` argument. Please call
+   `hg.abortmerge()` directly instead.
+
+ * `hg.merge()` has lost its `mergeforce` argument. It should have
+   only ever been called with the same value as the `force` argument.
+
+ * The `*others` argument of `cmdutil.check_incompatible_arguments()`
+   changed from being a varargs argument to being a single collection.
--- a/rust/Cargo.lock	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/Cargo.lock	Mon Mar 09 10:18:40 2020 -0700
@@ -2,23 +2,38 @@
 # It is not intended for manual editing.
 [[package]]
 name = "aho-corasick"
-version = "0.7.6"
+version = "0.7.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "arrayvec"
-version = "0.4.12"
+name = "ansi_term"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "autocfg"
-version = "0.1.6"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "autocfg"
+version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -33,19 +48,37 @@
 
 [[package]]
 name = "c2-chacha"
-version = "0.2.2"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "cc"
+version = "1.0.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "cfg-if"
 version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "clap"
+version = "2.33.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "cloudabi"
 version = "0.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -55,55 +88,71 @@
 
 [[package]]
 name = "cpython"
-version = "0.3.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.7.2"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-queue"
-version = "0.1.2"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.6.6"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "ctor"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "difference"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "either"
 version = "1.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -115,25 +164,46 @@
 
 [[package]]
 name = "getrandom"
-version = "0.1.12"
+version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "hermit-abi"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "hex"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "hg-core"
 version = "0.1.0"
 dependencies = [
  "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -141,17 +211,9 @@
 name = "hg-cpython"
 version = "0.1.0"
 dependencies = [
- "cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "hg-core 0.1.0",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "hgdirectffi"
-version = "0.1.0"
-dependencies = [
- "hg-core 0.1.0",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -161,64 +223,104 @@
 
 [[package]]
 name = "libc"
-version = "0.2.64"
+version = "0.2.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "memchr"
-version = "2.2.1"
+version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "memmap"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "memoffset"
-version = "0.5.1"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "nodrop"
-version = "0.1.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "num-traits"
-version = "0.2.8"
+version = "0.2.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "num_cpus"
-version = "1.10.1"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "output_vt100"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "ppv-lite86"
-version = "0.2.5"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "pretty_assertions"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ctor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 
 [[package]]
 name = "python27-sys"
-version = "0.3.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "python3-sys"
-version = "0.3.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -226,8 +328,8 @@
 version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -241,11 +343,11 @@
 
 [[package]]
 name = "rand"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -256,7 +358,7 @@
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -265,7 +367,7 @@
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -287,7 +389,7 @@
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -319,7 +421,7 @@
 version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -331,7 +433,7 @@
 dependencies = [
  "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -342,7 +444,7 @@
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -356,24 +458,24 @@
 
 [[package]]
 name = "rayon"
-version = "1.2.0"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.6.0"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -385,22 +487,35 @@
 ]
 
 [[package]]
+name = "redox_syscall"
+version = "0.1.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "regex"
-version = "1.3.1"
+version = "1.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.12"
+version = "0.6.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "remove_dir_all"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "rustc_version"
 version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -409,6 +524,14 @@
 ]
 
 [[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "scopeguard"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -427,8 +550,44 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "strsim"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "syn"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
+ "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "thread_local"
-version = "0.3.6"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -439,12 +598,27 @@
 version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "unicode-width"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "vec_map"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "wasi"
-version = "0.7.0"
+version = "0.9.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -462,39 +636,59 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "winapi-util"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [metadata]
-"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d"
-"checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9"
-"checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"
+"checksum aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "743ad5a418686aad3b87fd14c43badd828cf26e214a00f92a384291cf22e1811"
+"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
+"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
+"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
 "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 "checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
-"checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101"
+"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb"
+"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
 "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
 "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85532c648315aeb0829ad216a6a29aa3212cf9319bc7f6daf1404aa0bdd1485f"
-"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71"
-"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9"
-"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
-"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
+"checksum cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bfaf3847ab963e40c4f6dd8d6be279bdf74007ae2413786a0dcbb28c52139a95"
+"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca"
+"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac"
+"checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db"
+"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4"
+"checksum ctor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8ce37ad4184ab2ce004c33bf6379185d3b1c95801cab51026bd271bf68eedc"
+"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
 "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
 "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "473a1265acc8ff1e808cd0a1af8cee3c2ee5200916058a2ca113c29f2d903571"
+"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
+"checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772"
+"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e"
 "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c"
-"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
-"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f"
-"checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
-"checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32"
-"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273"
-"checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b"
-"checksum python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "372555e88a6bc8109eb641380240dc8d25a128fc48363ec9075664daadffdd5b"
-"checksum python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f3a8ebed3f1201fda179f3960609dbbc10cd8c75e9f2afcb03788278f367d8ea"
+"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558"
+"checksum memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223"
+"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
+"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9"
+"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
+"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
+"checksum output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
+"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
+"checksum pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427"
+"checksum proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548"
+"checksum python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67cb041de8615111bf224dd75667af5f25c6e032118251426fed7f1b70ce4c8c"
+"checksum python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90af11779515a1e530af60782d273b59ac79d33b0e253c071a728563957c76d4"
+"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe"
 "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
-"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412"
+"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 "checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
 "checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853"
 "checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
@@ -507,18 +701,29 @@
 "checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
 "checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
 "checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
-"checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123"
-"checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b"
+"checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
+"checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
 "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd"
-"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716"
+"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
+"checksum regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8"
+"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06"
+"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
 "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
 "checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
 "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
-"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
+"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
+"checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5"
+"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
+"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
 "checksum twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56"
-"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d"
+"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479"
+"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
+"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a"
+"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
 "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
 "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
 "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- a/rust/Cargo.toml	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/Cargo.toml	Mon Mar 09 10:18:40 2020 -0700
@@ -1,3 +1,3 @@
 [workspace]
-members = ["hg-core", "hg-direct-ffi", "hg-cpython"]
+members = ["hg-core", "hg-cpython"]
 exclude = ["chg", "hgcli"]
--- a/rust/hg-core/Cargo.toml	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/Cargo.toml	Mon Mar 09 10:18:40 2020 -0700
@@ -4,16 +4,33 @@
 authors = ["Georges Racinet <gracinet@anybox.fr>"]
 description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
 edition = "2018"
+build = "build.rs"
 
 [lib]
 name = "hg"
 
 [dependencies]
 byteorder = "1.3.1"
+hex = "0.4.0"
 lazy_static = "1.3.0"
+libc = { version = "0.2.66", optional = true }
 memchr = "2.2.0"
 rand = "0.6.5"
 rand_pcg = "0.1.1"
-rayon = "1.2.0"
+rayon = "1.3.0"
 regex = "1.1.0"
 twox-hash = "1.5.0"
+same-file = "1.0.6"
+
+[dev-dependencies]
+clap = "*"
+memmap = "0.7.0"
+pretty_assertions = "0.6.1"
+tempfile = "3.1.0"
+
+[build-dependencies]
+cc = { version = "1.0.48", optional = true }
+
+[features]
+default = []
+with-re2 = ["cc", "libc"]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/build.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,25 @@
+// build.rs
+//
+// Copyright 2020 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+#[cfg(feature = "with-re2")]
+use cc;
+
+#[cfg(feature = "with-re2")]
+fn compile_re2() {
+    cc::Build::new()
+        .cpp(true)
+        .flag("-std=c++11")
+        .file("src/re2/rust_re2.cpp")
+        .compile("librustre.a");
+
+    println!("cargo:rustc-link-lib=re2");
+}
+
+fn main() {
+    #[cfg(feature = "with-re2")]
+    compile_re2();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/examples/nodemap/index.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,95 @@
+// Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Minimal `RevlogIndex`, readable from standard Mercurial file format
+use hg::*;
+use memmap::*;
+use std::fs::File;
+use std::ops::Deref;
+use std::path::Path;
+use std::slice;
+
+pub struct Index {
+    data: Box<dyn Deref<Target = [IndexEntry]> + Send>,
+}
+
+/// A fixed-size index entry. All numbers are big-endian
+#[repr(C)]
+pub struct IndexEntry {
+    not_used_yet: [u8; 24],
+    p1: Revision,
+    p2: Revision,
+    node: Node,
+    unused_node: [u8; 12],
+}
+
+pub const INDEX_ENTRY_SIZE: usize = 64;
+
+impl IndexEntry {
+    fn parents(&self) -> [Revision; 2] {
+        [Revision::from_be(self.p1), Revision::from_be(self.p2)]
+    }
+}
+
+impl RevlogIndex for Index {
+    fn len(&self) -> usize {
+        self.data.len()
+    }
+
+    fn node(&self, rev: Revision) -> Option<&Node> {
+        if rev == NULL_REVISION {
+            return None;
+        }
+        let i = rev as usize;
+        if i >= self.len() {
+            None
+        } else {
+            Some(&self.data[i].node)
+        }
+    }
+}
+
+impl Graph for &Index {
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+        let [p1, p2] = (*self).data[rev as usize].parents();
+        let len = (*self).len();
+        if p1 < NULL_REVISION
+            || p2 < NULL_REVISION
+            || p1 as usize >= len
+            || p2 as usize >= len
+        {
+            return Err(GraphError::ParentOutOfRange(rev));
+        }
+        Ok([p1, p2])
+    }
+}
+
+struct IndexMmap(Mmap);
+
+impl Deref for IndexMmap {
+    type Target = [IndexEntry];
+
+    fn deref(&self) -> &[IndexEntry] {
+        let ptr = self.0.as_ptr() as *const IndexEntry;
+        // Any misaligned data will be ignored.
+        debug_assert_eq!(
+            self.0.len() % std::mem::align_of::<IndexEntry>(),
+            0,
+            "Misaligned data in mmap"
+        );
+        unsafe { slice::from_raw_parts(ptr, self.0.len() / INDEX_ENTRY_SIZE) }
+    }
+}
+
+impl Index {
+    pub fn load_mmap(path: impl AsRef<Path>) -> Self {
+        let file = File::open(path).unwrap();
+    let msg = "Index file is missing, or lacks read permission";
+        let mmap = unsafe { MmapOptions::new().map(&file) }.expect(msg);
+        Self {
+            data: Box::new(IndexMmap(mmap)),
+        }
+    }
+}
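
For illustration, a minimal sketch of how this mmap-backed index might be consumed; the repository path is purely illustrative and the snippet assumes the same `use hg::*` and hex helpers as this example and `main.rs` below:

use std::path::Path;

fn print_tip(repo: &Path) {
    // Load the changelog index, as done by mmap_index() in main.rs below.
    let index = Index::load_mmap(repo.join(".hg/store/00changelog.i"));
    if index.len() > 0 {
        // Revisions are 0-based, so the last entry is the tip.
        let tip = (index.len() - 1) as Revision;
        // encode_hex() is the same helper main.rs uses to print nodes.
        println!("tip node: {:?}", index.node(tip).map(|n| n.encode_hex()));
    }
}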
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/examples/nodemap/main.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,146 @@
+// Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use clap::*;
+use hg::revlog::node::*;
+use hg::revlog::nodemap::*;
+use hg::revlog::*;
+use memmap::MmapOptions;
+use rand::Rng;
+use std::fs::File;
+use std::io;
+use std::io::Write;
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+use std::time::Instant;
+
+mod index;
+use index::Index;
+
+fn mmap_index(repo_path: &Path) -> Index {
+    let mut path = PathBuf::from(repo_path);
+    path.extend([".hg", "store", "00changelog.i"].iter());
+    Index::load_mmap(path)
+}
+
+fn mmap_nodemap(path: &Path) -> NodeTree {
+    let file = File::open(path).unwrap();
+    let mmap = unsafe { MmapOptions::new().map(&file).unwrap() };
+    let len = mmap.len();
+    NodeTree::load_bytes(Box::new(mmap), len)
+}
+
+/// Scan the whole index and create the corresponding nodemap file at `path`
+fn create(index: &Index, path: &Path) -> io::Result<()> {
+    let mut file = File::create(path)?;
+    let start = Instant::now();
+    let mut nm = NodeTree::default();
+    for rev in 0..index.len() {
+        let rev = rev as Revision;
+        nm.insert(index, index.node(rev).unwrap(), rev).unwrap();
+    }
+    eprintln!("Nodemap constructed in RAM in {:?}", start.elapsed());
+    file.write(&nm.into_readonly_and_added_bytes().1)?;
+    eprintln!("Nodemap written to disk");
+    Ok(())
+}
+
+fn query(index: &Index, nm: &NodeTree, prefix: &str) {
+    let start = Instant::now();
+    let res = nm.find_hex(index, prefix);
+    println!("Result found in {:?}: {:?}", start.elapsed(), res);
+}
+
+fn bench(index: &Index, nm: &NodeTree, queries: usize) {
+    let len = index.len() as u32;
+    let mut rng = rand::thread_rng();
+    let nodes: Vec<Node> = (0..queries)
+        .map(|_| {
+            index
+                .node((rng.gen::<u32>() % len) as Revision)
+                .unwrap()
+                .clone()
+        })
+        .collect();
+    if queries < 10 {
+        let nodes_hex: Vec<String> =
+            nodes.iter().map(|n| n.encode_hex()).collect();
+        println!("Nodes: {:?}", nodes_hex);
+    }
+    let mut last: Option<Revision> = None;
+    let start = Instant::now();
+    for node in nodes.iter() {
+        last = nm.find_bin(index, node.into()).unwrap();
+    }
+    let elapsed = start.elapsed();
+    println!(
+        "Did {} queries in {:?} (mean {:?}), last was {:?} with result {:?}",
+        queries,
+        elapsed,
+        elapsed / (queries as u32),
+        nodes.last().unwrap().encode_hex(),
+        last
+    );
+}
+
+fn main() {
+    let matches = App::new("Nodemap pure Rust example")
+        .arg(
+            Arg::with_name("REPOSITORY")
+                .help("Path to the repository, always necessary for its index")
+                .required(true),
+        )
+        .arg(
+            Arg::with_name("NODEMAP_FILE")
+                .help("Path to the nodemap file, independent of REPOSITORY")
+                .required(true),
+        )
+        .subcommand(
+            SubCommand::with_name("create")
+                .about("Create NODEMAP_FILE by scanning repository index"),
+        )
+        .subcommand(
+            SubCommand::with_name("query")
+                .about("Query NODEMAP_FILE for PREFIX")
+                .arg(Arg::with_name("PREFIX").required(true)),
+        )
+        .subcommand(
+            SubCommand::with_name("bench")
+                .about(
+                    "Perform #QUERIES random successful queries on NODEMAP_FILE")
+                .arg(Arg::with_name("QUERIES").required(true)),
+        )
+        .get_matches();
+
+    let repo = matches.value_of("REPOSITORY").unwrap();
+    let nm_path = matches.value_of("NODEMAP_FILE").unwrap();
+
+    let index = mmap_index(&Path::new(repo));
+
+    if let Some(_) = matches.subcommand_matches("create") {
+        println!("Creating nodemap file {} for repository {}", nm_path, repo);
+        create(&index, &Path::new(nm_path)).unwrap();
+        return;
+    }
+
+    let nm = mmap_nodemap(&Path::new(nm_path));
+    if let Some(matches) = matches.subcommand_matches("query") {
+        let prefix = matches.value_of("PREFIX").unwrap();
+        println!(
+            "Querying {} in nodemap file {} of repository {}",
+            prefix, nm_path, repo
+        );
+        query(&index, &nm, prefix);
+    }
+    if let Some(matches) = matches.subcommand_matches("bench") {
+        let queries =
+            usize::from_str(matches.value_of("QUERIES").unwrap()).unwrap();
+        println!(
+            "Doing {} random queries in nodemap file {} of repository {}",
+            queries, nm_path, repo
+        );
+        bench(&index, &nm, queries);
+    }
+}
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -8,12 +8,15 @@
 //! A multiset of directory names.
 //!
 //! Used to count the references to directories in a manifest or dirstate.
-use crate::utils::hg_path::{HgPath, HgPathBuf};
 use crate::{
-    dirstate::EntryState, utils::files, DirstateEntry, DirstateMapError,
-    FastHashMap,
+    dirstate::EntryState,
+    utils::{
+        files,
+        hg_path::{HgPath, HgPathBuf, HgPathError},
+    },
+    DirstateEntry, DirstateMapError, FastHashMap,
 };
-use std::collections::hash_map::{self, Entry};
+use std::collections::{hash_map, hash_map::Entry, HashMap, HashSet};
 
 // could be encapsulated if we cared about API stability more seriously
 pub type DirsMultisetIter<'a> = hash_map::Keys<'a, HgPathBuf, u32>;
@@ -75,7 +78,14 @@
             if subpath.as_bytes().last() == Some(&b'/') {
                 // TODO Remove this once PathAuditor is certified
                 // as the only entrypoint for path data
-                return Err(DirstateMapError::ConsecutiveSlashes);
+                let second_slash_index = subpath.len() - 1;
+
+                return Err(DirstateMapError::InvalidPath(
+                    HgPathError::ConsecutiveSlashes {
+                        bytes: path.as_ref().as_bytes().to_owned(),
+                        second_slash_index,
+                    },
+                ));
             }
             if let Some(val) = self.inner.get_mut(subpath) {
                 *val += 1;
@@ -129,6 +139,68 @@
     }
 }
 
+/// This is basically a reimplementation of `DirsMultiset` that stores the
+/// children instead of just a count of them, plus a small optional
+/// optimization to avoid some directories we don't need.
+#[derive(PartialEq, Debug)]
+pub struct DirsChildrenMultiset<'a> {
+    inner: FastHashMap<&'a HgPath, HashSet<&'a HgPath>>,
+    only_include: Option<HashSet<&'a HgPath>>,
+}
+
+impl<'a> DirsChildrenMultiset<'a> {
+    pub fn new(
+        paths: impl Iterator<Item = &'a HgPathBuf>,
+        only_include: Option<&'a HashSet<impl AsRef<HgPath> + 'a>>,
+    ) -> Self {
+        let mut new = Self {
+            inner: HashMap::default(),
+            only_include: only_include
+                .map(|s| s.iter().map(|p| p.as_ref()).collect()),
+        };
+
+        for path in paths {
+            new.add_path(path)
+        }
+
+        new
+    }
+    fn add_path(&mut self, path: &'a (impl AsRef<HgPath> + 'a)) {
+        if path.as_ref().is_empty() {
+            return;
+        }
+        for (directory, basename) in files::find_dirs_with_base(path.as_ref())
+        {
+            if !self.is_dir_included(directory) {
+                continue;
+            }
+            self.inner
+                .entry(directory)
+                .and_modify(|e| {
+                    e.insert(basename);
+                })
+                .or_insert_with(|| {
+                    let mut set = HashSet::new();
+                    set.insert(basename);
+                    set
+                });
+        }
+    }
+    fn is_dir_included(&self, dir: impl AsRef<HgPath>) -> bool {
+        match &self.only_include {
+            None => false,
+            Some(i) => i.contains(dir.as_ref()),
+        }
+    }
+
+    pub fn get(
+        &self,
+        path: impl AsRef<HgPath>,
+    ) -> Option<&HashSet<&'a HgPath>> {
+        self.inner.get(path.as_ref())
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
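
For illustration, a small sketch of the `DirsChildrenMultiset` introduced above, under the assumption that `files::find_dirs_with_base` yields (ancestor directory, immediate child) pairs; all paths and the `of_interest` set are illustrative:

use std::collections::HashSet;

fn children_example() {
    let paths = vec![
        HgPath::new(b"a/b/file1.txt").to_owned(),
        HgPath::new(b"a/file2.txt").to_owned(),
    ];
    // Only directories listed in `only_include` are populated; with `None`,
    // `is_dir_included` above returns false and nothing is recorded.
    let mut of_interest = HashSet::new();
    of_interest.insert(HgPath::new(b"a").to_owned());
    let children = DirsChildrenMultiset::new(paths.iter(), Some(&of_interest));
    // "a" is in the include set, so its immediate children were collected.
    assert!(children.get(HgPath::new(b"a")).is_some());
}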
--- a/rust/hg-core/src/dirstate/dirstate_map.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -100,16 +100,12 @@
         if entry.state != EntryState::Normal || entry.mtime == MTIME_UNSET {
             self.get_non_normal_other_parent_entries()
                 .0
-                .as_mut()
-                .unwrap()
                 .insert(filename.to_owned());
         }
 
         if entry.size == SIZE_FROM_OTHER_PARENT {
             self.get_non_normal_other_parent_entries()
                 .1
-                .as_mut()
-                .unwrap()
                 .insert(filename.to_owned());
         }
         Ok(())
@@ -152,8 +148,6 @@
         );
         self.get_non_normal_other_parent_entries()
             .0
-            .as_mut()
-            .unwrap()
             .insert(filename.to_owned());
         Ok(())
     }
@@ -182,8 +176,6 @@
         }
         self.get_non_normal_other_parent_entries()
             .0
-            .as_mut()
-            .unwrap()
             .remove(filename);
 
         Ok(exists)
@@ -211,8 +203,6 @@
             if changed {
                 self.get_non_normal_other_parent_entries()
                     .0
-                    .as_mut()
-                    .unwrap()
                     .insert(filename.to_owned());
             }
         }
@@ -224,8 +214,6 @@
     ) -> bool {
         self.get_non_normal_other_parent_entries()
             .0
-            .as_mut()
-            .unwrap()
             .remove(key.as_ref())
     }
     pub fn non_normal_entries_union(
@@ -234,8 +222,6 @@
     ) -> Vec<HgPathBuf> {
         self.get_non_normal_other_parent_entries()
             .0
-            .as_mut()
-            .unwrap()
             .union(&other)
             .map(|e| e.to_owned())
             .collect()
@@ -243,12 +229,31 @@
 
     pub fn get_non_normal_other_parent_entries(
         &mut self,
-    ) -> (
-        &mut Option<HashSet<HgPathBuf>>,
-        &mut Option<HashSet<HgPathBuf>>,
-    ) {
+    ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
         self.set_non_normal_other_parent_entries(false);
-        (&mut self.non_normal_set, &mut self.other_parent_set)
+        (
+            self.non_normal_set.as_mut().unwrap(),
+            self.other_parent_set.as_mut().unwrap(),
+        )
+    }
+
+    /// Useful to get immutable references to those sets in contexts where
+    /// you only have an immutable reference to the `DirstateMap`, like when
+    /// sharing references with Python.
+    ///
+    /// TODO, get rid of this along with the other "setter/getter" stuff when
+    /// a nice typestate plan is defined.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if either set is `None`.
+    pub fn get_non_normal_other_parent_entries_panic(
+        &self,
+    ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
+        (
+            self.non_normal_set.as_ref().unwrap(),
+            self.other_parent_set.as_ref().unwrap(),
+        )
     }
 
     pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
@@ -440,22 +445,8 @@
         .unwrap();
 
         assert_eq!(1, map.len());
-        assert_eq!(
-            0,
-            map.get_non_normal_other_parent_entries()
-                .0
-                .as_ref()
-                .unwrap()
-                .len()
-        );
-        assert_eq!(
-            0,
-            map.get_non_normal_other_parent_entries()
-                .1
-                .as_ref()
-                .unwrap()
-                .len()
-        );
+        assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
+        assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
     }
 
     #[test]
@@ -487,7 +478,7 @@
         })
         .collect();
 
-        let non_normal = [
+        let mut non_normal = [
             b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
         ]
         .iter()
@@ -499,8 +490,8 @@
         let entries = map.get_non_normal_other_parent_entries();
 
         assert_eq!(
-            (Some(non_normal), Some(other_parent)),
-            (entries.0.to_owned(), entries.1.to_owned())
+            (&mut non_normal, &mut other_parent),
+            (entries.0, entries.1)
         );
     }
 }
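
For illustration, a hedged sketch of the reworked getters above; the `DirstateMap` value and the filename are illustrative, and the panicking variant is only safe once the sets are known to exist:

fn track_entry(map: &mut DirstateMap) {
    // The mutable getter now returns the sets directly, with no Option to
    // unwrap; it also (re)builds them on demand via
    // set_non_normal_other_parent_entries(false).
    let (non_normal, _other_parent) = map.get_non_normal_other_parent_entries();
    non_normal.insert(HgPath::new(b"some/file.txt").to_owned());

    // With only a shared borrow (e.g. when exposing the sets to Python),
    // the panicking variant documented above can be used instead.
    let (non_normal, _other_parent) =
        map.get_non_normal_other_parent_entries_panic();
    assert!(non_normal.contains(&HgPath::new(b"some/file.txt").to_owned()));
}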
--- a/rust/hg-core/src/dirstate/status.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/dirstate/status.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -272,7 +272,7 @@
 
 pub fn status<'a: 'c, 'b: 'c, 'c>(
     dmap: &'a DirstateMap,
-    matcher: &'b (impl Matcher),
+    matcher: &'b impl Matcher,
     root_dir: impl AsRef<Path> + Sync + Send + Copy,
     list_clean: bool,
     last_normal_time: i64,
--- a/rust/hg-core/src/filepatterns.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/filepatterns.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -8,12 +8,18 @@
 //! Handling of Mercurial-specific patterns.
 
 use crate::{
-    utils::SliceExt, FastHashMap, LineNumber, PatternError, PatternFileError,
+    utils::{
+        files::{canonical_path, get_bytes_from_path, get_path_from_bytes},
+        hg_path::{path_to_hg_path_buf, HgPathBuf, HgPathError},
+        SliceExt,
+    },
+    FastHashMap, PatternError,
 };
 use lazy_static::lazy_static;
 use regex::bytes::{NoExpand, Regex};
 use std::fs::File;
 use std::io::Read;
+use std::ops::Deref;
 use std::path::{Path, PathBuf};
 use std::vec::Vec;
 
@@ -32,19 +38,33 @@
 const GLOB_REPLACEMENTS: &[(&[u8], &[u8])] =
     &[(b"*/", b"(?:.*/)?"), (b"*", b".*"), (b"", b"[^/]*")];
 
+/// Appended to the regexp of globs
+const GLOB_SUFFIX: &[u8; 7] = b"(?:/|$)";
+
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub enum PatternSyntax {
+    /// A regular expression
     Regexp,
     /// Glob that matches at the front of the path
     RootGlob,
     /// Glob that matches at any suffix of the path (still anchored at
     /// slashes)
     Glob,
+    /// A path relative to repository root, which is matched recursively
     Path,
+    /// A path relative to cwd
     RelPath,
+    /// An unrooted glob (*.rs matches Rust files in all dirs)
     RelGlob,
+    /// A regexp that needn't match the start of a name
     RelRegexp,
+    /// A path relative to repository root, which is matched non-recursively
+    /// (will not match subdirectories)
     RootFiles,
+    /// A file of patterns to read and include
+    Include,
+    /// A file of patterns to match against files under the same directory
+    SubInclude,
 }
 
 /// Transforms a glob pattern into a regex
@@ -125,16 +145,20 @@
         .collect()
 }
 
-fn parse_pattern_syntax(kind: &[u8]) -> Result<PatternSyntax, PatternError> {
+pub fn parse_pattern_syntax(
+    kind: &[u8],
+) -> Result<PatternSyntax, PatternError> {
     match kind {
-        b"re" => Ok(PatternSyntax::Regexp),
-        b"path" => Ok(PatternSyntax::Path),
-        b"relpath" => Ok(PatternSyntax::RelPath),
-        b"rootfilesin" => Ok(PatternSyntax::RootFiles),
-        b"relglob" => Ok(PatternSyntax::RelGlob),
-        b"relre" => Ok(PatternSyntax::RelRegexp),
-        b"glob" => Ok(PatternSyntax::Glob),
-        b"rootglob" => Ok(PatternSyntax::RootGlob),
+        b"re:" => Ok(PatternSyntax::Regexp),
+        b"path:" => Ok(PatternSyntax::Path),
+        b"relpath:" => Ok(PatternSyntax::RelPath),
+        b"rootfilesin:" => Ok(PatternSyntax::RootFiles),
+        b"relglob:" => Ok(PatternSyntax::RelGlob),
+        b"relre:" => Ok(PatternSyntax::RelRegexp),
+        b"glob:" => Ok(PatternSyntax::Glob),
+        b"rootglob:" => Ok(PatternSyntax::RootGlob),
+        b"include:" => Ok(PatternSyntax::Include),
+        b"subinclude:" => Ok(PatternSyntax::SubInclude),
         _ => Err(PatternError::UnsupportedSyntax(
             String::from_utf8_lossy(kind).to_string(),
         )),
@@ -144,11 +168,10 @@
 /// Builds the regex that corresponds to the given pattern.
 /// If within a `syntax: regexp` context, returns the pattern,
 /// otherwise, returns the corresponding regex.
-fn _build_single_regex(
-    syntax: PatternSyntax,
-    pattern: &[u8],
-    globsuffix: &[u8],
-) -> Vec<u8> {
+fn _build_single_regex(entry: &IgnorePattern) -> Vec<u8> {
+    let IgnorePattern {
+        syntax, pattern, ..
+    } = entry;
     if pattern.is_empty() {
         return vec![];
     }
@@ -158,7 +181,7 @@
             if pattern[0] == b'^' {
                 return pattern.to_owned();
             }
-            [b".*", pattern].concat()
+            [&b".*"[..], pattern].concat()
         }
         PatternSyntax::Path | PatternSyntax::RelPath => {
             if pattern == b"." {
@@ -181,36 +204,91 @@
         PatternSyntax::RelGlob => {
             let glob_re = glob_to_re(pattern);
             if let Some(rest) = glob_re.drop_prefix(b"[^/]*") {
-                [b".*", rest, globsuffix].concat()
+                [b".*", rest, GLOB_SUFFIX].concat()
             } else {
-                [b"(?:|.*/)", glob_re.as_slice(), globsuffix].concat()
+                [b"(?:|.*/)", glob_re.as_slice(), GLOB_SUFFIX].concat()
             }
         }
         PatternSyntax::Glob | PatternSyntax::RootGlob => {
-            [glob_to_re(pattern).as_slice(), globsuffix].concat()
+            [glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
         }
+        PatternSyntax::Include | PatternSyntax::SubInclude => unreachable!(),
     }
 }
 
 const GLOB_SPECIAL_CHARACTERS: [u8; 7] =
     [b'*', b'?', b'[', b']', b'{', b'}', b'\\'];
 
+/// TODO support other platforms
+#[cfg(unix)]
+pub fn normalize_path_bytes(bytes: &[u8]) -> Vec<u8> {
+    if bytes.is_empty() {
+        return b".".to_vec();
+    }
+    let sep = b'/';
+
+    let mut initial_slashes = bytes.iter().take_while(|b| **b == sep).count();
+    if initial_slashes > 2 {
+        // POSIX allows one or two initial slashes, but treats three or more
+        // as a single slash.
+        initial_slashes = 1;
+    }
+    let components = bytes
+        .split(|b| *b == sep)
+        .filter(|c| !(c.is_empty() || c == b"."))
+        .fold(vec![], |mut acc, component| {
+            if component != b".."
+                || (initial_slashes == 0 && acc.is_empty())
+                || (!acc.is_empty() && acc[acc.len() - 1] == b"..")
+            {
+                acc.push(component)
+            } else if !acc.is_empty() {
+                acc.pop();
+            }
+            acc
+        });
+    let mut new_bytes = components.join(&sep);
+
+    if initial_slashes > 0 {
+        let mut buf: Vec<_> = (0..initial_slashes).map(|_| sep).collect();
+        buf.extend(new_bytes);
+        new_bytes = buf;
+    }
+    if new_bytes.is_empty() {
+        b".".to_vec()
+    } else {
+        new_bytes
+    }
+}
+
 /// Wrapper function to `_build_single_regex` that short-circuits 'exact' globs
 /// that don't need to be transformed into a regex.
 pub fn build_single_regex(
-    kind: &[u8],
-    pat: &[u8],
-    globsuffix: &[u8],
+    entry: &IgnorePattern,
 ) -> Result<Vec<u8>, PatternError> {
-    let enum_kind = parse_pattern_syntax(kind)?;
-    if enum_kind == PatternSyntax::RootGlob
-        && !pat.iter().any(|b| GLOB_SPECIAL_CHARACTERS.contains(b))
+    let IgnorePattern {
+        pattern, syntax, ..
+    } = entry;
+    let pattern = match syntax {
+        PatternSyntax::RootGlob
+        | PatternSyntax::Path
+        | PatternSyntax::RelGlob
+        | PatternSyntax::RootFiles => normalize_path_bytes(&pattern),
+        PatternSyntax::Include | PatternSyntax::SubInclude => {
+            return Err(PatternError::NonRegexPattern(entry.clone()))
+        }
+        _ => pattern.to_owned(),
+    };
+    if *syntax == PatternSyntax::RootGlob
+        && !pattern.iter().any(|b| GLOB_SPECIAL_CHARACTERS.contains(b))
     {
-        let mut escaped = escape_pattern(pat);
-        escaped.extend(b"(?:/|$)");
+        let mut escaped = escape_pattern(&pattern);
+        escaped.extend(GLOB_SUFFIX);
         Ok(escaped)
     } else {
-        Ok(_build_single_regex(enum_kind, pat, globsuffix))
+        let mut entry = entry.clone();
+        entry.pattern = pattern;
+        Ok(_build_single_regex(&entry))
     }
 }
 
@@ -222,24 +300,29 @@
         m.insert(b"regexp".as_ref(), b"relre:".as_ref());
         m.insert(b"glob".as_ref(), b"relglob:".as_ref());
         m.insert(b"rootglob".as_ref(), b"rootglob:".as_ref());
-        m.insert(b"include".as_ref(), b"include".as_ref());
-        m.insert(b"subinclude".as_ref(), b"subinclude".as_ref());
+        m.insert(b"include".as_ref(), b"include:".as_ref());
+        m.insert(b"subinclude".as_ref(), b"subinclude:".as_ref());
         m
     };
 }
 
-pub type PatternTuple = (Vec<u8>, LineNumber, Vec<u8>);
-type WarningTuple = (PathBuf, Vec<u8>);
+#[derive(Debug)]
+pub enum PatternFileWarning {
+    /// (file path, syntax bytes)
+    InvalidSyntax(PathBuf, Vec<u8>),
+    /// File path
+    NoSuchFile(PathBuf),
+}
 
 pub fn parse_pattern_file_contents<P: AsRef<Path>>(
     lines: &[u8],
     file_path: P,
     warn: bool,
-) -> (Vec<PatternTuple>, Vec<WarningTuple>) {
+) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
     let comment_regex = Regex::new(r"((?:^|[^\\])(?:\\\\)*)#.*").unwrap();
     let comment_escape_regex = Regex::new(r"\\#").unwrap();
-    let mut inputs: Vec<PatternTuple> = vec![];
-    let mut warnings: Vec<WarningTuple> = vec![];
+    let mut inputs: Vec<IgnorePattern> = vec![];
+    let mut warnings: Vec<PatternFileWarning> = vec![];
 
     let mut current_syntax = b"relre:".as_ref();
 
@@ -267,8 +350,10 @@
             if let Some(rel_syntax) = SYNTAXES.get(syntax) {
                 current_syntax = rel_syntax;
             } else if warn {
-                warnings
-                    .push((file_path.as_ref().to_owned(), syntax.to_owned()));
+                warnings.push(PatternFileWarning::InvalidSyntax(
+                    file_path.as_ref().to_owned(),
+                    syntax.to_owned(),
+                ));
             }
             continue;
         }
@@ -288,34 +373,186 @@
             }
         }
 
-        inputs.push((
-            [line_syntax, line].concat(),
-            line_number,
-            line.to_owned(),
+        inputs.push(IgnorePattern::new(
+            parse_pattern_syntax(&line_syntax).map_err(|e| match e {
+                PatternError::UnsupportedSyntax(syntax) => {
+                    PatternError::UnsupportedSyntaxInFile(
+                        syntax,
+                        file_path.as_ref().to_string_lossy().into(),
+                        line_number,
+                    )
+                }
+                _ => e,
+            })?,
+            &line,
+            &file_path,
         ));
     }
-    (inputs, warnings)
+    Ok((inputs, warnings))
 }
 
 pub fn read_pattern_file<P: AsRef<Path>>(
     file_path: P,
     warn: bool,
-) -> Result<(Vec<PatternTuple>, Vec<WarningTuple>), PatternFileError> {
-    let mut f = File::open(file_path.as_ref())?;
+) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
+    let mut f = match File::open(file_path.as_ref()) {
+        Ok(f) => Ok(f),
+        Err(e) => match e.kind() {
+            std::io::ErrorKind::NotFound => {
+                return Ok((
+                    vec![],
+                    vec![PatternFileWarning::NoSuchFile(
+                        file_path.as_ref().to_owned(),
+                    )],
+                ))
+            }
+            _ => Err(e),
+        },
+    }?;
     let mut contents = Vec::new();
 
     f.read_to_end(&mut contents)?;
 
-    Ok(parse_pattern_file_contents(&contents, file_path, warn))
+    Ok(parse_pattern_file_contents(&contents, file_path, warn)?)
+}
+
+/// Represents an entry in an "ignore" file.
+#[derive(Debug, Eq, PartialEq, Clone)]
+pub struct IgnorePattern {
+    pub syntax: PatternSyntax,
+    pub pattern: Vec<u8>,
+    pub source: PathBuf,
+}
+
+impl IgnorePattern {
+    pub fn new(
+        syntax: PatternSyntax,
+        pattern: &[u8],
+        source: impl AsRef<Path>,
+    ) -> Self {
+        Self {
+            syntax,
+            pattern: pattern.to_owned(),
+            source: source.as_ref().to_owned(),
+        }
+    }
+}
+
+pub type PatternResult<T> = Result<T, PatternError>;
+
+/// Wrapper for `read_pattern_file` that also recursively expands `include:`
+/// patterns.
+///
+/// `subinclude:` is not treated as a special pattern here: unraveling them
+/// needs to occur in the "ignore" phase.
+pub fn get_patterns_from_file(
+    pattern_file: impl AsRef<Path>,
+    root_dir: impl AsRef<Path>,
+) -> PatternResult<(Vec<IgnorePattern>, Vec<PatternFileWarning>)> {
+    let (patterns, mut warnings) = read_pattern_file(&pattern_file, true)?;
+    let patterns = patterns
+        .into_iter()
+        .flat_map(|entry| -> PatternResult<_> {
+            let IgnorePattern {
+                syntax,
+                pattern,
+                source: _,
+            } = &entry;
+            Ok(match syntax {
+                PatternSyntax::Include => {
+                    let inner_include =
+                        root_dir.as_ref().join(get_path_from_bytes(&pattern));
+                    let (inner_pats, inner_warnings) = get_patterns_from_file(
+                        &inner_include,
+                        root_dir.as_ref(),
+                    )?;
+                    warnings.extend(inner_warnings);
+                    inner_pats
+                }
+                _ => vec![entry],
+            })
+        })
+        .flatten()
+        .collect();
+
+    Ok((patterns, warnings))
+}
+
+/// Holds all the information needed to handle a `subinclude:` pattern.
+pub struct SubInclude {
+    /// Will be used for repository (hg) paths that start with this prefix.
+    /// It is relative to the current working directory, so comparing against
+    /// repository paths is painless.
+    pub prefix: HgPathBuf,
+    /// The file itself, containing the patterns
+    pub path: PathBuf,
+    /// Folder in the filesystem where it applies
+    pub root: PathBuf,
+}
+
+impl SubInclude {
+    pub fn new(
+        root_dir: impl AsRef<Path>,
+        pattern: &[u8],
+        source: impl AsRef<Path>,
+    ) -> Result<SubInclude, HgPathError> {
+        let normalized_source =
+            normalize_path_bytes(&get_bytes_from_path(source));
+
+        let source_root = get_path_from_bytes(&normalized_source);
+        let source_root = source_root.parent().unwrap_or(source_root.deref());
+
+        let path = source_root.join(get_path_from_bytes(pattern));
+        let new_root = path.parent().unwrap_or(path.deref());
+
+        let prefix = canonical_path(&root_dir, &root_dir, new_root)?;
+
+        Ok(Self {
+            prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
+                if !p.is_empty() {
+                    p.push(b'/');
+                }
+                Ok(p)
+            })?,
+            path: path.to_owned(),
+            root: new_root.to_owned(),
+        })
+    }
+}
+
+/// Separate and pre-process subincludes from other patterns for the "ignore"
+/// phase.
+pub fn filter_subincludes(
+    ignore_patterns: &[IgnorePattern],
+    root_dir: impl AsRef<Path>,
+) -> Result<(Vec<SubInclude>, Vec<&IgnorePattern>), HgPathError> {
+    let mut subincludes = vec![];
+    let mut others = vec![];
+
+    for ignore_pattern in ignore_patterns.iter() {
+        let IgnorePattern {
+            syntax,
+            pattern,
+            source,
+        } = ignore_pattern;
+        if *syntax == PatternSyntax::SubInclude {
+            subincludes.push(SubInclude::new(&root_dir, pattern, &source)?);
+        } else {
+            others.push(ignore_pattern)
+        }
+    }
+    Ok((subincludes, others))
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
+    use pretty_assertions::assert_eq;
 
     #[test]
     fn escape_pattern_test() {
-        let untouched = br#"!"%',/0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ_`abcdefghijklmnopqrstuvwxyz"#;
+        let untouched =
+            br#"!"%',/0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ_`abcdefghijklmnopqrstuvwxyz"#;
         assert_eq!(escape_pattern(untouched), untouched.to_vec());
         // All escape codes
         assert_eq!(
@@ -342,39 +579,78 @@
         let lines = b"syntax: glob\n*.elc";
 
         assert_eq!(
-            vec![(b"relglob:*.elc".to_vec(), 2, b"*.elc".to_vec())],
             parse_pattern_file_contents(lines, Path::new("file_path"), false)
+                .unwrap()
                 .0,
+            vec![IgnorePattern::new(
+                PatternSyntax::RelGlob,
+                b"*.elc",
+                Path::new("file_path")
+            )],
         );
 
         let lines = b"syntax: include\nsyntax: glob";
 
         assert_eq!(
             parse_pattern_file_contents(lines, Path::new("file_path"), false)
+                .unwrap()
                 .0,
             vec![]
         );
         let lines = b"glob:**.o";
         assert_eq!(
             parse_pattern_file_contents(lines, Path::new("file_path"), false)
+                .unwrap()
                 .0,
-            vec![(b"relglob:**.o".to_vec(), 1, b"**.o".to_vec())]
+            vec![IgnorePattern::new(
+                PatternSyntax::RelGlob,
+                b"**.o",
+                Path::new("file_path")
+            )]
+        );
+    }
+
+    #[test]
+    fn test_build_single_regex() {
+        assert_eq!(
+            build_single_regex(&IgnorePattern::new(
+                PatternSyntax::RelGlob,
+                b"rust/target/",
+                Path::new("")
+            ))
+            .unwrap(),
+            br"(?:|.*/)rust/target(?:/|$)".to_vec(),
         );
     }
 
     #[test]
     fn test_build_single_regex_shortcut() {
         assert_eq!(
-            br"(?:/|$)".to_vec(),
-            build_single_regex(b"rootglob", b"", b"").unwrap()
+            build_single_regex(&IgnorePattern::new(
+                PatternSyntax::RootGlob,
+                b"",
+                Path::new("")
+            ))
+            .unwrap(),
+            br"\.(?:/|$)".to_vec(),
         );
         assert_eq!(
+            build_single_regex(&IgnorePattern::new(
+                PatternSyntax::RootGlob,
+                b"whatever",
+                Path::new("")
+            ))
+            .unwrap(),
             br"whatever(?:/|$)".to_vec(),
-            build_single_regex(b"rootglob", b"whatever", b"").unwrap()
         );
         assert_eq!(
-            br"[^/]*\.o".to_vec(),
-            build_single_regex(b"rootglob", b"*.o", b"").unwrap()
+            build_single_regex(&IgnorePattern::new(
+                PatternSyntax::RootGlob,
+                b"*.o",
+                Path::new("")
+            ))
+            .unwrap(),
+            br"[^/]*\.o(?:/|$)".to_vec(),
         );
     }
 }
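
For illustration, a minimal sketch of the new pattern-file entry points above; the `.hgignore` path is illustrative and errors propagate through `PatternResult`:

use std::path::Path;

fn load_ignore(root: &Path) -> PatternResult<Vec<IgnorePattern>> {
    // `include:` lines are expanded recursively by get_patterns_from_file;
    // `subinclude:` entries stay in place and are split out later by
    // filter_subincludes during the "ignore" phase.
    let (patterns, warnings) =
        get_patterns_from_file(root.join(".hgignore"), root)?;
    for warning in &warnings {
        // PatternFileWarning derives Debug, so this covers both variants.
        eprintln!("pattern file warning: {:?}", warning);
    }
    Ok(patterns)
}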
--- a/rust/hg-core/src/lib.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/lib.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -21,11 +21,14 @@
 pub mod matchers;
 pub mod revlog;
 pub use revlog::*;
+#[cfg(feature = "with-re2")]
+pub mod re2;
 pub mod utils;
 
-use crate::utils::hg_path::HgPathBuf;
+use crate::utils::hg_path::{HgPathBuf, HgPathError};
 pub use filepatterns::{
-    build_single_regex, read_pattern_file, PatternSyntax, PatternTuple,
+    parse_pattern_syntax, read_pattern_file, IgnorePattern,
+    PatternFileWarning, PatternSyntax,
 };
 use std::collections::HashMap;
 use twox_hash::RandomXxHashBuilder64;
@@ -79,18 +82,17 @@
 pub enum DirstateMapError {
     PathNotFound(HgPathBuf),
     EmptyPath,
-    ConsecutiveSlashes,
+    InvalidPath(HgPathError),
 }
 
 impl ToString for DirstateMapError {
     fn to_string(&self) -> String {
-        use crate::DirstateMapError::*;
         match self {
-            PathNotFound(_) => "expected a value, found none".to_string(),
-            EmptyPath => "Overflow in dirstate.".to_string(),
-            ConsecutiveSlashes => {
-                "found invalid consecutive slashes in path".to_string()
+            DirstateMapError::PathNotFound(_) => {
+                "expected a value, found none".to_string()
             }
+            DirstateMapError::EmptyPath => "Overflow in dirstate.".to_string(),
+            DirstateMapError::InvalidPath(e) => e.to_string(),
         }
     }
 }
@@ -116,18 +118,37 @@
 
 #[derive(Debug)]
 pub enum PatternError {
+    Path(HgPathError),
     UnsupportedSyntax(String),
+    UnsupportedSyntaxInFile(String, String, usize),
+    TooLong(usize),
+    IO(std::io::Error),
+    /// Needed a pattern that can be turned into a regex but got one that
+    /// can't. This should only happen through programmer error.
+    NonRegexPattern(IgnorePattern),
 }
 
-#[derive(Debug)]
-pub enum PatternFileError {
-    IO(std::io::Error),
-    Pattern(PatternError, LineNumber),
-}
-
-impl From<std::io::Error> for PatternFileError {
-    fn from(e: std::io::Error) -> Self {
-        PatternFileError::IO(e)
+impl ToString for PatternError {
+    fn to_string(&self) -> String {
+        match self {
+            PatternError::UnsupportedSyntax(syntax) => {
+                format!("Unsupported syntax {}", syntax)
+            }
+            PatternError::UnsupportedSyntaxInFile(syntax, file_path, line) => {
+                format!(
+                    "{}:{}: unsupported syntax {}",
+                    file_path, line, syntax
+                )
+            }
+            PatternError::TooLong(size) => {
+                format!("matcher pattern is too long ({} bytes)", size)
+            }
+            PatternError::IO(e) => e.to_string(),
+            PatternError::Path(e) => e.to_string(),
+            PatternError::NonRegexPattern(pattern) => {
+                format!("'{:?}' cannot be turned into a regex", pattern)
+            }
+        }
     }
 }
 
@@ -142,3 +163,15 @@
         DirstateError::IO(e)
     }
 }
+
+impl From<std::io::Error> for PatternError {
+    fn from(e: std::io::Error) -> Self {
+        PatternError::IO(e)
+    }
+}
+
+impl From<HgPathError> for PatternError {
+    fn from(e: HgPathError) -> Self {
+        PatternError::Path(e)
+    }
+}
--- a/rust/hg-core/src/matchers.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/matchers.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -10,7 +10,9 @@
 use crate::{utils::hg_path::HgPath, DirsMultiset, DirstateMapError};
 use std::collections::HashSet;
 use std::iter::FromIterator;
+use std::ops::Deref;
 
+#[derive(Debug, PartialEq)]
 pub enum VisitChildrenSet<'a> {
     /// Don't visit anything
     Empty,
@@ -163,12 +165,48 @@
     }
     fn visit_children_set(
         &self,
-        _directory: impl AsRef<HgPath>,
+        directory: impl AsRef<HgPath>,
     ) -> VisitChildrenSet {
-        // TODO implement once we have `status.traverse`
-        // This is useless until unknown files are taken into account
-        // Which will not need to happen before the `IncludeMatcher`.
-        unimplemented!()
+        if self.files.is_empty() || !self.dirs.contains(&directory) {
+            return VisitChildrenSet::Empty;
+        }
+        let dirs_as_set = self.dirs.iter().map(|k| k.deref()).collect();
+
+        let mut candidates: HashSet<&HgPath> =
+            self.files.union(&dirs_as_set).map(|k| *k).collect();
+        candidates.remove(HgPath::new(b""));
+
+        if !directory.as_ref().is_empty() {
+            let directory = [directory.as_ref().as_bytes(), b"/"].concat();
+            candidates = candidates
+                .iter()
+                .filter_map(|c| {
+                    if c.as_bytes().starts_with(&directory) {
+                        Some(HgPath::new(&c.as_bytes()[directory.len()..]))
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+        }
+
+        // `self.dirs` includes all of the directories, recursively, so if
+        // we're attempting to match 'foo/bar/baz.txt', it'll have '', 'foo',
+        // 'foo/bar' in it. Thus we can safely ignore a candidate that has a
+        // '/' in it, indicating it's for a subdir-of-a-subdir; the immediate
+        // subdir will be in there without a slash.
+        VisitChildrenSet::Set(
+            candidates
+                .iter()
+                .filter_map(|c| {
+                    if c.bytes().all(|b| *b != b'/') {
+                        Some(*c)
+                    } else {
+                        None
+                    }
+                })
+                .collect(),
+        )
     }
     fn matches_everything(&self) -> bool {
         false
@@ -177,3 +215,107 @@
         true
     }
 }
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use pretty_assertions::assert_eq;
+
+    #[test]
+    fn test_filematcher_visit_children_set() {
+        // Visitchildrenset
+        let files = vec![HgPath::new(b"dir/subdir/foo.txt")];
+        let matcher = FileMatcher::new(&files).unwrap();
+
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"dir"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::Set(set)
+        );
+
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"subdir"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::Set(set)
+        );
+
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"foo.txt"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::Set(set)
+        );
+
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"dir/subdir/foo.txt")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+    }
+
+    #[test]
+    fn test_filematcher_visit_children_set_files_and_dirs() {
+        let files = vec![
+            HgPath::new(b"rootfile.txt"),
+            HgPath::new(b"a/file1.txt"),
+            HgPath::new(b"a/b/file2.txt"),
+            // No file in a/b/c
+            HgPath::new(b"a/b/c/d/file4.txt"),
+        ];
+        let matcher = FileMatcher::new(&files).unwrap();
+
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"a"));
+        set.insert(HgPath::new(b"rootfile.txt"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::Set(set)
+        );
+
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"b"));
+        set.insert(HgPath::new(b"file1.txt"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"a")),
+            VisitChildrenSet::Set(set)
+        );
+
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"c"));
+        set.insert(HgPath::new(b"file2.txt"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"a/b")),
+            VisitChildrenSet::Set(set)
+        );
+
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"d"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"a/b/c")),
+            VisitChildrenSet::Set(set)
+        );
+        let mut set = HashSet::new();
+        set.insert(HgPath::new(b"file4.txt"));
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"a/b/c/d")),
+            VisitChildrenSet::Set(set)
+        );
+
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"a/b/c/d/e")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            matcher.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/re2/mod.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,21 @@
+/// re2 module
+///
+/// The Python implementation of Mercurial uses the Re2 regex engine when
+/// possible and if the bindings are installed, falling back to Python's `re`
+/// in case of unsupported syntax (Re2 is a non-backtracking engine).
+///
+/// Using it from Rust is not ideal. We need C++ bindings, a C++ compiler,
+/// Re2 needs to be installed... why not just use the `regex` crate?
+///
+/// Using Re2 from the Rust implementation guarantees backwards compatibility.
+/// We know it will work out of the box without needing to figure out the
+/// subtle differences in syntax. For example, `regex` currently does not
+/// support empty alternations (regexes like `a||b`), which happen more often
+/// than we might think. Old benchmarks also showed worse performance from
+/// regex than with Re2, but the methodology and results were lost, so take
+/// this with a grain of salt.
+///
+/// The idea is to use Re2 for now as a temporary phase and then investigate
+/// how much work would be needed to use `regex`.
+mod re2;
+pub use re2::Re2;
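For illustration, a minimal sketch (not from this changeset) of how the `Re2` wrapper defined below in `re2.rs` might be called, assuming the C++ shim in `rust_re2.cpp` has been compiled and linked; the pattern and inputs are made up:

    fn re2_example() -> Result<(), String> {
        // `Re2::new` returns the Re2 error message as a `String` on failure.
        let re = Re2::new(b"[a-f0-9]+")?;
        // `is_match` passes anchor = 1, i.e. RE2::ANCHOR_START in the C++
        // shim, so the pattern has to match at the start of the input.
        assert!(re.is_match(b"cafe babe"));
        assert!(!re.is_match(b"xyz"));
        Ok(())
    }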
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/re2/re2.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,66 @@
+/*
+re2.rs
+
+Rust FFI bindings to Re2.
+
+Copyright 2020 Valentin Gatien-Baron
+
+This software may be used and distributed according to the terms of the
+GNU General Public License version 2 or any later version.
+*/
+use libc::{c_int, c_void};
+
+type Re2Ptr = *const c_void;
+
+pub struct Re2(Re2Ptr);
+
+/// `re2.h` says:
+/// "An "RE2" object is safe for concurrent use by multiple threads."
+unsafe impl Sync for Re2 {}
+
+/// These bind to the C ABI in `rust_re2.cpp`.
+extern "C" {
+    fn rust_re2_create(data: *const u8, len: usize) -> Re2Ptr;
+    fn rust_re2_destroy(re2: Re2Ptr);
+    fn rust_re2_ok(re2: Re2Ptr) -> bool;
+    fn rust_re2_error(
+        re2: Re2Ptr,
+        outdata: *mut *const u8,
+        outlen: *mut usize,
+    ) -> bool;
+    fn rust_re2_match(
+        re2: Re2Ptr,
+        data: *const u8,
+        len: usize,
+        anchor: c_int,
+    ) -> bool;
+}
+
+impl Re2 {
+    pub fn new(pattern: &[u8]) -> Result<Re2, String> {
+        unsafe {
+            let re2 = rust_re2_create(pattern.as_ptr(), pattern.len());
+            if rust_re2_ok(re2) {
+                Ok(Re2(re2))
+            } else {
+                let mut data: *const u8 = std::ptr::null();
+                let mut len: usize = 0;
+                rust_re2_error(re2, &mut data, &mut len);
+                Err(String::from_utf8_lossy(std::slice::from_raw_parts(
+                    data, len,
+                ))
+                .to_string())
+            }
+        }
+    }
+
+    pub fn is_match(&self, data: &[u8]) -> bool {
+        unsafe { rust_re2_match(self.0, data.as_ptr(), data.len(), 1) }
+    }
+}
+
+impl Drop for Re2 {
+    fn drop(&mut self) {
+        unsafe { rust_re2_destroy(self.0) }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/re2/rust_re2.cpp	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,49 @@
+/*
+rust_re2.cpp
+
+C ABI export of Re2's C++ interface for Rust FFI.
+
+Copyright 2020 Valentin Gatien-Baron
+
+This software may be used and distributed according to the terms of the
+GNU General Public License version 2 or any later version.
+*/
+
+#include <re2/re2.h>
+using namespace re2;
+
+extern "C" {
+	RE2* rust_re2_create(const char* data, size_t len) {
+		RE2::Options o;
+		o.set_encoding(RE2::Options::Encoding::EncodingLatin1);
+		o.set_log_errors(false);
+		o.set_max_mem(50000000);
+
+		return new RE2(StringPiece(data, len), o);
+	}
+
+	void rust_re2_destroy(RE2* re) {
+		delete re;
+	}
+
+	bool rust_re2_ok(RE2* re) {
+		return re->ok();
+	}
+
+	void rust_re2_error(RE2* re, const char** outdata, size_t* outlen) {
+		const std::string& e = re->error();
+		*outdata = e.data();
+		*outlen = e.length();
+	}
+
+	bool rust_re2_match(RE2* re, char* data, size_t len, int ianchor) {
+		const StringPiece sp = StringPiece(data, len);
+
+		RE2::Anchor anchor =
+			ianchor == 0 ? RE2::Anchor::UNANCHORED :
+			(ianchor == 1 ? RE2::Anchor::ANCHOR_START :
+			 RE2::Anchor::ANCHOR_BOTH);
+
+		return re->Match(sp, 0, len, anchor, NULL, 0);
+	}
+}
--- a/rust/hg-core/src/revlog.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/revlog.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -5,6 +5,10 @@
 // GNU General Public License version 2 or any later version.
 //! Mercurial concepts for handling revision history
 
+pub mod node;
+pub mod nodemap;
+pub use node::{Node, NodeError, NodePrefix, NodePrefixRef};
+
 /// Mercurial revision numbers
 ///
 /// As noted in revlog.c, revision numbers are actually encoded in
@@ -36,3 +40,17 @@
     ParentOutOfRange(Revision),
     WorkingDirectoryUnsupported,
 }
+
+/// The Mercurial Revlog Index
+///
+/// This is currently limited to the minimal interface that is needed for
+/// the [`nodemap`](nodemap/index.html) module
+pub trait RevlogIndex {
+    /// Total number of Revisions referenced in this index
+    fn len(&self) -> usize;
+
+    /// Return a reference to the Node or `None` if rev is out of bounds
+    ///
+    /// `NULL_REVISION` is not considered to be out of bounds.
+    fn node(&self, rev: Revision) -> Option<&Node>;
+}
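For illustration, a minimal sketch (not from this changeset) of an in-memory `RevlogIndex` implementation, along the lines of the `TestIndex` used by the nodemap tests later in this patch; the type name is made up:

    use std::collections::HashMap;

    struct InMemoryIndex(HashMap<Revision, Node>);

    impl RevlogIndex for InMemoryIndex {
        fn len(&self) -> usize {
            self.0.len()
        }

        fn node(&self, rev: Revision) -> Option<&Node> {
            self.0.get(&rev)
        }
    }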
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/node.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,428 @@
+// Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Definitions and utilities for Revision nodes
+//!
+//! In the Mercurial code base, it is customary to call "a node" the binary SHA
+//! of a revision.
+
+use hex::{self, FromHex, FromHexError};
+
+/// The length in bytes of a `Node`
+///
+/// This constant is meant to ease refactors of this module, and
+/// is private so that calling code does not expect all nodes to have
+/// the same size, should we support several formats concurrently in
+/// the future.
+const NODE_BYTES_LENGTH: usize = 20;
+
+/// The length in nybbles (hexadecimal digits) of a `Node`
+///
+/// See also `NODE_BYTES_LENGTH` about it being private.
+const NODE_NYBBLES_LENGTH: usize = 2 * NODE_BYTES_LENGTH;
+
+/// Private alias for readability and to ease future change
+type NodeData = [u8; NODE_BYTES_LENGTH];
+
+/// Binary revision SHA
+///
+/// ## Future changes of hash size
+///
+/// To accommodate future changes of hash size, Rust callers
+/// should use the conversion methods at the boundaries (FFI, actual
+/// computation of hashes and I/O) only, and only if required.
+///
+/// All other callers outside of unit tests should just handle `Node` values
+/// and never make any assumption on the actual length, using [`nybbles_len`]
+/// if they need a loop boundary.
+///
+/// All methods that create a `Node` either take a type that enforces
+/// the size or fail immediately at runtime with [`ExactLengthRequired`].
+///
+/// [`nybbles_len`]: #method.nybbles_len
+/// [`ExactLengthRequired`]: enum.NodeError.html#variant.ExactLengthRequired
+#[derive(Clone, Debug, PartialEq)]
+pub struct Node {
+    data: NodeData,
+}
+
+/// The node value for NULL_REVISION
+pub const NULL_NODE: Node = Node {
+    data: [0; NODE_BYTES_LENGTH],
+};
+
+impl From<NodeData> for Node {
+    fn from(data: NodeData) -> Node {
+        Node { data }
+    }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum NodeError {
+    ExactLengthRequired(usize, String),
+    PrefixTooLong(String),
+    HexError(FromHexError, String),
+}
+
+/// Low level utility function, also for prefixes
+fn get_nybble(s: &[u8], i: usize) -> u8 {
+    if i % 2 == 0 {
+        s[i / 2] >> 4
+    } else {
+        s[i / 2] & 0x0f
+    }
+}
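For illustration, a worked example (not from this changeset) of the nybble numbering used throughout this module, based on the helper above:

    fn nybble_example() {
        let s = [0xab_u8, 0xcd];
        assert_eq!(get_nybble(&s, 0), 0xa); // high half of the first byte
        assert_eq!(get_nybble(&s, 1), 0xb); // low half of the first byte
        assert_eq!(get_nybble(&s, 2), 0xc);
        assert_eq!(get_nybble(&s, 3), 0xd);
    }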
+
+impl Node {
+    /// Retrieve the `i`th half-byte of the binary data.
+    ///
+    /// This is also the `i`th hexadecimal digit in numeric form,
+    /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
+    pub fn get_nybble(&self, i: usize) -> u8 {
+        get_nybble(&self.data, i)
+    }
+
+    /// Length of the data, in nybbles
+    pub fn nybbles_len(&self) -> usize {
+        // public exposure as an instance method only, so that we can
+        // easily support several sizes of hashes if needed in the future.
+        NODE_NYBBLES_LENGTH
+    }
+
+    /// Convert from hexadecimal string representation
+    ///
+    /// Exact length is required.
+    ///
+    /// To be used in FFI and I/O only, in order to facilitate future
+    /// changes of hash format.
+    pub fn from_hex(hex: &str) -> Result<Node, NodeError> {
+        Ok(NodeData::from_hex(hex)
+            .map_err(|e| NodeError::from((e, hex)))?
+            .into())
+    }
+
+    /// Convert to hexadecimal string representation
+    ///
+    /// To be used in FFI and I/O only, in order to facilitate future
+    /// changes of hash format.
+    pub fn encode_hex(&self) -> String {
+        hex::encode(self.data)
+    }
+
+    /// Provide access to binary data
+    ///
+    /// This is needed by FFI layers, for instance to return expected
+    /// binary values to Python.
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.data
+    }
+}
+
+impl<T: AsRef<str>> From<(FromHexError, T)> for NodeError {
+    fn from(err_offender: (FromHexError, T)) -> Self {
+        let (err, offender) = err_offender;
+        match err {
+            FromHexError::InvalidStringLength => {
+                NodeError::ExactLengthRequired(
+                    NODE_NYBBLES_LENGTH,
+                    offender.as_ref().to_owned(),
+                )
+            }
+            _ => NodeError::HexError(err, offender.as_ref().to_owned()),
+        }
+    }
+}
+
+/// The beginning of a binary revision SHA.
+///
+/// Since it can potentially come from a hexadecimal representation with
+/// odd length, it needs to carry around whether the last 4 bits are relevant
+/// or not.
+#[derive(Debug, PartialEq)]
+pub struct NodePrefix {
+    buf: Vec<u8>,
+    is_odd: bool,
+}
+
+impl NodePrefix {
+    /// Convert from hexadecimal string representation
+    ///
+    /// Similarly to `hex::decode`, can be used with Unicode string types
+    /// (`String`, `&str`) as well as bytes.
+    ///
+    /// To be used in FFI and I/O only, in order to facilitate future
+    /// changes of hash format.
+    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, NodeError> {
+        let hex = hex.as_ref();
+        let len = hex.len();
+        if len > NODE_NYBBLES_LENGTH {
+            return Err(NodeError::PrefixTooLong(
+                String::from_utf8_lossy(hex).to_owned().to_string(),
+            ));
+        }
+
+        let is_odd = len % 2 == 1;
+        let even_part = if is_odd { &hex[..len - 1] } else { hex };
+        let mut buf: Vec<u8> = Vec::from_hex(&even_part)
+            .map_err(|e| (e, String::from_utf8_lossy(hex)))?;
+
+        if is_odd {
+            let latest_char = char::from(hex[len - 1]);
+            let latest_nybble = latest_char.to_digit(16).ok_or_else(|| {
+                (
+                    FromHexError::InvalidHexCharacter {
+                        c: latest_char,
+                        index: len - 1,
+                    },
+                    String::from_utf8_lossy(hex),
+                )
+            })? as u8;
+            buf.push(latest_nybble << 4);
+        }
+        Ok(NodePrefix { buf, is_odd })
+    }
+
+    pub fn borrow(&self) -> NodePrefixRef {
+        NodePrefixRef {
+            buf: &self.buf,
+            is_odd: self.is_odd,
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct NodePrefixRef<'a> {
+    buf: &'a [u8],
+    is_odd: bool,
+}
+
+impl<'a> NodePrefixRef<'a> {
+    pub fn len(&self) -> usize {
+        if self.is_odd {
+            self.buf.len() * 2 - 1
+        } else {
+            self.buf.len() * 2
+        }
+    }
+
+    pub fn is_prefix_of(&self, node: &Node) -> bool {
+        if self.is_odd {
+            let buf = self.buf;
+            let last_pos = buf.len() - 1;
+            node.data.starts_with(buf.split_at(last_pos).0)
+                && node.data[last_pos] >> 4 == buf[last_pos] >> 4
+        } else {
+            node.data.starts_with(self.buf)
+        }
+    }
+
+    /// Retrieve the `i`th half-byte from the prefix.
+    ///
+    /// This is also the `i`th hexadecimal digit in numeric form,
+    /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
+    pub fn get_nybble(&self, i: usize) -> u8 {
+        assert!(i < self.len());
+        get_nybble(self.buf, i)
+    }
+
+    /// Return the index of the first nybble that's different from `node`
+    ///
+    /// If the return value is `None` that means that `self` is
+    /// a prefix of `node`, but the current method is a bit slower
+    /// than `is_prefix_of`.
+    ///
+    /// Returned index is as in `get_nybble`, i.e., starting at 0.
+    pub fn first_different_nybble(&self, node: &Node) -> Option<usize> {
+        let buf = self.buf;
+        let until = if self.is_odd {
+            buf.len() - 1
+        } else {
+            buf.len()
+        };
+        for i in 0..until {
+            if buf[i] != node.data[i] {
+                if buf[i] & 0xf0 == node.data[i] & 0xf0 {
+                    return Some(2 * i + 1);
+                } else {
+                    return Some(2 * i);
+                }
+            }
+        }
+        if self.is_odd && buf[until] & 0xf0 != node.data[until] & 0xf0 {
+            Some(until * 2)
+        } else {
+            None
+        }
+    }
+}
+
+/// A shortcut for full `Node` references
+impl<'a> From<&'a Node> for NodePrefixRef<'a> {
+    fn from(node: &'a Node) -> Self {
+        NodePrefixRef {
+            buf: &node.data,
+            is_odd: false,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn sample_node() -> Node {
+        let mut data = [0; NODE_BYTES_LENGTH];
+        data.copy_from_slice(&[
+            0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba,
+            0x98, 0x76, 0x54, 0x32, 0x10, 0xde, 0xad, 0xbe, 0xef,
+        ]);
+        data.into()
+    }
+
+    /// Pad a hexadecimal string to reach `NODE_NYBBLES_LENGTH`
+    ///
+    /// The padding is made with zeros
+    pub fn hex_pad_right(hex: &str) -> String {
+        let mut res = hex.to_string();
+        while res.len() < NODE_NYBBLES_LENGTH {
+            res.push('0');
+        }
+        res
+    }
+
+    fn sample_node_hex() -> String {
+        hex_pad_right("0123456789abcdeffedcba9876543210deadbeef")
+    }
+
+    #[test]
+    fn test_node_from_hex() {
+        assert_eq!(Node::from_hex(&sample_node_hex()), Ok(sample_node()));
+
+        let mut short = hex_pad_right("0123");
+        short.pop();
+        short.pop();
+        assert_eq!(
+            Node::from_hex(&short),
+            Err(NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, short)),
+        );
+
+        let not_hex = hex_pad_right("012... oops");
+        assert_eq!(
+            Node::from_hex(&not_hex),
+            Err(NodeError::HexError(
+                FromHexError::InvalidHexCharacter { c: '.', index: 3 },
+                not_hex,
+            )),
+        );
+    }
+
+    #[test]
+    fn test_node_encode_hex() {
+        assert_eq!(sample_node().encode_hex(), sample_node_hex());
+    }
+
+    #[test]
+    fn test_prefix_from_hex() -> Result<(), NodeError> {
+        assert_eq!(
+            NodePrefix::from_hex("0e1")?,
+            NodePrefix {
+                buf: vec![14, 16],
+                is_odd: true
+            }
+        );
+        assert_eq!(
+            NodePrefix::from_hex("0e1a")?,
+            NodePrefix {
+                buf: vec![14, 26],
+                is_odd: false
+            }
+        );
+
+        // checking limit case
+        let node_as_vec = sample_node().data.iter().cloned().collect();
+        assert_eq!(
+            NodePrefix::from_hex(sample_node_hex())?,
+            NodePrefix {
+                buf: node_as_vec,
+                is_odd: false
+            }
+        );
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_prefix_from_hex_errors() {
+        assert_eq!(
+            NodePrefix::from_hex("testgr"),
+            Err(NodeError::HexError(
+                FromHexError::InvalidHexCharacter { c: 't', index: 0 },
+                "testgr".to_string()
+            ))
+        );
+        let mut long = NULL_NODE.encode_hex();
+        long.push('c');
+        match NodePrefix::from_hex(&long)
+            .expect_err("should be refused as too long")
+        {
+            NodeError::PrefixTooLong(s) => assert_eq!(s, long),
+            err => panic!("Should have been PrefixTooLong, got {:?}", err),
+        }
+    }
+
+    #[test]
+    fn test_is_prefix_of() -> Result<(), NodeError> {
+        let mut node_data = [0; NODE_BYTES_LENGTH];
+        node_data[0] = 0x12;
+        node_data[1] = 0xca;
+        let node = Node::from(node_data);
+        assert!(NodePrefix::from_hex("12")?.borrow().is_prefix_of(&node));
+        assert!(!NodePrefix::from_hex("1a")?.borrow().is_prefix_of(&node));
+        assert!(NodePrefix::from_hex("12c")?.borrow().is_prefix_of(&node));
+        assert!(!NodePrefix::from_hex("12d")?.borrow().is_prefix_of(&node));
+        Ok(())
+    }
+
+    #[test]
+    fn test_get_nybble() -> Result<(), NodeError> {
+        let prefix = NodePrefix::from_hex("dead6789cafe")?;
+        assert_eq!(prefix.borrow().get_nybble(0), 13);
+        assert_eq!(prefix.borrow().get_nybble(7), 9);
+        Ok(())
+    }
+
+    #[test]
+    fn test_first_different_nybble_even_prefix() {
+        let prefix = NodePrefix::from_hex("12ca").unwrap();
+        let prefref = prefix.borrow();
+        let mut node = Node::from([0; NODE_BYTES_LENGTH]);
+        assert_eq!(prefref.first_different_nybble(&node), Some(0));
+        node.data[0] = 0x13;
+        assert_eq!(prefref.first_different_nybble(&node), Some(1));
+        node.data[0] = 0x12;
+        assert_eq!(prefref.first_different_nybble(&node), Some(2));
+        node.data[1] = 0xca;
+        // now it is a prefix
+        assert_eq!(prefref.first_different_nybble(&node), None);
+    }
+
+    #[test]
+    fn test_first_different_nybble_odd_prefix() {
+        let prefix = NodePrefix::from_hex("12c").unwrap();
+        let prefref = prefix.borrow();
+        let mut node = Node::from([0; NODE_BYTES_LENGTH]);
+        assert_eq!(prefref.first_different_nybble(&node), Some(0));
+        node.data[0] = 0x13;
+        assert_eq!(prefref.first_different_nybble(&node), Some(1));
+        node.data[0] = 0x12;
+        assert_eq!(prefref.first_different_nybble(&node), Some(2));
+        node.data[1] = 0xca;
+        // now it is a prefix
+        assert_eq!(prefref.first_different_nybble(&node), None);
+    }
+}
+
+#[cfg(test)]
+pub use tests::hex_pad_right;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/nodemap.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,1122 @@
+// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
+//           and Mercurial contributors
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+//! Indexing facilities for fast retrieval of `Revision` from `Node`
+//!
+//! This provides a variation on the 16-ary radix tree that is
+//! provided as "nodetree" in revlog.c, ready for append-only persistence
+//! on disk.
+//!
+//! Following existing implicit conventions, the "nodemap" terminology
+//! is used in a more abstract context.
+
+use super::{
+    node::NULL_NODE, Node, NodeError, NodePrefix, NodePrefixRef, Revision,
+    RevlogIndex, NULL_REVISION,
+};
+
+use std::cmp::max;
+use std::fmt;
+use std::mem;
+use std::ops::Deref;
+use std::ops::Index;
+use std::slice;
+
+#[derive(Debug, PartialEq)]
+pub enum NodeMapError {
+    MultipleResults,
+    InvalidNodePrefix(NodeError),
+    /// A `Revision` stored in the nodemap could not be found in the index
+    RevisionNotInIndex(Revision),
+}
+
+impl From<NodeError> for NodeMapError {
+    fn from(err: NodeError) -> Self {
+        NodeMapError::InvalidNodePrefix(err)
+    }
+}
+
+/// Mapping system from Mercurial nodes to revision numbers.
+///
+/// ## `RevlogIndex` and `NodeMap`
+///
+/// One way to think about their relationship is that
+/// the `NodeMap` is a prefix-oriented reverse index of the `Node` information
+/// carried by a [`RevlogIndex`].
+///
+/// Many of the methods in this trait take a `RevlogIndex` argument
+/// which is used for validation of their results. This index must naturally
+/// be the one the `NodeMap` is about, and it must be consistent.
+///
+/// Notably, the `NodeMap` must not store
+/// information about more `Revision` values than there are in the index.
+/// In these methods, if an encountered `Revision` is not in the index, a
+/// [`RevisionNotInIndex`] error is returned.
+///
+/// In insert operations, the rule is thus that the `RevlogIndex` must be
+/// updated first, and the `NodeMap` second, so that the map never refers
+/// to revisions missing from the index.
+///
+/// [`RevisionNotInIndex`]: enum.NodeMapError.html#variant.RevisionNotInIndex
+/// [`RevlogIndex`]: ../trait.RevlogIndex.html
+pub trait NodeMap {
+    /// Find the unique `Revision` having the given `Node`
+    ///
+    /// If no Revision matches the given `Node`, `Ok(None)` is returned.
+    fn find_node(
+        &self,
+        index: &impl RevlogIndex,
+        node: &Node,
+    ) -> Result<Option<Revision>, NodeMapError> {
+        self.find_bin(index, node.into())
+    }
+
+    /// Find the unique Revision whose `Node` starts with a given binary prefix
+    ///
+    /// If no Revision matches the given prefix, `Ok(None)` is returned.
+    ///
+    /// If several Revisions match the given prefix, a [`MultipleResults`]
+    /// error is returned.
+    fn find_bin<'a>(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: NodePrefixRef<'a>,
+    ) -> Result<Option<Revision>, NodeMapError>;
+
+    /// Find the unique Revision whose `Node` hexadecimal string representation
+    /// starts with a given prefix
+    ///
+    /// If no Revision matches the given prefix, `Ok(None)` is returned.
+    ///
+    /// If several Revisions match the given prefix, a [`MultipleResults`]
+    /// error is returned.
+    fn find_hex(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: &str,
+    ) -> Result<Option<Revision>, NodeMapError> {
+        self.find_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
+    }
+
+    /// Give the size of the shortest node prefix that determines
+    /// the revision uniquely.
+    ///
+    /// From a binary node prefix, if it is matched in the node map, this
+    /// returns the number of hexadecimal digits that would have sufficed
+    /// to find the revision uniquely.
+    ///
+    /// Returns `None` if no `Revision` could be found for the prefix.
+    ///
+    /// If several Revisions match the given prefix, a [`MultipleResults`]
+    /// error is returned.
+    fn unique_prefix_len_bin<'a>(
+        &self,
+        idx: &impl RevlogIndex,
+        node_prefix: NodePrefixRef<'a>,
+    ) -> Result<Option<usize>, NodeMapError>;
+
+    /// Same as `unique_prefix_len_bin`, with the hexadecimal representation
+    /// of the prefix as input.
+    fn unique_prefix_len_hex(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: &str,
+    ) -> Result<Option<usize>, NodeMapError> {
+        self.unique_prefix_len_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
+    }
+
+    /// Same as `unique_prefix_len_bin`, with a full `Node` as input
+    fn unique_prefix_len_node(
+        &self,
+        idx: &impl RevlogIndex,
+        node: &Node,
+    ) -> Result<Option<usize>, NodeMapError> {
+        self.unique_prefix_len_bin(idx, node.into())
+    }
+}
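For illustration, a sketch (not from this changeset) of how a `NodeMap` implementation is queried against its index, mirroring the tests at the end of this file; the prefix string is made up:

    fn nodemap_example(
        idx: &impl RevlogIndex,
        nt: &impl NodeMap,
    ) -> Result<(), NodeMapError> {
        // Resolve a hexadecimal prefix to the unique matching revision,
        // if any; an ambiguous prefix yields `Err(MultipleResults)`.
        let _rev: Option<Revision> = nt.find_hex(idx, "1234de")?;
        // Length of the shortest hex prefix that still identifies it uniquely.
        let _len: Option<usize> = nt.unique_prefix_len_hex(idx, "1234de")?;
        Ok(())
    }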
+
+pub trait MutableNodeMap: NodeMap {
+    fn insert<I: RevlogIndex>(
+        &mut self,
+        index: &I,
+        node: &Node,
+        rev: Revision,
+    ) -> Result<(), NodeMapError>;
+}
+
+/// Low level NodeTree [`Blocks`] elements
+///
+/// These are stored in exactly this form, for instance on persistent storage.
+type RawElement = i32;
+
+/// High level representation of values in NodeTree
+/// [`Blocks`](struct.Block.html)
+///
+/// This is the high level representation that most algorithms should
+/// use.
+#[derive(Clone, Debug, Eq, PartialEq)]
+enum Element {
+    Rev(Revision),
+    Block(usize),
+    None,
+}
+
+impl From<RawElement> for Element {
+    /// Conversion from low level representation, after endianness conversion.
+    ///
+    /// See [`Block`](struct.Block.html) for explanation about the encoding.
+    fn from(raw: RawElement) -> Element {
+        if raw >= 0 {
+            Element::Block(raw as usize)
+        } else if raw == -1 {
+            Element::None
+        } else {
+            Element::Rev(-raw - 2)
+        }
+    }
+}
+
+impl From<Element> for RawElement {
+    fn from(element: Element) -> RawElement {
+        match element {
+            Element::None => -1, // -1 is the "absent" marker, see `Block`
+            Element::Block(i) => i as RawElement,
+            Element::Rev(rev) => -rev - 2,
+        }
+    }
+}
+
+/// A logical block of the `NodeTree`, packed with a fixed size.
+///
+/// These are always used in container types implementing `Index<Block>`,
+/// such as `&Block`
+///
+/// As an array of integers, its ith element encodes the ith potential edge
+/// from the block, i.e. the one representing the ith hexadecimal digit
+/// (nybble). Each element is either:
+///
+/// - absent (value -1)
+/// - another `Block` in the same indexable container (value ≥ 0)
+/// - a `Revision` leaf (value ≤ -2)
+///
+/// Endianness has to be fixed for consistency on shared storage across
+/// different architectures.
+///
+/// A key difference with the C `nodetree` is that we need to be
+/// able to represent the [`Block`] at index 0, hence -1 is the empty marker
+/// rather than 0, and the `Revision` range upper limit is -2 instead of -1.
+///
+/// Another related difference is that `NULL_REVISION` (-1) is not
+/// represented at all, because we want an immutable empty nodetree
+/// to be valid.
+
+#[derive(Copy, Clone)]
+pub struct Block([u8; BLOCK_SIZE]);
+
+/// Not derivable for arrays of length >32 until const generics are stable
+impl PartialEq for Block {
+    fn eq(&self, other: &Self) -> bool {
+        &self.0[..] == &other.0[..]
+    }
+}
+
+pub const BLOCK_SIZE: usize = 64;
+
+impl Block {
+    fn new() -> Self {
+        // -1 in 2's complement to create an absent node
+        let byte: u8 = 255;
+        Block([byte; BLOCK_SIZE])
+    }
+
+    fn get(&self, nybble: u8) -> Element {
+        let index = nybble as usize * mem::size_of::<RawElement>();
+        Element::from(RawElement::from_be_bytes([
+            self.0[index],
+            self.0[index + 1],
+            self.0[index + 2],
+            self.0[index + 3],
+        ]))
+    }
+
+    fn set(&mut self, nybble: u8, element: Element) {
+        let values = RawElement::to_be_bytes(element.into());
+        let index = nybble as usize * mem::size_of::<RawElement>();
+        self.0[index] = values[0];
+        self.0[index + 1] = values[1];
+        self.0[index + 2] = values[2];
+        self.0[index + 3] = values[3];
+    }
+}
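For illustration, a worked example (not from this changeset) of the element encoding described above: a fresh `Block` is filled with 0xff bytes, i.e. every raw element is -1 ("absent"), and a revision `r` is stored as the raw value `-r - 2`:

    fn block_example() {
        let mut block = Block::new();
        assert_eq!(block.get(0), Element::None);   // raw -1
        block.set(0, Element::Rev(0));             // stored as raw -2
        block.set(1, Element::Block(3));           // stored as raw 3
        assert_eq!(block.get(0), Element::Rev(0));
        assert_eq!(block.get(1), Element::Block(3));
    }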
+
+impl fmt::Debug for Block {
+    /// sparse representation for testing and debugging purposes
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_map()
+            .entries((0..16).filter_map(|i| match self.get(i) {
+                Element::None => None,
+                element => Some((i, element)),
+            }))
+            .finish()
+    }
+}
+
+/// A mutable 16-radix tree with the root block logically at the end
+///
+/// Because of the append only nature of our node trees, we need to
+/// keep the original untouched and store new blocks separately.
+///
+/// The mutable root `Block` is kept apart so that we don't have to re-append
+/// it on each insertion.
+pub struct NodeTree {
+    readonly: Box<dyn Deref<Target = [Block]> + Send>,
+    growable: Vec<Block>,
+    root: Block,
+    masked_inner_blocks: usize,
+}
+
+impl Index<usize> for NodeTree {
+    type Output = Block;
+
+    fn index(&self, i: usize) -> &Block {
+        let ro_len = self.readonly.len();
+        if i < ro_len {
+            &self.readonly[i]
+        } else if i == ro_len + self.growable.len() {
+            &self.root
+        } else {
+            &self.growable[i - ro_len]
+        }
+    }
+}
+
+/// Return `None` unless the `Node` for `rev` in `index` has the given prefix.
+fn has_prefix_or_none(
+    idx: &impl RevlogIndex,
+    prefix: NodePrefixRef,
+    rev: Revision,
+) -> Result<Option<Revision>, NodeMapError> {
+    idx.node(rev)
+        .ok_or_else(|| NodeMapError::RevisionNotInIndex(rev))
+        .map(|node| {
+            if prefix.is_prefix_of(node) {
+                Some(rev)
+            } else {
+                None
+            }
+        })
+}
+
+/// Validate that the candidate's node indeed starts with the given prefix,
+/// and treat ambiguities related to `NULL_REVISION`.
+///
+/// From the data in the NodeTree, one can only conclude that some
+/// revision is the only one for a *subprefix* of the one being looked up.
+fn validate_candidate(
+    idx: &impl RevlogIndex,
+    prefix: NodePrefixRef,
+    candidate: (Option<Revision>, usize),
+) -> Result<(Option<Revision>, usize), NodeMapError> {
+    let (rev, steps) = candidate;
+    if let Some(nz_nybble) = prefix.first_different_nybble(&NULL_NODE) {
+        rev.map_or(Ok((None, steps)), |r| {
+            has_prefix_or_none(idx, prefix, r)
+                .map(|opt| (opt, max(steps, nz_nybble + 1)))
+        })
+    } else {
+        // the prefix is only made of zeros; NULL_REVISION always matches it
+        // and any other *valid* result is an ambiguity
+        match rev {
+            None => Ok((Some(NULL_REVISION), steps + 1)),
+            Some(r) => match has_prefix_or_none(idx, prefix, r)? {
+                None => Ok((Some(NULL_REVISION), steps + 1)),
+                _ => Err(NodeMapError::MultipleResults),
+            },
+        }
+    }
+}
+
+impl NodeTree {
+    /// Initiate a NodeTree from an immutable slice-like of `Block`
+    ///
+    /// We keep `readonly` and clone its root block if it isn't empty.
+    fn new(readonly: Box<dyn Deref<Target = [Block]> + Send>) -> Self {
+        let root = readonly
+            .last()
+            .map(|b| b.clone())
+            .unwrap_or_else(|| Block::new());
+        NodeTree {
+            readonly: readonly,
+            growable: Vec::new(),
+            root: root,
+            masked_inner_blocks: 0,
+        }
+    }
+
+    /// Create from an opaque bunch of bytes
+    ///
+    /// The `NodeTreeBytes` is created from the given `bytes`,
+    /// of which exactly `amount` bytes are used.
+    ///
+    /// - `buffer` could be derived from `PyBuffer` and `Mmap` objects.
+    /// - `offset` allows for the final file format to include fixed data
+    ///   (generation number, behavioural flags)
+    /// - `amount` is expressed in bytes, and is not automatically derived from
+    ///   `bytes`, so that a caller that manages them atomically can perform
+    ///   temporary disk serializations and still rollback easily if needed.
+    ///   First use-case for this would be to support Mercurial shell hooks.
+    ///
+    /// panics if `buffer` is smaller than `amount`
+    pub fn load_bytes(
+        bytes: Box<dyn Deref<Target = [u8]> + Send>,
+        amount: usize,
+    ) -> Self {
+        NodeTree::new(Box::new(NodeTreeBytes::new(bytes, amount)))
+    }
+
+    /// Retrieve added `Block` and the original immutable data
+    pub fn into_readonly_and_added(
+        self,
+    ) -> (Box<dyn Deref<Target = [Block]> + Send>, Vec<Block>) {
+        let mut vec = self.growable;
+        let readonly = self.readonly;
+        if readonly.last() != Some(&self.root) {
+            vec.push(self.root);
+        }
+        (readonly, vec)
+    }
+
+    /// Retrieve added `Blocks` as bytes, ready to be written to persistent
+    /// storage
+    pub fn into_readonly_and_added_bytes(
+        self,
+    ) -> (Box<dyn Deref<Target = [Block]> + Send>, Vec<u8>) {
+        let (readonly, vec) = self.into_readonly_and_added();
+        // Prevent running `vec`'s destructor so we are in complete control
+        // of the allocation.
+        let vec = mem::ManuallyDrop::new(vec);
+
+        // Transmute the `Vec<Block>` to a `Vec<u8>`. Blocks are contiguous
+        // bytes, so this is perfectly safe.
+        let bytes = unsafe {
+            // Assert that `Block` hasn't been changed and has no padding
+            let _: [u8; 4 * BLOCK_SIZE] =
+                std::mem::transmute([Block::new(); 4]);
+
+            // /!\ Any use of `vec` after this is use-after-free.
+            // TODO: use `into_raw_parts` once stabilized
+            Vec::from_raw_parts(
+                vec.as_ptr() as *mut u8,
+                vec.len() * BLOCK_SIZE,
+                vec.capacity() * BLOCK_SIZE,
+            )
+        };
+        (readonly, bytes)
+    }
+
+    /// Total number of blocks
+    fn len(&self) -> usize {
+        self.readonly.len() + self.growable.len() + 1
+    }
+
+    /// Implemented for completeness
+    ///
+    /// A `NodeTree` always has at least the mutable root block.
+    #[allow(dead_code)]
+    fn is_empty(&self) -> bool {
+        false
+    }
+
+    /// Main working method for `NodeTree` searches
+    ///
+    /// The first returned value is the result of analysing `NodeTree` data
+    /// *alone*: whereas `None` guarantees that the given prefix is absent
+    /// from the `NodeTree` data (but still could match `NULL_NODE`), with
+    /// `Some(rev)`, it is to be understood that `rev` is the unique `Revision`
+    /// that could match the prefix. Actually, all that can be inferred from
+    /// the `NodeTree` data is that `rev` is the revision with the longest
+    /// common node prefix with the given prefix.
+    ///
+    /// The second returned value is the size of the smallest subprefix
+    /// of `prefix` that would give the same result, i.e. not the
+    /// `MultipleResults` error variant (again, using only the data of the
+    /// `NodeTree`).
+    fn lookup(
+        &self,
+        prefix: NodePrefixRef,
+    ) -> Result<(Option<Revision>, usize), NodeMapError> {
+        for (i, visit_item) in self.visit(prefix).enumerate() {
+            if let Some(opt) = visit_item.final_revision() {
+                return Ok((opt, i + 1));
+            }
+        }
+        Err(NodeMapError::MultipleResults)
+    }
+
+    fn visit<'n, 'p>(
+        &'n self,
+        prefix: NodePrefixRef<'p>,
+    ) -> NodeTreeVisitor<'n, 'p> {
+        NodeTreeVisitor {
+            nt: self,
+            prefix: prefix,
+            visit: self.len() - 1,
+            nybble_idx: 0,
+            done: false,
+        }
+    }
+    /// Return a mutable reference for `Block` at index `idx`.
+    ///
+    /// If `idx` lies in the immutable area, then the reference is to
+    /// a newly appended copy.
+    ///
+    /// Returns `(new_idx, mut_ref, glen)` where
+    ///
+    /// - `new_idx` is the index of the mutable `Block`
+    /// - `mut_ref` is a mutable reference to the mutable Block.
+    /// - `glen` is the new length of `self.growable`
+    ///
+    /// Note: the caller wouldn't be allowed to query `self.growable.len()`
+    /// itself because of the mutable borrow taken with the returned `Block`
+    fn mutable_block(&mut self, idx: usize) -> (usize, &mut Block, usize) {
+        let ro_blocks = &self.readonly;
+        let ro_len = ro_blocks.len();
+        let glen = self.growable.len();
+        if idx < ro_len {
+            self.masked_inner_blocks += 1;
+            // TODO OPTIM I think this makes two copies
+            self.growable.push(ro_blocks[idx].clone());
+            (glen + ro_len, &mut self.growable[glen], glen + 1)
+        } else if glen + ro_len == idx {
+            (idx, &mut self.root, glen)
+        } else {
+            (idx, &mut self.growable[idx - ro_len], glen)
+        }
+    }
+
+    /// Main insertion method
+    ///
+    /// This will dive in the node tree to find the deepest `Block` for
+    /// `node`, split it as much as needed and record `node` in there.
+    /// The method then backtracks, updating references in all the visited
+    /// blocks from the root.
+    ///
+    /// All the mutated `Block` are copied first to the growable part if
+    /// needed. That happens for those in the immutable part except the root.
+    pub fn insert<I: RevlogIndex>(
+        &mut self,
+        index: &I,
+        node: &Node,
+        rev: Revision,
+    ) -> Result<(), NodeMapError> {
+        let ro_len = self.readonly.len();
+
+        let mut visit_steps: Vec<_> = self.visit(node.into()).collect();
+        let read_nybbles = visit_steps.len();
+        // visit_steps cannot be empty, since we always visit the root block
+        let deepest = visit_steps.pop().unwrap();
+
+        let (mut block_idx, mut block, mut glen) =
+            self.mutable_block(deepest.block_idx);
+
+        if let Element::Rev(old_rev) = deepest.element {
+            let old_node = index
+                .node(old_rev)
+                .ok_or_else(|| NodeMapError::RevisionNotInIndex(old_rev))?;
+            if old_node == node {
+                return Ok(()); // avoid creating lots of useless blocks
+            }
+
+            // Looping over the tail of nybbles in both nodes, creating
+            // new blocks until we find the difference
+            let mut new_block_idx = ro_len + glen;
+            let mut nybble = deepest.nybble;
+            for nybble_pos in read_nybbles..node.nybbles_len() {
+                block.set(nybble, Element::Block(new_block_idx));
+
+                let new_nybble = node.get_nybble(nybble_pos);
+                let old_nybble = old_node.get_nybble(nybble_pos);
+
+                if old_nybble == new_nybble {
+                    self.growable.push(Block::new());
+                    block = &mut self.growable[glen];
+                    glen += 1;
+                    new_block_idx += 1;
+                    nybble = new_nybble;
+                } else {
+                    let mut new_block = Block::new();
+                    new_block.set(old_nybble, Element::Rev(old_rev));
+                    new_block.set(new_nybble, Element::Rev(rev));
+                    self.growable.push(new_block);
+                    break;
+                }
+            }
+        } else {
+            // Free slot in the deepest block: no splitting has to be done
+            block.set(deepest.nybble, Element::Rev(rev));
+        }
+
+        // Backtrack over visit steps to update references
+        while let Some(visited) = visit_steps.pop() {
+            let to_write = Element::Block(block_idx);
+            if visit_steps.is_empty() {
+                self.root.set(visited.nybble, to_write);
+                break;
+            }
+            let (new_idx, block, _) = self.mutable_block(visited.block_idx);
+            if block.get(visited.nybble) == to_write {
+                break;
+            }
+            block.set(visited.nybble, to_write);
+            block_idx = new_idx;
+        }
+        Ok(())
+    }
+
+    /// Make the whole `NodeTree` logically empty, without touching the
+    /// immutable part.
+    pub fn invalidate_all(&mut self) {
+        self.root = Block::new();
+        self.growable = Vec::new();
+        self.masked_inner_blocks = self.readonly.len();
+    }
+
+    /// Return the number of blocks in the readonly part that are currently
+    /// masked in the mutable part.
+    ///
+    /// The `NodeTree` structure has no efficient way to know how many blocks
+    /// are already unreachable in the readonly part.
+    ///
+    /// After a call to `invalidate_all()`, the returned number can actually
+    /// be bigger than the whole readonly part, a conventional way to mean
+    /// that all the readonly blocks have been masked. This is what is really
+    /// useful to the caller and does not require knowing how many were
+    /// actually unreachable to begin with.
+    pub fn masked_readonly_blocks(&self) -> usize {
+        if let Some(readonly_root) = self.readonly.last() {
+            if readonly_root == &self.root {
+                return 0;
+            }
+        } else {
+            return 0;
+        }
+        self.masked_inner_blocks + 1
+    }
+}
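For illustration, a sketch (not from this changeset) of the append-only persistence round trip this structure is designed for, using `into_readonly_and_added_bytes` and `load_bytes` above; the function name and the in-memory `Vec<u8>` standing in for the on-disk data are made up:

    fn persistence_roundtrip(nt: NodeTree, mut on_disk: Vec<u8>) -> NodeTree {
        // Serialize only the blocks added since the tree was loaded...
        let (_readonly, added_bytes) = nt.into_readonly_and_added_bytes();
        // ...append them to the previously persisted blocks...
        on_disk.extend(added_bytes);
        let amount = on_disk.len();
        // ...and reload the grown buffer as the new read-only part.
        NodeTree::load_bytes(Box::new(on_disk), amount)
    }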
+
+pub struct NodeTreeBytes {
+    buffer: Box<dyn Deref<Target = [u8]> + Send>,
+    len_in_blocks: usize,
+}
+
+impl NodeTreeBytes {
+    fn new(
+        buffer: Box<dyn Deref<Target = [u8]> + Send>,
+        amount: usize,
+    ) -> Self {
+        assert!(buffer.len() >= amount);
+        let len_in_blocks = amount / BLOCK_SIZE;
+        NodeTreeBytes {
+            buffer,
+            len_in_blocks,
+        }
+    }
+}
+
+impl Deref for NodeTreeBytes {
+    type Target = [Block];
+
+    fn deref(&self) -> &[Block] {
+        unsafe {
+            slice::from_raw_parts(
+                (&self.buffer).as_ptr() as *const Block,
+                self.len_in_blocks,
+            )
+        }
+    }
+}
+
+struct NodeTreeVisitor<'n, 'p> {
+    nt: &'n NodeTree,
+    prefix: NodePrefixRef<'p>,
+    visit: usize,
+    nybble_idx: usize,
+    done: bool,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+struct NodeTreeVisitItem {
+    block_idx: usize,
+    nybble: u8,
+    element: Element,
+}
+
+impl<'n, 'p> Iterator for NodeTreeVisitor<'n, 'p> {
+    type Item = NodeTreeVisitItem;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.done || self.nybble_idx >= self.prefix.len() {
+            return None;
+        }
+
+        let nybble = self.prefix.get_nybble(self.nybble_idx);
+        self.nybble_idx += 1;
+
+        let visit = self.visit;
+        let element = self.nt[visit].get(nybble);
+        if let Element::Block(idx) = element {
+            self.visit = idx;
+        } else {
+            self.done = true;
+        }
+
+        Some(NodeTreeVisitItem {
+            block_idx: visit,
+            nybble: nybble,
+            element: element,
+        })
+    }
+}
+
+impl NodeTreeVisitItem {
+    // Return `Some(opt)` if this item is final, with `opt` being the
+    // `Revision` that it may represent.
+    //
+    // If the item is not terminal, return `None`
+    fn final_revision(&self) -> Option<Option<Revision>> {
+        match self.element {
+            Element::Block(_) => None,
+            Element::Rev(r) => Some(Some(r)),
+            Element::None => Some(None),
+        }
+    }
+}
+
+impl From<Vec<Block>> for NodeTree {
+    fn from(vec: Vec<Block>) -> Self {
+        Self::new(Box::new(vec))
+    }
+}
+
+impl fmt::Debug for NodeTree {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let readonly: &[Block] = &*self.readonly;
+        write!(
+            f,
+            "readonly: {:?}, growable: {:?}, root: {:?}",
+            readonly, self.growable, self.root
+        )
+    }
+}
+
+impl Default for NodeTree {
+    /// Create a fully mutable empty NodeTree
+    fn default() -> Self {
+        NodeTree::new(Box::new(Vec::new()))
+    }
+}
+
+impl NodeMap for NodeTree {
+    fn find_bin<'a>(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: NodePrefixRef<'a>,
+    ) -> Result<Option<Revision>, NodeMapError> {
+        validate_candidate(idx, prefix.clone(), self.lookup(prefix)?)
+            .map(|(opt, _shortest)| opt)
+    }
+
+    fn unique_prefix_len_bin<'a>(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: NodePrefixRef<'a>,
+    ) -> Result<Option<usize>, NodeMapError> {
+        validate_candidate(idx, prefix.clone(), self.lookup(prefix)?)
+            .map(|(opt, shortest)| opt.map(|_rev| shortest))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::NodeMapError::*;
+    use super::*;
+    use crate::revlog::node::{hex_pad_right, Node};
+    use std::collections::HashMap;
+
+    /// Creates a `Block` using a syntax close to the `Debug` output
+    macro_rules! block {
+        {$($nybble:tt : $variant:ident($val:tt)),*} => (
+            {
+                let mut block = Block::new();
+                $(block.set($nybble, Element::$variant($val)));*;
+                block
+            }
+        )
+    }
+
+    #[test]
+    fn test_block_debug() {
+        let mut block = Block::new();
+        block.set(1, Element::Rev(3));
+        block.set(10, Element::Block(0));
+        assert_eq!(format!("{:?}", block), "{1: Rev(3), 10: Block(0)}");
+    }
+
+    #[test]
+    fn test_block_macro() {
+        let block = block! {5: Block(2)};
+        assert_eq!(format!("{:?}", block), "{5: Block(2)}");
+
+        let block = block! {13: Rev(15), 5: Block(2)};
+        assert_eq!(format!("{:?}", block), "{5: Block(2), 13: Rev(15)}");
+    }
+
+    #[test]
+    fn test_raw_block() {
+        let mut raw = [255u8; 64];
+
+        let mut counter = 0;
+        for val in [0, 15, -2, -1, -3].iter() {
+            for byte in RawElement::to_be_bytes(*val).iter() {
+                raw[counter] = *byte;
+                counter += 1;
+            }
+        }
+        let block = Block(raw);
+        assert_eq!(block.get(0), Element::Block(0));
+        assert_eq!(block.get(1), Element::Block(15));
+        assert_eq!(block.get(3), Element::None);
+        assert_eq!(block.get(2), Element::Rev(0));
+        assert_eq!(block.get(4), Element::Rev(1));
+    }
+
+    type TestIndex = HashMap<Revision, Node>;
+
+    impl RevlogIndex for TestIndex {
+        fn node(&self, rev: Revision) -> Option<&Node> {
+            self.get(&rev)
+        }
+
+        fn len(&self) -> usize {
+            self.len()
+        }
+    }
+
+    /// Pad hexadecimal Node prefix with zeros on the right
+    ///
+    /// This avoids having to repeatedly write very long hexadecimal
+    /// strings for test data, and brings actual hash size independency.
+    #[cfg(test)]
+    fn pad_node(hex: &str) -> Node {
+        Node::from_hex(&hex_pad_right(hex)).unwrap()
+    }
+
+    /// Pad hexadecimal Node prefix with zeros on the right, then insert
+    fn pad_insert(idx: &mut TestIndex, rev: Revision, hex: &str) {
+        idx.insert(rev, pad_node(hex));
+    }
+
+    fn sample_nodetree() -> NodeTree {
+        NodeTree::from(vec![
+            block![0: Rev(9)],
+            block![0: Rev(0), 1: Rev(9)],
+            block![0: Block(1), 1:Rev(1)],
+        ])
+    }
+
+    #[test]
+    fn test_nt_debug() {
+        let nt = sample_nodetree();
+        assert_eq!(
+            format!("{:?}", nt),
+            "readonly: \
+             [{0: Rev(9)}, {0: Rev(0), 1: Rev(9)}, {0: Block(1), 1: Rev(1)}], \
+             growable: [], \
+             root: {0: Block(1), 1: Rev(1)}",
+        );
+    }
+
+    #[test]
+    fn test_immutable_find_simplest() -> Result<(), NodeMapError> {
+        let mut idx: TestIndex = HashMap::new();
+        pad_insert(&mut idx, 1, "1234deadcafe");
+
+        let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
+        assert_eq!(nt.find_hex(&idx, "1")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "12")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "1234de")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "1a")?, None);
+        assert_eq!(nt.find_hex(&idx, "ab")?, None);
+
+        // and with full binary Nodes
+        assert_eq!(nt.find_node(&idx, idx.get(&1).unwrap())?, Some(1));
+        let unknown = Node::from_hex(&hex_pad_right("3d")).unwrap();
+        assert_eq!(nt.find_node(&idx, &unknown)?, None);
+        Ok(())
+    }
+
+    #[test]
+    fn test_immutable_find_one_jump() {
+        let mut idx = TestIndex::new();
+        pad_insert(&mut idx, 9, "012");
+        pad_insert(&mut idx, 0, "00a");
+
+        let nt = sample_nodetree();
+
+        assert_eq!(nt.find_hex(&idx, "0"), Err(MultipleResults));
+        assert_eq!(nt.find_hex(&idx, "01"), Ok(Some(9)));
+        assert_eq!(nt.find_hex(&idx, "00"), Err(MultipleResults));
+        assert_eq!(nt.find_hex(&idx, "00a"), Ok(Some(0)));
+        assert_eq!(nt.unique_prefix_len_hex(&idx, "00a"), Ok(Some(3)));
+        assert_eq!(nt.find_hex(&idx, "000"), Ok(Some(NULL_REVISION)));
+    }
+
+    #[test]
+    fn test_mutated_find() -> Result<(), NodeMapError> {
+        let mut idx = TestIndex::new();
+        pad_insert(&mut idx, 9, "012");
+        pad_insert(&mut idx, 0, "00a");
+        pad_insert(&mut idx, 2, "cafe");
+        pad_insert(&mut idx, 3, "15");
+        pad_insert(&mut idx, 1, "10");
+
+        let nt = NodeTree {
+            readonly: sample_nodetree().readonly,
+            growable: vec![block![0: Rev(1), 5: Rev(3)]],
+            root: block![0: Block(1), 1:Block(3), 12: Rev(2)],
+            masked_inner_blocks: 1,
+        };
+        assert_eq!(nt.find_hex(&idx, "10")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "c")?, Some(2));
+        assert_eq!(nt.unique_prefix_len_hex(&idx, "c")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "00"), Err(MultipleResults));
+        assert_eq!(nt.find_hex(&idx, "000")?, Some(NULL_REVISION));
+        assert_eq!(nt.unique_prefix_len_hex(&idx, "000")?, Some(3));
+        assert_eq!(nt.find_hex(&idx, "01")?, Some(9));
+        assert_eq!(nt.masked_readonly_blocks(), 2);
+        Ok(())
+    }
+
+    struct TestNtIndex {
+        index: TestIndex,
+        nt: NodeTree,
+    }
+
+    impl TestNtIndex {
+        fn new() -> Self {
+            TestNtIndex {
+                index: HashMap::new(),
+                nt: NodeTree::default(),
+            }
+        }
+
+        fn insert(
+            &mut self,
+            rev: Revision,
+            hex: &str,
+        ) -> Result<(), NodeMapError> {
+            let node = pad_node(hex);
+            self.index.insert(rev, node.clone());
+            self.nt.insert(&self.index, &node, rev)?;
+            Ok(())
+        }
+
+        fn find_hex(
+            &self,
+            prefix: &str,
+        ) -> Result<Option<Revision>, NodeMapError> {
+            self.nt.find_hex(&self.index, prefix)
+        }
+
+        fn unique_prefix_len_hex(
+            &self,
+            prefix: &str,
+        ) -> Result<Option<usize>, NodeMapError> {
+            self.nt.unique_prefix_len_hex(&self.index, prefix)
+        }
+
+        /// Merge all blocks into a new read-only part and restart from it
+        fn commit(self) -> Self {
+            let mut as_vec: Vec<Block> =
+                self.nt.readonly.iter().map(|block| block.clone()).collect();
+            as_vec.extend(self.nt.growable);
+            as_vec.push(self.nt.root);
+
+            Self {
+                index: self.index,
+                nt: NodeTree::from(as_vec).into(),
+            }
+        }
+    }
+
+    #[test]
+    fn test_insert_full_mutable() -> Result<(), NodeMapError> {
+        let mut idx = TestNtIndex::new();
+        idx.insert(0, "1234")?;
+        assert_eq!(idx.find_hex("1")?, Some(0));
+        assert_eq!(idx.find_hex("12")?, Some(0));
+
+        // let's trigger a simple split
+        idx.insert(1, "1a34")?;
+        assert_eq!(idx.nt.growable.len(), 1);
+        assert_eq!(idx.find_hex("12")?, Some(0));
+        assert_eq!(idx.find_hex("1a")?, Some(1));
+
+        // reinserting is a no_op
+        idx.insert(1, "1a34")?;
+        assert_eq!(idx.nt.growable.len(), 1);
+        assert_eq!(idx.find_hex("12")?, Some(0));
+        assert_eq!(idx.find_hex("1a")?, Some(1));
+
+        idx.insert(2, "1a01")?;
+        assert_eq!(idx.nt.growable.len(), 2);
+        assert_eq!(idx.find_hex("1a"), Err(NodeMapError::MultipleResults));
+        assert_eq!(idx.find_hex("12")?, Some(0));
+        assert_eq!(idx.find_hex("1a3")?, Some(1));
+        assert_eq!(idx.find_hex("1a0")?, Some(2));
+        assert_eq!(idx.find_hex("1a12")?, None);
+
+        // now let's make it split and create more than one additional block
+        idx.insert(3, "1a345")?;
+        assert_eq!(idx.nt.growable.len(), 4);
+        assert_eq!(idx.find_hex("1a340")?, Some(1));
+        assert_eq!(idx.find_hex("1a345")?, Some(3));
+        assert_eq!(idx.find_hex("1a341")?, None);
+
+        // there's no readonly block to mask
+        assert_eq!(idx.nt.masked_readonly_blocks(), 0);
+        Ok(())
+    }
+
+    #[test]
+    fn test_unique_prefix_len_zero_prefix() {
+        let mut idx = TestNtIndex::new();
+        idx.insert(0, "00000abcd").unwrap();
+
+        assert_eq!(idx.find_hex("000"), Err(NodeMapError::MultipleResults));
+        // in the nodetree proper, this will be found at the first nybble
+        // yet the correct answer for unique_prefix_len is not 1, nor 1+1,
+        // but the first difference with `NULL_NODE`
+        assert_eq!(idx.unique_prefix_len_hex("00000a"), Ok(Some(6)));
+        assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
+
+        // same with odd result
+        idx.insert(1, "00123").unwrap();
+        assert_eq!(idx.unique_prefix_len_hex("001"), Ok(Some(3)));
+        assert_eq!(idx.unique_prefix_len_hex("0012"), Ok(Some(3)));
+
+        // these are unchanged of course
+        assert_eq!(idx.unique_prefix_len_hex("00000a"), Ok(Some(6)));
+        assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
+    }
+
+    #[test]
+    fn test_insert_extreme_splitting() -> Result<(), NodeMapError> {
+        // check that the splitting loop is long enough
+        let mut nt_idx = TestNtIndex::new();
+        let nt = &mut nt_idx.nt;
+        let idx = &mut nt_idx.index;
+
+        let node0_hex = hex_pad_right("444444");
+        let mut node1_hex = hex_pad_right("444444").clone();
+        node1_hex.pop();
+        node1_hex.push('5');
+        let node0 = Node::from_hex(&node0_hex).unwrap();
+        let node1 = Node::from_hex(&node1_hex).unwrap();
+
+        idx.insert(0, node0.clone());
+        nt.insert(idx, &node0, 0)?;
+        idx.insert(1, node1.clone());
+        nt.insert(idx, &node1, 1)?;
+
+        assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(0));
+        assert_eq!(nt.find_bin(idx, (&node1).into())?, Some(1));
+        Ok(())
+    }
+
+    #[test]
+    fn test_insert_partly_immutable() -> Result<(), NodeMapError> {
+        let mut idx = TestNtIndex::new();
+        idx.insert(0, "1234")?;
+        idx.insert(1, "1235")?;
+        idx.insert(2, "131")?;
+        idx.insert(3, "cafe")?;
+        let mut idx = idx.commit();
+        assert_eq!(idx.find_hex("1234")?, Some(0));
+        assert_eq!(idx.find_hex("1235")?, Some(1));
+        assert_eq!(idx.find_hex("131")?, Some(2));
+        assert_eq!(idx.find_hex("cafe")?, Some(3));
+        // we did not add anything since init from readonly
+        assert_eq!(idx.nt.masked_readonly_blocks(), 0);
+
+        idx.insert(4, "123A")?;
+        assert_eq!(idx.find_hex("1234")?, Some(0));
+        assert_eq!(idx.find_hex("1235")?, Some(1));
+        assert_eq!(idx.find_hex("131")?, Some(2));
+        assert_eq!(idx.find_hex("cafe")?, Some(3));
+        assert_eq!(idx.find_hex("123A")?, Some(4));
+        // we masked blocks for all prefixes of "123", including the root
+        assert_eq!(idx.nt.masked_readonly_blocks(), 4);
+
+        eprintln!("{:?}", idx.nt);
+        idx.insert(5, "c0")?;
+        assert_eq!(idx.find_hex("cafe")?, Some(3));
+        assert_eq!(idx.find_hex("c0")?, Some(5));
+        assert_eq!(idx.find_hex("c1")?, None);
+        assert_eq!(idx.find_hex("1234")?, Some(0));
+        // inserting "c0" is just splitting the 'c' slot of the mutable root,
+        // it doesn't mask anything
+        assert_eq!(idx.nt.masked_readonly_blocks(), 4);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_invalidate_all() -> Result<(), NodeMapError> {
+        let mut idx = TestNtIndex::new();
+        idx.insert(0, "1234")?;
+        idx.insert(1, "1235")?;
+        idx.insert(2, "131")?;
+        idx.insert(3, "cafe")?;
+        let mut idx = idx.commit();
+
+        idx.nt.invalidate_all();
+
+        assert_eq!(idx.find_hex("1234")?, None);
+        assert_eq!(idx.find_hex("1235")?, None);
+        assert_eq!(idx.find_hex("131")?, None);
+        assert_eq!(idx.find_hex("cafe")?, None);
+        // all the readonly blocks have been masked; by convention, this is
+        // the expected value
+        assert_eq!(idx.nt.masked_readonly_blocks(), idx.nt.readonly.len() + 1);
+        Ok(())
+    }
+
+    #[test]
+    fn test_into_added_empty() {
+        assert!(sample_nodetree().into_readonly_and_added().1.is_empty());
+        assert!(sample_nodetree()
+            .into_readonly_and_added_bytes()
+            .1
+            .is_empty());
+    }
+
+    #[test]
+    fn test_into_added_bytes() -> Result<(), NodeMapError> {
+        let mut idx = TestNtIndex::new();
+        idx.insert(0, "1234")?;
+        let mut idx = idx.commit();
+        idx.insert(4, "cafe")?;
+        let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
+
+        // only the root block has been changed
+        assert_eq!(bytes.len(), BLOCK_SIZE);
+        // big endian for -2
+        assert_eq!(&bytes[4..2 * 4], [255, 255, 255, 254]);
+        // big endian for -6
+        assert_eq!(&bytes[12 * 4..13 * 4], [255, 255, 255, 250]);
+        Ok(())
+    }
+}
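The two byte-level assertions closing test_into_added_bytes above follow from the block serialization used by this nodemap: each block slot is a big-endian i32 and, as far as these tests show, a revision r is stored as the value -r - 2. A minimal sketch of that arithmetic (not part of the changeset; the raw_element helper and the -r - 2 encoding are assumptions drawn from the asserted bytes):

    fn raw_element(rev: i32) -> [u8; 4] {
        // Assumed encoding: Rev(r) serializes to the big-endian i32 value -r - 2.
        (-rev - 2).to_be_bytes()
    }

    fn main() {
        assert_eq!(raw_element(0), [255, 255, 255, 254]); // "big endian for -2"
        assert_eq!(raw_element(4), [255, 255, 255, 250]); // "big endian for -6"
    }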
--- a/rust/hg-core/src/utils.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/utils.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -7,8 +7,12 @@
 
 //! Contains useful functions, traits, structs, etc. for use in core.
 
+use crate::utils::hg_path::HgPath;
+use std::{io::Write, ops::Deref};
+
 pub mod files;
 pub mod hg_path;
+pub mod path_auditor;
 
 /// Useful until rust/issues/56345 is stable
 ///
@@ -111,3 +115,54 @@
         }
     }
 }
+
+pub trait Escaped {
+    /// Return bytes escaped for display to the user
+    fn escaped_bytes(&self) -> Vec<u8>;
+}
+
+impl Escaped for u8 {
+    fn escaped_bytes(&self) -> Vec<u8> {
+        let mut acc = vec![];
+        match self {
+            c @ b'\'' | c @ b'\\' => {
+                acc.push(b'\\');
+                acc.push(*c);
+            }
+            b'\t' => {
+                acc.extend(br"\\t");
+            }
+            b'\n' => {
+                acc.extend(br"\\n");
+            }
+            b'\r' => {
+                acc.extend(br"\\r");
+            }
+            c if (*c < b' ' || *c >= 127) => {
+                write!(acc, "\\x{:x}", self).unwrap();
+            }
+            c => {
+                acc.push(*c);
+            }
+        }
+        acc
+    }
+}
+
+impl<'a, T: Escaped> Escaped for &'a [T] {
+    fn escaped_bytes(&self) -> Vec<u8> {
+        self.iter().flat_map(|item| item.escaped_bytes()).collect()
+    }
+}
+
+impl<T: Escaped> Escaped for Vec<T> {
+    fn escaped_bytes(&self) -> Vec<u8> {
+        self.deref().escaped_bytes()
+    }
+}
+
+impl<'a> Escaped for &'a HgPath {
+    fn escaped_bytes(&self) -> Vec<u8> {
+        self.as_bytes().escaped_bytes()
+    }
+}
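A brief usage sketch for the Escaped trait introduced above (not part of the changeset; it assumes the trait is importable as hg::utils::Escaped). The blanket impls let a byte slice, a Vec<u8>, or an &HgPath be escaped through the same method:

    use hg::utils::Escaped;

    fn main() {
        let raw: &[u8] = b"name\twith\ttabs\n";
        // Control bytes come back as printable escape sequences, so the
        // result is safe to display to the user.
        let shown = raw.escaped_bytes();
        println!("{}", String::from_utf8_lossy(&shown));
    }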
--- a/rust/hg-core/src/utils/files.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/utils/files.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -9,11 +9,18 @@
 
 //! Functions for fiddling with files.
 
-use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::utils::{
+    hg_path::{path_to_hg_path_buf, HgPath, HgPathBuf, HgPathError},
+    path_auditor::PathAuditor,
+    replace_slice,
+};
+use lazy_static::lazy_static;
+use same_file::is_same_file;
+use std::borrow::ToOwned;
+use std::fs::Metadata;
 use std::iter::FusedIterator;
-
-use std::fs::Metadata;
-use std::path::Path;
+use std::ops::Deref;
+use std::path::{Path, PathBuf};
 
 pub fn get_path_from_bytes(bytes: &[u8]) -> &Path {
     let os_str;
@@ -62,6 +69,28 @@
 
 impl<'a> FusedIterator for Ancestors<'a> {}
 
+/// An iterator over a repository path yielding itself and its ancestors.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct AncestorsWithBase<'a> {
+    next: Option<(&'a HgPath, &'a HgPath)>,
+}
+
+impl<'a> Iterator for AncestorsWithBase<'a> {
+    type Item = (&'a HgPath, &'a HgPath);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let next = self.next;
+        self.next = match self.next {
+            Some((s, _)) if s.is_empty() => None,
+            Some((s, _)) => Some(s.split_filename()),
+            None => None,
+        };
+        next
+    }
+}
+
+impl<'a> FusedIterator for AncestorsWithBase<'a> {}
+
 /// Returns an iterator yielding ancestor directories of the given repository
 /// path.
 ///
@@ -77,6 +106,25 @@
     dirs
 }
 
+/// Returns an iterator yielding ancestor directories of the given repository
+/// path, each paired with its base name.
+///
+/// The path is separated by '/', and must not start with '/'.
+///
+/// The path itself isn't included unless it is b"" (meaning the root
+/// directory).
+pub(crate) fn find_dirs_with_base<'a>(
+    path: &'a HgPath,
+) -> AncestorsWithBase<'a> {
+    let mut dirs = AncestorsWithBase {
+        next: Some((path, HgPath::new(b""))),
+    };
+    if !path.is_empty() {
+        dirs.next(); // skip itself
+    }
+    dirs
+}
+
 /// TODO more than ASCII?
 pub fn normalize_case(path: &HgPath) -> HgPathBuf {
     #[cfg(windows)] // NTFS compares via upper()
@@ -85,6 +133,41 @@
     path.to_ascii_lowercase()
 }
 
+lazy_static! {
+    static ref IGNORED_CHARS: Vec<Vec<u8>> = {
+        [
+            0x200c, 0x200d, 0x200e, 0x200f, 0x202a, 0x202b, 0x202c, 0x202d,
+            0x202e, 0x206a, 0x206b, 0x206c, 0x206d, 0x206e, 0x206f, 0xfeff,
+        ]
+        .iter()
+        .map(|code| {
+            std::char::from_u32(*code)
+                .unwrap()
+                .encode_utf8(&mut [0; 3])
+                .bytes()
+                .collect()
+        })
+        .collect()
+    };
+}
+
+fn hfs_ignore_clean(bytes: &[u8]) -> Vec<u8> {
+    let mut buf = bytes.to_owned();
+    let needs_escaping = bytes.iter().any(|b| *b == b'\xe2' || *b == b'\xef');
+    if needs_escaping {
+        for forbidden in IGNORED_CHARS.iter() {
+            replace_slice(&mut buf, forbidden, &[])
+        }
+    }
+    buf
+}
+
+pub fn lower_clean(bytes: &[u8]) -> Vec<u8> {
+    hfs_ignore_clean(&bytes.to_ascii_lowercase())
+}
+
 #[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
 pub struct HgMetadata {
     pub st_dev: u64,
@@ -111,9 +194,66 @@
     }
 }
 
+/// Returns the canonical path of `name`, given `cwd` and `root`
+pub fn canonical_path(
+    root: impl AsRef<Path>,
+    cwd: impl AsRef<Path>,
+    name: impl AsRef<Path>,
+) -> Result<PathBuf, HgPathError> {
+    // TODO add missing normalization for other platforms
+    let root = root.as_ref();
+    let cwd = cwd.as_ref();
+    let name = name.as_ref();
+
+    let name = if !name.is_absolute() {
+        root.join(&cwd).join(&name)
+    } else {
+        name.to_owned()
+    };
+    let mut auditor = PathAuditor::new(&root);
+    if name != root && name.starts_with(&root) {
+        let name = name.strip_prefix(&root).unwrap();
+        auditor.audit_path(path_to_hg_path_buf(name)?)?;
+        return Ok(name.to_owned());
+    } else if name == root {
+        return Ok("".into());
+    } else {
+        // Determine whether `name` is in the hierarchy at or beneath `root`,
+        // by iterating name = name.parent() until it returns `None` (we can't
+        // check name == '/', because that doesn't work on Windows).
+        let mut name = name.deref();
+        let original_name = name.to_owned();
+        loop {
+            let same = is_same_file(&name, &root).unwrap_or(false);
+            if same {
+                if name == original_name {
+                    // `name` was actually the same as root (maybe a symlink)
+                    return Ok("".into());
+                }
+                // `name` is a symlink to root, so `original_name` is under
+                // root
+                let rel_path = original_name.strip_prefix(&name).unwrap();
+                auditor.audit_path(path_to_hg_path_buf(&rel_path)?)?;
+                return Ok(rel_path.to_owned());
+            }
+            name = match name.parent() {
+                None => break,
+                Some(p) => p,
+            };
+        }
+        // TODO hint to the user about using --cwd
+        // Bubble up the responsibility to Python for now
+        Err(HgPathError::NotUnderRoot {
+            path: original_name.to_owned(),
+            root: root.to_owned(),
+        })
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
+    use pretty_assertions::assert_eq;
 
     #[test]
     fn find_dirs_some() {
@@ -133,4 +273,112 @@
         assert_eq!(dirs.next(), None);
         assert_eq!(dirs.next(), None);
     }
+
+    #[test]
+    fn test_find_dirs_with_base_some() {
+        let mut dirs = super::find_dirs_with_base(HgPath::new(b"foo/bar/baz"));
+        assert_eq!(
+            dirs.next(),
+            Some((HgPath::new(b"foo/bar"), HgPath::new(b"baz")))
+        );
+        assert_eq!(
+            dirs.next(),
+            Some((HgPath::new(b"foo"), HgPath::new(b"bar")))
+        );
+        assert_eq!(dirs.next(), Some((HgPath::new(b""), HgPath::new(b"foo"))));
+        assert_eq!(dirs.next(), None);
+        assert_eq!(dirs.next(), None);
+    }
+
+    #[test]
+    fn test_find_dirs_with_base_empty() {
+        let mut dirs = super::find_dirs_with_base(HgPath::new(b""));
+        assert_eq!(dirs.next(), Some((HgPath::new(b""), HgPath::new(b""))));
+        assert_eq!(dirs.next(), None);
+        assert_eq!(dirs.next(), None);
+    }
+
+    #[test]
+    fn test_canonical_path() {
+        let root = Path::new("/repo");
+        let cwd = Path::new("/dir");
+        let name = Path::new("filename");
+        assert_eq!(
+            canonical_path(root, cwd, name),
+            Err(HgPathError::NotUnderRoot {
+                path: PathBuf::from("/dir/filename"),
+                root: root.to_path_buf()
+            })
+        );
+
+        let root = Path::new("/repo");
+        let cwd = Path::new("/");
+        let name = Path::new("filename");
+        assert_eq!(
+            canonical_path(root, cwd, name),
+            Err(HgPathError::NotUnderRoot {
+                path: PathBuf::from("/filename"),
+                root: root.to_path_buf()
+            })
+        );
+
+        let root = Path::new("/repo");
+        let cwd = Path::new("/");
+        let name = Path::new("repo/filename");
+        assert_eq!(
+            canonical_path(root, cwd, name),
+            Ok(PathBuf::from("filename"))
+        );
+
+        let root = Path::new("/repo");
+        let cwd = Path::new("/repo");
+        let name = Path::new("filename");
+        assert_eq!(
+            canonical_path(root, cwd, name),
+            Ok(PathBuf::from("filename"))
+        );
+
+        let root = Path::new("/repo");
+        let cwd = Path::new("/repo/subdir");
+        let name = Path::new("filename");
+        assert_eq!(
+            canonical_path(root, cwd, name),
+            Ok(PathBuf::from("subdir/filename"))
+        );
+    }
+
+    #[test]
+    fn test_canonical_path_not_rooted() {
+        use std::fs::create_dir;
+        use tempfile::tempdir;
+
+        let base_dir = tempdir().unwrap();
+        let base_dir_path = base_dir.path();
+        let beneath_repo = base_dir_path.join("a");
+        let root = base_dir_path.join("a/b");
+        let out_of_repo = base_dir_path.join("c");
+        let under_repo_symlink = out_of_repo.join("d");
+
+        create_dir(&beneath_repo).unwrap();
+        create_dir(&root).unwrap();
+
+        // TODO make portable
+        std::os::unix::fs::symlink(&root, &out_of_repo).unwrap();
+
+        assert_eq!(
+            canonical_path(&root, Path::new(""), out_of_repo),
+            Ok(PathBuf::from(""))
+        );
+        assert_eq!(
+            canonical_path(&root, Path::new(""), &beneath_repo),
+            Err(HgPathError::NotUnderRoot {
+                path: beneath_repo.to_owned(),
+                root: root.to_owned()
+            })
+        );
+        assert_eq!(
+            canonical_path(&root, Path::new(""), &under_repo_symlink),
+            Ok(PathBuf::from("d"))
+        );
+    }
 }
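To make the intent of lower_clean above concrete, an illustrative snippet (not part of the changeset; it assumes the function remains exported as hg::utils::files::lower_clean): ASCII is lowercased and the HFS+-ignored code points listed in IGNORED_CHARS (zero-width characters, BOM) are stripped.

    use hg::utils::files::lower_clean;

    fn main() {
        // "Caf" followed by U+FEFF (BOM, encoded as EF BB BF) and "E"
        let with_bom = b"Caf\xef\xbb\xbfE";
        assert_eq!(lower_clean(with_bom), b"cafe".to_vec());
    }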
--- a/rust/hg-core/src/utils/hg_path.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-core/src/utils/hg_path.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -15,12 +15,35 @@
 pub enum HgPathError {
     /// Bytes from the invalid `HgPath`
     LeadingSlash(Vec<u8>),
-    /// Bytes and index of the second slash
-    ConsecutiveSlashes(Vec<u8>, usize),
-    /// Bytes and index of the null byte
-    ContainsNullByte(Vec<u8>, usize),
+    ConsecutiveSlashes {
+        bytes: Vec<u8>,
+        second_slash_index: usize,
+    },
+    ContainsNullByte {
+        bytes: Vec<u8>,
+        null_byte_index: usize,
+    },
     /// Bytes
     DecodeError(Vec<u8>),
+    /// The rest come from audit errors
+    EndsWithSlash(HgPathBuf),
+    ContainsIllegalComponent(HgPathBuf),
+    /// Path is inside the `.hg` folder
+    InsideDotHg(HgPathBuf),
+    IsInsideNestedRepo {
+        path: HgPathBuf,
+        nested_repo: HgPathBuf,
+    },
+    TraversesSymbolicLink {
+        path: HgPathBuf,
+        symlink: HgPathBuf,
+    },
+    NotFsCompliant(HgPathBuf),
+    /// `path` is the smallest invalid path
+    NotUnderRoot {
+        path: PathBuf,
+        root: PathBuf,
+    },
 }
 
 impl ToString for HgPathError {
@@ -29,17 +52,55 @@
             HgPathError::LeadingSlash(bytes) => {
                 format!("Invalid HgPath '{:?}': has a leading slash.", bytes)
             }
-            HgPathError::ConsecutiveSlashes(bytes, pos) => format!(
-                "Invalid HgPath '{:?}': consecutive slahes at pos {}.",
+            HgPathError::ConsecutiveSlashes {
+                bytes,
+                second_slash_index: pos,
+            } => format!(
+                "Invalid HgPath '{:?}': consecutive slashes at pos {}.",
                 bytes, pos
             ),
-            HgPathError::ContainsNullByte(bytes, pos) => format!(
+            HgPathError::ContainsNullByte {
+                bytes,
+                null_byte_index: pos,
+            } => format!(
                 "Invalid HgPath '{:?}': contains null byte at pos {}.",
                 bytes, pos
             ),
             HgPathError::DecodeError(bytes) => {
                 format!("Invalid HgPath '{:?}': could not be decoded.", bytes)
             }
+            HgPathError::EndsWithSlash(path) => {
+                format!("Audit failed for '{}': ends with a slash.", path)
+            }
+            HgPathError::ContainsIllegalComponent(path) => format!(
+                "Audit failed for '{}': contains an illegal component.",
+                path
+            ),
+            HgPathError::InsideDotHg(path) => format!(
+                "Audit failed for '{}': is inside the '.hg' folder.",
+                path
+            ),
+            HgPathError::IsInsideNestedRepo {
+                path,
+                nested_repo: nested,
+            } => format!(
+                "Audit failed for '{}': is inside a nested repository '{}'.",
+                path, nested
+            ),
+            HgPathError::TraversesSymbolicLink { path, symlink } => format!(
+                "Audit failed for '{}': traverses symbolic link '{}'.",
+                path, symlink
+            ),
+            HgPathError::NotFsCompliant(path) => format!(
+                "Audit failed for '{}': cannot be turned into a \
+                 filesystem path.",
+                path
+            ),
+            HgPathError::NotUnderRoot { path, root } => format!(
+                "Audit failed for '{}': not under root {}.",
+                path.display(),
+                root.display()
+            ),
         }
     }
 }
@@ -112,10 +173,40 @@
     pub fn contains(&self, other: u8) -> bool {
         self.inner.contains(&other)
     }
-    pub fn starts_with(&self, needle: impl AsRef<HgPath>) -> bool {
+    pub fn starts_with(&self, needle: impl AsRef<Self>) -> bool {
         self.inner.starts_with(needle.as_ref().as_bytes())
     }
-    pub fn join<T: ?Sized + AsRef<HgPath>>(&self, other: &T) -> HgPathBuf {
+    pub fn trim_trailing_slash(&self) -> &Self {
+        Self::new(if self.inner.last() == Some(&b'/') {
+            &self.inner[..self.inner.len() - 1]
+        } else {
+            &self.inner[..]
+        })
+    }
+    /// Returns a tuple of slices `(base, filename)` resulting from the split
+    /// at the rightmost `/`, if any.
+    ///
+    /// # Examples:
+    ///
+    /// ```
+    /// use hg::utils::hg_path::HgPath;
+    ///
+    /// let path = HgPath::new(b"cool/hg/path").split_filename();
+    /// assert_eq!(path, (HgPath::new(b"cool/hg"), HgPath::new(b"path")));
+    ///
+    /// let path = HgPath::new(b"pathwithoutsep").split_filename();
+    /// assert_eq!(path, (HgPath::new(b""), HgPath::new(b"pathwithoutsep")));
+    /// ```
+    pub fn split_filename(&self) -> (&Self, &Self) {
+        match &self.inner.iter().rposition(|c| *c == b'/') {
+            None => (HgPath::new(""), &self),
+            Some(size) => (
+                HgPath::new(&self.inner[..*size]),
+                HgPath::new(&self.inner[*size + 1..]),
+            ),
+        }
+    }
+    pub fn join<T: ?Sized + AsRef<Self>>(&self, other: &T) -> HgPathBuf {
         let mut inner = self.inner.to_owned();
         if inner.len() != 0 && inner.last() != Some(&b'/') {
             inner.push(b'/');
@@ -123,21 +214,103 @@
         inner.extend(other.as_ref().bytes());
         HgPathBuf::from_bytes(&inner)
     }
+    pub fn parent(&self) -> &Self {
+        let inner = self.as_bytes();
+        HgPath::new(match inner.iter().rposition(|b| *b == b'/') {
+            Some(pos) => &inner[..pos],
+            None => &[],
+        })
+    }
     /// Given a base directory, returns the slice of `self` relative to the
     /// base directory. If `base` is not a directory (does not end with a
     /// `b'/'`), returns `None`.
-    pub fn relative_to(&self, base: impl AsRef<HgPath>) -> Option<&HgPath> {
+    pub fn relative_to(&self, base: impl AsRef<Self>) -> Option<&Self> {
         let base = base.as_ref();
         if base.is_empty() {
             return Some(self);
         }
         let is_dir = base.as_bytes().ends_with(b"/");
         if is_dir && self.starts_with(base) {
-            Some(HgPath::new(&self.inner[base.len()..]))
+            Some(Self::new(&self.inner[base.len()..]))
         } else {
             None
         }
     }
+
+    #[cfg(windows)]
+    /// Copied from the Python stdlib's `os.path.splitdrive` implementation.
+    ///
+    /// Split a pathname into drive/UNC sharepoint and relative path
+    /// specifiers. Returns a 2-tuple (drive_or_unc, path); either part may
+    /// be empty.
+    ///
+    /// If you assign
+    ///  result = split_drive(p)
+    /// it is always true that:
+    ///  result.0 + result.1 == p
+    ///
+    /// If the path contained a drive letter, drive_or_unc will contain
+    /// everything up to and including the colon.
+    /// e.g. split_drive("c:/dir") returns ("c:", "/dir")
+    ///
+    /// If the path contained a UNC path, the drive_or_unc will contain the
+    /// host name and share up to but not including the fourth directory
+    /// separator character.
+    /// e.g. split_drive("//host/computer/dir") returns ("//host/computer",
+    /// "/dir")
+    ///
+    /// Paths cannot contain both a drive letter and a UNC path.
+    pub fn split_drive<'a>(&self) -> (&HgPath, &HgPath) {
+        let bytes = self.as_bytes();
+        let is_sep = |b| std::path::is_separator(b as char);
+
+        if self.len() < 2 {
+            (HgPath::new(b""), &self)
+        } else if is_sep(bytes[0])
+            && is_sep(bytes[1])
+            && (self.len() == 2 || !is_sep(bytes[2]))
+        {
+            // Is a UNC path:
+            // vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
+            // \\machine\mountpoint\directory\etc\...
+            //           directory ^^^^^^^^^^^^^^^
+
+            let machine_end_index = bytes[2..].iter().position(|b| is_sep(*b));
+            let mountpoint_start_index = if let Some(i) = machine_end_index {
+                i + 2
+            } else {
+                return (HgPath::new(b""), &self);
+            };
+
+            match bytes[mountpoint_start_index + 1..]
+                .iter()
+                .position(|b| is_sep(*b))
+            {
+                // A UNC path can't have two slashes in a row
+                // (after the initial two)
+                Some(0) => (HgPath::new(b""), &self),
+                Some(i) => {
+                    let (a, b) =
+                        bytes.split_at(mountpoint_start_index + 1 + i);
+                    (HgPath::new(a), HgPath::new(b))
+                }
+                None => (&self, HgPath::new(b"")),
+            }
+        } else if bytes[1] == b':' {
+            // Drive path c:\directory
+            let (a, b) = bytes.split_at(2);
+            (HgPath::new(a), HgPath::new(b))
+        } else {
+            (HgPath::new(b""), &self)
+        }
+    }
+
+    #[cfg(unix)]
+    /// Split a pathname into drive and path. On Posix, drive is always empty.
+    pub fn split_drive(&self) -> (&HgPath, &HgPath) {
+        (HgPath::new(b""), &self)
+    }
+
     /// Checks for errors in the path, short-circuiting at the first one.
     /// This generates fine-grained errors useful for debugging.
     /// To simply check if the path is valid during tests, use `is_valid`.
@@ -154,17 +327,17 @@
         for (index, byte) in bytes.iter().enumerate() {
             match byte {
                 0 => {
-                    return Err(HgPathError::ContainsNullByte(
-                        bytes.to_vec(),
-                        index,
-                    ))
+                    return Err(HgPathError::ContainsNullByte {
+                        bytes: bytes.to_vec(),
+                        null_byte_index: index,
+                    })
                 }
                 b'/' => {
                     if previous_byte.is_some() && previous_byte == Some(b'/') {
-                        return Err(HgPathError::ConsecutiveSlashes(
-                            bytes.to_vec(),
-                            index,
-                        ));
+                        return Err(HgPathError::ConsecutiveSlashes {
+                            bytes: bytes.to_vec(),
+                            second_slash_index: index,
+                        });
                     }
                 }
                 _ => (),
@@ -348,6 +521,7 @@
 #[cfg(test)]
 mod tests {
     use super::*;
+    use pretty_assertions::assert_eq;
 
     #[test]
     fn test_path_states() {
@@ -356,11 +530,17 @@
             HgPath::new(b"/").check_state()
         );
         assert_eq!(
-            Err(HgPathError::ConsecutiveSlashes(b"a/b//c".to_vec(), 4)),
+            Err(HgPathError::ConsecutiveSlashes {
+                bytes: b"a/b//c".to_vec(),
+                second_slash_index: 4
+            }),
             HgPath::new(b"a/b//c").check_state()
         );
         assert_eq!(
-            Err(HgPathError::ContainsNullByte(b"a/b/\0c".to_vec(), 4)),
+            Err(HgPathError::ContainsNullByte {
+                bytes: b"a/b/\0c".to_vec(),
+                null_byte_index: 4
+            }),
             HgPath::new(b"a/b/\0c").check_state()
         );
         // TODO test HgPathError::DecodeError for the Windows implementation.
@@ -473,4 +653,116 @@
         let base = HgPath::new(b"ends/");
         assert_eq!(Some(HgPath::new(b"with/dir/")), path.relative_to(base));
     }
+
+    #[test]
+    #[cfg(unix)]
+    fn test_split_drive() {
+        // Taken from the Python stdlib's tests
+        assert_eq!(
+            HgPath::new(br"/foo/bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"/foo/bar"))
+        );
+        assert_eq!(
+            HgPath::new(br"foo:bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"foo:bar"))
+        );
+        assert_eq!(
+            HgPath::new(br":foo:bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br":foo:bar"))
+        );
+        // Also try NT paths; should not split them
+        assert_eq!(
+            HgPath::new(br"c:\foo\bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"c:\foo\bar"))
+        );
+        assert_eq!(
+            HgPath::new(b"c:/foo/bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"c:/foo/bar"))
+        );
+        assert_eq!(
+            HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(b""),
+                HgPath::new(br"\\conky\mountpoint\foo\bar")
+            )
+        );
+    }
+
+    #[test]
+    #[cfg(windows)]
+    fn test_split_drive() {
+        assert_eq!(
+            HgPath::new(br"c:\foo\bar").split_drive(),
+            (HgPath::new(br"c:"), HgPath::new(br"\foo\bar"))
+        );
+        assert_eq!(
+            HgPath::new(b"c:/foo/bar").split_drive(),
+            (HgPath::new(br"c:"), HgPath::new(br"/foo/bar"))
+        );
+        assert_eq!(
+            HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(br"\\conky\mountpoint"),
+                HgPath::new(br"\foo\bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"//conky/mountpoint/foo/bar").split_drive(),
+            (
+                HgPath::new(br"//conky/mountpoint"),
+                HgPath::new(br"/foo/bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"\\\conky\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"\\\conky\mountpoint\foo\bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"///conky/mountpoint/foo/bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"///conky/mountpoint/foo/bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"\\conky\\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"\\conky\\mountpoint\foo\bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"//conky//mountpoint/foo/bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"//conky//mountpoint/foo/bar")
+            )
+        );
+        // UNC part containing U+0130
+        assert_eq!(
+            HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT/foo/bar").split_drive(),
+            (
+                HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT"),
+                HgPath::new(br"/foo/bar")
+            )
+        );
+    }
+
+    #[test]
+    fn test_parent() {
+        let path = HgPath::new(b"");
+        assert_eq!(path.parent(), path);
+
+        let path = HgPath::new(b"a");
+        assert_eq!(path.parent(), HgPath::new(b""));
+
+        let path = HgPath::new(b"a/b");
+        assert_eq!(path.parent(), HgPath::new(b"a"));
+
+        let path = HgPath::new(b"a/other/b");
+        assert_eq!(path.parent(), HgPath::new(b"a/other"));
+    }
 }
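One practical effect of turning HgPathError's tuple variants into struct variants above is that call sites can bind fields by name instead of by position. A small sketch (not part of the changeset; the module path and the public visibility of check_state are assumptions):

    use hg::utils::hg_path::{HgPath, HgPathError};

    fn describe(path: &HgPath) -> String {
        match path.check_state() {
            Ok(()) => "valid".to_string(),
            Err(HgPathError::ConsecutiveSlashes { second_slash_index, .. }) => {
                format!("second slash at byte {}", second_slash_index)
            }
            Err(other) => other.to_string(),
        }
    }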
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/utils/path_auditor.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,230 @@
+// path_auditor.rs
+//
+// Copyright 2020
+// Raphaël Gomès <rgomes@octobus.net>,
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::utils::{
+    files::lower_clean,
+    find_slice_in_slice,
+    hg_path::{hg_path_to_path_buf, HgPath, HgPathBuf, HgPathError},
+};
+use std::collections::HashSet;
+use std::path::{Path, PathBuf};
+
+/// Ensures that a path is valid for use in the repository, i.e. does not use
+/// any banned components, does not traverse a symlink, etc.
+#[derive(Debug, Default)]
+pub struct PathAuditor {
+    audited: HashSet<HgPathBuf>,
+    audited_dirs: HashSet<HgPathBuf>,
+    root: PathBuf,
+}
+
+impl PathAuditor {
+    pub fn new(root: impl AsRef<Path>) -> Self {
+        Self {
+            root: root.as_ref().to_owned(),
+            ..Default::default()
+        }
+    }
+    pub fn audit_path(
+        &mut self,
+        path: impl AsRef<HgPath>,
+    ) -> Result<(), HgPathError> {
+        // TODO windows "localpath" normalization
+        let path = path.as_ref();
+        if path.is_empty() {
+            return Ok(());
+        }
+        // TODO case normalization
+        if self.audited.contains(path) {
+            return Ok(());
+        }
+        // AIX ignores "/" at end of path, others raise EISDIR.
+        let last_byte = path.as_bytes()[path.len() - 1];
+        if last_byte == b'/' || last_byte == b'\\' {
+            return Err(HgPathError::EndsWithSlash(path.to_owned()));
+        }
+        let parts: Vec<_> = path
+            .as_bytes()
+            .split(|b| std::path::is_separator(*b as char))
+            .collect();
+
+        let first_component = lower_clean(parts[0]);
+        let first_component = first_component.as_slice();
+        if !path.split_drive().0.is_empty()
+            || (first_component == b".hg"
+                || first_component == b".hg."
+                || first_component == b"")
+            || parts.iter().any(|c| c == b"..")
+        {
+            return Err(HgPathError::InsideDotHg(path.to_owned()));
+        }
+
+        // Windows shortname aliases
+        for part in parts.iter() {
+            if part.contains(&b'~') {
+                let mut split = part.splitn(2, |b| *b == b'~');
+                let first =
+                    split.next().unwrap().to_owned().to_ascii_uppercase();
+                let last = split.next().unwrap();
+                if last.iter().all(u8::is_ascii_digit)
+                    && (first == b"HG" || first == b"HG8B6C")
+                {
+                    return Err(HgPathError::ContainsIllegalComponent(
+                        path.to_owned(),
+                    ));
+                }
+            }
+        }
+        let lower_path = lower_clean(path.as_bytes());
+        if find_slice_in_slice(&lower_path, b".hg").is_some() {
+            let lower_parts: Vec<_> = path
+                .as_bytes()
+                .split(|b| std::path::is_separator(*b as char))
+                .collect();
+            for pattern in [b".hg".to_vec(), b".hg.".to_vec()].iter() {
+                if let Some(pos) = lower_parts[1..]
+                    .iter()
+                    .position(|part| part == &pattern.as_slice())
+                {
+                    let base = lower_parts[..=pos]
+                        .iter()
+                        .fold(HgPathBuf::new(), |acc, p| {
+                            acc.join(HgPath::new(p))
+                        });
+                    return Err(HgPathError::IsInsideNestedRepo {
+                        path: path.to_owned(),
+                        nested_repo: base,
+                    });
+                }
+            }
+        }
+
+        let parts = &parts[..parts.len().saturating_sub(1)];
+
+        // We don't want to add "foo/bar/baz" to `audited_dirs` before checking
+        // if there's a "foo/.hg" directory. This also means we won't
+        // accidentally traverse a symlink into some other filesystem (which
+        // is potentially expensive to access).
+        for index in 0..parts.len() {
+            let prefix = &parts[..index + 1].join(&b'/');
+            let prefix = HgPath::new(prefix);
+            if self.audited_dirs.contains(prefix) {
+                continue;
+            }
+            self.check_filesystem(&prefix, &path)?;
+        }
+
+        self.audited.insert(path.to_owned());
+
+        Ok(())
+    }
+
+    pub fn check_filesystem(
+        &self,
+        prefix: impl AsRef<HgPath>,
+        path: impl AsRef<HgPath>,
+    ) -> Result<(), HgPathError> {
+        let prefix = prefix.as_ref();
+        let path = path.as_ref();
+        let current_path = self.root.join(
+            hg_path_to_path_buf(prefix)
+                .map_err(|_| HgPathError::NotFsCompliant(path.to_owned()))?,
+        );
+        match std::fs::symlink_metadata(&current_path) {
+            Err(e) => {
+                // EINVAL can be raised as invalid path syntax under win32.
+                if e.kind() != std::io::ErrorKind::NotFound
+                    && e.kind() != std::io::ErrorKind::InvalidInput
+                    && e.raw_os_error() != Some(20)
+                {
+                    // Rust does not yet have an `ErrorKind` for
+                    // `NotADirectory` (errno 20)
+                    // It happens if the dirstate contains `foo/bar` and
+                    // foo is not a directory
+                    return Err(HgPathError::NotFsCompliant(path.to_owned()));
+                }
+            }
+            Ok(meta) => {
+                if meta.file_type().is_symlink() {
+                    return Err(HgPathError::TraversesSymbolicLink {
+                        path: path.to_owned(),
+                        symlink: prefix.to_owned(),
+                    });
+                }
+                if meta.file_type().is_dir()
+                    && current_path.join(".hg").is_dir()
+                {
+                    return Err(HgPathError::IsInsideNestedRepo {
+                        path: path.to_owned(),
+                        nested_repo: prefix.to_owned(),
+                    });
+                }
+            }
+        };
+
+        Ok(())
+    }
+
+    pub fn check(&mut self, path: impl AsRef<HgPath>) -> bool {
+        self.audit_path(path).is_ok()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::utils::files::get_path_from_bytes;
+    use crate::utils::hg_path::path_to_hg_path_buf;
+
+    #[test]
+    fn test_path_auditor() {
+        let mut auditor = PathAuditor::new(get_path_from_bytes(b"/tmp"));
+
+        let path = HgPath::new(b".hg/00changelog.i");
+        assert_eq!(
+            auditor.audit_path(path),
+            Err(HgPathError::InsideDotHg(path.to_owned()))
+        );
+        let path = HgPath::new(b"this/is/nested/.hg/thing.txt");
+        assert_eq!(
+            auditor.audit_path(path),
+            Err(HgPathError::IsInsideNestedRepo {
+                path: path.to_owned(),
+                nested_repo: HgPathBuf::from_bytes(b"this/is/nested")
+            })
+        );
+
+        use std::fs::{create_dir, File};
+        use tempfile::tempdir;
+
+        let base_dir = tempdir().unwrap();
+        let base_dir_path = base_dir.path();
+        let a = base_dir_path.join("a");
+        let b = base_dir_path.join("b");
+        create_dir(&a).unwrap();
+        let in_a_path = a.join("in_a");
+        File::create(in_a_path).unwrap();
+
+        // TODO make portable
+        std::os::unix::fs::symlink(&a, &b).unwrap();
+
+        let buf = b.join("in_a").components().skip(2).collect::<PathBuf>();
+        eprintln!("buf: {}", buf.display());
+        let path = path_to_hg_path_buf(buf).unwrap();
+        assert_eq!(
+            auditor.audit_path(&path),
+            Err(HgPathError::TraversesSymbolicLink {
+                path: path,
+                symlink: path_to_hg_path_buf(
+                    b.components().skip(2).collect::<PathBuf>()
+                )
+                .unwrap()
+            })
+        );
+    }
+}
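A usage sketch for the new PathAuditor (not part of the changeset; the module path and the repository root are illustrative assumptions). check is the boolean convenience wrapper around audit_path: paths whose first component is .hg are rejected without touching the filesystem, other paths are additionally checked on disk for symlinks and nested repositories, and successful audits are cached.

    use hg::utils::{hg_path::HgPath, path_auditor::PathAuditor};

    fn main() {
        let mut auditor = PathAuditor::new("/path/to/repo");
        assert!(!auditor.check(HgPath::new(b".hg/store/data")));
        // Audited paths are cached, so repeating a check is cheap.
        assert!(auditor.check(HgPath::new(b"src/main.rs")));
        assert!(auditor.check(HgPath::new(b"src/main.rs")));
    }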
--- a/rust/hg-cpython/Cargo.toml	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/Cargo.toml	Mon Mar 09 10:18:40 2020 -0700
@@ -10,6 +10,7 @@
 
 [features]
 default = ["python27"]
+with-re2 = ["hg-core/with-re2"]
 
 # Features to build an extension module:
 python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
@@ -21,9 +22,9 @@
 python3-bin = ["cpython/python3-sys"]
 
 [dependencies]
-hg-core = { path = "../hg-core" }
+hg-core = { path = "../hg-core"}
 libc = '*'
 
 [dependencies.cpython]
-version = "0.3"
+version = "0.4"
 default-features = false
--- a/rust/hg-cpython/src/dirstate/copymap.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/dirstate/copymap.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -8,11 +8,12 @@
 //! Bindings for `hg::dirstate::dirstate_map::CopyMap` provided by the
 //! `hg-core` package.
 
-use cpython::{PyBytes, PyClone, PyDict, PyObject, PyResult, Python};
+use cpython::{
+    PyBytes, PyClone, PyDict, PyObject, PyResult, Python, UnsafePyLeaked,
+};
 use std::cell::RefCell;
 
 use crate::dirstate::dirstate_map::DirstateMap;
-use crate::ref_sharing::PyLeaked;
 use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
 
 py_class!(pub class CopyMap |py| {
@@ -104,14 +105,14 @@
 
 py_shared_iterator!(
     CopyMapKeysIterator,
-    PyLeaked<CopyMapIter<'static>>,
+    UnsafePyLeaked<CopyMapIter<'static>>,
     CopyMap::translate_key,
     Option<PyBytes>
 );
 
 py_shared_iterator!(
     CopyMapItemsIterator,
-    PyLeaked<CopyMapIter<'static>>,
+    UnsafePyLeaked<CopyMapIter<'static>>,
     CopyMap::translate_key_value,
     Option<(PyBytes, PyBytes)>
 );
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -13,11 +13,10 @@
 
 use cpython::{
     exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
-    Python,
+    Python, UnsafePyLeaked,
 };
 
 use crate::dirstate::extract_dirstate;
-use crate::ref_sharing::{PyLeaked, PySharedRefCell};
 use hg::{
     utils::hg_path::{HgPath, HgPathBuf},
     DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError,
@@ -25,7 +24,7 @@
 };
 
 py_class!(pub class Dirs |py| {
-    data inner: PySharedRefCell<DirsMultiset>;
+    @shared data inner: DirsMultiset;
 
     // `map` is either a `dict` or a flat iterator (usually a `set`, sometimes
     // a `list`)
@@ -65,14 +64,11 @@
                 })?
         };
 
-        Self::create_instance(
-            py,
-            PySharedRefCell::new(inner),
-        )
+        Self::create_instance(py, inner)
     }
 
     def addpath(&self, path: PyObject) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.add_path(
+        self.inner(py).borrow_mut().add_path(
             HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
         ).and(Ok(py.None())).or_else(|e| {
             match e {
@@ -90,7 +86,7 @@
     }
 
     def delpath(&self, path: PyObject) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.delete_path(
+        self.inner(py).borrow_mut().delete_path(
             HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
         )
             .and(Ok(py.None()))
@@ -109,7 +105,7 @@
             })
     }
     def __iter__(&self) -> PyResult<DirsMultisetKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirsMultisetKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -117,17 +113,15 @@
     }
 
     def __contains__(&self, item: PyObject) -> PyResult<bool> {
-        Ok(self.inner_shared(py).borrow().contains(HgPath::new(
+        Ok(self.inner(py).borrow().contains(HgPath::new(
             item.extract::<PyBytes>(py)?.data(py).as_ref(),
         )))
     }
 });
 
-py_shared_ref!(Dirs, DirsMultiset, inner, inner_shared);
-
 impl Dirs {
     pub fn from_inner(py: Python, d: DirsMultiset) -> PyResult<Self> {
-        Self::create_instance(py, PySharedRefCell::new(d))
+        Self::create_instance(py, d)
     }
 
     fn translate_key(
@@ -140,7 +134,7 @@
 
 py_shared_iterator!(
     DirsMultisetKeysIterator,
-    PyLeaked<DirsMultisetIter<'static>>,
+    UnsafePyLeaked<DirsMultisetIter<'static>>,
     Dirs::translate_key,
     Option<PyBytes>
 );
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -15,13 +15,15 @@
 use cpython::{
     exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
     PyObject, PyResult, PyString, PyTuple, Python, PythonObject, ToPyObject,
+    UnsafePyLeaked,
 };
 
 use crate::{
     dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
-    dirstate::non_normal_entries::NonNormalEntries,
+    dirstate::non_normal_entries::{
+        NonNormalEntries, NonNormalEntriesIterator,
+    },
     dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
-    ref_sharing::{PyLeaked, PySharedRefCell},
 };
 use hg::{
     utils::hg_path::{HgPath, HgPathBuf},
@@ -43,18 +45,15 @@
 //     All attributes also have to have a separate refcount data attribute for
 //     leaks, with all methods that go along for reference sharing.
 py_class!(pub class DirstateMap |py| {
-    data inner: PySharedRefCell<RustDirstateMap>;
+    @shared data inner: RustDirstateMap;
 
     def __new__(_cls, _root: PyObject) -> PyResult<Self> {
         let inner = RustDirstateMap::default();
-        Self::create_instance(
-            py,
-            PySharedRefCell::new(inner),
-        )
+        Self::create_instance(py, inner)
     }
 
     def clear(&self) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.clear();
+        self.inner(py).borrow_mut().clear();
         Ok(py.None())
     }
 
@@ -64,7 +63,7 @@
         default: Option<PyObject> = None
     ) -> PyResult<Option<PyObject>> {
         let key = key.extract::<PyBytes>(py)?;
-        match self.inner_shared(py).borrow().get(HgPath::new(key.data(py))) {
+        match self.inner(py).borrow().get(HgPath::new(key.data(py))) {
             Some(entry) => {
                 Ok(Some(make_dirstate_tuple(py, entry)?))
             },
@@ -81,7 +80,7 @@
         size: PyObject,
         mtime: PyObject
     ) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.add_file(
+        self.inner(py).borrow_mut().add_file(
             HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
             oldstate.extract::<PyBytes>(py)?.data(py)[0]
                 .try_into()
@@ -109,7 +108,7 @@
         oldstate: PyObject,
         size: PyObject
     ) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .remove_file(
                 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                 oldstate.extract::<PyBytes>(py)?.data(py)[0]
@@ -133,7 +132,7 @@
         f: PyObject,
         oldstate: PyObject
     ) -> PyResult<PyBool> {
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .drop_file(
                 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                 oldstate.extract::<PyBytes>(py)?.data(py)[0]
@@ -164,13 +163,13 @@
                 ))
             })
             .collect();
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .clear_ambiguous_times(files?, now.extract(py)?);
         Ok(py.None())
     }
 
     def other_parent_entries(&self) -> PyResult<PyObject> {
-        let mut inner_shared = self.inner_shared(py).borrow_mut()?;
+        let mut inner_shared = self.inner(py).borrow_mut();
         let (_, other_parent) =
             inner_shared.get_non_normal_other_parent_entries();
 
@@ -178,8 +177,7 @@
         locals.set_item(
             py,
             "other_parent",
-            other_parent.as_ref()
-                .unwrap()
+            other_parent
                 .iter()
                 .map(|v| PyBytes::new(py, v.as_ref()))
                 .collect::<Vec<PyBytes>>()
@@ -196,11 +194,9 @@
     def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
         let key = key.extract::<PyBytes>(py)?;
         Ok(self
-            .inner_shared(py)
-            .borrow_mut()?
+            .inner(py)
+            .borrow_mut()
             .get_non_normal_other_parent_entries().0
-            .as_ref()
-            .unwrap()
             .contains(HgPath::new(key.data(py))))
     }
 
@@ -211,11 +207,10 @@
                 &format!(
                     "NonNormalEntries: {:?}",
                     self
-                        .inner_shared(py)
-                        .borrow_mut()?
+                        .inner(py)
+                        .borrow_mut()
                         .get_non_normal_other_parent_entries().0
-                        .as_ref()
-                        .unwrap().iter().map(|o| o))
+                        .iter().map(|o| o))
                 )
             )
     }
@@ -223,8 +218,8 @@
     def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
         let key = key.extract::<PyBytes>(py)?;
         self
-            .inner_shared(py)
-            .borrow_mut()?
+            .inner(py)
+            .borrow_mut()
             .non_normal_entries_remove(HgPath::new(key.data(py)));
         Ok(py.None())
     }
@@ -239,21 +234,37 @@
                     .collect();
 
         let res = self
-            .inner_shared(py)
-            .borrow_mut()?
+            .inner(py)
+            .borrow_mut()
             .non_normal_entries_union(other?);
 
         let ret = PyList::new(py, &[]);
-        for (i, filename) in res.iter().enumerate() {
+        for filename in res.iter() {
             let as_pystring = PyBytes::new(py, filename.as_bytes());
-            ret.insert_item(py, i, as_pystring.into_object());
+            ret.append(py, as_pystring.into_object());
         }
         Ok(ret)
     }
 
+    def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
+        // Make sure the sets are defined before we no longer have a mutable
+        // reference to the dmap.
+        self.inner(py)
+            .borrow_mut()
+            .set_non_normal_other_parent_entries(false);
+
+        let leaked_ref = self.inner(py).leak_immutable();
+
+        NonNormalEntriesIterator::from_inner(py, unsafe {
+            leaked_ref.map(py, |o| {
+                o.get_non_normal_other_parent_entries_panic().0.iter()
+            })
+        })
+    }
+
     def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
         let d = d.extract::<PyBytes>(py)?;
-        Ok(self.inner_shared(py).borrow_mut()?
+        Ok(self.inner(py).borrow_mut()
             .has_tracked_dir(HgPath::new(d.data(py)))
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -263,7 +274,7 @@
 
     def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
         let d = d.extract::<PyBytes>(py)?;
-        Ok(self.inner_shared(py).borrow_mut()?
+        Ok(self.inner(py).borrow_mut()
             .has_dir(HgPath::new(d.data(py)))
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -272,7 +283,7 @@
     }
 
     def parents(&self, st: PyObject) -> PyResult<PyTuple> {
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .parents(st.extract::<PyBytes>(py)?.data(py))
             .and_then(|d| {
                 Ok((PyBytes::new(py, &d.p1), PyBytes::new(py, &d.p2))
@@ -290,13 +301,13 @@
         let p1 = extract_node_id(py, &p1)?;
         let p2 = extract_node_id(py, &p2)?;
 
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .set_parents(&DirstateParents { p1, p2 });
         Ok(py.None())
     }
 
     def read(&self, st: PyObject) -> PyResult<Option<PyObject>> {
-        match self.inner_shared(py).borrow_mut()?
+        match self.inner(py).borrow_mut()
             .read(st.extract::<PyBytes>(py)?.data(py))
         {
             Ok(Some(parents)) => Ok(Some(
@@ -323,7 +334,7 @@
             p2: extract_node_id(py, &p2)?,
         };
 
-        match self.inner_shared(py).borrow_mut()?.pack(parents, now) {
+        match self.inner(py).borrow_mut().pack(parents, now) {
             Ok(packed) => Ok(PyBytes::new(py, &packed)),
             Err(_) => Err(PyErr::new::<exc::OSError, _>(
                 py,
@@ -335,7 +346,7 @@
     def filefoldmapasdict(&self) -> PyResult<PyDict> {
         let dict = PyDict::new(py);
         for (key, value) in
-            self.inner_shared(py).borrow_mut()?.build_file_fold_map().iter()
+            self.inner(py).borrow_mut().build_file_fold_map().iter()
         {
             dict.set_item(py, key.as_ref().to_vec(), value.as_ref().to_vec())?;
         }
@@ -343,20 +354,18 @@
     }
 
     def __len__(&self) -> PyResult<usize> {
-        Ok(self.inner_shared(py).borrow().len())
+        Ok(self.inner(py).borrow().len())
     }
 
     def __contains__(&self, key: PyObject) -> PyResult<bool> {
         let key = key.extract::<PyBytes>(py)?;
-        Ok(self.inner_shared(py)
-               .borrow()
-               .contains_key(HgPath::new(key.data(py))))
+        Ok(self.inner(py).borrow().contains_key(HgPath::new(key.data(py))))
     }
 
     def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
         let key = key.extract::<PyBytes>(py)?;
         let key = HgPath::new(key.data(py));
-        match self.inner_shared(py).borrow().get(key) {
+        match self.inner(py).borrow().get(key) {
             Some(entry) => {
                 Ok(make_dirstate_tuple(py, entry)?)
             },
@@ -368,7 +377,7 @@
     }
 
     def keys(&self) -> PyResult<DirstateMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirstateMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -376,7 +385,7 @@
     }
 
     def items(&self) -> PyResult<DirstateMapItemsIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirstateMapItemsIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -384,7 +393,7 @@
     }
 
     def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirstateMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -393,14 +402,14 @@
 
     def getdirs(&self) -> PyResult<Dirs> {
         // TODO don't copy, share the reference
-        self.inner_shared(py).borrow_mut()?.set_dirs()
+        self.inner(py).borrow_mut().set_dirs()
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
             })?;
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
-                &self.inner_shared(py).borrow(),
+                &self.inner(py).borrow(),
                 Some(EntryState::Removed),
             )
             .map_err(|e| {
@@ -410,14 +419,14 @@
     }
     def getalldirs(&self) -> PyResult<Dirs> {
         // TODO don't copy, share the reference
-        self.inner_shared(py).borrow_mut()?.set_all_dirs()
+        self.inner(py).borrow_mut().set_all_dirs()
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
             })?;
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
-                &self.inner_shared(py).borrow(),
+                &self.inner(py).borrow(),
                 None,
             ).map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -428,7 +437,7 @@
     // TODO all copymap* methods, see docstring above
     def copymapcopy(&self) -> PyResult<PyDict> {
         let dict = PyDict::new(py);
-        for (key, value) in self.inner_shared(py).borrow().copy_map.iter() {
+        for (key, value) in self.inner(py).borrow().copy_map.iter() {
             dict.set_item(
                 py,
                 PyBytes::new(py, key.as_ref()),
@@ -440,10 +449,7 @@
 
     def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
         let key = key.extract::<PyBytes>(py)?;
-        match self.inner_shared(py)
-                  .borrow()
-                  .copy_map
-                  .get(HgPath::new(key.data(py))) {
+        match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
             Some(copy) => Ok(PyBytes::new(py, copy.as_ref())),
             None => Err(PyErr::new::<exc::KeyError, _>(
                 py,
@@ -456,12 +462,12 @@
     }
 
     def copymaplen(&self) -> PyResult<usize> {
-        Ok(self.inner_shared(py).borrow().copy_map.len())
+        Ok(self.inner(py).borrow().copy_map.len())
     }
     def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
         let key = key.extract::<PyBytes>(py)?;
         Ok(self
-            .inner_shared(py)
+            .inner(py)
             .borrow()
             .copy_map
             .contains_key(HgPath::new(key.data(py))))
@@ -473,7 +479,7 @@
     ) -> PyResult<Option<PyObject>> {
         let key = key.extract::<PyBytes>(py)?;
         match self
-            .inner_shared(py)
+            .inner(py)
             .borrow()
             .copy_map
             .get(HgPath::new(key.data(py)))
@@ -491,7 +497,7 @@
     ) -> PyResult<PyObject> {
         let key = key.extract::<PyBytes>(py)?;
         let value = value.extract::<PyBytes>(py)?;
-        self.inner_shared(py).borrow_mut()?.copy_map.insert(
+        self.inner(py).borrow_mut().copy_map.insert(
             HgPathBuf::from_bytes(key.data(py)),
             HgPathBuf::from_bytes(value.data(py)),
         );
@@ -504,8 +510,8 @@
     ) -> PyResult<Option<PyObject>> {
         let key = key.extract::<PyBytes>(py)?;
         match self
-            .inner_shared(py)
-            .borrow_mut()?
+            .inner(py)
+            .borrow_mut()
             .copy_map
             .remove(HgPath::new(key.data(py)))
         {
@@ -515,7 +521,7 @@
     }
 
     def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         CopyMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -523,7 +529,7 @@
     }
 
     def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         CopyMapItemsIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -537,7 +543,7 @@
         &'a self,
         py: Python<'a>,
     ) -> Ref<'a, RustDirstateMap> {
-        self.inner_shared(py).borrow()
+        self.inner(py).borrow()
     }
     fn translate_key(
         py: Python,
@@ -557,18 +563,16 @@
     }
 }
 
-py_shared_ref!(DirstateMap, RustDirstateMap, inner, inner_shared);
-
 py_shared_iterator!(
     DirstateMapKeysIterator,
-    PyLeaked<StateMapIter<'static>>,
+    UnsafePyLeaked<StateMapIter<'static>>,
     DirstateMap::translate_key,
     Option<PyBytes>
 );
 
 py_shared_iterator!(
     DirstateMapItemsIterator,
-    PyLeaked<StateMapIter<'static>>,
+    UnsafePyLeaked<StateMapIter<'static>>,
     DirstateMap::translate_key_value,
     Option<(PyBytes, PyObject)>
 );
--- a/rust/hg-cpython/src/dirstate/non_normal_entries.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/dirstate/non_normal_entries.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -6,11 +6,15 @@
 // GNU General Public License version 2 or any later version.
 
 use cpython::{
-    exc::NotImplementedError, CompareOp, ObjectProtocol, PyErr, PyList,
-    PyObject, PyResult, PyString, Python, PythonObject, ToPyObject,
+    exc::NotImplementedError, CompareOp, ObjectProtocol, PyBytes, PyClone,
+    PyErr, PyList, PyObject, PyResult, PyString, Python, PythonObject,
+    ToPyObject, UnsafePyLeaked,
 };
 
 use crate::dirstate::DirstateMap;
+use hg::utils::hg_path::HgPathBuf;
+use std::cell::RefCell;
+use std::collections::hash_set;
 
 py_class!(pub class NonNormalEntries |py| {
     data dmap: DirstateMap;
@@ -34,6 +38,10 @@
     def __repr__(&self) -> PyResult<PyString> {
         self.dmap(py).non_normal_entries_display(py)
     }
+
+    def __iter__(&self) -> PyResult<NonNormalEntriesIterator> {
+        self.dmap(py).non_normal_entries_iter(py)
+    }
 });
 
 impl NonNormalEntries {
@@ -49,4 +57,20 @@
         }
         Ok(true)
     }
+
+    fn translate_key(
+        py: Python,
+        key: &HgPathBuf,
+    ) -> PyResult<Option<PyBytes>> {
+        Ok(Some(PyBytes::new(py, key.as_ref())))
+    }
 }
+
+type NonNormalEntriesIter<'a> = hash_set::Iter<'a, HgPathBuf>;
+
+py_shared_iterator!(
+    NonNormalEntriesIterator,
+    UnsafePyLeaked<NonNormalEntriesIter<'static>>,
+    NonNormalEntries::translate_key,
+    Option<PyBytes>
+);
--- a/rust/hg-cpython/src/dirstate/status.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/dirstate/status.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -33,7 +33,7 @@
     let list = PyList::new(py, &[]);
 
     for (i, path) in collection.iter().enumerate() {
-        list.insert_item(
+        list.insert(
             py,
             i,
             PyBytes::new(py, path.as_ref().as_bytes()).into_object(),
--- a/rust/hg-cpython/src/exceptions.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/exceptions.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -13,7 +13,7 @@
 //!
 //! [`GraphError`]: struct.GraphError.html
 use cpython::{
-    exc::{IOError, RuntimeError, ValueError},
+    exc::{RuntimeError, ValueError},
     py_exception, PyErr, Python,
 };
 use hg;
@@ -39,34 +39,4 @@
     }
 }
 
-py_exception!(rustext, PatternError, RuntimeError);
-py_exception!(rustext, PatternFileError, RuntimeError);
 py_exception!(rustext, HgPathPyError, RuntimeError);
-
-impl PatternError {
-    pub fn pynew(py: Python, inner: hg::PatternError) -> PyErr {
-        match inner {
-            hg::PatternError::UnsupportedSyntax(m) => {
-                PatternError::new(py, ("PatternError", m))
-            }
-        }
-    }
-}
-
-impl PatternFileError {
-    pub fn pynew(py: Python, inner: hg::PatternFileError) -> PyErr {
-        match inner {
-            hg::PatternFileError::IO(e) => {
-                let value = (e.raw_os_error().unwrap_or(2), e.to_string());
-                PyErr::new::<IOError, _>(py, value)
-            }
-            hg::PatternFileError::Pattern(e, l) => match e {
-                hg::PatternError::UnsupportedSyntax(m) => {
-                    PatternFileError::new(py, ("PatternFileError", m, l))
-                }
-            },
-        }
-    }
-}
-
-py_exception!(shared_ref, AlreadyBorrowed, RuntimeError);
--- a/rust/hg-cpython/src/filepatterns.rs	Mon Mar 09 01:11:59 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,133 +0,0 @@
-// filepatterns.rs
-//
-// Copyright 2019, Georges Racinet <gracinet@anybox.fr>,
-// Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-//! Bindings for the `hg::filepatterns` module provided by the
-//! `hg-core` crate. From Python, this will be seen as `rustext.filepatterns`
-//! and can be used as replacement for the the pure `filepatterns` Python
-//! module.
-use crate::exceptions::{PatternError, PatternFileError};
-use cpython::{
-    PyBytes, PyDict, PyModule, PyObject, PyResult, PyTuple, Python, ToPyObject,
-};
-use hg::utils::files;
-use hg::{build_single_regex, read_pattern_file, LineNumber, PatternTuple};
-use std::path::PathBuf;
-
-/// Rust does not like functions with different return signatures.
-/// The 3-tuple version is always returned by the hg-core function,
-/// the (potential) conversion is handled at this level since it is not likely
-/// to have any measurable impact on performance.
-///
-/// The Python implementation passes a function reference for `warn` instead
-/// of a boolean that is used to emit warnings while parsing. The Rust
-/// implementation chooses to accumulate the warnings and propagate them to
-/// Python upon completion. See the `readpatternfile` function in `match.py`
-/// for more details.
-fn read_pattern_file_wrapper(
-    py: Python,
-    file_path: PyObject,
-    warn: bool,
-    source_info: bool,
-) -> PyResult<PyTuple> {
-    let bytes = file_path.extract::<PyBytes>(py)?;
-    let path = files::get_path_from_bytes(bytes.data(py));
-    match read_pattern_file(path, warn) {
-        Ok((patterns, warnings)) => {
-            if source_info {
-                let itemgetter = |x: &PatternTuple| {
-                    (PyBytes::new(py, &x.0), x.1, PyBytes::new(py, &x.2))
-                };
-                let results: Vec<(PyBytes, LineNumber, PyBytes)> =
-                    patterns.iter().map(itemgetter).collect();
-                return Ok((results, warnings_to_py_bytes(py, &warnings))
-                    .to_py_object(py));
-            }
-            let itemgetter = |x: &PatternTuple| PyBytes::new(py, &x.0);
-            let results: Vec<PyBytes> =
-                patterns.iter().map(itemgetter).collect();
-            Ok(
-                (results, warnings_to_py_bytes(py, &warnings))
-                    .to_py_object(py),
-            )
-        }
-        Err(e) => Err(PatternFileError::pynew(py, e)),
-    }
-}
-
-fn warnings_to_py_bytes(
-    py: Python,
-    warnings: &[(PathBuf, Vec<u8>)],
-) -> Vec<(PyBytes, PyBytes)> {
-    warnings
-        .iter()
-        .map(|(path, syn)| {
-            (
-                PyBytes::new(py, &files::get_bytes_from_path(path)),
-                PyBytes::new(py, syn),
-            )
-        })
-        .collect()
-}
-
-fn build_single_regex_wrapper(
-    py: Python,
-    kind: PyObject,
-    pat: PyObject,
-    globsuffix: PyObject,
-) -> PyResult<PyBytes> {
-    match build_single_regex(
-        kind.extract::<PyBytes>(py)?.data(py),
-        pat.extract::<PyBytes>(py)?.data(py),
-        globsuffix.extract::<PyBytes>(py)?.data(py),
-    ) {
-        Ok(regex) => Ok(PyBytes::new(py, &regex)),
-        Err(e) => Err(PatternError::pynew(py, e)),
-    }
-}
-
-pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
-    let dotted_name = &format!("{}.filepatterns", package);
-    let m = PyModule::new(py, dotted_name)?;
-
-    m.add(py, "__package__", package)?;
-    m.add(
-        py,
-        "__doc__",
-        "Patterns files parsing - Rust implementation",
-    )?;
-    m.add(
-        py,
-        "build_single_regex",
-        py_fn!(
-            py,
-            build_single_regex_wrapper(
-                kind: PyObject,
-                pat: PyObject,
-                globsuffix: PyObject
-            )
-        ),
-    )?;
-    m.add(
-        py,
-        "read_pattern_file",
-        py_fn!(
-            py,
-            read_pattern_file_wrapper(
-                file_path: PyObject,
-                warn: bool,
-                source_info: bool
-            )
-        ),
-    )?;
-    m.add(py, "PatternError", py.get_type::<PatternError>())?;
-    let sys = PyModule::import(py, "sys")?;
-    let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
-    sys_modules.set_item(py, dotted_name, &m)?;
-
-    Ok(m)
-}
--- a/rust/hg-cpython/src/lib.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/lib.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -33,7 +33,6 @@
 pub mod dirstate;
 pub mod discovery;
 pub mod exceptions;
-pub mod filepatterns;
 pub mod parsers;
 pub mod revlog;
 pub mod utils;
@@ -53,25 +52,10 @@
     m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
     m.add(
         py,
-        "filepatterns",
-        filepatterns::init_module(py, &dotted_name)?,
-    )?;
-    m.add(
-        py,
         "parsers",
         parsers::init_parsers_module(py, &dotted_name)?,
     )?;
     m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
-    m.add(
-        py,
-        "PatternFileError",
-        py.get_type::<exceptions::PatternFileError>(),
-    )?;
-    m.add(
-        py,
-        "PatternError",
-        py.get_type::<exceptions::PatternError>(),
-    )?;
     Ok(())
 });
 
--- a/rust/hg-cpython/src/ref_sharing.rs	Mon Mar 09 01:11:59 2020 +0100
+++ b/rust/hg-cpython/src/ref_sharing.rs	Mon Mar 09 10:18:40 2020 -0700
@@ -22,413 +22,6 @@
 
 //! Macros for use in the `hg-cpython` bridge library.
 
-use crate::exceptions::AlreadyBorrowed;
-use cpython::{exc, PyClone, PyErr, PyObject, PyResult, Python};
-use std::cell::{Ref, RefCell, RefMut};
-use std::ops::{Deref, DerefMut};
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-/// Manages the shared state between Python and Rust
-///
-/// `PySharedState` is owned by `PySharedRefCell`, and is shared across its
-/// derived references. The consistency of these references are guaranteed
-/// as follows:
-///
-/// - The immutability of `py_class!` object fields. Any mutation of
-///   `PySharedRefCell` is allowed only through its `borrow_mut()`.
-/// - The `py: Python<'_>` token, which makes sure that any data access is
-///   synchronized by the GIL.
-/// - The underlying `RefCell`, which prevents `PySharedRefCell` data from
-///   being directly borrowed or leaked while it is mutably borrowed.
-/// - The `borrow_count`, which is the number of references borrowed from
-///   `PyLeaked`. Just like `RefCell`, mutation is prohibited while `PyLeaked`
-///   is borrowed.
-/// - The `generation` counter, which increments on `borrow_mut()`. `PyLeaked`
-///   reference is valid only if the `current_generation()` equals to the
-///   `generation` at the time of `leak_immutable()`.
-#[derive(Debug, Default)]
-struct PySharedState {
-    // The counter variable could be Cell<usize> since any operation on
-    // PySharedState is synchronized by the GIL, but being "atomic" makes
-    // PySharedState inherently Sync. The ordering requirement doesn't
-    // matter thanks to the GIL.
-    borrow_count: AtomicUsize,
-    generation: AtomicUsize,
-}
-
-impl PySharedState {
-    fn borrow_mut<'a, T>(
-        &'a self,
-        py: Python<'a>,
-        pyrefmut: RefMut<'a, T>,
-    ) -> PyResult<RefMut<'a, T>> {
-        match self.current_borrow_count(py) {
-            0 => {
-                // Note that this wraps around to the same value if mutably
-                // borrowed more than usize::MAX times, which wouldn't happen
-                // in practice.
-                self.generation.fetch_add(1, Ordering::Relaxed);
-                Ok(pyrefmut)
-            }
-            _ => Err(AlreadyBorrowed::new(
-                py,
-                "Cannot borrow mutably while immutably borrowed",
-            )),
-        }
-    }
-
-    /// Return a reference to the wrapped data and its state with an
-    /// artificial static lifetime.
-    /// We need to be protected by the GIL for thread-safety.
-    ///
-    /// # Safety
-    ///
-    /// This is highly unsafe since the lifetime of the given data can be
-    /// extended. Do not call this function directly.
-    unsafe fn leak_immutable<T>(
-        &self,
-        _py: Python,
-        data: Ref<T>,
-    ) -> (&'static T, &'static PySharedState) {
-        let ptr: *const T = &*data;
-        let state_ptr: *const PySharedState = self;
-        (&*ptr, &*state_ptr)
-    }
-
-    fn current_borrow_count(&self, _py: Python) -> usize {
-        self.borrow_count.load(Ordering::Relaxed)
-    }
-
-    fn increase_borrow_count(&self, _py: Python) {
-        // Note that this wraps around if there are more than usize::MAX
-        // borrowed references, which shouldn't happen due to memory limit.
-        self.borrow_count.fetch_add(1, Ordering::Relaxed);
-    }
-
-    fn decrease_borrow_count(&self, _py: Python) {
-        let prev_count = self.borrow_count.fetch_sub(1, Ordering::Relaxed);
-        assert!(prev_count > 0);
-    }
-
-    fn current_generation(&self, _py: Python) -> usize {
-        self.generation.load(Ordering::Relaxed)
-    }
-}
-
-/// Helper to keep the borrow count updated while the shared object is
-/// immutably borrowed without using the `RefCell` interface.
-struct BorrowPyShared<'a> {
-    py: Python<'a>,
-    py_shared_state: &'a PySharedState,
-}
-
-impl<'a> BorrowPyShared<'a> {
-    fn new(
-        py: Python<'a>,
-        py_shared_state: &'a PySharedState,
-    ) -> BorrowPyShared<'a> {
-        py_shared_state.increase_borrow_count(py);
-        BorrowPyShared {
-            py,
-            py_shared_state,
-        }
-    }
-}
-
-impl Drop for BorrowPyShared<'_> {
-    fn drop(&mut self) {
-        self.py_shared_state.decrease_borrow_count(self.py);
-    }
-}
-
-/// `RefCell` wrapper to be safely used in conjunction with `PySharedState`.
-///
-/// This object can be stored in a `py_class!` object as a data field. Any
-/// operation is allowed through the `PySharedRef` interface.
-#[derive(Debug)]
-pub struct PySharedRefCell<T> {
-    inner: RefCell<T>,
-    py_shared_state: PySharedState,
-}
-
-impl<T> PySharedRefCell<T> {
-    pub fn new(value: T) -> PySharedRefCell<T> {
-        Self {
-            inner: RefCell::new(value),
-            py_shared_state: PySharedState::default(),
-        }
-    }
-
-    fn borrow<'a>(&'a self, _py: Python<'a>) -> Ref<'a, T> {
-        // py_shared_state isn't involved since
-        // - inner.borrow() would fail if self is mutably borrowed,
-        // - and inner.borrow_mut() would fail while self is borrowed.
-        self.inner.borrow()
-    }
-
-    // TODO: maybe this should be named as try_borrow_mut(), and use
-    // inner.try_borrow_mut(). The current implementation panics if
-    // self.inner has been borrowed, but returns error if py_shared_state
-    // refuses to borrow.
-    fn borrow_mut<'a>(&'a self, py: Python<'a>) -> PyResult<RefMut<'a, T>> {
-        self.py_shared_state.borrow_mut(py, self.inner.borrow_mut())
-    }
-}
-
-/// Sharable data member of type `T` borrowed from the `PyObject`.
-pub struct PySharedRef<'a, T> {
-    py: Python<'a>,
-    owner: &'a PyObject,
-    data: &'a PySharedRefCell<T>,
-}
-
-impl<'a, T> PySharedRef<'a, T> {
-    /// # Safety
-    ///
-    /// The `data` must be owned by the `owner`. Otherwise, the leak count
-    /// would get wrong.
-    pub unsafe fn new(
-        py: Python<'a>,
-        owner: &'a PyObject,
-        data: &'a PySharedRefCell<T>,
-    ) -> Self {
-        Self { py, owner, data }
-    }
-
-    pub fn borrow(&self) -> Ref<'a, T> {
-        self.data.borrow(self.py)
-    }
-
-    pub fn borrow_mut(&self) -> PyResult<RefMut<'a, T>> {
-        self.data.borrow_mut(self.py)
-    }
-
-    /// Returns a leaked reference.
-    ///
-    /// # Panics
-    ///
-    /// Panics if this is mutably borrowed.
-    pub fn leak_immutable(&self) -> PyLeaked<&'static T> {
-        let state = &self.data.py_shared_state;
-        // make sure self.data isn't mutably borrowed; otherwise the
-        // generation number can't be trusted.
-        let data_ref = self.borrow();
-        unsafe {
-            let (static_ref, static_state_ref) =
-                state.leak_immutable(self.py, data_ref);
-            PyLeaked::new(self.py, self.owner, static_ref, static_state_ref)
-        }
-    }
-}
-
-/// Allows a `py_class!` generated struct to share references to one of its
-/// data members with Python.
-///
-/// # Parameters
-///
-/// * `$name` is the same identifier used in for `py_class!` macro call.
-/// * `$inner_struct` is the identifier of the underlying Rust struct
-/// * `$data_member` is the identifier of the data member of `$inner_struct`
-/// that will be shared.
-/// * `$shared_accessor` is the function name to be generated, which allows
-/// safe access to the data member.
-///
-/// # Safety
-///
-/// `$data_member` must persist while the `$name` object is alive. In other
-/// words, it must be an accessor to a data field of the Python object.
-///
-/// # Example
-///
-/// ```
-/// struct MyStruct {
-///     inner: Vec<u32>;
-/// }
-///
-/// py_class!(pub class MyType |py| {
-///     data inner: PySharedRefCell<MyStruct>;
-/// });
-///
-/// py_shared_ref!(MyType, MyStruct, inner, inner_shared);
-/// ```
-macro_rules! py_shared_ref {
-    (
-        $name: ident,
-        $inner_struct: ident,
-        $data_member: ident,
-        $shared_accessor: ident
-    ) => {
-        impl $name {
-            /// Returns a safe reference to the shared `$data_member`.
-            ///
-            /// This function guarantees that `PySharedRef` is created with
-            /// the valid `self` and `self.$data_member(py)` pair.
-            fn $shared_accessor<'a>(
-                &'a self,
-                py: Python<'a>,
-            ) -> $crate::ref_sharing::PySharedRef<'a, $inner_struct> {
-                use cpython::PythonObject;
-                use $crate::ref_sharing::PySharedRef;
-                let owner = self.as_object();
-                let data = self.$data_member(py);
-                unsafe { PySharedRef::new(py, owner, data) }
-            }
-        }
-    };
-}
-
-/// Manage immutable references to `PyObject` leaked into Python iterators.
-///
-/// This reference will be invalidated once the original value is mutably
-/// borrowed.
-pub struct PyLeaked<T> {
-    inner: PyObject,
-    data: Option<T>,
-    py_shared_state: &'static PySharedState,
-    /// Generation counter of data `T` captured when PyLeaked is created.
-    generation: usize,
-}
-
-// DO NOT implement Deref for PyLeaked<T>! Dereferencing PyLeaked
-// without taking Python GIL wouldn't be safe. Also, the underling reference
-// is invalid if generation != py_shared_state.generation.
-
-impl<T> PyLeaked<T> {
-    /// # Safety
-    ///
-    /// The `py_shared_state` must be owned by the `inner` Python object.
-    fn new(
-        py: Python,
-        inner: &PyObject,
-        data: T,
-        py_shared_state: &'static PySharedState,
-    ) -> Self {
-        Self {
-            inner: inner.clone_ref(py),
-            data: Some(data),
-            py_shared_state,
-            generation: py_shared_state.current_generation(py),
-        }
-    }
-
-    /// Immutably borrows the wrapped value.
-    ///
-    /// Borrowing fails if the underlying reference has been invalidated.
-    pub fn try_borrow<'a>(
-        &'a self,
-        py: Python<'a>,
-    ) -> PyResult<PyLeakedRef<'a, T>> {
-        self.validate_generation(py)?;
-        Ok(PyLeakedRef {
-            _borrow: BorrowPyShared::new(py, self.py_shared_state),
-            data: self.data.as_ref().unwrap(),
-        })
-    }
-
-    /// Mutably borrows the wrapped value.
-    ///
-    /// Borrowing fails if the underlying reference has been invalidated.
-    ///
-    /// Typically `T` is an iterator. If `T` is an immutable reference,
-    /// `get_mut()` is useless since the inner value can't be mutated.
-    pub fn try_borrow_mut<'a>(
-        &'a mut self,
-        py: Python<'a>,
-    ) -> PyResult<PyLeakedRefMut<'a, T>> {
-        self.validate_generation(py)?;
-        Ok(PyLeakedRefMut {
-            _borrow: BorrowPyShared::new(py, self.py_shared_state),
-            data: self.data.as_mut().unwrap(),
-        })
-    }
-
-    /// Converts the inner value by the given function.
-    ///
-    /// Typically `T` is a static reference to a container, and `U` is an
-    /// iterator of that container.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the underlying reference has been invalidated.
-    ///
-    /// This is typically called immediately after the `PyLeaked` is obtained.
-    /// In which case, the reference must be valid and no panic would occur.
-    ///
-    /// # Safety
-    ///
-    /// The lifetime of the object passed in to the function `f` is cheated.
-    /// It's typically a static reference, but is valid only while the
-    /// corresponding `PyLeaked` is alive. Do not copy it out of the
-    /// function call.
-    pub unsafe fn map<U>(
-        mut self,
-        py: Python,
-        f: impl FnOnce(T) -> U,
-    ) -> PyLeaked<U> {
-        // Needs to test the generation value to make sure self.data reference
-        // is still intact.
-        self.validate_generation(py)
-            .expect("map() over invalidated leaked reference");
-
-        // f() could make the self.data outlive. That's why map() is unsafe.
-        // In order to make this function safe, maybe we'll need a way to
-        // temporarily restrict the lifetime of self.data and translate the
-        // returned object back to Something<'static>.
-        let new_data = f(self.data.take().unwrap());
-        PyLeaked {
-            inner: self.inner.clone_ref(py),
-            data: Some(new_data),
-            py_shared_state: self.py_shared_state,
-            generation: self.generation,
-        }
-    }
-
-    fn validate_generation(&self, py: Python) -> PyResult<()> {
-        if self.py_shared_state.current_generation(py) == self.generation {
-            Ok(())
-        } else {
-            Err(PyErr::new::<exc::RuntimeError, _>(
-                py,
-                "Cannot access to leaked reference after mutation",
-            ))
-        }
-    }
-}
-
-/// Immutably borrowed reference to a leaked value.
-pub struct PyLeakedRef<'a, T> {
-    _borrow: BorrowPyShared<'a>,
-    data: &'a T,
-}
-
-impl<T> Deref for PyLeakedRef<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        self.data
-    }
-}
-
-/// Mutably borrowed reference to a leaked value.
-pub struct PyLeakedRefMut<'a, T> {
-    _borrow: BorrowPyShared<'a>,
-    data: &'a mut T,
-}
-
-impl<T> Deref for PyLeakedRefMut<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        self.data
-    }
-}
-
-impl<T> DerefMut for PyLeakedRefMut<'_, T> {
-    fn deref_mut(&mut self) -> &mut T {
-        self.data
-    }
-}
-
 /// Defines a `py_class!` that acts as a Python iterator over a Rust iterator.
 ///
 /// TODO: this is a bit awkward to use, and a better (more complicated)
@@ -437,12 +30,18 @@
 /// # Parameters
 ///
 /// * `$name` is the identifier to give to the resulting Rust struct.
-/// * `$leaked` corresponds to `$leaked` in the matching `py_shared_ref!` call.
+/// * `$leaked` corresponds to `UnsafePyLeaked` in the matching `@shared data`
+/// declaration.
 /// * `$iterator_type` is the type of the Rust iterator.
 /// * `$success_func` is a function for processing the Rust `(key, value)`
 /// tuple on iteration success, turning it into something Python understands.
 /// * `$success_type` is the return type of `$success_func`
 ///
+/// # Safety
+///
+/// `$success_func` may take a reference, but its lifetime may be cheated.
+/// Do not copy it out of the function call.
+///
 /// # Example
 ///
 /// ```
@@ -451,7 +50,7 @@
 /// }
 ///
 /// py_class!(pub class MyType |py| {
-///     data inner: PySharedRefCell<MyStruct>;
+///     @shared data inner: MyStruct;
 ///
 ///     def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
 ///         let leaked_ref = self.inner_shared(py).leak_immutable();
@@ -475,11 +74,9 @@
 ///     }
 /// }
 ///
-/// py_shared_ref!(MyType, MyStruct, inner, MyTypeLeakedRef);
-///
 /// py_shared_iterator!(
 ///     MyTypeItemsIterator,
-///     PyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
+///     UnsafePyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
 ///     MyType::translate_key_value,
 ///     Option<(PyBytes, PyBytes)>
 /// );
@@ -496,9 +93,10 @@
 
             def __next__(&self) -> PyResult<$success_type> {
                 let mut leaked = self.inner(py).borrow_mut();
-                let mut iter = leaked.try_borrow_mut(py)?;
+                let mut iter = unsafe { leaked.try_borrow_mut(py)? };
                 match iter.next() {
                     None => Ok(None),
+                    // res may be a reference with a cheated 'static lifetime
                     Some(res) => $success_func(py, res),
                 }
             }
@@ -521,116 +119,3 @@
         }
     };
 }
-
-#[cfg(test)]
-#[cfg(any(feature = "python27-bin", feature = "python3-bin"))]
-mod test {
-    use super::*;
-    use cpython::{GILGuard, Python};
-
-    py_class!(class Owner |py| {
-        data string: PySharedRefCell<String>;
-    });
-    py_shared_ref!(Owner, String, string, string_shared);
-
-    fn prepare_env() -> (GILGuard, Owner) {
-        let gil = Python::acquire_gil();
-        let py = gil.python();
-        let owner =
-            Owner::create_instance(py, PySharedRefCell::new("new".to_owned()))
-                .unwrap();
-        (gil, owner)
-    }
-
-    #[test]
-    fn test_leaked_borrow() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        let leaked_ref = leaked.try_borrow(py).unwrap();
-        assert_eq!(*leaked_ref, "new");
-    }
-
-    #[test]
-    fn test_leaked_borrow_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-        let mut leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
-        assert_eq!(leaked_ref.next(), Some('n'));
-        assert_eq!(leaked_ref.next(), Some('e'));
-        assert_eq!(leaked_ref.next(), Some('w'));
-        assert_eq!(leaked_ref.next(), None);
-    }
-
-    #[test]
-    fn test_leaked_borrow_after_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        owner.string_shared(py).borrow_mut().unwrap().clear();
-        assert!(leaked.try_borrow(py).is_err());
-    }
-
-    #[test]
-    fn test_leaked_borrow_mut_after_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-        owner.string_shared(py).borrow_mut().unwrap().clear();
-        assert!(leaked_iter.try_borrow_mut(py).is_err());
-    }
-
-    #[test]
-    #[should_panic(expected = "map() over invalidated leaked reference")]
-    fn test_leaked_map_after_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        owner.string_shared(py).borrow_mut().unwrap().clear();
-        let _leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-    }
-
-    #[test]
-    fn test_borrow_mut_while_leaked_ref() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-        let leaked = owner.string_shared(py).leak_immutable();
-        {
-            let _leaked_ref = leaked.try_borrow(py).unwrap();
-            assert!(owner.string_shared(py).borrow_mut().is_err());
-            {
-                let _leaked_ref2 = leaked.try_borrow(py).unwrap();
-                assert!(owner.string_shared(py).borrow_mut().is_err());
-            }
-            assert!(owner.string_shared(py).borrow_mut().is_err());
-        }
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-    }
-
-    #[test]
-    fn test_borrow_mut_while_leaked_ref_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-        let leaked = owner.string_shared(py).leak_immutable();
-        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-        {
-            let _leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
-            assert!(owner.string_shared(py).borrow_mut().is_err());
-        }
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-    }
-
-    #[test]
-    #[should_panic(expected = "mutably borrowed")]
-    fn test_leak_while_borrow_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let _mut_ref = owner.string_shared(py).borrow_mut();
-        owner.string_shared(py).leak_immutable();
-    }
-}
--- a/rust/hg-direct-ffi/Cargo.toml	Mon Mar 09 01:11:59 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-[package]
-name = "hgdirectffi"
-version = "0.1.0"
-authors = ["Georges Racinet <gracinet@anybox.fr>"]
-description = "Low level Python bindings for hg-core, going through existing C extensions"
-
-[dependencies]
-libc = "*"
-hg-core = { path = "../hg-core" }
-
-[lib]
-crate-type = ["staticlib"]
--- a/rust/hg-direct-ffi/rustfmt.toml	Mon Mar 09 01:11:59 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-max_width = 79
-wrap_comments = true
-error_on_line_overflow = true
--- a/rust/hg-direct-ffi/src/ancestors.rs	Mon Mar 09 01:11:59 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,282 +0,0 @@
-// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-//! Bindings for CPython extension code
-//!
-//! This exposes methods to build and use a `rustlazyancestors` iterator
-//! from C code, using an index and its parents function that are passed
-//! from the caller at instantiation.
-
-use hg::AncestorsIterator;
-use hg::{Graph, GraphError, Revision, NULL_REVISION};
-use libc::{c_int, c_long, c_void, ssize_t};
-use std::ptr::null_mut;
-use std::slice;
-
-type IndexPtr = *mut c_void;
-
-extern "C" {
-    fn HgRevlogIndex_GetParents(
-        op: IndexPtr,
-        rev: c_int,
-        parents: *mut [c_int; 2],
-    ) -> c_int;
-}
-
-/// A Graph backed up by objects and functions from revlog.c
-///
-/// This implementation of the Graph trait, relies on (pointers to)
-/// - the C index object (`index` member)
-/// - the `index_get_parents()` function (`parents` member)
-pub struct Index {
-    index: IndexPtr,
-}
-
-impl Index {
-    pub fn new(index: IndexPtr) -> Self {
-        Index { index: index }
-    }
-}
-
-impl Graph for Index {
-    /// wrap a call to the C extern parents function
-    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
-        let mut res: [c_int; 2] = [0; 2];
-        let code = unsafe {
-            HgRevlogIndex_GetParents(
-                self.index,
-                rev,
-                &mut res as *mut [c_int; 2],
-            )
-        };
-        match code {
-            0 => Ok(res),
-            _ => Err(GraphError::ParentOutOfRange(rev)),
-        }
-    }
-}
-
-/// Wrapping of AncestorsIterator<Index> constructor, for C callers.
-///
-/// Besides `initrevs`, `stoprev` and `inclusive`, that are converted
-/// we receive the index and the parents function as pointers
-#[no_mangle]
-pub extern "C" fn rustlazyancestors_init(
-    index: IndexPtr,
-    initrevslen: ssize_t,
-    initrevs: *mut c_long,
-    stoprev: c_long,
-    inclusive: c_int,
-) -> *mut AncestorsIterator<Index> {
-    assert!(initrevslen >= 0);
-    unsafe {
-        raw_init(
-            Index::new(index),
-            initrevslen as usize,
-            initrevs,
-            stoprev,
-            inclusive,
-        )
-    }
-}
-
-/// Testable (for any Graph) version of rustlazyancestors_init
-#[inline]
-unsafe fn raw_init<G: Graph>(
-    graph: G,
-    initrevslen: usize,
-    initrevs: *mut c_long,
-    stoprev: c_long,
-    inclusive: c_int,
-) -> *mut AncestorsIterator<G> {
-    let inclb = match inclusive {
-        0 => false,
-        1 => true,
-        _ => {
-            return null_mut();
-        }
-    };
-
-    let slice = slice::from_raw_parts(initrevs, initrevslen);
-
-    Box::into_raw(Box::new(
-        match AncestorsIterator::new(
-            graph,
-            slice.into_iter().map(|&r| r as Revision),
-            stoprev as Revision,
-            inclb,
-        ) {
-            Ok(it) => it,
-            Err(_) => {
-                return null_mut();
-            }
-        },
-    ))
-}
-
-/// Deallocator to be called from C code
-#[no_mangle]
-pub extern "C" fn rustlazyancestors_drop(
-    raw_iter: *mut AncestorsIterator<Index>,
-) {
-    raw_drop(raw_iter);
-}
-
-/// Testable (for any Graph) version of rustlazayancestors_drop
-#[inline]
-fn raw_drop<G: Graph>(raw_iter: *mut AncestorsIterator<G>) {
-    unsafe {
-        Box::from_raw(raw_iter);
-    }
-}
-
-/// Iteration main method to be called from C code
-///
-/// We convert the end of iteration into NULL_REVISION,
-/// it will be up to the C wrapper to convert that back into a Python end of
-/// iteration
-#[no_mangle]
-pub extern "C" fn rustlazyancestors_next(
-    raw: *mut AncestorsIterator<Index>,
-) -> c_long {
-    raw_next(raw)
-}
-
-/// Testable (for any Graph) version of rustlazayancestors_next
-#[inline]
-fn raw_next<G: Graph>(raw: *mut AncestorsIterator<G>) -> c_long {
-    let as_ref = unsafe { &mut *raw };
-    let rev = match as_ref.next() {
-        Some(Ok(rev)) => rev,
-        Some(Err(_)) | None => NULL_REVISION,
-    };
-    rev as c_long
-}
-
-#[no_mangle]
-pub extern "C" fn rustlazyancestors_contains(
-    raw: *mut AncestorsIterator<Index>,
-    target: c_long,
-) -> c_int {
-    raw_contains(raw, target)
-}
-
-/// Testable (for any Graph) version of rustlazayancestors_next
-#[inline]
-fn raw_contains<G: Graph>(
-    raw: *mut AncestorsIterator<G>,
-    target: c_long,
-) -> c_int {
-    let as_ref = unsafe { &mut *raw };
-    match as_ref.contains(target as Revision) {
-        Ok(r) => r as c_int,
-        Err(_) => -1,
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use std::thread;
-
-    #[derive(Clone, Debug)]
-    struct Stub;
-
-    impl Graph for Stub {
-        fn parents(&self, r: Revision) -> Result<[Revision; 2], GraphError> {
-            match r {
-                25 => Err(GraphError::ParentOutOfRange(25)),
-                _ => Ok([1, 2]),
-            }
-        }
-    }
-
-    /// Helper for test_init_next()
-    fn stub_raw_init(
-        initrevslen: usize,
-        initrevs: usize,
-        stoprev: c_long,
-        inclusive: c_int,
-    ) -> usize {
-        unsafe {
-            raw_init(
-                Stub,
-                initrevslen,
-                initrevs as *mut c_long,
-                stoprev,
-                inclusive,
-            ) as usize
-        }
-    }
-
-    fn stub_raw_init_from_vec(
-        mut initrevs: Vec<c_long>,
-        stoprev: c_long,
-        inclusive: c_int,
-    ) -> *mut AncestorsIterator<Stub> {
-        unsafe {
-            raw_init(
-                Stub,
-                initrevs.len(),
-                initrevs.as_mut_ptr(),
-                stoprev,
-                inclusive,
-            )
-        }
-    }
-
-    #[test]
-    // Test what happens when we init an Iterator as with the exposed C ABI
-    // and try to use it afterwards
-    // We spawn new threads, in order to make memory consistency harder
-    // but this forces us to convert the pointers into shareable usizes.
-    fn test_init_next() {
-        let mut initrevs: Vec<c_long> = vec![11, 13];
-        let initrevs_len = initrevs.len();
-        let initrevs_ptr = initrevs.as_mut_ptr() as usize;
-        let handler = thread::spawn(move || {
-            stub_raw_init(initrevs_len, initrevs_ptr, 0, 1)
-        });
-        let raw = handler.join().unwrap() as *mut AncestorsIterator<Stub>;
-
-        assert_eq!(raw_next(raw), 13);
-        assert_eq!(raw_next(raw), 11);
-        assert_eq!(raw_next(raw), 2);
-        assert_eq!(raw_next(raw), 1);
-        assert_eq!(raw_next(raw), NULL_REVISION as c_long);
-        raw_drop(raw);
-    }
-
-    #[test]
-    fn test_init_wrong_bool() {
-        assert_eq!(stub_raw_init_from_vec(vec![11, 13], 0, 2), null_mut());
-    }
-
-    #[test]
-    fn test_empty() {
-        let raw = stub_raw_init_from_vec(vec![], 0, 1);
-        assert_eq!(raw_next(raw), NULL_REVISION as c_long);
-        raw_drop(raw);
-    }
-
-    #[test]
-    fn test_init_err_out_of_range() {
-        assert!(stub_raw_init_from_vec(vec![25], 0, 0).is_null());
-    }
-
-    #[test]
-    fn test_contains() {
-        let raw = stub_raw_init_from_vec(vec![5, 6], 0, 1);
-        assert_eq!(raw_contains(raw, 5), 1);
-        assert_eq!(raw_contains(raw, 2), 1);
-    }
-
-    #[test]
-    fn test_contains_exclusive() {
-        let raw = stub_raw_init_from_vec(vec![5, 6], 0, 0);
-        assert_eq!(raw_contains(raw, 5), 0);
-        assert_eq!(raw_contains(raw, 2), 1);
-    }
-}
--- a/rust/hg-direct-ffi/src/lib.rs	Mon Mar 09 01:11:59 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,19 +0,0 @@
-// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-//! Bindings for CPython extension code
-//!
-//! This exposes methods to build and use a `rustlazyancestors` iterator
-//! from C code, using an index and its parents function that are passed
-//! from the caller at instantiation.
-
-extern crate hg;
-extern crate libc;
-
-mod ancestors;
-pub use ancestors::{
-    rustlazyancestors_contains, rustlazyancestors_drop,
-    rustlazyancestors_init, rustlazyancestors_next,
-};
--- a/setup.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/setup.py	Mon Mar 09 10:18:40 2020 -0700
@@ -3,6 +3,7 @@
 #
 # 'python setup.py install', or
 # 'python setup.py --help' for more options
+from __future__ import print_function
 
 import os
 
@@ -141,7 +142,9 @@
 # TODO record it for proper rebuild upon changes
 # (see mercurial/__modulepolicy__.py)
 if hgrustext != 'cpython' and hgrustext is not None:
-    hgrustext = 'direct-ffi'
+    if hgrustext:
+        print('unknown HGWITHRUSTEXT value: %s' % hgrustext, file=sys.stderr)
+    hgrustext = None
 
 import ctypes
 import errno
@@ -323,7 +326,7 @@
     # gives precedence to hg.exe in the current directory, so fall back to the
     # python invocation of local hg, where pythonXY.dll can always be found.
     check_cmd = ['log', '-r.', '-Ttest']
-    if os.name != 'nt':
+    if os.name != 'nt' or not os.path.exists("hg.exe"):
         try:
             retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
         except EnvironmentError:
@@ -543,7 +546,7 @@
         # Build Rust standalone extensions if it'll be used
         # and its build is not explicitly disabled (for external build
         # as Linux distributions would do)
-        if self.distribution.rust and self.rust and hgrustext != 'direct-ffi':
+        if self.distribution.rust and self.rust:
             for rustext in ruststandalones:
                 rustext.build('' if self.inplace else self.build_lib)
 
@@ -935,11 +938,11 @@
             normalizecrlf('doc/%s.html' % root)
 
         # This logic is duplicated in doc/Makefile.
-        sources = set(
+        sources = {
             f
             for f in os.listdir('mercurial/helptext')
             if re.search(r'[0-9]\.txt$', f)
-        )
+        }
 
         # common.txt is a one-off.
         gentxt('common')
@@ -979,7 +982,7 @@
         # Screen out egg related commands to prevent egg generation.  But allow
         # mercurial.egg-info generation, since that is part of modern
         # packaging.
-        excl = set(['bdist_egg'])
+        excl = {'bdist_egg'}
         return filter(lambda x: x not in excl, install.get_sub_commands(self))
 
 
@@ -1212,6 +1215,7 @@
     'hgext.fastannotate',
     'hgext.fsmonitor.pywatchman',
     'hgext.highlight',
+    'hgext.hooklib',
     'hgext.infinitepush',
     'hgext.largefiles',
     'hgext.lfs',
@@ -1353,10 +1357,19 @@
             env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
 
         cargocmd = ['cargo', 'rustc', '-vv', '--release']
+
+        feature_flags = []
+
         if sys.version_info[0] == 3 and self.py3_features is not None:
-            cargocmd.extend(
-                ('--features', self.py3_features, '--no-default-features')
-            )
+            feature_flags.append(self.py3_features)
+            cargocmd.append('--no-default-features')
+
+        rust_features = env.get("HG_RUST_FEATURES")
+        if rust_features:
+            feature_flags.append(rust_features)
+
+        cargocmd.extend(('--features', " ".join(feature_flags)))
+
         cargocmd.append('--')
         if sys.platform == 'darwin':
             cargocmd.extend(
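
A minimal sketch, assuming HG_RUST_FEATURES holds a space-separated list of extra cargo feature names (the value and the 'python3' feature below are placeholders for whatever setup.py actually resolves), of how the hunk above assembles the --features argument:

    import os

    feature_flags = ['python3']  # stand-in for self.py3_features
    extra = os.environ.get('HG_RUST_FEATURES')  # e.g. 'some-extra-feature'
    if extra:
        feature_flags.append(extra)
    cargocmd = ['cargo', 'rustc', '-vv', '--release', '--no-default-features',
                '--features', ' '.join(feature_flags), '--']
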
@@ -1381,29 +1394,6 @@
             )
 
 
-class RustEnhancedExtension(RustExtension):
-    """A C Extension, conditionally enhanced with Rust code.
-
-    If the HGWITHRUSTEXT environment variable is set to something else
-    than 'cpython', the Rust sources get compiled and linked within
-    the C target shared library object.
-    """
-
-    def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
-        RustExtension.__init__(
-            self, mpath, sources, rustlibname, subcrate, **kw
-        )
-        if hgrustext != 'direct-ffi':
-            return
-        self.extra_compile_args.append('-DWITH_RUST')
-        self.libraries.append(rustlibname)
-        self.library_dirs.append(self.rusttargetdir)
-
-    def rustbuild(self):
-        if hgrustext == 'direct-ffi':
-            RustExtension.rustbuild(self)
-
-
 class RustStandaloneExtension(RustExtension):
     def __init__(self, pydottedname, rustcrate, dylibname, **kw):
         RustExtension.__init__(
@@ -1443,7 +1433,7 @@
         include_dirs=common_include_dirs,
         depends=common_depends,
     ),
-    RustEnhancedExtension(
+    Extension(
         'mercurial.cext.parsers',
         [
             'mercurial/cext/charencode.c',
@@ -1453,16 +1443,9 @@
             'mercurial/cext/pathencode.c',
             'mercurial/cext/revlog.c',
         ],
-        'hgdirectffi',
-        'hg-direct-ffi',
         include_dirs=common_include_dirs,
         depends=common_depends
-        + [
-            'mercurial/cext/charencode.h',
-            'mercurial/cext/revlog.h',
-            'rust/hg-core/src/ancestors.rs',
-            'rust/hg-core/src/lib.rs',
-        ],
+        + ['mercurial/cext/charencode.h', 'mercurial/cext/revlog.h',],
     ),
     Extension(
         'mercurial.cext.osutil',
--- a/tests/common-pattern.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/common-pattern.py	Mon Mar 09 10:18:40 2020 -0700
@@ -12,6 +12,7 @@
         br'bundlecaps=HG20%2Cbundle2%3DHG20%250A'
         br'bookmarks%250A'
         br'changegroup%253D01%252C02%250A'
+        br'checkheads%253Drelated%250A'
         br'digests%253Dmd5%252Csha1%252Csha512%250A'
         br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A'
         br'hgtagsfnodes%250A'
@@ -28,6 +29,7 @@
         br'bundlecaps=HG20%2Cbundle2%3DHG20%250A'
         br'bookmarks%250A'
         br'changegroup%253D01%252C02%250A'
+        br'checkheads%3Drelated%0A'
         br'digests%253Dmd5%252Csha1%252Csha512%250A'
         br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A'
         br'hgtagsfnodes%250A'
@@ -43,6 +45,7 @@
         br'bundle2=HG20%0A'
         br'bookmarks%0A'
         br'changegroup%3D01%2C02%0A'
+        br'checkheads%3Drelated%0A'
         br'digests%3Dmd5%2Csha1%2Csha512%0A'
         br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
         br'hgtagsfnodes%0A'
@@ -60,6 +63,7 @@
         br'bundle2=HG20%0A'
         br'bookmarks%0A'
         br'changegroup%3D01%2C02%0A'
+        br'checkheads%3Drelated%0A'
         br'digests%3Dmd5%2Csha1%2Csha512%0A'
         br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
         br'hgtagsfnodes%0A'
--- a/tests/drawdag.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/drawdag.py	Mon Mar 09 10:18:40 2020 -0700
@@ -438,13 +438,13 @@
             if cmd in (b'replace', b'rebase', b'amend'):
                 nodes = [getctx(m) for m in arg.split(b'->')]
                 for i in range(len(nodes) - 1):
-                    rels.append((nodes[i], (nodes[i + 1],)))
+                    rels.append(((nodes[i],), (nodes[i + 1],)))
             elif cmd in (b'split',):
                 pre, succs = arg.split(b'->')
                 succs = succs.split(b',')
-                rels.append((getctx(pre), (getctx(s) for s in succs)))
+                rels.append(((getctx(pre),), (getctx(s) for s in succs)))
             elif cmd in (b'prune',):
                 for n in arg.split(b','):
-                    rels.append((getctx(n), ()))
+                    rels.append(((getctx(n),), ()))
             if rels:
                 obsolete.createmarkers(repo, rels, date=(0, 0), operation=cmd)
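
A minimal sketch (not part of the changeset) of the relation shapes drawdag now hands to obsolete.createmarkers(); repo, old, new, s1 and s2 stand for hypothetical repository and changectx objects:

    from mercurial import obsolete

    # replace / rebase / amend: one predecessor, one successor
    rels = [((old,), (new,))]
    # split: one predecessor, several successors
    rels = [((old,), (s1, s2))]
    # prune: one predecessor, no successors
    rels = [((old,), ())]
    obsolete.createmarkers(repo, rels, date=(0, 0), operation=b'amend')
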
--- a/tests/fsmonitor-run-tests.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/fsmonitor-run-tests.py	Mon Mar 09 10:18:40 2020 -0700
@@ -30,7 +30,7 @@
     PYTHON3 = True
     xrange = range  # we use xrange in one place, and we'd rather not use range
 
-    def _bytespath(p):
+    def _sys2bytes(p):
         return p.encode('utf-8')
 
 
@@ -47,7 +47,7 @@
     # bytestrings by default, so we don't have to do any extra
     # fiddling there. We define the wrapper functions anyway just to
     # help keep code consistent between platforms.
-    def _bytespath(p):
+    def _sys2bytes(p):
         return p
 
 
@@ -107,7 +107,7 @@
         ]
 
         envb = osenvironb.copy()
-        envb[b'WATCHMAN_CONFIG_FILE'] = _bytespath(cfgfile)
+        envb[b'WATCHMAN_CONFIG_FILE'] = _sys2bytes(cfgfile)
         with open(clilogfile, 'wb') as f:
             proc = subprocess.Popen(
                 argv, env=envb, stdin=None, stdout=f, stderr=f
@@ -129,7 +129,7 @@
     args, runtestsargv = parser.parse_known_args()
 
     with watchman(args) as sockfile:
-        osenvironb[b'WATCHMAN_SOCK'] = _bytespath(sockfile)
+        osenvironb[b'WATCHMAN_SOCK'] = _sys2bytes(sockfile)
         # Indicate to hghave that we're running with fsmonitor enabled.
         osenvironb[b'HGFSMONITOR_TESTS'] = b'1'
 
--- a/tests/hghave.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/hghave.py	Mon Mar 09 10:18:40 2020 -0700
@@ -29,12 +29,12 @@
 
 if sys.version_info[0] >= 3:
 
-    def _bytespath(p):
+    def _sys2bytes(p):
         if p is None:
             return p
         return p.encode('utf-8')
 
-    def _strpath(p):
+    def _bytes2sys(p):
         if p is None:
             return p
         return p.decode('utf-8')
@@ -42,10 +42,10 @@
 
 else:
 
-    def _bytespath(p):
+    def _sys2bytes(p):
         return p
 
-    _strpath = _bytespath
+    _bytes2sys = _sys2bytes
 
 
 def check(name, desc):
@@ -307,13 +307,23 @@
         return False
 
 
-def gethgversion():
+def _gethgversion():
     m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
     if not m:
         return (0, 0)
     return (int(m.group(1)), int(m.group(2)))
 
 
+_hgversion = None
+
+
+def gethgversion():
+    global _hgversion
+    if _hgversion is None:
+        _hgversion = _gethgversion()
+    return _hgversion
+
+
 @checkvers(
     "hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)])
 )
@@ -322,6 +332,17 @@
     return gethgversion() >= (int(major), int(minor))
 
 
+@check("rust", "Using the Rust extensions")
+def has_rust():
+    """Check is the mercurial currently running is using some rust code"""
+    cmd = b'hg debuginstall --quiet 2>&1'
+    match = br'checking module policy \(([^)]+)\)'
+    policy = matchoutput(cmd, match)
+    if not policy:
+        return False
+    return b'rust' in policy.group(1)
+
+
 @check("hg08", "Mercurial >= 0.8")
 def has_hg08():
     if checks["hg09"][0]():
@@ -451,7 +472,7 @@
     os.close(fh)
     name = tempfile.mktemp(dir='.', prefix=tempprefix)
     try:
-        util.oslink(_bytespath(fn), _bytespath(name))
+        util.oslink(_sys2bytes(fn), _sys2bytes(name))
         os.unlink(name)
         return True
     except OSError:
@@ -543,7 +564,7 @@
 @check("pyflakes", "Pyflakes python linter")
 def has_pyflakes():
     return matchoutput(
-        "sh -c \"echo 'import re' 2>&1 | pyflakes\"",
+        "sh -c \"echo 'import re' 2>&1 | $PYTHON -m pyflakes\"",
         br"<stdin>:1: 're' imported but unused",
         True,
     )
@@ -685,7 +706,7 @@
 
         curses.COLOR_BLUE
         return matchoutput('test -x "`which tic`"', br'')
-    except ImportError:
+    except (ImportError, AttributeError):
         return False
 
 
@@ -1022,7 +1043,7 @@
     version_regex = b'black, version ([0-9a-b.]+)'
     version = matchoutput(blackcmd, version_regex)
     sv = distutils.version.StrictVersion
-    return version and sv(_strpath(version.group(1))) >= sv('19.10b0')
+    return version and sv(_bytes2sys(version.group(1))) >= sv('19.10b0')
 
 
 @check('pytype', 'the pytype type checker')
@@ -1030,7 +1051,7 @@
     pytypecmd = 'pytype --version'
     version = matchoutput(pytypecmd, b'[0-9a-b.]+')
     sv = distutils.version.StrictVersion
-    return version and sv(_strpath(version.group(0))) >= sv('2019.10.17')
+    return version and sv(_bytes2sys(version.group(0))) >= sv('2019.10.17')
 
 
 @check("rustfmt", "rustfmt tool")
--- a/tests/run-tests.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/run-tests.py	Mon Mar 09 10:18:40 2020 -0700
@@ -143,12 +143,12 @@
     PYTHON3 = True
     xrange = range  # we use xrange in one place, and we'd rather not use range
 
-    def _bytespath(p):
+    def _sys2bytes(p):
         if p is None:
             return p
         return p.encode('utf-8')
 
-    def _strpath(p):
+    def _bytes2sys(p):
         if p is None:
             return p
         return p.decode('utf-8')
@@ -165,34 +165,34 @@
                 self._strenv = strenv
 
             def __getitem__(self, k):
-                v = self._strenv.__getitem__(_strpath(k))
-                return _bytespath(v)
+                v = self._strenv.__getitem__(_bytes2sys(k))
+                return _sys2bytes(v)
 
             def __setitem__(self, k, v):
-                self._strenv.__setitem__(_strpath(k), _strpath(v))
+                self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
 
             def __delitem__(self, k):
-                self._strenv.__delitem__(_strpath(k))
+                self._strenv.__delitem__(_bytes2sys(k))
 
             def __contains__(self, k):
-                return self._strenv.__contains__(_strpath(k))
+                return self._strenv.__contains__(_bytes2sys(k))
 
             def __iter__(self):
-                return iter([_bytespath(k) for k in iter(self._strenv)])
+                return iter([_sys2bytes(k) for k in iter(self._strenv)])
 
             def get(self, k, default=None):
-                v = self._strenv.get(_strpath(k), _strpath(default))
-                return _bytespath(v)
+                v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
+                return _sys2bytes(v)
 
             def pop(self, k, default=None):
-                v = self._strenv.pop(_strpath(k), _strpath(default))
-                return _bytespath(v)
+                v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
+                return _sys2bytes(v)
 
         osenvironb = environbytes(os.environ)
 
     getcwdb = getattr(os, 'getcwdb')
     if not getcwdb or os.name == 'nt':
-        getcwdb = lambda: _bytespath(os.getcwd())
+        getcwdb = lambda: _sys2bytes(os.getcwd())
 
 elif sys.version_info >= (3, 0, 0):
     print(
@@ -207,10 +207,10 @@
     # bytestrings by default, so we don't have to do any extra
     # fiddling there. We define the wrapper functions anyway just to
     # help keep code consistent between platforms.
-    def _bytespath(p):
+    def _sys2bytes(p):
         return p
 
-    _strpath = _bytespath
+    _bytes2sys = _sys2bytes
     osenvironb = os.environ
     getcwdb = os.getcwd
 
@@ -274,10 +274,10 @@
 def Popen4(cmd, wd, timeout, env=None):
     processlock.acquire()
     p = subprocess.Popen(
-        _strpath(cmd),
+        _bytes2sys(cmd),
         shell=True,
         bufsize=-1,
-        cwd=_strpath(wd),
+        cwd=_bytes2sys(wd),
         env=env,
         close_fds=closefds,
         stdin=subprocess.PIPE,
@@ -315,7 +315,7 @@
 else:
     raise AssertionError('Could not find Python interpreter')
 
-PYTHON = _bytespath(sysexecutable.replace('\\', '/'))
+PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
 IMPL_PATH = b'PYTHONPATH'
 if 'java' in sys.platform:
     IMPL_PATH = b'JYTHONPATH'
@@ -555,12 +555,6 @@
         help="use pure Python code instead of C extensions",
     )
     hgconf.add_argument(
-        "-3",
-        "--py3-warnings",
-        action="store_true",
-        help="enable Py3k warnings on Python 2.7+",
-    )
-    hgconf.add_argument(
         "--with-chg",
         metavar="CHG",
         help="use specified chg wrapper in place of hg",
@@ -646,7 +640,7 @@
     if options.local:
         if options.with_hg or options.with_chg:
             parser.error('--local cannot be used with --with-hg or --with-chg')
-        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
+        testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
         reporootdir = os.path.dirname(testdir)
         pathandattrs = [(b'hg', 'with_hg')]
         if options.chg:
@@ -658,10 +652,10 @@
                     '--local specified, but %r not found or '
                     'not executable' % binpath
                 )
-            setattr(options, attr, _strpath(binpath))
+            setattr(options, attr, _bytes2sys(binpath))
 
     if options.with_hg:
-        options.with_hg = canonpath(_bytespath(options.with_hg))
+        options.with_hg = canonpath(_sys2bytes(options.with_hg))
         if not (
             os.path.isfile(options.with_hg)
             and os.access(options.with_hg, os.X_OK)
@@ -675,7 +669,7 @@
         parser.error('chg does not work on %s' % os.name)
     if options.with_chg:
         options.chg = False  # no installation to temporary location
-        options.with_chg = canonpath(_bytespath(options.with_chg))
+        options.with_chg = canonpath(_sys2bytes(options.with_chg))
         if not (
             os.path.isfile(options.with_chg)
             and os.access(options.with_chg, os.X_OK)
@@ -748,9 +742,6 @@
             )
         options.timeout = 0
         options.slowtimeout = 0
-    if options.py3_warnings:
-        if PYTHON3:
-            parser.error('--py3-warnings can only be used on Python 2.7')
 
     if options.blacklist:
         options.blacklist = parselistfiles(options.blacklist, 'blacklist')
@@ -909,7 +900,6 @@
         timeout=None,
         startport=None,
         extraconfigopts=None,
-        py3warnings=False,
         shell=None,
         hgcommand=None,
         slowtimeout=None,
@@ -942,8 +932,6 @@
         must have the form "key=value" (something understood by hgrc). Values
         of the form "foo.key=value" will result in "[foo] key=value".
 
-        py3warnings enables Py3k warnings.
-
         shell is the shell to execute tests in.
         """
         if timeout is None:
@@ -954,7 +942,7 @@
             slowtimeout = defaults['slowtimeout']
         self.path = path
         self.bname = os.path.basename(path)
-        self.name = _strpath(self.bname)
+        self.name = _bytes2sys(self.bname)
         self._testdir = os.path.dirname(path)
         self._outputdir = outputdir
         self._tmpname = os.path.basename(path)
@@ -968,8 +956,7 @@
         self._slowtimeout = slowtimeout
         self._startport = startport
         self._extraconfigopts = extraconfigopts or []
-        self._py3warnings = py3warnings
-        self._shell = _bytespath(shell)
+        self._shell = _sys2bytes(shell)
         self._hgcommand = hgcommand or b'hg'
         self._usechg = usechg
         self._useipv6 = useipv6
@@ -1178,10 +1165,7 @@
         if self._keeptmpdir:
             log(
                 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
-                % (
-                    self._testtmp.decode('utf-8'),
-                    self._threadtmp.decode('utf-8'),
-                )
+                % (_bytes2sys(self._testtmp), _bytes2sys(self._threadtmp),)
             )
         else:
             try:
@@ -1281,7 +1265,7 @@
         environment."""
         # Put the restoreenv script inside self._threadtmp
         scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
-        testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
+        testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
 
         # Only restore environment variable names that the shell allows
         # us to export.
@@ -1315,15 +1299,15 @@
         env = os.environ.copy()
         env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
         env['HGEMITWARNINGS'] = '1'
-        env['TESTTMP'] = _strpath(self._testtmp)
+        env['TESTTMP'] = _bytes2sys(self._testtmp)
         env['TESTNAME'] = self.name
-        env['HOME'] = _strpath(self._testtmp)
+        env['HOME'] = _bytes2sys(self._testtmp)
         # This number should match portneeded in _getport
         for port in xrange(3):
             # This list should be parallel to _portmap in _getreplacements
             defineport(port)
-        env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
-        env["DAEMON_PIDS"] = _strpath(
+        env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
+        env["DAEMON_PIDS"] = _bytes2sys(
             os.path.join(self._threadtmp, b'daemon.pids')
         )
         env["HGEDITOR"] = (
@@ -1344,7 +1328,7 @@
 
         extraextensions = []
         for opt in self._extraconfigopts:
-            section, key = opt.encode('utf-8').split(b'.', 1)
+            section, key = _sys2bytes(opt).split(b'.', 1)
             if section != 'extensions':
                 continue
             name = key.split(b'=', 1)[0]
@@ -1355,7 +1339,7 @@
 
         # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
         # IP addresses.
-        env['LOCALIP'] = _strpath(self._localip())
+        env['LOCALIP'] = _bytes2sys(self._localip())
 
         # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
         # but this is needed for testing python instances like dummyssh,
@@ -1441,11 +1425,11 @@
             )
             hgrc.write(b'[web]\n')
             hgrc.write(b'address = localhost\n')
-            hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
+            hgrc.write(b'ipv6 = %r\n' % self._useipv6)
             hgrc.write(b'server-header = testing stub value\n')
 
             for opt in self._extraconfigopts:
-                section, key = opt.encode('utf-8').split(b'.', 1)
+                section, key = _sys2bytes(opt).split(b'.', 1)
                 assert b'=' in key, (
                     'extra config opt %s must ' 'have an = for assignment' % opt
                 )
@@ -1464,7 +1448,10 @@
         """
         if self._debug:
             proc = subprocess.Popen(
-                _strpath(cmd), shell=True, cwd=_strpath(self._testtmp), env=env
+                _bytes2sys(cmd),
+                shell=True,
+                cwd=_bytes2sys(self._testtmp),
+                env=env,
             )
             ret = proc.wait()
             return (ret, None)
@@ -1515,9 +1502,8 @@
         return os.path.join(self._testdir, b'%s.out' % self.bname)
 
     def _run(self, env):
-        py3switch = self._py3warnings and b' -3' or b''
         # Quote the python(3) executable for Windows
-        cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
+        cmd = b'"%s" "%s"' % (PYTHON, self.path)
         vlog("# Running", cmd.decode("utf-8"))
         normalizenewlines = os.name == 'nt'
         result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
@@ -1564,7 +1550,7 @@
     NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
 
     ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
-    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
+    ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
     ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
 
     def __init__(self, path, *args, **kwds):
@@ -1575,7 +1561,7 @@
         super(TTest, self).__init__(path, *args, **kwds)
         if case:
             casepath = b'#'.join(case)
-            self.name = '%s#%s' % (self.name, _strpath(casepath))
+            self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
             self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
             self._tmpname += b'-%s' % casepath
         self._have = {}
@@ -1626,7 +1612,7 @@
             return self._have.get(allreqs)
 
         # TODO do something smarter when all other uses of hghave are gone.
-        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
+        runtestdir = os.path.abspath(os.path.dirname(_sys2bytes(__file__)))
         tdir = runtestdir.replace(b'\\', b'/')
         proc = Popen4(
             b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
@@ -2081,12 +2067,10 @@
         for line in lines:
             if line.startswith(TTest.SKIPPED_PREFIX):
                 line = line.splitlines()[0]
-                missing.append(
-                    line[len(TTest.SKIPPED_PREFIX) :].decode('utf-8')
-                )
+                missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
             elif line.startswith(TTest.FAILED_PREFIX):
                 line = line.splitlines()[0]
-                failed.append(line[len(TTest.FAILED_PREFIX) :].decode('utf-8'))
+                failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
 
         return missing, failed
 
@@ -2213,7 +2197,7 @@
                 v = self._options.view
                 subprocess.call(
                     r'"%s" "%s" "%s"'
-                    % (v, _strpath(test.refpath), _strpath(test.errpath)),
+                    % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
                     shell=True,
                 )
             else:
@@ -2535,7 +2519,7 @@
 def savetimes(outputdir, result):
     saved = dict(loadtimes(outputdir))
     maxruns = 5
-    skipped = set([str(t[0]) for t in result.skipped])
+    skipped = {str(t[0]) for t in result.skipped}
     for tdata in result.times:
         test, real = tdata[0], tdata[3]
         if test not in skipped:
@@ -2702,7 +2686,7 @@
             opts = ''
             withhg = self._runner.options.with_hg
             if withhg:
-                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
+                opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
             rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
             data = pread(bisectcmd + ['--command', rtc])
             m = re.search(
@@ -2748,7 +2732,7 @@
     @staticmethod
     def _writexunit(result, outf):
         # See http://llg.cubic.org/docs/junit/ for a reference.
-        timesd = dict((t[0], t[3]) for t in result.times)
+        timesd = {t[0]: t[3] for t in result.times}
         doc = minidom.Document()
         s = doc.createElement('testsuite')
         s.setAttribute('errors', "0")  # TODO
@@ -2944,7 +2928,7 @@
         try:
             parser = parser or getparser()
             options = parseargs(args, parser)
-            tests = [_bytespath(a) for a in options.tests]
+            tests = [_sys2bytes(a) for a in options.tests]
             if options.test_list is not None:
                 for listfile in options.test_list:
                     with open(listfile, 'rb') as f:
@@ -2976,7 +2960,7 @@
                 testdir = os.path.join(testdir, pathname)
         self._testdir = osenvironb[b'TESTDIR'] = testdir
         if self.options.outputdir:
-            self._outputdir = canonpath(_bytespath(self.options.outputdir))
+            self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
         else:
             self._outputdir = getcwdb()
             if testdescs and pathname:
@@ -2993,7 +2977,7 @@
 
         if self.options.tmpdir:
             self.options.keep_tmpdir = True
-            tmpdir = _bytespath(self.options.tmpdir)
+            tmpdir = _sys2bytes(self.options.tmpdir)
             if os.path.exists(tmpdir):
                 # Meaning of tmpdir has changed since 1.3: we used to create
                 # HGTMP inside tmpdir; now HGTMP is tmpdir.  So fail if
@@ -3022,7 +3006,7 @@
             os.makedirs(self._tmpbindir)
 
             normbin = os.path.normpath(os.path.abspath(whg))
-            normbin = normbin.replace(os.sep.encode('ascii'), b'/')
+            normbin = normbin.replace(_sys2bytes(os.sep), b'/')
 
             # Other Python scripts in the test harness need to
             # `import mercurial`. If `hg` is a Python script, we assume
@@ -3071,11 +3055,11 @@
         osenvironb[b"BINDIR"] = self._bindir
         osenvironb[b"PYTHON"] = PYTHON
 
-        fileb = _bytespath(__file__)
+        fileb = _sys2bytes(__file__)
         runtestdir = os.path.abspath(os.path.dirname(fileb))
         osenvironb[b'RUNTESTDIR'] = runtestdir
         if PYTHON3:
-            sepb = _bytespath(os.pathsep)
+            sepb = _sys2bytes(os.pathsep)
         else:
             sepb = os.pathsep
         path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
@@ -3135,14 +3119,14 @@
                 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
             )
 
-        vlog("# Using TESTDIR", _strpath(self._testdir))
-        vlog("# Using RUNTESTDIR", _strpath(osenvironb[b'RUNTESTDIR']))
-        vlog("# Using HGTMP", _strpath(self._hgtmp))
+        vlog("# Using TESTDIR", _bytes2sys(self._testdir))
+        vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
+        vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
         vlog("# Using PATH", os.environ["PATH"])
         vlog(
-            "# Using", _strpath(IMPL_PATH), _strpath(osenvironb[IMPL_PATH]),
+            "# Using", _bytes2sys(IMPL_PATH), _bytes2sys(osenvironb[IMPL_PATH]),
         )
-        vlog("# Writing to directory", _strpath(self._outputdir))
+        vlog("# Writing to directory", _bytes2sys(self._outputdir))
 
         try:
             return self._runtests(testdescs) or 0
@@ -3160,7 +3144,7 @@
             if self.options.changed:
                 proc = Popen4(
                     b'hg st --rev "%s" -man0 .'
-                    % _bytespath(self.options.changed),
+                    % _sys2bytes(self.options.changed),
                     None,
                     0,
                 )
@@ -3354,7 +3338,7 @@
         tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
 
         # extra keyword parameters. 'case' is used by .t tests
-        kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
+        kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
 
         t = testcls(
             refpath,
@@ -3366,7 +3350,6 @@
             timeout=self.options.timeout,
             startport=self._getport(count),
             extraconfigopts=self.options.extra_config_opt,
-            py3warnings=self.options.py3_warnings,
             shell=self.options.shell,
             hgcommand=self._hgcommand,
             usechg=bool(self.options.with_chg or self.options.chg),
@@ -3381,7 +3364,7 @@
         if self.options.keep_tmpdir:
             return
 
-        vlog("# Cleaning up HGTMP", _strpath(self._hgtmp))
+        vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
         shutil.rmtree(self._hgtmp, True)
         for f in self._createdfiles:
             try:
@@ -3449,9 +3432,9 @@
         script = os.path.realpath(sys.argv[0])
         exe = sysexecutable
         if PYTHON3:
-            compiler = _bytespath(compiler)
-            script = _bytespath(script)
-            exe = _bytespath(exe)
+            compiler = _sys2bytes(compiler)
+            script = _sys2bytes(script)
+            exe = _sys2bytes(exe)
         hgroot = os.path.dirname(os.path.dirname(script))
         self._hgroot = hgroot
         os.chdir(hgroot)
@@ -3493,7 +3476,7 @@
         makedirs(self._bindir)
 
         vlog("# Running", cmd.decode("utf-8"))
-        if subprocess.call(_strpath(cmd), shell=True) == 0:
+        if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
             if not self.options.verbose:
                 try:
                     os.remove(installerrs)
@@ -3512,15 +3495,6 @@
 
         self._usecorrectpython()
 
-        if self.options.py3_warnings and not self.options.anycoverage:
-            vlog("# Updating hg command to enable Py3k Warnings switch")
-            with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
-                lines = [line.rstrip() for line in f]
-                lines[0] += ' -3'
-            with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
-                for line in lines:
-                    f.write(line + '\n')
-
         hgbat = os.path.join(self._bindir, b'hg.bat')
         if os.path.isfile(hgbat):
             # hg.bat expects to be put in bin/scripts while run-tests.py
@@ -3582,7 +3556,7 @@
         cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
         cmd = cmd % PYTHON
         if PYTHON3:
-            cmd = _strpath(cmd)
+            cmd = _bytes2sys(cmd)
 
         p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
         out, err = p.communicate()
@@ -3628,33 +3602,33 @@
         # chdir is the easiest way to get short, relative paths in the
         # output.
         os.chdir(self._hgroot)
-        covdir = os.path.join(_strpath(self._installdir), '..', 'coverage')
+        covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
         cov = coverage(data_file=os.path.join(covdir, 'cov'))
 
         # Map install directory paths back to source directory.
-        cov.config.paths['srcdir'] = ['.', _strpath(self._pythondir)]
+        cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
 
         cov.combine()
 
         omit = [
-            _strpath(os.path.join(x, b'*'))
+            _bytes2sys(os.path.join(x, b'*'))
             for x in [self._bindir, self._testdir]
         ]
         cov.report(ignore_errors=True, omit=omit)
 
         if self.options.htmlcov:
-            htmldir = os.path.join(_strpath(self._outputdir), 'htmlcov')
+            htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
             cov.html_report(directory=htmldir, omit=omit)
         if self.options.annotate:
-            adir = os.path.join(_strpath(self._outputdir), 'annotated')
+            adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
             if not os.path.isdir(adir):
                 os.mkdir(adir)
             cov.annotate(directory=adir, omit=omit)
 
     def _findprogram(self, program):
         """Search PATH for a executable program"""
-        dpb = _bytespath(os.defpath)
-        sepb = _bytespath(os.pathsep)
+        dpb = _sys2bytes(os.defpath)
+        sepb = _sys2bytes(os.pathsep)
         for p in osenvironb.get(b'PATH', dpb).split(sepb):
             name = os.path.join(p, program)
             if os.name == 'nt' or os.access(name, os.X_OK):
@@ -3669,7 +3643,7 @@
             found = self._findprogram(p)
             p = p.decode("utf-8")
             if found:
-                vlog("# Found prerequisite", p, "at", _strpath(found))
+                vlog("# Found prerequisite", p, "at", _bytes2sys(found))
             else:
                 print("WARNING: Did not find prerequisite tool: %s " % p)
 
--- a/tests/simplestorerepo.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/simplestorerepo.py	Mon Mar 09 10:18:40 2020 -0700
@@ -588,7 +588,7 @@
             start = nullid
         if stop is None:
             stop = []
-        stoprevs = set([self.rev(n) for n in stop])
+        stoprevs = {self.rev(n) for n in stop}
         startrev = self.rev(start)
         reachable = {startrev}
         heads = {startrev}
--- a/tests/test-acl.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-acl.t	Mon Mar 09 10:18:40 2020 -0700
@@ -109,17 +109,17 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -167,6 +167,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -174,18 +175,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -234,6 +236,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -241,18 +244,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -311,6 +315,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -318,18 +323,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -379,6 +385,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -386,18 +393,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -452,6 +460,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -459,18 +468,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -522,6 +532,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -529,18 +540,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -597,6 +609,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -604,18 +617,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -669,6 +683,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -676,18 +691,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -742,27 +758,29 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   1 changesets found
   list of changesets:
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   bundle2-output-bundle: "HG20", 7 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:bookmarks" 37 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-output-part: "bookmarks" 37 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:bookmarks" supported
   bundle2-input-part: total payload size 37
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -828,27 +846,29 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   1 changesets found
   list of changesets:
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   bundle2-output-bundle: "HG20", 7 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:bookmarks" 37 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-output-part: "bookmarks" 37 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:bookmarks" supported
   bundle2-input-part: total payload size 37
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -914,6 +934,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -921,18 +942,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -998,6 +1020,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1005,18 +1028,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1079,6 +1103,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1086,18 +1111,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1155,6 +1181,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1162,18 +1189,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1242,6 +1270,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1249,18 +1278,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1330,6 +1360,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1337,18 +1368,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1414,6 +1446,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1421,18 +1454,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1494,6 +1528,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1501,18 +1536,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1579,6 +1615,7 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
+  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1586,18 +1623,19 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
+  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1707,18 +1745,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1793,18 +1831,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1871,18 +1909,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1945,18 +1983,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2013,18 +2051,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2104,18 +2142,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2194,18 +2232,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2267,18 +2305,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2351,18 +2389,18 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 205
+  bundle2-input-part: total payload size 224
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "check:heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: "check:updated-heads" supported
+  bundle2-input-part: total payload size 40
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
--- a/tests/test-backout.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-backout.t	Mon Mar 09 10:18:40 2020 -0700
@@ -86,6 +86,33 @@
   commit: 1 unresolved (clean)
   update: (current)
   phases: 5 draft
+  $ hg log -G
+  @  changeset:   4:ed99997b793d
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:05 1970 +0000
+  |  summary:     ypples
+  |
+  o  changeset:   3:1c2161e97c0a
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
+  |  summary:     Backed out changeset 22cb4f70d813
+  |
+  o  changeset:   2:a8c6e511cfee
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
+  |  summary:     grapes
+  |
+  %  changeset:   1:22cb4f70d813
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
+  |  summary:     chair
+  |
+  o  changeset:   0:a5cb2dde5805
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     tomatoes
+  
 
 file that was removed is recreated
 (this also tests that editor is not invoked if the commit message is
@@ -682,23 +709,24 @@
   use 'hg resolve' to retry unresolved file merges
   [1]
   $ hg status
-  $ hg debugmergestate
-  * version 2 records
+  $ hg debugmergestate -v
+  v1 and v2 states match: using v2
   local: b71750c4b0fdf719734971e3ef90dbeab5919a2d
   other: a30dd8addae3ce71b8667868478542bc417439e6
-  file extras: foo (ancestorlinknode = 91360952243723bd5b1138d5f26bd8c8564cb553)
-  file: foo (record type "F", state "u", hash 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33)
-    local path: foo (flags "")
+  file: foo (state "u")
+    local path: foo (hash 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33, flags "")
     ancestor path: foo (node f89532f44c247a0e993d63e3a734dd781ab04708)
     other path: foo (node f50039b486d6fa1a90ae51778388cad161f425ee)
+    extra: ancestorlinknode = 91360952243723bd5b1138d5f26bd8c8564cb553
   $ mv .hg/merge/state2 .hg/merge/state2-moved
-  $ hg debugmergestate
-  * version 1 records
+  $ hg debugmergestate -v
+  no version 2 merge state
   local: b71750c4b0fdf719734971e3ef90dbeab5919a2d
-  file: foo (record type "F", state "u", hash 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33)
-    local path: foo (flags "")
+  other: b71750c4b0fdf719734971e3ef90dbeab5919a2d
+  file: foo (state "u")
+    local path: foo (hash 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33, flags "")
     ancestor path: foo (node f89532f44c247a0e993d63e3a734dd781ab04708)
-    other path: foo (node not stored in v1 format)
+    other path:  (node foo)
   $ mv .hg/merge/state2-moved .hg/merge/state2
   $ hg resolve -l  # still unresolved
   U foo
@@ -709,6 +737,23 @@
   commit: 1 unresolved (clean)
   update: (current)
   phases: 3 draft
+  $ hg log -G
+  @  changeset:   2:b71750c4b0fd
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     capital ten
+  |
+  o  changeset:   1:913609522437
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     capital three
+  |
+  %  changeset:   0:a30dd8addae3
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     initial
+  
   $ hg resolve --all --debug
   picked tool ':merge' for foo (binary False symlink False changedelete False)
   merging foo
--- a/tests/test-bookmarks-pushpull.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-bookmarks-pushpull.t	Mon Mar 09 10:18:40 2020 -0700
@@ -129,10 +129,10 @@
   bundle2-output: bundle parameter: 
   bundle2-output: start of parts
   bundle2-output: bundle part: "replycaps"
-  bundle2-output-part: "replycaps" 222 bytes payload
+  bundle2-output-part: "replycaps" 241 bytes payload
   bundle2-output: part 0: "REPLYCAPS"
   bundle2-output: header chunk size: 16
-  bundle2-output: payload chunk size: 222
+  bundle2-output: payload chunk size: 241
   bundle2-output: closing payload chunk
   bundle2-output: bundle part: "check:bookmarks"
   bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -162,9 +162,9 @@
   bundle2-input: part parameters: 0
   bundle2-input: found a handler for part replycaps
   bundle2-input-part: "replycaps" supported
-  bundle2-input: payload chunk size: 222
+  bundle2-input: payload chunk size: 241
   bundle2-input: payload chunk size: 0
-  bundle2-input-part: total payload size 222
+  bundle2-input-part: total payload size 241
   bundle2-input: part header size: 22
   bundle2-input: part type: "CHECK:BOOKMARKS"
   bundle2-input: part id: "1"
@@ -241,10 +241,10 @@
   bundle2-output: bundle parameter: 
   bundle2-output: start of parts
   bundle2-output: bundle part: "replycaps"
-  bundle2-output-part: "replycaps" 222 bytes payload
+  bundle2-output-part: "replycaps" 241 bytes payload
   bundle2-output: part 0: "REPLYCAPS"
   bundle2-output: header chunk size: 16
-  bundle2-output: payload chunk size: 222
+  bundle2-output: payload chunk size: 241
   bundle2-output: closing payload chunk
   bundle2-output: bundle part: "check:bookmarks"
   bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -275,9 +275,9 @@
   bundle2-input: part parameters: 0
   bundle2-input: found a handler for part replycaps
   bundle2-input-part: "replycaps" supported
-  bundle2-input: payload chunk size: 222
+  bundle2-input: payload chunk size: 241
   bundle2-input: payload chunk size: 0
-  bundle2-input-part: total payload size 222
+  bundle2-input-part: total payload size 241
   bundle2-input: part header size: 22
   bundle2-input: part type: "CHECK:BOOKMARKS"
   bundle2-input: part id: "1"
@@ -328,6 +328,17 @@
 
 #endif
 
+Divergent bookmark cannot be exported
+
+  $ hg book W@default
+  $ hg push -B W@default ../a
+  pushing to ../a
+  searching for changes
+  cannot push divergent bookmark W@default!
+  no changes found
+  [2]
+  $ hg book -d W@default
+
 export the active bookmark
 
   $ hg bookmark V
--- a/tests/test-check-format.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-check-format.t	Mon Mar 09 10:18:40 2020 -0700
@@ -1,5 +1,5 @@
 #require black
 
   $ cd $RUNTESTDIR/..
-  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/** - "contrib/python-zstandard/**"'`
+  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
 
--- a/tests/test-check-interfaces.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-check-interfaces.py	Mon Mar 09 10:18:40 2020 -0700
@@ -252,7 +252,6 @@
     checkzobject(mctx)
 
     # Conforms to imanifestrevisionwritable.
-    checkzobject(mctx.new())
     checkzobject(mctx.copy())
 
     # Conforms to imanifestdict.
--- a/tests/test-check-module-imports.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-check-module-imports.t	Mon Mar 09 10:18:40 2020 -0700
@@ -24,6 +24,7 @@
   > -X contrib/packaging/hg-docker \
   > -X contrib/packaging/hgpackaging/ \
   > -X contrib/packaging/inno/ \
+  > -X contrib/phab-clean.py \
   > -X contrib/python-zstandard/ \
   > -X contrib/win32/hgwebdir_wsgi.py \
   > -X contrib/perf-utils/perf-revlog-write-plot.py \
--- a/tests/test-check-pyflakes.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-check-pyflakes.t	Mon Mar 09 10:18:40 2020 -0700
@@ -20,7 +20,7 @@
   > -X mercurial/thirdparty/concurrent \
   > -X mercurial/thirdparty/zope \
   > 2>/dev/null \
-  > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
+  > | xargs $PYTHON -m pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
   contrib/perf.py:*: undefined name 'xrange' (glob) (?)
   mercurial/hgweb/server.py:*: undefined name 'reload' (glob) (?)
   mercurial/util.py:*: undefined name 'file' (glob) (?)
--- a/tests/test-chg.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-chg.t	Mon Mar 09 10:18:40 2020 -0700
@@ -382,8 +382,8 @@
   YYYY/MM/DD HH:MM:SS (PID)> log -R cached
   YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached (in  ...s)
 
-Test that chg works even when python "coerces" the locale (py3.7+, which is done
-by default if none of LC_ALL, LC_CTYPE, or LANG are set in the environment)
+Test that chg works (it sets LC_CTYPE back to the user's actual value) even
+when python "coerces" the locale (py3.7+)
 
   $ cat > $TESTTMP/debugenv.py <<EOF
   > from mercurial import encoding
@@ -397,9 +397,22 @@
   >         if v is not None:
   >             ui.write(b'%s=%s\n' % (k, encoding.environ[k]))
   > EOF
+(hg keeps python's modified LC_CTYPE, chg doesn't)
+  $ (unset LC_ALL; unset LANG; LC_CTYPE= "$CHGHG" \
+  >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+  LC_CTYPE=C.UTF-8 (py37 !)
+  LC_CTYPE= (no-py37 !)
+  $ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
+  >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+  LC_CTYPE=
+  $ (unset LC_ALL; unset LANG; LC_CTYPE=unsupported_value chg \
+  >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+  LC_CTYPE=unsupported_value
+  $ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
+  >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+  LC_CTYPE=
   $ LANG= LC_ALL= LC_CTYPE= chg \
   >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv
   LC_ALL=
-  LC_CTYPE=C.UTF-8 (py37 !)
-  LC_CTYPE= (no-py37 !)
+  LC_CTYPE=
   LANG=
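A minimal, hypothetical Python sketch of the behaviour exercised by the chg test
above: remember the user's original LC_CTYPE before Python's C.UTF-8 coercion can
replace it, then restore it for the long-lived command server. The name
CHG_SAVED_LC_CTYPE is an assumption for illustration only, not the real
environment variable used by chg.

    import os

    def remember_lc_ctype():
        # Save the user's LC_CTYPE (possibly empty) under a private name
        # before locale coercion can rewrite it.
        os.environ['CHG_SAVED_LC_CTYPE'] = os.environ.get('LC_CTYPE', '')

    def restore_lc_ctype():
        # Put the saved value back so commands see the user's real locale
        # rather than the coerced one.
        saved = os.environ.get('CHG_SAVED_LC_CTYPE')
        if saved is not None:
            os.environ['LC_CTYPE'] = saved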
--- a/tests/test-clone-uncompressed.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-clone-uncompressed.t	Mon Mar 09 10:18:40 2020 -0700
@@ -54,6 +54,8 @@
     changegroup
       01
       02
+    checkheads
+      related
     digests
       md5
       sha1
@@ -120,6 +122,8 @@
     changegroup
       01
       02
+    checkheads
+      related
     digests
       md5
       sha1
--- a/tests/test-clonebundles.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-clonebundles.t	Mon Mar 09 10:18:40 2020 -0700
@@ -455,6 +455,19 @@
   no changes found
   2 local changesets published
 
+Test a bad attribute list
+
+  $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
+  abort: invalid ui.clonebundleprefers item: bad
+  (each comma separated item should be key=value pairs)
+  [255]
+  $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
+  >    -U http://localhost:$HGPORT bad-input
+  abort: invalid ui.clonebundleprefers item: bad
+  (each comma separated item should be key=value pairs)
+  [255]
+
+
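A minimal sketch, assuming ui.clonebundleprefers is a comma-separated list of
key=value items, of the validation the aborts above exercise.
parse_clonebundle_prefers is a hypothetical helper name, not Mercurial's actual
parser.

    def parse_clonebundle_prefers(raw):
        prefers = []
        for item in raw.split(','):
            if not item:
                continue
            if '=' not in item:
                # Mirrors the abort above: every item must be key=value.
                raise ValueError('invalid ui.clonebundleprefers item: %s' % item)
            key, value = item.split('=', 1)
            prefers.append((key, value))
        return prefers

    # parse_clonebundle_prefers('key=val,bad,key2=val2') raises on 'bad',
    # matching the second abort shown above.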
 Test interaction between clone bundles and --stream
 
 A manifest with just a gzip bundle
--- a/tests/test-commit-unresolved.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-commit-unresolved.t	Mon Mar 09 10:18:40 2020 -0700
@@ -60,7 +60,7 @@
   abort: cannot specify a node with --abort
   [255]
   $ hg merge --abort --rev e4501
-  abort: cannot specify both --rev and --abort
+  abort: cannot specify both --abort and --rev
   [255]
 
 #if abortcommand
@@ -144,7 +144,7 @@
   (branch merge, don't forget to commit)
 
   $ hg merge --preview --abort
-  abort: cannot specify --preview with --abort
+  abort: cannot specify both --abort and --preview
   [255]
 
   $ hg abort
--- a/tests/test-completion.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-completion.t	Mon Mar 09 10:18:40 2020 -0700
@@ -75,6 +75,7 @@
   $ hg debugcomplete debug
   debugancestor
   debugapplystreamclonebundle
+  debugbackupbundle
   debugbuilddag
   debugbundle
   debugcapabilities
@@ -107,6 +108,7 @@
   debugmanifestfulltextcache
   debugmergestate
   debugnamecomplete
+  debugnodemap
   debugobsolete
   debugp1copies
   debugp2copies
@@ -128,6 +130,7 @@
   debugssl
   debugsub
   debugsuccessorssets
+  debugtagscache
   debugtemplate
   debuguigetpass
   debuguiprompt
@@ -255,9 +258,10 @@
   commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
   config: untrusted, edit, local, global, template
   continue: dry-run
-  copy: after, force, include, exclude, dry-run
+  copy: forget, after, at-rev, force, include, exclude, dry-run
   debugancestor: 
   debugapplystreamclonebundle: 
+  debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
   debugbuilddag: mergeable-file, overwritten-file, new-file
   debugbundle: all, part-type, spec
   debugcapabilities: 
@@ -287,8 +291,9 @@
   debuglabelcomplete: 
   debuglocks: force-lock, force-wlock, set-lock, set-wlock
   debugmanifestfulltextcache: clear, add
-  debugmergestate: 
+  debugmergestate: style, template
   debugnamecomplete: 
+  debugnodemap: dump-new, dump-disk, check, metadata
   debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
   debugp1copies: rev
   debugp2copies: rev
@@ -310,6 +315,7 @@
   debugssl: 
   debugsub: rev
   debugsuccessorssets: closest
+  debugtagscache: 
   debugtemplate: rev, define
   debuguigetpass: prompt
   debuguiprompt: prompt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-copies-chain-merge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,869 @@
+=====================================================
+Test Copy tracing for chain of copies involving merge
+=====================================================
+
+This test file covers copies/rename cases for chains of commits where merges
+are involved. It checks that we do not have unwanted changes of behavior and
+that the different options to retrieve copies behave correctly.
+
+Setup
+=====
+
+use git diff to see rename
+
+  $ cat << EOF >> $HGRCPATH
+  > [diff]
+  > git=yes
+  > [ui]
+  > logtemplate={rev} {desc}]\n
+  > EOF
+
+  $ hg init repo-chain
+  $ cd repo-chain
+
+Add some linear renames initially
+
+  $ touch a b h
+  $ hg ci -Am 'i-0 initial commit: a b h'
+  adding a
+  adding b
+  adding h
+  $ hg mv a c
+  $ hg ci -Am 'i-1: a -move-> c'
+  $ hg mv c d
+  $ hg ci -Am 'i-2: c -move-> d'
+  $ hg log -G
+  @  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+And having another branch with renames on the other side
+
+  $ hg mv d e
+  $ hg ci -Am 'a-1: d -move-> e'
+  $ hg mv e f
+  $ hg ci -Am 'a-2: e -move-> f'
+  $ hg log -G --rev '::.'
+  @  4 a-2: e -move-> f]
+  |
+  o  3 a-1: d -move-> e]
+  |
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Have a branching with nothing on one side
+
+  $ hg up 'desc("i-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo foo > b
+  $ hg ci -m 'b-1: b update'
+  created new head
+  $ hg log -G --rev '::.'
+  @  5 b-1: b update]
+  |
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+
+Merge the two branches we just defined (in both directions)
+- one with change to an unrelated file
+- one with renames in it
+
+  $ hg up 'desc("b-1")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("a-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mBAm-0 simple merge - one way'
+  $ hg up 'desc("a-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mABm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mABm")+desc("mBAm"))'
+  @    7 mABm-0 simple merge - the other way]
+  |\
+  +---o  6 mBAm-0 simple merge - one way]
+  | |/
+  | o  5 b-1: b update]
+  | |
+  o |  4 a-2: e -move-> f]
+  | |
+  o |  3 a-1: d -move-> e]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Create a branch that deletes a previously renamed file
+
+  $ hg up 'desc("i-2")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg rm d
+  $ hg ci -m 'c-1 delete d'
+  created new head
+  $ hg log -G --rev '::.'
+  @  8 c-1 delete d]
+  |
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Merge:
+- one with change to an unrelated file
+- one deleting the change
+and recreating an unrelated file after the merge
+
+  $ hg up 'desc("b-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("c-1")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mBCm-0 simple merge - one way'
+  $ echo bar > d
+  $ hg add d
+  $ hg ci -m 'mBCm-1 re-add d'
+  $ hg up 'desc("c-1")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mCBm-0 simple merge - the other way'
+  created new head
+  $ echo bar > d
+  $ hg add d
+  $ hg ci -m 'mCBm-1 re-add d'
+  $ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
+  @  12 mCBm-1 re-add d]
+  |
+  o    11 mCBm-0 simple merge - the other way]
+  |\
+  | | o  10 mBCm-1 re-add d]
+  | | |
+  +---o  9 mBCm-0 simple merge - one way]
+  | |/
+  | o  8 c-1 delete d]
+  | |
+  o |  5 b-1: b update]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Create a branch that deletes a previously renamed file and recreates it
+
+  $ hg up 'desc("i-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg rm d
+  $ hg ci -m 'd-1 delete d'
+  created new head
+  $ echo bar > d
+  $ hg add d
+  $ hg ci -m 'd-2 re-add d'
+  $ hg log -G --rev '::.'
+  @  14 d-2 re-add d]
+  |
+  o  13 d-1 delete d]
+  |
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Merge:
+- one with change to an unrelated file
+- one deleting and recreating the file
+
+Note:
+| In this case, the merge gets conflicting information since on one side we have
+| a "brand new" d, and on the other one we have "d renamed from c (itself
+| renamed from a)".
+|
+| The current code arbitrarily picks one side.
+
+  $ hg up 'desc("b-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("d-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mBDm-0 simple merge - one way'
+  $ hg up 'desc("d-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mDBm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
+  @    16 mDBm-0 simple merge - the other way]
+  |\
+  +---o  15 mBDm-0 simple merge - one way]
+  | |/
+  | o  14 d-2 re-add d]
+  | |
+  | o  13 d-1 delete d]
+  | |
+  o |  5 b-1: b update]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Have another branch renaming a different file to the same filename as another
+
+  $ hg up 'desc("i-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg mv b g
+  $ hg ci -m 'e-1 b -move-> g'
+  created new head
+  $ hg mv g f
+  $ hg ci -m 'e-2 g -move-> f'
+  $ hg log -G --rev '::.'
+  @  18 e-2 g -move-> f]
+  |
+  o  17 e-1 b -move-> g]
+  |
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Merge:
+- the "e-" branch renaming b to f (through 'g')
+- the "a-" branch renaming d to f (through e)
+
+  $ hg up 'desc("a-2")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("e-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mAEm-0 simple merge - one way'
+  $ hg up 'desc("e-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("a-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mEAm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
+  @    20 mEAm-0 simple merge - the other way]
+  |\
+  +---o  19 mAEm-0 simple merge - one way]
+  | |/
+  | o  18 e-2 g -move-> f]
+  | |
+  | o  17 e-1 b -move-> g]
+  | |
+  o |  4 a-2: e -move-> f]
+  | |
+  o |  3 a-1: d -move-> e]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Note:
+| In this case, one of the merges wrongly records a merge while there is none.
+| This leads to bad copy tracing information being dug up.
+
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (d) with a rename (from h to i to d)
+
+  $ hg up 'desc("i-2")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg mv h i
+  $ hg commit -m "f-1: rename h -> i"
+  created new head
+  $ hg mv --force i d
+  $ hg commit -m "f-2: rename i -> d"
+  $ hg debugindex d
+     rev linkrev nodeid       p1           p2
+       0       2 01c2f5eabdc4 000000000000 000000000000
+       1      10 b004912a8510 000000000000 000000000000
+       2      15 0bb5445dc4d0 01c2f5eabdc4 b004912a8510
+       3      22 c72365ee036f 000000000000 000000000000
+  $ hg up 'desc("b-1")'
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("f-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mBFm-0 simple merge - one way'
+  $ hg up 'desc("f-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mFBm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mBFm")+desc("mFBm"))'
+  @    24 mFBm-0 simple merge - the other way]
+  |\
+  +---o  23 mBFm-0 simple merge - one way]
+  | |/
+  | o  22 f-2: rename i -> d]
+  | |
+  | o  21 f-1: rename h -> i]
+  | |
+  o |  5 b-1: b update]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+Merge:
+- one with change to a file
+- one deleting and recreating the file
+
+  $ hg up 'desc("i-2")'
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo "some update" >> d
+  $ hg commit -m "g-1: update d"
+  created new head
+  $ hg up 'desc("d-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("g-1")' --tool :union
+  merging d
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mDGm-0 simple merge - one way'
+  $ hg up 'desc("g-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("d-2")' --tool :union
+  merging d
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mGDm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mDGm")+desc("mGDm"))'
+  @    27 mGDm-0 simple merge - the other way]
+  |\
+  +---o  26 mDGm-0 simple merge - one way]
+  | |/
+  | o  25 g-1: update d]
+  | |
+  o |  14 d-2 re-add d]
+  | |
+  o |  13 d-1 delete d]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+
+Merge:
+- one with change to a file (d)
+- one overwriting that file with a rename (from h to i, to d)
+
+  $ hg up 'desc("f-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("g-1")' --tool :union
+  merging d
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mFGm-0 simple merge - one way'
+  created new head
+  $ hg up 'desc("g-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("f-2")' --tool :union
+  merging d
+  0 files updated, 1 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mGFm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mGFm")+desc("mFGm"))'
+  @    29 mGFm-0 simple merge - the other way]
+  |\
+  +---o  28 mFGm-0 simple merge - one way]
+  | |/
+  | o  25 g-1: update d]
+  | |
+  o |  22 f-2: rename i -> d]
+  | |
+  o |  21 f-1: rename h -> i]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+
+Check results
+=============
+
+merging with an unrelated change does not interfere with the renames
+--------------------------------------------------------------------
+
+- rename on one side
+- unrelated change on the other side
+
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mABm")'
+  A f
+    d
+  R d
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBAm")'
+  A f
+    d
+  R d
+  $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mABm")'
+  M b
+  $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mBAm")'
+  M b
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mABm")'
+  M b
+  A f
+    d
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBAm")'
+  M b
+  A f
+    d
+  R d
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mABm")'
+  M b
+  A f
+    a
+  R a
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBAm")'
+  M b
+  A f
+    a
+  R a
+
+merging with the side having a delete
+-------------------------------------
+
+case summary:
+- one with change to an unrelated file
+- one deleting the change
+and recreating an unrelated file after the merge
+
+checks:
+- comparing from the merge
+
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBCm-0")'
+  R d
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mCBm-0")'
+  R d
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mBCm-0")'
+  M b
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCBm-0")'
+  M b
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBCm-0")'
+  M b
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mCBm-0")'
+  M b
+  R d
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-0")'
+  M b
+  R a
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-0")'
+  M b
+  R a
+
+- comparing with the merge children re-adding the file
+
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBCm-1")'
+  M d
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mCBm-1")'
+  M d
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mBCm-1")'
+  M b
+  A d
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCBm-1")'
+  M b
+  A d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBCm-1")'
+  M b
+  M d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mCBm-1")'
+  M b
+  M d
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-1")'
+  M b
+  A d
+  R a
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-1")'
+  M b
+  A d
+  R a
+
+Comparing with a merge re-adding the file afterward
+---------------------------------------------------
+
+Merge:
+- one with change to an unrelated file
+- one deleting and recreating the change
+
+Note:
+| In this case, one of the merges wrongly records a merge while there is none.
+| This leads to bad copy tracing information being dug up.
+
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBDm-0")'
+  M d
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mDBm-0")'
+  M d
+  $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mBDm-0")'
+  M b
+  M d
+  $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mDBm-0")'
+  M b
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBDm-0")'
+  M b
+  M d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mDBm-0")'
+  M b
+  M d
+
+The bug makes the recorded copy different depending on where we started the merge from:
+
+  $ hg manifest --debug --rev 'desc("mBDm-0")' | grep '644   d'
+  0bb5445dc4d02f4e0d86cf16f9f3a411d0f17744 644   d
+  $ hg manifest --debug --rev 'desc("mDBm-0")' | grep '644   d'
+  b004912a8510032a0350a74daa2803dadfb00e12 644   d
+
+The 0bb5445dc4d02f4e0d86cf16f9f3a411d0f17744 entry is wrong, since the file was
+deleted on one side (then recreated) and untouched on the other side, so no
+"merge" has happened. The resulting `d` file is the untouched version from
+branch `D`, not a merge.
+
+  $ hg manifest --debug --rev 'desc("d-2")' | grep '644   d'
+  b004912a8510032a0350a74daa2803dadfb00e12 644   d
+  $ hg manifest --debug --rev 'desc("b-1")' | grep '644   d'
+  01c2f5eabdc4ce2bdee42b5f86311955e6c8f573 644   d
+  $ hg debugindex d
+     rev linkrev nodeid       p1           p2
+       0       2 01c2f5eabdc4 000000000000 000000000000
+       1      10 b004912a8510 000000000000 000000000000
+       2      15 0bb5445dc4d0 01c2f5eabdc4 b004912a8510
+       3      22 c72365ee036f 000000000000 000000000000
+       4      23 863d9bc49190 01c2f5eabdc4 c72365ee036f
+       5      25 7bded9d9da1f 01c2f5eabdc4 000000000000
+       6      26 f04cac32d703 b004912a8510 7bded9d9da1f
+       7      27 d7a5eafb9322 7bded9d9da1f b004912a8510
+       8      28 2ed7a51aed47 c72365ee036f 7bded9d9da1f
+
+(This `hg log` output is wrong, since no merge actually happened.)
+
+  $ hg log -Gfr 'desc("mBDm-0")' d
+  o    15 mBDm-0 simple merge - one way]
+  |\
+  o :  14 d-2 re-add d]
+  :/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+This `hg log` output is correct
+
+  $ hg log -Gfr 'desc("mDBm-0")' d
+  o  14 d-2 re-add d]
+  |
+  ~
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBDm-0")'
+  M b
+  A d
+    a
+  R a
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDBm-0")'
+  M b
+  A d
+  R a
+
+Comparing with a merge with colliding rename
+--------------------------------------------
+
+- the "e-" branch renaming b to f (through 'g')
+- the "a-" branch renaming d to f (through e)
+
+  $ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644   f'
+  eb806e34ef6be4c264effd5933d31004ad15a793 644   f
+  $ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644   f'
+  eb806e34ef6be4c264effd5933d31004ad15a793 644   f
+  $ hg manifest --debug --rev 'desc("a-2")' | grep '644   f'
+  0dd616bc7ab1a111921d95d76f69cda5c2ac539c 644   f
+  $ hg manifest --debug --rev 'desc("e-2")' | grep '644   f'
+  6da5a2eecb9c833f830b67a4972366d49a9a142c 644   f
+  $ hg debugindex f
+     rev linkrev nodeid       p1           p2
+       0       4 0dd616bc7ab1 000000000000 000000000000
+       1      18 6da5a2eecb9c 000000000000 000000000000
+       2      19 eb806e34ef6b 0dd616bc7ab1 6da5a2eecb9c
+  $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAEm-0")'
+  M f
+  R b
+  $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEAm-0")'
+  M f
+  R b
+  $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAEm-0")'
+  M f
+  R d
+  $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEAm-0")'
+  M f
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
+  A f
+    d
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("e-2")'
+  A f
+    b
+  R b
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mAEm-0")'
+  A f
+    d
+  R b
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mEAm-0")'
+  A f
+    d
+  R b
+  R d
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm-0")'
+  A f
+    a
+  R a
+  R b
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm-0")'
+  A f
+    a
+  R a
+  R b
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (d) with a rename (from h to i to d)
+
+The overwriting should take over. However, the behavior is currently buggy
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBFm-0")'
+  M b
+  A d
+    a (true !)
+    h (false !)
+  R a
+  R h
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFBm-0")'
+  M b
+  A d
+    h
+  R a
+  R h
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBFm-0")'
+  M d
+  R h
+  $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mBFm-0")'
+  M b
+  M d
+  $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mBFm-0")'
+  M b
+  M d
+  R i
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mFBm-0")'
+  M d
+  R h
+  $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFBm-0")'
+  M b
+  $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFBm-0")'
+  M b
+  M d
+  R i
+
+The following graphlog is wrong; the "a -> c -> d" chain was overwritten and should not appear.
+
+  $ hg log -Gfr 'desc("mBFm-0")' d
+  o    23 mBFm-0 simple merge - one way]
+  |\
+  o :  22 f-2: rename i -> d]
+  | :
+  o :  21 f-1: rename h -> i]
+  :/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+The following output is correct.
+
+  $ hg log -Gfr 'desc("mFBm-0")' d
+  o  22 f-2: rename i -> d]
+  |
+  o  21 f-1: rename h -> i]
+  :
+  o  0 i-0 initial commit: a b h]
+  
+
+Merge:
+- one with change to a file
+- one deleting and recreating the file
+
+Unlike in the 'BD/DB' cases, an actual merge happened here, so we should
+consider history and renames on both branches of the merge.
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDGm-0")'
+  A d
+    a
+  R a
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGDm-0")'
+  A d
+    a
+  R a
+  $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mDGm-0")'
+  M d
+  $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mGDm-0")'
+  M d
+  $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mDGm-0")'
+  M d
+  $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGDm-0")'
+  M d
+
+  $ hg log -Gfr 'desc("mDGm-0")' d
+  o    26 mDGm-0 simple merge - one way]
+  |\
+  | o  25 g-1: update d]
+  | |
+  o |  14 d-2 re-add d]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+
+  $ hg log -Gfr 'desc("mDGm-0")' d
+  o    26 mDGm-0 simple merge - one way]
+  |\
+  | o  25 g-1: update d]
+  | |
+  o |  14 d-2 re-add d]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+
+Merge:
+- one with change to a file (d)
+- one overwriting that file with a rename (from h to i, to d)
+
+This case is similar to BF/FB, but an actual merge happens, so both sides of
+the history are relevant.
+
+Note:
+| In this case, the merge gets conflicting information since on one side we have
+| "a -> c -> d" and on the other one we have "h -> i -> d".
+|
+| The current code arbitrarily picks one side.
+
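A hypothetical illustration of the note above (not Mercurial's real copy-tracing
code): both merge parents bring a copy source for `d`, and combining the two copy
maps with a plain dict update keeps whichever side is applied last, so the
recorded source depends on the merge direction.

    one_side = {'d': 'c'}    # history a -> c -> d
    other_side = {'d': 'i'}  # history h -> i -> d

    one_way = {**one_side, **other_side}    # {'d': 'i'}
    other_way = {**other_side, **one_side}  # {'d': 'c'}
    assert one_way != other_way             # the arbitrary choice shows up here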
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm-0")'
+  A d
+    a
+  R a
+  R h
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm-0")'
+  A d
+    a
+  R a
+  R h
+  $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFGm-0")'
+  M d
+  $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mGFm-0")'
+  M d
+  $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFGm-0")'
+  M d
+  R i
+  $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mGFm-0")'
+  M d
+  R i
+  $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mFGm-0")'
+  M d
+  R h
+  $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGFm-0")'
+  M d
+  R h
+
+  $ hg log -Gfr 'desc("mFGm-0")' d
+  o    28 mFGm-0 simple merge - one way]
+  |\
+  | o  25 g-1: update d]
+  | |
+  o |  22 f-2: rename i -> d]
+  | |
+  o |  21 f-1: rename h -> i]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
+
+
+  $ hg log -Gfr 'desc("mGFm-0")' d
+  @    29 mGFm-0 simple merge - the other way]
+  |\
+  | o  25 g-1: update d]
+  | |
+  o |  22 f-2: rename i -> d]
+  | |
+  o |  21 f-1: rename h -> i]
+  |/
+  o  2 i-2: c -move-> d]
+  |
+  o  1 i-1: a -move-> c]
+  |
+  o  0 i-0 initial commit: a b h]
+  
--- a/tests/test-copy-move-merge.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-copy-move-merge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -1,6 +1,19 @@
 Test for the full copytracing algorithm
 =======================================
 
+
+Initial Setup
+=============
+
+use git diff to see rename
+
+  $ cat << EOF >> $HGRCPATH
+  > [diff]
+  > git=yes
+  > EOF
+
+Set up a history where one side copies and renames a file (and updates it) while the other side updates it.
+
   $ hg init t
   $ cd t
 
@@ -22,13 +35,67 @@
 
   $ hg ci -qAm "other"
 
+  $ hg log -G --patch
+  @  changeset:   2:add3f11052fa
+  |  tag:         tip
+  |  parent:      0:b8bf91eeebbc
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     other
+  |
+  |  diff --git a/a b/a
+  |  --- a/a
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |  +0
+  |   1
+  |
+  | o  changeset:   1:17c05bb7fcb6
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     second
+  |
+  |    diff --git a/a b/b
+  |    rename from a
+  |    rename to b
+  |    --- a/a
+  |    +++ b/b
+  |    @@ -1,1 +1,2 @@
+  |     1
+  |    +2
+  |    diff --git a/a b/c
+  |    copy from a
+  |    copy to c
+  |    --- a/a
+  |    +++ b/c
+  |    @@ -1,1 +1,2 @@
+  |     1
+  |    +2
+  |
+  o  changeset:   0:b8bf91eeebbc
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     first
+  
+     diff --git a/a b/a
+     new file mode 100644
+     --- /dev/null
+     +++ b/a
+     @@ -0,0 +1,1 @@
+     +1
+  
+
+Test Simple Merge
+=================
+
   $ hg merge --debug
     unmatched files in other:
      b
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
-     src: 'a' -> dst: 'c' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
+      src: 'a' -> dst: 'c' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -63,8 +130,10 @@
   2
 
 Test disabling copy tracing
+===========================
 
-- first verify copy metadata was kept
+first verify copy metadata was kept
+-----------------------------------
 
   $ hg up -qC 2
   $ hg rebase --keep -d 1 -b 2 --config extensions.rebase=
@@ -77,7 +146,8 @@
   1
   2
 
-- next verify copy metadata is lost when disabled
+next verify copy metadata is lost when disabled
+------------------------------------------------
 
   $ hg strip -r . --config extensions.strip=
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -98,6 +168,7 @@
   $ cd ..
 
 Verify disabling copy tracing still keeps copies from rebase source
+-------------------------------------------------------------------
 
   $ hg init copydisable
   $ cd copydisable
@@ -130,7 +201,14 @@
 
   $ cd ../
 
-Verify we duplicate existing copies, instead of detecting them
+
+test storage preservation
+-------------------------
+
+Verify that rebase does not discard recorded copy data when copy tracing usage
+is disabled.
+
+Setup
 
   $ hg init copydisable3
   $ cd copydisable3
@@ -153,6 +231,12 @@
   |/
   o  0 add a
   
+
+Actual Test
+
+A file is copied on one side and has been moved twice on the other side. The
+file is copied from `0:a`, so the file history of `3:b` should trace directly back to `0:a`.
+
   $ hg rebase -d 2 -s 3 --config extensions.rebase= --config experimental.copytrace=off
   rebasing 3:47e1a9e6273b "copy a->b (2)" (tip)
   saved backup bundle to $TESTTMP/copydisable3/.hg/strip-backup/47e1a9e6273b-2d099c59-rebase.hg
--- a/tests/test-copy.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-copy.t	Mon Mar 09 10:18:40 2020 -0700
@@ -262,5 +262,113 @@
   xyzzy: not overwriting - file exists
   ('hg copy --after' to record the copy)
   [1]
+  $ hg co -qC .
+  $ rm baz xyzzy
+
+
+Test unmarking copy of a single file
+
+# Set up by creating a copy
+  $ hg cp bar baz
+# Test uncopying a non-existent file
+  $ hg copy --forget non-existent
+  non-existent: $ENOENT$
+# Test uncopying a tracked but unrelated file
+  $ hg copy --forget foo
+  foo: not unmarking as copy - file is not marked as copied
+# Test uncopying a copy source
+  $ hg copy --forget bar
+  bar: not unmarking as copy - file is not marked as copied
+# baz should still be marked as a copy
+  $ hg st -C
+  A baz
+    bar
+# Test the normal case
+  $ hg copy --forget baz
+  $ hg st -C
+  A baz
+# Test uncopy with matching and non-matching patterns
+  $ hg cp bar baz --after
+  $ hg copy --forget bar baz
+  bar: not unmarking as copy - file is not marked as copied
+  $ hg st -C
+  A baz
+# Test uncopy with no exact matches
+  $ hg cp bar baz --after
+  $ hg copy --forget .
+  $ hg st -C
+  A baz
+  $ hg forget baz
+  $ rm baz
+
+Test unmarking copy of a directory
+
+  $ mkdir dir
+  $ echo foo > dir/foo
+  $ echo bar > dir/bar
+  $ hg add dir
+  adding dir/bar
+  adding dir/foo
+  $ hg ci -m 'add dir/'
+  $ hg cp dir dir2
+  copying dir/bar to dir2/bar
+  copying dir/foo to dir2/foo
+  $ touch dir2/untracked
+  $ hg copy --forget dir2
+  $ hg st -C
+  A dir2/bar
+  A dir2/foo
+  ? dir2/untracked
+# Clean up for next test
+  $ hg forget dir2
+  removing dir2/bar
+  removing dir2/foo
+  $ rm -r dir2
+
+Test uncopy on committed copies
+
+# Commit some copies
+  $ hg cp bar baz
+  $ hg cp bar qux
+  $ hg ci -m copies
+  $ hg st -C --change .
+  A baz
+    bar
+  A qux
+    bar
+  $ base=$(hg log -r '.^' -T '{rev}')
+  $ hg log -G -T '{rev}:{node|short} {desc}\n' -r $base:
+  @  5:a612dc2edfda copies
+  |
+  o  4:4800b1f1f38e add dir/
+  |
+  ~
+# Add a dirty change on top to show that it's unaffected
+  $ echo dirty >> baz
+  $ hg st
+  M baz
+  $ cat baz
+  bleah
+  dirty
+  $ hg copy --forget --at-rev . baz
+  saved backup bundle to $TESTTMP/part2/.hg/strip-backup/a612dc2edfda-e36b4448-uncopy.hg
+# The unwanted copy is no longer recorded, but the unrelated one is
+  $ hg st -C --change .
+  A baz
+  A qux
+    bar
+# The old commit is gone and we have updated to the new commit
+  $ hg log -G -T '{rev}:{node|short} {desc}\n' -r $base:
+  @  5:c45090e5effe copies
+  |
+  o  4:4800b1f1f38e add dir/
+  |
+  ~
+# Working copy still has the uncommitted change
+  $ hg st
+  M baz
+  $ cat baz
+  bleah
+  dirty
 
   $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-debugbackupbundle.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,39 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > strip=
+  > EOF
+
+Setup repo
+
+  $ hg init repo
+  $ cd repo
+
+Test backups list and recover
+
+  $ hg debugbackupbundle
+  no backup changesets found
+
+  $ mkcommit() {
+  >    echo "$1" > "$1"
+  >    hg add "$1"
+  >    hg ci -l $1
+  > }
+  $ mkcommit a
+  $ mkcommit b
+  $ hg strip .
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d2ae7f538514-2953539b-backup.hg (glob)
+  $ hg debugbackupbundle
+  Recover changesets using: hg debugbackupbundle --recover <changeset hash>
+  
+  Available backup changesets:
+  * (glob)
+  d2ae7f538514 b
+
+  $ hg debugbackupbundle --recover d2ae7f538514
+  Unbundling d2ae7f538514
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets d2ae7f538514 (1 drafts)
--- a/tests/test-debugcommands.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-debugcommands.t	Mon Mar 09 10:18:40 2020 -0700
@@ -615,6 +615,8 @@
     changegroup
       01
       02
+    checkheads
+      related
     digests
       md5
       sha1
@@ -650,7 +652,7 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 440
+  remote: 463
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
--- a/tests/test-double-merge.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-double-merge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -29,7 +29,8 @@
     unmatched files in other:
      bar
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'foo' -> dst: 'bar' *
+     on remote side:
+      src: 'foo' -> dst: 'bar' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
--- a/tests/test-fastannotate-revmap.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-fastannotate-revmap.py	Mon Mar 09 10:18:40 2020 -0700
@@ -165,7 +165,7 @@
     rm2.flush()
 
     # two files should be the same
-    ensure(len(set(util.readfile(p) for p in [path, path2])) == 1)
+    ensure(len({util.readfile(p) for p in [path, path2]}) == 1)
 
     os.unlink(path)
     os.unlink(path2)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastexport.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,855 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastexport=
+  > EOF
+
+  $ hg init
+
+  $ hg debugbuilddag -mon '+2:tbase @name1 +3:thead1 <tbase @name2 +4:thead2 @both /thead1 +2:tmaintip'
+
+  $ hg up -r 10
+  13 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg rm nf10
+  $ hg commit -u debugbuilddag --date 'Thu Jan 01 00:00:12 1970 +0000' -m r12
+  created new head
+  $ hg up -r 11
+  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge -r 12
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg commit -m debugbuilddag --date 'Thu Jan 01 00:00:13 1970 +0000'
+
+  $ hg log -G
+  @    changeset:   13:e5c379648af4
+  |\   branch:      both
+  | |  tag:         tip
+  | |  parent:      11:2cbd52c10e88
+  | |  parent:      12:4f31c9604af6
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:13 1970 +0000
+  | |  summary:     debugbuilddag
+  | |
+  | o  changeset:   12:4f31c9604af6
+  | |  branch:      both
+  | |  parent:      10:9220596cb068
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:12 1970 +0000
+  | |  summary:     r12
+  | |
+  o |  changeset:   11:2cbd52c10e88
+  |/   branch:      both
+  |    tag:         tmaintip
+  |    user:        debugbuilddag
+  |    date:        Thu Jan 01 00:00:11 1970 +0000
+  |    summary:     r11
+  |
+  o  changeset:   10:9220596cb068
+  |  branch:      both
+  |  user:        debugbuilddag
+  |  date:        Thu Jan 01 00:00:10 1970 +0000
+  |  summary:     r10
+  |
+  o    changeset:   9:0767d147d86e
+  |\   branch:      both
+  | |  parent:      8:0d0219415f18
+  | |  parent:      4:e8bc3a6ab9ae
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:09 1970 +0000
+  | |  summary:     r9
+  | |
+  | o  changeset:   8:0d0219415f18
+  | |  branch:      name2
+  | |  tag:         thead2
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:08 1970 +0000
+  | |  summary:     r8
+  | |
+  | o  changeset:   7:82c6c8b3ac68
+  | |  branch:      name2
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:07 1970 +0000
+  | |  summary:     r7
+  | |
+  | o  changeset:   6:94093a13175f
+  | |  branch:      name2
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:06 1970 +0000
+  | |  summary:     r6
+  | |
+  | o  changeset:   5:4baee2f72e9e
+  | |  branch:      name2
+  | |  parent:      1:bf4022f1addd
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:05 1970 +0000
+  | |  summary:     r5
+  | |
+  o |  changeset:   4:e8bc3a6ab9ae
+  | |  branch:      name1
+  | |  tag:         thead1
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:04 1970 +0000
+  | |  summary:     r4
+  | |
+  o |  changeset:   3:46148e496a8a
+  | |  branch:      name1
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:03 1970 +0000
+  | |  summary:     r3
+  | |
+  o |  changeset:   2:29863c4219cd
+  |/   branch:      name1
+  |    user:        debugbuilddag
+  |    date:        Thu Jan 01 00:00:02 1970 +0000
+  |    summary:     r2
+  |
+  o  changeset:   1:bf4022f1addd
+  |  tag:         tbase
+  |  user:        debugbuilddag
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
+  |  summary:     r1
+  |
+  o  changeset:   0:ae6ae30a671b
+     user:        debugbuilddag
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     r0
+  
+
+  $ hg fastexport --export-marks fastexport.marks
+  blob
+  mark :1
+  data 65
+  0 r0
+  1
+  2
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :2
+  data 3
+  r0
+  
+  commit refs/heads/default
+  mark :3
+  committer "debugbuilddag" <debugbuilddag> 0 -0000
+  data 2
+  r0
+  M 644 :1 mf
+  M 644 :2 nf0
+  M 644 :2 of
+  
+  blob
+  mark :4
+  data 68
+  0 r0
+  1
+  2 r1
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :5
+  data 3
+  r1
+  
+  blob
+  mark :6
+  data 3
+  r1
+  
+  commit refs/heads/default
+  mark :7
+  committer "debugbuilddag" <debugbuilddag> 1 -0000
+  data 2
+  r1
+  from :3
+  M 644 :4 mf
+  M 644 :5 nf1
+  M 644 :6 of
+  
+  blob
+  mark :8
+  data 71
+  0 r0
+  1
+  2 r1
+  3
+  4 r2
+  5
+  6
+  7
+  8
+  9
+  10
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :9
+  data 3
+  r2
+  
+  blob
+  mark :10
+  data 3
+  r2
+  
+  commit refs/heads/name1
+  mark :11
+  committer "debugbuilddag" <debugbuilddag> 2 -0000
+  data 2
+  r2
+  from :7
+  M 644 :8 mf
+  M 644 :9 nf2
+  M 644 :10 of
+  
+  blob
+  mark :12
+  data 74
+  0 r0
+  1
+  2 r1
+  3
+  4 r2
+  5
+  6 r3
+  7
+  8
+  9
+  10
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :13
+  data 3
+  r3
+  
+  blob
+  mark :14
+  data 3
+  r3
+  
+  commit refs/heads/name1
+  mark :15
+  committer "debugbuilddag" <debugbuilddag> 3 -0000
+  data 2
+  r3
+  from :11
+  M 644 :12 mf
+  M 644 :13 nf3
+  M 644 :14 of
+  
+  blob
+  mark :16
+  data 77
+  0 r0
+  1
+  2 r1
+  3
+  4 r2
+  5
+  6 r3
+  7
+  8 r4
+  9
+  10
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :17
+  data 3
+  r4
+  
+  blob
+  mark :18
+  data 3
+  r4
+  
+  commit refs/heads/name1
+  mark :19
+  committer "debugbuilddag" <debugbuilddag> 4 -0000
+  data 2
+  r4
+  from :15
+  M 644 :16 mf
+  M 644 :17 nf4
+  M 644 :18 of
+  
+  blob
+  mark :20
+  data 71
+  0 r0
+  1
+  2 r1
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10 r5
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :21
+  data 3
+  r5
+  
+  blob
+  mark :22
+  data 3
+  r5
+  
+  commit refs/heads/name2
+  mark :23
+  committer "debugbuilddag" <debugbuilddag> 5 -0000
+  data 2
+  r5
+  from :7
+  M 644 :20 mf
+  M 644 :21 nf5
+  M 644 :22 of
+  
+  blob
+  mark :24
+  data 74
+  0 r0
+  1
+  2 r1
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10 r5
+  11
+  12 r6
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :25
+  data 3
+  r6
+  
+  blob
+  mark :26
+  data 3
+  r6
+  
+  commit refs/heads/name2
+  mark :27
+  committer "debugbuilddag" <debugbuilddag> 6 -0000
+  data 2
+  r6
+  from :23
+  M 644 :24 mf
+  M 644 :25 nf6
+  M 644 :26 of
+  
+  blob
+  mark :28
+  data 77
+  0 r0
+  1
+  2 r1
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10 r5
+  11
+  12 r6
+  13
+  14 r7
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :29
+  data 3
+  r7
+  
+  blob
+  mark :30
+  data 3
+  r7
+  
+  commit refs/heads/name2
+  mark :31
+  committer "debugbuilddag" <debugbuilddag> 7 -0000
+  data 2
+  r7
+  from :27
+  M 644 :28 mf
+  M 644 :29 nf7
+  M 644 :30 of
+  
+  blob
+  mark :32
+  data 80
+  0 r0
+  1
+  2 r1
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10 r5
+  11
+  12 r6
+  13
+  14 r7
+  15
+  16 r8
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :33
+  data 3
+  r8
+  
+  blob
+  mark :34
+  data 3
+  r8
+  
+  commit refs/heads/name2
+  mark :35
+  committer "debugbuilddag" <debugbuilddag> 8 -0000
+  data 2
+  r8
+  from :31
+  M 644 :32 mf
+  M 644 :33 nf8
+  M 644 :34 of
+  
+  blob
+  mark :36
+  data 92
+  0 r0
+  1
+  2 r1
+  3
+  4 r2
+  5
+  6 r3
+  7
+  8 r4
+  9
+  10 r5
+  11
+  12 r6
+  13
+  14 r7
+  15
+  16 r8
+  17
+  18 r9
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :37
+  data 3
+  r9
+  
+  blob
+  mark :38
+  data 3
+  r9
+  
+  commit refs/heads/both
+  mark :39
+  committer "debugbuilddag" <debugbuilddag> 9 -0000
+  data 2
+  r9
+  from :35
+  merge :19
+  M 644 :36 mf
+  M 644 :9 nf2
+  M 644 :13 nf3
+  M 644 :17 nf4
+  M 644 :37 nf9
+  M 644 :38 of
+  
+  blob
+  mark :40
+  data 96
+  0 r0
+  1
+  2 r1
+  3
+  4 r2
+  5
+  6 r3
+  7
+  8 r4
+  9
+  10 r5
+  11
+  12 r6
+  13
+  14 r7
+  15
+  16 r8
+  17
+  18 r9
+  19
+  20 r10
+  21
+  22
+  23
+  
+  blob
+  mark :41
+  data 4
+  r10
+  
+  blob
+  mark :42
+  data 4
+  r10
+  
+  commit refs/heads/both
+  mark :43
+  committer "debugbuilddag" <debugbuilddag> 10 -0000
+  data 3
+  r10
+  from :39
+  M 644 :40 mf
+  M 644 :41 nf10
+  M 644 :42 of
+  
+  blob
+  mark :44
+  data 100
+  0 r0
+  1
+  2 r1
+  3
+  4 r2
+  5
+  6 r3
+  7
+  8 r4
+  9
+  10 r5
+  11
+  12 r6
+  13
+  14 r7
+  15
+  16 r8
+  17
+  18 r9
+  19
+  20 r10
+  21
+  22 r11
+  23
+  
+  blob
+  mark :45
+  data 4
+  r11
+  
+  blob
+  mark :46
+  data 4
+  r11
+  
+  commit refs/heads/both
+  mark :47
+  committer "debugbuilddag" <debugbuilddag> 11 -0000
+  data 3
+  r11
+  from :43
+  M 644 :44 mf
+  M 644 :45 nf11
+  M 644 :46 of
+  
+  commit refs/heads/both
+  mark :48
+  committer "debugbuilddag" <debugbuilddag> 12 -0000
+  data 3
+  r12
+  from :43
+  D nf10
+  
+  commit refs/heads/both
+  mark :49
+  committer "test" <test> 13 -0000
+  data 13
+  debugbuilddag
+  from :47
+  merge :48
+  D nf10
+  
+  $ cat fastexport.marks
+  e1767c7564f83127d75331428473dd0512b36cc6
+  2c436e3f677d989438ddd9a7e5e4d56e016dfd35
+  ae6ae30a671be09096aaaf51217b3691eec0eee0
+  016f8fd6128ac4bd19ec5a6ae128dadc3873b13f
+  a0e6fc91007068df3bc60f46ce0a893a73189b54
+  1a085e1daf625e186ee0064c64ff41731a901f24
+  bf4022f1addd28523fb1122ac6166a29da58d34c
+  2c45ad1c720111830380baa89a6a16cae1bef688
+  180506669a19f4b8317009fc6fa0043966d1ffb4
+  1ebc486e6a5c2c8ca7e531cf0b63dfcc071ec324
+  29863c4219cd68e0f57aecd5ffc12ba83313f26b
+  d20e5eeac6991189eefad45cd8ea0f6a32ce8122
+  710c4580a600b8aadc63fa3d7bb0fab71b127c04
+  fa27314b56d7b6f90c1caeebb2a74730b3747574
+  46148e496a8a75fde9e203b1ded69ec99289af27
+  e5548c667d7eeb6c326e723c579888341329c9fe
+  3c1407305701051cbed9f9cb9a68bdfb5997c235
+  e2ed51893b0a54bd7fef5a406a0c489d668f19c3
+  e8bc3a6ab9aef589f5db504f401953449a3c3a10
+  558f3a23efc0a1a972e14d5314a65918791b77be
+  0dbd89c185f53a1727c54cd1ce256482fa23968e
+  f84faeb138605b36d74324c6d0ea76a9099c3567
+  4baee2f72e9eeae2aef5b9e1ec416020090672ef
+  412c5793886eaaabb31debd36695f6215a719865
+  a0eafc60760d32b690564b8588ba042cc63e0c74
+  a53842517de32d2f926c38a170c29dc90ae3348a
+  94093a13175f1cfcbbfddaa0ceafbd3a89784b91
+  d2f0d76af0be0da17ec88190215eadb8706689ab
+  639939af794373d6c2ab12c2ef637cd220174389
+  cc8921e2b19a88147643ea825459ffa140e3d704
+  82c6c8b3ac6873fadd9083323b02cc6a53659130
+  c6cc0b14a3e6e61906242d6fce28b9510c9f9208
+  093593169cb4716f94e52ed7561bb84b36b7eb9d
+  034df75dc138e7507e061d26170b4c44321a5d92
+  0d0219415f18c43636163fff4160f41600951a25
+  f13693f6e6052eeb189521945fef56892e812fdb
+  1239c633b8a7a7283825dba9171bf285e5790852
+  34b655bd51e8573b8e85c1c1476a94d8573babef
+  0767d147d86e1546593bda50f1e11276c0ac8f1a
+  284ca43bbbe82e89c0f1d977e8ac6cfb969c05ec
+  15315ab9e272ec81ae8d847996e5bdecd5635b0b
+  78c10aaf21f49d518c7ccb8318c29abb5d4e5db7
+  9220596cb068dfc73e2f7e695dc8ad0858a936db
+  32abd0da49b7c7ee756298fc46a15584d6aedc99
+  33fbc651630ffa7ccbebfe4eb91320a873e7291c
+  868d828870663d075cdcff502d26cf8445ce068e
+  2cbd52c10e88ce604402dc83a869ec4f07765b3d
+  4f31c9604af676986343d775b05695f535e8db5e
+  e5c379648af4c9fa3b5546ab7ee6e61a36082830
+
+  $ hg fastexport --export-marks fastexport.marks2 -r 0
+  blob
+  mark :1
+  data 65
+  0 r0
+  1
+  2
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :2
+  data 3
+  r0
+  
+  commit refs/heads/default
+  mark :3
+  committer "debugbuilddag" <debugbuilddag> 0 -0000
+  data 2
+  r0
+  M 644 :1 mf
+  M 644 :2 nf0
+  M 644 :2 of
+  
+  $ cat fastexport.marks2
+  e1767c7564f83127d75331428473dd0512b36cc6
+  2c436e3f677d989438ddd9a7e5e4d56e016dfd35
+  ae6ae30a671be09096aaaf51217b3691eec0eee0
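A minimal sketch, under the assumption (inferred from the files printed above,
not documented behaviour of the fastexport extension) that the marks file stores
one changeset hash per line with line N corresponding to fast-import mark :N;
read_marks is a hypothetical helper, not part of the extension.

    def read_marks(path):
        # Map mark numbers to changeset hashes, skipping blank lines.
        with open(path) as fh:
            return {
                number: line.strip()
                for number, line in enumerate(fh, start=1)
                if line.strip()
            }

    # read_marks('fastexport.marks2') would yield marks :1 through :3,
    # matching the three hashes listed above.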
+  $ hg fastexport --import-marks fastexport.marks2 -r 1
+  blob
+  mark :4
+  data 68
+  0 r0
+  1
+  2 r1
+  3
+  4
+  5
+  6
+  7
+  8
+  9
+  10
+  11
+  12
+  13
+  14
+  15
+  16
+  17
+  18
+  19
+  20
+  21
+  22
+  23
+  
+  blob
+  mark :5
+  data 3
+  r1
+  
+  blob
+  mark :6
+  data 3
+  r1
+  
+  commit refs/heads/default
+  mark :7
+  committer "debugbuilddag" <debugbuilddag> 1 -0000
+  data 2
+  r1
+  from :3
+  M 644 :4 mf
+  M 644 :5 nf1
+  M 644 :6 of
+  
--- a/tests/test-fncache.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-fncache.t	Mon Mar 09 10:18:40 2020 -0700
@@ -356,7 +356,7 @@
   $ cat .hg/store/fncache | sort
   data/y.i
   data/z.i
-  $ hg recover
+  $ hg recover --verify
   rolling back interrupted transaction
   checking changesets
   checking manifests
--- a/tests/test-graft-interrupted.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-graft-interrupted.t	Mon Mar 09 10:18:40 2020 -0700
@@ -431,7 +431,7 @@
   $ hg log -GT "{rev}:{node|short} {desc}"
   @  6:6ec71c037d94 added x
   |
-  | o  5:36b793615f78 added foo to c
+  | %  5:36b793615f78 added foo to c
   | |
   | | o  4:863a25e1a9ea added x
   | |/
@@ -622,7 +622,7 @@
   $ hg log -GT "{rev}:{node|short} {desc}\n"
   @  4:2aa9ad1006ff B in file a
   |
-  | o  3:09e253b87e17 A in file a
+  | %  3:09e253b87e17 A in file a
   | |
   | o  2:d36c0562f908 c
   | |
@@ -669,7 +669,7 @@
   $ hg log -GT "{rev}:{node|short} {desc}\n"
   @  4:2aa9ad1006ff B in file a
   |
-  | o  3:09e253b87e17 A in file a
+  | %  3:09e253b87e17 A in file a
   | |
   | o  2:d36c0562f908 c
   | |
@@ -712,7 +712,7 @@
   $ hg log -GT "{rev}:{node|short} {desc}\n"
   @  4:2aa9ad1006ff B in file a
   |
-  | o  3:09e253b87e17 A in file a
+  | %  3:09e253b87e17 A in file a
   | |
   | o  2:d36c0562f908 c
   | |
--- a/tests/test-graft.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-graft.t	Mon Mar 09 10:18:40 2020 -0700
@@ -204,7 +204,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
@@ -223,7 +224,8 @@
   updating the branch cache
   grafting 5:97f8bfe72746 "5"
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'c' -> dst: 'b' 
+     on local side:
+      src: 'c' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
@@ -239,7 +241,8 @@
   scanning for duplicate grafts
   grafting 4:9c233e8e184d "4"
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'c' -> dst: 'b' 
+     on local side:
+      src: 'c' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
@@ -746,12 +749,16 @@
   scanning for duplicate grafts
   grafting 13:7a4785234d87 "2"
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: b592ea63bb0c, local: 7e61b508e709+, remote: 7a4785234d87
   starting 4 threads for background file closing (?)
+  nothing to commit, clearing merge state
   note: graft of 13:7a4785234d87 created no changes to commit
   $ hg log -r 'destination(13)'
 All copies of a cset
@@ -807,13 +814,14 @@
   note: graft of 19:9627f653b421 created no changes to commit
   grafting 0:68795b066622 "0"
 
-graft --force after backout
+graft --force after backout. Do the backout with graft too, to make
+sure we support issue6248.
 
   $ echo abc > a
   $ hg ci -m 24
-  $ hg backout 24
-  reverting a
-  changeset 25:71c4e63d4f98 backs out changeset 24:2e7ea477be26
+  $ hg graft --base . -r ".^" --no-commit
+  grafting 23:b1cac6de36a9 "0"
+  $ hg commit -m 'Backed out changeset 2e7ea477be26'
   $ hg graft 24
   skipping ancestor revision 24:2e7ea477be26
   [255]
--- a/tests/test-help.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-help.t	Mon Mar 09 10:18:40 2020 -0700
@@ -364,6 +364,7 @@
        eol           automatically manage newlines in repository files
        extdiff       command to allow external programs to compare revisions
        factotum      http authentication with factotum
+       fastexport    export repositories as git fast-import stream
        githelp       try mapping git commands to Mercurial commands
        gpg           commands to sign and verify changesets
        hgk           browse the repository in a graphical way
@@ -787,6 +788,12 @@
   (use 'hg help extensions' for information on enabling extensions)
   [255]
 
+Checking that help adapts based on the config:
+
+  $ hg help diff --config ui.tweakdefaults=true | egrep -e '^ *(-g|config)'
+   -g --[no-]git            use git extended diff format (default: on from
+                            config)
+
 Make sure that we don't run afoul of the help system thinking that
 this is a section and erroring out weirdly.
 
@@ -966,6 +973,8 @@
                  find the ancestor revision of two revisions in a given index
    debugapplystreamclonebundle
                  apply a stream clone bundle file
+   debugbackupbundle
+                 lists the changesets available in backup bundles
    debugbuilddag
                  builds a repo with a given DAG from scratch in the current
                  empty repo
@@ -1017,6 +1026,7 @@
                  print merge state
    debugnamecomplete
                  complete "names" - tags, open branch names, bookmark names
+   debugnodemap  write and inspect on disk nodemap
    debugobsolete
                  create arbitrary obsolete marker
    debugoptADV   (no help text available)
@@ -1054,6 +1064,8 @@
    debugsub      (no help text available)
    debugsuccessorssets
                  show set of successors for revision
+   debugtagscache
+                 display the contents of .hg/cache/hgtagsfnodes1
    debugtemplate
                  parse and apply a template
    debuguigetpass
--- a/tests/test-hgweb-auth.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-hgweb-auth.py	Mon Mar 09 10:18:40 2020 -0700
@@ -52,7 +52,7 @@
         for name in (b'.username', b'.password'):
             if (p + name) not in auth:
                 auth[p + name] = p
-    auth = dict((k, v) for k, v in auth.items() if v is not None)
+    auth = {k: v for k, v in auth.items() if v is not None}
 
     ui = writeauth(auth)
 
--- a/tests/test-histedit-non-commute-abort.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-histedit-non-commute-abort.t	Mon Mar 09 10:18:40 2020 -0700
@@ -77,36 +77,22 @@
 insert unsupported advisory merge record
   $ hg --config extensions.fakemergerecord=$TESTDIR/fakemergerecord.py fakemergerecord -x
   $ hg debugmergestate
-  * version 2 records
-  local: 8f7551c7e4a2f2efe0bc8c741baf7f227d65d758
-  other: e860deea161a2f77de56603b340ebbb4536308ae
-  labels:
-    local: local
-    other: histedit
-  unrecognized entry: x	advisory record
-  file extras: e (ancestorlinknode = 0000000000000000000000000000000000000000)
-  file: e (record type "F", state "u", hash 58e6b3a414a1e090dfc6029add0f3555ccba127f)
-    local path: e (flags "")
-    ancestor path: e (node null)
+  local (local): 8f7551c7e4a2f2efe0bc8c741baf7f227d65d758
+  other (histedit): e860deea161a2f77de56603b340ebbb4536308ae
+  file: e (state "u")
+    local path: e (hash 58e6b3a414a1e090dfc6029add0f3555ccba127f, flags "")
+    ancestor path: e (node 0000000000000000000000000000000000000000)
     other path: e (node 6b67ccefd5ce6de77e7ead4f5292843a0255329f)
+    extra: ancestorlinknode = 0000000000000000000000000000000000000000
   $ hg resolve -l
   U e
 
 insert unsupported mandatory merge record
   $ hg --config extensions.fakemergerecord=$TESTDIR/fakemergerecord.py fakemergerecord -X
   $ hg debugmergestate
-  * version 2 records
-  local: 8f7551c7e4a2f2efe0bc8c741baf7f227d65d758
-  other: e860deea161a2f77de56603b340ebbb4536308ae
-  labels:
-    local: local
-    other: histedit
-  file extras: e (ancestorlinknode = 0000000000000000000000000000000000000000)
-  file: e (record type "F", state "u", hash 58e6b3a414a1e090dfc6029add0f3555ccba127f)
-    local path: e (flags "")
-    ancestor path: e (node null)
-    other path: e (node 6b67ccefd5ce6de77e7ead4f5292843a0255329f)
-  unrecognized entry: X	mandatory record
+  abort: unsupported merge state records: X
+  (see https://mercurial-scm.org/wiki/MergeStateRecords for more information)
+  [255]
   $ hg resolve -l
   abort: unsupported merge state records: X
   (see https://mercurial-scm.org/wiki/MergeStateRecords for more information)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hooklib-changeset_obsoleted.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,84 @@
+  $ cat <<EOF >> $HGRCPATH
+  > [experimental]
+  > evolution = true
+  > 
+  > [extensions]
+  > notify =
+  > hooklib =
+  > 
+  > [phases]
+  > publish = False
+  > 
+  > [notify]
+  > sources = pull
+  > diffstat = False
+  > messageidseed = example
+  > domain = example.com
+  > 
+  > [reposubs]
+  > * = baz
+  > EOF
+  $ hg init a
+  $ hg --cwd a debugbuilddag +2
+  $ hg init b
+  $ cat <<EOF >> b/.hg/hgrc
+  > [hooks]
+  > incoming.notify = python:hgext.notify.hook
+  > pretxnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook
+  > EOF
+  $ hg --cwd b pull ../a | "$PYTHON" $TESTDIR/unwrap-message-id.py
+  pulling from ../a
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 0 changes to 0 files
+  new changesets 1ea73414a91b:66f7d451a68b (2 drafts)
+  MIME-Version: 1.0
+  Content-Type: text/plain; charset="us-ascii"
+  Content-Transfer-Encoding: 7bit
+  Date: * (glob)
+  Subject: changeset in * (glob)
+  From: debugbuilddag@example.com
+  X-Hg-Notification: changeset 1ea73414a91b
+  Message-Id: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
+  To: baz@example.com
+  
+  changeset 1ea73414a91b in $TESTTMP/b
+  details: $TESTTMP/b?cmd=changeset;node=1ea73414a91b
+  description:
+  	r0
+  MIME-Version: 1.0
+  Content-Type: text/plain; charset="us-ascii"
+  Content-Transfer-Encoding: 7bit
+  Date: * (glob)
+  Subject: changeset in * (glob)
+  From: debugbuilddag@example.com
+  X-Hg-Notification: changeset 66f7d451a68b
+  Message-Id: <hg.364d03da7dc13829eb779a805be7e37f54f572e9afcea7d2626856a794d3e8f3@example.com>
+  To: baz@example.com
+  
+  changeset 66f7d451a68b in $TESTTMP/b
+  details: $TESTTMP/b?cmd=changeset;node=66f7d451a68b
+  description:
+  	r1
+  (run 'hg update' to get a working copy)
+  $ hg --cwd a debugobsolete 1ea73414a91b0920940797d8fc6a11e447f8ea1e
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  1 new orphan changesets
+  $ hg --cwd a push ../b --hidden | "$PYTHON" $TESTDIR/unwrap-message-id.py
+  1 new orphan changesets
+  pushing to ../b
+  searching for changes
+  no changes found
+  Subject: changeset abandoned
+  In-reply-to: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
+  Message-Id: <hg.d6329e9481594f0f3c8a84362b3511318bfbce50748ab1123f909eb6fbcab018@example.com>
+  Date: * (glob)
+  From: test@example.com
+  To: baz@example.com
+  
+  This changeset has been abandoned.
+  1 new obsolescence markers
+  obsoleted 1 changesets
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hooklib-changeset_published.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,84 @@
+  $ cat <<EOF >> $HGRCPATH
+  > [extensions]
+  > notify =
+  > hooklib =
+  > 
+  > [phases]
+  > publish = False
+  > 
+  > [notify]
+  > sources = pull
+  > diffstat = False
+  > messageidseed = example
+  > domain = example.com
+  > 
+  > [reposubs]
+  > * = baz
+  > EOF
+  $ hg init a
+  $ hg --cwd a debugbuilddag .
+  $ hg init b
+  $ cat <<EOF >> b/.hg/hgrc
+  > [hooks]
+  > incoming.notify = python:hgext.notify.hook
+  > txnclose-phase.changeset_published = python:hgext.hooklib.changeset_published.hook
+  > EOF
+  $ hg --cwd b pull ../a | "$PYTHON" $TESTDIR/unwrap-message-id.py
+  pulling from ../a
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 0 changes to 0 files
+  new changesets 1ea73414a91b (1 drafts)
+  MIME-Version: 1.0
+  Content-Type: text/plain; charset="us-ascii"
+  Content-Transfer-Encoding: 7bit
+  Date: * (glob)
+  Subject: changeset in * (glob)
+  From: debugbuilddag@example.com
+  X-Hg-Notification: changeset 1ea73414a91b
+  Message-Id: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
+  To: baz@example.com
+  
+  changeset 1ea73414a91b in $TESTTMP/b
+  details: $TESTTMP/b?cmd=changeset;node=1ea73414a91b
+  description:
+  	r0
+  (run 'hg update' to get a working copy)
+  $ hg --cwd a phase --public 0
+  $ hg --cwd b pull ../a | "$PYTHON" $TESTDIR/unwrap-message-id.py
+  pulling from ../a
+  searching for changes
+  no changes found
+  1 local changesets published
+  Subject: changeset published
+  In-reply-to: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
+  Message-Id: <hg.2ec19bbddee5b542442bf5e1aed97bf706afff6aa765629883fbd1f4edd6fcb0@example.com>
+  Date: * (glob)
+  From: test@example.com
+  To: baz@example.com
+  
+  This changeset has been published.
+  $ hg --cwd b phase --force --draft 0
+  $ cat <<EOF >> b/.hg/hgrc
+  > [notify_published]
+  > messageidseed = example2
+  > domain = alt.example.com
+  > template = Subject: changeset published
+  >            From: hg@example.com\n
+  >            This draft changeset has been published.\n
+  > EOF
+  $ hg --cwd b pull ../a | "$PYTHON" $TESTDIR/unwrap-message-id.py
+  pulling from ../a
+  searching for changes
+  no changes found
+  1 local changesets published
+  Subject: changeset published
+  From: hg@example.com
+  In-reply-to: <hg.e3381dc41c051215e50b1c166a72949d0fff99609eb373420bcb763af80ef230@alt.example.com>
+  Message-Id: <hg.c927f3d324e645a4245bfed20b0efb5b9582999d6be9bef45a37e7ec21208b24@alt.example.com>
+  Date: * (glob)
+  To: baz@example.com
+  
+  This draft changeset has been published.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hooklib-enforce_draft_commits.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,45 @@
+  $ cat <<EOF >> $HGRCPATH
+  > [extensions]
+  > hooklib =
+  > 
+  > [phases]
+  > publish = False
+  > EOF
+  $ hg init a
+  $ hg --cwd a debugbuilddag .
+  $ hg --cwd a phase --public 0
+  $ hg init b
+  $ cat <<EOF >> b/.hg/hgrc
+  > [hooks]
+  > pretxnclose-phase.enforce_draft_commits = \
+  >   python:hgext.hooklib.enforce_draft_commits.hook
+  > EOF
+  $ hg --cwd b pull ../a
+  pulling from ../a
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  error: pretxnclose-phase.enforce_draft_commits hook failed: New changeset 1ea73414a91b in phase 'public' rejected
+  transaction abort!
+  rollback completed
+  abort: New changeset 1ea73414a91b in phase 'public' rejected
+  [255]
+  $ hg --cwd a phase --force --draft 0
+  $ hg --cwd b pull ../a
+  pulling from ../a
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 0 changes to 0 files
+  new changesets 1ea73414a91b (1 drafts)
+  (run 'hg update' to get a working copy)
+  $ hg --cwd a phase --public 0
+  $ hg --cwd b pull ../a
+  pulling from ../a
+  searching for changes
+  no changes found
+  error: pretxnclose-phase.enforce_draft_commits hook failed: Phase change from 'draft' to 'public' for 1ea73414a91b rejected
+  abort: Phase change from 'draft' to 'public' for 1ea73414a91b rejected
+  [255]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hooklib-reject_merge_commits.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,78 @@
+  $ cat <<EOF >> $HGRCPATH
+  > [extensions]
+  > hooklib =
+  > 
+  > [phases]
+  > publish = False
+  > EOF
+  $ hg init a
+  $ hg --cwd a debugbuilddag '.:parent.:childa*parent/childa<parent@otherbranch./childa'
+  $ hg --cwd a log -G
+  o    changeset:   4:a9fb040caedd
+  |\   branch:      otherbranch
+  | |  tag:         tip
+  | |  parent:      3:af739dfc49b4
+  | |  parent:      1:66f7d451a68b
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:04 1970 +0000
+  | |  summary:     r4
+  | |
+  | o  changeset:   3:af739dfc49b4
+  | |  branch:      otherbranch
+  | |  parent:      0:1ea73414a91b
+  | |  user:        debugbuilddag
+  | |  date:        Thu Jan 01 00:00:03 1970 +0000
+  | |  summary:     r3
+  | |
+  +---o  changeset:   2:a6b287721c3b
+  | |/   parent:      0:1ea73414a91b
+  | |    parent:      1:66f7d451a68b
+  | |    user:        debugbuilddag
+  | |    date:        Thu Jan 01 00:00:02 1970 +0000
+  | |    summary:     r2
+  | |
+  o |  changeset:   1:66f7d451a68b
+  |/   tag:         childa
+  |    user:        debugbuilddag
+  |    date:        Thu Jan 01 00:00:01 1970 +0000
+  |    summary:     r1
+  |
+  o  changeset:   0:1ea73414a91b
+     tag:         parent
+     user:        debugbuilddag
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     r0
+  
+  $ hg init b
+  $ cat <<EOF >> b/.hg/hgrc
+  > [hooks]
+  > pretxnchangegroup.reject_merge_commits = \
+  >   python:hgext.hooklib.reject_merge_commits.hook
+  > EOF
+  $ hg --cwd b pull ../a -r a6b287721c3b
+  pulling from ../a
+  adding changesets
+  adding manifests
+  adding file changes
+  error: pretxnchangegroup.reject_merge_commits hook failed: a6b287721c3b rejected as merge on the same branch. Please consider rebase.
+  transaction abort!
+  rollback completed
+  abort: a6b287721c3b rejected as merge on the same branch. Please consider rebase.
+  [255]
+  $ hg --cwd b pull ../a -r 1ea73414a91b
+  pulling from ../a
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 0 changes to 0 files
+  new changesets 1ea73414a91b (1 drafts)
+  (run 'hg update' to get a working copy)
+  $ hg --cwd b pull ../a -r a9fb040caedd
+  pulling from ../a
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 0 changes to 0 files
+  new changesets 66f7d451a68b:a9fb040caedd (3 drafts)
+  (run 'hg update' to get a working copy)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hooklib-reject_new_heads.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,53 @@
+  $ cat <<EOF >> $HGRCPATH
+  > [extensions]
+  > hooklib =
+  > 
+  > [phases]
+  > publish = False
+  > EOF
+  $ hg init a
+  $ hg --cwd a debugbuilddag '.:parent.*parent'
+  $ hg --cwd a log -G
+  o  changeset:   2:fa942426a6fd
+  |  tag:         tip
+  |  parent:      0:1ea73414a91b
+  |  user:        debugbuilddag
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
+  |  summary:     r2
+  |
+  | o  changeset:   1:66f7d451a68b
+  |/   user:        debugbuilddag
+  |    date:        Thu Jan 01 00:00:01 1970 +0000
+  |    summary:     r1
+  |
+  o  changeset:   0:1ea73414a91b
+     tag:         parent
+     user:        debugbuilddag
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     r0
+  
+  $ hg init b
+  $ cat <<EOF >> b/.hg/hgrc
+  > [hooks]
+  > pretxnclose.reject_new_heads = \
+  >   python:hgext.hooklib.reject_new_heads.hook
+  > EOF
+  $ hg --cwd b pull ../a
+  pulling from ../a
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  error: pretxnclose.reject_new_heads hook failed: Changes on branch 'default' resulted in multiple heads
+  transaction abort!
+  rollback completed
+  abort: Changes on branch 'default' resulted in multiple heads
+  [255]
+  $ hg --cwd b pull ../a -r 1ea73414a91b
+  pulling from ../a
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 0 changes to 0 files
+  new changesets 1ea73414a91b (1 drafts)
+  (run 'hg update' to get a working copy)
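
The hooklib tests above all wire the shipped hooks in through `[hooks]` entries of the form `pretxnclose.<name> = python:hgext.hooklib.<module>.hook`. For orientation only (not part of this changeset), a minimal custom hook using the same in-process calling convention is sketched below; the file name, hook name, and policy are hypothetical, and raising error.Abort is what yields the "transaction abort! / rollback completed" behaviour seen in the tests above.

# myhooks.py -- illustrative sketch, not part of this changeset.
# Enable with something like:
#   [hooks]
#   pretxnclose.require_issue = python:/path/to/myhooks.py:hook
from mercurial import error

def hook(ui, repo, hooktype, **kwargs):
    """Abort the transaction when the tip description lacks an issue id.

    In-process hooks are called with the ui, the repo and the hook type;
    raising error.Abort fails a pretxn* hook and rolls back the transaction.
    """
    if b'issue' not in repo[b'tip'].description():
        raise error.Abort(b'commit message must reference an issue')
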
--- a/tests/test-http-bad-server.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-http-bad-server.t	Mon Mar 09 10:18:40 2020 -0700
@@ -15,6 +15,8 @@
   > sparse-revlog = no
   > [devel]
   > legacy.exchange = phases
+  > [server]
+  > concurrent-push-mode = strict
   > EOF
 
   $ hg init server0
--- a/tests/test-http-protocol.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-http-protocol.t	Mon Mar 09 10:18:40 2020 -0700
@@ -321,7 +321,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending heads command
   s> setsockopt(6, 1, 1) -> None (?)
   s>     POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
@@ -437,7 +437,7 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 480\r\n
+  s>     Content-Length: 503\r\n
   s>     \r\n
   s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
@@ -474,7 +474,7 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 480\r\n
+  s>     Content-Length: 503\r\n
   s>     \r\n
   real URL is http://$LOCALIP:$HGPORT/redirected (glob)
   s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
@@ -745,7 +745,7 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 480\r\n
+  s>     Content-Length: 503\r\n
   s>     \r\n
   real URL is http://$LOCALIP:$HGPORT/redirected (glob)
   s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
--- a/tests/test-http.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-http.t	Mon Mar 09 10:18:40 2020 -0700
@@ -320,20 +320,20 @@
   list of changesets:
   7f4e523d01f2cc3765ac8934da3d14db775ff872
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 205 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   sending unbundle command
-  sending 1013 bytes
+  sending 1040 bytes
   devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
-  devel-peer-request:   Content-length 1013
+  devel-peer-request:   Content-length 1040
   devel-peer-request:   Content-type application/mercurial-0.1
   devel-peer-request:   Vary X-HgArg-1,X-HgProto-1
   devel-peer-request:   X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
   devel-peer-request:   16 bytes of commands arguments in headers
-  devel-peer-request:   1013 bytes of data
+  devel-peer-request:   1040 bytes of data
   devel-peer-request:   finished in *.???? seconds (200) (glob)
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
--- a/tests/test-infinitepush-ci.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-infinitepush-ci.t	Mon Mar 09 10:18:40 2020 -0700
@@ -50,7 +50,7 @@
       6cb0989601f1  added a
 
   $ scratchnodes
-  6cb0989601f1fb5805238edfb16f3606713d9a0b a4c202c147a9c4bb91bbadb56321fc5f3950f7f2
+  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
 
 Understanding how data is stored on the bundlestore in server
 -------------------------------------------------------------
@@ -61,8 +61,8 @@
   index
 
 filebundlestore stores the bundles
-  $ ls ../repo/.hg/scratchbranches/filebundlestore/a4/c2/
-  a4c202c147a9c4bb91bbadb56321fc5f3950f7f2
+  $ ls ../repo/.hg/scratchbranches/filebundlestore/3b/41/
+  3b414252ff8acab801318445d88ff48faf4a28c3
 
 index/nodemap stores a map of node id and file in which bundle is stored in filebundlestore
   $ ls ../repo/.hg/scratchbranches/index/
@@ -82,7 +82,7 @@
 Applying the changeset from the bundlestore
 --------------------------------------------
 
-  $ hg unbundle .hg/scratchbranches/filebundlestore/a4/c2/a4c202c147a9c4bb91bbadb56321fc5f3950f7f2
+  $ hg unbundle .hg/scratchbranches/filebundlestore/3b/41/3b414252ff8acab801318445d88ff48faf4a28c3
   adding changesets
   adding manifests
   adding file changes
@@ -133,9 +133,9 @@
 
 Both of the new changesets are stored in a single bundle-file
   $ scratchnodes
-  6cb0989601f1fb5805238edfb16f3606713d9a0b a4c202c147a9c4bb91bbadb56321fc5f3950f7f2
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a ee41a41cefb7817cbfb235b4f6e9f27dbad6ca1f
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 ee41a41cefb7817cbfb235b4f6e9f27dbad6ca1f
+  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
+  bf8a6e3011b345146bbbedbcb1ebd4837571492a 239585f5e61f0c09ce7106bdc1097bff731738f4
+  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 239585f5e61f0c09ce7106bdc1097bff731738f4
 
 Pushing more changesets to the server
 -------------------------------------
@@ -158,11 +158,11 @@
 
 Sneak peek into the bundlestore at the server
   $ scratchnodes
-  1bb96358eda285b536c6d1c66846a7cdb2336cea 57e00c0d4f26e2a2a72b751b63d9abc4f3eb28e7
-  6cb0989601f1fb5805238edfb16f3606713d9a0b a4c202c147a9c4bb91bbadb56321fc5f3950f7f2
-  b4e4bce660512ad3e71189e14588a70ac8e31fef 57e00c0d4f26e2a2a72b751b63d9abc4f3eb28e7
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a 57e00c0d4f26e2a2a72b751b63d9abc4f3eb28e7
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 57e00c0d4f26e2a2a72b751b63d9abc4f3eb28e7
+  1bb96358eda285b536c6d1c66846a7cdb2336cea 98fbae0016662521b0007da1b7bc349cd3caacd1
+  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
+  b4e4bce660512ad3e71189e14588a70ac8e31fef 98fbae0016662521b0007da1b7bc349cd3caacd1
+  bf8a6e3011b345146bbbedbcb1ebd4837571492a 98fbae0016662521b0007da1b7bc349cd3caacd1
+  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 98fbae0016662521b0007da1b7bc349cd3caacd1
 
 Checking if `hg pull` pulls something or `hg incoming` shows something
 -----------------------------------------------------------------------
@@ -309,14 +309,14 @@
 
   $ cd ../repo
   $ scratchnodes
-  1bb96358eda285b536c6d1c66846a7cdb2336cea 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  6cb0989601f1fb5805238edfb16f3606713d9a0b a4c202c147a9c4bb91bbadb56321fc5f3950f7f2
-  9b42578d44473575994109161430d65dd147d16d 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  b4e4bce660512ad3e71189e14588a70ac8e31fef 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 0a6e70ecd5b98d22382f69b93909f557ac6a9927
+  1bb96358eda285b536c6d1c66846a7cdb2336cea 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
+  9b42578d44473575994109161430d65dd147d16d 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  b4e4bce660512ad3e71189e14588a70ac8e31fef 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  bf8a6e3011b345146bbbedbcb1ebd4837571492a 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 280a46a259a268f0e740c81c5a7751bdbfaec85f
 
-  $ hg unbundle .hg/scratchbranches/filebundlestore/0a/6e/0a6e70ecd5b98d22382f69b93909f557ac6a9927
+  $ hg unbundle .hg/scratchbranches/filebundlestore/28/0a/280a46a259a268f0e740c81c5a7751bdbfaec85f
   adding changesets
   adding manifests
   adding file changes
@@ -392,13 +392,13 @@
   $ cd ../repo
 
   $ scratchnodes
-  1bb96358eda285b536c6d1c66846a7cdb2336cea 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  6cb0989601f1fb5805238edfb16f3606713d9a0b a4c202c147a9c4bb91bbadb56321fc5f3950f7f2
+  1bb96358eda285b536c6d1c66846a7cdb2336cea 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
   99949238d9ac7f2424a33a46dface6f866afd059 090a24fe63f31d3b4bee714447f835c8c362ff57
-  9b42578d44473575994109161430d65dd147d16d 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  b4e4bce660512ad3e71189e14588a70ac8e31fef 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a 0a6e70ecd5b98d22382f69b93909f557ac6a9927
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 0a6e70ecd5b98d22382f69b93909f557ac6a9927
+  9b42578d44473575994109161430d65dd147d16d 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  b4e4bce660512ad3e71189e14588a70ac8e31fef 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  bf8a6e3011b345146bbbedbcb1ebd4837571492a 280a46a259a268f0e740c81c5a7751bdbfaec85f
+  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 280a46a259a268f0e740c81c5a7751bdbfaec85f
 
   $ hg glog
   o  6:9b42578d4447 added f
--- a/tests/test-install.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-install.t	Mon Mar 09 10:18:40 2020 -0700
@@ -2,12 +2,14 @@
   $ hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
   checking Python security support (*) (glob)
     TLS 1.2 not supported by Python install; network connections lack modern security (?)
     SNI not supported by Python install; may have connectivity issues with some servers (?)
+  checking Rust extensions \((installed|missing)\) (re)
   checking Mercurial version (*) (glob)
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
@@ -43,6 +45,7 @@
     "hgverextra": "*", (glob)
     "problems": 0,
     "pythonexe": "*", (glob)
+    "pythonimplementation": "*", (glob)
     "pythonlib": "*", (glob)
     "pythonsecurity": [*], (glob)
     "pythonver": "*.*.*", (glob)
@@ -58,12 +61,14 @@
   $ HGUSER= hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
   checking Python security support (*) (glob)
     TLS 1.2 not supported by Python install; network connections lack modern security (?)
     SNI not supported by Python install; may have connectivity issues with some servers (?)
+  checking Rust extensions \((installed|missing)\) (re)
   checking Mercurial version (*) (glob)
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
@@ -103,12 +108,14 @@
   $ HGEDITOR="~/tools/testeditor.exe" hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
   checking Python security support (*) (glob)
     TLS 1.2 not supported by Python install; network connections lack modern security (?)
     SNI not supported by Python install; may have connectivity issues with some servers (?)
+  checking Rust extensions \((installed|missing)\) (re)
   checking Mercurial version (*) (glob)
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
@@ -128,12 +135,14 @@
   $ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
   checking Python security support (*) (glob)
     TLS 1.2 not supported by Python install; network connections lack modern security (?)
     SNI not supported by Python install; may have connectivity issues with some servers (?)
+  checking Rust extensions \((installed|missing)\) (re)
   checking Mercurial version (*) (glob)
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
@@ -185,9 +194,11 @@
   $ ./installenv/*/hg debuginstall || cat pip.log
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (3.*) (glob)
   checking Python lib (*)... (glob)
   checking Python security support (*) (glob)
+  checking Rust extensions \((installed|missing)\) (re)
   checking Mercurial version (*) (glob)
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
@@ -221,11 +232,13 @@
   $ ./installenv/*/hg debuginstall || cat pip.log
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob)
   checking Python lib (*)... (glob)
   checking Python security support (*) (glob)
     TLS 1.2 not supported by Python install; network connections lack modern security (?)
     SNI not supported by Python install; may have connectivity issues with some servers (?)
+  checking Rust extensions \((installed|missing)\) (re)
   checking Mercurial version (*) (glob)
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
--- a/tests/test-issue1802.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-issue1802.t	Mon Mar 09 10:18:40 2020 -0700
@@ -52,8 +52,6 @@
 Simulate a Windows merge:
 
   $ hg --config extensions.n=$TESTTMP/noexec.py merge --debug
-    unmatched files in local:
-     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: a03b0deabf2b, local: d6fa54f68ae1+, remote: 2d8bcf2dda39
--- a/tests/test-issue522.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-issue522.t	Mon Mar 09 10:18:40 2020 -0700
@@ -25,8 +25,6 @@
   $ hg ci -qAm 'add bar'
 
   $ hg merge --debug
-    unmatched files in local:
-     bar
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: bbd179dfa0a7, local: 71766447bdbb+, remote: 4d9e78aaceee
--- a/tests/test-issue672.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-issue672.t	Mon Mar 09 10:18:40 2020 -0700
@@ -28,7 +28,8 @@
     unmatched files in other:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1' -> dst: '1a' 
+     on remote side:
+      src: '1' -> dst: '1a' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -56,7 +57,8 @@
     unmatched files in local:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1' -> dst: '1a' *
+     on local side:
+      src: '1' -> dst: '1a' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -78,7 +80,8 @@
     unmatched files in other:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1' -> dst: '1a' *
+     on remote side:
+      src: '1' -> dst: '1a' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
--- a/tests/test-journal-exists.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-journal-exists.t	Mon Mar 09 10:18:40 2020 -0700
@@ -15,11 +15,7 @@
 
   $ hg recover
   rolling back interrupted transaction
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  (verify step skipped, run `hg verify` to check your repository content)
 
 recover, explicit verify
 
--- a/tests/test-lfs-bundle.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-lfs-bundle.t	Mon Mar 09 10:18:40 2020 -0700
@@ -95,3 +95,32 @@
   OK
   ---- Applying src-lfs.bundle to dst-lfs ----
   OK
+
+Hint if the cache location cannot be inferred from the environment
+
+#if windows
+  $ unset LOCALAPPDATA
+  $ unset APPDATA
+  $ HGRCPATH= hg config lfs --debug
+  abort: unknown lfs usercache location
+  (define LOCALAPPDATA or APPDATA in the environment, or set lfs.usercache)
+  [255]
+#endif
+
+#if osx
+  $ unset HOME
+  $ HGRCPATH= hg config lfs --debug
+  abort: unknown lfs usercache location
+  (define HOME in the environment, or set lfs.usercache)
+  [255]
+#endif
+
+#if no-windows no-osx
+  $ unset XDG_CACHE_HOME
+  $ unset HOME
+  $ HGRCPATH= hg config lfs --debug
+  abort: unknown lfs usercache location
+  (define XDG_CACHE_HOME or HOME in the environment, or set lfs.usercache)
+  [255]
+#endif
+
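
The hunk above spells out which environment variables the lfs extension consults before aborting with "unknown lfs usercache location". Purely as a hedged sketch (not part of this changeset), that per-platform fallback order can be summarised as below; only the variable order comes from the test, the directory name appended to it is a guess, and the authoritative logic lives in the lfs extension itself.

# Illustrative sketch of the fallback order exercised above; the 'lfs'
# directory name is a guess, only the environment-variable order is taken
# from the test.
import os
import sys

def guess_lfs_usercache():
    if sys.platform == 'win32':        # Windows: LOCALAPPDATA, then APPDATA
        candidates = ['LOCALAPPDATA', 'APPDATA']
    elif sys.platform == 'darwin':     # macOS: HOME
        candidates = ['HOME']
    else:                              # other platforms: XDG_CACHE_HOME, HOME
        candidates = ['XDG_CACHE_HOME', 'HOME']
    for var in candidates:
        base = os.environ.get(var)
        if base:
            return os.path.join(base, 'lfs')
    raise LookupError('unknown lfs usercache location; set lfs.usercache')
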
--- a/tests/test-lfs-serve-access.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-lfs-serve-access.t	Mon Mar 09 10:18:40 2020 -0700
@@ -17,6 +17,7 @@
   $ hg init server
   $ hg --config "lfs.usercache=$TESTTMP/servercache" \
   >    --config experimental.lfs.serve=False -R server serve -d \
+  >    --config experimental.lfs.worker-enable=False \
   >    -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
   $ cat hg.pid >> $DAEMON_PIDS
 
@@ -65,7 +66,7 @@
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
 
   $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
@@ -165,7 +166,7 @@
   $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
 
@@ -311,7 +312,7 @@
   $ cat $TESTTMP/access.log
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
@@ -330,7 +331,7 @@
   $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
@@ -481,7 +482,7 @@
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
--- a/tests/test-lfs-serve.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-lfs-serve.t	Mon Mar 09 10:18:40 2020 -0700
@@ -65,6 +65,7 @@
   > debugprocessors = $TESTTMP/debugprocessors.py
   > [experimental]
   > lfs.disableusercache = True
+  > lfs.worker-enable = False
   > [lfs]
   > threshold=10
   > [web]
--- a/tests/test-lfs-test-server.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-lfs-test-server.t	Mon Mar 09 10:18:40 2020 -0700
@@ -40,6 +40,8 @@
 #endif
 
   $ cat >> $HGRCPATH <<EOF
+  > [experimental]
+  > lfs.worker-enable = False
   > [extensions]
   > lfs=
   > [lfs]
@@ -294,7 +296,7 @@
   bundle2-output-bundle: "HG20", 5 parts total
   bundle2-output-part: "replycaps" * bytes payload (glob)
   bundle2-output-part: "check:phases" 24 bytes payload
-  bundle2-output-part: "check:heads" streamed payload
+  bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
@@ -302,7 +304,7 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "check:heads" supported
+  bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size 20
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
--- a/tests/test-manifest.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-manifest.py	Mon Mar 09 10:18:40 2020 -0700
@@ -171,7 +171,7 @@
         self.assertEqual(want, m[b'foo'])
         # make sure the suffix survives a copy
         match = matchmod.match(util.localpath(b'/repo'), b'', [b're:foo'])
-        m2 = m.matches(match)
+        m2 = m._matches(match)
         self.assertEqual(want, m2[b'foo'])
         self.assertEqual(1, len(m2))
         m2 = m.copy()
@@ -196,7 +196,7 @@
 
         match.matchfn = filt
         with self.assertRaises(AssertionError):
-            m.matches(match)
+            m._matches(match)
 
     def testRemoveItem(self):
         m = self.parsemanifest(A_SHORT_MANIFEST)
@@ -300,7 +300,7 @@
         m = self.parsemanifest(A_HUGE_MANIFEST)
 
         match = matchmod.exact([b'file1', b'file200', b'file300'])
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % (
             HASH_2,
@@ -318,7 +318,7 @@
         match = matchmod.exact(
             [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
         )
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual(
             [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys()
@@ -332,7 +332,7 @@
         match = matchmod.match(
             util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
         )
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual([], m2.keys())
 
@@ -343,7 +343,7 @@
 
         flist = m.keys()[80:300]
         match = matchmod.exact(flist)
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual(flist, m2.keys())
 
@@ -352,7 +352,7 @@
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
         match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual(m.keys(), m2.keys())
 
@@ -364,7 +364,7 @@
         match = matchmod.match(
             util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
         )
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual(
             [
@@ -388,7 +388,7 @@
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
         match = matchmod.exact([b'a/b'])
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual([], m2.keys())
 
@@ -400,7 +400,7 @@
         match = matchmod.match(
             util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
         )
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual(
             [
@@ -423,7 +423,7 @@
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
         match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
-        m2 = m.matches(match)
+        m2 = m._matches(match)
 
         self.assertEqual(
             [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys()
--- a/tests/test-merge-changedelete.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-merge-changedelete.t	Mon Mar 09 10:18:40 2020 -0700
@@ -76,27 +76,23 @@
   U file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -145,27 +141,23 @@
   R file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "r", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "r")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -227,27 +219,23 @@
   R file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "r", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "r")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   *** file1 does not exist
   --- file2 ---
   2
@@ -293,27 +281,23 @@
   U file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   *** file1 does not exist
   --- file2 ---
   2
@@ -346,27 +330,23 @@
   R file2
   R file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "r", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "r")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "r", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "r")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -395,27 +375,23 @@
   R file2
   R file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "r", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "r")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "r", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "r")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   *** file1 does not exist
   --- file2 ---
   2
@@ -445,27 +421,23 @@
   U file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -506,27 +478,23 @@
   U file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -569,27 +537,23 @@
   U file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -629,27 +593,23 @@
   U file2
   U file3
   --- debugmergestate ---
-  * version 2 records
-  local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4
+  other (merge rev): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
-  file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11)
-    local path: file3 (flags "")
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file3 (state "u")
+    local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
     other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -802,22 +762,18 @@
   U file1
   U file2
   --- debugmergestate ---
-  * version 2 records
-  local: ab57bf49aa276a22d35a473592d4c34b5abc3eff
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: destination
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  other (destination): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -845,22 +801,18 @@
   R file1
   R file2
   --- debugmergestate ---
-  * version 2 records
-  local: ab57bf49aa276a22d35a473592d4c34b5abc3eff
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: destination
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  other (destination): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "r", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "r")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -886,22 +838,18 @@
   R file1
   R file2
   --- debugmergestate ---
-  * version 2 records
-  local: ab57bf49aa276a22d35a473592d4c34b5abc3eff
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: destination
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  other (destination): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "r", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "r")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   *** file1 does not exist
   --- file2 ---
   2
@@ -929,22 +877,18 @@
   U file1
   U file2
   --- debugmergestate ---
-  * version 2 records
-  local: ab57bf49aa276a22d35a473592d4c34b5abc3eff
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: destination
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  other (destination): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -980,22 +924,18 @@
   U file1
   U file2
   --- debugmergestate ---
-  * version 2 records
-  local: ab57bf49aa276a22d35a473592d4c34b5abc3eff
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: destination
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  other (destination): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
@@ -1032,22 +972,18 @@
   U file1
   U file2
   --- debugmergestate ---
-  * version 2 records
-  local: ab57bf49aa276a22d35a473592d4c34b5abc3eff
-  other: 10f9a0a634e82080907e62f075ab119cbc565ea6
-  labels:
-    local: working copy
-    other: destination
-  file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  other (destination): 10f9a0a634e82080907e62f075ab119cbc565ea6
+  file: file1 (state "u")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
-    other path: file1 (node null)
-  file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff)
-  file: file2 (record type "C", state "u", hash null)
-    local path: file2 (flags "")
+    other path: file1 (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+  file: file2 (state "u")
+    local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
+    extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
   --- file1 ---
   1
   changed
--- a/tests/test-merge-criss-cross.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-merge-criss-cross.t	Mon Mar 09 10:18:40 2020 -0700
@@ -410,11 +410,6 @@
   note: merging c0ef19750a22+ and 6ca01f7342b9 using bids from ancestors 11b5b303e36c and 154e6000f54e
   
   calculating bids for ancestor 11b5b303e36c
-    unmatched files in local:
-     d1/a
-     d1/b
-    unmatched files in other:
-     d2/b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 11b5b303e36c, local: c0ef19750a22+, remote: 6ca01f7342b9
@@ -424,7 +419,8 @@
     unmatched files in other:
      d2/b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'd1/b' -> dst: 'd2/b' 
+     on remote side:
+      src: 'd1/b' -> dst: 'd2/b' 
     checking for directory renames
      discovered dir src: 'd1/' -> dst: 'd2/'
   resolving manifests
--- a/tests/test-merge2.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-merge2.t	Mon Mar 09 10:18:40 2020 -0700
@@ -50,4 +50,8 @@
   adding b
   created new head
 
+  $ hg merge 'wdir()'
+  abort: merging with the working copy has no effect
+  [255]
+
   $ cd ..
--- a/tests/test-merge4.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-merge4.t	Mon Mar 09 10:18:40 2020 -0700
@@ -23,3 +23,37 @@
   abort: cannot commit merge with missing files
   [255]
 
+
+Test conflict*() revsets
+
+# Bad usage
+  $ hg log -r 'conflictlocal(foo)'
+  hg: parse error: conflictlocal takes no arguments
+  [255]
+  $ hg log -r 'conflictother(foo)'
+  hg: parse error: conflictother takes no arguments
+  [255]
+  $ hg co -C .
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+# No merge parents when not merging
+  $ hg log -r 'conflictlocal() + conflictother()'
+# No merge parents when there is no conflict
+  $ hg merge 1
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg log -r 'conflictlocal() + conflictother()'
+  $ hg co -C .
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo conflict > b
+  $ hg ci -Aqm 'conflicting change to b'
+  $ hg merge 1
+  merging b
+  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+# Shows merge parents when there is a conflict
+  $ hg log -r 'conflictlocal()' -T '{rev} {desc}\n'
+  3 conflicting change to b
+  $ hg log -r 'conflictother()' -T '{rev} {desc}\n'
+  1 commit #1
--- a/tests/test-mq-merge.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-mq-merge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -132,7 +132,6 @@
   patch didn't work out, merging patcha
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   0 files updated, 2 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
   applying patcha2
   now at: patcha2
 
--- a/tests/test-pathconflicts-merge.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-pathconflicts-merge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -75,6 +75,12 @@
   M a/b/c/d
   A a/b~0ed027b96f31
   R a/b
+  $ hg debugmergestate
+  local (working copy): 0ed027b96f31a2560c8abe689ba59876409a2b8e
+  other (merge rev): 9049d9534d5c5d16264aab02b4b9e20d03faabef
+  file: a/b (state "pu")
+    rename side: l
+    renamed path: a/b~0ed027b96f31
   $ hg resolve --all
   a/b: path conflict must be resolved manually
   $ hg forget a/b~0ed027b96f31 && rm a/b~0ed027b96f31
@@ -106,6 +112,12 @@
   $ hg mv a/b~2ea68033e3be a/b.old
   $ hg resolve --mark a/b
   (no more unresolved files)
+  $ hg debugmergestate
+  local (working copy): 2ea68033e3be03a560471c1fc9e5704fbedb9b4b
+  other (merge rev): 9049d9534d5c5d16264aab02b4b9e20d03faabef
+  file: a/b (state "pr")
+    rename side: l
+    renamed path: a/b~2ea68033e3be
   $ hg resolve --list
   R a/b
   $ hg commit -m "merge link and dir (renamed link)"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-persistent-nodemap.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,146 @@
+===================================
+Test the persistent on-disk nodemap
+===================================
+
+
+  $ hg init test-repo
+  $ cd test-repo
+  $ cat << EOF >> .hg/hgrc
+  > [experimental]
+  > exp-persistent-nodemap=yes
+  > [devel]
+  > persistent-nodemap=yes
+  > EOF
+  $ hg debugbuilddag .+5000
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5000
+  data-length: 122880
+  data-unused: 0
+  $ f --size .hg/store/00changelog.n
+  .hg/store/00changelog.n: size=42
+
+Simple lookup works
+
+  $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
+  $ hg log -r "$ANYNODE" --template '{rev}\n'
+  5000
+
+  $ f --sha256 .hg/store/00changelog-*.nd
+  .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
+  $ hg debugnodemap --dump-new | f --sha256 --size
+  size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+  $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
+  size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+  0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
+  0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
+  0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
+  0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
+  0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
+  00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
+  00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
+  00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  $ hg debugnodemap --check
+  revision in index:   5001
+  revision in nodemap: 5001
+
+add a new commit
+
+  $ hg up
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo foo > foo
+  $ hg add foo
+  $ hg ci -m 'foo'
+
+#if pure
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5001
+  data-length: 123072
+  data-unused: 192
+#else
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5001
+  data-length: 122880
+  data-unused: 0
+#endif
+  $ f --size .hg/store/00changelog.n
+  .hg/store/00changelog.n: size=42
+
+(The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
+
+#if pure
+  $ f --sha256 .hg/store/00changelog-*.nd --size
+  .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
+
+#else
+  $ f --sha256 .hg/store/00changelog-*.nd --size
+  .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
+
+#endif
+
+  $ hg debugnodemap --check
+  revision in index:   5002
+  revision in nodemap: 5002
+
+Test code path without mmap
+---------------------------
+
+  $ echo bar > bar
+  $ hg add bar
+  $ hg ci -m 'bar' --config experimental.exp-persistent-nodemap.mmap=no
+
+  $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=yes
+  revision in index:   5003
+  revision in nodemap: 5003
+  $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=no
+  revision in index:   5003
+  revision in nodemap: 5003
+
+
+#if pure
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5002
+  data-length: 123328
+  data-unused: 384
+  $ f --sha256 .hg/store/00changelog-*.nd --size
+  .hg/store/00changelog-????????????????.nd: size=123328, sha256=10d26e9776b6596af0f89143a54eba8cc581e929c38242a02a7b0760698c6c70 (glob)
+
+#else
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5002
+  data-length: 122944
+  data-unused: 0
+  $ f --sha256 .hg/store/00changelog-*.nd --size
+  .hg/store/00changelog-????????????????.nd: size=122944, sha256=755976b22b64ab680401b45395953504e64e7fa8c31ac570f58dee21e15f9bc0 (glob)
+#endif
+
+Test force warming the cache
+
+  $ rm .hg/store/00changelog.n
+  $ hg debugnodemap --metadata
+  $ hg debugupdatecache
+#if pure
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5002
+  data-length: 122944
+  data-unused: 0
+#else
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5002
+  data-length: 122944
+  data-unused: 0
+#endif
--- a/tests/test-phabricator.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-phabricator.t	Mon Mar 09 10:18:40 2020 -0700
@@ -29,6 +29,21 @@
   >  --test-vcr "$VCR/phabread-conduit-error.json" D4480 | head
   abort: Conduit Error (ERR-INVALID-AUTH): API token "cli-notavalidtoken" has the wrong length. API tokens should be 32 characters long.
 
+Missing arguments print the command help
+
+  $ hg phabread
+  hg phabread: invalid arguments
+  hg phabread DREVSPEC [OPTIONS]
+  
+  print patches from Phabricator suitable for importing
+  
+  options:
+  
+    --stack read dependencies
+  
+  (use 'hg phabread -h' to show more help)
+  [255]
+
 Basic phabread:
   $ hg phabread --test-vcr "$VCR/phabread-4480.json" D4480 | head
   # HG changeset patch
--- a/tests/test-purge.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-purge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -120,19 +120,32 @@
   directory/untracked_file
   $ rm directory/untracked_file
 
-skip ignored files if --all not specified
+skip ignored files if -i or --all not specified
 
   $ touch ignored
   $ hg purge -p
   $ hg purge -v
+  $ touch untracked_file
   $ ls
   directory
   ignored
   r1
+  untracked_file
+  $ hg purge -p -i
+  ignored
+  $ hg purge -v -i
+  removing file ignored
+  $ ls
+  directory
+  r1
+  untracked_file
+  $ touch ignored
   $ hg purge -p --all
   ignored
+  untracked_file
   $ hg purge -v --all
   removing file ignored
+  removing file untracked_file
   $ ls
   directory
   r1
--- a/tests/test-push-race.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-push-race.t	Mon Mar 09 10:18:40 2020 -0700
@@ -119,11 +119,11 @@
 
 #testcases strict unrelated
 
-#if unrelated
+#if strict
 
   $ cat >> $HGRCPATH << EOF
   > [server]
-  > concurrent-push-mode = check-related
+  > concurrent-push-mode = strict
   > EOF
 
 #endif
--- a/tests/test-rebase-abort.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-abort.t	Mon Mar 09 10:18:40 2020 -0700
@@ -88,18 +88,13 @@
 
   $ hg --config extensions.fakemergerecord=$TESTDIR/fakemergerecord.py fakemergerecord -x
   $ hg debugmergestate
-  * version 2 records
-  local: 3e046f2ecedb793b97ed32108086edd1a162f8bc
-  other: 46f0b057b5c061d276b91491c22151f78698abd2
-  labels:
-    local: dest
-    other: source
-  unrecognized entry: x	advisory record
-  file extras: common (ancestorlinknode = 3163e20567cc93074fbb7a53c8b93312e59dbf2c)
-  file: common (record type "F", state "u", hash 94c8c21d08740f5da9eaa38d1f175c592692f0d1)
-    local path: common (flags "")
+  local (dest): 3e046f2ecedb793b97ed32108086edd1a162f8bc
+  other (source): 46f0b057b5c061d276b91491c22151f78698abd2
+  file: common (state "u")
+    local path: common (hash 94c8c21d08740f5da9eaa38d1f175c592692f0d1, flags "")
     ancestor path: common (node de0a666fdd9c1a0b0698b90d85064d8bd34f74b6)
     other path: common (node 2f6411de53677f6f1048fef5bf888d67a342e0a5)
+    extra: ancestorlinknode = 3163e20567cc93074fbb7a53c8b93312e59dbf2c
   $ hg resolve -l
   U common
 
@@ -107,18 +102,9 @@
 
   $ hg --config extensions.fakemergerecord=$TESTDIR/fakemergerecord.py fakemergerecord -X
   $ hg debugmergestate
-  * version 2 records
-  local: 3e046f2ecedb793b97ed32108086edd1a162f8bc
-  other: 46f0b057b5c061d276b91491c22151f78698abd2
-  labels:
-    local: dest
-    other: source
-  file extras: common (ancestorlinknode = 3163e20567cc93074fbb7a53c8b93312e59dbf2c)
-  file: common (record type "F", state "u", hash 94c8c21d08740f5da9eaa38d1f175c592692f0d1)
-    local path: common (flags "")
-    ancestor path: common (node de0a666fdd9c1a0b0698b90d85064d8bd34f74b6)
-    other path: common (node 2f6411de53677f6f1048fef5bf888d67a342e0a5)
-  unrecognized entry: X	mandatory record
+  abort: unsupported merge state records: X
+  (see https://mercurial-scm.org/wiki/MergeStateRecords for more information)
+  [255]
   $ hg resolve -l
   abort: unsupported merge state records: X
   (see https://mercurial-scm.org/wiki/MergeStateRecords for more information)
@@ -236,7 +222,7 @@
   [1]
 
   $ hg tglog
-  @  4:draft 'C1'
+  %  4:draft 'C1'
   |
   o  3:draft 'B bis'
   |
--- a/tests/test-rebase-collapse.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-collapse.t	Mon Mar 09 10:18:40 2020 -0700
@@ -486,61 +486,6 @@
   abort: cannot collapse multiple named branches
   [255]
 
-  $ repeatchange() {
-  >   hg checkout $1
-  >   hg cp d z
-  >   echo blah >> z
-  >   hg commit -Am "$2" --user "$3"
-  > }
-  $ repeatchange 3 "E" "user1"
-  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ repeatchange 3 "E" "user2"
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  created new head
-  $ hg tglog
-  @  5: fbfb97b1089a 'E'
-  |
-  | o  4: f338eb3c2c7c 'E'
-  |/
-  o  3: 41acb9dca9eb 'D'
-  |
-  | o  2: 8ac4a08debf1 'C' two
-  | |
-  | o  1: 1ba175478953 'B' one
-  |/
-  o  0: 1994f17a630e 'A'
-  
-  $ hg rebase -s 5 -d 4
-  rebasing 5:fbfb97b1089a "E" (tip)
-  note: not rebasing 5:fbfb97b1089a "E" (tip), its destination already has all its changes
-  saved backup bundle to $TESTTMP/e/.hg/strip-backup/fbfb97b1089a-553e1d85-rebase.hg
-  $ hg tglog
-  @  4: f338eb3c2c7c 'E'
-  |
-  o  3: 41acb9dca9eb 'D'
-  |
-  | o  2: 8ac4a08debf1 'C' two
-  | |
-  | o  1: 1ba175478953 'B' one
-  |/
-  o  0: 1994f17a630e 'A'
-  
-  $ hg export tip
-  # HG changeset patch
-  # User user1
-  # Date 0 0
-  #      Thu Jan 01 00:00:00 1970 +0000
-  # Node ID f338eb3c2c7cc5b5915676a2376ba7ac558c5213
-  # Parent  41acb9dca9eb976e84cd21fcb756b4afa5a35c09
-  E
-  
-  diff -r 41acb9dca9eb -r f338eb3c2c7c z
-  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/z	Thu Jan 01 00:00:00 1970 +0000
-  @@ -0,0 +1,2 @@
-  +d
-  +blah
-
   $ cd ..
 
 Rebase, collapse and copies
@@ -767,7 +712,7 @@
   |
   | @  2: 82b8abf9c185 'D'
   | |
-  @ |  1: f899f3910ce7 'B'
+  % |  1: f899f3910ce7 'B'
   |/
   o  0: 4a2df7238c3b 'A'
   
@@ -791,7 +736,7 @@
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
   $ hg tglog
-  @  3: 63668d570d21 'C'
+  %  3: 63668d570d21 'C'
   |
   | @  2: 82b8abf9c185 'D'
   | |
@@ -817,7 +762,7 @@
   abort: edit failed: false exited with status 1
   [255]
   $ hg tglog
-  o  3: 63668d570d21 'C'
+  %  3: 63668d570d21 'C'
   |
   | @  2: 82b8abf9c185 'D'
   | |
--- a/tests/test-rebase-conflicts.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-conflicts.t	Mon Mar 09 10:18:40 2020 -0700
@@ -429,3 +429,72 @@
   |/
   o  0:draft 'A'
   
+
+Test where the conflict happens when rebasing a merge commit
+
+  $ cd $TESTTMP
+  $ hg init conflict-in-merge
+  $ cd conflict-in-merge
+  $ hg debugdrawdag <<'EOS'
+  > F # F/conflict = foo\n
+  > |\
+  > D E
+  > |/
+  > C B # B/conflict = bar\n
+  > |/
+  > A
+  > EOS
+
+  $ hg co F
+  5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg rebase -d B
+  rebasing 2:dc0947a82db8 "C" (C)
+  rebasing 3:e7b3f00ed42e "D" (D)
+  rebasing 4:03ca77807e91 "E" (E)
+  rebasing 5:9a6b91dc2044 "F" (F tip)
+  merging conflict
+  warning: conflicts while merging conflict! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg tglog
+  @  8:draft 'E'
+  |
+  | @  7:draft 'D'
+  |/
+  o  6:draft 'C'
+  |
+  | %    5:draft 'F'
+  | |\
+  | | o  4:draft 'E'
+  | | |
+  | o |  3:draft 'D'
+  | |/
+  | o  2:draft 'C'
+  | |
+  o |  1:draft 'B'
+  |/
+  o  0:draft 'A'
+  
+  $ echo baz > conflict
+  $ hg resolve -m
+  (no more unresolved files)
+  continue: hg rebase --continue
+  $ hg rebase -c
+  already rebased 2:dc0947a82db8 "C" (C) as 0199610c343e
+  already rebased 3:e7b3f00ed42e "D" (D) as f0dd538aaa63
+  already rebased 4:03ca77807e91 "E" (E) as cbf25af8347d
+  rebasing 5:9a6b91dc2044 "F" (F)
+  saved backup bundle to $TESTTMP/conflict-in-merge/.hg/strip-backup/dc0947a82db8-ca7e7d5b-rebase.hg
+  $ hg tglog
+  @    5:draft 'F'
+  |\
+  | o  4:draft 'E'
+  | |
+  o |  3:draft 'D'
+  |/
+  o  2:draft 'C'
+  |
+  o  1:draft 'B'
+  |
+  o  0:draft 'A'
+  
--- a/tests/test-rebase-dest.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-dest.t	Mon Mar 09 10:18:40 2020 -0700
@@ -256,7 +256,7 @@
   > EOS
   rebasing 3:a4256619d830 "B" (B)
   rebasing 6:8e139e245220 "C" (C tip)
-  o    8: 51e2ce92e06a C
+  o    8: d7d1169e9b1c C
   |\
   | o    7: 2ed0c8546285 B
   | |\
--- a/tests/test-rebase-interruptions.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-interruptions.t	Mon Mar 09 10:18:40 2020 -0700
@@ -294,7 +294,7 @@
   $ hg tglogp
   @  7: 401ccec5e39f secret 'C'
   |
-  | @  6: a0b2430ebfb8 secret 'F'
+  | o  6: a0b2430ebfb8 secret 'F'
   | |
   o |  5: 45396c49d53b public 'B'
   | |
@@ -345,7 +345,7 @@
   $ hg tglogp
   @  7: 401ccec5e39f secret 'C'
   |
-  | @  6: a0b2430ebfb8 secret 'F'
+  | o  6: a0b2430ebfb8 secret 'F'
   | |
   o |  5: 45396c49d53b public 'B'
   | |
@@ -395,7 +395,7 @@
   $ hg tglogp
   @  7: 401ccec5e39f secret 'C'
   |
-  | @  6: a0b2430ebfb8 secret 'F'
+  | o  6: a0b2430ebfb8 secret 'F'
   | |
   o |  5: 45396c49d53b public 'B'
   | |
--- a/tests/test-rebase-newancestor.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-newancestor.t	Mon Mar 09 10:18:40 2020 -0700
@@ -68,11 +68,6 @@
 that is mixed up with the actual merge stuff and there is in general no way to
 separate them.
 
-Note: The dev branch contains _no_ changes to f-default. It might be unclear
-how rebasing of ancestor merges should be handled, but the current behavior
-with spurious prompts for conflicts in files that didn't change seems very
-wrong.
-
   $ hg init ancestor-merge
   $ cd ancestor-merge
 
@@ -133,16 +128,11 @@
   note: not rebasing 1:1d1a643d390e "dev: create branch", its destination already has all its changes
   rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
   rebasing 4:4b019212aaf6 "dev: merge default"
-  file 'f-default' was deleted in local [dest] but was modified in other [source].
-  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
-  What do you want to do? c
+  note: not rebasing 4:4b019212aaf6 "dev: merge default", its destination already has all its changes
   rebasing 6:010ced67e558 "dev: merge default"
+  note: not rebasing 6:010ced67e558 "dev: merge default", its destination already has all its changes
   saved backup bundle to $TESTTMP/ancestor-merge/.hg/strip-backup/1d1a643d390e-4a6f6d17-rebase.hg
   $ hg tglog
-  o  6: de147e4f69cf 'dev: merge default'
-  |
-  o  5: eda7b7f46f5d 'dev: merge default'
-  |
   o  4: 3e075b1c0a40 'dev: f-dev stuff'
   |
   @  3: e08089805d82 'default: f-other stuff'
@@ -163,28 +153,8 @@
   > EOF
   rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
   rebasing 4:4b019212aaf6 "dev: merge default"
-  file 'f-default' was deleted in local [dest] but was modified in other [source].
-  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
-  What do you want to do? c
-  rebasing 6:010ced67e558 "dev: merge default"
-  saved backup bundle to $TESTTMP/ancestor-merge-2/.hg/strip-backup/ec2c14fb2984-827d7a44-rebase.hg
-  $ hg tglog
-  o  7: de147e4f69cf 'dev: merge default'
-  |
-  o  6: eda7b7f46f5d 'dev: merge default'
-  |
-  o  5: 3e075b1c0a40 'dev: f-dev stuff'
-  |
-  o  4: e08089805d82 'default: f-other stuff'
-  |
-  o  3: 462860db70a1 'default: remove f-default'
-  |
-  o  2: f157ecfd2b6b 'default: f-default stuff'
-  |
-  | o  1: 1d1a643d390e 'dev: create branch' dev
-  |/
-  o  0: e90e8eb90b6f 'default: create f-default'
-  
+  abort: rebasing 4:4b019212aaf6 will include unwanted changes from 1:1d1a643d390e
+  [255]
   $ cd ..
 
 
@@ -284,18 +254,7 @@
   rebasing 6:4c5f12f25ebe "merge rebase ancestors" (tip)
   resolving manifests
   removing other
-  note: merging f9daf77ffe76+ and 4c5f12f25ebe using bids from ancestors a60552eb93fb and f59da8fc0fcf
-  
-  calculating bids for ancestor a60552eb93fb
   resolving manifests
-  
-  calculating bids for ancestor f59da8fc0fcf
-  resolving manifests
-  
-  auction for merging merge bids
-   other: consensus for g
-  end of auction
-  
   getting other
   committing files:
   other
--- a/tests/test-rebase-obsolete.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-obsolete.t	Mon Mar 09 10:18:40 2020 -0700
@@ -1795,19 +1795,15 @@
   $ hg log -G
   @  2:b18e25de2cf5 D
   |
-  | @  1:2ec65233581b B (pruned using prune)
-  |/
   o  0:426bada5c675 A
   
   $ hg summary
   parent: 2:b18e25de2cf5 tip
    D
-  parent: 1:2ec65233581b  (obsolete)
-   B
   branch: default
-  commit: 2 modified, 1 unknown, 1 unresolved (merge)
+  commit: 1 modified, 1 added, 1 unknown, 1 unresolved
   update: (current)
-  phases: 3 draft
+  phases: 2 draft
   rebase: 0 rebased, 2 remaining (rebase --continue)
 
   $ hg rebase --abort
--- a/tests/test-rebase-parameters.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-parameters.t	Mon Mar 09 10:18:40 2020 -0700
@@ -92,6 +92,14 @@
   empty "rev" revision set - nothing to rebase
   [1]
 
+  $ hg rebase --rev 'wdir()' --dest 6
+  abort: cannot rebase the working copy
+  [255]
+
+  $ hg rebase --source 'wdir()' --dest 6
+  abort: cannot rebase the working copy
+  [255]
+
   $ hg rebase --source '1 & !1' --dest 8
   empty "source" revision set - nothing to rebase
   [1]
@@ -473,11 +481,9 @@
   $ hg summary
   parent: 1:56daeba07f4b 
    c2
-  parent: 2:e4e3f3546619 tip
-   c2b
   branch: default
-  commit: 1 modified, 1 unresolved (merge)
-  update: (current)
+  commit: 1 unresolved (clean)
+  update: 1 new changesets, 2 branch heads (merge)
   phases: 3 draft
   rebase: 0 rebased, 1 remaining (rebase --continue)
 
--- a/tests/test-rebase-rename.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-rename.t	Mon Mar 09 10:18:40 2020 -0700
@@ -108,6 +108,62 @@
   
   
 
+  $ repeatchange() {
+  >   hg checkout $1
+  >   hg cp a z
+  >   echo blah >> z
+  >   hg commit -Am "$2" --user "$3"
+  > }
+  $ repeatchange 1 "E" "user1"
+  2 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  created new head
+  $ repeatchange 1 "E" "user2"
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  created new head
+  $ hg tglog
+  @  5: af8ad1f97097 'E'
+  |
+  | o  4: 60f545c27784 'E'
+  |/
+  | o  3: 032a9b75e83b 'rename A'
+  | |
+  | o  2: 220d0626d185 'rename B'
+  |/
+  o  1: 3ab5da9a5c01 'B'
+  |
+  o  0: 1994f17a630e 'A'
+  
+  $ hg rebase -s 5 -d 4
+  rebasing 5:af8ad1f97097 "E" (tip)
+  note: not rebasing 5:af8ad1f97097 "E" (tip), its destination already has all its changes
+  saved backup bundle to $TESTTMP/a/.hg/strip-backup/af8ad1f97097-c3e90708-rebase.hg
+  $ hg tglog
+  @  4: 60f545c27784 'E'
+  |
+  | o  3: 032a9b75e83b 'rename A'
+  | |
+  | o  2: 220d0626d185 'rename B'
+  |/
+  o  1: 3ab5da9a5c01 'B'
+  |
+  o  0: 1994f17a630e 'A'
+  
+  $ hg export tip
+  # HG changeset patch
+  # User user1
+  # Date 0 0
+  #      Thu Jan 01 00:00:00 1970 +0000
+  # Node ID 60f545c277846e6bad309919bae3ae106f59cb39
+  # Parent  3ab5da9a5c01faa02c20f2ec4870a4f689c92da6
+  E
+  
+  diff -r 3ab5da9a5c01 -r 60f545c27784 z
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/z	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,2 @@
+  +a
+  +blah
+
   $ cd ..
 
 
--- a/tests/test-rebase-transaction.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rebase-transaction.t	Mon Mar 09 10:18:40 2020 -0700
@@ -114,7 +114,7 @@
   |
   | @  4: Z
   | |
-  @ |  3: C
+  % |  3: C
   | |
   | o  2: Y
   | |
@@ -123,9 +123,9 @@
   o  0: A
   
   $ hg st
-  M C
   M conflict
   A B
+  A C
   ? conflict.orig
   $ echo resolved > conflict
   $ hg resolve -m
--- a/tests/test-rename-after-merge.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rename-after-merge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -120,4 +120,14 @@
   $ hg log -r tip -C -v | grep copies
   copies:      b2 (b1)
 
+Test marking/unmarking copies in merge commit
+
+  $ hg copy --forget --at-rev . b2
+  abort: cannot mark/unmark copy in merge commit
+  [255]
+
+  $ hg copy --after --at-rev . b1 b2
+  abort: cannot mark/unmark copy in merge commit
+  [255]
+
   $ cd ..
--- a/tests/test-rename-dir-merge.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rename-dir-merge.t	Mon Mar 09 10:18:40 2020 -0700
@@ -30,8 +30,9 @@
      b/a
      b/b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a/a' -> dst: 'b/a' 
-     src: 'a/b' -> dst: 'b/b' 
+     on remote side:
+      src: 'a/a' -> dst: 'b/a' 
+      src: 'a/b' -> dst: 'b/b' 
     checking for directory renames
      discovered dir src: 'a/' -> dst: 'b/'
      pending file src: 'a/c' -> dst: 'b/c'
@@ -75,8 +76,9 @@
     unmatched files in other:
      a/c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a/a' -> dst: 'b/a' 
-     src: 'a/b' -> dst: 'b/b' 
+     on local side:
+      src: 'a/a' -> dst: 'b/a' 
+      src: 'a/b' -> dst: 'b/b' 
     checking for directory renames
      discovered dir src: 'a/' -> dst: 'b/'
      pending file src: 'a/c' -> dst: 'b/c'
--- a/tests/test-rename-merge1.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rename-merge1.t	Mon Mar 09 10:18:40 2020 -0700
@@ -28,9 +28,11 @@
      b
      b2
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
-     src: 'a2' -> dst: 'b2' !
-     src: 'a2' -> dst: 'c2' !
+     on local side:
+      src: 'a2' -> dst: 'c2' !
+     on remote side:
+      src: 'a' -> dst: 'b' *
+      src: 'a2' -> dst: 'b2' !
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -170,7 +172,8 @@
     unmatched files in other:
      newfile
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'file' -> dst: 'newfile' %
+     on remote side:
+      src: 'file' -> dst: 'newfile' %
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -184,3 +187,50 @@
   $ hg status
   M newfile
   $ cd ..
+
+Create x and y, then modify y and rename x to z on one side of merge, and
+modify x and rename y to z on the other side.
+  $ hg init conflicting-target
+  $ cd conflicting-target
+  $ echo x > x
+  $ echo y > y
+  $ hg ci -Aqm 'add x and y'
+  $ hg mv x z
+  $ echo foo >> y
+  $ hg ci -qm 'modify y, rename x to z'
+  $ hg co -q 0
+  $ hg mv y z
+  $ echo foo >> x
+  $ hg ci -qm 'modify x, rename y to z'
+# We should probably tell the user about the conflicting rename sources.
+# Depending on which side they pick, we should take that rename and get
+# the changes to the source from the other side. The unchanged file should
+# remain.
+  $ hg merge --debug 1 -t :merge3
+    all copies found (* = to merge, ! = divergent, % = renamed and deleted):
+     on local side:
+      src: 'y' -> dst: 'z' *
+     on remote side:
+      src: 'x' -> dst: 'z' *
+    checking for directory renames
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 5151c134577e, local: 07fcbc9a74ed+, remote: f21419739508
+   preserving z for resolve of z
+  starting 4 threads for background file closing (?)
+   z: both renamed from y -> m (premerge)
+  picked tool ':merge3' for z (binary False symlink False changedelete False)
+  merging z
+  my z@07fcbc9a74ed+ other z@f21419739508 ancestor y@5151c134577e
+   premerge successful
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls
+  x
+  z
+  $ cat x
+  x
+  foo
+# 'z' should have had the added 'foo' line
+  $ cat z
+  x
--- a/tests/test-rename-merge2.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rename-merge2.t	Mon Mar 09 10:18:40 2020 -0700
@@ -79,7 +79,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -117,7 +118,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -156,7 +158,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -194,7 +197,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -231,7 +235,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -263,7 +268,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -294,7 +300,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -327,7 +334,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -355,7 +363,10 @@
   test L:um a b R:um a b W:       - 9  do merge with ancestor in a
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -400,8 +411,10 @@
     unmatched files in other:
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' !
-     src: 'a' -> dst: 'c' !
+     on local side:
+      src: 'a' -> dst: 'b' !
+     on remote side:
+      src: 'a' -> dst: 'c' !
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -434,7 +447,8 @@
   test L:nc a b R:up b   W:       - 12 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -473,7 +487,8 @@
   test L:up b   R:nm a b W:       - 13 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -513,7 +528,8 @@
   test L:nc a b R:up a b W:       - 14 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -522,19 +538,15 @@
    preserving rev for resolve of rev
    a: remote is newer -> g
   getting a
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
+  my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
@@ -553,7 +565,8 @@
   test L:up b   R:nm a b W:       - 15 merge b no ancestor, remove a
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -593,7 +606,8 @@
   test L:nc a b R:up a b W:       - 16 get a, merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -602,19 +616,15 @@
    preserving rev for resolve of rev
    a: remote is newer -> g
   getting a
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
+  my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
@@ -633,7 +643,8 @@
   test L:up a b R:nc a b W:       - 17 keep a, merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -641,19 +652,15 @@
    preserving b for resolve of b
    preserving rev for resolve of rev
   starting 4 threads for background file closing (?)
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor b@000000000000
+  my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
@@ -672,7 +679,8 @@
   test L:nm a b R:up a b W:       - 18 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -680,35 +688,24 @@
    preserving b for resolve of b
    preserving rev for resolve of rev
   starting 4 threads for background file closing (?)
-   a: prompt deleted/changed -> m (premerge)
-  picked tool ':prompt' for a (binary False symlink False changedelete True)
-  file 'a' was deleted in local [working copy] but was modified in other [merge rev].
-  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
-  What do you want to do? u
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@02963e448370+ other b@8dbce441892a ancestor b@000000000000
+  my b@02963e448370+ other b@8dbce441892a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@02963e448370+ other b@8dbce441892a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
   launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
   merge tool returned: 0
-  0 files updated, 2 files merged, 0 files removed, 1 files unresolved
-  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  0 files updated, 2 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
   --------------
-  M a
   M b
-  abort: unresolved merge conflicts (see 'hg help resolve')
   --------------
   
   $ tm "up a b" "nm a b" "      " "19 merge b no ancestor, prompt remove a"
@@ -717,44 +714,34 @@
   test L:up a b R:nm a b W:       - 19 merge b no ancestor, prompt remove a
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 0b76e65c8289+, remote: bdb19105162a
-   preserving a for resolve of a
    preserving b for resolve of b
    preserving rev for resolve of rev
   starting 4 threads for background file closing (?)
-   a: prompt changed/deleted -> m (premerge)
-  picked tool ':prompt' for a (binary False symlink False changedelete True)
-  file 'a' was deleted in other [merge rev] but was modified in local [working copy].
-  You can use (c)hanged version, (d)elete, or leave (u)nresolved.
-  What do you want to do? u
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@0b76e65c8289+ other b@bdb19105162a ancestor b@000000000000
+  my b@0b76e65c8289+ other b@bdb19105162a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@0b76e65c8289+ other b@bdb19105162a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
   launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
   merge tool returned: 0
-  0 files updated, 2 files merged, 0 files removed, 1 files unresolved
-  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  0 files updated, 2 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
   --------------
   M b
   C a
-  abort: unresolved merge conflicts (see 'hg help resolve')
   --------------
   
   $ tm "up a  " "um a b" "      " "20 merge a and b to b, remove a"
@@ -765,7 +752,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -807,7 +795,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -853,7 +842,8 @@
     unmatched files in other:
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -936,11 +926,14 @@
      4/g
      7/f
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1/f' -> dst: '1/g' *
-     src: '3/f' -> dst: '3/g' *
-     src: '4/f' -> dst: '4/g' *
-     src: '5/f' -> dst: '5/g' *
-     src: '6/f' -> dst: '6/g' *
+     on local side:
+      src: '1/f' -> dst: '1/g' *
+      src: '5/f' -> dst: '5/g' *
+      src: '6/f' -> dst: '6/g' *
+     on remote side:
+      src: '1/f' -> dst: '1/g' *
+      src: '3/f' -> dst: '3/g' *
+      src: '4/f' -> dst: '4/g' *
     checking for directory renames
   $ hg mani
   0/f
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rename-rev.t	Mon Mar 09 10:18:40 2020 -0700
@@ -0,0 +1,56 @@
+  $ hg init
+  $ mkdir d1 d1/d11 d2
+  $ echo d1/a > d1/a
+  $ echo d1/ba > d1/ba
+  $ echo d1/a1 > d1/d11/a1
+  $ echo d1/b > d1/b
+  $ echo d2/b > d2/b
+  $ hg add d1/a d1/b d1/ba d1/d11/a1 d2/b
+  $ hg commit -m "initial"
+
+
+Test single file
+
+# One recorded copy, one copy to record after commit
+  $ hg cp d1/b d1/c
+  $ cp d1/b d1/d
+  $ hg add d1/d
+  $ hg ci -m 'copy d1/b to d1/c and d1/d'
+  $ hg st -C --change .
+  A d1/c
+    d1/b
+  A d1/d
+# Errors out without --after for now
+  $ hg cp --at-rev . d1/b d1/d
+  abort: --at-rev requires --after
+  [255]
+# Errors out with non-existent destination
+  $ hg cp -A --at-rev . d1/b d1/non-existent
+  abort: d1/non-existent: copy destination does not exist in 8a9d70fa20c9
+  [255]
+# Successful invocation
+  $ hg cp -A --at-rev . d1/b d1/d
+  saved backup bundle to $TESTTMP/.hg/strip-backup/8a9d70fa20c9-973ae357-copy.hg
+# New copy is recorded, and previously recorded copy is also still there
+  $ hg st -C --change .
+  A d1/c
+    d1/b
+  A d1/d
+    d1/b
+
+Test using directory as destination
+
+  $ hg co 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ cp -R d1 d3
+  $ hg add d3
+  adding d3/a
+  adding d3/b
+  adding d3/ba
+  adding d3/d11/a1
+  $ hg ci -m 'copy d1/ to d3/'
+  created new head
+  $ hg cp -A --at-rev . d1 d3
+  abort: d3: --at-rev does not support a directory as destination
+  [255]
+
--- a/tests/test-repair-strip.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-repair-strip.t	Mon Mar 09 10:18:40 2020 -0700
@@ -25,7 +25,9 @@
   >   else
   >       echo "(no journal)"
   >   fi
-  >   ls .hg/store/journal >/dev/null 2>&1 && hg recover
+  >   if ls .hg/store/journal >/dev/null 2>&1; then
+  >     hg recover --verify
+  >   fi
   >   ls .hg/strip-backup/* >/dev/null 2>&1 && hg unbundle -q .hg/strip-backup/*
   >   rm -rf .hg/strip-backup
   > }
--- a/tests/test-repo-compengines.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-repo-compengines.t	Mon Mar 09 10:18:40 2020 -0700
@@ -22,10 +22,15 @@
 Unknown compression engine to format.compression aborts
 
   $ hg --config format.revlog-compression=unknown init unknown
-  abort: compression engine unknown defined by format.revlog-compression not available
+  abort: compression engines "unknown" defined by format.revlog-compression not available
   (run "hg debuginstall" to list available compression engines)
   [255]
 
+Unknown compression engine in a list with a known one works fine
+
+  $ hg --config format.revlog-compression=zlib,unknown init zlib-before-unknown
+  $ hg --config format.revlog-compression=unknown,zlib init unknown-before-zlib
+
 A requirement specifying an unknown compression engine results in bail
 
   $ hg init unknownrequirement
--- a/tests/test-repo-filters-tiptoe.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-repo-filters-tiptoe.t	Mon Mar 09 10:18:40 2020 -0700
@@ -67,6 +67,12 @@
   R a
   ! b
 
+  $ hg status --copies
+  M c
+  A d
+  R a
+  ! b
+
 Getting data about the working copy parent
 
   $ hg log -r '.' -T "{node}\n{date}\n"
--- a/tests/test-resolve.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-resolve.t	Mon Mar 09 10:18:40 2020 -0700
@@ -306,48 +306,40 @@
 
   $ hg --config extensions.fakemergerecord=$TESTDIR/fakemergerecord.py fakemergerecord -x
   $ hg debugmergestate
-  * version 2 records
-  local: 57653b9f834a4493f7240b0681efcb9ae7cab745
-  other: dc77451844e37f03f5c559e3b8529b2b48d381d1
-  labels:
-    local: working copy
-    other: merge rev
-  unrecognized entry: x	advisory record
-  file extras: file1 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac)
-  file: file1 (record type "F", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
+  local (working copy): 57653b9f834a4493f7240b0681efcb9ae7cab745
+  other (merge rev): dc77451844e37f03f5c559e3b8529b2b48d381d1
+  file: file1 (state "r")
+    local path: file1 (hash 60b27f004e454aca81b0480209cce5081ec52390, flags "")
     ancestor path: file1 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd)
     other path: file1 (node 6f4310b00b9a147241b071a60c28a650827fb03d)
-  file extras: file2 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac)
-  file: file2 (record type "F", state "u", hash cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523)
-    local path: file2 (flags "")
+    extra: ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac
+  file: file2 (state "u")
+    local path: file2 (hash cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523, flags "")
     ancestor path: file2 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd)
     other path: file2 (node 6f4310b00b9a147241b071a60c28a650827fb03d)
+    extra: ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac
   $ hg resolve -l
   R file1
   U file2
 
+test json output
+
+  $ hg debugmergestate -T json
+  [
+   {
+    "commits": [{"label": "working copy", "name": "local", "node": "57653b9f834a4493f7240b0681efcb9ae7cab745"}, {"label": "merge rev", "name": "other", "node": "dc77451844e37f03f5c559e3b8529b2b48d381d1"}],
+    "files": [{"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file1", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "60b27f004e454aca81b0480209cce5081ec52390", "local_path": "file1", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file1", "path": "file1", "state": "r"}, {"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file2", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523", "local_path": "file2", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file2", "path": "file2", "state": "u"}]
+   }
+  ]
+
+
 insert unsupported mandatory merge record
 
   $ hg --config extensions.fakemergerecord=$TESTDIR/fakemergerecord.py fakemergerecord -X
   $ hg debugmergestate
-  * version 2 records
-  local: 57653b9f834a4493f7240b0681efcb9ae7cab745
-  other: dc77451844e37f03f5c559e3b8529b2b48d381d1
-  labels:
-    local: working copy
-    other: merge rev
-  file extras: file1 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac)
-  file: file1 (record type "F", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390)
-    local path: file1 (flags "")
-    ancestor path: file1 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd)
-    other path: file1 (node 6f4310b00b9a147241b071a60c28a650827fb03d)
-  file extras: file2 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac)
-  file: file2 (record type "F", state "u", hash cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523)
-    local path: file2 (flags "")
-    ancestor path: file2 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd)
-    other path: file2 (node 6f4310b00b9a147241b071a60c28a650827fb03d)
-  unrecognized entry: X	mandatory record
+  abort: unsupported merge state records: X
+  (see https://mercurial-scm.org/wiki/MergeStateRecords for more information)
+  [255]
   $ hg resolve -l
   abort: unsupported merge state records: X
   (see https://mercurial-scm.org/wiki/MergeStateRecords for more information)
--- a/tests/test-revlog-ancestry.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-revlog-ancestry.py	Mon Mar 09 10:18:40 2020 -0700
@@ -25,11 +25,11 @@
 
 
 def update(rev):
-    merge.update(repo, rev, branchmerge=False, force=True)
+    merge.clean_update(repo[rev])
 
 
 def merge_(rev):
-    merge.update(repo, rev, branchmerge=True, force=False)
+    merge.merge(repo[rev])
 
 
 if __name__ == '__main__':
--- a/tests/test-revlog-raw.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-revlog-raw.py	Mon Mar 09 10:18:40 2020 -0700
@@ -229,7 +229,7 @@
 
     # Gray Code. See https://en.wikipedia.org/wiki/Gray_code
     gray = lambda x: x ^ (x >> 1)
-    reversegray = dict((gray(i), i) for i in range(m))
+    reversegray = {gray(i): i for i in range(m)}
 
     # Generate (n * 2) bit gray code, yield lower n bits as X, and look for
     # the next unused gray code where higher n bits equal to X.
--- a/tests/test-rollback.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-rollback.t	Mon Mar 09 10:18:40 2020 -0700
@@ -190,7 +190,7 @@
 
 corrupt journal test
   $ echo "foo" > .hg/store/journal
-  $ hg recover
+  $ hg recover --verify
   rolling back interrupted transaction
   couldn't read journal entry 'foo\n'!
   checking changesets
--- a/tests/test-shelve.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-shelve.t	Mon Mar 09 10:18:40 2020 -0700
@@ -171,6 +171,8 @@
   $ hg mv b b.rename
   moving b/b to b.rename/b
   $ hg cp c c.copy
+  $ hg mv d ghost
+  $ rm ghost
   $ hg status -C
   M a/a
   A b.rename/b
@@ -178,12 +180,15 @@
   A c.copy
     c
   R b/b
+  R d
+  ! ghost
+    d
 
 the common case - no options or filenames
 
   $ hg shelve
   shelved as default-01
-  2 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  3 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ hg status -C
 
 ensure that our shelved changes exist
@@ -254,6 +259,7 @@
   A c.copy
     c
   R b/b
+  R d
   $ hg shelve -l
 
 (both of default.hg and default-1.hg should be still kept, because it
@@ -287,6 +293,7 @@
   A c.copy
     c
   R b/b
+  R d
   $ HGEDITOR=cat hg shelve -q -n wibble -m wat -e a
   wat
   
@@ -306,6 +313,7 @@
   A c.copy
     c
   R b/b
+  R d
   $ hg shelve -l --stat
   wibble          (*)    wat (glob)
    a/a |  1 +
@@ -323,6 +331,7 @@
   A c.copy
     c
   R b/b
+  R d
 
 ensure old shelve backups are being deleted automatically
 
@@ -363,6 +372,7 @@
   M b.rename/b
   M c.copy
   R b/b
+  R d
   ? a/a.orig
   # The repository is in an unfinished *unshelve* state.
   
@@ -401,6 +411,7 @@
   M b.rename/b
   M c.copy
   R b/b
+  R d
   ? a/a.orig
   $ hg diff
   diff --git a/a/a b/a/a
@@ -412,13 +423,19 @@
    c
   +=======
   +a
-  +>>>>>>> shelve:       a68ec3400638 - shelve: changes to: [mq]: second.patch
+  +>>>>>>> shelve:       203c9f771d2b - shelve: changes to: [mq]: second.patch
   diff --git a/b/b b/b.rename/b
   rename from b/b
   rename to b.rename/b
   diff --git a/c b/c.copy
   copy from c
   copy to c.copy
+  diff --git a/d b/d
+  deleted file mode 100644
+  --- a/d
+  +++ /dev/null
+  @@ -1,1 +0,0 @@
+  -d
   $ hg resolve -l
   U a/a
 
@@ -434,6 +451,7 @@
   M b.rename/b
   M c.copy
   R b/b
+  R d
   ? a/a.orig
   $ hg unshelve -a
   unshelve of 'default' aborted
@@ -512,6 +530,7 @@
     c
   A foo/foo
   R b/b
+  R d
   ? a/a.orig
 
 there should be no shelves left
--- a/tests/test-ssh-bundle1.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-ssh-bundle1.t	Mon Mar 09 10:18:40 2020 -0700
@@ -482,7 +482,7 @@
   sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
-  remote: 440 (sshv1 !)
+  remote: 463 (sshv1 !)
   protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1 (sshv1 !)
--- a/tests/test-ssh-proto-unbundle.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-ssh-proto-unbundle.t	Mon Mar 09 10:18:40 2020 -0700
@@ -56,8 +56,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -109,8 +109,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -235,8 +235,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -293,8 +293,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -359,8 +359,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -418,8 +418,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -485,8 +485,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -543,8 +543,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -609,8 +609,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -668,8 +668,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -735,8 +735,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -796,8 +796,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -865,8 +865,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -923,8 +923,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -989,8 +989,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1050,8 +1050,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1119,8 +1119,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1180,8 +1180,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1255,8 +1255,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1314,8 +1314,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1382,8 +1382,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1441,8 +1441,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1511,8 +1511,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1572,8 +1572,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1650,8 +1650,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1715,8 +1715,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1788,8 +1788,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1843,8 +1843,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1918,8 +1918,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1977,8 +1977,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
--- a/tests/test-ssh-proto.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-ssh-proto.t	Mon Mar 09 10:18:40 2020 -0700
@@ -64,7 +64,7 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 440
+  remote: 463
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -86,8 +86,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 `hg debugserve --sshstdio` works
@@ -96,7 +96,7 @@
   $ hg debugserve --sshstdio << EOF
   > hello
   > EOF
-  440
+  463
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 I/O logging works
@@ -106,24 +106,24 @@
   > EOF
   e> flush() -> None
   o> write(4) -> 4:
-  o>     440\n
-  o> write(440) -> 440:
+  o>     463\n
+  o> write(463) -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
-  440
+  463
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> flush() -> None
 
   $ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF
   > hello
   > EOF
-  440
+  463
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
   $ cat $TESTTMP/io
   e> flush() -> None
   o> write(4) -> 4:
-  o>     440\n
-  o> write(440) -> 440:
+  o>     463\n
+  o> write(463) -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> flush() -> None
 
@@ -149,8 +149,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -187,7 +187,7 @@
   remote: banner: line 7
   remote: banner: line 8
   remote: banner: line 9
-  remote: 440
+  remote: 463
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -245,8 +245,8 @@
   o> readline() -> 15:
   o>     banner: line 9\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -297,12 +297,12 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
+  o>     463\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-  o> readline() -> 440:
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -316,7 +316,7 @@
   sending hello command
   sending between command
   remote: 0
-  remote: 440
+  remote: 463
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -365,8 +365,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -390,7 +390,7 @@
   remote: 0
   remote: 0
   remote: 0
-  remote: 440
+  remote: 463
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -447,8 +447,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -494,8 +494,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -539,8 +539,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -609,8 +609,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 Incomplete dictionary send
@@ -691,8 +691,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -725,8 +725,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -768,8 +768,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -797,8 +797,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(105) -> 105:
   i>     between\n
@@ -838,8 +838,8 @@
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -887,8 +887,8 @@
   o> readline() -> 41:
   o>     68986213bd4485ea51533535e3fc9e78007a711f\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -914,7 +914,7 @@
   o> readline() -> 41:
   o>     68986213bd4485ea51533535e3fc9e78007a711f\n
   o> readline() -> 4:
-  o>     440\n
+  o>     463\n
 
 Send an upgrade request to a server that doesn't support that command
 
@@ -943,8 +943,8 @@
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -962,7 +962,7 @@
   sending hello command
   sending between command
   remote: 0
-  remote: 440
+  remote: 463
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -1005,8 +1005,8 @@
   o> readline() -> 44:
   o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     439\n
-  o> readline() -> 440:
+  o>     462\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
   $ cd ..
@@ -1062,6 +1062,8 @@
     changegroup
       01
       02
+    checkheads
+      related
     digests
       md5
       sha1
@@ -1112,14 +1114,14 @@
   o> readline() -> 44:
   o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     439\n
-  o> readline() -> 440:
+  o>     462\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     424\n
-  o> readline() -> 424:
+  o>     447\n
+  o> readline() -> 447:
   o>     capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 Multiple upgrades is not allowed
@@ -1150,8 +1152,8 @@
   o> readline() -> 44:
   o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     439\n
-  o> readline() -> 440:
+  o>     462\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(45) -> 45:
   i>     upgrade another-token proto=irrelevant\n
@@ -1222,8 +1224,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -1341,8 +1343,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1379,8 +1381,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1429,8 +1431,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1459,8 +1461,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1490,8 +1492,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1523,8 +1525,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1557,8 +1559,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1593,8 +1595,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1632,8 +1634,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1672,8 +1674,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending pushkey command
@@ -1724,8 +1726,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1757,8 +1759,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1807,8 +1809,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1845,8 +1847,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1884,8 +1886,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1920,8 +1922,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1957,8 +1959,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1990,8 +1992,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -2028,8 +2030,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -2069,8 +2071,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending pushkey command
@@ -2135,8 +2137,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     440\n
-  o> readline() -> 440:
+  o>     463\n
+  o> readline() -> 463:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -2175,8 +2177,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     439\n
-  o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     462\n
+  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending batch with 3 sub-commands
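
Editorial note (not part of the changeset): every expected hello-banner length above grows by the same 23 bytes (440 to 463, 439 to 462, 424 to 447), and the bundlecaps size below grows identically (266 to 289). This lines up with the "checkheads / related" entry newly listed among the bundle2 capabilities earlier in this file, assuming that capability is URL-quoted into the banner as "checkheads%3Drelated" preceded by a "%0A" separator. A rough consistency check of that arithmetic:

    # editorial sketch: verify the uniform +23 byte delta in the expected output,
    # assuming the growth is the URL-quoted "checkheads=related" capability plus
    # the "%0A" separator joining it to the existing bundle2 capability blob
    from urllib.parse import quote

    delta = len(quote("\n" + "checkheads=related", safe=""))
    assert delta == 23
    assert all(new - old == delta
               for old, new in [(440, 463), (439, 462), (424, 447), (266, 289)])
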
--- a/tests/test-ssh.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-ssh.t	Mon Mar 09 10:18:40 2020 -0700
@@ -513,7 +513,7 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 440 (sshv1 !)
+  remote: 463 (sshv1 !)
   protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1 (sshv1 !)
@@ -532,7 +532,7 @@
   no changes found
   devel-peer-request: getbundle
   devel-peer-request:   bookmarks: 1 bytes
-  devel-peer-request:   bundlecaps: 266 bytes
+  devel-peer-request:   bundlecaps: 289 bytes
   devel-peer-request:   cg: 1 bytes
   devel-peer-request:   common: 122 bytes
   devel-peer-request:   heads: 122 bytes
--- a/tests/test-strip.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-strip.t	Mon Mar 09 10:18:40 2020 -0700
@@ -591,6 +591,18 @@
   phases: 2 draft
   mq:     3 unapplied
 
+  $ hg log --graph
+  @  changeset:   1:76dcf9fab855
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     b
+  |
+  %  changeset:   0:9ab35a2d17cb
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     a
+  
   $ echo c > b
   $ hg strip tip
   abort: uncommitted changes
--- a/tests/test-tags.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-tags.t	Mon Mar 09 10:18:40 2020 -0700
@@ -103,6 +103,9 @@
   0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
   0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
+  $ hg debugtagscache
+  0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing/invalid
+  1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
 
 Repeat with cold tag cache:
 
@@ -368,6 +371,24 @@
   1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
   1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
 
+On junk data + missing cache entries, hg also overwrites the junk.
+
+  $ rm -f .hg/cache/tags2-visible
+  >>> import os
+  >>> with open(".hg/cache/hgtagsfnodes1", "ab+") as fp:
+  ...     fp.seek(-10, os.SEEK_END) and None
+  ...     fp.truncate() and None
+
+  $ hg debugtagscache | tail -2
+  4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+  5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing/invalid
+  $ hg tags
+  tip                                5:8dbfe60eff30
+  bar                                1:78391a272241
+  $ hg debugtagscache | tail -2
+  4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+  5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+
 #if unix-permissions no-root
 Errors writing to .hgtags fnodes cache are silently ignored
 
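
Editorial note (not part of the changeset): the truncation exercised in the new test above clips the last record of .hg/cache/hgtagsfnodes1, which is why revision 5 is reported as "missing/invalid" until `hg tags` recomputes and rewrites it. A minimal sketch of walking that cache, assuming the 24-byte record layout (a 4-byte changeset-node prefix followed by the 20-byte .hgtags filenode); this is not Mercurial's own reader:

    # editorial sketch: dump hgtagsfnodes1 records, 24 bytes per revision;
    # a partially truncated trailing record is simply not yielded here
    import binascii

    RECSIZE = 4 + 20  # changeset-node prefix + .hgtags filenode

    def dump_fnodes(path=".hg/cache/hgtagsfnodes1"):
        with open(path, "rb") as fp:
            data = fp.read()
        for rev in range(len(data) // RECSIZE):
            rec = data[rev * RECSIZE:(rev + 1) * RECSIZE]
            print(rev, binascii.hexlify(rec[4:]).decode())
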
--- a/tests/test-uncommit.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-uncommit.t	Mon Mar 09 10:18:40 2020 -0700
@@ -489,7 +489,7 @@
   $ hg add b
   $ hg status
   A b
-  $ hg unc a
+  $ hg uncommit a
   note: keeping empty commit
   $ cat a
   super critical info!
@@ -503,11 +503,11 @@
   
   $ hg ci -Am 'add b'
   $ echo 'foo bar' > b
-  $ hg unc b
+  $ hg uncommit b
   abort: uncommitted changes
   (requires --allow-dirty-working-copy to uncommit)
   [255]
-  $ hg unc --allow-dirty-working-copy b
+  $ hg uncommit --allow-dirty-working-copy b
   $ hg log
   changeset:   3:30fa958635b2
   tag:         tip
--- a/tests/test-up-local-change.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-up-local-change.t	Mon Mar 09 10:18:40 2020 -0700
@@ -40,8 +40,6 @@
   summary:     1
   
   $ hg --debug up
-    unmatched files in other:
-     b
   resolving manifests
    branchmerge: False, force: False, partial: False
    ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb
@@ -91,8 +89,6 @@
   summary:     1
   
   $ hg --debug up
-    unmatched files in other:
-     b
   resolving manifests
    branchmerge: False, force: False, partial: False
    ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb
--- a/tests/test-update-branches.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-update-branches.t	Mon Mar 09 10:18:40 2020 -0700
@@ -189,17 +189,17 @@
   parent=2
 
   $ revtest '-cC dirty linear'  dirty 1 2 -cC
-  abort: can only specify one of -C/--clean, -c/--check, or -m/--merge
+  abort: cannot specify both --clean and --check
   parent=1
   M foo
 
   $ revtest '-mc dirty linear'  dirty 1 2 -mc
-  abort: can only specify one of -C/--clean, -c/--check, or -m/--merge
+  abort: cannot specify both --check and --merge
   parent=1
   M foo
 
   $ revtest '-mC dirty linear'  dirty 1 2 -mC
-  abort: can only specify one of -C/--clean, -c/--check, or -m/--merge
+  abort: cannot specify both --clean and --merge
   parent=1
   M foo
 
@@ -249,6 +249,19 @@
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
   use 'hg resolve' to retry unresolved file merges
   [1]
+  $ hg log -G --template '{rev}:{node|short} {parents} {branches}\n'
+  o  5:ff252e8273df  b1
+  |
+  @  4:d047485b3896 0:60829823a42a  b1
+  |
+  | %  3:6efa171f091b 1:0786582aa4b1
+  | |
+  | | o  2:bd10386d478c
+  | |/
+  | o  1:0786582aa4b1
+  |/
+  o  0:60829823a42a
+  
   $ hg st
   M a
   ? a.orig
@@ -330,6 +343,21 @@
   $ hg resolve -l
   U a
 
+Try to make an empty commit while there are conflicts
+  $ hg revert -r . a
+  $ rm a.orig
+  $ hg ci -m empty
+  abort: unresolved merge conflicts (see 'hg help resolve')
+  [255]
+  $ hg resolve -m a
+  (no more unresolved files)
+  $ hg resolve -l
+  R a
+  $ hg ci -m empty
+  nothing changed
+  [1]
+  $ hg resolve -l
+
 Change/delete conflict is not allowed
   $ hg up -qC 3
   $ hg rm foo
--- a/tests/test-wireproto-command-capabilities.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-wireproto-command-capabilities.t	Mon Mar 09 10:18:40 2020 -0700
@@ -150,7 +150,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   cbor> [
     {
       b'apibase': b'api/',
@@ -190,7 +190,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   cbor> [
     {
       b'apibase': b'api/',
@@ -223,7 +223,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   cbor> [
     {
       b'apibase': b'api/',
@@ -484,7 +484,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending capabilities command
   s> setsockopt(6, 1, 1) -> None (?)
   s>     POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
--- a/tests/test-wireproto-content-redirects.t	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/test-wireproto-content-redirects.t	Mon Mar 09 10:18:40 2020 -0700
@@ -66,9 +66,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2285\r\n
+  s>     Content-Length: 2308\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (remote redirect target target-a is compatible) (tls1.2 !)
   (remote redirect target target-a requires unsupported TLS versions: 1.2, 1.3) (no-tls1.2 !)
   sending capabilities command
@@ -396,9 +396,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2312\r\n
+  s>     Content-Length: 2335\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (remote redirect target target-a is compatible)
   (remote redirect target target-b uses unsupported protocol: unknown)
   sending capabilities command
@@ -731,9 +731,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2272\r\n
+  s>     Content-Length: 2295\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (redirect target target-bad-tls requires SNI, which is unsupported)
   sending capabilities command
   s> setsockopt(6, 1, 1) -> None (?)
@@ -1055,9 +1055,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2278\r\n
+  s>     Content-Length: 2301\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
   sending capabilities command
   s> setsockopt(6, 1, 1) -> None (?)
--- a/tests/unwrap-message-id.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/unwrap-message-id.py	Mon Mar 09 10:18:40 2020 -0700
@@ -1,6 +1,8 @@
 from __future__ import absolute_import, print_function
 
-import re
 import sys
 
-print(re.sub(r"(?<=Message-Id:) \n ", " ", sys.stdin.read()), end="")
+for line in sys.stdin:
+    if line.lower() in ("message-id: \n", "in-reply-to: \n"):
+        line = line[:-2]
+    print(line, end="")
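
Editorial note (not part of the changeset): the rewritten helper above unfolds wrapped Message-Id:/In-Reply-To: headers line by line instead of with a regex, by trimming the trailing " \n" from a bare header line so the folded continuation joins it. A small usage sketch of the same loop on an in-memory sample (the sample header is illustrative only):

    # editorial sketch: the same line-based unwrapping applied to a sample header
    import io

    sample = "Message-Id: \n <patch-1@example.com>\nSubject: x\n"
    out = []
    for line in io.StringIO(sample):
        if line.lower() in ("message-id: \n", "in-reply-to: \n"):
            line = line[:-2]  # drop the trailing " \n" so the folded value joins it
        out.append(line)
    assert "".join(out) == "Message-Id: <patch-1@example.com>\nSubject: x\n"
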
--- a/tests/wireprotosimplecache.py	Mon Mar 09 01:11:59 2020 +0100
+++ b/tests/wireprotosimplecache.py	Mon Mar 09 10:18:40 2020 -0700
@@ -116,7 +116,7 @@
             redirectable = False
         else:
             clienttargets = set(self.redirecttargets)
-            ourtargets = set(t[b'name'] for t in loadredirecttargets(self.ui))
+            ourtargets = {t[b'name'] for t in loadredirecttargets(self.ui)}
 
             # We only ever redirect to a single target (for now). So we don't
             # need to store which target matched.