changeset 44240:15eb90965a00

merge with stable
author Augie Fackler <augie@google.com>
date Mon, 03 Feb 2020 11:51:52 -0500
parents 830eae18b2f3 (diff) c002d504ff11 (current diff)
children aea79f41ee55
files contrib/packaging/inno/requirements.txt mercurial/commands.py setup.py
diffstat 75 files changed, 2496 insertions(+), 1500 deletions(-)
--- a/Makefile	Mon Feb 03 11:07:34 2020 -0500
+++ b/Makefile	Mon Feb 03 11:51:52 2020 -0500
@@ -64,6 +64,7 @@
 	$(MAKE) -C doc
 
 cleanbutpackages:
+	rm -f hg.exe
 	-$(PYTHON) setup.py clean --all # ignore errors from this command
 	find contrib doc hgext hgext3rd i18n mercurial tests hgdemandimport \
 		\( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';'
--- a/black.toml	Mon Feb 03 11:07:34 2020 -0500
+++ b/black.toml	Mon Feb 03 11:51:52 2020 -0500
@@ -9,7 +9,6 @@
 | \.mypy_cache/
 | \.venv/
 | mercurial/thirdparty/
-| contrib/python-zstandard/
 '''
 skip-string-normalization = true
 quiet = true
--- a/contrib/examples/fix.hgrc	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/examples/fix.hgrc	Mon Feb 03 11:51:52 2020 -0500
@@ -6,7 +6,7 @@
 rustfmt:pattern = set:**.rs
 
 black:command = black --config=black.toml -
-black:pattern = set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"
+black:pattern = set:**.py - mercurial/thirdparty/**
 
 # Mercurial doesn't have any Go code, but if we did this is how we
 # would configure `hg fix` for Go:
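
With the python-zstandard exclusions dropped from black.toml and fix.hgrc
above, a formatter run along these lines (a sketch of the standard tooling,
not a command recorded in this changeset) now covers the vendored code too,
which is consistent with the mechanical line-wrapping changes to
contrib/python-zstandard/ in the rest of this diff:

  black --config=black.toml contrib/python-zstandard/
  # or, via the fix extension:
  hg fix --working-dir
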
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/pyoxidizer.bzl	Mon Feb 03 11:51:52 2020 -0500
@@ -0,0 +1,57 @@
+# Instructions:
+#
+# 1. cargo install --version 0.5.0 pyoxidizer
+# 2. cd /path/to/hg
+# 3. pyoxidizer build --path contrib/packaging [--release]
+# 4. Run build/pyoxidizer/<arch>/<debug|release>/app/hg
+#
+# If you need to build again, remove the build/lib.* and build/temp.*
+# directories first; otherwise PyOxidizer fails to pick up C extensions.
+# This is a bug in PyOxidizer.
+
+ROOT = CWD + "/../.."
+
+set_build_path(ROOT + "/build/pyoxidizer")
+
+def make_exe():
+    dist = default_python_distribution()
+
+    code = "import hgdemandimport; hgdemandimport.enable(); from mercurial import dispatch; dispatch.run()"
+
+    config = PythonInterpreterConfig(
+        raw_allocator = "system",
+        run_eval = code,
+        # We need this to make resourceutil happy, since it looks for sys.frozen.
+        sys_frozen = True,
+    )
+
+    exe = dist.to_python_executable(
+        name = "hg",
+        config = config,
+    )
+
+    # Use setup.py install to build Mercurial and collect Python resources to
+    # embed in the executable.
+    resources = dist.setup_py_install(ROOT)
+    exe.add_python_resources(resources)
+
+    return exe
+
+def make_install(exe):
+    m = FileManifest()
+
+    # `hg` goes in root directory.
+    m.add_python_resource(".", exe)
+
+    templates = glob(
+        include=[ROOT + "/mercurial/templates/**/*"],
+        strip_prefix = ROOT + "/mercurial/",
+    )
+    m.add_manifest(templates)
+
+    return m
+
+register_target("exe", make_exe)
+register_target("app", make_install, depends = ["exe"], default = True)
+
+resolve_targets()
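
For context, the run_eval string in make_exe() above is the code the
embedded interpreter evaluates at startup. Outside PyOxidizer, the
equivalent entry point would look like this (a minimal sketch, assuming a
Mercurial checkout on sys.path):

  import hgdemandimport
  hgdemandimport.enable()  # lazy imports keep hg startup fast
  from mercurial import dispatch
  dispatch.run()  # parse sys.argv and run the requested hg command

sys_frozen = True is set because resourceutil checks sys.frozen when
locating bundled resources, as noted in the config above.
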
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/phab-clean.py	Mon Feb 03 11:51:52 2020 -0500
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+#
+# A small script to automatically reject idle Diffs
+#
+# You need to set the PHABBOT_USER and PHABBOT_TOKEN environment variables for authentication.
+from __future__ import absolute_import, print_function
+
+import datetime
+import os
+import sys
+
+import phabricator
+
+MESSAGE = """There seems to have been no activities on this Diff for the past 3 Months.
+
+By policy, we are automatically moving it out of the `need-review` state.
+
+Please move it back to `need-review` without hesitation if this Diff should still be discussed.
+
+:baymax:need-review-idle:
+"""
+
+
+PHAB_URL = "https://phab.mercurial-scm.org/api/"
+USER = os.environ.get("PHABBOT_USER", "baymax")
+TOKEN = os.environ.get("PHABBOT_TOKEN")
+
+
+NOW = datetime.datetime.now()
+
+# 3 months in seconds (approximated as 3 * 30 days)
+DELAY = 60 * 60 * 24 * 30 * 3
+
+
+def get_all_diff(phab):
+    """Fetch all the diff that the need review"""
+    return phab.differential.query(
+        status="status-needs-review",
+        order="order-modified",
+        paths=[('HG', None)],
+    )
+
+
+def filter_diffs(diffs, older_than):
+    """filter diffs to only keep the one unmodified sin <older_than> seconds"""
+    olds = []
+    for d in diffs:
+        modified = int(d['dateModified'])
+        modified = datetime.datetime.fromtimestamp(modified)
+        d["idleFor"] = idle_for = NOW - modified
+        if idle_for.total_seconds() > older_than:
+            olds.append(d)
+    return olds
+
+
+def nudge_diff(phab, diff):
+    """Comment on the idle diff and reject it"""
+    diff_id = int(diff['id'])
+    phab.differential.createcomment(
+        revision_id=diff_id, message=MESSAGE, action="reject"
+    )
+
+
+if not USER:
+    print(
+        "not user specified please set PHABBOT_USER and PHABBOT_TOKEN",
+        file=sys.stderr,
+    )
+elif not TOKEN:
+    print(
+        "not api-token specified please set PHABBOT_USER and PHABBOT_TOKEN",
+        file=sys.stderr,
+    )
+    sys.exit(1)
+
+phab = phabricator.Phabricator(USER, host=PHAB_URL, token=TOKEN)
+phab.connect()
+phab.update_interfaces()
+print('Hello "%s".' % phab.user.whoami()['realName'])
+
+diffs = get_all_diff(phab)
+print("Found %d Diffs" % len(diffs))
+olds = filter_diffs(diffs, DELAY)
+print("Found %d old Diffs" % len(olds))
+for d in olds:
+    diff_id = d['id']
+    status = d['statusName']
+    modified = int(d['dateModified'])
+    idle_for = d["idleFor"]
+    msg = 'nudging D%s in "%s" state for %s'
+    print(msg % (diff_id, status, idle_for))
+    # comment out the next line to do a dry run without affecting phab
+    nudge_diff(phab, d)
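
A minimal invocation of the script above (a sketch; the token value is a
placeholder, not a real Conduit token):

  PHABBOT_TOKEN=api-xxxxxxxxxxxxxxxx python contrib/phab-clean.py

PHABBOT_USER defaults to "baymax", so only the token is strictly required.
The script then lists every Diff in the need-review state that has been
unmodified for longer than DELAY (three months) and rejects each one with
MESSAGE.
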
--- a/contrib/python-zstandard/make_cffi.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/make_cffi.py	Mon Feb 03 11:51:52 2020 -0500
@@ -52,7 +52,8 @@
 
 # Headers whose preprocessed output will be fed into cdef().
 HEADERS = [
-    os.path.join(HERE, "zstd", *p) for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
+    os.path.join(HERE, "zstd", *p)
+    for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
 ]
 
 INCLUDE_DIRS = [
@@ -139,7 +140,9 @@
         env = dict(os.environ)
         if getattr(compiler, "_paths", None):
             env["PATH"] = compiler._paths
-        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE, env=env)
+        process = subprocess.Popen(
+            args + [input_file], stdout=subprocess.PIPE, env=env
+        )
         output = process.communicate()[0]
         ret = process.poll()
         if ret:
--- a/contrib/python-zstandard/setup.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/setup.py	Mon Feb 03 11:51:52 2020 -0500
@@ -87,7 +87,9 @@
         break
 
 if not version:
-    raise Exception("could not resolve package version; " "this should never happen")
+    raise Exception(
+        "could not resolve package version; " "this should never happen"
+    )
 
 setup(
     name="zstandard",
--- a/contrib/python-zstandard/setup_zstd.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/setup_zstd.py	Mon Feb 03 11:51:52 2020 -0500
@@ -138,12 +138,16 @@
     if not system_zstd:
         sources.update([os.path.join(actual_root, p) for p in zstd_sources])
         if support_legacy:
-            sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy])
+            sources.update(
+                [os.path.join(actual_root, p) for p in zstd_sources_legacy]
+            )
     sources = list(sources)
 
     include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
     if not system_zstd:
-        include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes])
+        include_dirs.update(
+            [os.path.join(actual_root, d) for d in zstd_includes]
+        )
         if support_legacy:
             include_dirs.update(
                 [os.path.join(actual_root, d) for d in zstd_includes_legacy]
--- a/contrib/python-zstandard/tests/common.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/common.py	Mon Feb 03 11:51:52 2020 -0500
@@ -50,7 +50,9 @@
         os.environ.update(old_env)
 
     if mod.backend != "cffi":
-        raise Exception("got the zstandard %s backend instead of cffi" % mod.backend)
+        raise Exception(
+            "got the zstandard %s backend instead of cffi" % mod.backend
+        )
 
     # If CFFI version is available, dynamically construct test methods
     # that use it.
@@ -84,7 +86,9 @@
                 fn.__func__.func_defaults,
                 fn.__func__.func_closure,
             )
-            new_method = types.UnboundMethodType(new_fn, fn.im_self, fn.im_class)
+            new_method = types.UnboundMethodType(
+                new_fn, fn.im_self, fn.im_class
+            )
 
         setattr(cls, name, new_method)
 
@@ -194,4 +198,6 @@
     expensive_settings = hypothesis.settings(deadline=None, max_examples=10000)
     hypothesis.settings.register_profile("expensive", expensive_settings)
 
-    hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))
+    hypothesis.settings.load_profile(
+        os.environ.get("HYPOTHESIS_PROFILE", "default")
+    )
--- a/contrib/python-zstandard/tests/test_buffer_util.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_buffer_util.py	Mon Feb 03 11:51:52 2020 -0500
@@ -67,7 +67,8 @@
             self.skipTest("BufferWithSegments not available")
 
         b = zstd.BufferWithSegments(
-            b"foofooxfooxy", b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)])
+            b"foofooxfooxy",
+            b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)]),
         )
         self.assertEqual(len(b), 3)
         self.assertEqual(b.size, 12)
@@ -83,17 +84,23 @@
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegex(ValueError, "must pass at least 1 argument"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass at least 1 argument"
+        ):
             zstd.BufferWithSegmentsCollection()
 
     def test_argument_validation(self):
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(None)
 
-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(
                 zstd.BufferWithSegments(b"foo", ss.pack(0, 3)), None
             )
--- a/contrib/python-zstandard/tests/test_compressor.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_compressor.py	Mon Feb 03 11:51:52 2020 -0500
@@ -24,7 +24,9 @@
 
 
 def multithreaded_chunk_size(level, source_size=0):
-    params = zstd.ZstdCompressionParameters.from_level(level, source_size=source_size)
+    params = zstd.ZstdCompressionParameters.from_level(
+        level, source_size=source_size
+    )
 
     return 1 << (params.window_log + 2)
 
@@ -86,7 +88,9 @@
 
         # This matches the test for read_to_iter() below.
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        result = cctx.compress(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o")
+        result = cctx.compress(
+            b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o"
+        )
         self.assertEqual(
             result,
             b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00"
@@ -99,7 +103,9 @@
         result = cctx.compress(b"foo" * 256)
 
     def test_no_magic(self):
-        params = zstd.ZstdCompressionParameters.from_level(1, format=zstd.FORMAT_ZSTD1)
+        params = zstd.ZstdCompressionParameters.from_level(
+            1, format=zstd.FORMAT_ZSTD1
+        )
         cctx = zstd.ZstdCompressor(compression_params=params)
         magic = cctx.compress(b"foobar")
 
@@ -223,7 +229,8 @@
 
         self.assertEqual(
             result,
-            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00" b"\x66\x6f\x6f",
+            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00"
+            b"\x66\x6f\x6f",
         )
 
     def test_multithreaded_compression_params(self):
@@ -234,7 +241,9 @@
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, 3)
 
-        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f")
+        self.assertEqual(
+            result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f"
+        )
 
 
 @make_cffi
@@ -347,7 +356,9 @@
         )
         self.assertEqual(cobj.compress(b"bar"), b"")
         # 3 byte header plus content.
-        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar")
+        self.assertEqual(
+            cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar"
+        )
         self.assertEqual(cobj.flush(), b"\x01\x00\x00")
 
     def test_flush_empty_block(self):
@@ -445,7 +456,9 @@
         self.assertEqual(int(r), 0)
         self.assertEqual(w, 9)
 
-        self.assertEqual(dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
+        self.assertEqual(
+            dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00"
+        )
 
     def test_large_data(self):
         source = io.BytesIO()
@@ -478,7 +491,9 @@
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         cctx.copy_stream(source, with_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )
 
         no_params = zstd.get_frame_parameters(no_checksum.getvalue())
         with_params = zstd.get_frame_parameters(with_checksum.getvalue())
@@ -585,7 +600,9 @@
         cctx = zstd.ZstdCompressor()
 
         with cctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass
 
@@ -744,7 +761,9 @@
         source = io.BytesIO(b"foobar")
 
         with cctx.stream_reader(source, size=2) as reader:
-            with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+            with self.assertRaisesRegex(
+                zstd.ZstdError, "Src size is incorrect"
+            ):
                 reader.read(10)
 
         # Try another compression operation.
@@ -1126,7 +1145,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertTrue(with_params.has_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )
 
     def test_write_content_size(self):
         no_size = NonClosingBytesIO()
@@ -1145,7 +1166,9 @@
 
         # Declaring size will write the header.
         with_size = NonClosingBytesIO()
-        with cctx.stream_writer(with_size, size=len(b"foobar" * 256)) as compressor:
+        with cctx.stream_writer(
+            with_size, size=len(b"foobar" * 256)
+        ) as compressor:
             self.assertEqual(compressor.write(b"foobar" * 256), 0)
 
         no_params = zstd.get_frame_parameters(no_size.getvalue())
@@ -1191,7 +1214,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertFalse(with_params.has_checksum)
 
-        self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4)
+        self.assertEqual(
+            len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4
+        )
 
     def test_memory_size(self):
         cctx = zstd.ZstdCompressor(level=3)
@@ -1337,7 +1362,9 @@
         for chunk in cctx.read_to_iter(b"foobar"):
             pass
 
-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             for chunk in cctx.read_to_iter(True):
                 pass
 
@@ -1513,7 +1540,9 @@
 
         dctx = zstd.ZstdDecompressor()
 
-        self.assertEqual(dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24))
+        self.assertEqual(
+            dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24)
+        )
 
     def test_small_chunk_size(self):
         cctx = zstd.ZstdCompressor()
@@ -1533,7 +1562,8 @@
 
         dctx = zstd.ZstdDecompressor()
         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=10000), b"foo" * 1024
+            dctx.decompress(b"".join(chunks), max_output_size=10000),
+            b"foo" * 1024,
         )
 
     def test_input_types(self):
@@ -1602,7 +1632,8 @@
         list(chunker.finish())
 
         with self.assertRaisesRegex(
-            zstd.ZstdError, r"cannot call compress\(\) after compression finished"
+            zstd.ZstdError,
+            r"cannot call compress\(\) after compression finished",
         ):
             list(chunker.compress(b"foo"))
 
@@ -1644,7 +1675,9 @@
         with self.assertRaises(TypeError):
             cctx.multi_compress_to_buffer((1, 2))
 
-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             cctx.multi_compress_to_buffer([u"foo"])
 
     def test_empty_input(self):
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Mon Feb 03 11:51:52 2020 -0500
@@ -28,9 +28,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read(self, original, level, source_read_size, read_size):
+    def test_stream_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -58,9 +62,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -155,9 +163,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_readinto(self, original, level, source_read_size, read_size):
+    def test_stream_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
@@ -184,9 +196,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_readinto(self, original, level, source_read_size, read_size):
+    def test_buffer_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
@@ -285,9 +301,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read1(self, original, level, source_read_size, read_size):
+    def test_stream_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -315,9 +335,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read1(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -412,7 +436,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_stream_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -446,7 +472,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_buffer_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -576,7 +604,9 @@
         read_size=strategies.integers(min_value=1, max_value=1048576),
         write_size=strategies.integers(min_value=1, max_value=1048576),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
@@ -585,7 +615,11 @@
         dest = io.BytesIO()
 
         cctx.copy_stream(
-            source, dest, size=len(original), read_size=read_size, write_size=write_size
+            source,
+            dest,
+            size=len(original),
+            read_size=read_size,
+            write_size=write_size,
         )
 
         self.assertEqual(dest.getvalue(), ref_frame)
@@ -675,7 +709,9 @@
         decompressed_chunks.append(dobj.decompress(chunk))
 
         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
@@ -690,7 +726,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refcctx = zstd.ZstdCompressor(level=level)
         ref_frame = refcctx.compress(original)
 
@@ -699,7 +737,10 @@
         cctx = zstd.ZstdCompressor(level=level)
         chunks = list(
             cctx.read_to_iter(
-                source, size=len(original), read_size=read_size, write_size=write_size
+                source,
+                size=len(original),
+                read_size=read_size,
+                write_size=write_size,
             )
         )
 
@@ -710,7 +751,9 @@
 class TestCompressor_multi_compress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
@@ -776,7 +819,8 @@
         dctx = zstd.ZstdDecompressor()
 
         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=len(original)), original
+            dctx.decompress(b"".join(chunks), max_output_size=len(original)),
+            original,
         )
 
         self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1]))
@@ -794,7 +838,9 @@
         input_sizes=strategies.data(),
         flushes=strategies.data(),
     )
-    def test_flush_block(self, original, level, chunk_size, input_sizes, flushes):
+    def test_flush_block(
+        self, original, level, chunk_size, input_sizes, flushes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         chunker = cctx.chunker(chunk_size=chunk_size)
 
@@ -830,7 +876,9 @@
         decompressed_chunks.append(dobj.decompress(b"".join(chunks)))
 
         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
--- a/contrib/python-zstandard/tests/test_data_structures.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Mon Feb 03 11:51:52 2020 -0500
@@ -65,7 +65,9 @@
         p = zstd.ZstdCompressionParameters(threads=4)
         self.assertEqual(p.threads, 4)
 
-        p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576, overlap_log=6)
+        p = zstd.ZstdCompressionParameters(
+            threads=2, job_size=1048576, overlap_log=6
+        )
         self.assertEqual(p.threads, 2)
         self.assertEqual(p.job_size, 1048576)
         self.assertEqual(p.overlap_log, 6)
@@ -128,7 +130,9 @@
         with self.assertRaisesRegex(
             ValueError, "cannot specify both ldm_hash_rate_log"
         ):
-            zstd.ZstdCompressionParameters(ldm_hash_rate_log=8, ldm_hash_every_log=4)
+            zstd.ZstdCompressionParameters(
+                ldm_hash_rate_log=8, ldm_hash_every_log=4
+            )
 
         p = zstd.ZstdCompressionParameters(ldm_hash_rate_log=8)
         self.assertEqual(p.ldm_hash_every_log, 8)
@@ -137,7 +141,9 @@
         self.assertEqual(p.ldm_hash_every_log, 16)
 
     def test_overlap_log(self):
-        with self.assertRaisesRegex(ValueError, "cannot specify both overlap_log"):
+        with self.assertRaisesRegex(
+            ValueError, "cannot specify both overlap_log"
+        ):
             zstd.ZstdCompressionParameters(overlap_log=1, overlap_size_log=9)
 
         p = zstd.ZstdCompressionParameters(overlap_log=2)
@@ -169,10 +175,14 @@
                     zstd.get_frame_parameters(u"foobarbaz")
 
     def test_invalid_input_sizes(self):
-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(b"")
 
-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(zstd.FRAME_HEADER)
 
     def test_invalid_frame(self):
@@ -201,7 +211,9 @@
         self.assertTrue(params.has_checksum)
 
         # Upper 2 bits indicate content size.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x40\x00\xff\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x40\x00\xff\x00"
+        )
         self.assertEqual(params.content_size, 511)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 0)
@@ -215,7 +227,9 @@
         self.assertFalse(params.has_checksum)
 
         # Set multiple things.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00"
+        )
         self.assertEqual(params.content_size, 272)
         self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 15)
--- a/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Mon Feb 03 11:51:52 2020 -0500
@@ -23,7 +23,9 @@
 s_chainlog = strategies.integers(
     min_value=zstd.CHAINLOG_MIN, max_value=zstd.CHAINLOG_MAX
 )
-s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX)
+s_hashlog = strategies.integers(
+    min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX
+)
 s_searchlog = strategies.integers(
     min_value=zstd.SEARCHLOG_MIN, max_value=zstd.SEARCHLOG_MAX
 )
@@ -61,7 +63,14 @@
         s_strategy,
     )
     def test_valid_init(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         zstd.ZstdCompressionParameters(
             window_log=windowlog,
@@ -83,7 +92,14 @@
         s_strategy,
     )
     def test_estimated_compression_context_size(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         if minmatch == zstd.MINMATCH_MIN and strategy in (
             zstd.STRATEGY_FAST,
--- a/contrib/python-zstandard/tests/test_decompressor.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Mon Feb 03 11:51:52 2020 -0500
@@ -170,11 +170,15 @@
             dctx.decompress(compressed, max_output_size=len(source) - 1)
 
         # Input size + 1 works
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) + 1)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) + 1
+        )
         self.assertEqual(decompressed, source)
 
         # A much larger buffer works.
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) * 64)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) * 64
+        )
         self.assertEqual(decompressed, source)
 
     def test_stupidly_large_output_buffer(self):
@@ -237,7 +241,8 @@
         dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
 
         with self.assertRaisesRegex(
-            zstd.ZstdError, "decompression error: Frame requires too much memory"
+            zstd.ZstdError,
+            "decompression error: Frame requires too much memory",
         ):
             dctx.decompress(frame, max_output_size=len(source))
 
@@ -291,7 +296,9 @@
         self.assertEqual(w, len(source.getvalue()))
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
 
         dest = OpCountingBytesIO()
         dctx = zstd.ZstdDecompressor()
@@ -309,7 +316,9 @@
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass
 
@@ -474,7 +483,9 @@
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(frame) as reader:
-            with self.assertRaisesRegex(ValueError, "cannot seek to negative position"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot seek to negative position"
+            ):
                 reader.seek(-1, os.SEEK_SET)
 
             reader.read(1)
@@ -490,7 +501,8 @@
                 reader.seek(-1, os.SEEK_CUR)
 
             with self.assertRaisesRegex(
-                ValueError, "zstd decompression streams cannot be seeked with SEEK_END"
+                ValueError,
+                "zstd decompression streams cannot be seeked with SEEK_END",
             ):
                 reader.seek(0, os.SEEK_END)
 
@@ -743,7 +755,9 @@
 
     def test_read_lines(self):
         cctx = zstd.ZstdCompressor()
-        source = b"\n".join(("line %d" % i).encode("ascii") for i in range(1024))
+        source = b"\n".join(
+            ("line %d" % i).encode("ascii") for i in range(1024)
+        )
 
         frame = cctx.compress(source)
 
@@ -821,7 +835,9 @@
         dobj = dctx.decompressobj()
         dobj.decompress(data)
 
-        with self.assertRaisesRegex(zstd.ZstdError, "cannot use a decompressobj"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "cannot use a decompressobj"
+        ):
             dobj.decompress(data)
             self.assertIsNone(dobj.flush())
 
@@ -1124,7 +1140,9 @@
         # Buffer protocol works.
         dctx.read_to_iter(b"foobar")
 
-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             b"".join(dctx.read_to_iter(True))
 
     def test_empty_input(self):
@@ -1226,7 +1244,9 @@
         decompressed = b"".join(chunks)
         self.assertEqual(decompressed, source.getvalue())
 
-    @unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+    @unittest.skipUnless(
+        "ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set"
+    )
     def test_large_input(self):
         bytes = list(struct.Struct(">B").pack(i) for i in range(256))
         compressed = NonClosingBytesIO()
@@ -1241,13 +1261,16 @@
                     len(compressed.getvalue())
                     > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
                 )
-                have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+                have_raw = (
+                    input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+                )
                 if have_compressed and have_raw:
                     break
 
         compressed = io.BytesIO(compressed.getvalue())
         self.assertGreater(
-            len(compressed.getvalue()), zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+            len(compressed.getvalue()),
+            zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
         )
 
         dctx = zstd.ZstdDecompressor()
@@ -1303,7 +1326,9 @@
         self.assertEqual(streamed, source.getvalue())
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
         dctx = zstd.ZstdDecompressor()
         for chunk in dctx.read_to_iter(source, read_size=1, write_size=1):
             self.assertEqual(len(chunk), 1)
@@ -1355,10 +1380,14 @@
         ):
             dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegex(ValueError, "chunk 0 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 0 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )
 
         with self.assertRaisesRegex(
             ValueError, "chunk 0 missing content size in frame"
@@ -1389,10 +1418,14 @@
         ):
             dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegex(ValueError, "chunk 1 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 1 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([initial, b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )
 
         with self.assertRaisesRegex(
             ValueError, "chunk 1 missing content size in frame"
@@ -1400,7 +1433,9 @@
             dctx.decompress_content_dict_chain([initial, no_size])
 
         # Corrupt second frame.
-        cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b"foo" * 64))
+        cctx = zstd.ZstdCompressor(
+            dict_data=zstd.ZstdCompressionDict(b"foo" * 64)
+        )
         frame = cctx.compress(b"bar" * 64)
         frame = frame[0:12] + frame[15:]
 
@@ -1447,7 +1482,9 @@
         with self.assertRaises(TypeError):
             dctx.multi_decompress_to_buffer((1, 2))
 
-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             dctx.multi_decompress_to_buffer([u"foo"])
 
         with self.assertRaisesRegex(
@@ -1491,7 +1528,9 @@
         if not hasattr(dctx, "multi_decompress_to_buffer"):
             self.skipTest("multi_decompress_to_buffer not available")
 
-        result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+        result = dctx.multi_decompress_to_buffer(
+            frames, decompressed_sizes=sizes
+        )
 
         self.assertEqual(len(result), len(frames))
         self.assertEqual(result.size(), sum(map(len, original)))
@@ -1582,10 +1621,15 @@
         # And a manual mode.
         b = b"".join([frames[0].tobytes(), frames[1].tobytes()])
         b1 = zstd.BufferWithSegments(
-            b, struct.pack("=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1]))
+            b,
+            struct.pack(
+                "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
+            ),
         )
 
-        b = b"".join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
+        b = b"".join(
+            [frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()]
+        )
         b2 = zstd.BufferWithSegments(
             b,
             struct.pack(
--- a/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Mon Feb 03 11:51:52 2020 -0500
@@ -196,7 +196,9 @@
         streaming=strategies.booleans(),
         source_read_size=strategies.integers(1, 1048576),
     )
-    def test_stream_source_readall(self, original, level, streaming, source_read_size):
+    def test_stream_source_readall(
+        self, original, level, streaming, source_read_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -398,7 +400,9 @@
         write_size=strategies.integers(min_value=1, max_value=8192),
         input_sizes=strategies.data(),
     )
-    def test_write_size_variance(self, original, level, write_size, input_sizes):
+    def test_write_size_variance(
+        self, original, level, write_size, input_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -433,7 +437,9 @@
         read_size=strategies.integers(min_value=1, max_value=8192),
         write_size=strategies.integers(min_value=1, max_value=8192),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -441,7 +447,9 @@
         dest = io.BytesIO()
 
         dctx = zstd.ZstdDecompressor()
-        dctx.copy_stream(source, dest, read_size=read_size, write_size=write_size)
+        dctx.copy_stream(
+            source, dest, read_size=read_size, write_size=write_size
+        )
 
         self.assertEqual(dest.getvalue(), original)
 
@@ -490,11 +498,14 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         write_size=strategies.integers(
-            min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+            min_value=1,
+            max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
         ),
         chunk_sizes=strategies.data(),
     )
-    def test_random_output_sizes(self, original, level, write_size, chunk_sizes):
+    def test_random_output_sizes(
+        self, original, level, write_size, chunk_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -524,7 +535,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -532,7 +545,9 @@
 
         dctx = zstd.ZstdDecompressor()
         chunks = list(
-            dctx.read_to_iter(source, read_size=read_size, write_size=write_size)
+            dctx.read_to_iter(
+                source, read_size=read_size, write_size=write_size
+            )
         )
 
         self.assertEqual(b"".join(chunks), original)
@@ -542,7 +557,9 @@
 class TestDecompressor_multi_decompress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Mon Feb 03 11:51:52 2020 -0500
@@ -51,11 +51,15 @@
         self.assertEqual(d.d, 16)
 
     def test_set_dict_id(self):
-        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), k=64, d=16, dict_id=42
+        )
         self.assertEqual(d.dict_id(), 42)
 
     def test_optimize(self):
-        d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1, d=16)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), threads=-1, steps=1, d=16
+        )
 
         # This varies by platform.
         self.assertIn(d.k, (50, 2000))
@@ -71,10 +75,14 @@
     def test_bad_precompute_compress(self):
         d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)
 
-        with self.assertRaisesRegex(ValueError, "must specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must specify one of level or "
+        ):
             d.precompute_compress()
 
-        with self.assertRaisesRegex(ValueError, "must only specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must only specify one of level or "
+        ):
             d.precompute_compress(
                 level=3, compression_params=zstd.CompressionParameters()
             )
@@ -88,5 +96,7 @@
         d = zstd.ZstdCompressionDict(
             b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_FULLDICT
         )
-        with self.assertRaisesRegex(zstd.ZstdError, "unable to precompute dictionary"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "unable to precompute dictionary"
+        ):
             d.precompute_compress(level=1)
--- a/contrib/python-zstandard/zstandard/cffi.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/contrib/python-zstandard/zstandard/cffi.py	Mon Feb 03 11:51:52 2020 -0500
@@ -299,10 +299,14 @@
         _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
         _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log)
         _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match)
-        _set_compression_parameter(params, lib.ZSTD_c_targetLength, target_length)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_targetLength, target_length
+        )
 
         if strategy != -1 and compression_strategy != -1:
-            raise ValueError("cannot specify both compression_strategy and strategy")
+            raise ValueError(
+                "cannot specify both compression_strategy and strategy"
+            )
 
         if compression_strategy != -1:
             strategy = compression_strategy
@@ -313,12 +317,16 @@
         _set_compression_parameter(
             params, lib.ZSTD_c_contentSizeFlag, write_content_size
         )
-        _set_compression_parameter(params, lib.ZSTD_c_checksumFlag, write_checksum)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_checksumFlag, write_checksum
+        )
         _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
         _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)
 
         if overlap_log != -1 and overlap_size_log != -1:
-            raise ValueError("cannot specify both overlap_log and overlap_size_log")
+            raise ValueError(
+                "cannot specify both overlap_log and overlap_size_log"
+            )
 
         if overlap_size_log != -1:
             overlap_log = overlap_size_log
@@ -326,12 +334,16 @@
             overlap_log = 0
 
         _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
-        _set_compression_parameter(params, lib.ZSTD_c_forceMaxWindow, force_max_window)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_forceMaxWindow, force_max_window
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
         )
         _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
-        _set_compression_parameter(params, lib.ZSTD_c_ldmMinMatch, ldm_min_match)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmMinMatch, ldm_min_match
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
         )
@@ -346,7 +358,9 @@
         elif ldm_hash_rate_log == -1:
             ldm_hash_rate_log = 0
 
-        _set_compression_parameter(params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log
+        )
 
     @property
     def format(self):
@@ -354,7 +368,9 @@
 
     @property
     def compression_level(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_compressionLevel)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_compressionLevel
+        )
 
     @property
     def window_log(self):
@@ -386,7 +402,9 @@
 
     @property
     def write_content_size(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_contentSizeFlag)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_contentSizeFlag
+        )
 
     @property
     def write_checksum(self):
@@ -410,7 +428,9 @@
 
     @property
     def force_max_window(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_forceMaxWindow)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_forceMaxWindow
+        )
 
     @property
     def enable_ldm(self):
@@ -428,11 +448,15 @@
 
     @property
     def ldm_bucket_size_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmBucketSizeLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmBucketSizeLog
+        )
 
     @property
     def ldm_hash_rate_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashRateLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmHashRateLog
+        )
 
     @property
     def ldm_hash_every_log(self):
@@ -457,7 +481,8 @@
     zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to set compression context parameter: %s" % _zstd_error(zresult)
+            "unable to set compression context parameter: %s"
+            % _zstd_error(zresult)
         )
 
 
@@ -467,14 +492,17 @@
     zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to get compression context parameter: %s" % _zstd_error(zresult)
+            "unable to get compression context parameter: %s"
+            % _zstd_error(zresult)
         )
 
     return result[0]
 
 
 class ZstdCompressionWriter(object):
-    def __init__(self, compressor, writer, source_size, write_size, write_return_read):
+    def __init__(
+        self, compressor, writer, source_size, write_size, write_return_read
+    ):
         self._compressor = compressor
         self._writer = writer
         self._write_size = write_size
@@ -491,7 +519,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
     def __enter__(self):
         if self._closed:
@@ -595,13 +625,20 @@
 
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_compressStream2(
-                self._compressor._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                self._compressor._cctx,
+                out_buffer,
+                in_buffer,
+                lib.ZSTD_e_continue,
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -637,10 +674,14 @@
                 self._compressor._cctx, out_buffer, in_buffer, flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -672,7 +713,9 @@
                 self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
@@ -681,7 +724,10 @@
         return b"".join(chunks)
 
     def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
-        if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+        if flush_mode not in (
+            COMPRESSOBJ_FLUSH_FINISH,
+            COMPRESSOBJ_FLUSH_BLOCK,
+        ):
             raise ValueError("flush mode not recognized")
 
         if self._finished:
@@ -768,7 +814,9 @@
                 self._in.pos = 0
 
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos == self._out.size:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -780,7 +828,8 @@
 
         if self._in.src != ffi.NULL:
             raise ZstdError(
-                "cannot call flush() before consuming output from " "previous operation"
+                "cannot call flush() before consuming output from "
+                "previous operation"
             )
 
         while True:
@@ -788,7 +837,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -812,7 +863,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -939,7 +992,10 @@
         old_pos = out_buffer.pos
 
         zresult = lib.ZSTD_compressStream2(
-            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_continue
+            self._compressor._cctx,
+            out_buffer,
+            self._in_buffer,
+            lib.ZSTD_e_continue,
         )
 
         self._bytes_compressed += out_buffer.pos - old_pos
@@ -997,7 +1053,9 @@
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s", _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
@@ -1102,7 +1160,9 @@
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s", _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
@@ -1170,13 +1230,17 @@
         threads=0,
     ):
         if level > lib.ZSTD_maxCLevel():
-            raise ValueError("level must be less than %d" % lib.ZSTD_maxCLevel())
+            raise ValueError(
+                "level must be less than %d" % lib.ZSTD_maxCLevel()
+            )
 
         if threads < 0:
             threads = _cpu_count()
 
         if compression_params and write_checksum is not None:
-            raise ValueError("cannot define compression_params and " "write_checksum")
+            raise ValueError(
+                "cannot define compression_params and " "write_checksum"
+            )
 
         if compression_params and write_content_size is not None:
             raise ValueError(
@@ -1184,7 +1248,9 @@
             )
 
         if compression_params and write_dict_id is not None:
-            raise ValueError("cannot define compression_params and " "write_dict_id")
+            raise ValueError(
+                "cannot define compression_params and " "write_dict_id"
+            )
 
         if compression_params and threads:
             raise ValueError("cannot define compression_params and threads")
@@ -1201,7 +1267,9 @@
 
             self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
 
-            _set_compression_parameter(self._params, lib.ZSTD_c_compressionLevel, level)
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_compressionLevel, level
+            )
 
             _set_compression_parameter(
                 self._params,
@@ -1210,7 +1278,9 @@
             )
 
             _set_compression_parameter(
-                self._params, lib.ZSTD_c_checksumFlag, 1 if write_checksum else 0
+                self._params,
+                lib.ZSTD_c_checksumFlag,
+                1 if write_checksum else 0,
             )
 
             _set_compression_parameter(
@@ -1218,7 +1288,9 @@
             )
 
             if threads:
-                _set_compression_parameter(self._params, lib.ZSTD_c_nbWorkers, threads)
+                _set_compression_parameter(
+                    self._params, lib.ZSTD_c_nbWorkers, threads
+                )
 
         cctx = lib.ZSTD_createCCtx()
         if cctx == ffi.NULL:
@@ -1237,10 +1309,13 @@
             )
 
     def _setup_cctx(self):
-        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx, self._params)
+        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(
+            self._cctx, self._params
+        )
         if lib.ZSTD_isError(zresult):
             raise ZstdError(
-                "could not set compression parameters: %s" % _zstd_error(zresult)
+                "could not set compression parameters: %s"
+                % _zstd_error(zresult)
             )
 
         dict_data = self._dict_data
@@ -1259,7 +1334,8 @@
 
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "could not load compression dictionary: %s" % _zstd_error(zresult)
+                    "could not load compression dictionary: %s"
+                    % _zstd_error(zresult)
                 )
 
     def memory_size(self):
@@ -1275,7 +1351,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         out_buffer = ffi.new("ZSTD_outBuffer *")
         in_buffer = ffi.new("ZSTD_inBuffer *")
@@ -1307,11 +1385,15 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         cobj = ZstdCompressionObj()
         cobj._out = ffi.new("ZSTD_outBuffer *")
-        cobj._dst_buffer = ffi.new("char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        cobj._dst_buffer = ffi.new(
+            "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        )
         cobj._out.dst = cobj._dst_buffer
         cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
         cobj._out.pos = 0
@@ -1328,7 +1410,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         return ZstdCompressionChunker(self, chunk_size=chunk_size)
 
@@ -1353,7 +1437,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1381,7 +1467,9 @@
                     self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
                 )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -1423,7 +1511,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         return ZstdCompressionReader(self, source, read_size)
 
@@ -1443,7 +1533,9 @@
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
 
-        return ZstdCompressionWriter(self, writer, size, write_size, write_return_read)
+        return ZstdCompressionWriter(
+            self, writer, size, write_size, write_return_read
+        )
 
     write_to = stream_writer
 
@@ -1473,7 +1565,9 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )
 
         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1517,7 +1611,9 @@
                     self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
                 )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd compress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -1596,10 +1692,14 @@
     data_buffer = ffi.from_buffer(data)
     zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
     if lib.ZSTD_isError(zresult):
-        raise ZstdError("cannot get frame parameters: %s" % _zstd_error(zresult))
+        raise ZstdError(
+            "cannot get frame parameters: %s" % _zstd_error(zresult)
+        )
 
     if zresult:
-        raise ZstdError("not enough data for frame parameters; need %d bytes" % zresult)
+        raise ZstdError(
+            "not enough data for frame parameters; need %d bytes" % zresult
+        )
 
     return FrameParameters(params[0])
 
@@ -1611,9 +1711,14 @@
         self.k = k
         self.d = d
 
-        if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT, DICT_TYPE_FULLDICT):
+        if dict_type not in (
+            DICT_TYPE_AUTO,
+            DICT_TYPE_RAWCONTENT,
+            DICT_TYPE_FULLDICT,
+        ):
             raise ValueError(
-                "invalid dictionary load mode: %d; must use " "DICT_TYPE_* constants"
+                "invalid dictionary load mode: %d; must use "
+                "DICT_TYPE_* constants"
             )
 
         self._dict_type = dict_type
@@ -1630,7 +1735,9 @@
 
     def precompute_compress(self, level=0, compression_params=None):
         if level and compression_params:
-            raise ValueError("must only specify one of level or " "compression_params")
+            raise ValueError(
+                "must only specify one of level or " "compression_params"
+            )
 
         if not level and not compression_params:
             raise ValueError("must specify one of level or compression_params")
@@ -1675,7 +1782,9 @@
         if ddict == ffi.NULL:
             raise ZstdError("could not create decompression dict")
 
-        ddict = ffi.gc(ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict))
+        ddict = ffi.gc(
+            ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)
+        )
         self.__dict__["_ddict"] = ddict
 
         return ddict
@@ -1805,7 +1914,9 @@
                 self._decompressor._dctx, out_buffer, in_buffer
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompressor error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompressor error: %s" % _zstd_error(zresult)
+                )
 
             if zresult == 0:
                 self._finished = True
@@ -2105,16 +2216,22 @@
 
         if whence == os.SEEK_SET:
             if pos < 0:
-                raise ValueError("cannot seek to negative position with SEEK_SET")
+                raise ValueError(
+                    "cannot seek to negative position with SEEK_SET"
+                )
 
             if pos < self._bytes_decompressed:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )
 
             read_amount = pos - self._bytes_decompressed
 
         elif whence == os.SEEK_CUR:
             if pos < 0:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )
 
             read_amount = pos
         elif whence == os.SEEK_END:
@@ -2123,7 +2240,9 @@
             )
 
         while read_amount:
-            result = self.read(min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE))
+            result = self.read(
+                min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+            )
 
             if not result:
                 break
@@ -2257,10 +2376,14 @@
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompress error: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 out_buffer.pos = 0
 
@@ -2299,7 +2422,9 @@
 
         data_buffer = ffi.from_buffer(data)
 
-        output_size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
+        output_size = lib.ZSTD_getFrameContentSize(
+            data_buffer, len(data_buffer)
+        )
 
         if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
             raise ZstdError("error determining content size from frame header")
@@ -2307,7 +2432,9 @@
             return b""
         elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
             if not max_output_size:
-                raise ZstdError("could not determine content size in frame header")
+                raise ZstdError(
+                    "could not determine content size in frame header"
+                )
 
             result_buffer = ffi.new("char[]", max_output_size)
             result_size = max_output_size
@@ -2330,7 +2457,9 @@
         if lib.ZSTD_isError(zresult):
             raise ZstdError("decompression error: %s" % _zstd_error(zresult))
         elif zresult:
-            raise ZstdError("decompression error: did not decompress full frame")
+            raise ZstdError(
+                "decompression error: did not decompress full frame"
+            )
         elif output_size and out_buffer.pos != output_size:
             raise ZstdError(
                 "decompression error: decompressed %d bytes; expected %d"
@@ -2346,7 +2475,9 @@
         read_across_frames=False,
     ):
         self._ensure_dctx()
-        return ZstdDecompressionReader(self, source, read_size, read_across_frames)
+        return ZstdDecompressionReader(
+            self, source, read_size, read_across_frames
+        )
 
     def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
         if write_size < 1:
@@ -2421,9 +2552,13 @@
             while in_buffer.pos < in_buffer.size:
                 assert out_buffer.pos == 0
 
-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd decompress error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -2449,7 +2584,9 @@
         if not hasattr(writer, "write"):
             raise ValueError("must pass an object with a write() method")
 
-        return ZstdDecompressionWriter(self, writer, write_size, write_return_read)
+        return ZstdDecompressionWriter(
+            self, writer, write_size, write_return_read
+        )
 
     write_to = stream_writer
 
@@ -2491,7 +2628,9 @@
 
             # Flush all read data to output.
             while in_buffer.pos < in_buffer.size:
-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
                     raise ZstdError(
                         "zstd decompressor error: %s" % _zstd_error(zresult)
@@ -2521,7 +2660,9 @@
         # All chunks should be zstd frames and should have content size set.
         chunk_buffer = ffi.from_buffer(chunk)
         params = ffi.new("ZSTD_frameHeader *")
-        zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+        zresult = lib.ZSTD_getFrameHeader(
+            params, chunk_buffer, len(chunk_buffer)
+        )
         if lib.ZSTD_isError(zresult):
             raise ValueError("chunk 0 is not a valid zstd frame")
         elif zresult:
@@ -2546,7 +2687,9 @@
 
         zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("could not decompress chunk 0: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "could not decompress chunk 0: %s" % _zstd_error(zresult)
+            )
         elif zresult:
             raise ZstdError("chunk 0 did not decompress full frame")
 
@@ -2561,11 +2704,15 @@
                 raise ValueError("chunk %d must be bytes" % i)
 
             chunk_buffer = ffi.from_buffer(chunk)
-            zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+            zresult = lib.ZSTD_getFrameHeader(
+                params, chunk_buffer, len(chunk_buffer)
+            )
             if lib.ZSTD_isError(zresult):
                 raise ValueError("chunk %d is not a valid zstd frame" % i)
             elif zresult:
-                raise ValueError("chunk %d is too small to contain a zstd frame" % i)
+                raise ValueError(
+                    "chunk %d is too small to contain a zstd frame" % i
+                )
 
             if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
                 raise ValueError("chunk %d missing content size in frame" % i)
@@ -2580,7 +2727,9 @@
             in_buffer.size = len(chunk_buffer)
             in_buffer.pos = 0
 
-            zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+            zresult = lib.ZSTD_decompressStream(
+                self._dctx, out_buffer, in_buffer
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "could not decompress chunk %d: %s" % _zstd_error(zresult)
@@ -2597,7 +2746,9 @@
         lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)
 
         if self._max_window_size:
-            zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx, self._max_window_size)
+            zresult = lib.ZSTD_DCtx_setMaxWindowSize(
+                self._dctx, self._max_window_size
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "unable to set max window size: %s" % _zstd_error(zresult)
@@ -2605,11 +2756,14 @@
 
         zresult = lib.ZSTD_DCtx_setFormat(self._dctx, self._format)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("unable to set decoding format: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "unable to set decoding format: %s" % _zstd_error(zresult)
+            )
 
         if self._dict_data and load_dict:
             zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "unable to reference prepared dictionary: %s" % _zstd_error(zresult)
+                    "unable to reference prepared dictionary: %s"
+                    % _zstd_error(zresult)
                 )
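
The hunks above are mechanical reformattings (line wrapping for black) of the
CFFI backend bundled as python-zstandard. As a sanity check of the API this
file implements, a minimal round trip looks like the following (a sketch
assuming the third-party `zstandard` package is importable; the bundled copy
lives under contrib/python-zstandard/):

    import zstandard

    data = b"data to compress" * 1000
    cctx = zstandard.ZstdCompressor(level=3)
    frame = cctx.compress(data)  # one-shot; the frame header records the size

    dctx = zstandard.ZstdDecompressor()
    assert dctx.decompress(frame) == data
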
--- a/hgext/absorb.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/hgext/absorb.py	Mon Feb 03 11:51:52 2020 -0500
@@ -1077,7 +1077,7 @@
             b'i',
             b'interactive',
             None,
-            _(b'interactively select which chunks to apply (EXPERIMENTAL)'),
+            _(b'interactively select which chunks to apply'),
         ),
         (
             b'e',
--- a/hgext/histedit.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/hgext/histedit.py	Mon Feb 03 11:51:52 2020 -0500
@@ -649,7 +649,7 @@
             repo.ui.setconfig(
                 b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
             )
-            stats = mergemod.graft(repo, ctx, ctx.p1(), [b'local', b'histedit'])
+            stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit'])
         finally:
             repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
     return stats
--- a/hgext/lfs/blobstore.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/hgext/lfs/blobstore.py	Mon Feb 03 11:51:52 2020 -0500
@@ -94,15 +94,12 @@
         pass
 
 
-class filewithprogress(object):
+class lfsuploadfile(object):
     """a file-like object that supports __len__ and read.
-
-    Useful to provide progress information for how many bytes are read.
     """
 
-    def __init__(self, fp, callback):
+    def __init__(self, fp):
         self._fp = fp
-        self._callback = callback  # func(readsize)
         fp.seek(0, os.SEEK_END)
         self._len = fp.tell()
         fp.seek(0)
@@ -113,14 +110,12 @@
     def read(self, size):
         if self._fp is None:
             return b''
-        data = self._fp.read(size)
-        if data:
-            if self._callback:
-                self._callback(len(data))
-        else:
+        return self._fp.read(size)
+
+    def close(self):
+        if self._fp is not None:
             self._fp.close()
             self._fp = None
-        return data
 
 
 class local(object):
@@ -495,15 +490,17 @@
                     _(b'detected corrupt lfs object: %s') % oid,
                     hint=_(b'run hg verify'),
                 )
-            request.data = filewithprogress(localstore.open(oid), None)
-            request.get_method = lambda: r'PUT'
-            request.add_header('Content-Type', 'application/octet-stream')
-            request.add_header('Content-Length', len(request.data))
 
         for k, v in headers:
             request.add_header(pycompat.strurl(k), pycompat.strurl(v))
 
         try:
+            if action == b'upload':
+                request.data = lfsuploadfile(localstore.open(oid))
+                request.get_method = lambda: 'PUT'
+                request.add_header('Content-Type', 'application/octet-stream')
+                request.add_header('Content-Length', len(request.data))
+
             with contextlib.closing(self.urlopener.open(request)) as res:
                 contentlength = res.info().get(b"content-length")
                 ui = self.ui  # Shorten debug lines
@@ -545,6 +542,9 @@
             raise LfsRemoteError(
                 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
             )
+        finally:
+            if request.data:
+                request.data.close()
 
     def _batch(self, pointers, localstore, action):
         if action not in [b'upload', b'download']:
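
The replacement of filewithprogress with lfsuploadfile moves request setup
inside the try block so the file handle is always released in the finally
clause. The pattern relies on urllib2-style requests accepting any object
with read() as request.data, with len(request.data) used to set the
Content-Length header explicitly. A self-contained sketch of that shape
(io.BytesIO stands in for localstore.open(oid)):

    import io
    import os

    class uploadfile(object):
        """Length-aware file wrapper, mirroring lfsuploadfile above."""

        def __init__(self, fp):
            self._fp = fp
            fp.seek(0, os.SEEK_END)
            self._len = fp.tell()
            fp.seek(0)

        def __len__(self):
            return self._len

        def read(self, size):
            if self._fp is None:
                return b''
            return self._fp.read(size)

        def close(self):
            if self._fp is not None:
                self._fp.close()
                self._fp = None

    payload = uploadfile(io.BytesIO(b'blob contents'))
    try:
        assert len(payload) == 13
        assert payload.read(4) == b'blob'
    finally:
        payload.close()  # caller owns cleanup, even when the upload fails
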
--- a/hgext/rebase.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/hgext/rebase.py	Mon Feb 03 11:51:52 2020 -0500
@@ -37,6 +37,7 @@
     hg,
     merge as mergemod,
     mergeutil,
+    node as nodemod,
     obsolete,
     obsutil,
     patch,
@@ -1011,10 +1012,10 @@
     action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
     if action:
         cmdutil.check_incompatible_arguments(
-            opts, action, b'confirm', b'dry_run'
+            opts, action, [b'confirm', b'dry_run']
         )
         cmdutil.check_incompatible_arguments(
-            opts, action, b'rev', b'source', b'base', b'dest'
+            opts, action, [b'rev', b'source', b'base', b'dest']
         )
     cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
     cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
@@ -1028,7 +1029,7 @@
     if opts.get(b'auto_orphans'):
         disallowed_opts = set(opts) - {b'auto_orphans'}
         cmdutil.check_incompatible_arguments(
-            opts, b'auto_orphans', *disallowed_opts
+            opts, b'auto_orphans', disallowed_opts
         )
 
         userrevs = list(repo.revs(opts.get(b'auto_orphans')))
@@ -1265,8 +1266,7 @@
         if not src:
             ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
             return None
-        rebaseset = repo.revs(b'(%ld)::', src)
-        assert rebaseset
+        rebaseset = repo.revs(b'(%ld)::', src) or src
     else:
         base = scmutil.revrange(repo, [basef or b'.'])
         if not base:
@@ -1341,6 +1341,8 @@
                 )
             return None
 
+    if nodemod.wdirrev in rebaseset:
+        raise error.Abort(_(b'cannot rebase the working copy'))
     rebasingwcp = repo[b'.'].rev() in rebaseset
     ui.log(
         b"rebase",
@@ -1739,12 +1741,6 @@
     if any(p != nullrev and isancestor(rev, p) for p in newps):
         raise error.Abort(_(b'source is ancestor of destination'))
 
-    # "rebasenode" updates to new p1, use the corresponding merge base.
-    if bases[0] != nullrev:
-        base = bases[0]
-    else:
-        base = None
-
     # Check if the merge will contain unwanted changes. That may happen if
     # there are multiple special (non-changelog ancestor) merge bases, which
     # cannot be handled well by the 3-way merge algorithm. For example:
@@ -1767,8 +1763,9 @@
                 continue
             # Revisions in the side (not chosen as merge base) branch that
             # might contain "surprising" contents
+            other_bases = set(bases) - {base}
             siderevs = list(
-                repo.revs(b'((%ld-%d) %% (%d+%d))', bases, base, base, dest)
+                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
             )
 
             # If those revisions are covered by rebaseset, the result is good.
@@ -1792,14 +1789,6 @@
             for i, revs in enumerate(unwanted)
             if revs is not None
         )
-        base = bases[i]
-
-        # newps[0] should match merge base if possible. Currently, if newps[i]
-        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
-        # the other's ancestor. In that case, it's fine to not swap newps here.
-        # (see CASE-1 and CASE-2 above)
-        if i != 0 and newps[i] != nullrev:
-            newps[0], newps[i] = newps[i], newps[0]
 
         # The merge will include unwanted revisions. Abort now. Revisit this if
         # we have a more advanced merge algorithm that handles multiple bases.
@@ -1816,6 +1805,21 @@
                 % (rev, repo[rev], unwanteddesc)
             )
 
+        # newps[0] should match merge base if possible. Currently, if newps[i]
+        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
+        # the other's ancestor. In that case, it's fine to not swap newps here.
+        # (see CASE-1 and CASE-2 above)
+        if i != 0:
+            if newps[i] != nullrev:
+                newps[0], newps[i] = newps[i], newps[0]
+            bases[0], bases[i] = bases[i], bases[0]
+
+    # "rebasenode" updates to new p1, use the corresponding merge base.
+    if bases[0] != nullrev:
+        base = bases[0]
+    else:
+        base = None
+
     repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))
 
     return newps[0], newps[1], base
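
The new guard refuses to rebase the working copy itself. Mercurial models
the virtual working-directory revision as the sentinel revnum node.wdirrev,
so membership in the computed rebaseset is a cheap check. A standalone
sketch with the sentinel value inlined (an assumption; the real code imports
it as nodemod.wdirrev):

    WDIRREV = 0x7FFFFFFF  # mercurial.node.wdirrev, the working-copy rev

    def check_rebaseset(rebaseset):
        # mirror of the guard added above
        if WDIRREV in rebaseset:
            raise ValueError('cannot rebase the working copy')
        return rebaseset

    check_rebaseset({5, 6, 7})  # a normal revset passes through
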
--- a/hgext/releasenotes.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/hgext/releasenotes.py	Mon Feb 03 11:51:52 2020 -0500
@@ -654,7 +654,7 @@
     opts = pycompat.byteskwargs(opts)
     sections = releasenotessections(ui, repo)
 
-    cmdutil.check_incompatible_arguments(opts, b'list', b'rev', b'check')
+    cmdutil.check_incompatible_arguments(opts, b'list', [b'rev', b'check'])
 
     if opts.get(b'list'):
         return _getadmonitionlist(ui, sections)
--- a/hgext/transplant.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/hgext/transplant.py	Mon Feb 03 11:51:52 2020 -0500
@@ -761,12 +761,12 @@
     def checkopts(opts, revs):
         if opts.get(b'continue'):
             cmdutil.check_incompatible_arguments(
-                opts, b'continue', b'branch', b'all', b'merge'
+                opts, b'continue', [b'branch', b'all', b'merge']
             )
             return
         if opts.get(b'stop'):
             cmdutil.check_incompatible_arguments(
-                opts, b'stop', b'branch', b'all', b'merge'
+                opts, b'stop', [b'branch', b'all', b'merge']
             )
             return
         if not (
--- a/mercurial/cmdutil.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/cmdutil.py	Mon Feb 03 11:51:52 2020 -0500
@@ -281,11 +281,11 @@
     return previous
 
 
-def check_incompatible_arguments(opts, first, *others):
+def check_incompatible_arguments(opts, first, others):
     """abort if the first argument is given along with any of the others
 
     Unlike check_at_most_one_arg(), `others` are not mutually exclusive
-    among themselves.
+    among themselves, and they're passed as a single collection.
     """
     for other in others:
         check_at_most_one_arg(opts, first, other)
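
With `others` now a single collection, call sites can pass any iterable,
including the computed option sets seen in rebase.py above, without
unpacking. A simplified, runnable sketch of the pair of helpers
(check_at_most_one_arg is reduced to its essentials here and raises
ValueError instead of error.Abort):

    def check_at_most_one_arg(opts, *args):
        # abort if more than one of the named options is set
        previous = None
        for x in args:
            if opts.get(x):
                if previous:
                    raise ValueError(
                        'cannot specify both --%s and --%s' % (previous, x)
                    )
                previous = x
        return previous

    def check_incompatible_arguments(opts, first, others):
        # `others` is one collection, not varargs
        for other in others:
            check_at_most_one_arg(opts, first, other)

    check_incompatible_arguments(
        {'abort': True, 'rev': []}, 'abort', ['rev', 'preview']
    )  # passes: --rev and --preview are unset
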
--- a/mercurial/commands.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/commands.py	Mon Feb 03 11:51:52 2020 -0500
@@ -1228,7 +1228,7 @@
 
     action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list')
     if action:
-        cmdutil.check_incompatible_arguments(opts, action, b'rev')
+        cmdutil.check_incompatible_arguments(opts, action, [b'rev'])
     elif names or rev:
         action = b'add'
     elif inactive:
@@ -1236,7 +1236,9 @@
     else:
         action = b'list'
 
-    cmdutil.check_incompatible_arguments(opts, b'inactive', b'delete', b'list')
+    cmdutil.check_incompatible_arguments(
+        opts, b'inactive', [b'delete', b'list']
+    )
     if not names and action in {b'add', b'delete'}:
         raise error.Abort(_(b"bookmark name required"))
 
@@ -4847,6 +4849,7 @@
     abort = opts.get(b'abort')
     if abort and repo.dirstate.p2() == nullid:
         cmdutil.wrongtooltocontinue(repo, _(b'merge'))
+    cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
     if abort:
         state = cmdutil.getunfinishedstate(repo)
         if state and state._opname != b'merge':
@@ -4856,10 +4859,8 @@
             )
         if node:
             raise error.Abort(_(b"cannot specify a node with --abort"))
-        if opts.get(b'rev'):
-            raise error.Abort(_(b"cannot specify both --rev and --abort"))
-        if opts.get(b'preview'):
-            raise error.Abort(_(b"cannot specify --preview with --abort"))
+        return hg.abortmerge(repo.ui, repo)
+
     if opts.get(b'rev') and node:
         raise error.Abort(_(b"please specify just one revision"))
     if not node:
@@ -4867,8 +4868,7 @@
 
     if node:
         node = scmutil.revsingle(repo, node).node()
-
-    if not node and not abort:
+    else:
         if ui.configbool(b'commands', b'merge.require-rev'):
             raise error.Abort(
                 _(
@@ -4878,9 +4878,12 @@
             )
         node = repo[destutil.destmerge(repo)].node()
 
+    if node is None:
+        raise error.Abort(_(b'merging with the working copy has no effect'))
+
     if opts.get(b'preview'):
         # find nodes that are ancestors of p2 but not of p1
-        p1 = repo.lookup(b'.')
+        p1 = repo[b'.'].node()
         p2 = node
         nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
 
@@ -4896,12 +4899,7 @@
         force = opts.get(b'force')
         labels = [b'working copy', b'merge rev']
         return hg.merge(
-            repo,
-            node,
-            force=force,
-            mergeforce=force,
-            labels=labels,
-            abort=abort,
+            repo, node, force=force, mergeforce=force, labels=labels
         )
 
 
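The net effect in commands.merge() is that --abort is validated and
dispatched to hg.abortmerge() before any revision lookup, and a merge
target that resolves to the working copy is rejected explicitly. A rough
sketch of the reordered flow (names and messages simplified):

    def merge_flow(opts, node, destmerge):
        if opts.get('abort'):
            # --rev/--preview were already rejected up front via
            # check_incompatible_arguments()
            return 'hg.abortmerge'
        node = node or destmerge()
        if node is None:
            raise ValueError('merging with the working copy has no effect')
        return 'hg.merge(%r)' % node

    assert merge_flow({'abort': True}, None, lambda: None) == 'hg.abortmerge'
    assert merge_flow({}, 'tip', lambda: None) == "hg.merge('tip')"
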
--- a/mercurial/copies.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/copies.py	Mon Feb 03 11:51:52 2020 -0500
@@ -452,44 +452,34 @@
 
     ```other changed <file> which local deleted```
 
-    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
-    "dirmove".
+    Returns a tuple where:
 
-    "copy" is a mapping from destination name -> source name,
-    where source is in c1 and destination is in c2 or vice-versa.
-
-    "movewithdir" is a mapping from source name -> destination name,
-    where the file at source present in one context but not the other
-    needs to be moved to destination by the merge process, because the
-    other context moved the directory it is in.
+    "branch_copies" an instance of branch_copies.
 
     "diverge" is a mapping of source name -> list of destination names
     for divergent renames.
 
-    "renamedelete" is a mapping of source name -> list of destination
-    names for files deleted in c1 that were renamed in c2 or vice-versa.
-
-    "dirmove" is a mapping of detected source dir -> destination dir renames.
-    This is needed for handling changes to new files previously grafted into
-    renamed directories.
-
     This function calls different copytracing algorithms based on config.
     """
     # avoid silly behavior for update from empty dir
     if not c1 or not c2 or c1 == c2:
-        return {}, {}, {}, {}, {}
+        return branch_copies(), branch_copies(), {}
 
     narrowmatch = c1.repo().narrowmatch()
 
     # avoid silly behavior for parent -> working dir
     if c2.node() is None and c1.node() == repo.dirstate.p1():
-        return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
+        return (
+            branch_copies(_dirstatecopies(repo, narrowmatch)),
+            branch_copies(),
+            {},
+        )
 
     copytracing = repo.ui.config(b'experimental', b'copytrace')
     if stringutil.parsebool(copytracing) is False:
         # stringutil.parsebool() returns None when it is unable to parse the
         # value, so we should rely on making sure copytracing is on such cases
-        return {}, {}, {}, {}, {}
+        return branch_copies(), branch_copies(), {}
 
     if usechangesetcentricalgo(repo):
         # The heuristics don't make sense when we need changeset-centric algos
@@ -537,15 +527,45 @@
         if src not in m1:
             # renamed on side 1, deleted on side 2
             renamedelete[src] = dsts1
+    elif src not in mb:
+        # Work around the "short-circuit to avoid issues with merge states"
+        # thing in pathcopies(): pathcopies(x, y) can return a copy where the
+        # destination doesn't exist in y.
+        pass
     elif m2[src] != mb[src]:
         if not _related(c2[src], base[src]):
             return
         # modified on side 2
         for dst in dsts1:
-            if dst not in m2:
-                # dst not added on side 2 (handle as regular
-                # "both created" case in manifestmerge otherwise)
-                copy[dst] = src
+            copy[dst] = src
+
+
+class branch_copies(object):
+    """Information about copies made on one side of a merge/graft.
+
+    "copy" is a mapping from destination name -> source name,
+    where source is in c1 and destination is in c2 or vice-versa.
+
+    "movewithdir" is a mapping from source name -> destination name,
+    where the file at source present in one context but not the other
+    needs to be moved to destination by the merge process, because the
+    other context moved the directory it is in.
+
+    "renamedelete" is a mapping of source name -> list of destination
+    names for files deleted in c1 that were renamed in c2 or vice-versa.
+
+    "dirmove" is a mapping of detected source dir -> destination dir renames.
+    This is needed for handling changes to new files previously grafted into
+    renamed directories.
+    """
+
+    def __init__(
+        self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
+    ):
+        self.copy = {} if copy is None else copy
+        self.renamedelete = {} if renamedelete is None else renamedelete
+        self.dirmove = {} if dirmove is None else dirmove
+        self.movewithdir = {} if movewithdir is None else movewithdir
 
 
 def _fullcopytracing(repo, c1, c2, base):
@@ -563,6 +583,9 @@
     copies1 = pathcopies(base, c1)
     copies2 = pathcopies(base, c2)
 
+    if not (copies1 or copies2):
+        return branch_copies(), branch_copies(), {}
+
     inversecopies1 = {}
     inversecopies2 = {}
     for dst, src in copies1.items():
@@ -570,9 +593,11 @@
     for dst, src in copies2.items():
         inversecopies2.setdefault(src, []).append(dst)
 
-    copy = {}
+    copy1 = {}
+    copy2 = {}
     diverge = {}
-    renamedelete = {}
+    renamedelete1 = {}
+    renamedelete2 = {}
     allsources = set(inversecopies1) | set(inversecopies2)
     for src in allsources:
         dsts1 = inversecopies1.get(src)
@@ -589,7 +614,8 @@
                 # and 'd' and deletes 'a'.
                 if dsts1 & dsts2:
                     for dst in dsts1 & dsts2:
-                        copy[dst] = src
+                        copy1[dst] = src
+                        copy2[dst] = src
                 else:
                     diverge[src] = sorted(dsts1 | dsts2)
             elif src in m1 and src in m2:
@@ -597,27 +623,21 @@
                 dsts1 = set(dsts1)
                 dsts2 = set(dsts2)
                 for dst in dsts1 & dsts2:
-                    copy[dst] = src
+                    copy1[dst] = src
+                    copy2[dst] = src
             # TODO: Handle cases where it was renamed on one side and copied
             # on the other side
         elif dsts1:
             # copied/renamed only on side 1
             _checksinglesidecopies(
-                src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
+                src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
             )
         elif dsts2:
             # copied/renamed only on side 2
             _checksinglesidecopies(
-                src, dsts2, m2, m1, mb, c1, base, copy, renamedelete
+                src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
             )
 
-    renamedeleteset = set()
-    divergeset = set()
-    for dsts in diverge.values():
-        divergeset.update(dsts)
-    for dsts in renamedelete.values():
-        renamedeleteset.update(dsts)
-
     # find interesting file sets from manifests
     addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
     addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
@@ -630,33 +650,60 @@
     if u2:
         repo.ui.debug(b"%s:\n   %s\n" % (header % b'other', b"\n   ".join(u2)))
 
-    fullcopy = copies1.copy()
-    fullcopy.update(copies2)
-    if not fullcopy:
-        return copy, {}, diverge, renamedelete, {}
+    if repo.ui.debugflag:
+        renamedeleteset = set()
+        divergeset = set()
+        for dsts in diverge.values():
+            divergeset.update(dsts)
+        for dsts in renamedelete1.values():
+            renamedeleteset.update(dsts)
+        for dsts in renamedelete2.values():
+            renamedeleteset.update(dsts)
 
-    if repo.ui.debugflag:
         repo.ui.debug(
             b"  all copies found (* = to merge, ! = divergent, "
             b"% = renamed and deleted):\n"
         )
-        for f in sorted(fullcopy):
-            note = b""
-            if f in copy:
-                note += b"*"
-            if f in divergeset:
-                note += b"!"
-            if f in renamedeleteset:
-                note += b"%"
-            repo.ui.debug(
-                b"   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note)
-            )
-    del divergeset
+        for side, copies in ((b"local", copies1), (b"remote", copies2)):
+            if not copies:
+                continue
+            repo.ui.debug(b"   on %s side:\n" % side)
+            for f in sorted(copies):
+                note = b""
+                if f in copy1 or f in copy2:
+                    note += b"*"
+                if f in divergeset:
+                    note += b"!"
+                if f in renamedeleteset:
+                    note += b"%"
+                repo.ui.debug(
+                    b"    src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
+                )
+        del renamedeleteset
+        del divergeset
 
     repo.ui.debug(b"  checking for directory renames\n")
 
+    dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2)
+    dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1)
+
+    branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1)
+    branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2)
+
+    return branch_copies1, branch_copies2, diverge
+
+
+def _dir_renames(repo, ctx, copy, fullcopy, addedfiles):
+    """Finds moved directories and files that should move with them.
+
+    ctx: the context for one of the sides
+    copy: files copied on the same side (as ctx)
+    fullcopy: files copied on the same side (as ctx), including those that
+              merge.manifestmerge() won't care about
+    addedfiles: added files on the other side (compared to ctx)
+    """
     # generate a directory move map
-    d1, d2 = c1.dirs(), c2.dirs()
+    d = ctx.dirs()
+    d = ctx.dirs()
     invalid = set()
     dirmove = {}
 
@@ -667,12 +714,9 @@
         if dsrc in invalid:
             # already seen to be uninteresting
             continue
-        elif dsrc in d1 and ddst in d1:
+        elif dsrc in d and ddst in d:
             # directory wasn't entirely moved locally
             invalid.add(dsrc)
-        elif dsrc in d2 and ddst in d2:
-            # directory wasn't entirely moved remotely
-            invalid.add(dsrc)
         elif dsrc in dirmove and dirmove[dsrc] != ddst:
             # files from the same directory moved to two different places
             invalid.add(dsrc)
@@ -683,10 +727,10 @@
     for i in invalid:
         if i in dirmove:
             del dirmove[i]
-    del d1, d2, invalid
+    del d, invalid
 
     if not dirmove:
-        return copy, {}, diverge, renamedelete, {}
+        return {}, {}
 
     dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
 
@@ -697,7 +741,7 @@
 
     movewithdir = {}
     # check unaccounted nonoverlapping files against directory moves
-    for f in u1 + u2:
+    for f in addedfiles:
         if f not in fullcopy:
             for d in dirmove:
                 if f.startswith(d):
@@ -711,7 +755,7 @@
                         )
                     break
 
-    return copy, movewithdir, diverge, renamedelete, dirmove
+    return dirmove, movewithdir
 
 
 def _heuristicscopytracing(repo, c1, c2, base):
@@ -744,8 +788,6 @@
     if c2.rev() is None:
         c2 = c2.p1()
 
-    copies = {}
-
     changedfiles = set()
     m1 = c1.manifest()
     if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
@@ -765,10 +807,11 @@
         changedfiles.update(ctx.files())
         ctx = ctx.p1()
 
+    copies2 = {}
     cp = _forwardcopies(base, c2)
     for dst, src in pycompat.iteritems(cp):
         if src in m1:
-            copies[dst] = src
+            copies2[dst] = src
 
     # file is missing if it isn't present in the destination, but is present in
     # the base and present in the source.
@@ -777,6 +820,7 @@
     filt = lambda f: f not in m1 and f in base and f in c2
     missingfiles = [f for f in changedfiles if filt(f)]
 
+    copies1 = {}
     if missingfiles:
         basenametofilename = collections.defaultdict(list)
         dirnametofilename = collections.defaultdict(list)
@@ -818,9 +862,9 @@
                     # if there are a few related copies then we'll merge
                     # changes into all of them. This matches the behaviour
                     # of upstream copytracing
-                    copies[candidate] = f
+                    copies1[candidate] = f
 
-    return copies, {}, {}, {}, {}
+    return branch_copies(copies1), branch_copies(copies2), {}
 
 
 def _related(f1, f2):
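
mergecopies() now returns (branch_copies, branch_copies, diverge) instead
of five loose dicts, so callers address each side explicitly. A standalone
sketch using a trimmed copy of the class defined above:

    class branch_copies(object):
        # trimmed copy of the class above, for illustration
        def __init__(
            self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
        ):
            self.copy = {} if copy is None else copy
            self.renamedelete = {} if renamedelete is None else renamedelete
            self.dirmove = {} if dirmove is None else dirmove
            self.movewithdir = {} if movewithdir is None else movewithdir

    # what a caller such as manifestmerge() receives now:
    local, remote, diverge = (
        branch_copies(copy={b'new.txt': b'old.txt'}),
        branch_copies(),
        {},
    )
    assert local.copy[b'new.txt'] == b'old.txt'  # was: five-tuple indexing
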
--- a/mercurial/debugcommands.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/debugcommands.py	Mon Feb 03 11:51:52 2020 -0500
@@ -13,6 +13,7 @@
 import errno
 import operator
 import os
+import platform
 import random
 import re
 import socket
@@ -1487,6 +1488,11 @@
         pycompat.sysexecutable or _(b"unknown"),
     )
     fm.write(
+        b'pythonimplementation',
+        _(b"checking Python implementation (%s)\n"),
+        pycompat.sysbytes(platform.python_implementation()),
+    )
+    fm.write(
         b'pythonver',
         _(b"checking Python version (%s)\n"),
         (b"%d.%d.%d" % sys.version_info[:3]),
--- a/mercurial/hg.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/hg.py	Mon Feb 03 11:51:52 2020 -0500
@@ -1040,10 +1040,10 @@
 def clean(repo, node, show_stats=True, quietempty=False):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = updaterepo(repo, node, True)
+    assert stats.unresolvedcount == 0
     repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
     if show_stats:
         _showstats(repo, stats, quietempty)
-    return stats.unresolvedcount > 0
 
 
 # naming conflict in updatetotally()
@@ -1138,19 +1138,10 @@
 
 
 def merge(
-    repo,
-    node,
-    force=None,
-    remind=True,
-    mergeforce=False,
-    labels=None,
-    abort=False,
+    repo, node, force=None, remind=True, mergeforce=False, labels=None,
 ):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
-    if abort:
-        return abortmerge(repo.ui, repo)
-
     stats = mergemod.update(
         repo,
         node,
@@ -1183,8 +1174,8 @@
 
     repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
     stats = mergemod.update(repo, node, branchmerge=False, force=True)
+    assert stats.unresolvedcount == 0
     _showstats(repo, stats)
-    return stats.unresolvedcount > 0
 
 
 def _incoming(
--- a/mercurial/match.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/match.py	Mon Feb 03 11:51:52 2020 -0500
@@ -24,7 +24,7 @@
 )
 from .utils import stringutil
 
-rustmod = policy.importrust('filepatterns')
+rustmod = policy.importrust('dirstate')
 
 allpatternkinds = (
     b're',
@@ -1273,15 +1273,6 @@
     '''Convert a (normalized) pattern of any kind into a
     regular expression.
     globsuffix is appended to the regexp of globs.'''
-
-    if rustmod is not None:
-        try:
-            return rustmod.build_single_regex(kind, pat, globsuffix)
-        except rustmod.PatternError:
-            raise error.ProgrammingError(
-                b'not a regex pattern: %s:%s' % (kind, pat)
-            )
-
     if not pat and kind in (b'glob', b'relpath'):
         return b''
     if kind == b're':
@@ -1554,18 +1545,6 @@
     This is useful to debug ignore patterns.
     '''
 
-    if rustmod is not None:
-        result, warnings = rustmod.read_pattern_file(
-            filepath, bool(warn), sourceinfo,
-        )
-
-        for warning_params in warnings:
-            # Can't be easily emitted from Rust, because it would require
-            # a mechanism for both gettext and calling the `warn` function.
-            warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params)
-
-        return result
-
     syntaxes = {
         b're': b'relre:',
         b'regexp': b'relre:',
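
With the filepatterns bindings gone, match.py only probes for the Rust
dirstate module and always uses the pure-Python pattern code.
policy.importrust() returns None when the extensions are not built; a
simplified stand-in for that probe (module layout assumed from
mercurial.rustext, and the real helper also honors the module policy):

    def importrust(modname):
        # return the compiled Rust module if available, else None
        try:
            mod = __import__(
                'mercurial.rustext.%s' % modname, fromlist=[modname]
            )
        except ImportError:
            return None
        return mod

    rustmod = importrust('dirstate')
    if rustmod is None:
        pass  # pure-Python fallback, as in the hunks above
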
--- a/mercurial/merge.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/merge.py	Mon Feb 03 11:51:52 2020 -0500
@@ -386,18 +386,26 @@
         return configmergedriver
 
     @util.propertycache
-    def localctx(self):
+    def local(self):
         if self._local is None:
-            msg = b"localctx accessed but self._local isn't set"
+            msg = b"local accessed but self._local isn't set"
             raise error.ProgrammingError(msg)
-        return self._repo[self._local]
+        return self._local
+
+    @util.propertycache
+    def localctx(self):
+        return self._repo[self.local]
+
+    @util.propertycache
+    def other(self):
+        if self._other is None:
+            msg = b"other accessed but self._other isn't set"
+            raise error.ProgrammingError(msg)
+        return self._other
 
     @util.propertycache
     def otherctx(self):
-        if self._other is None:
-            msg = b"otherctx accessed but self._other isn't set"
-            raise error.ProgrammingError(msg)
-        return self._repo[self._other]
+        return self._repo[self.other]
 
     def active(self):
         """Whether mergestate is active.
@@ -1256,17 +1264,19 @@
     if matcher is not None and matcher.always():
         matcher = None
 
-    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
-
     # manifests fetched in order are going to be faster, so prime the caches
     [
         x.manifest()
         for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
     ]
 
+    branch_copies1 = copies.branch_copies()
+    branch_copies2 = copies.branch_copies()
+    diverge = {}
     if followcopies:
-        ret = copies.mergecopies(repo, wctx, p2, pa)
-        copy, movewithdir, diverge, renamedelete, dirmove = ret
+        branch_copies1, branch_copies2, diverge = copies.mergecopies(
+            repo, wctx, p2, pa
+        )
 
     boolbm = pycompat.bytestr(bool(branchmerge))
     boolf = pycompat.bytestr(bool(force))
@@ -1278,8 +1288,10 @@
     repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
 
     m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
-    copied = set(copy.values())
-    copied.update(movewithdir.values())
+    copied1 = set(branch_copies1.copy.values())
+    copied1.update(branch_copies1.movewithdir.values())
+    copied2 = set(branch_copies2.copy.values())
+    copied2.update(branch_copies2.movewithdir.values())
 
     if b'.hgsubstate' in m1 and wctx.rev() is None:
         # Check whether sub state is modified, and overwrite the manifest
@@ -1299,10 +1311,10 @@
         relevantfiles = set(ma.diff(m2).keys())
 
         # For copied and moved files, we need to add the source file too.
-        for copykey, copyvalue in pycompat.iteritems(copy):
+        for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
             if copyvalue in relevantfiles:
                 relevantfiles.add(copykey)
-        for movedirkey in movewithdir:
+        for movedirkey in branch_copies1.movewithdir:
             relevantfiles.add(movedirkey)
         filesmatcher = scmutil.matchfiles(repo, relevantfiles)
         matcher = matchmod.intersectmatchers(matcher, filesmatcher)
@@ -1313,7 +1325,10 @@
     for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
         if n1 and n2:  # file exists on both local and remote side
             if f not in ma:
-                fa = copy.get(f, None)
+                # TODO: what if they're renamed from different sources?
+                fa = branch_copies1.copy.get(
+                    f, None
+                ) or branch_copies2.copy.get(f, None)
                 if fa is not None:
                     actions[f] = (
                         ACTION_MERGE,
@@ -1356,10 +1371,12 @@
                         b'versions differ',
                     )
         elif n1:  # file exists only on local side
-            if f in copied:
+            if f in copied2:
                 pass  # we'll deal with it on m2 side
-            elif f in movewithdir:  # directory rename, move local
-                f2 = movewithdir[f]
+            elif (
+                f in branch_copies1.movewithdir
+            ):  # directory rename, move local
+                f2 = branch_copies1.movewithdir[f]
                 if f2 in m2:
                     actions[f2] = (
                         ACTION_MERGE,
@@ -1372,8 +1389,8 @@
                         (f, fl1),
                         b'remote directory rename - move from %s' % f,
                     )
-            elif f in copy:
-                f2 = copy[f]
+            elif f in branch_copies1.copy:
+                f2 = branch_copies1.copy[f]
                 actions[f] = (
                     ACTION_MERGE,
                     (f, f2, f2, False, pa.node()),
@@ -1397,10 +1414,10 @@
                 else:
                     actions[f] = (ACTION_REMOVE, None, b'other deleted')
         elif n2:  # file exists only on remote side
-            if f in copied:
+            if f in copied1:
                 pass  # we'll deal with it on m1 side
-            elif f in movewithdir:
-                f2 = movewithdir[f]
+            elif f in branch_copies2.movewithdir:
+                f2 = branch_copies2.movewithdir[f]
                 if f2 in m1:
                     actions[f2] = (
                         ACTION_MERGE,
@@ -1413,8 +1430,8 @@
                         (f, fl2),
                         b'local directory rename - get from %s' % f,
                     )
-            elif f in copy:
-                f2 = copy[f]
+            elif f in branch_copies2.copy:
+                f2 = branch_copies2.copy[f]
                 if f2 in m2:
                     actions[f] = (
                         ACTION_MERGE,
@@ -1451,10 +1468,10 @@
                     )
             elif n2 != ma[f]:
                 df = None
-                for d in dirmove:
+                for d in branch_copies1.dirmove:
                     if f.startswith(d):
                         # new file added in a directory that was moved
-                        df = dirmove[d] + f[len(d) :]
+                        df = branch_copies1.dirmove[d] + f[len(d) :]
                         break
                 if df is not None and df in m1:
                     actions[df] = (
@@ -1481,6 +1498,9 @@
         # Updates "actions" in place
         _filternarrowactions(narrowmatch, branchmerge, actions)
 
+    renamedelete = branch_copies1.renamedelete
+    renamedelete.update(branch_copies2.renamedelete)
+
     return actions, diverge, renamedelete
 
 
@@ -2288,13 +2308,6 @@
                     ),
                 )
             )
-    # If we're doing a partial update, we need to skip updating
-    # the dirstate, so make a note of any partial-ness to the
-    # update here.
-    if matcher is None or matcher.always():
-        partial = False
-    else:
-        partial = True
     with repo.wlock():
         if wc is None:
             wc = repo[None]
@@ -2507,7 +2520,11 @@
         ### apply phase
         if not branchmerge:  # just jump to the new rev
             fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
-        if not partial and not wc.isinmemory():
+        # If we're doing a partial update, we need to skip updating
+        # the dirstate.
+        always = matcher is None or matcher.always()
+        updatedirstate = always and not wc.isinmemory()
+        if updatedirstate:
             repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
             # note that we're in the middle of an update
             repo.vfs.write(b'updatestate', p2.hex())
@@ -2553,7 +2570,6 @@
                 )
             )
 
-        updatedirstate = not partial and not wc.isinmemory()
         wantfiledata = updatedirstate and not branchmerge
         stats, getfiledata = applyupdates(
             repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
@@ -2574,7 +2590,7 @@
     if not branchmerge:
         sparse.prunetemporaryincludes(repo)
 
-    if not partial:
+    if updatedirstate:
         repo.hook(
             b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
         )
@@ -2582,7 +2598,13 @@
 
 
 def graft(
-    repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
+    repo,
+    ctx,
+    base=None,
+    labels=None,
+    keepparent=False,
+    keepconflictparent=False,
+    wctx=None,
 ):
     """Do a graft-like merge.
 
@@ -2593,7 +2615,7 @@
     renames/copies appropriately.
 
     ctx - changeset to rebase
-    base - merge base, usually ctx.p1()
+    base - merge base, or ctx.p1() if not specified
     labels - merge labels eg ['local', 'graft']
     keepparent - keep second parent if any
     keepconflictparent - if unresolved, keep parent used for the merge
@@ -2605,8 +2627,9 @@
     # to copy commits), and 2) informs update that the incoming changes are
     # newer than the destination so it doesn't prompt about "remote changed foo
     # which local deleted".
-    wctx = repo[None]
+    wctx = wctx or repo[None]
     pctx = wctx.p1()
+    base = base or ctx.p1()
     mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
 
     stats = update(
@@ -2617,6 +2640,7 @@
         base.node(),
         mergeancestor=mergeancestor,
         labels=labels,
+        wc=wctx,
     )
 
     if keepconflictparent and stats.unresolvedcount:
@@ -2631,11 +2655,16 @@
     if pother == pctx.node():
         pother = nullid
 
-    with repo.dirstate.parentchange():
-        repo.setparents(pctx.node(), pother)
-        repo.dirstate.write(repo.currenttransaction())
+    if wctx.isinmemory():
+        wctx.setparents(pctx.node(), pother)
         # fix up dirstate for copies and renames
         copies.graftcopies(wctx, ctx, base)
+    else:
+        with repo.dirstate.parentchange():
+            repo.setparents(pctx.node(), pother)
+            repo.dirstate.write(repo.currenttransaction())
+            # fix up dirstate for copies and renames
+            copies.graftcopies(wctx, ctx, base)
     return stats
 
 
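The merge.py hunks above replace the throwaway `partial` flag with a single
`updatedirstate` predicate that gates the preupdate/update hooks and the
dirstate write. A minimal sketch of that predicate, using hypothetical
stand-ins for update()'s locals:

    def _updatedirstate(matcher, wc):
        # A partial update (a matcher that does not match everything) or an
        # in-memory working context means the on-disk dirstate, and the
        # hooks around it, must be left alone.
        always = matcher is None or matcher.always()
        return always and not wc.isinmemory()
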
--- a/mercurial/pathutil.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/pathutil.py	Mon Feb 03 11:51:52 2020 -0500
@@ -84,7 +84,7 @@
                         _(b"path contains illegal component: %s") % path
                     )
         if b'.hg' in _lowerclean(path):
-            lparts = [_lowerclean(p.lower()) for p in parts]
+            lparts = [_lowerclean(p) for p in parts]
             for p in b'.hg', b'.hg.':
                 if p in lparts[1:]:
                     pos = lparts.index(p)
@@ -99,7 +99,6 @@
 
         parts.pop()
         normparts.pop()
-        prefixes = []
         # It's important that we check the path parts starting from the root.
         # This means we won't accidentally traverse a symlink into some other
         # filesystem (which is potentially expensive to access).
@@ -110,13 +109,11 @@
                 continue
             if self._realfs:
                 self._checkfs(prefix, path)
-            prefixes.append(normprefix)
+            if self._cached:
+                self.auditeddir.add(normprefix)
 
         if self._cached:
             self.audited.add(normpath)
-            # only add prefixes to the cache after checking everything: we don't
-            # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
-            self.auditeddir.update(prefixes)
 
     def _checkfs(self, prefix, path):
         """raise exception if a file system backed check fails"""
--- a/mercurial/profiling.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/profiling.py	Mon Feb 03 11:51:52 2020 -0500
@@ -186,6 +186,7 @@
         self._output = None
         self._fp = None
         self._fpdoclose = True
+        self._flushfp = None
         self._profiler = None
         self._enabled = enabled
         self._entered = False
@@ -246,6 +247,8 @@
             else:
                 self._fpdoclose = False
                 self._fp = self._ui.ferr
+                # Ensure we've flushed fout before writing to ferr.
+                self._flushfp = self._ui.fout
 
             if proffn is not None:
                 pass
@@ -265,6 +268,7 @@
     def __exit__(self, exception_type, exception_value, traceback):
         propagate = None
         if self._profiler is not None:
+            self._uiflush()
             propagate = self._profiler.__exit__(
                 exception_type, exception_value, traceback
             )
@@ -280,3 +284,7 @@
     def _closefp(self):
         if self._fpdoclose and self._fp is not None:
             self._fp.close()
+
+    def _uiflush(self):
+        if self._flushfp:
+            self._flushfp.flush()
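The profiling.py change remembers `ui.fout` whenever profiler output is sent
to `ui.ferr`, and flushes it before the profiler writes its report, so that
buffered command output cannot appear after (or interleaved with) the
profile. The idea in isolation, with plain stream stand-ins rather than the
real `ui` object:

    import sys

    def writeprofile(report, out=sys.stdout, err=sys.stderr):
        out.flush()         # pending normal output lands first...
        err.write(report)   # ...then the profiler report
        err.flush()
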
--- a/mercurial/shelve.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/shelve.py	Mon Feb 03 11:51:52 2020 -0500
@@ -996,7 +996,6 @@
         stats = merge.graft(
             repo,
             shelvectx,
-            shelvectx.p1(),
             labels=[b'shelve', b'working-copy'],
             keepconflictparent=True,
         )
--- a/mercurial/ui.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/mercurial/ui.py	Mon Feb 03 11:51:52 2020 -0500
@@ -1939,30 +1939,6 @@
         if self._progbar is not None and self._progbar.printed:
             self._progbar.clear()
 
-    def progress(self, topic, pos, item=b"", unit=b"", total=None):
-        '''show a progress message
-
-        By default a textual progress bar will be displayed if an operation
-        takes too long. 'topic' is the current operation, 'item' is a
-        non-numeric marker of the current position (i.e. the currently
-        in-process file), 'pos' is the current numeric position (i.e.
-        revision, bytes, etc.), unit is a corresponding unit label,
-        and total is the highest expected pos.
-
-        Multiple nested topics may be active at a time.
-
-        All topics should be marked closed by setting pos to None at
-        termination.
-        '''
-        self.deprecwarn(
-            b"use ui.makeprogress() instead of ui.progress()", b"5.1"
-        )
-        progress = self.makeprogress(topic, unit, total)
-        if pos is not None:
-            progress.update(pos, item=item)
-        else:
-            progress.complete()
-
     def makeprogress(self, topic, unit=b"", total=None):
         """Create a progress helper for the specified topic"""
         if getattr(self._fmsgerr, 'structured', False):
--- a/relnotes/next	Mon Feb 03 11:07:34 2020 -0500
+++ b/relnotes/next	Mon Feb 03 11:51:52 2020 -0500
@@ -12,3 +12,11 @@
 
 == Internal API Changes ==
 
+ * The deprecated `ui.progress()` has now been deleted. Please use
+   `ui.makeprogress()` instead.
+
+ * `hg.merge()` has lost its `abort` argument. Please call
+   `hg.abortmerge()` directly instead.
+
+ * The `*others` argument of `cmdutil.check_incompatible_arguments()`
+   changed from being a varargs argument to being a single collection.
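For callers migrating off the deleted `ui.progress()`, the removed
compatibility shim in the ui.py hunk above spells out the equivalent
`makeprogress` sequence; roughly (loop variables hypothetical):

    progress = ui.makeprogress(topic, unit=unit, total=total)
    for pos, item in enumerate(items):
        progress.update(pos, item=item)
    progress.complete()
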
--- a/rust/Cargo.lock	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/Cargo.lock	Mon Feb 03 11:51:52 2020 -0500
@@ -2,23 +2,20 @@
 # It is not intended for manual editing.
 [[package]]
 name = "aho-corasick"
-version = "0.7.6"
+version = "0.7.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "arrayvec"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "autocfg"
-version = "0.1.6"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "autocfg"
+version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -33,11 +30,10 @@
 
 [[package]]
 name = "c2-chacha"
-version = "0.2.2"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -55,50 +51,52 @@
 
 [[package]]
 name = "cpython"
-version = "0.3.0"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python3-sys 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.7.2"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-queue"
-version = "0.1.2"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.6.6"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -115,25 +113,39 @@
 
 [[package]]
 name = "getrandom"
-version = "0.1.12"
+version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "hermit-abi"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "hex"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "hg-core"
 version = "0.1.0"
 dependencies = [
  "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -141,9 +153,9 @@
 name = "hg-cpython"
 version = "0.1.0"
 dependencies = [
- "cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cpython 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "hg-core 0.1.0",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -151,7 +163,7 @@
 version = "0.1.0"
 dependencies = [
  "hg-core 0.1.0",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -161,64 +173,60 @@
 
 [[package]]
 name = "libc"
-version = "0.2.64"
+version = "0.2.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "memchr"
-version = "2.2.1"
+version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "memoffset"
-version = "0.5.1"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "nodrop"
-version = "0.1.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "num-traits"
-version = "0.2.8"
+version = "0.2.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "num_cpus"
-version = "1.10.1"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "ppv-lite86"
-version = "0.2.5"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "python27-sys"
-version = "0.3.0"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "python3-sys"
-version = "0.3.0"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -226,8 +234,8 @@
 version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -241,11 +249,11 @@
 
 [[package]]
 name = "rand"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -256,7 +264,7 @@
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -265,7 +273,7 @@
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -287,7 +295,7 @@
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -319,7 +327,7 @@
 version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -331,7 +339,7 @@
 dependencies = [
  "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -342,7 +350,7 @@
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -356,24 +364,24 @@
 
 [[package]]
 name = "rayon"
-version = "1.2.0"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.6.0"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -386,18 +394,18 @@
 
 [[package]]
 name = "regex"
-version = "1.3.1"
+version = "1.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.12"
+version = "0.6.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -428,7 +436,7 @@
 
 [[package]]
 name = "thread_local"
-version = "0.3.6"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -439,12 +447,12 @@
 version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "wasi"
-version = "0.7.0"
+version = "0.9.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -467,34 +475,35 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [metadata]
-"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d"
-"checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9"
-"checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"
+"checksum aho-corasick 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5f56c476256dc249def911d6f7580b5fc7e875895b5d7ee88f5d602208035744"
+"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
+"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
 "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 "checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
-"checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101"
+"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb"
 "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85532c648315aeb0829ad216a6a29aa3212cf9319bc7f6daf1404aa0bdd1485f"
-"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71"
-"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9"
-"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
-"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
+"checksum cpython 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "86eab84f48335293c53d06565bcffe4024f7294edaf223e499fda536532e4b55"
+"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca"
+"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac"
+"checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db"
+"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4"
 "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
 "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "473a1265acc8ff1e808cd0a1af8cee3c2ee5200916058a2ca113c29f2d903571"
+"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
+"checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772"
+"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e"
 "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c"
-"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
-"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f"
-"checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
-"checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32"
-"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273"
-"checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b"
-"checksum python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "372555e88a6bc8109eb641380240dc8d25a128fc48363ec9075664daadffdd5b"
-"checksum python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f3a8ebed3f1201fda179f3960609dbbc10cd8c75e9f2afcb03788278f367d8ea"
+"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558"
+"checksum memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223"
+"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9"
+"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
+"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
+"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
+"checksum python27-sys 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ecc2dbf296df07aaa029b6f6e03bdd058fe9827cf2f075261839a9dff49ef419"
+"checksum python3-sys 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d296550fff0db9d77bae50fe3ecd5997e4929aadca4444e57b22a4bc3bf24fd3"
 "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
-"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412"
+"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 "checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
 "checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853"
 "checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
@@ -507,18 +516,18 @@
 "checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
 "checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
 "checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
-"checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123"
-"checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b"
+"checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
+"checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
 "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd"
-"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716"
+"checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87"
+"checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90"
 "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
 "checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
 "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
-"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
+"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
 "checksum twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56"
-"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d"
+"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
 "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
 "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- a/rust/hg-core/Cargo.toml	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-core/Cargo.toml	Mon Feb 03 11:51:52 2020 -0500
@@ -10,10 +10,11 @@
 
 [dependencies]
 byteorder = "1.3.1"
+hex = "0.4.0"
 lazy_static = "1.3.0"
 memchr = "2.2.0"
 rand = "0.6.5"
 rand_pcg = "0.1.1"
-rayon = "1.2.0"
+rayon = "1.3.0"
 regex = "1.1.0"
 twox-hash = "1.5.0"
--- a/rust/hg-core/src/dirstate/status.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-core/src/dirstate/status.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -272,7 +272,7 @@
 
 pub fn status<'a: 'c, 'b: 'c, 'c>(
     dmap: &'a DirstateMap,
-    matcher: &'b (impl Matcher),
+    matcher: &'b impl Matcher,
     root_dir: impl AsRef<Path> + Sync + Send + Copy,
     list_clean: bool,
     last_normal_time: i64,
--- a/rust/hg-core/src/revlog.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-core/src/revlog.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -5,6 +5,10 @@
 // GNU General Public License version 2 or any later version.
 //! Mercurial concepts for handling revision history
 
+pub mod node;
+pub mod nodemap;
+pub use node::{Node, NodeError, NodePrefix, NodePrefixRef};
+
 /// Mercurial revision numbers
 ///
 /// As noted in revlog.c, revision numbers are actually encoded in
@@ -36,3 +40,17 @@
     ParentOutOfRange(Revision),
     WorkingDirectoryUnsupported,
 }
+
+/// The Mercurial Revlog Index
+///
+/// This is currently limited to the minimal interface that is needed for
+/// the [`nodemap`](nodemap/index.html) module
+pub trait RevlogIndex {
+    /// Total number of Revisions referenced in this index
+    fn len(&self) -> usize;
+
+    /// Return a reference to the Node or `None` if rev is out of bounds
+    ///
+    /// `NULL_REVISION` is not considered to be out of bounds.
+    fn node(&self, rev: Revision) -> Option<&Node>;
+}
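The new `RevlogIndex` trait is deliberately minimal. For readers more at home
on Mercurial's Python side, here is the same contract restated as a
hypothetical typing sketch (not an actual Mercurial class):

    from typing import Optional, Protocol

    class RevlogIndex(Protocol):
        def __len__(self) -> int:
            """Total number of revisions referenced in this index."""

        def node(self, rev: int) -> Optional[bytes]:
            """Node for `rev`, or None if `rev` is out of bounds.

            The null revision (-1) is not considered out of bounds.
            """
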
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/node.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -0,0 +1,368 @@
+// Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Definitions and utilities for Revision nodes
+//!
+//! In Mercurial code base, it is customary to call "a node" the binary SHA
+//! of a revision.
+
+use hex::{self, FromHex, FromHexError};
+
+/// The length in bytes of a `Node`
+///
+/// This constant is meant to ease refactors of this module, and
+/// is private so that calling code does not expect all nodes to
+/// have the same size, should we support several formats concurrently
+/// in the future.
+const NODE_BYTES_LENGTH: usize = 20;
+
+/// The length of a `Node` in nybbles (hexadecimal digits)
+///
+/// See also `NODE_BYTES_LENGTH` about it being private.
+const NODE_NYBBLES_LENGTH: usize = 2 * NODE_BYTES_LENGTH;
+
+/// Private alias for readability and to ease future change
+type NodeData = [u8; NODE_BYTES_LENGTH];
+
+/// Binary revision SHA
+///
+/// ## Future changes of hash size
+///
+/// To accommodate future changes of hash size, Rust callers
+/// should use the conversion methods at the boundaries (FFI, actual
+/// computation of hashes and I/O) only, and only if required.
+///
+/// All other callers outside of unit tests should just handle `Node` values
+/// and never make any assumption on the actual length, using [`nybbles_len`]
+/// if they need a loop boundary.
+///
+/// All methods that create a `Node` either take a type that enforces
+/// the size or fail immediately at runtime with [`ExactLengthRequired`].
+///
+/// [`nybbles_len`]: #method.nybbles_len
+/// [`ExactLengthRequired`]: enum.NodeError.html#variant.ExactLengthRequired
+#[derive(Clone, Debug, PartialEq)]
+pub struct Node {
+    data: NodeData,
+}
+
+/// The node value for NULL_REVISION
+pub const NULL_NODE: Node = Node {
+    data: [0; NODE_BYTES_LENGTH],
+};
+
+impl From<NodeData> for Node {
+    fn from(data: NodeData) -> Node {
+        Node { data }
+    }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum NodeError {
+    ExactLengthRequired(usize, String),
+    PrefixTooLong(String),
+    HexError(FromHexError, String),
+}
+
+/// Low level utility function, also for prefixes
+fn get_nybble(s: &[u8], i: usize) -> u8 {
+    if i % 2 == 0 {
+        s[i / 2] >> 4
+    } else {
+        s[i / 2] & 0x0f
+    }
+}
+
+impl Node {
+    /// Retrieve the `i`th half-byte of the binary data.
+    ///
+    /// This is also the `i`th hexadecimal digit in numeric form,
+    /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
+    pub fn get_nybble(&self, i: usize) -> u8 {
+        get_nybble(&self.data, i)
+    }
+
+    /// Length of the data, in nybbles
+    pub fn nybbles_len(&self) -> usize {
+        // public exposure as an instance method only, so that we can
+        // easily support several sizes of hashes if needed in the future.
+        NODE_NYBBLES_LENGTH
+    }
+
+    /// Convert from hexadecimal string representation
+    ///
+    /// Exact length is required.
+    ///
+    /// To be used in FFI and I/O only, in order to facilitate future
+    /// changes of hash format.
+    pub fn from_hex(hex: &str) -> Result<Node, NodeError> {
+        Ok(NodeData::from_hex(hex)
+            .map_err(|e| NodeError::from((e, hex)))?
+            .into())
+    }
+
+    /// Convert to hexadecimal string representation
+    ///
+    /// To be used in FFI and I/O only, in order to facilitate future
+    /// changes of hash format.
+    pub fn encode_hex(&self) -> String {
+        hex::encode(self.data)
+    }
+
+    /// Provide access to binary data
+    ///
+    /// This is needed by FFI layers, for instance to return expected
+    /// binary values to Python.
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.data
+    }
+}
+
+impl<T: AsRef<str>> From<(FromHexError, T)> for NodeError {
+    fn from(err_offender: (FromHexError, T)) -> Self {
+        let (err, offender) = err_offender;
+        match err {
+            FromHexError::InvalidStringLength => {
+                NodeError::ExactLengthRequired(
+                    NODE_NYBBLES_LENGTH,
+                    offender.as_ref().to_owned(),
+                )
+            }
+            _ => NodeError::HexError(err, offender.as_ref().to_owned()),
+        }
+    }
+}
+
+/// The beginning of a binary revision SHA.
+///
+/// Since it can potentially come from a hexadecimal representation with
+/// odd length, it needs to carry around whether the last 4 bits are relevant
+/// or not.
+#[derive(Debug, PartialEq)]
+pub struct NodePrefix {
+    buf: Vec<u8>,
+    is_odd: bool,
+}
+
+impl NodePrefix {
+    /// Convert from hexadecimal string representation
+    ///
+    /// Similarly to `hex::decode`, this can be used with Unicode string types
+    /// (`String`, `&str`) as well as bytes.
+    ///
+    /// To be used in FFI and I/O only, in order to facilitate future
+    /// changes of hash format.
+    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, NodeError> {
+        let hex = hex.as_ref();
+        let len = hex.len();
+        if len > NODE_NYBBLES_LENGTH {
+            return Err(NodeError::PrefixTooLong(
+                String::from_utf8_lossy(hex).into_owned(),
+            ));
+        }
+
+        let is_odd = len % 2 == 1;
+        let even_part = if is_odd { &hex[..len - 1] } else { hex };
+        let mut buf: Vec<u8> = Vec::from_hex(&even_part)
+            .map_err(|e| (e, String::from_utf8_lossy(hex)))?;
+
+        if is_odd {
+            let latest_char = char::from(hex[len - 1]);
+            let latest_nybble = latest_char.to_digit(16).ok_or_else(|| {
+                (
+                    FromHexError::InvalidHexCharacter {
+                        c: latest_char,
+                        index: len - 1,
+                    },
+                    String::from_utf8_lossy(hex),
+                )
+            })? as u8;
+            buf.push(latest_nybble << 4);
+        }
+        Ok(NodePrefix { buf, is_odd })
+    }
+
+    pub fn borrow(&self) -> NodePrefixRef {
+        NodePrefixRef {
+            buf: &self.buf,
+            is_odd: self.is_odd,
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct NodePrefixRef<'a> {
+    buf: &'a [u8],
+    is_odd: bool,
+}
+
+impl<'a> NodePrefixRef<'a> {
+    pub fn len(&self) -> usize {
+        if self.is_odd {
+            self.buf.len() * 2 - 1
+        } else {
+            self.buf.len() * 2
+        }
+    }
+
+    pub fn is_prefix_of(&self, node: &Node) -> bool {
+        if self.is_odd {
+            let buf = self.buf;
+            let last_pos = buf.len() - 1;
+            node.data.starts_with(buf.split_at(last_pos).0)
+                && node.data[last_pos] >> 4 == buf[last_pos] >> 4
+        } else {
+            node.data.starts_with(self.buf)
+        }
+    }
+
+    /// Retrieve the `i`th half-byte from the prefix.
+    ///
+    /// This is also the `i`th hexadecimal digit in numeric form,
+    /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
+    pub fn get_nybble(&self, i: usize) -> u8 {
+        assert!(i < self.len());
+        get_nybble(self.buf, i)
+    }
+}
+
+/// A shortcut for full `Node` references
+impl<'a> From<&'a Node> for NodePrefixRef<'a> {
+    fn from(node: &'a Node) -> Self {
+        NodePrefixRef {
+            buf: &node.data,
+            is_odd: false,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn sample_node() -> Node {
+        let mut data = [0; NODE_BYTES_LENGTH];
+        data.copy_from_slice(&[
+            0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba,
+            0x98, 0x76, 0x54, 0x32, 0x10, 0xde, 0xad, 0xbe, 0xef,
+        ]);
+        data.into()
+    }
+
+    /// Pad a hexadecimal string to reach `NODE_NYBBLES_LENGTH`
+    ///
+    /// The padding is made with zeros
+    pub fn hex_pad_right(hex: &str) -> String {
+        let mut res = hex.to_string();
+        while res.len() < NODE_NYBBLES_LENGTH {
+            res.push('0');
+        }
+        res
+    }
+
+    fn sample_node_hex() -> String {
+        hex_pad_right("0123456789abcdeffedcba9876543210deadbeef")
+    }
+
+    #[test]
+    fn test_node_from_hex() {
+        assert_eq!(Node::from_hex(&sample_node_hex()), Ok(sample_node()));
+
+        let mut short = hex_pad_right("0123");
+        short.pop();
+        short.pop();
+        assert_eq!(
+            Node::from_hex(&short),
+            Err(NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, short)),
+        );
+
+        let not_hex = hex_pad_right("012... oops");
+        assert_eq!(
+            Node::from_hex(&not_hex),
+            Err(NodeError::HexError(
+                FromHexError::InvalidHexCharacter { c: '.', index: 3 },
+                not_hex,
+            )),
+        );
+    }
+
+    #[test]
+    fn test_node_encode_hex() {
+        assert_eq!(sample_node().encode_hex(), sample_node_hex());
+    }
+
+    #[test]
+    fn test_prefix_from_hex() -> Result<(), NodeError> {
+        assert_eq!(
+            NodePrefix::from_hex("0e1")?,
+            NodePrefix {
+                buf: vec![14, 16],
+                is_odd: true
+            }
+        );
+        assert_eq!(
+            NodePrefix::from_hex("0e1a")?,
+            NodePrefix {
+                buf: vec![14, 26],
+                is_odd: false
+            }
+        );
+
+        // checking limit case
+        let node_as_vec = sample_node().data.iter().cloned().collect();
+        assert_eq!(
+            NodePrefix::from_hex(sample_node_hex())?,
+            NodePrefix {
+                buf: node_as_vec,
+                is_odd: false
+            }
+        );
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_prefix_from_hex_errors() {
+        assert_eq!(
+            NodePrefix::from_hex("testgr"),
+            Err(NodeError::HexError(
+                FromHexError::InvalidHexCharacter { c: 't', index: 0 },
+                "testgr".to_string()
+            ))
+        );
+        let mut long = NULL_NODE.encode_hex();
+        long.push('c');
+        match NodePrefix::from_hex(&long)
+            .expect_err("should be refused as too long")
+        {
+            NodeError::PrefixTooLong(s) => assert_eq!(s, long),
+            err => panic!("should have been PrefixTooLong, got {:?}", err),
+        }
+    }
+
+    #[test]
+    fn test_is_prefix_of() -> Result<(), NodeError> {
+        let mut node_data = [0; NODE_BYTES_LENGTH];
+        node_data[0] = 0x12;
+        node_data[1] = 0xca;
+        let node = Node::from(node_data);
+        assert!(NodePrefix::from_hex("12")?.borrow().is_prefix_of(&node));
+        assert!(!NodePrefix::from_hex("1a")?.borrow().is_prefix_of(&node));
+        assert!(NodePrefix::from_hex("12c")?.borrow().is_prefix_of(&node));
+        assert!(!NodePrefix::from_hex("12d")?.borrow().is_prefix_of(&node));
+        Ok(())
+    }
+
+    #[test]
+    fn test_get_nybble() -> Result<(), NodeError> {
+        let prefix = NodePrefix::from_hex("dead6789cafe")?;
+        assert_eq!(prefix.borrow().get_nybble(0), 13);
+        assert_eq!(prefix.borrow().get_nybble(7), 9);
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+pub use tests::hex_pad_right;
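To make the nybble and odd-length-prefix logic above concrete, here is a
worked Python equivalent of `get_nybble` and `is_prefix_of` (illustrative
helpers only, mirroring the tests in this file):

    def get_nybble(data, i):
        byte = data[i // 2]
        return byte >> 4 if i % 2 == 0 else byte & 0x0F

    def is_prefix_of(prefix_hex, node):
        # An odd-length hex prefix constrains only the high nybble of its
        # final byte, which is why NodePrefix records `is_odd`.
        return all(get_nybble(node, i) == int(c, 16)
                   for i, c in enumerate(prefix_hex))

    node = bytes.fromhex('12ca' + '00' * 18)
    assert is_prefix_of('12c', node)
    assert not is_prefix_of('12d', node)
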
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/nodemap.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -0,0 +1,526 @@
+// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
+//           and Mercurial contributors
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+//! Indexing facilities for fast retrieval of `Revision` from `Node`
+//!
+//! This provides a variation on the 16-ary radix tree that is
+//! provided as "nodetree" in revlog.c, ready for append-only persistence
+//! on disk.
+//!
+//! Following existing implicit conventions, the "nodemap" terminology
+//! is used in a more abstract context.
+
+use super::{
+    Node, NodeError, NodePrefix, NodePrefixRef, Revision, RevlogIndex,
+};
+use std::fmt;
+use std::ops::Deref;
+use std::ops::Index;
+
+#[derive(Debug, PartialEq)]
+pub enum NodeMapError {
+    MultipleResults,
+    InvalidNodePrefix(NodeError),
+    /// A `Revision` stored in the nodemap could not be found in the index
+    RevisionNotInIndex(Revision),
+}
+
+impl From<NodeError> for NodeMapError {
+    fn from(err: NodeError) -> Self {
+        NodeMapError::InvalidNodePrefix(err)
+    }
+}
+
+/// Mapping system from Mercurial nodes to revision numbers.
+///
+/// ## `RevlogIndex` and `NodeMap`
+///
+/// One way to think about their relationship is that
+/// the `NodeMap` is a prefix-oriented reverse index of the `Node` information
+/// carried by a [`RevlogIndex`].
+///
+/// Many of the methods in this trait take a `RevlogIndex` argument
+/// which is used for validation of their results. This index must naturally
+/// be the one the `NodeMap` is about, and it must be consistent.
+///
+/// Notably, the `NodeMap` must not store
+/// information about more `Revision` values than there are in the index.
+/// In these methods, if an encountered `Revision` is not in the index, a
+/// [`RevisionNotInIndex`] error is returned.
+///
+/// In insert operations, the rule is thus that the `RevlogIndex` must
+/// be updated first, and the `NodeMap` second.
+///
+/// [`RevisionNotInIndex`]: enum.NodeMapError.html#variant.RevisionNotInIndex
+/// [`RevlogIndex`]: ../trait.RevlogIndex.html
+pub trait NodeMap {
+    /// Find the unique `Revision` having the given `Node`
+    ///
+    /// If no Revision matches the given `Node`, `Ok(None)` is returned.
+    fn find_node(
+        &self,
+        index: &impl RevlogIndex,
+        node: &Node,
+    ) -> Result<Option<Revision>, NodeMapError> {
+        self.find_bin(index, node.into())
+    }
+
+    /// Find the unique Revision whose `Node` starts with a given binary prefix
+    ///
+    /// If no Revision matches the given prefix, `Ok(None)` is returned.
+    ///
+    /// If several Revisions match the given prefix, a [`MultipleResults`]
+    /// error is returned.
+    fn find_bin<'a>(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: NodePrefixRef<'a>,
+    ) -> Result<Option<Revision>, NodeMapError>;
+
+    /// Find the unique Revision whose `Node` hexadecimal string representation
+    /// starts with a given prefix
+    ///
+    /// If no Revision matches the given prefix, `Ok(None)` is returned.
+    ///
+    /// If several Revisions match the given prefix, a [`MultipleResults`]
+    /// error is returned.
+    fn find_hex(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: &str,
+    ) -> Result<Option<Revision>, NodeMapError> {
+        self.find_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
+    }
+}
+
+/// Low level NodeTree [`Blocks`] elements
+///
+/// These are exactly as they would appear, for instance, on persistent storage.
+type RawElement = i32;
+
+/// High level representation of values in NodeTree
+/// [`Blocks`](struct.Block.html)
+///
+/// This is the high level representation that most algorithms should
+/// use.
+#[derive(Clone, Debug, Eq, PartialEq)]
+enum Element {
+    Rev(Revision),
+    Block(usize),
+    None,
+}
+
+impl From<RawElement> for Element {
+    /// Conversion from low level representation, after endianness conversion.
+    ///
+    /// See [`Block`](struct.Block.html) for explanation about the encoding.
+    fn from(raw: RawElement) -> Element {
+        if raw >= 0 {
+            Element::Block(raw as usize)
+        } else if raw == -1 {
+            Element::None
+        } else {
+            Element::Rev(-raw - 2)
+        }
+    }
+}
+
+impl From<Element> for RawElement {
+    fn from(element: Element) -> RawElement {
+        match element {
+            // -1 is the empty marker (see `Block` below); 0 would be
+            // read back as `Block(0)`.
+            Element::None => -1,
+            Element::Block(i) => i as RawElement,
+            Element::Rev(rev) => -rev - 2,
+        }
+    }
+}
+
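A quick round-trip check of this encoding, in Python for brevity (values as
documented on `Block` below: -1 absent, >= 0 child block, <= -2 revision
leaf):

    def decode(raw):
        if raw >= 0:
            return ('block', raw)
        if raw == -1:
            return None  # absent edge
        return ('rev', -raw - 2)

    def encode(elem):
        if elem is None:
            return -1
        kind, value = elem
        return value if kind == 'block' else -value - 2

    for raw in (-1, 0, 5, -2, -7):
        assert encode(decode(raw)) == raw
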
+/// A logical block of the `NodeTree`, packed with a fixed size.
+///
+/// These are always used in container types implementing `Index<Block>`,
+/// such as `&Block`
+///
+/// As an array of integers, its `i`th element encodes the
+/// `i`th potential edge from the block, i.e. the edge followed for the
+/// hexadecimal digit (nybble) `i`. That edge is either:
+///
+/// - absent (value -1)
+/// - another `Block` in the same indexable container (value ≥ 0)
+/// - a `Revision` leaf (value ≤ -2)
+///
+/// Endianness has to be fixed for consistency on shared storage across
+/// different architectures.
+///
+/// A key difference with the C `nodetree` is that we need to be
+/// able to represent the [`Block`] at index 0, hence -1 is the empty marker
+/// rather than 0, and the `Revision` range upper limit is -2 rather than -1.
+///
+/// Another related difference is that `NULL_REVISION` (-1) is not
+/// represented at all, because we want an immutable empty nodetree
+/// to be valid.
+
+#[derive(Clone, PartialEq)]
+pub struct Block([RawElement; 16]);
+
+impl Block {
+    fn new() -> Self {
+        Block([-1; 16])
+    }
+
+    fn get(&self, nybble: u8) -> Element {
+        Element::from(RawElement::from_be(self.0[nybble as usize]))
+    }
+
+    fn set(&mut self, nybble: u8, element: Element) {
+        self.0[nybble as usize] = RawElement::to_be(element.into())
+    }
+}
+
+impl fmt::Debug for Block {
+    /// sparse representation for testing and debugging purposes
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_map()
+            .entries((0..16).filter_map(|i| match self.get(i) {
+                Element::None => None,
+                element => Some((i, element)),
+            }))
+            .finish()
+    }
+}
+
+/// A mutable 16-radix tree with the root block logically at the end
+///
+/// Because of the append only nature of our node trees, we need to
+/// keep the original untouched and store new blocks separately.
+///
+/// The mutable root `Block` is kept apart so that we don't have to move it
+/// back to the end on each insertion.
+pub struct NodeTree {
+    readonly: Box<dyn Deref<Target = [Block]> + Send>,
+    growable: Vec<Block>,
+    root: Block,
+}
+
+impl Index<usize> for NodeTree {
+    type Output = Block;
+
+    fn index(&self, i: usize) -> &Block {
+        let ro_len = self.readonly.len();
+        if i < ro_len {
+            &self.readonly[i]
+        } else if i == ro_len + self.growable.len() {
+            &self.root
+        } else {
+            &self.growable[i - ro_len]
+        }
+    }
+}
+
+/// Return `None` unless the `Node` for `rev` has the given prefix in `idx`.
+fn has_prefix_or_none<'p>(
+    idx: &impl RevlogIndex,
+    prefix: NodePrefixRef<'p>,
+    rev: Revision,
+) -> Result<Option<Revision>, NodeMapError> {
+    idx.node(rev)
+        .ok_or_else(|| NodeMapError::RevisionNotInIndex(rev))
+        .map(|node| {
+            if prefix.is_prefix_of(node) {
+                Some(rev)
+            } else {
+                None
+            }
+        })
+}
+
+impl NodeTree {
+    /// Initiate a NodeTree from an immutable slice-like of `Block`
+    ///
+    /// We keep `readonly` and clone its root block if it isn't empty.
+    fn new(readonly: Box<dyn Deref<Target = [Block]> + Send>) -> Self {
+        let root = readonly.last().cloned().unwrap_or_else(Block::new);
+        NodeTree {
+            readonly,
+            growable: Vec::new(),
+            root,
+        }
+    }
+
+    /// Total number of blocks
+    fn len(&self) -> usize {
+        self.readonly.len() + self.growable.len() + 1
+    }
+
+    /// Implemented for completeness
+    ///
+    /// A `NodeTree` always has at least the mutable root block.
+    #[allow(dead_code)]
+    fn is_empty(&self) -> bool {
+        false
+    }
+
+    /// Main working method for `NodeTree` searches
+    ///
+    /// This partial implementation lacks special cases for NULL_REVISION
+    fn lookup<'p>(
+        &self,
+        prefix: NodePrefixRef<'p>,
+    ) -> Result<Option<Revision>, NodeMapError> {
+        for visit_item in self.visit(prefix) {
+            if let Some(opt) = visit_item.final_revision() {
+                return Ok(opt);
+            }
+        }
+        Err(NodeMapError::MultipleResults)
+    }
+
+    fn visit<'n, 'p>(
+        &'n self,
+        prefix: NodePrefixRef<'p>,
+    ) -> NodeTreeVisitor<'n, 'p> {
+        NodeTreeVisitor {
+            nt: self,
+            prefix,
+            visit: self.len() - 1,
+            nybble_idx: 0,
+            done: false,
+        }
+    }
+}
+
+struct NodeTreeVisitor<'n, 'p> {
+    nt: &'n NodeTree,
+    prefix: NodePrefixRef<'p>,
+    visit: usize,
+    nybble_idx: usize,
+    done: bool,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+struct NodeTreeVisitItem {
+    block_idx: usize,
+    nybble: u8,
+    element: Element,
+}
+
+impl<'n, 'p> Iterator for NodeTreeVisitor<'n, 'p> {
+    type Item = NodeTreeVisitItem;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.done || self.nybble_idx >= self.prefix.len() {
+            return None;
+        }
+
+        let nybble = self.prefix.get_nybble(self.nybble_idx);
+        self.nybble_idx += 1;
+
+        let visit = self.visit;
+        let element = self.nt[visit].get(nybble);
+        if let Element::Block(idx) = element {
+            self.visit = idx;
+        } else {
+            self.done = true;
+        }
+
+        Some(NodeTreeVisitItem {
+            block_idx: visit,
+            nybble,
+            element,
+        })
+    }
+}
+
+impl NodeTreeVisitItem {
+    // Return `Some(opt)` if this item is final, with `opt` being the
+    // `Revision` that it may represent.
+    //
+    // If the item is not final, return `None`.
+    fn final_revision(&self) -> Option<Option<Revision>> {
+        match self.element {
+            Element::Block(_) => None,
+            Element::Rev(r) => Some(Some(r)),
+            Element::None => Some(None),
+        }
+    }
+}
+
+impl From<Vec<Block>> for NodeTree {
+    fn from(vec: Vec<Block>) -> Self {
+        Self::new(Box::new(vec))
+    }
+}
+
+impl fmt::Debug for NodeTree {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let readonly: &[Block] = &*self.readonly;
+        write!(
+            f,
+            "readonly: {:?}, growable: {:?}, root: {:?}",
+            readonly, self.growable, self.root
+        )
+    }
+}
+
+impl NodeMap for NodeTree {
+    fn find_bin<'a>(
+        &self,
+        idx: &impl RevlogIndex,
+        prefix: NodePrefixRef<'a>,
+    ) -> Result<Option<Revision>, NodeMapError> {
+        self.lookup(prefix.clone()).and_then(|opt| {
+            opt.map_or(Ok(None), |rev| has_prefix_or_none(idx, prefix, rev))
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::NodeMapError::*;
+    use super::*;
+    use crate::revlog::node::{hex_pad_right, Node};
+    use std::collections::HashMap;
+
+    /// Creates a `Block` using a syntax close to the `Debug` output
+    macro_rules! block {
+        {$($nybble:tt : $variant:ident($val:tt)),*} => (
+            {
+                let mut block = Block::new();
+                $(block.set($nybble, Element::$variant($val)));*;
+                block
+            }
+        )
+    }
+
+    #[test]
+    fn test_block_debug() {
+        let mut block = Block::new();
+        block.set(1, Element::Rev(3));
+        block.set(10, Element::Block(0));
+        assert_eq!(format!("{:?}", block), "{1: Rev(3), 10: Block(0)}");
+    }
+
+    #[test]
+    fn test_block_macro() {
+        let block = block! {5: Block(2)};
+        assert_eq!(format!("{:?}", block), "{5: Block(2)}");
+
+        let block = block! {13: Rev(15), 5: Block(2)};
+        assert_eq!(format!("{:?}", block), "{5: Block(2), 13: Rev(15)}");
+    }
+
+    #[test]
+    fn test_raw_block() {
+        let mut raw = [-1; 16];
+        raw[0] = 0;
+        raw[1] = RawElement::to_be(15);
+        raw[2] = RawElement::to_be(-2);
+        raw[3] = RawElement::to_be(-1);
+        raw[4] = RawElement::to_be(-3);
+        let block = Block(raw);
+        assert_eq!(block.get(0), Element::Block(0));
+        assert_eq!(block.get(1), Element::Block(15));
+        assert_eq!(block.get(3), Element::None);
+        assert_eq!(block.get(2), Element::Rev(0));
+        assert_eq!(block.get(4), Element::Rev(1));
+    }
+
+    type TestIndex = HashMap<Revision, Node>;
+
+    impl RevlogIndex for TestIndex {
+        fn node(&self, rev: Revision) -> Option<&Node> {
+            self.get(&rev)
+        }
+
+        fn len(&self) -> usize {
+            self.len()
+        }
+    }
+
+    /// Pad a hexadecimal Node prefix with zeros on the right, then insert it
+    ///
+    /// This avoids having to repeatedly write very long hexadecimal
+    /// strings for test data, and keeps the tests independent of the
+    /// actual hash size.
+    fn pad_insert(idx: &mut TestIndex, rev: Revision, hex: &str) {
+        idx.insert(rev, Node::from_hex(&hex_pad_right(hex)).unwrap());
+    }
+
+    fn sample_nodetree() -> NodeTree {
+        NodeTree::from(vec![
+            block![0: Rev(9)],
+            block![0: Rev(0), 1: Rev(9)],
+            block![0: Block(1), 1: Rev(1)],
+        ])
+    }
+
+    #[test]
+    fn test_nt_debug() {
+        let nt = sample_nodetree();
+        assert_eq!(
+            format!("{:?}", nt),
+            "readonly: \
+             [{0: Rev(9)}, {0: Rev(0), 1: Rev(9)}, {0: Block(1), 1: Rev(1)}], \
+             growable: [], \
+             root: {0: Block(1), 1: Rev(1)}",
+        );
+    }
+
+    #[test]
+    fn test_immutable_find_simplest() -> Result<(), NodeMapError> {
+        let mut idx: TestIndex = HashMap::new();
+        pad_insert(&mut idx, 1, "1234deadcafe");
+
+        let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
+        assert_eq!(nt.find_hex(&idx, "1")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "12")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "1234de")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "1a")?, None);
+        assert_eq!(nt.find_hex(&idx, "ab")?, None);
+
+        // and with full binary Nodes
+        assert_eq!(nt.find_node(&idx, idx.get(&1).unwrap())?, Some(1));
+        let unknown = Node::from_hex(&hex_pad_right("3d")).unwrap();
+        assert_eq!(nt.find_node(&idx, &unknown)?, None);
+        Ok(())
+    }
+
+    #[test]
+    fn test_immutable_find_one_jump() {
+        let mut idx = TestIndex::new();
+        pad_insert(&mut idx, 9, "012");
+        pad_insert(&mut idx, 0, "00a");
+
+        let nt = sample_nodetree();
+
+        assert_eq!(nt.find_hex(&idx, "0"), Err(MultipleResults));
+        assert_eq!(nt.find_hex(&idx, "01"), Ok(Some(9)));
+        assert_eq!(nt.find_hex(&idx, "00"), Ok(Some(0)));
+        assert_eq!(nt.find_hex(&idx, "00a"), Ok(Some(0)));
+    }
+
+    #[test]
+    fn test_mutated_find() -> Result<(), NodeMapError> {
+        let mut idx = TestIndex::new();
+        pad_insert(&mut idx, 9, "012");
+        pad_insert(&mut idx, 0, "00a");
+        pad_insert(&mut idx, 2, "cafe");
+        pad_insert(&mut idx, 3, "15");
+        pad_insert(&mut idx, 1, "10");
+
+        let nt = NodeTree {
+            readonly: sample_nodetree().readonly,
+            growable: vec![block![0: Rev(1), 5: Rev(3)]],
+            root: block![0: Block(1), 1: Block(3), 12: Rev(2)],
+        };
+        assert_eq!(nt.find_hex(&idx, "10")?, Some(1));
+        assert_eq!(nt.find_hex(&idx, "c")?, Some(2));
+        assert_eq!(nt.find_hex(&idx, "00")?, Some(0));
+        assert_eq!(nt.find_hex(&idx, "01")?, Some(9));
+        Ok(())
+    }
+}
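
The lookup machinery above is easiest to follow end to end: every `Block` lives in one flat arena (readonly blocks first, then growable ones, with the mutable root addressed last through the `Index` impl), and each hex nybble of the queried prefix either descends into a child block, lands on a terminal revision, or dead-ends. Below is a minimal stand-alone sketch of that walk with simplified types; it is not the Mercurial code, just the shape of `lookup`/`NodeTreeVisitor`:

```rust
#[derive(Clone, Copy)]
enum Element {
    Block(usize), // arena index of a child block
    Rev(i32),     // terminal: the revision this prefix resolves to
    None,         // no entry under this nybble
}

struct Block([Element; 16]);

fn empty_block() -> Block {
    Block([Element::None; 16])
}

/// Walk `nybbles` from the root block (last in the arena). `Some(rev)`
/// means a candidate was found; `None` means the prefix is unknown, or
/// still ambiguous when the nybbles run out inside an inner block.
fn lookup(arena: &[Block], nybbles: &[u8]) -> Option<i32> {
    let mut visit = arena.len() - 1; // root-is-last convention, as above
    for &nybble in nybbles {
        match arena[visit].0[nybble as usize] {
            Element::Block(idx) => visit = idx, // descend one level
            Element::Rev(r) => return Some(r),
            Element::None => return None,
        }
    }
    None // prefix exhausted while still pointing at a block
}

fn main() {
    // Mirrors the shape of `sample_nodetree()` in the tests above.
    let mut leaf = empty_block();
    leaf.0[0x1] = Element::Rev(9);
    let mut root = empty_block();
    root.0[0x0] = Element::Block(0);
    let arena = vec![leaf, root];
    assert_eq!(lookup(&arena, &[0x0, 0x1]), Some(9)); // "01" -> rev 9
    assert_eq!(lookup(&arena, &[0x0]), None); // "0" is still ambiguous
    assert_eq!(lookup(&arena, &[0xa]), None); // "a" matches nothing
}
```

The real implementation then re-checks the candidate against the index (`has_prefix_or_none`), since the block walk alone cannot verify any nybbles beyond those stored in the tree.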
--- a/rust/hg-core/src/utils/hg_path.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-core/src/utils/hg_path.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -138,6 +138,81 @@
             None
         }
     }
+
+    #[cfg(windows)]
+    /// Copied from the Python stdlib's `os.path.splitdrive` implementation.
+    ///
+    /// Split a pathname into drive/UNC sharepoint and relative path
+    /// specifiers. Returns a 2-tuple (drive_or_unc, path); either part may
+    /// be empty.
+    ///
+    /// If you assign
+    ///  result = split_drive(p)
+    /// it is always true that:
+    ///  result[0] + result[1] == p
+    ///
+    /// If the path contained a drive letter, drive_or_unc will contain
+    /// everything up to and including the colon.
+    /// e.g. split_drive("c:/dir") returns ("c:", "/dir")
+    ///
+    /// If the path contained a UNC path, drive_or_unc will contain the
+    /// host name and share up to but not including the fourth directory
+    /// separator character.
+    /// e.g. split_drive("//host/computer/dir") returns ("//host/computer",
+    /// "/dir")
+    ///
+    /// Paths cannot contain both a drive letter and a UNC path.
+    pub fn split_drive(&self) -> (&HgPath, &HgPath) {
+        let bytes = self.as_bytes();
+        let is_sep = |b| std::path::is_separator(b as char);
+
+        if self.len() < 2 {
+            (HgPath::new(b""), &self)
+        } else if is_sep(bytes[0])
+            && is_sep(bytes[1])
+            && (self.len() == 2 || !is_sep(bytes[2]))
+        {
+            // Is a UNC path:
+            // vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
+            // \\machine\mountpoint\directory\etc\...
+            //           directory ^^^^^^^^^^^^^^^
+
+            let machine_end_index = bytes[2..].iter().position(|b| is_sep(*b));
+            let mountpoint_start_index = if let Some(i) = machine_end_index {
+                i + 2
+            } else {
+                return (HgPath::new(b""), &self);
+            };
+
+            match bytes[mountpoint_start_index + 1..]
+                .iter()
+                .position(|b| is_sep(*b))
+            {
+                // A UNC path can't have two slashes in a row
+                // (after the initial two)
+                Some(0) => (HgPath::new(b""), &self),
+                Some(i) => {
+                    let (a, b) =
+                        bytes.split_at(mountpoint_start_index + 1 + i);
+                    (HgPath::new(a), HgPath::new(b))
+                }
+                None => (&self, HgPath::new(b"")),
+            }
+        } else if bytes[1] == b':' {
+            // Drive path c:\directory
+            let (a, b) = bytes.split_at(2);
+            (HgPath::new(a), HgPath::new(b))
+        } else {
+            (HgPath::new(b""), &self)
+        }
+    }
+
+    #[cfg(unix)]
+    /// Split a pathname into drive and path. On Posix, drive is always empty.
+    pub fn split_drive(&self) -> (&HgPath, &HgPath) {
+        (HgPath::new(b""), &self)
+    }
+
     /// Checks for errors in the path, short-circuiting at the first one.
     /// This generates fine-grained errors useful for debugging.
     /// To simply check if the path is valid during tests, use `is_valid`.
@@ -473,4 +548,101 @@
         let base = HgPath::new(b"ends/");
         assert_eq!(Some(HgPath::new(b"with/dir/")), path.relative_to(base));
     }
+
+    #[test]
+    #[cfg(unix)]
+    fn test_split_drive() {
+        // Taken from the Python stdlib's tests
+        assert_eq!(
+            HgPath::new(br"/foo/bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"/foo/bar"))
+        );
+        assert_eq!(
+            HgPath::new(br"foo:bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"foo:bar"))
+        );
+        assert_eq!(
+            HgPath::new(br":foo:bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br":foo:bar"))
+        );
+        // Also try NT paths; should not split them
+        assert_eq!(
+            HgPath::new(br"c:\foo\bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"c:\foo\bar"))
+        );
+        assert_eq!(
+            HgPath::new(b"c:/foo/bar").split_drive(),
+            (HgPath::new(b""), HgPath::new(br"c:/foo/bar"))
+        );
+        assert_eq!(
+            HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(b""),
+                HgPath::new(br"\\conky\mountpoint\foo\bar")
+            )
+        );
+    }
+
+    #[test]
+    #[cfg(windows)]
+    fn test_split_drive() {
+        assert_eq!(
+            HgPath::new(br"c:\foo\bar").split_drive(),
+            (HgPath::new(br"c:"), HgPath::new(br"\foo\bar"))
+        );
+        assert_eq!(
+            HgPath::new(b"c:/foo/bar").split_drive(),
+            (HgPath::new(br"c:"), HgPath::new(br"/foo/bar"))
+        );
+        assert_eq!(
+            HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(br"\\conky\mountpoint"),
+                HgPath::new(br"\foo\bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"//conky/mountpoint/foo/bar").split_drive(),
+            (
+                HgPath::new(br"//conky/mountpoint"),
+                HgPath::new(br"/foo/bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"\\\conky\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"\\\conky\mountpoint\foo\bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"///conky/mountpoint/foo/bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"///conky/mountpoint/foo/bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"\\conky\\mountpoint\foo\bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"\\conky\\mountpoint\foo\bar")
+            )
+        );
+        assert_eq!(
+            HgPath::new(br"//conky//mountpoint/foo/bar").split_drive(),
+            (
+                HgPath::new(br""),
+                HgPath::new(br"//conky//mountpoint/foo/bar")
+            )
+        );
+        // UNC part containing U+0130
+        assert_eq!(
+            HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT/foo/bar").split_drive(),
+            (
+                HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT"),
+                HgPath::new(br"/foo/bar")
+            )
+        );
+    }
 }
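
In the common non-UNC case, the split above reduces to a single byte test on the second character. Here is a stand-alone sketch of just that branch on raw byte paths (the UNC branch adds the scan for the fourth separator described in the doc comment; this is not the `HgPath` code itself):

```rust
// Drive-letter case only: a path has a drive part iff its second byte is
// b':'; the drive keeps the colon, everything else splits as (empty, path).
fn split_drive(path: &[u8]) -> (&[u8], &[u8]) {
    if path.len() >= 2 && path[1] == b':' {
        path.split_at(2)
    } else {
        (&path[..0], path)
    }
}

fn main() {
    assert_eq!(split_drive(br"c:\dir"), (&b"c:"[..], &br"\dir"[..]));
    assert_eq!(split_drive(b"c:/dir"), (&b"c:"[..], &b"/dir"[..]));
    assert_eq!(split_drive(b"/foo/bar"), (&b""[..], &b"/foo/bar"[..]));
}
```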
--- a/rust/hg-cpython/Cargo.toml	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/Cargo.toml	Mon Feb 03 11:51:52 2020 -0500
@@ -25,5 +25,5 @@
 libc = '*'
 
 [dependencies.cpython]
-version = "0.3"
+version = "0.4"
 default-features = false
--- a/rust/hg-cpython/src/dirstate/copymap.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/copymap.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -8,11 +8,12 @@
 //! Bindings for `hg::dirstate::dirstate_map::CopyMap` provided by the
 //! `hg-core` package.
 
-use cpython::{PyBytes, PyClone, PyDict, PyObject, PyResult, Python};
+use cpython::{
+    PyBytes, PyClone, PyDict, PyObject, PyResult, Python, UnsafePyLeaked,
+};
 use std::cell::RefCell;
 
 use crate::dirstate::dirstate_map::DirstateMap;
-use crate::ref_sharing::PyLeaked;
 use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
 
 py_class!(pub class CopyMap |py| {
@@ -104,14 +105,14 @@
 
 py_shared_iterator!(
     CopyMapKeysIterator,
-    PyLeaked<CopyMapIter<'static>>,
+    UnsafePyLeaked<CopyMapIter<'static>>,
     CopyMap::translate_key,
     Option<PyBytes>
 );
 
 py_shared_iterator!(
     CopyMapItemsIterator,
-    PyLeaked<CopyMapIter<'static>>,
+    UnsafePyLeaked<CopyMapIter<'static>>,
     CopyMap::translate_key_value,
     Option<(PyBytes, PyBytes)>
 );
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -13,11 +13,10 @@
 
 use cpython::{
     exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
-    Python,
+    Python, UnsafePyLeaked,
 };
 
 use crate::dirstate::extract_dirstate;
-use crate::ref_sharing::{PyLeaked, PySharedRefCell};
 use hg::{
     utils::hg_path::{HgPath, HgPathBuf},
     DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError,
@@ -25,7 +24,7 @@
 };
 
 py_class!(pub class Dirs |py| {
-    data inner: PySharedRefCell<DirsMultiset>;
+    @shared data inner: DirsMultiset;
 
     // `map` is either a `dict` or a flat iterator (usually a `set`, sometimes
     // a `list`)
@@ -65,14 +64,11 @@
                 })?
         };
 
-        Self::create_instance(
-            py,
-            PySharedRefCell::new(inner),
-        )
+        Self::create_instance(py, inner)
     }
 
     def addpath(&self, path: PyObject) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.add_path(
+        self.inner(py).borrow_mut().add_path(
             HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
         ).and(Ok(py.None())).or_else(|e| {
             match e {
@@ -90,7 +86,7 @@
     }
 
     def delpath(&self, path: PyObject) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.delete_path(
+        self.inner(py).borrow_mut().delete_path(
             HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
         )
             .and(Ok(py.None()))
@@ -109,7 +105,7 @@
             })
     }
     def __iter__(&self) -> PyResult<DirsMultisetKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirsMultisetKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -117,17 +113,15 @@
     }
 
     def __contains__(&self, item: PyObject) -> PyResult<bool> {
-        Ok(self.inner_shared(py).borrow().contains(HgPath::new(
+        Ok(self.inner(py).borrow().contains(HgPath::new(
             item.extract::<PyBytes>(py)?.data(py).as_ref(),
         )))
     }
 });
 
-py_shared_ref!(Dirs, DirsMultiset, inner, inner_shared);
-
 impl Dirs {
     pub fn from_inner(py: Python, d: DirsMultiset) -> PyResult<Self> {
-        Self::create_instance(py, PySharedRefCell::new(d))
+        Self::create_instance(py, d)
     }
 
     fn translate_key(
@@ -140,7 +134,7 @@
 
 py_shared_iterator!(
     DirsMultisetKeysIterator,
-    PyLeaked<DirsMultisetIter<'static>>,
+    UnsafePyLeaked<DirsMultisetIter<'static>>,
     Dirs::translate_key,
     Option<PyBytes>
 );
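
This conversion sets the pattern for the files below: `@shared data` stores the Rust value directly in the `py_class!` object, and the generated `inner(py).borrow_mut()` no longer returns a `PyResult`, which is why every trailing `?` disappears. A conflicting borrow presumably becomes a `RefCell`-style panic rather than a catchable `AlreadyBorrowed` exception. A plain-`RefCell` sketch of that semantic difference (stand-in types, not the binding code):

```rust
use std::cell::RefCell;

fn main() {
    let inner = RefCell::new(Vec::<u8>::new());

    // Old shape: `self.inner_shared(py).borrow_mut()?` surfaced a borrow
    // conflict as a Python-level error. New shape: `borrow_mut()` panics
    // on conflict, and `try_borrow_mut()` is the fallible spelling.
    {
        let _read = inner.borrow();
        assert!(inner.try_borrow_mut().is_err()); // borrow_mut() would panic
    }
    inner.borrow_mut().push(b'x'); // fine once the read guard is gone
    assert_eq!(*inner.borrow(), vec![b'x']);
}
```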
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -14,13 +14,12 @@
 
 use cpython::{
     exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyObject,
-    PyResult, PyTuple, Python, PythonObject, ToPyObject,
+    PyResult, PyTuple, Python, PythonObject, ToPyObject, UnsafePyLeaked,
 };
 
 use crate::{
     dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
     dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
-    ref_sharing::{PyLeaked, PySharedRefCell},
 };
 use hg::{
     utils::hg_path::{HgPath, HgPathBuf},
@@ -42,18 +41,15 @@
 //     All attributes also have to have a separate refcount data attribute for
 //     leaks, with all methods that go along for reference sharing.
 py_class!(pub class DirstateMap |py| {
-    data inner: PySharedRefCell<RustDirstateMap>;
+    @shared data inner: RustDirstateMap;
 
     def __new__(_cls, _root: PyObject) -> PyResult<Self> {
         let inner = RustDirstateMap::default();
-        Self::create_instance(
-            py,
-            PySharedRefCell::new(inner),
-        )
+        Self::create_instance(py, inner)
     }
 
     def clear(&self) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.clear();
+        self.inner(py).borrow_mut().clear();
         Ok(py.None())
     }
 
@@ -63,7 +59,7 @@
         default: Option<PyObject> = None
     ) -> PyResult<Option<PyObject>> {
         let key = key.extract::<PyBytes>(py)?;
-        match self.inner_shared(py).borrow().get(HgPath::new(key.data(py))) {
+        match self.inner(py).borrow().get(HgPath::new(key.data(py))) {
             Some(entry) => {
                 Ok(Some(make_dirstate_tuple(py, entry)?))
             },
@@ -80,7 +76,7 @@
         size: PyObject,
         mtime: PyObject
     ) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?.add_file(
+        self.inner(py).borrow_mut().add_file(
             HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
             oldstate.extract::<PyBytes>(py)?.data(py)[0]
                 .try_into()
@@ -108,7 +104,7 @@
         oldstate: PyObject,
         size: PyObject
     ) -> PyResult<PyObject> {
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .remove_file(
                 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                 oldstate.extract::<PyBytes>(py)?.data(py)[0]
@@ -132,7 +128,7 @@
         f: PyObject,
         oldstate: PyObject
     ) -> PyResult<PyBool> {
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .drop_file(
                 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                 oldstate.extract::<PyBytes>(py)?.data(py)[0]
@@ -163,7 +159,7 @@
                 ))
             })
             .collect();
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .clear_ambiguous_times(files?, now.extract(py)?);
         Ok(py.None())
     }
@@ -171,7 +167,7 @@
     // TODO share the reference
     def nonnormalentries(&self) -> PyResult<PyObject> {
         let (non_normal, other_parent) =
-            self.inner_shared(py).borrow().non_normal_other_parent_entries();
+            self.inner(py).borrow().non_normal_other_parent_entries();
 
         let locals = PyDict::new(py);
         locals.set_item(
@@ -198,7 +194,7 @@
 
     def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
         let d = d.extract::<PyBytes>(py)?;
-        Ok(self.inner_shared(py).borrow_mut()?
+        Ok(self.inner(py).borrow_mut()
             .has_tracked_dir(HgPath::new(d.data(py)))
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -208,7 +204,7 @@
 
     def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
         let d = d.extract::<PyBytes>(py)?;
-        Ok(self.inner_shared(py).borrow_mut()?
+        Ok(self.inner(py).borrow_mut()
             .has_dir(HgPath::new(d.data(py)))
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -217,7 +213,7 @@
     }
 
     def parents(&self, st: PyObject) -> PyResult<PyTuple> {
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .parents(st.extract::<PyBytes>(py)?.data(py))
             .and_then(|d| {
                 Ok((PyBytes::new(py, &d.p1), PyBytes::new(py, &d.p2))
@@ -235,13 +231,13 @@
         let p1 = extract_node_id(py, &p1)?;
         let p2 = extract_node_id(py, &p2)?;
 
-        self.inner_shared(py).borrow_mut()?
+        self.inner(py).borrow_mut()
             .set_parents(&DirstateParents { p1, p2 });
         Ok(py.None())
     }
 
     def read(&self, st: PyObject) -> PyResult<Option<PyObject>> {
-        match self.inner_shared(py).borrow_mut()?
+        match self.inner(py).borrow_mut()
             .read(st.extract::<PyBytes>(py)?.data(py))
         {
             Ok(Some(parents)) => Ok(Some(
@@ -268,7 +264,7 @@
             p2: extract_node_id(py, &p2)?,
         };
 
-        match self.inner_shared(py).borrow_mut()?.pack(parents, now) {
+        match self.inner(py).borrow_mut().pack(parents, now) {
             Ok(packed) => Ok(PyBytes::new(py, &packed)),
             Err(_) => Err(PyErr::new::<exc::OSError, _>(
                 py,
@@ -280,7 +276,7 @@
     def filefoldmapasdict(&self) -> PyResult<PyDict> {
         let dict = PyDict::new(py);
         for (key, value) in
-            self.inner_shared(py).borrow_mut()?.build_file_fold_map().iter()
+            self.inner(py).borrow_mut().build_file_fold_map().iter()
         {
             dict.set_item(py, key.as_ref().to_vec(), value.as_ref().to_vec())?;
         }
@@ -288,18 +284,18 @@
     }
 
     def __len__(&self) -> PyResult<usize> {
-        Ok(self.inner_shared(py).borrow().len())
+        Ok(self.inner(py).borrow().len())
     }
 
     def __contains__(&self, key: PyObject) -> PyResult<bool> {
         let key = key.extract::<PyBytes>(py)?;
-        Ok(self.inner_shared(py).borrow().contains_key(HgPath::new(key.data(py))))
+        Ok(self.inner(py).borrow().contains_key(HgPath::new(key.data(py))))
     }
 
     def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
         let key = key.extract::<PyBytes>(py)?;
         let key = HgPath::new(key.data(py));
-        match self.inner_shared(py).borrow().get(key) {
+        match self.inner(py).borrow().get(key) {
             Some(entry) => {
                 Ok(make_dirstate_tuple(py, entry)?)
             },
@@ -311,7 +307,7 @@
     }
 
     def keys(&self) -> PyResult<DirstateMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirstateMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -319,7 +315,7 @@
     }
 
     def items(&self) -> PyResult<DirstateMapItemsIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirstateMapItemsIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -327,7 +323,7 @@
     }
 
     def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         DirstateMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -336,14 +332,14 @@
 
     def getdirs(&self) -> PyResult<Dirs> {
         // TODO don't copy, share the reference
-        self.inner_shared(py).borrow_mut()?.set_dirs()
+        self.inner(py).borrow_mut().set_dirs()
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
             })?;
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
-                &self.inner_shared(py).borrow(),
+                &self.inner(py).borrow(),
                 Some(EntryState::Removed),
             )
             .map_err(|e| {
@@ -353,14 +349,14 @@
     }
     def getalldirs(&self) -> PyResult<Dirs> {
         // TODO don't copy, share the reference
-        self.inner_shared(py).borrow_mut()?.set_all_dirs()
+        self.inner(py).borrow_mut().set_all_dirs()
             .map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
             })?;
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
-                &self.inner_shared(py).borrow(),
+                &self.inner(py).borrow(),
                 None,
             ).map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -371,7 +367,7 @@
     // TODO all copymap* methods, see docstring above
     def copymapcopy(&self) -> PyResult<PyDict> {
         let dict = PyDict::new(py);
-        for (key, value) in self.inner_shared(py).borrow().copy_map.iter() {
+        for (key, value) in self.inner(py).borrow().copy_map.iter() {
             dict.set_item(
                 py,
                 PyBytes::new(py, key.as_ref()),
@@ -383,7 +379,7 @@
 
     def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
         let key = key.extract::<PyBytes>(py)?;
-        match self.inner_shared(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
+        match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
             Some(copy) => Ok(PyBytes::new(py, copy.as_ref())),
             None => Err(PyErr::new::<exc::KeyError, _>(
                 py,
@@ -396,12 +392,12 @@
     }
 
     def copymaplen(&self) -> PyResult<usize> {
-        Ok(self.inner_shared(py).borrow().copy_map.len())
+        Ok(self.inner(py).borrow().copy_map.len())
     }
     def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
         let key = key.extract::<PyBytes>(py)?;
         Ok(self
-            .inner_shared(py)
+            .inner(py)
             .borrow()
             .copy_map
             .contains_key(HgPath::new(key.data(py))))
@@ -413,7 +409,7 @@
     ) -> PyResult<Option<PyObject>> {
         let key = key.extract::<PyBytes>(py)?;
         match self
-            .inner_shared(py)
+            .inner(py)
             .borrow()
             .copy_map
             .get(HgPath::new(key.data(py)))
@@ -431,7 +427,7 @@
     ) -> PyResult<PyObject> {
         let key = key.extract::<PyBytes>(py)?;
         let value = value.extract::<PyBytes>(py)?;
-        self.inner_shared(py).borrow_mut()?.copy_map.insert(
+        self.inner(py).borrow_mut().copy_map.insert(
             HgPathBuf::from_bytes(key.data(py)),
             HgPathBuf::from_bytes(value.data(py)),
         );
@@ -444,8 +440,8 @@
     ) -> PyResult<Option<PyObject>> {
         let key = key.extract::<PyBytes>(py)?;
         match self
-            .inner_shared(py)
-            .borrow_mut()?
+            .inner(py)
+            .borrow_mut()
             .copy_map
             .remove(HgPath::new(key.data(py)))
         {
@@ -455,7 +451,7 @@
     }
 
     def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         CopyMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -463,7 +459,7 @@
     }
 
     def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable();
+        let leaked_ref = self.inner(py).leak_immutable();
         CopyMapItemsIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -477,7 +473,7 @@
         &'a self,
         py: Python<'a>,
     ) -> Ref<'a, RustDirstateMap> {
-        self.inner_shared(py).borrow()
+        self.inner(py).borrow()
     }
     fn translate_key(
         py: Python,
@@ -497,18 +493,16 @@
     }
 }
 
-py_shared_ref!(DirstateMap, RustDirstateMap, inner, inner_shared);
-
 py_shared_iterator!(
     DirstateMapKeysIterator,
-    PyLeaked<StateMapIter<'static>>,
+    UnsafePyLeaked<StateMapIter<'static>>,
     DirstateMap::translate_key,
     Option<PyBytes>
 );
 
 py_shared_iterator!(
     DirstateMapItemsIterator,
-    PyLeaked<StateMapIter<'static>>,
+    UnsafePyLeaked<StateMapIter<'static>>,
     DirstateMap::translate_key_value,
     Option<(PyBytes, PyObject)>
 );
--- a/rust/hg-cpython/src/dirstate/status.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/status.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -33,7 +33,7 @@
     let list = PyList::new(py, &[]);
 
     for (i, path) in collection.iter().enumerate() {
-        list.insert_item(
+        list.insert(
             py,
             i,
             PyBytes::new(py, path.as_ref().as_bytes()).into_object(),
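
The one-word change here tracks the cpython 0.4 rename of `PyList::insert_item` to `PyList::insert`; the arguments are unchanged. A sketch of the new call shape, assuming the `cpython` crate as a dependency (the helper name is hypothetical):

```rust
use cpython::{PyBytes, PyList, Python, PythonObject};

// Hypothetical helper mirroring the hunk above: build a Python list of
// byte strings with the cpython 0.4 spelling of list insertion.
fn bytes_list(py: Python, items: &[&[u8]]) -> PyList {
    let list = PyList::new(py, &[]);
    for (i, item) in items.iter().enumerate() {
        // 0.3 spelled this `list.insert_item(py, i, ...)`
        list.insert(py, i, PyBytes::new(py, item).into_object());
    }
    list
}
```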
--- a/rust/hg-cpython/src/exceptions.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/src/exceptions.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -13,7 +13,7 @@
 //!
 //! [`GraphError`]: struct.GraphError.html
 use cpython::{
-    exc::{IOError, RuntimeError, ValueError},
+    exc::{RuntimeError, ValueError},
     py_exception, PyErr, Python,
 };
 use hg;
@@ -39,34 +39,4 @@
     }
 }
 
-py_exception!(rustext, PatternError, RuntimeError);
-py_exception!(rustext, PatternFileError, RuntimeError);
 py_exception!(rustext, HgPathPyError, RuntimeError);
-
-impl PatternError {
-    pub fn pynew(py: Python, inner: hg::PatternError) -> PyErr {
-        match inner {
-            hg::PatternError::UnsupportedSyntax(m) => {
-                PatternError::new(py, ("PatternError", m))
-            }
-        }
-    }
-}
-
-impl PatternFileError {
-    pub fn pynew(py: Python, inner: hg::PatternFileError) -> PyErr {
-        match inner {
-            hg::PatternFileError::IO(e) => {
-                let value = (e.raw_os_error().unwrap_or(2), e.to_string());
-                PyErr::new::<IOError, _>(py, value)
-            }
-            hg::PatternFileError::Pattern(e, l) => match e {
-                hg::PatternError::UnsupportedSyntax(m) => {
-                    PatternFileError::new(py, ("PatternFileError", m, l))
-                }
-            },
-        }
-    }
-}
-
-py_exception!(shared_ref, AlreadyBorrowed, RuntimeError);
--- a/rust/hg-cpython/src/filepatterns.rs	Mon Feb 03 11:07:34 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,133 +0,0 @@
-// filepatterns.rs
-//
-// Copyright 2019, Georges Racinet <gracinet@anybox.fr>,
-// Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-//! Bindings for the `hg::filepatterns` module provided by the
-//! `hg-core` crate. From Python, this will be seen as `rustext.filepatterns`
-//! and can be used as replacement for the the pure `filepatterns` Python
-//! module.
-use crate::exceptions::{PatternError, PatternFileError};
-use cpython::{
-    PyBytes, PyDict, PyModule, PyObject, PyResult, PyTuple, Python, ToPyObject,
-};
-use hg::utils::files;
-use hg::{build_single_regex, read_pattern_file, LineNumber, PatternTuple};
-use std::path::PathBuf;
-
-/// Rust does not like functions with different return signatures.
-/// The 3-tuple version is always returned by the hg-core function,
-/// the (potential) conversion is handled at this level since it is not likely
-/// to have any measurable impact on performance.
-///
-/// The Python implementation passes a function reference for `warn` instead
-/// of a boolean that is used to emit warnings while parsing. The Rust
-/// implementation chooses to accumulate the warnings and propagate them to
-/// Python upon completion. See the `readpatternfile` function in `match.py`
-/// for more details.
-fn read_pattern_file_wrapper(
-    py: Python,
-    file_path: PyObject,
-    warn: bool,
-    source_info: bool,
-) -> PyResult<PyTuple> {
-    let bytes = file_path.extract::<PyBytes>(py)?;
-    let path = files::get_path_from_bytes(bytes.data(py));
-    match read_pattern_file(path, warn) {
-        Ok((patterns, warnings)) => {
-            if source_info {
-                let itemgetter = |x: &PatternTuple| {
-                    (PyBytes::new(py, &x.0), x.1, PyBytes::new(py, &x.2))
-                };
-                let results: Vec<(PyBytes, LineNumber, PyBytes)> =
-                    patterns.iter().map(itemgetter).collect();
-                return Ok((results, warnings_to_py_bytes(py, &warnings))
-                    .to_py_object(py));
-            }
-            let itemgetter = |x: &PatternTuple| PyBytes::new(py, &x.0);
-            let results: Vec<PyBytes> =
-                patterns.iter().map(itemgetter).collect();
-            Ok(
-                (results, warnings_to_py_bytes(py, &warnings))
-                    .to_py_object(py),
-            )
-        }
-        Err(e) => Err(PatternFileError::pynew(py, e)),
-    }
-}
-
-fn warnings_to_py_bytes(
-    py: Python,
-    warnings: &[(PathBuf, Vec<u8>)],
-) -> Vec<(PyBytes, PyBytes)> {
-    warnings
-        .iter()
-        .map(|(path, syn)| {
-            (
-                PyBytes::new(py, &files::get_bytes_from_path(path)),
-                PyBytes::new(py, syn),
-            )
-        })
-        .collect()
-}
-
-fn build_single_regex_wrapper(
-    py: Python,
-    kind: PyObject,
-    pat: PyObject,
-    globsuffix: PyObject,
-) -> PyResult<PyBytes> {
-    match build_single_regex(
-        kind.extract::<PyBytes>(py)?.data(py),
-        pat.extract::<PyBytes>(py)?.data(py),
-        globsuffix.extract::<PyBytes>(py)?.data(py),
-    ) {
-        Ok(regex) => Ok(PyBytes::new(py, &regex)),
-        Err(e) => Err(PatternError::pynew(py, e)),
-    }
-}
-
-pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
-    let dotted_name = &format!("{}.filepatterns", package);
-    let m = PyModule::new(py, dotted_name)?;
-
-    m.add(py, "__package__", package)?;
-    m.add(
-        py,
-        "__doc__",
-        "Patterns files parsing - Rust implementation",
-    )?;
-    m.add(
-        py,
-        "build_single_regex",
-        py_fn!(
-            py,
-            build_single_regex_wrapper(
-                kind: PyObject,
-                pat: PyObject,
-                globsuffix: PyObject
-            )
-        ),
-    )?;
-    m.add(
-        py,
-        "read_pattern_file",
-        py_fn!(
-            py,
-            read_pattern_file_wrapper(
-                file_path: PyObject,
-                warn: bool,
-                source_info: bool
-            )
-        ),
-    )?;
-    m.add(py, "PatternError", py.get_type::<PatternError>())?;
-    let sys = PyModule::import(py, "sys")?;
-    let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
-    sys_modules.set_item(py, dotted_name, &m)?;
-
-    Ok(m)
-}
--- a/rust/hg-cpython/src/lib.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/src/lib.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -33,7 +33,6 @@
 pub mod dirstate;
 pub mod discovery;
 pub mod exceptions;
-pub mod filepatterns;
 pub mod parsers;
 pub mod revlog;
 pub mod utils;
@@ -53,25 +52,10 @@
     m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
     m.add(
         py,
-        "filepatterns",
-        filepatterns::init_module(py, &dotted_name)?,
-    )?;
-    m.add(
-        py,
         "parsers",
         parsers::init_parsers_module(py, &dotted_name)?,
     )?;
     m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
-    m.add(
-        py,
-        "PatternFileError",
-        py.get_type::<exceptions::PatternFileError>(),
-    )?;
-    m.add(
-        py,
-        "PatternError",
-        py.get_type::<exceptions::PatternError>(),
-    )?;
     Ok(())
 });
 
--- a/rust/hg-cpython/src/ref_sharing.rs	Mon Feb 03 11:07:34 2020 -0500
+++ b/rust/hg-cpython/src/ref_sharing.rs	Mon Feb 03 11:51:52 2020 -0500
@@ -22,413 +22,6 @@
 
 //! Macros for use in the `hg-cpython` bridge library.
 
-use crate::exceptions::AlreadyBorrowed;
-use cpython::{exc, PyClone, PyErr, PyObject, PyResult, Python};
-use std::cell::{Ref, RefCell, RefMut};
-use std::ops::{Deref, DerefMut};
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-/// Manages the shared state between Python and Rust
-///
-/// `PySharedState` is owned by `PySharedRefCell`, and is shared across its
-/// derived references. The consistency of these references are guaranteed
-/// as follows:
-///
-/// - The immutability of `py_class!` object fields. Any mutation of
-///   `PySharedRefCell` is allowed only through its `borrow_mut()`.
-/// - The `py: Python<'_>` token, which makes sure that any data access is
-///   synchronized by the GIL.
-/// - The underlying `RefCell`, which prevents `PySharedRefCell` data from
-///   being directly borrowed or leaked while it is mutably borrowed.
-/// - The `borrow_count`, which is the number of references borrowed from
-///   `PyLeaked`. Just like `RefCell`, mutation is prohibited while `PyLeaked`
-///   is borrowed.
-/// - The `generation` counter, which increments on `borrow_mut()`. `PyLeaked`
-///   reference is valid only if the `current_generation()` equals to the
-///   `generation` at the time of `leak_immutable()`.
-#[derive(Debug, Default)]
-struct PySharedState {
-    // The counter variable could be Cell<usize> since any operation on
-    // PySharedState is synchronized by the GIL, but being "atomic" makes
-    // PySharedState inherently Sync. The ordering requirement doesn't
-    // matter thanks to the GIL.
-    borrow_count: AtomicUsize,
-    generation: AtomicUsize,
-}
-
-impl PySharedState {
-    fn borrow_mut<'a, T>(
-        &'a self,
-        py: Python<'a>,
-        pyrefmut: RefMut<'a, T>,
-    ) -> PyResult<RefMut<'a, T>> {
-        match self.current_borrow_count(py) {
-            0 => {
-                // Note that this wraps around to the same value if mutably
-                // borrowed more than usize::MAX times, which wouldn't happen
-                // in practice.
-                self.generation.fetch_add(1, Ordering::Relaxed);
-                Ok(pyrefmut)
-            }
-            _ => Err(AlreadyBorrowed::new(
-                py,
-                "Cannot borrow mutably while immutably borrowed",
-            )),
-        }
-    }
-
-    /// Return a reference to the wrapped data and its state with an
-    /// artificial static lifetime.
-    /// We need to be protected by the GIL for thread-safety.
-    ///
-    /// # Safety
-    ///
-    /// This is highly unsafe since the lifetime of the given data can be
-    /// extended. Do not call this function directly.
-    unsafe fn leak_immutable<T>(
-        &self,
-        _py: Python,
-        data: Ref<T>,
-    ) -> (&'static T, &'static PySharedState) {
-        let ptr: *const T = &*data;
-        let state_ptr: *const PySharedState = self;
-        (&*ptr, &*state_ptr)
-    }
-
-    fn current_borrow_count(&self, _py: Python) -> usize {
-        self.borrow_count.load(Ordering::Relaxed)
-    }
-
-    fn increase_borrow_count(&self, _py: Python) {
-        // Note that this wraps around if there are more than usize::MAX
-        // borrowed references, which shouldn't happen due to memory limit.
-        self.borrow_count.fetch_add(1, Ordering::Relaxed);
-    }
-
-    fn decrease_borrow_count(&self, _py: Python) {
-        let prev_count = self.borrow_count.fetch_sub(1, Ordering::Relaxed);
-        assert!(prev_count > 0);
-    }
-
-    fn current_generation(&self, _py: Python) -> usize {
-        self.generation.load(Ordering::Relaxed)
-    }
-}
-
-/// Helper to keep the borrow count updated while the shared object is
-/// immutably borrowed without using the `RefCell` interface.
-struct BorrowPyShared<'a> {
-    py: Python<'a>,
-    py_shared_state: &'a PySharedState,
-}
-
-impl<'a> BorrowPyShared<'a> {
-    fn new(
-        py: Python<'a>,
-        py_shared_state: &'a PySharedState,
-    ) -> BorrowPyShared<'a> {
-        py_shared_state.increase_borrow_count(py);
-        BorrowPyShared {
-            py,
-            py_shared_state,
-        }
-    }
-}
-
-impl Drop for BorrowPyShared<'_> {
-    fn drop(&mut self) {
-        self.py_shared_state.decrease_borrow_count(self.py);
-    }
-}
-
-/// `RefCell` wrapper to be safely used in conjunction with `PySharedState`.
-///
-/// This object can be stored in a `py_class!` object as a data field. Any
-/// operation is allowed through the `PySharedRef` interface.
-#[derive(Debug)]
-pub struct PySharedRefCell<T> {
-    inner: RefCell<T>,
-    py_shared_state: PySharedState,
-}
-
-impl<T> PySharedRefCell<T> {
-    pub fn new(value: T) -> PySharedRefCell<T> {
-        Self {
-            inner: RefCell::new(value),
-            py_shared_state: PySharedState::default(),
-        }
-    }
-
-    fn borrow<'a>(&'a self, _py: Python<'a>) -> Ref<'a, T> {
-        // py_shared_state isn't involved since
-        // - inner.borrow() would fail if self is mutably borrowed,
-        // - and inner.borrow_mut() would fail while self is borrowed.
-        self.inner.borrow()
-    }
-
-    // TODO: maybe this should be named as try_borrow_mut(), and use
-    // inner.try_borrow_mut(). The current implementation panics if
-    // self.inner has been borrowed, but returns error if py_shared_state
-    // refuses to borrow.
-    fn borrow_mut<'a>(&'a self, py: Python<'a>) -> PyResult<RefMut<'a, T>> {
-        self.py_shared_state.borrow_mut(py, self.inner.borrow_mut())
-    }
-}
-
-/// Sharable data member of type `T` borrowed from the `PyObject`.
-pub struct PySharedRef<'a, T> {
-    py: Python<'a>,
-    owner: &'a PyObject,
-    data: &'a PySharedRefCell<T>,
-}
-
-impl<'a, T> PySharedRef<'a, T> {
-    /// # Safety
-    ///
-    /// The `data` must be owned by the `owner`. Otherwise, the leak count
-    /// would get wrong.
-    pub unsafe fn new(
-        py: Python<'a>,
-        owner: &'a PyObject,
-        data: &'a PySharedRefCell<T>,
-    ) -> Self {
-        Self { py, owner, data }
-    }
-
-    pub fn borrow(&self) -> Ref<'a, T> {
-        self.data.borrow(self.py)
-    }
-
-    pub fn borrow_mut(&self) -> PyResult<RefMut<'a, T>> {
-        self.data.borrow_mut(self.py)
-    }
-
-    /// Returns a leaked reference.
-    ///
-    /// # Panics
-    ///
-    /// Panics if this is mutably borrowed.
-    pub fn leak_immutable(&self) -> PyLeaked<&'static T> {
-        let state = &self.data.py_shared_state;
-        // make sure self.data isn't mutably borrowed; otherwise the
-        // generation number can't be trusted.
-        let data_ref = self.borrow();
-        unsafe {
-            let (static_ref, static_state_ref) =
-                state.leak_immutable(self.py, data_ref);
-            PyLeaked::new(self.py, self.owner, static_ref, static_state_ref)
-        }
-    }
-}
-
-/// Allows a `py_class!` generated struct to share references to one of its
-/// data members with Python.
-///
-/// # Parameters
-///
-/// * `$name` is the same identifier used in for `py_class!` macro call.
-/// * `$inner_struct` is the identifier of the underlying Rust struct
-/// * `$data_member` is the identifier of the data member of `$inner_struct`
-/// that will be shared.
-/// * `$shared_accessor` is the function name to be generated, which allows
-/// safe access to the data member.
-///
-/// # Safety
-///
-/// `$data_member` must persist while the `$name` object is alive. In other
-/// words, it must be an accessor to a data field of the Python object.
-///
-/// # Example
-///
-/// ```
-/// struct MyStruct {
-///     inner: Vec<u32>;
-/// }
-///
-/// py_class!(pub class MyType |py| {
-///     data inner: PySharedRefCell<MyStruct>;
-/// });
-///
-/// py_shared_ref!(MyType, MyStruct, inner, inner_shared);
-/// ```
-macro_rules! py_shared_ref {
-    (
-        $name: ident,
-        $inner_struct: ident,
-        $data_member: ident,
-        $shared_accessor: ident
-    ) => {
-        impl $name {
-            /// Returns a safe reference to the shared `$data_member`.
-            ///
-            /// This function guarantees that `PySharedRef` is created with
-            /// the valid `self` and `self.$data_member(py)` pair.
-            fn $shared_accessor<'a>(
-                &'a self,
-                py: Python<'a>,
-            ) -> $crate::ref_sharing::PySharedRef<'a, $inner_struct> {
-                use cpython::PythonObject;
-                use $crate::ref_sharing::PySharedRef;
-                let owner = self.as_object();
-                let data = self.$data_member(py);
-                unsafe { PySharedRef::new(py, owner, data) }
-            }
-        }
-    };
-}
-
-/// Manage immutable references to `PyObject` leaked into Python iterators.
-///
-/// This reference will be invalidated once the original value is mutably
-/// borrowed.
-pub struct PyLeaked<T> {
-    inner: PyObject,
-    data: Option<T>,
-    py_shared_state: &'static PySharedState,
-    /// Generation counter of data `T` captured when PyLeaked is created.
-    generation: usize,
-}
-
-// DO NOT implement Deref for PyLeaked<T>! Dereferencing PyLeaked
-// without taking Python GIL wouldn't be safe. Also, the underling reference
-// is invalid if generation != py_shared_state.generation.
-
-impl<T> PyLeaked<T> {
-    /// # Safety
-    ///
-    /// The `py_shared_state` must be owned by the `inner` Python object.
-    fn new(
-        py: Python,
-        inner: &PyObject,
-        data: T,
-        py_shared_state: &'static PySharedState,
-    ) -> Self {
-        Self {
-            inner: inner.clone_ref(py),
-            data: Some(data),
-            py_shared_state,
-            generation: py_shared_state.current_generation(py),
-        }
-    }
-
-    /// Immutably borrows the wrapped value.
-    ///
-    /// Borrowing fails if the underlying reference has been invalidated.
-    pub fn try_borrow<'a>(
-        &'a self,
-        py: Python<'a>,
-    ) -> PyResult<PyLeakedRef<'a, T>> {
-        self.validate_generation(py)?;
-        Ok(PyLeakedRef {
-            _borrow: BorrowPyShared::new(py, self.py_shared_state),
-            data: self.data.as_ref().unwrap(),
-        })
-    }
-
-    /// Mutably borrows the wrapped value.
-    ///
-    /// Borrowing fails if the underlying reference has been invalidated.
-    ///
-    /// Typically `T` is an iterator. If `T` is an immutable reference,
-    /// `get_mut()` is useless since the inner value can't be mutated.
-    pub fn try_borrow_mut<'a>(
-        &'a mut self,
-        py: Python<'a>,
-    ) -> PyResult<PyLeakedRefMut<'a, T>> {
-        self.validate_generation(py)?;
-        Ok(PyLeakedRefMut {
-            _borrow: BorrowPyShared::new(py, self.py_shared_state),
-            data: self.data.as_mut().unwrap(),
-        })
-    }
-
-    /// Converts the inner value by the given function.
-    ///
-    /// Typically `T` is a static reference to a container, and `U` is an
-    /// iterator of that container.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the underlying reference has been invalidated.
-    ///
-    /// This is typically called immediately after the `PyLeaked` is obtained.
-    /// In which case, the reference must be valid and no panic would occur.
-    ///
-    /// # Safety
-    ///
-    /// The lifetime of the object passed in to the function `f` is cheated.
-    /// It's typically a static reference, but is valid only while the
-    /// corresponding `PyLeaked` is alive. Do not copy it out of the
-    /// function call.
-    pub unsafe fn map<U>(
-        mut self,
-        py: Python,
-        f: impl FnOnce(T) -> U,
-    ) -> PyLeaked<U> {
-        // Needs to test the generation value to make sure self.data reference
-        // is still intact.
-        self.validate_generation(py)
-            .expect("map() over invalidated leaked reference");
-
-        // f() could make the self.data outlive. That's why map() is unsafe.
-        // In order to make this function safe, maybe we'll need a way to
-        // temporarily restrict the lifetime of self.data and translate the
-        // returned object back to Something<'static>.
-        let new_data = f(self.data.take().unwrap());
-        PyLeaked {
-            inner: self.inner.clone_ref(py),
-            data: Some(new_data),
-            py_shared_state: self.py_shared_state,
-            generation: self.generation,
-        }
-    }
-
-    fn validate_generation(&self, py: Python) -> PyResult<()> {
-        if self.py_shared_state.current_generation(py) == self.generation {
-            Ok(())
-        } else {
-            Err(PyErr::new::<exc::RuntimeError, _>(
-                py,
-                "Cannot access to leaked reference after mutation",
-            ))
-        }
-    }
-}
-
-/// Immutably borrowed reference to a leaked value.
-pub struct PyLeakedRef<'a, T> {
-    _borrow: BorrowPyShared<'a>,
-    data: &'a T,
-}
-
-impl<T> Deref for PyLeakedRef<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        self.data
-    }
-}
-
-/// Mutably borrowed reference to a leaked value.
-pub struct PyLeakedRefMut<'a, T> {
-    _borrow: BorrowPyShared<'a>,
-    data: &'a mut T,
-}
-
-impl<T> Deref for PyLeakedRefMut<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        self.data
-    }
-}
-
-impl<T> DerefMut for PyLeakedRefMut<'_, T> {
-    fn deref_mut(&mut self) -> &mut T {
-        self.data
-    }
-}
-
 /// Defines a `py_class!` that acts as a Python iterator over a Rust iterator.
 ///
 /// TODO: this is a bit awkward to use, and a better (more complicated)
@@ -437,12 +30,18 @@
 /// # Parameters
 ///
 /// * `$name` is the identifier to give to the resulting Rust struct.
-/// * `$leaked` corresponds to `$leaked` in the matching `py_shared_ref!` call.
+/// * `$leaked` corresponds to `UnsafePyLeaked` in the matching `@shared data`
+/// declaration.
 /// * `$iterator_type` is the type of the Rust iterator.
 /// * `$success_func` is a function for processing the Rust `(key, value)`
 /// tuple on iteration success, turning it into something Python understands.
 /// * `$success_type` is the return type of `$success_func`
 ///
+/// # Safety
+///
+/// `$success_func` may take a reference, but its lifetime may be cheated.
+/// Do not copy it out of the function call.
+///
 /// # Example
 ///
 /// ```
@@ -451,7 +50,7 @@
 /// }
 ///
 /// py_class!(pub class MyType |py| {
-///     data inner: PySharedRefCell<MyStruct>;
+///     @shared data inner: MyStruct;
 ///
 ///     def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
 ///         let leaked_ref = self.inner_shared(py).leak_immutable();
@@ -475,11 +74,9 @@
 ///     }
 /// }
 ///
-/// py_shared_ref!(MyType, MyStruct, inner, MyTypeLeakedRef);
-///
 /// py_shared_iterator!(
 ///     MyTypeItemsIterator,
-///     PyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
+///     UnsafePyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
 ///     MyType::translate_key_value,
 ///     Option<(PyBytes, PyBytes)>
 /// );
@@ -496,9 +93,10 @@
 
             def __next__(&self) -> PyResult<$success_type> {
                 let mut leaked = self.inner(py).borrow_mut();
-                let mut iter = leaked.try_borrow_mut(py)?;
+                let mut iter = unsafe { leaked.try_borrow_mut(py)? };
                 match iter.next() {
                     None => Ok(None),
+                    // res may be a reference with a cheated 'static lifetime
                     Some(res) => $success_func(py, res),
                 }
             }
@@ -521,116 +119,3 @@
         }
     };
 }
-
-#[cfg(test)]
-#[cfg(any(feature = "python27-bin", feature = "python3-bin"))]
-mod test {
-    use super::*;
-    use cpython::{GILGuard, Python};
-
-    py_class!(class Owner |py| {
-        data string: PySharedRefCell<String>;
-    });
-    py_shared_ref!(Owner, String, string, string_shared);
-
-    fn prepare_env() -> (GILGuard, Owner) {
-        let gil = Python::acquire_gil();
-        let py = gil.python();
-        let owner =
-            Owner::create_instance(py, PySharedRefCell::new("new".to_owned()))
-                .unwrap();
-        (gil, owner)
-    }
-
-    #[test]
-    fn test_leaked_borrow() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        let leaked_ref = leaked.try_borrow(py).unwrap();
-        assert_eq!(*leaked_ref, "new");
-    }
-
-    #[test]
-    fn test_leaked_borrow_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-        let mut leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
-        assert_eq!(leaked_ref.next(), Some('n'));
-        assert_eq!(leaked_ref.next(), Some('e'));
-        assert_eq!(leaked_ref.next(), Some('w'));
-        assert_eq!(leaked_ref.next(), None);
-    }
-
-    #[test]
-    fn test_leaked_borrow_after_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        owner.string_shared(py).borrow_mut().unwrap().clear();
-        assert!(leaked.try_borrow(py).is_err());
-    }
-
-    #[test]
-    fn test_leaked_borrow_mut_after_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-        owner.string_shared(py).borrow_mut().unwrap().clear();
-        assert!(leaked_iter.try_borrow_mut(py).is_err());
-    }
-
-    #[test]
-    #[should_panic(expected = "map() over invalidated leaked reference")]
-    fn test_leaked_map_after_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let leaked = owner.string_shared(py).leak_immutable();
-        owner.string_shared(py).borrow_mut().unwrap().clear();
-        let _leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-    }
-
-    #[test]
-    fn test_borrow_mut_while_leaked_ref() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-        let leaked = owner.string_shared(py).leak_immutable();
-        {
-            let _leaked_ref = leaked.try_borrow(py).unwrap();
-            assert!(owner.string_shared(py).borrow_mut().is_err());
-            {
-                let _leaked_ref2 = leaked.try_borrow(py).unwrap();
-                assert!(owner.string_shared(py).borrow_mut().is_err());
-            }
-            assert!(owner.string_shared(py).borrow_mut().is_err());
-        }
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-    }
-
-    #[test]
-    fn test_borrow_mut_while_leaked_ref_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-        let leaked = owner.string_shared(py).leak_immutable();
-        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
-        {
-            let _leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
-            assert!(owner.string_shared(py).borrow_mut().is_err());
-        }
-        assert!(owner.string_shared(py).borrow_mut().is_ok());
-    }
-
-    #[test]
-    #[should_panic(expected = "mutably borrowed")]
-    fn test_leak_while_borrow_mut() {
-        let (gil, owner) = prepare_env();
-        let py = gil.python();
-        let _mut_ref = owner.string_shared(py).borrow_mut();
-        owner.string_shared(py).leak_immutable();
-    }
-}
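
All of the machinery deleted here has moved upstream: rust-cpython 0.4 provides it as `UnsafePyLeaked` together with the `@shared data` attribute, so this file shrinks to the iterator macro. The invalidation idea the deleted code implemented is still worth keeping in view: a leaked reference snapshots a generation counter, every mutable borrow bumps it, and a stale leak refuses access. A stand-alone sketch of that scheme (it uses `Rc` where the real code relies on the GIL and raw pointers; none of these names are from rust-cpython):

```rust
use std::cell::{Cell, Ref, RefCell};
use std::rc::Rc;

struct Owner {
    data: RefCell<String>,
    generation: Cell<usize>, // bumped on every mutable borrow
}

struct Leaked {
    owner: Rc<Owner>,
    generation: usize, // snapshot taken at leak time
}

fn leak(owner: &Rc<Owner>) -> Leaked {
    Leaked {
        owner: Rc::clone(owner),
        generation: owner.generation.get(),
    }
}

fn mutate(owner: &Rc<Owner>, f: impl FnOnce(&mut String)) {
    owner.generation.set(owner.generation.get() + 1); // invalidate leaks
    f(&mut owner.data.borrow_mut());
}

impl Leaked {
    fn try_borrow(&self) -> Result<Ref<'_, String>, &'static str> {
        if self.owner.generation.get() == self.generation {
            Ok(self.owner.data.borrow())
        } else {
            Err("cannot access leaked reference after mutation")
        }
    }
}

fn main() {
    let owner = Rc::new(Owner {
        data: RefCell::new("new".to_owned()),
        generation: Cell::new(0),
    });
    let leaked = leak(&owner);
    assert_eq!(&*leaked.try_borrow().unwrap(), "new");
    mutate(&owner, |s| s.clear()); // cf. test_leaked_borrow_after_mut above
    assert!(leaked.try_borrow().is_err());
}
```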
--- a/setup.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/setup.py	Mon Feb 03 11:51:52 2020 -0500
@@ -323,7 +323,7 @@
     # gives precedence to hg.exe in the current directory, so fall back to the
     # python invocation of local hg, where pythonXY.dll can always be found.
     check_cmd = ['log', '-r.', '-Ttest']
-    if os.name != 'nt':
+    if os.name != 'nt' or not os.path.exists("hg.exe"):
         try:
             retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
         except EnvironmentError:
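
The setup.py guard added above matters on Windows: as the surrounding
comment notes, command resolution gives precedence to an hg.exe in the
current directory, and a stale one may fail to find pythonXY.dll. The new
condition only trusts the direct hg invocation when no such shadowing
binary exists. A standalone restatement of the decision (simplified from
the hunk):

    import os

    def can_run_local_hg():
        # A stale hg.exe next to setup.py would be picked up instead
        # of the in-tree script, so fall back to the "python hg"
        # invocation whenever one is present on Windows.
        return os.name != 'nt' or not os.path.exists('hg.exe')
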
--- a/tests/run-tests.py	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/run-tests.py	Mon Feb 03 11:51:52 2020 -0500
@@ -555,12 +555,6 @@
         help="use pure Python code instead of C extensions",
     )
     hgconf.add_argument(
-        "-3",
-        "--py3-warnings",
-        action="store_true",
-        help="enable Py3k warnings on Python 2.7+",
-    )
-    hgconf.add_argument(
         "--with-chg",
         metavar="CHG",
         help="use specified chg wrapper in place of hg",
@@ -748,9 +742,6 @@
             )
         options.timeout = 0
         options.slowtimeout = 0
-    if options.py3_warnings:
-        if PYTHON3:
-            parser.error('--py3-warnings can only be used on Python 2.7')
 
     if options.blacklist:
         options.blacklist = parselistfiles(options.blacklist, 'blacklist')
@@ -909,7 +900,6 @@
         timeout=None,
         startport=None,
         extraconfigopts=None,
-        py3warnings=False,
         shell=None,
         hgcommand=None,
         slowtimeout=None,
@@ -942,8 +932,6 @@
         must have the form "key=value" (something understood by hgrc). Values
         of the form "foo.key=value" will result in "[foo] key=value".
 
-        py3warnings enables Py3k warnings.
-
         shell is the shell to execute tests in.
         """
         if timeout is None:
@@ -968,7 +956,6 @@
         self._slowtimeout = slowtimeout
         self._startport = startport
         self._extraconfigopts = extraconfigopts or []
-        self._py3warnings = py3warnings
         self._shell = _bytespath(shell)
         self._hgcommand = hgcommand or b'hg'
         self._usechg = usechg
@@ -1515,9 +1502,8 @@
         return os.path.join(self._testdir, b'%s.out' % self.bname)
 
     def _run(self, env):
-        py3switch = self._py3warnings and b' -3' or b''
         # Quote the python(3) executable for Windows
-        cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
+        cmd = b'"%s" "%s"' % (PYTHON, self.path)
         vlog("# Running", cmd.decode("utf-8"))
         normalizenewlines = os.name == 'nt'
         result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
@@ -3366,7 +3352,6 @@
             timeout=self.options.timeout,
             startport=self._getport(count),
             extraconfigopts=self.options.extra_config_opt,
-            py3warnings=self.options.py3_warnings,
             shell=self.options.shell,
             hgcommand=self._hgcommand,
             usechg=bool(self.options.with_chg or self.options.chg),
@@ -3512,15 +3497,6 @@
 
         self._usecorrectpython()
 
-        if self.options.py3_warnings and not self.options.anycoverage:
-            vlog("# Updating hg command to enable Py3k Warnings switch")
-            with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
-                lines = [line.rstrip() for line in f]
-                lines[0] += ' -3'
-            with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
-                for line in lines:
-                    f.write(line + '\n')
-
         hgbat = os.path.join(self._bindir, b'hg.bat')
         if os.path.isfile(hgbat):
             # hg.bat expects to be put in bin/scripts while run-tests.py
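
The run-tests.py deletions above retire --py3-warnings end to end: the
argparse flag, the Python 3 validation error, the py3warnings constructor
argument and _py3warnings attribute, the ' -3' switch spliced into the test
command line, and the shebang rewrite of the installed hg script. For
orientation, a flag threads through run-tests.py roughly like this
hypothetical miniature (names invented, not the real option):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--frob', action='store_true',
                        help='hypothetical example flag')

    class Test(object):
        def __init__(self, path, frob=False):
            self._frob = frob
            self.path = path

        def _run_cmd(self, python=b'python3'):
            # the removed code spliced b' -3' in exactly this spot
            switch = b' --frob' if self._frob else b''
            return b'"%s"%s "%s"' % (python, switch, self.path)

    options = parser.parse_args(['--frob'])
    print(Test(b'test-foo.py', frob=options.frob)._run_cmd())
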
--- a/tests/test-check-format.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-check-format.t	Mon Feb 03 11:51:52 2020 -0500
@@ -1,5 +1,5 @@
 #require black
 
   $ cd $RUNTESTDIR/..
-  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/** - "contrib/python-zstandard/**"'`
+  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
 
--- a/tests/test-check-module-imports.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-check-module-imports.t	Mon Feb 03 11:51:52 2020 -0500
@@ -24,6 +24,7 @@
   > -X contrib/packaging/hg-docker \
   > -X contrib/packaging/hgpackaging/ \
   > -X contrib/packaging/inno/ \
+  > -X contrib/phab-clean.py \
   > -X contrib/python-zstandard/ \
   > -X contrib/win32/hgwebdir_wsgi.py \
   > -X contrib/perf-utils/perf-revlog-write-plot.py \
--- a/tests/test-commit-unresolved.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-commit-unresolved.t	Mon Feb 03 11:51:52 2020 -0500
@@ -60,7 +60,7 @@
   abort: cannot specify a node with --abort
   [255]
   $ hg merge --abort --rev e4501
-  abort: cannot specify both --rev and --abort
+  abort: cannot specify both --abort and --rev
   [255]
 
 #if abortcommand
@@ -144,7 +144,7 @@
   (branch merge, don't forget to commit)
 
   $ hg merge --preview --abort
-  abort: cannot specify --preview with --abort
+  abort: cannot specify both --abort and --preview
   [255]
 
   $ hg abort
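
Both message changes above follow one pattern: the abort text now always
names --abort first and reads "cannot specify both X and Y", which suggests
the per-option checks were consolidated into a helper that formats
incompatible-option errors consistently. A hedged sketch of such a helper
(hypothetical; Mercurial's actual function may differ):

    def check_incompatible(opts, first, others):
        # One place formats all "cannot specify both" aborts, so the
        # wording and option order stay consistent across commands.
        if not opts.get(first):
            return
        for other in others:
            if opts.get(other):
                raise SystemExit('abort: cannot specify both --%s and --%s'
                                 % (first, other))

    # e.g. for "hg merge": check_incompatible(opts, 'abort', ['rev', 'preview'])
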
--- a/tests/test-copy-move-merge.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-copy-move-merge.t	Mon Feb 03 11:51:52 2020 -0500
@@ -27,8 +27,9 @@
      b
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
-     src: 'a' -> dst: 'c' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
+      src: 'a' -> dst: 'c' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
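
Starting here, and in many of the tests below, the copy-tracing debug
output changes shape: instead of a flat src -> dst list, discovered copies
are grouped under "on local side:" / "on remote side:". That makes
divergent cases far easier to read (see test-rename-merge1.t below, where
'a2' maps to 'c2' locally but 'b2' remotely). A rough sketch of the grouped
printing, assuming one dst -> src mapping per side (an assumption about the
internals, not merge.py's real data structures):

    def print_copies(copies_by_side):
        # copies_by_side: {'local': {dst: src}, 'remote': {dst: src}}
        print('  all copies found (* = to merge, ! = divergent,'
              ' % = renamed and deleted):')
        for side in ('local', 'remote'):
            copies = copies_by_side.get(side)
            if not copies:
                continue
            print('   on %s side:' % side)
            for dst, src in sorted(copies.items()):
                print("    src: '%s' -> dst: '%s'" % (src, dst))
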
--- a/tests/test-double-merge.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-double-merge.t	Mon Feb 03 11:51:52 2020 -0500
@@ -29,7 +29,8 @@
     unmatched files in other:
      bar
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'foo' -> dst: 'bar' *
+     on remote side:
+      src: 'foo' -> dst: 'bar' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
--- a/tests/test-graft.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-graft.t	Mon Feb 03 11:51:52 2020 -0500
@@ -204,7 +204,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
@@ -223,7 +224,8 @@
   updating the branch cache
   grafting 5:97f8bfe72746 "5"
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'c' -> dst: 'b' 
+     on local side:
+      src: 'c' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
@@ -239,7 +241,8 @@
   scanning for duplicate grafts
   grafting 4:9c233e8e184d "4"
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'c' -> dst: 'b' 
+     on local side:
+      src: 'c' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
@@ -746,7 +749,10 @@
   scanning for duplicate grafts
   grafting 13:7a4785234d87 "2"
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
--- a/tests/test-install.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-install.t	Mon Feb 03 11:51:52 2020 -0500
@@ -2,6 +2,7 @@
   $ hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
@@ -43,6 +44,7 @@
     "hgverextra": "*", (glob)
     "problems": 0,
     "pythonexe": "*", (glob)
+    "pythonimplementation": "*", (glob)
     "pythonlib": "*", (glob)
     "pythonsecurity": [*], (glob)
     "pythonver": "*.*.*", (glob)
@@ -58,6 +60,7 @@
   $ HGUSER= hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
@@ -103,6 +106,7 @@
   $ HGEDITOR="~/tools/testeditor.exe" hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
@@ -128,6 +132,7 @@
   $ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob) (no-py3 !)
   checking Python version (3.*) (glob) (py3 !)
   checking Python lib (.*[Ll]ib.*)... (re)
@@ -185,6 +190,7 @@
   $ ./installenv/*/hg debuginstall || cat pip.log
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (3.*) (glob)
   checking Python lib (*)... (glob)
   checking Python security support (*) (glob)
@@ -221,6 +227,7 @@
   $ ./installenv/*/hg debuginstall || cat pip.log
   checking encoding (ascii)...
   checking Python executable (*) (glob)
+  checking Python implementation (*) (glob)
   checking Python version (2.*) (glob)
   checking Python lib (*)... (glob)
   checking Python security support (*) (glob)
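
Every debuginstall transcript above gains a "checking Python
implementation" line, plus a matching "pythonimplementation" key in the
JSON output, so CPython versus PyPy is visible alongside the executable and
version. The standard-library way to obtain that value, and likely close to
what debuginstall reports (an assumption, since the implementation side of
the change is not shown here):

    import platform

    # returns e.g. 'CPython' or 'PyPy'
    impl = platform.python_implementation()
    print('checking Python implementation (%s)' % impl)
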
--- a/tests/test-issue1802.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-issue1802.t	Mon Feb 03 11:51:52 2020 -0500
@@ -52,8 +52,6 @@
 Simulate a Windows merge:
 
   $ hg --config extensions.n=$TESTTMP/noexec.py merge --debug
-    unmatched files in local:
-     b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: a03b0deabf2b, local: d6fa54f68ae1+, remote: 2d8bcf2dda39
--- a/tests/test-issue522.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-issue522.t	Mon Feb 03 11:51:52 2020 -0500
@@ -25,8 +25,6 @@
   $ hg ci -qAm 'add bar'
 
   $ hg merge --debug
-    unmatched files in local:
-     bar
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: bbd179dfa0a7, local: 71766447bdbb+, remote: 4d9e78aaceee
--- a/tests/test-issue672.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-issue672.t	Mon Feb 03 11:51:52 2020 -0500
@@ -28,7 +28,8 @@
     unmatched files in other:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1' -> dst: '1a' 
+     on remote side:
+      src: '1' -> dst: '1a' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -56,7 +57,8 @@
     unmatched files in local:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1' -> dst: '1a' *
+     on local side:
+      src: '1' -> dst: '1a' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -78,7 +80,8 @@
     unmatched files in other:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1' -> dst: '1a' *
+     on remote side:
+      src: '1' -> dst: '1a' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
--- a/tests/test-merge-criss-cross.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-merge-criss-cross.t	Mon Feb 03 11:51:52 2020 -0500
@@ -410,11 +410,6 @@
   note: merging c0ef19750a22+ and 6ca01f7342b9 using bids from ancestors 11b5b303e36c and 154e6000f54e
   
   calculating bids for ancestor 11b5b303e36c
-    unmatched files in local:
-     d1/a
-     d1/b
-    unmatched files in other:
-     d2/b
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 11b5b303e36c, local: c0ef19750a22+, remote: 6ca01f7342b9
@@ -424,7 +419,8 @@
     unmatched files in other:
      d2/b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'd1/b' -> dst: 'd2/b' 
+     on remote side:
+      src: 'd1/b' -> dst: 'd2/b' 
     checking for directory renames
      discovered dir src: 'd1/' -> dst: 'd2/'
   resolving manifests
--- a/tests/test-merge2.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-merge2.t	Mon Feb 03 11:51:52 2020 -0500
@@ -50,4 +50,8 @@
   adding b
   created new head
 
+  $ hg merge 'wdir()'
+  abort: merging with the working copy has no effect
+  [255]
+
   $ cd ..
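
The new test above pins down that merging with wdir() is rejected outright,
and test-rebase-parameters.t below adds the analogous guard for
"hg rebase --rev/--source 'wdir()'". Both amount to refusing revsets that
resolve to the working directory, whose context reports a revision of None
in Mercurial's model. A simplified sketch of the shared idea (the real code
paths differ per command):

    def check_not_wdir(ctx, verb):
        # the working-directory context has ctx.rev() == None
        if ctx.rev() is None:
            raise SystemExit('abort: cannot %s the working copy' % verb)
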
--- a/tests/test-rebase-collapse.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-rebase-collapse.t	Mon Feb 03 11:51:52 2020 -0500
@@ -486,61 +486,6 @@
   abort: cannot collapse multiple named branches
   [255]
 
-  $ repeatchange() {
-  >   hg checkout $1
-  >   hg cp d z
-  >   echo blah >> z
-  >   hg commit -Am "$2" --user "$3"
-  > }
-  $ repeatchange 3 "E" "user1"
-  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ repeatchange 3 "E" "user2"
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  created new head
-  $ hg tglog
-  @  5: fbfb97b1089a 'E'
-  |
-  | o  4: f338eb3c2c7c 'E'
-  |/
-  o  3: 41acb9dca9eb 'D'
-  |
-  | o  2: 8ac4a08debf1 'C' two
-  | |
-  | o  1: 1ba175478953 'B' one
-  |/
-  o  0: 1994f17a630e 'A'
-  
-  $ hg rebase -s 5 -d 4
-  rebasing 5:fbfb97b1089a "E" (tip)
-  note: not rebasing 5:fbfb97b1089a "E" (tip), its destination already has all its changes
-  saved backup bundle to $TESTTMP/e/.hg/strip-backup/fbfb97b1089a-553e1d85-rebase.hg
-  $ hg tglog
-  @  4: f338eb3c2c7c 'E'
-  |
-  o  3: 41acb9dca9eb 'D'
-  |
-  | o  2: 8ac4a08debf1 'C' two
-  | |
-  | o  1: 1ba175478953 'B' one
-  |/
-  o  0: 1994f17a630e 'A'
-  
-  $ hg export tip
-  # HG changeset patch
-  # User user1
-  # Date 0 0
-  #      Thu Jan 01 00:00:00 1970 +0000
-  # Node ID f338eb3c2c7cc5b5915676a2376ba7ac558c5213
-  # Parent  41acb9dca9eb976e84cd21fcb756b4afa5a35c09
-  E
-  
-  diff -r 41acb9dca9eb -r f338eb3c2c7c z
-  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/z	Thu Jan 01 00:00:00 1970 +0000
-  @@ -0,0 +1,2 @@
-  +d
-  +blah
-
   $ cd ..
 
 Rebase, collapse and copies
--- a/tests/test-rebase-parameters.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-rebase-parameters.t	Mon Feb 03 11:51:52 2020 -0500
@@ -92,6 +92,14 @@
   empty "rev" revision set - nothing to rebase
   [1]
 
+  $ hg rebase --rev 'wdir()' --dest 6
+  abort: cannot rebase the working copy
+  [255]
+
+  $ hg rebase --source 'wdir()' --dest 6
+  abort: cannot rebase the working copy
+  [255]
+
   $ hg rebase --source '1 & !1' --dest 8
   empty "source" revision set - nothing to rebase
   [1]
--- a/tests/test-rebase-rename.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-rebase-rename.t	Mon Feb 03 11:51:52 2020 -0500
@@ -108,6 +108,62 @@
   
   
 
+  $ repeatchange() {
+  >   hg checkout $1
+  >   hg cp a z
+  >   echo blah >> z
+  >   hg commit -Am "$2" --user "$3"
+  > }
+  $ repeatchange 1 "E" "user1"
+  2 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  created new head
+  $ repeatchange 1 "E" "user2"
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  created new head
+  $ hg tglog
+  @  5: af8ad1f97097 'E'
+  |
+  | o  4: 60f545c27784 'E'
+  |/
+  | o  3: 032a9b75e83b 'rename A'
+  | |
+  | o  2: 220d0626d185 'rename B'
+  |/
+  o  1: 3ab5da9a5c01 'B'
+  |
+  o  0: 1994f17a630e 'A'
+  
+  $ hg rebase -s 5 -d 4
+  rebasing 5:af8ad1f97097 "E" (tip)
+  note: not rebasing 5:af8ad1f97097 "E" (tip), its destination already has all its changes
+  saved backup bundle to $TESTTMP/a/.hg/strip-backup/af8ad1f97097-c3e90708-rebase.hg
+  $ hg tglog
+  @  4: 60f545c27784 'E'
+  |
+  | o  3: 032a9b75e83b 'rename A'
+  | |
+  | o  2: 220d0626d185 'rename B'
+  |/
+  o  1: 3ab5da9a5c01 'B'
+  |
+  o  0: 1994f17a630e 'A'
+  
+  $ hg export tip
+  # HG changeset patch
+  # User user1
+  # Date 0 0
+  #      Thu Jan 01 00:00:00 1970 +0000
+  # Node ID 60f545c277846e6bad309919bae3ae106f59cb39
+  # Parent  3ab5da9a5c01faa02c20f2ec4870a4f689c92da6
+  E
+  
+  diff -r 3ab5da9a5c01 -r 60f545c27784 z
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/z	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,2 @@
+  +a
+  +blah
+
   $ cd ..
 
 
--- a/tests/test-rename-dir-merge.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-rename-dir-merge.t	Mon Feb 03 11:51:52 2020 -0500
@@ -30,8 +30,9 @@
      b/a
      b/b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a/a' -> dst: 'b/a' 
-     src: 'a/b' -> dst: 'b/b' 
+     on remote side:
+      src: 'a/a' -> dst: 'b/a' 
+      src: 'a/b' -> dst: 'b/b' 
     checking for directory renames
      discovered dir src: 'a/' -> dst: 'b/'
      pending file src: 'a/c' -> dst: 'b/c'
@@ -75,8 +76,9 @@
     unmatched files in other:
      a/c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a/a' -> dst: 'b/a' 
-     src: 'a/b' -> dst: 'b/b' 
+     on local side:
+      src: 'a/a' -> dst: 'b/a' 
+      src: 'a/b' -> dst: 'b/b' 
     checking for directory renames
      discovered dir src: 'a/' -> dst: 'b/'
      pending file src: 'a/c' -> dst: 'b/c'
--- a/tests/test-rename-merge1.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-rename-merge1.t	Mon Feb 03 11:51:52 2020 -0500
@@ -28,9 +28,11 @@
      b
      b2
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
-     src: 'a2' -> dst: 'b2' !
-     src: 'a2' -> dst: 'c2' !
+     on local side:
+      src: 'a2' -> dst: 'c2' !
+     on remote side:
+      src: 'a' -> dst: 'b' *
+      src: 'a2' -> dst: 'b2' !
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -170,7 +172,8 @@
     unmatched files in other:
      newfile
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'file' -> dst: 'newfile' %
+     on remote side:
+      src: 'file' -> dst: 'newfile' %
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -184,3 +187,50 @@
   $ hg status
   M newfile
   $ cd ..
+
+Create x and y, then modify y and rename x to z on one side of the merge,
+and modify x and rename y to z on the other side.
+  $ hg init conflicting-target
+  $ cd conflicting-target
+  $ echo x > x
+  $ echo y > y
+  $ hg ci -Aqm 'add x and y'
+  $ hg mv x z
+  $ echo foo >> y
+  $ hg ci -qm 'modify y, rename x to z'
+  $ hg co -q 0
+  $ hg mv y z
+  $ echo foo >> x
+  $ hg ci -qm 'modify x, rename y to z'
+# We should probably tell the user about the conflicting rename sources.
+# Depending on which side they pick, we should take that rename and get
+# the changes to the source from the other side. The unchanged file should
+# remain.
+  $ hg merge --debug 1 -t :merge3
+    all copies found (* = to merge, ! = divergent, % = renamed and deleted):
+     on local side:
+      src: 'y' -> dst: 'z' *
+     on remote side:
+      src: 'x' -> dst: 'z' *
+    checking for directory renames
+  resolving manifests
+   branchmerge: True, force: False, partial: False
+   ancestor: 5151c134577e, local: 07fcbc9a74ed+, remote: f21419739508
+   preserving z for resolve of z
+  starting 4 threads for background file closing (?)
+   z: both renamed from y -> m (premerge)
+  picked tool ':merge3' for z (binary False symlink False changedelete False)
+  merging z
+  my z@07fcbc9a74ed+ other z@f21419739508 ancestor y@5151c134577e
+   premerge successful
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls
+  x
+  z
+  $ cat x
+  x
+  foo
+# 'z' should have had the added 'foo' line
+  $ cat z
+  x
--- a/tests/test-rename-merge2.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-rename-merge2.t	Mon Feb 03 11:51:52 2020 -0500
@@ -79,7 +79,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -117,7 +118,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -156,7 +158,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -194,7 +197,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -231,7 +235,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -263,7 +268,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -294,7 +300,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -327,7 +334,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -355,7 +363,10 @@
   test L:um a b R:um a b W:       - 9  do merge with ancestor in a
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -400,8 +411,10 @@
     unmatched files in other:
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' !
-     src: 'a' -> dst: 'c' !
+     on local side:
+      src: 'a' -> dst: 'b' !
+     on remote side:
+      src: 'a' -> dst: 'c' !
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -434,7 +447,8 @@
   test L:nc a b R:up b   W:       - 12 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -473,7 +487,8 @@
   test L:up b   R:nm a b W:       - 13 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -513,7 +528,8 @@
   test L:nc a b R:up a b W:       - 14 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -522,19 +538,15 @@
    preserving rev for resolve of rev
    a: remote is newer -> g
   getting a
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
+  my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
@@ -553,7 +565,8 @@
   test L:up b   R:nm a b W:       - 15 merge b no ancestor, remove a
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -593,7 +606,8 @@
   test L:nc a b R:up a b W:       - 16 get a, merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -602,19 +616,15 @@
    preserving rev for resolve of rev
    a: remote is newer -> g
   getting a
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
+  my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
@@ -633,7 +643,8 @@
   test L:up a b R:nc a b W:       - 17 keep a, merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -641,19 +652,15 @@
    preserving b for resolve of b
    preserving rev for resolve of rev
   starting 4 threads for background file closing (?)
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor b@000000000000
+  my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
@@ -672,7 +679,8 @@
   test L:nm a b R:up a b W:       - 18 merge b no ancestor
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -680,35 +688,24 @@
    preserving b for resolve of b
    preserving rev for resolve of rev
   starting 4 threads for background file closing (?)
-   a: prompt deleted/changed -> m (premerge)
-  picked tool ':prompt' for a (binary False symlink False changedelete True)
-  file 'a' was deleted in local [working copy] but was modified in other [merge rev].
-  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
-  What do you want to do? u
-   b: both created -> m (premerge)
+   b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@02963e448370+ other b@8dbce441892a ancestor b@000000000000
+  my b@02963e448370+ other b@8dbce441892a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@02963e448370+ other b@8dbce441892a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
   launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
   merge tool returned: 0
-  0 files updated, 2 files merged, 0 files removed, 1 files unresolved
-  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  0 files updated, 2 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
   --------------
-  M a
   M b
-  abort: unresolved merge conflicts (see 'hg help resolve')
   --------------
   
   $ tm "up a b" "nm a b" "      " "19 merge b no ancestor, prompt remove a"
@@ -717,44 +714,34 @@
   test L:up a b R:nm a b W:       - 19 merge b no ancestor, prompt remove a
   --------------
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' 
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 0b76e65c8289+, remote: bdb19105162a
-   preserving a for resolve of a
    preserving b for resolve of b
    preserving rev for resolve of rev
+   b: both renamed from a -> m (premerge)
   starting 4 threads for background file closing (?)
-   a: prompt changed/deleted -> m (premerge)
-  picked tool ':prompt' for a (binary False symlink False changedelete True)
-  file 'a' was deleted in other [merge rev] but was modified in local [working copy].
-  You can use (c)hanged version, (d)elete, or leave (u)nresolved.
-  What do you want to do? u
-   b: both created -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
-  my b@0b76e65c8289+ other b@bdb19105162a ancestor b@000000000000
+  my b@0b76e65c8289+ other b@bdb19105162a ancestor a@924404dff337
+   premerge successful
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
   my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
-   b: both created -> m (merge)
-  picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
-  my b@0b76e65c8289+ other b@bdb19105162a ancestor b@000000000000
-  launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
-  merge tool returned: 0
    rev: versions differ -> m (merge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
   launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
   merge tool returned: 0
-  0 files updated, 2 files merged, 0 files removed, 1 files unresolved
-  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  0 files updated, 2 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
   --------------
   M b
   C a
-  abort: unresolved merge conflicts (see 'hg help resolve')
   --------------
   
   $ tm "up a  " "um a b" "      " "20 merge a and b to b, remove a"
@@ -765,7 +752,8 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on remote side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -807,7 +795,8 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -853,7 +842,8 @@
     unmatched files in other:
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: 'a' -> dst: 'b' *
+     on local side:
+      src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    branchmerge: True, force: False, partial: False
@@ -936,11 +926,14 @@
      4/g
      7/f
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     src: '1/f' -> dst: '1/g' *
-     src: '3/f' -> dst: '3/g' *
-     src: '4/f' -> dst: '4/g' *
-     src: '5/f' -> dst: '5/g' *
-     src: '6/f' -> dst: '6/g' *
+     on local side:
+      src: '1/f' -> dst: '1/g' *
+      src: '5/f' -> dst: '5/g' *
+      src: '6/f' -> dst: '6/g' *
+     on remote side:
+      src: '1/f' -> dst: '1/g' *
+      src: '3/f' -> dst: '3/g' *
+      src: '4/f' -> dst: '4/g' *
     checking for directory renames
   $ hg mani
   0/f
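
The biggest behavior change in this file: cases where each side renamed 'a'
to 'b' used to be handled as "both created" with a null ancestor, and cases
18/19 additionally raised a deleted/changed prompt for 'a' that left the
merge unresolved. They are now merged as "both renamed from a", taking 'a'
from the common ancestor as the merge base, so the premerge succeeds and no
spurious prompt appears. Conceptually (a sketch, not merge.py's real data
structures):

    def pick_merge_base(path, copies, ancestor):
        """copies: dst -> src renames detected on either side;
        ancestor: path -> file content in the common ancestor."""
        src = copies.get(path)
        if src is not None and src in ancestor:
            # renamed from src: do a real 3-way merge against the
            # ancestor's copy of the source file
            return ancestor[src]
        # no known source: fall back to an empty base ("both created")
        return b''
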
--- a/tests/test-shelve.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-shelve.t	Mon Feb 03 11:51:52 2020 -0500
@@ -171,6 +171,8 @@
   $ hg mv b b.rename
   moving b/b to b.rename/b
   $ hg cp c c.copy
+  $ hg mv d ghost
+  $ rm ghost
   $ hg status -C
   M a/a
   A b.rename/b
@@ -178,12 +180,15 @@
   A c.copy
     c
   R b/b
+  R d
+  ! ghost
+    d
 
 the common case - no options or filenames
 
   $ hg shelve
   shelved as default-01
-  2 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  3 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ hg status -C
 
 ensure that our shelved changes exist
@@ -254,6 +259,7 @@
   A c.copy
     c
   R b/b
+  R d
   $ hg shelve -l
 
 (both of default.hg and default-1.hg should be still kept, because it
@@ -287,6 +293,7 @@
   A c.copy
     c
   R b/b
+  R d
   $ HGEDITOR=cat hg shelve -q -n wibble -m wat -e a
   wat
   
@@ -306,6 +313,7 @@
   A c.copy
     c
   R b/b
+  R d
   $ hg shelve -l --stat
   wibble          (*)    wat (glob)
    a/a |  1 +
@@ -323,6 +331,7 @@
   A c.copy
     c
   R b/b
+  R d
 
 ensure old shelve backups are being deleted automatically
 
@@ -363,6 +372,7 @@
   M b.rename/b
   M c.copy
   R b/b
+  R d
   ? a/a.orig
   # The repository is in an unfinished *unshelve* state.
   
@@ -401,6 +411,7 @@
   M b.rename/b
   M c.copy
   R b/b
+  R d
   ? a/a.orig
   $ hg diff
   diff --git a/a/a b/a/a
@@ -412,13 +423,19 @@
    c
   +=======
   +a
-  +>>>>>>> working-copy: a68ec3400638 - shelve: changes to: [mq]: second.patch
+  +>>>>>>> working-copy: 203c9f771d2b - shelve: changes to: [mq]: second.patch
   diff --git a/b/b b/b.rename/b
   rename from b/b
   rename to b.rename/b
   diff --git a/c b/c.copy
   copy from c
   copy to c.copy
+  diff --git a/d b/d
+  deleted file mode 100644
+  --- a/d
+  +++ /dev/null
+  @@ -1,1 +0,0 @@
+  -d
   $ hg resolve -l
   U a/a
 
@@ -434,6 +451,7 @@
   M b.rename/b
   M c.copy
   R b/b
+  R d
   ? a/a.orig
   $ hg unshelve -a
   unshelve of 'default' aborted
@@ -512,6 +530,7 @@
     c
   A foo/foo
   R b/b
+  R d
   ? a/a.orig
 
 there should be no shelves left
--- a/tests/test-uncommit.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-uncommit.t	Mon Feb 03 11:51:52 2020 -0500
@@ -489,7 +489,7 @@
   $ hg add b
   $ hg status
   A b
-  $ hg unc a
+  $ hg uncommit a
   note: keeping empty commit
   $ cat a
   super critical info!
@@ -503,11 +503,11 @@
   
   $ hg ci -Am 'add b'
   $ echo 'foo bar' > b
-  $ hg unc b
+  $ hg uncommit b
   abort: uncommitted changes
   (requires --allow-dirty-working-copy to uncommit)
   [255]
-  $ hg unc --allow-dirty-working-copy b
+  $ hg uncommit --allow-dirty-working-copy b
   $ hg log
   changeset:   3:30fa958635b2
   tag:         tip
--- a/tests/test-up-local-change.t	Mon Feb 03 11:07:34 2020 -0500
+++ b/tests/test-up-local-change.t	Mon Feb 03 11:51:52 2020 -0500
@@ -40,8 +40,6 @@
   summary:     1
   
   $ hg --debug up
-    unmatched files in other:
-     b
   resolving manifests
    branchmerge: False, force: False, partial: False
    ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb
@@ -91,8 +89,6 @@
   summary:     1
   
   $ hg --debug up
-    unmatched files in other:
-     b
   resolving manifests
    branchmerge: False, force: False, partial: False
    ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb