--- a/Makefile Fri Dec 13 10:37:45 2019 +0100
+++ b/Makefile Thu Feb 13 10:12:12 2020 -0800
@@ -64,6 +64,7 @@
$(MAKE) -C doc
cleanbutpackages:
+ rm -f hg.exe
-$(PYTHON) setup.py clean --all # ignore errors from this command
find contrib doc hgext hgext3rd i18n mercurial tests hgdemandimport \
\( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';'
--- a/black.toml Fri Dec 13 10:37:45 2019 +0100
+++ b/black.toml Thu Feb 13 10:12:12 2020 -0800
@@ -9,7 +9,6 @@
| \.mypy_cache/
| \.venv/
| mercurial/thirdparty/
-| contrib/python-zstandard/
'''
skip-string-normalization = true
quiet = true
--- a/contrib/chg/chg.c Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/chg/chg.c Thu Feb 13 10:12:12 2020 -0800
@@ -226,6 +226,16 @@
}
argv[argsize - 1] = NULL;
+ const char *lc_ctype_env = getenv("LC_CTYPE");
+ if (lc_ctype_env == NULL) {
+ if (putenv("CHG_CLEAR_LC_CTYPE=") != 0)
+ abortmsgerrno("failed to putenv CHG_CLEAR_LC_CTYPE");
+ } else {
+ if (setenv("CHGORIG_LC_CTYPE", lc_ctype_env, 1) != 0) {
+ abortmsgerrno("failed to setenv CHGORIG_LC_CTYYPE");
+ }
+ }
+
if (putenv("CHGINTERNALMARK=") != 0)
abortmsgerrno("failed to putenv");
if (execvp(hgcmd, (char **)argv) < 0)
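
The chg.c change above is the client half of an environment handshake: before exec'ing hg, the client records the caller's LC_CTYPE in CHGORIG_LC_CTYPE, or sets CHG_CLEAR_LC_CTYPE when the variable was not set at all, so the long-lived command server can later recover the caller's original locale. A minimal Python sketch of the restoring side, assuming only these two variable names (the helper is illustrative, not the actual server code):

    import os

    def restore_lc_ctype(environ=os.environ):
        """Sketch: undo the LC_CTYPE override recorded by the chg client."""
        if "CHG_CLEAR_LC_CTYPE" in environ:
            # The caller had no LC_CTYPE at all, so drop any override.
            environ.pop("LC_CTYPE", None)
        elif "CHGORIG_LC_CTYPE" in environ:
            # Restore the caller's original value saved by the client.
            environ["LC_CTYPE"] = environ["CHGORIG_LC_CTYPE"]
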
--- a/contrib/examples/fix.hgrc Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/examples/fix.hgrc Thu Feb 13 10:12:12 2020 -0800
@@ -6,7 +6,7 @@
rustfmt:pattern = set:**.rs
black:command = black --config=black.toml -
-black:pattern = set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"
+black:pattern = set:**.py - mercurial/thirdparty/**
# Mercurial doesn't have any Go code, but if we did this is how we
# would configure `hg fix` for Go:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/pyoxidizer.bzl Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,58 @@
+# Instructions:
+#
+# 1. cargo install --version 0.5.0 pyoxidizer
+# 2. cd /path/to/hg
+# 3. pyoxidizer build --path contrib/packaging [--release]
+# 4. Run build/pyoxidizer/<arch>/<debug|release>/app/hg
+#
+# If you need to build again, remove the build/lib.* and build/temp.*
+# directories first; otherwise PyOxidizer fails to pick up C extensions.
+# This is a bug in PyOxidizer.
+
+ROOT = CWD + "/../.."
+
+set_build_path(ROOT + "/build/pyoxidizer")
+
+def make_exe():
+ dist = default_python_distribution()
+
+ code = "import hgdemandimport; hgdemandimport.enable(); from mercurial import dispatch; dispatch.run()"
+
+ config = PythonInterpreterConfig(
+ raw_allocator = "system",
+ run_eval = code,
+ # We need this to make resourceutil happy, since it looks for sys.frozen.
+ sys_frozen = True,
+ legacy_windows_stdio = True,
+ )
+
+ exe = dist.to_python_executable(
+ name = "hg",
+ config = config,
+ )
+
+ # Use setup.py install to build Mercurial and collect Python resources to
+ # embed in the executable.
+ resources = dist.setup_py_install(ROOT)
+ exe.add_python_resources(resources)
+
+ return exe
+
+def make_install(exe):
+ m = FileManifest()
+
+ # `hg` goes in root directory.
+ m.add_python_resource(".", exe)
+
+ templates = glob(
+ include=[ROOT + "/mercurial/templates/**/*"],
+ strip_prefix = ROOT + "/mercurial/",
+ )
+ m.add_manifest(templates)
+
+ return m
+
+register_target("exe", make_exe)
+register_target("app", make_install, depends = ["exe"], default = True)
+
+resolve_targets()
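
The sys_frozen = True setting above exists because resourceutil decides between "frozen binary" and "running from source" by probing sys.frozen. A hedged sketch of that style of probe (the attribute is the conventional one set by frozen launchers; the helper name is made up for illustration):

    import os
    import sys

    def resources_root():
        """Sketch: pick a resource root depending on whether we run frozen."""
        if getattr(sys, "frozen", False):
            # Frozen binary: resources sit next to the executable.
            return os.path.dirname(sys.executable)
        # Source checkout: resources sit next to this module.
        return os.path.dirname(os.path.abspath(__file__))
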
--- a/contrib/perf.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/perf.py Thu Feb 13 10:12:12 2020 -0800
@@ -1536,6 +1536,7 @@
matters.
Example of useful set to test:
+
* tip
* 0
* -10:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/phab-clean.py Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+#
+# A small script to automatically reject idle Diffs
+#
+# You need to set the PHABBOT_USER and PHABBOT_TOKEN environment variables for authentication.
+from __future__ import absolute_import, print_function
+
+import datetime
+import os
+import sys
+
+import phabricator
+
+MESSAGE = """There seems to have been no activities on this Diff for the past 3 Months.
+
+By policy, we are automatically moving it out of the `need-review` state.
+
+Please move it back to `need-review` without hesitation if this Diff should still be discussed.
+
+:baymax:need-review-idle:
+"""
+
+
+PHAB_URL = "https://phab.mercurial-scm.org/api/"
+USER = os.environ.get("PHABBOT_USER", "baymax")
+TOKEN = os.environ.get("PHABBOT_TOKEN")
+
+
+NOW = datetime.datetime.now()
+
+# 3 months in seconds
+DELAY = 60 * 60 * 24 * 30 * 3
+
+
+def get_all_diff(phab):
+ """Fetch all the diff that the need review"""
+ return phab.differential.query(
+ status="status-needs-review",
+ order="order-modified",
+ paths=[('HG', None)],
+ )
+
+
+def filter_diffs(diffs, older_than):
+ """filter diffs to only keep the one unmodified sin <older_than> seconds"""
+ olds = []
+ for d in diffs:
+ modified = int(d['dateModified'])
+ modified = datetime.datetime.fromtimestamp(modified)
+ d["idleFor"] = idle_for = NOW - modified
+ if idle_for.total_seconds() > older_than:
+ olds.append(d)
+ return olds
+
+
+def nudge_diff(phab, diff):
+ """Comment on the idle diff and reject it"""
+ diff_id = int(diff['id'])
+ phab.differential.createcomment(
+ revision_id=diff_id, message=MESSAGE, action="reject"
+ )
+
+
+if not USER:
+ print(
+ "no user specified; please set PHABBOT_USER and PHABBOT_TOKEN",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+elif not TOKEN:
+ print(
+ "no api-token specified; please set PHABBOT_USER and PHABBOT_TOKEN",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+phab = phabricator.Phabricator(USER, host=PHAB_URL, token=TOKEN)
+phab.connect()
+phab.update_interfaces()
+print('Hello "%s".' % phab.user.whoami()['realName'])
+
+diffs = get_all_diff(phab)
+print("Found %d Diffs" % len(diffs))
+olds = filter_diffs(diffs, DELAY)
+print("Found %d old Diffs" % len(olds))
+for d in olds:
+ diff_id = d['id']
+ status = d['statusName']
+ modified = int(d['dateModified'])
+ idle_for = d["idleFor"]
+ msg = 'nudging D%s in "%s" state for %s'
+ print(msg % (diff_id, status, idle_for))
+ # uncomment to actually affect phab
+ # nudge_diff(phab, d)
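
The 3-month cutoff above (DELAY = 60 * 60 * 24 * 30 * 3, i.e. 90 days expressed in seconds) can be sanity-checked with a small self-contained snippet; the values below are synthetic and purely illustrative:

    import datetime

    NOW = datetime.datetime.now()
    DELAY = 60 * 60 * 24 * 30 * 3  # three 30-day months, in seconds

    # A Diff last modified 120 days ago has been idle longer than DELAY -> nudged.
    idle_for = NOW - (NOW - datetime.timedelta(days=120))
    assert idle_for.total_seconds() > DELAY

    # A Diff modified yesterday stays in need-review.
    assert datetime.timedelta(days=1).total_seconds() < DELAY
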
--- a/contrib/python-zstandard/make_cffi.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/make_cffi.py Thu Feb 13 10:12:12 2020 -0800
@@ -52,7 +52,8 @@
# Headers whose preprocessed output will be fed into cdef().
HEADERS = [
- os.path.join(HERE, "zstd", *p) for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
+ os.path.join(HERE, "zstd", *p)
+ for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
]
INCLUDE_DIRS = [
@@ -139,7 +140,9 @@
env = dict(os.environ)
if getattr(compiler, "_paths", None):
env["PATH"] = compiler._paths
- process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE, env=env)
+ process = subprocess.Popen(
+ args + [input_file], stdout=subprocess.PIPE, env=env
+ )
output = process.communicate()[0]
ret = process.poll()
if ret:
--- a/contrib/python-zstandard/setup.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/setup.py Thu Feb 13 10:12:12 2020 -0800
@@ -87,7 +87,9 @@
break
if not version:
- raise Exception("could not resolve package version; " "this should never happen")
+ raise Exception(
+ "could not resolve package version; " "this should never happen"
+ )
setup(
name="zstandard",
--- a/contrib/python-zstandard/setup_zstd.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/setup_zstd.py Thu Feb 13 10:12:12 2020 -0800
@@ -138,12 +138,16 @@
if not system_zstd:
sources.update([os.path.join(actual_root, p) for p in zstd_sources])
if support_legacy:
- sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy])
+ sources.update(
+ [os.path.join(actual_root, p) for p in zstd_sources_legacy]
+ )
sources = list(sources)
include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
if not system_zstd:
- include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes])
+ include_dirs.update(
+ [os.path.join(actual_root, d) for d in zstd_includes]
+ )
if support_legacy:
include_dirs.update(
[os.path.join(actual_root, d) for d in zstd_includes_legacy]
--- a/contrib/python-zstandard/tests/common.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/common.py Thu Feb 13 10:12:12 2020 -0800
@@ -50,7 +50,9 @@
os.environ.update(old_env)
if mod.backend != "cffi":
- raise Exception("got the zstandard %s backend instead of cffi" % mod.backend)
+ raise Exception(
+ "got the zstandard %s backend instead of cffi" % mod.backend
+ )
# If CFFI version is available, dynamically construct test methods
# that use it.
@@ -84,7 +86,9 @@
fn.__func__.func_defaults,
fn.__func__.func_closure,
)
- new_method = types.UnboundMethodType(new_fn, fn.im_self, fn.im_class)
+ new_method = types.UnboundMethodType(
+ new_fn, fn.im_self, fn.im_class
+ )
setattr(cls, name, new_method)
@@ -194,4 +198,6 @@
expensive_settings = hypothesis.settings(deadline=None, max_examples=10000)
hypothesis.settings.register_profile("expensive", expensive_settings)
- hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))
+ hypothesis.settings.load_profile(
+ os.environ.get("HYPOTHESIS_PROFILE", "default")
+ )
--- a/contrib/python-zstandard/tests/test_buffer_util.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_buffer_util.py Thu Feb 13 10:12:12 2020 -0800
@@ -67,7 +67,8 @@
self.skipTest("BufferWithSegments not available")
b = zstd.BufferWithSegments(
- b"foofooxfooxy", b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)])
+ b"foofooxfooxy",
+ b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)]),
)
self.assertEqual(len(b), 3)
self.assertEqual(b.size, 12)
@@ -83,17 +84,23 @@
if not hasattr(zstd, "BufferWithSegmentsCollection"):
self.skipTest("BufferWithSegmentsCollection not available")
- with self.assertRaisesRegex(ValueError, "must pass at least 1 argument"):
+ with self.assertRaisesRegex(
+ ValueError, "must pass at least 1 argument"
+ ):
zstd.BufferWithSegmentsCollection()
def test_argument_validation(self):
if not hasattr(zstd, "BufferWithSegmentsCollection"):
self.skipTest("BufferWithSegmentsCollection not available")
- with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+ with self.assertRaisesRegex(
+ TypeError, "arguments must be BufferWithSegments"
+ ):
zstd.BufferWithSegmentsCollection(None)
- with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+ with self.assertRaisesRegex(
+ TypeError, "arguments must be BufferWithSegments"
+ ):
zstd.BufferWithSegmentsCollection(
zstd.BufferWithSegments(b"foo", ss.pack(0, 3)), None
)
--- a/contrib/python-zstandard/tests/test_compressor.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_compressor.py Thu Feb 13 10:12:12 2020 -0800
@@ -24,7 +24,9 @@
def multithreaded_chunk_size(level, source_size=0):
- params = zstd.ZstdCompressionParameters.from_level(level, source_size=source_size)
+ params = zstd.ZstdCompressionParameters.from_level(
+ level, source_size=source_size
+ )
return 1 << (params.window_log + 2)
@@ -86,7 +88,9 @@
# This matches the test for read_to_iter() below.
cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
- result = cctx.compress(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o")
+ result = cctx.compress(
+ b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o"
+ )
self.assertEqual(
result,
b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00"
@@ -99,7 +103,9 @@
result = cctx.compress(b"foo" * 256)
def test_no_magic(self):
- params = zstd.ZstdCompressionParameters.from_level(1, format=zstd.FORMAT_ZSTD1)
+ params = zstd.ZstdCompressionParameters.from_level(
+ 1, format=zstd.FORMAT_ZSTD1
+ )
cctx = zstd.ZstdCompressor(compression_params=params)
magic = cctx.compress(b"foobar")
@@ -223,7 +229,8 @@
self.assertEqual(
result,
- b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00" b"\x66\x6f\x6f",
+ b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00"
+ b"\x66\x6f\x6f",
)
def test_multithreaded_compression_params(self):
@@ -234,7 +241,9 @@
params = zstd.get_frame_parameters(result)
self.assertEqual(params.content_size, 3)
- self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f")
+ self.assertEqual(
+ result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f"
+ )
@make_cffi
@@ -347,7 +356,9 @@
)
self.assertEqual(cobj.compress(b"bar"), b"")
# 3 byte header plus content.
- self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar")
+ self.assertEqual(
+ cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar"
+ )
self.assertEqual(cobj.flush(), b"\x01\x00\x00")
def test_flush_empty_block(self):
@@ -445,7 +456,9 @@
self.assertEqual(int(r), 0)
self.assertEqual(w, 9)
- self.assertEqual(dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
+ self.assertEqual(
+ dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00"
+ )
def test_large_data(self):
source = io.BytesIO()
@@ -478,7 +491,9 @@
cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
cctx.copy_stream(source, with_checksum)
- self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+ self.assertEqual(
+ len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+ )
no_params = zstd.get_frame_parameters(no_checksum.getvalue())
with_params = zstd.get_frame_parameters(with_checksum.getvalue())
@@ -585,7 +600,9 @@
cctx = zstd.ZstdCompressor()
with cctx.stream_reader(b"foo") as reader:
- with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+ with self.assertRaisesRegex(
+ ValueError, "cannot __enter__ multiple times"
+ ):
with reader as reader2:
pass
@@ -744,7 +761,9 @@
source = io.BytesIO(b"foobar")
with cctx.stream_reader(source, size=2) as reader:
- with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+ with self.assertRaisesRegex(
+ zstd.ZstdError, "Src size is incorrect"
+ ):
reader.read(10)
# Try another compression operation.
@@ -1126,7 +1145,9 @@
self.assertFalse(no_params.has_checksum)
self.assertTrue(with_params.has_checksum)
- self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+ self.assertEqual(
+ len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+ )
def test_write_content_size(self):
no_size = NonClosingBytesIO()
@@ -1145,7 +1166,9 @@
# Declaring size will write the header.
with_size = NonClosingBytesIO()
- with cctx.stream_writer(with_size, size=len(b"foobar" * 256)) as compressor:
+ with cctx.stream_writer(
+ with_size, size=len(b"foobar" * 256)
+ ) as compressor:
self.assertEqual(compressor.write(b"foobar" * 256), 0)
no_params = zstd.get_frame_parameters(no_size.getvalue())
@@ -1191,7 +1214,9 @@
self.assertFalse(no_params.has_checksum)
self.assertFalse(with_params.has_checksum)
- self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4)
+ self.assertEqual(
+ len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4
+ )
def test_memory_size(self):
cctx = zstd.ZstdCompressor(level=3)
@@ -1337,7 +1362,9 @@
for chunk in cctx.read_to_iter(b"foobar"):
pass
- with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+ with self.assertRaisesRegex(
+ ValueError, "must pass an object with a read"
+ ):
for chunk in cctx.read_to_iter(True):
pass
@@ -1513,7 +1540,9 @@
dctx = zstd.ZstdDecompressor()
- self.assertEqual(dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24))
+ self.assertEqual(
+ dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24)
+ )
def test_small_chunk_size(self):
cctx = zstd.ZstdCompressor()
@@ -1533,7 +1562,8 @@
dctx = zstd.ZstdDecompressor()
self.assertEqual(
- dctx.decompress(b"".join(chunks), max_output_size=10000), b"foo" * 1024
+ dctx.decompress(b"".join(chunks), max_output_size=10000),
+ b"foo" * 1024,
)
def test_input_types(self):
@@ -1602,7 +1632,8 @@
list(chunker.finish())
with self.assertRaisesRegex(
- zstd.ZstdError, r"cannot call compress\(\) after compression finished"
+ zstd.ZstdError,
+ r"cannot call compress\(\) after compression finished",
):
list(chunker.compress(b"foo"))
@@ -1644,7 +1675,9 @@
with self.assertRaises(TypeError):
cctx.multi_compress_to_buffer((1, 2))
- with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+ with self.assertRaisesRegex(
+ TypeError, "item 0 not a bytes like object"
+ ):
cctx.multi_compress_to_buffer([u"foo"])
def test_empty_input(self):
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py Thu Feb 13 10:12:12 2020 -0800
@@ -28,9 +28,13 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
- def test_stream_source_read(self, original, level, source_read_size, read_size):
+ def test_stream_source_read(
+ self, original, level, source_read_size, read_size
+ ):
if read_size == 0:
read_size = -1
@@ -58,9 +62,13 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
- def test_buffer_source_read(self, original, level, source_read_size, read_size):
+ def test_buffer_source_read(
+ self, original, level, source_read_size, read_size
+ ):
if read_size == 0:
read_size = -1
@@ -155,9 +163,13 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
- def test_stream_source_readinto(self, original, level, source_read_size, read_size):
+ def test_stream_source_readinto(
+ self, original, level, source_read_size, read_size
+ ):
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
@@ -184,9 +196,13 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
- def test_buffer_source_readinto(self, original, level, source_read_size, read_size):
+ def test_buffer_source_readinto(
+ self, original, level, source_read_size, read_size
+ ):
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
@@ -285,9 +301,13 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
- def test_stream_source_read1(self, original, level, source_read_size, read_size):
+ def test_stream_source_read1(
+ self, original, level, source_read_size, read_size
+ ):
if read_size == 0:
read_size = -1
@@ -315,9 +335,13 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
- def test_buffer_source_read1(self, original, level, source_read_size, read_size):
+ def test_buffer_source_read1(
+ self, original, level, source_read_size, read_size
+ ):
if read_size == 0:
read_size = -1
@@ -412,7 +436,9 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
def test_stream_source_readinto1(
self, original, level, source_read_size, read_size
@@ -446,7 +472,9 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
source_read_size=strategies.integers(1, 16384),
- read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+ read_size=strategies.integers(
+ 1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ ),
)
def test_buffer_source_readinto1(
self, original, level, source_read_size, read_size
@@ -576,7 +604,9 @@
read_size=strategies.integers(min_value=1, max_value=1048576),
write_size=strategies.integers(min_value=1, max_value=1048576),
)
- def test_read_write_size_variance(self, original, level, read_size, write_size):
+ def test_read_write_size_variance(
+ self, original, level, read_size, write_size
+ ):
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
@@ -585,7 +615,11 @@
dest = io.BytesIO()
cctx.copy_stream(
- source, dest, size=len(original), read_size=read_size, write_size=write_size
+ source,
+ dest,
+ size=len(original),
+ read_size=read_size,
+ write_size=write_size,
)
self.assertEqual(dest.getvalue(), ref_frame)
@@ -675,7 +709,9 @@
decompressed_chunks.append(dobj.decompress(chunk))
self.assertEqual(
- dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+ dctx.decompress(
+ b"".join(compressed_chunks), max_output_size=len(original)
+ ),
original,
)
self.assertEqual(b"".join(decompressed_chunks), original)
@@ -690,7 +726,9 @@
read_size=strategies.integers(min_value=1, max_value=4096),
write_size=strategies.integers(min_value=1, max_value=4096),
)
- def test_read_write_size_variance(self, original, level, read_size, write_size):
+ def test_read_write_size_variance(
+ self, original, level, read_size, write_size
+ ):
refcctx = zstd.ZstdCompressor(level=level)
ref_frame = refcctx.compress(original)
@@ -699,7 +737,10 @@
cctx = zstd.ZstdCompressor(level=level)
chunks = list(
cctx.read_to_iter(
- source, size=len(original), read_size=read_size, write_size=write_size
+ source,
+ size=len(original),
+ read_size=read_size,
+ write_size=write_size,
)
)
@@ -710,7 +751,9 @@
class TestCompressor_multi_compress_to_buffer_fuzzing(TestCase):
@hypothesis.given(
original=strategies.lists(
- strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+ strategies.sampled_from(random_input_data()),
+ min_size=1,
+ max_size=1024,
),
threads=strategies.integers(min_value=1, max_value=8),
use_dict=strategies.booleans(),
@@ -776,7 +819,8 @@
dctx = zstd.ZstdDecompressor()
self.assertEqual(
- dctx.decompress(b"".join(chunks), max_output_size=len(original)), original
+ dctx.decompress(b"".join(chunks), max_output_size=len(original)),
+ original,
)
self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1]))
@@ -794,7 +838,9 @@
input_sizes=strategies.data(),
flushes=strategies.data(),
)
- def test_flush_block(self, original, level, chunk_size, input_sizes, flushes):
+ def test_flush_block(
+ self, original, level, chunk_size, input_sizes, flushes
+ ):
cctx = zstd.ZstdCompressor(level=level)
chunker = cctx.chunker(chunk_size=chunk_size)
@@ -830,7 +876,9 @@
decompressed_chunks.append(dobj.decompress(b"".join(chunks)))
self.assertEqual(
- dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+ dctx.decompress(
+ b"".join(compressed_chunks), max_output_size=len(original)
+ ),
original,
)
self.assertEqual(b"".join(decompressed_chunks), original)
--- a/contrib/python-zstandard/tests/test_data_structures.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_data_structures.py Thu Feb 13 10:12:12 2020 -0800
@@ -65,7 +65,9 @@
p = zstd.ZstdCompressionParameters(threads=4)
self.assertEqual(p.threads, 4)
- p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576, overlap_log=6)
+ p = zstd.ZstdCompressionParameters(
+ threads=2, job_size=1048576, overlap_log=6
+ )
self.assertEqual(p.threads, 2)
self.assertEqual(p.job_size, 1048576)
self.assertEqual(p.overlap_log, 6)
@@ -128,7 +130,9 @@
with self.assertRaisesRegex(
ValueError, "cannot specify both ldm_hash_rate_log"
):
- zstd.ZstdCompressionParameters(ldm_hash_rate_log=8, ldm_hash_every_log=4)
+ zstd.ZstdCompressionParameters(
+ ldm_hash_rate_log=8, ldm_hash_every_log=4
+ )
p = zstd.ZstdCompressionParameters(ldm_hash_rate_log=8)
self.assertEqual(p.ldm_hash_every_log, 8)
@@ -137,7 +141,9 @@
self.assertEqual(p.ldm_hash_every_log, 16)
def test_overlap_log(self):
- with self.assertRaisesRegex(ValueError, "cannot specify both overlap_log"):
+ with self.assertRaisesRegex(
+ ValueError, "cannot specify both overlap_log"
+ ):
zstd.ZstdCompressionParameters(overlap_log=1, overlap_size_log=9)
p = zstd.ZstdCompressionParameters(overlap_log=2)
@@ -169,10 +175,14 @@
zstd.get_frame_parameters(u"foobarbaz")
def test_invalid_input_sizes(self):
- with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+ with self.assertRaisesRegex(
+ zstd.ZstdError, "not enough data for frame"
+ ):
zstd.get_frame_parameters(b"")
- with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+ with self.assertRaisesRegex(
+ zstd.ZstdError, "not enough data for frame"
+ ):
zstd.get_frame_parameters(zstd.FRAME_HEADER)
def test_invalid_frame(self):
@@ -201,7 +211,9 @@
self.assertTrue(params.has_checksum)
# Upper 2 bits indicate content size.
- params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x40\x00\xff\x00")
+ params = zstd.get_frame_parameters(
+ zstd.FRAME_HEADER + b"\x40\x00\xff\x00"
+ )
self.assertEqual(params.content_size, 511)
self.assertEqual(params.window_size, 1024)
self.assertEqual(params.dict_id, 0)
@@ -215,7 +227,9 @@
self.assertFalse(params.has_checksum)
# Set multiple things.
- params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00")
+ params = zstd.get_frame_parameters(
+ zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00"
+ )
self.assertEqual(params.content_size, 272)
self.assertEqual(params.window_size, 262144)
self.assertEqual(params.dict_id, 15)
--- a/contrib/python-zstandard/tests/test_data_structures_fuzzing.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py Thu Feb 13 10:12:12 2020 -0800
@@ -23,7 +23,9 @@
s_chainlog = strategies.integers(
min_value=zstd.CHAINLOG_MIN, max_value=zstd.CHAINLOG_MAX
)
-s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX)
+s_hashlog = strategies.integers(
+ min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX
+)
s_searchlog = strategies.integers(
min_value=zstd.SEARCHLOG_MIN, max_value=zstd.SEARCHLOG_MAX
)
@@ -61,7 +63,14 @@
s_strategy,
)
def test_valid_init(
- self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+ self,
+ windowlog,
+ chainlog,
+ hashlog,
+ searchlog,
+ minmatch,
+ targetlength,
+ strategy,
):
zstd.ZstdCompressionParameters(
window_log=windowlog,
@@ -83,7 +92,14 @@
s_strategy,
)
def test_estimated_compression_context_size(
- self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+ self,
+ windowlog,
+ chainlog,
+ hashlog,
+ searchlog,
+ minmatch,
+ targetlength,
+ strategy,
):
if minmatch == zstd.MINMATCH_MIN and strategy in (
zstd.STRATEGY_FAST,
--- a/contrib/python-zstandard/tests/test_decompressor.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_decompressor.py Thu Feb 13 10:12:12 2020 -0800
@@ -170,11 +170,15 @@
dctx.decompress(compressed, max_output_size=len(source) - 1)
# Input size + 1 works
- decompressed = dctx.decompress(compressed, max_output_size=len(source) + 1)
+ decompressed = dctx.decompress(
+ compressed, max_output_size=len(source) + 1
+ )
self.assertEqual(decompressed, source)
# A much larger buffer works.
- decompressed = dctx.decompress(compressed, max_output_size=len(source) * 64)
+ decompressed = dctx.decompress(
+ compressed, max_output_size=len(source) * 64
+ )
self.assertEqual(decompressed, source)
def test_stupidly_large_output_buffer(self):
@@ -237,7 +241,8 @@
dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
with self.assertRaisesRegex(
- zstd.ZstdError, "decompression error: Frame requires too much memory"
+ zstd.ZstdError,
+ "decompression error: Frame requires too much memory",
):
dctx.decompress(frame, max_output_size=len(source))
@@ -291,7 +296,9 @@
self.assertEqual(w, len(source.getvalue()))
def test_read_write_size(self):
- source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+ source = OpCountingBytesIO(
+ zstd.ZstdCompressor().compress(b"foobarfoobar")
+ )
dest = OpCountingBytesIO()
dctx = zstd.ZstdDecompressor()
@@ -309,7 +316,9 @@
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(b"foo") as reader:
- with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+ with self.assertRaisesRegex(
+ ValueError, "cannot __enter__ multiple times"
+ ):
with reader as reader2:
pass
@@ -474,7 +483,9 @@
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
- with self.assertRaisesRegex(ValueError, "cannot seek to negative position"):
+ with self.assertRaisesRegex(
+ ValueError, "cannot seek to negative position"
+ ):
reader.seek(-1, os.SEEK_SET)
reader.read(1)
@@ -490,7 +501,8 @@
reader.seek(-1, os.SEEK_CUR)
with self.assertRaisesRegex(
- ValueError, "zstd decompression streams cannot be seeked with SEEK_END"
+ ValueError,
+ "zstd decompression streams cannot be seeked with SEEK_END",
):
reader.seek(0, os.SEEK_END)
@@ -743,7 +755,9 @@
def test_read_lines(self):
cctx = zstd.ZstdCompressor()
- source = b"\n".join(("line %d" % i).encode("ascii") for i in range(1024))
+ source = b"\n".join(
+ ("line %d" % i).encode("ascii") for i in range(1024)
+ )
frame = cctx.compress(source)
@@ -821,7 +835,9 @@
dobj = dctx.decompressobj()
dobj.decompress(data)
- with self.assertRaisesRegex(zstd.ZstdError, "cannot use a decompressobj"):
+ with self.assertRaisesRegex(
+ zstd.ZstdError, "cannot use a decompressobj"
+ ):
dobj.decompress(data)
self.assertIsNone(dobj.flush())
@@ -1124,7 +1140,9 @@
# Buffer protocol works.
dctx.read_to_iter(b"foobar")
- with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+ with self.assertRaisesRegex(
+ ValueError, "must pass an object with a read"
+ ):
b"".join(dctx.read_to_iter(True))
def test_empty_input(self):
@@ -1226,7 +1244,9 @@
decompressed = b"".join(chunks)
self.assertEqual(decompressed, source.getvalue())
- @unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+ @unittest.skipUnless(
+ "ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set"
+ )
def test_large_input(self):
bytes = list(struct.Struct(">B").pack(i) for i in range(256))
compressed = NonClosingBytesIO()
@@ -1241,13 +1261,16 @@
len(compressed.getvalue())
> zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
)
- have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+ have_raw = (
+ input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+ )
if have_compressed and have_raw:
break
compressed = io.BytesIO(compressed.getvalue())
self.assertGreater(
- len(compressed.getvalue()), zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+ len(compressed.getvalue()),
+ zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
)
dctx = zstd.ZstdDecompressor()
@@ -1303,7 +1326,9 @@
self.assertEqual(streamed, source.getvalue())
def test_read_write_size(self):
- source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+ source = OpCountingBytesIO(
+ zstd.ZstdCompressor().compress(b"foobarfoobar")
+ )
dctx = zstd.ZstdDecompressor()
for chunk in dctx.read_to_iter(source, read_size=1, write_size=1):
self.assertEqual(len(chunk), 1)
@@ -1355,10 +1380,14 @@
):
dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])
- with self.assertRaisesRegex(ValueError, "chunk 0 is not a valid zstd frame"):
+ with self.assertRaisesRegex(
+ ValueError, "chunk 0 is not a valid zstd frame"
+ ):
dctx.decompress_content_dict_chain([b"foo" * 8])
- no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+ no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+ b"foo" * 64
+ )
with self.assertRaisesRegex(
ValueError, "chunk 0 missing content size in frame"
@@ -1389,10 +1418,14 @@
):
dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])
- with self.assertRaisesRegex(ValueError, "chunk 1 is not a valid zstd frame"):
+ with self.assertRaisesRegex(
+ ValueError, "chunk 1 is not a valid zstd frame"
+ ):
dctx.decompress_content_dict_chain([initial, b"foo" * 8])
- no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+ no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+ b"foo" * 64
+ )
with self.assertRaisesRegex(
ValueError, "chunk 1 missing content size in frame"
@@ -1400,7 +1433,9 @@
dctx.decompress_content_dict_chain([initial, no_size])
# Corrupt second frame.
- cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b"foo" * 64))
+ cctx = zstd.ZstdCompressor(
+ dict_data=zstd.ZstdCompressionDict(b"foo" * 64)
+ )
frame = cctx.compress(b"bar" * 64)
frame = frame[0:12] + frame[15:]
@@ -1447,7 +1482,9 @@
with self.assertRaises(TypeError):
dctx.multi_decompress_to_buffer((1, 2))
- with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+ with self.assertRaisesRegex(
+ TypeError, "item 0 not a bytes like object"
+ ):
dctx.multi_decompress_to_buffer([u"foo"])
with self.assertRaisesRegex(
@@ -1491,7 +1528,9 @@
if not hasattr(dctx, "multi_decompress_to_buffer"):
self.skipTest("multi_decompress_to_buffer not available")
- result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+ result = dctx.multi_decompress_to_buffer(
+ frames, decompressed_sizes=sizes
+ )
self.assertEqual(len(result), len(frames))
self.assertEqual(result.size(), sum(map(len, original)))
@@ -1582,10 +1621,15 @@
# And a manual mode.
b = b"".join([frames[0].tobytes(), frames[1].tobytes()])
b1 = zstd.BufferWithSegments(
- b, struct.pack("=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1]))
+ b,
+ struct.pack(
+ "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
+ ),
)
- b = b"".join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
+ b = b"".join(
+ [frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()]
+ )
b2 = zstd.BufferWithSegments(
b,
struct.pack(
--- a/contrib/python-zstandard/tests/test_decompressor_fuzzing.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py Thu Feb 13 10:12:12 2020 -0800
@@ -196,7 +196,9 @@
streaming=strategies.booleans(),
source_read_size=strategies.integers(1, 1048576),
)
- def test_stream_source_readall(self, original, level, streaming, source_read_size):
+ def test_stream_source_readall(
+ self, original, level, streaming, source_read_size
+ ):
cctx = zstd.ZstdCompressor(level=level)
if streaming:
@@ -398,7 +400,9 @@
write_size=strategies.integers(min_value=1, max_value=8192),
input_sizes=strategies.data(),
)
- def test_write_size_variance(self, original, level, write_size, input_sizes):
+ def test_write_size_variance(
+ self, original, level, write_size, input_sizes
+ ):
cctx = zstd.ZstdCompressor(level=level)
frame = cctx.compress(original)
@@ -433,7 +437,9 @@
read_size=strategies.integers(min_value=1, max_value=8192),
write_size=strategies.integers(min_value=1, max_value=8192),
)
- def test_read_write_size_variance(self, original, level, read_size, write_size):
+ def test_read_write_size_variance(
+ self, original, level, read_size, write_size
+ ):
cctx = zstd.ZstdCompressor(level=level)
frame = cctx.compress(original)
@@ -441,7 +447,9 @@
dest = io.BytesIO()
dctx = zstd.ZstdDecompressor()
- dctx.copy_stream(source, dest, read_size=read_size, write_size=write_size)
+ dctx.copy_stream(
+ source, dest, read_size=read_size, write_size=write_size
+ )
self.assertEqual(dest.getvalue(), original)
@@ -490,11 +498,14 @@
original=strategies.sampled_from(random_input_data()),
level=strategies.integers(min_value=1, max_value=5),
write_size=strategies.integers(
- min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ min_value=1,
+ max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
),
chunk_sizes=strategies.data(),
)
- def test_random_output_sizes(self, original, level, write_size, chunk_sizes):
+ def test_random_output_sizes(
+ self, original, level, write_size, chunk_sizes
+ ):
cctx = zstd.ZstdCompressor(level=level)
frame = cctx.compress(original)
@@ -524,7 +535,9 @@
read_size=strategies.integers(min_value=1, max_value=4096),
write_size=strategies.integers(min_value=1, max_value=4096),
)
- def test_read_write_size_variance(self, original, level, read_size, write_size):
+ def test_read_write_size_variance(
+ self, original, level, read_size, write_size
+ ):
cctx = zstd.ZstdCompressor(level=level)
frame = cctx.compress(original)
@@ -532,7 +545,9 @@
dctx = zstd.ZstdDecompressor()
chunks = list(
- dctx.read_to_iter(source, read_size=read_size, write_size=write_size)
+ dctx.read_to_iter(
+ source, read_size=read_size, write_size=write_size
+ )
)
self.assertEqual(b"".join(chunks), original)
@@ -542,7 +557,9 @@
class TestDecompressor_multi_decompress_to_buffer_fuzzing(TestCase):
@hypothesis.given(
original=strategies.lists(
- strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+ strategies.sampled_from(random_input_data()),
+ min_size=1,
+ max_size=1024,
),
threads=strategies.integers(min_value=1, max_value=8),
use_dict=strategies.booleans(),
--- a/contrib/python-zstandard/tests/test_train_dictionary.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py Thu Feb 13 10:12:12 2020 -0800
@@ -51,11 +51,15 @@
self.assertEqual(d.d, 16)
def test_set_dict_id(self):
- d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42)
+ d = zstd.train_dictionary(
+ 8192, generate_samples(), k=64, d=16, dict_id=42
+ )
self.assertEqual(d.dict_id(), 42)
def test_optimize(self):
- d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1, d=16)
+ d = zstd.train_dictionary(
+ 8192, generate_samples(), threads=-1, steps=1, d=16
+ )
# This varies by platform.
self.assertIn(d.k, (50, 2000))
@@ -71,10 +75,14 @@
def test_bad_precompute_compress(self):
d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)
- with self.assertRaisesRegex(ValueError, "must specify one of level or "):
+ with self.assertRaisesRegex(
+ ValueError, "must specify one of level or "
+ ):
d.precompute_compress()
- with self.assertRaisesRegex(ValueError, "must only specify one of level or "):
+ with self.assertRaisesRegex(
+ ValueError, "must only specify one of level or "
+ ):
d.precompute_compress(
level=3, compression_params=zstd.CompressionParameters()
)
@@ -88,5 +96,7 @@
d = zstd.ZstdCompressionDict(
b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_FULLDICT
)
- with self.assertRaisesRegex(zstd.ZstdError, "unable to precompute dictionary"):
+ with self.assertRaisesRegex(
+ zstd.ZstdError, "unable to precompute dictionary"
+ ):
d.precompute_compress(level=1)
--- a/contrib/python-zstandard/zstandard/cffi.py Fri Dec 13 10:37:45 2019 +0100
+++ b/contrib/python-zstandard/zstandard/cffi.py Thu Feb 13 10:12:12 2020 -0800
@@ -299,10 +299,14 @@
_set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
_set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log)
_set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match)
- _set_compression_parameter(params, lib.ZSTD_c_targetLength, target_length)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_targetLength, target_length
+ )
if strategy != -1 and compression_strategy != -1:
- raise ValueError("cannot specify both compression_strategy and strategy")
+ raise ValueError(
+ "cannot specify both compression_strategy and strategy"
+ )
if compression_strategy != -1:
strategy = compression_strategy
@@ -313,12 +317,16 @@
_set_compression_parameter(
params, lib.ZSTD_c_contentSizeFlag, write_content_size
)
- _set_compression_parameter(params, lib.ZSTD_c_checksumFlag, write_checksum)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_checksumFlag, write_checksum
+ )
_set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
_set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)
if overlap_log != -1 and overlap_size_log != -1:
- raise ValueError("cannot specify both overlap_log and overlap_size_log")
+ raise ValueError(
+ "cannot specify both overlap_log and overlap_size_log"
+ )
if overlap_size_log != -1:
overlap_log = overlap_size_log
@@ -326,12 +334,16 @@
overlap_log = 0
_set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
- _set_compression_parameter(params, lib.ZSTD_c_forceMaxWindow, force_max_window)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_forceMaxWindow, force_max_window
+ )
_set_compression_parameter(
params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
)
_set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
- _set_compression_parameter(params, lib.ZSTD_c_ldmMinMatch, ldm_min_match)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_ldmMinMatch, ldm_min_match
+ )
_set_compression_parameter(
params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
)
@@ -346,7 +358,9 @@
elif ldm_hash_rate_log == -1:
ldm_hash_rate_log = 0
- _set_compression_parameter(params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log)
+ _set_compression_parameter(
+ params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log
+ )
@property
def format(self):
@@ -354,7 +368,9 @@
@property
def compression_level(self):
- return _get_compression_parameter(self._params, lib.ZSTD_c_compressionLevel)
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_compressionLevel
+ )
@property
def window_log(self):
@@ -386,7 +402,9 @@
@property
def write_content_size(self):
- return _get_compression_parameter(self._params, lib.ZSTD_c_contentSizeFlag)
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_contentSizeFlag
+ )
@property
def write_checksum(self):
@@ -410,7 +428,9 @@
@property
def force_max_window(self):
- return _get_compression_parameter(self._params, lib.ZSTD_c_forceMaxWindow)
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_forceMaxWindow
+ )
@property
def enable_ldm(self):
@@ -428,11 +448,15 @@
@property
def ldm_bucket_size_log(self):
- return _get_compression_parameter(self._params, lib.ZSTD_c_ldmBucketSizeLog)
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_ldmBucketSizeLog
+ )
@property
def ldm_hash_rate_log(self):
- return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashRateLog)
+ return _get_compression_parameter(
+ self._params, lib.ZSTD_c_ldmHashRateLog
+ )
@property
def ldm_hash_every_log(self):
@@ -457,7 +481,8 @@
zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
if lib.ZSTD_isError(zresult):
raise ZstdError(
- "unable to set compression context parameter: %s" % _zstd_error(zresult)
+ "unable to set compression context parameter: %s"
+ % _zstd_error(zresult)
)
@@ -467,14 +492,17 @@
zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
if lib.ZSTD_isError(zresult):
raise ZstdError(
- "unable to get compression context parameter: %s" % _zstd_error(zresult)
+ "unable to get compression context parameter: %s"
+ % _zstd_error(zresult)
)
return result[0]
class ZstdCompressionWriter(object):
- def __init__(self, compressor, writer, source_size, write_size, write_return_read):
+ def __init__(
+ self, compressor, writer, source_size, write_size, write_return_read
+ ):
self._compressor = compressor
self._writer = writer
self._write_size = write_size
@@ -491,7 +519,9 @@
zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
if lib.ZSTD_isError(zresult):
- raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
def __enter__(self):
if self._closed:
@@ -595,13 +625,20 @@
while in_buffer.pos < in_buffer.size:
zresult = lib.ZSTD_compressStream2(
- self._compressor._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+ self._compressor._cctx,
+ out_buffer,
+ in_buffer,
+ lib.ZSTD_e_continue,
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if out_buffer.pos:
- self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+ self._writer.write(
+ ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ )
total_write += out_buffer.pos
self._bytes_compressed += out_buffer.pos
out_buffer.pos = 0
@@ -637,10 +674,14 @@
self._compressor._cctx, out_buffer, in_buffer, flush
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if out_buffer.pos:
- self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+ self._writer.write(
+ ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ )
total_write += out_buffer.pos
self._bytes_compressed += out_buffer.pos
out_buffer.pos = 0
@@ -672,7 +713,9 @@
self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if self._out.pos:
chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
@@ -681,7 +724,10 @@
return b"".join(chunks)
def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
- if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+ if flush_mode not in (
+ COMPRESSOBJ_FLUSH_FINISH,
+ COMPRESSOBJ_FLUSH_BLOCK,
+ ):
raise ValueError("flush mode not recognized")
if self._finished:
@@ -768,7 +814,9 @@
self._in.pos = 0
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if self._out.pos == self._out.size:
yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -780,7 +828,8 @@
if self._in.src != ffi.NULL:
raise ZstdError(
- "cannot call flush() before consuming output from " "previous operation"
+ "cannot call flush() before consuming output from "
+ "previous operation"
)
while True:
@@ -788,7 +837,9 @@
self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if self._out.pos:
yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -812,7 +863,9 @@
self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if self._out.pos:
yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -939,7 +992,10 @@
old_pos = out_buffer.pos
zresult = lib.ZSTD_compressStream2(
- self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_continue
+ self._compressor._cctx,
+ out_buffer,
+ self._in_buffer,
+ lib.ZSTD_e_continue,
)
self._bytes_compressed += out_buffer.pos - old_pos
@@ -997,7 +1053,9 @@
self._bytes_compressed += out_buffer.pos - old_pos
if lib.ZSTD_isError(zresult):
- raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+ raise ZstdError(
+ "error ending compression stream: %s", _zstd_error(zresult)
+ )
if zresult == 0:
self._finished_output = True
@@ -1102,7 +1160,9 @@
self._bytes_compressed += out_buffer.pos - old_pos
if lib.ZSTD_isError(zresult):
- raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+ raise ZstdError(
+ "error ending compression stream: %s", _zstd_error(zresult)
+ )
if zresult == 0:
self._finished_output = True
@@ -1170,13 +1230,17 @@
threads=0,
):
if level > lib.ZSTD_maxCLevel():
- raise ValueError("level must be less than %d" % lib.ZSTD_maxCLevel())
+ raise ValueError(
+ "level must be less than %d" % lib.ZSTD_maxCLevel()
+ )
if threads < 0:
threads = _cpu_count()
if compression_params and write_checksum is not None:
- raise ValueError("cannot define compression_params and " "write_checksum")
+ raise ValueError(
+ "cannot define compression_params and " "write_checksum"
+ )
if compression_params and write_content_size is not None:
raise ValueError(
@@ -1184,7 +1248,9 @@
)
if compression_params and write_dict_id is not None:
- raise ValueError("cannot define compression_params and " "write_dict_id")
+ raise ValueError(
+ "cannot define compression_params and " "write_dict_id"
+ )
if compression_params and threads:
raise ValueError("cannot define compression_params and threads")
@@ -1201,7 +1267,9 @@
self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
- _set_compression_parameter(self._params, lib.ZSTD_c_compressionLevel, level)
+ _set_compression_parameter(
+ self._params, lib.ZSTD_c_compressionLevel, level
+ )
_set_compression_parameter(
self._params,
@@ -1210,7 +1278,9 @@
)
_set_compression_parameter(
- self._params, lib.ZSTD_c_checksumFlag, 1 if write_checksum else 0
+ self._params,
+ lib.ZSTD_c_checksumFlag,
+ 1 if write_checksum else 0,
)
_set_compression_parameter(
@@ -1218,7 +1288,9 @@
)
if threads:
- _set_compression_parameter(self._params, lib.ZSTD_c_nbWorkers, threads)
+ _set_compression_parameter(
+ self._params, lib.ZSTD_c_nbWorkers, threads
+ )
cctx = lib.ZSTD_createCCtx()
if cctx == ffi.NULL:
@@ -1237,10 +1309,13 @@
)
def _setup_cctx(self):
- zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx, self._params)
+ zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(
+ self._cctx, self._params
+ )
if lib.ZSTD_isError(zresult):
raise ZstdError(
- "could not set compression parameters: %s" % _zstd_error(zresult)
+ "could not set compression parameters: %s"
+ % _zstd_error(zresult)
)
dict_data = self._dict_data
@@ -1259,7 +1334,8 @@
if lib.ZSTD_isError(zresult):
raise ZstdError(
- "could not load compression dictionary: %s" % _zstd_error(zresult)
+ "could not load compression dictionary: %s"
+ % _zstd_error(zresult)
)
def memory_size(self):
@@ -1275,7 +1351,9 @@
zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
if lib.ZSTD_isError(zresult):
- raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
out_buffer = ffi.new("ZSTD_outBuffer *")
in_buffer = ffi.new("ZSTD_inBuffer *")
@@ -1307,11 +1385,15 @@
zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
if lib.ZSTD_isError(zresult):
- raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
cobj = ZstdCompressionObj()
cobj._out = ffi.new("ZSTD_outBuffer *")
- cobj._dst_buffer = ffi.new("char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+ cobj._dst_buffer = ffi.new(
+ "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+ )
cobj._out.dst = cobj._dst_buffer
cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
cobj._out.pos = 0
@@ -1328,7 +1410,9 @@
zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
if lib.ZSTD_isError(zresult):
- raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
return ZstdCompressionChunker(self, chunk_size=chunk_size)
@@ -1353,7 +1437,9 @@
zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
if lib.ZSTD_isError(zresult):
- raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
in_buffer = ffi.new("ZSTD_inBuffer *")
out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1381,7 +1467,9 @@
self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if out_buffer.pos:
ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -1423,7 +1511,9 @@
zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
if lib.ZSTD_isError(zresult):
- raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
return ZstdCompressionReader(self, source, read_size)
@@ -1443,7 +1533,9 @@
if size < 0:
size = lib.ZSTD_CONTENTSIZE_UNKNOWN
- return ZstdCompressionWriter(self, writer, size, write_size, write_return_read)
+ return ZstdCompressionWriter(
+ self, writer, size, write_size, write_return_read
+ )
write_to = stream_writer
@@ -1473,7 +1565,9 @@
zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
if lib.ZSTD_isError(zresult):
- raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "error setting source size: %s" % _zstd_error(zresult)
+ )
in_buffer = ffi.new("ZSTD_inBuffer *")
out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1517,7 +1611,9 @@
self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd compress error: %s" % _zstd_error(zresult)
+ )
if out_buffer.pos:
data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -1596,10 +1692,14 @@
data_buffer = ffi.from_buffer(data)
zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
if lib.ZSTD_isError(zresult):
- raise ZstdError("cannot get frame parameters: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "cannot get frame parameters: %s" % _zstd_error(zresult)
+ )
if zresult:
- raise ZstdError("not enough data for frame parameters; need %d bytes" % zresult)
+ raise ZstdError(
+ "not enough data for frame parameters; need %d bytes" % zresult
+ )
return FrameParameters(params[0])
@@ -1611,9 +1711,14 @@
self.k = k
self.d = d
- if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT, DICT_TYPE_FULLDICT):
+ if dict_type not in (
+ DICT_TYPE_AUTO,
+ DICT_TYPE_RAWCONTENT,
+ DICT_TYPE_FULLDICT,
+ ):
raise ValueError(
- "invalid dictionary load mode: %d; must use " "DICT_TYPE_* constants"
+ "invalid dictionary load mode: %d; must use "
+ "DICT_TYPE_* constants"
)
self._dict_type = dict_type
@@ -1630,7 +1735,9 @@
def precompute_compress(self, level=0, compression_params=None):
if level and compression_params:
- raise ValueError("must only specify one of level or " "compression_params")
+ raise ValueError(
+ "must only specify one of level or " "compression_params"
+ )
if not level and not compression_params:
raise ValueError("must specify one of level or compression_params")
@@ -1675,7 +1782,9 @@
if ddict == ffi.NULL:
raise ZstdError("could not create decompression dict")
- ddict = ffi.gc(ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict))
+ ddict = ffi.gc(
+ ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)
+ )
self.__dict__["_ddict"] = ddict
return ddict
@@ -1805,7 +1914,9 @@
self._decompressor._dctx, out_buffer, in_buffer
)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd decompressor error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd decompressor error: %s" % _zstd_error(zresult)
+ )
if zresult == 0:
self._finished = True
@@ -2105,16 +2216,22 @@
if whence == os.SEEK_SET:
if pos < 0:
- raise ValueError("cannot seek to negative position with SEEK_SET")
+ raise ValueError(
+ "cannot seek to negative position with SEEK_SET"
+ )
if pos < self._bytes_decompressed:
- raise ValueError("cannot seek zstd decompression stream " "backwards")
+ raise ValueError(
+ "cannot seek zstd decompression stream " "backwards"
+ )
read_amount = pos - self._bytes_decompressed
elif whence == os.SEEK_CUR:
if pos < 0:
- raise ValueError("cannot seek zstd decompression stream " "backwards")
+ raise ValueError(
+ "cannot seek zstd decompression stream " "backwards"
+ )
read_amount = pos
elif whence == os.SEEK_END:
@@ -2123,7 +2240,9 @@
)
while read_amount:
- result = self.read(min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE))
+ result = self.read(
+ min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+ )
if not result:
break
@@ -2257,10 +2376,14 @@
while in_buffer.pos < in_buffer.size:
zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd decompress error: %s" % _zstd_error(zresult)
+ )
if out_buffer.pos:
- self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+ self._writer.write(
+ ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+ )
total_write += out_buffer.pos
out_buffer.pos = 0
@@ -2299,7 +2422,9 @@
data_buffer = ffi.from_buffer(data)
- output_size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
+ output_size = lib.ZSTD_getFrameContentSize(
+ data_buffer, len(data_buffer)
+ )
if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
raise ZstdError("error determining content size from frame header")
@@ -2307,7 +2432,9 @@
return b""
elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
if not max_output_size:
- raise ZstdError("could not determine content size in frame header")
+ raise ZstdError(
+ "could not determine content size in frame header"
+ )
result_buffer = ffi.new("char[]", max_output_size)
result_size = max_output_size
@@ -2330,7 +2457,9 @@
if lib.ZSTD_isError(zresult):
raise ZstdError("decompression error: %s" % _zstd_error(zresult))
elif zresult:
- raise ZstdError("decompression error: did not decompress full frame")
+ raise ZstdError(
+ "decompression error: did not decompress full frame"
+ )
elif output_size and out_buffer.pos != output_size:
raise ZstdError(
"decompression error: decompressed %d bytes; expected %d"
@@ -2346,7 +2475,9 @@
read_across_frames=False,
):
self._ensure_dctx()
- return ZstdDecompressionReader(self, source, read_size, read_across_frames)
+ return ZstdDecompressionReader(
+ self, source, read_size, read_across_frames
+ )
def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
if write_size < 1:
@@ -2421,9 +2552,13 @@
while in_buffer.pos < in_buffer.size:
assert out_buffer.pos == 0
- zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+ zresult = lib.ZSTD_decompressStream(
+ self._dctx, out_buffer, in_buffer
+ )
if lib.ZSTD_isError(zresult):
- raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "zstd decompress error: %s" % _zstd_error(zresult)
+ )
if out_buffer.pos:
data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -2449,7 +2584,9 @@
if not hasattr(writer, "write"):
raise ValueError("must pass an object with a write() method")
- return ZstdDecompressionWriter(self, writer, write_size, write_return_read)
+ return ZstdDecompressionWriter(
+ self, writer, write_size, write_return_read
+ )
write_to = stream_writer
@@ -2491,7 +2628,9 @@
# Flush all read data to output.
while in_buffer.pos < in_buffer.size:
- zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+ zresult = lib.ZSTD_decompressStream(
+ self._dctx, out_buffer, in_buffer
+ )
if lib.ZSTD_isError(zresult):
raise ZstdError(
"zstd decompressor error: %s" % _zstd_error(zresult)
@@ -2521,7 +2660,9 @@
# All chunks should be zstd frames and should have content size set.
chunk_buffer = ffi.from_buffer(chunk)
params = ffi.new("ZSTD_frameHeader *")
- zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+ zresult = lib.ZSTD_getFrameHeader(
+ params, chunk_buffer, len(chunk_buffer)
+ )
if lib.ZSTD_isError(zresult):
raise ValueError("chunk 0 is not a valid zstd frame")
elif zresult:
@@ -2546,7 +2687,9 @@
zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
if lib.ZSTD_isError(zresult):
- raise ZstdError("could not decompress chunk 0: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "could not decompress chunk 0: %s" % _zstd_error(zresult)
+ )
elif zresult:
raise ZstdError("chunk 0 did not decompress full frame")
@@ -2561,11 +2704,15 @@
raise ValueError("chunk %d must be bytes" % i)
chunk_buffer = ffi.from_buffer(chunk)
- zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+ zresult = lib.ZSTD_getFrameHeader(
+ params, chunk_buffer, len(chunk_buffer)
+ )
if lib.ZSTD_isError(zresult):
raise ValueError("chunk %d is not a valid zstd frame" % i)
elif zresult:
- raise ValueError("chunk %d is too small to contain a zstd frame" % i)
+ raise ValueError(
+ "chunk %d is too small to contain a zstd frame" % i
+ )
if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
raise ValueError("chunk %d missing content size in frame" % i)
@@ -2580,7 +2727,9 @@
in_buffer.size = len(chunk_buffer)
in_buffer.pos = 0
- zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+ zresult = lib.ZSTD_decompressStream(
+ self._dctx, out_buffer, in_buffer
+ )
if lib.ZSTD_isError(zresult):
raise ZstdError(
"could not decompress chunk %d: %s" % _zstd_error(zresult)
@@ -2597,7 +2746,9 @@
lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)
if self._max_window_size:
- zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx, self._max_window_size)
+ zresult = lib.ZSTD_DCtx_setMaxWindowSize(
+ self._dctx, self._max_window_size
+ )
if lib.ZSTD_isError(zresult):
raise ZstdError(
"unable to set max window size: %s" % _zstd_error(zresult)
@@ -2605,11 +2756,14 @@
zresult = lib.ZSTD_DCtx_setFormat(self._dctx, self._format)
if lib.ZSTD_isError(zresult):
- raise ZstdError("unable to set decoding format: %s" % _zstd_error(zresult))
+ raise ZstdError(
+ "unable to set decoding format: %s" % _zstd_error(zresult)
+ )
if self._dict_data and load_dict:
zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
if lib.ZSTD_isError(zresult):
raise ZstdError(
- "unable to reference prepared dictionary: %s" % _zstd_error(zresult)
+ "unable to reference prepared dictionary: %s"
+ % _zstd_error(zresult)
)
--- a/hgext/absorb.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/absorb.py Thu Feb 13 10:12:12 2020 -0800
@@ -1077,7 +1077,7 @@
b'i',
b'interactive',
None,
- _(b'interactively select which chunks to apply (EXPERIMENTAL)'),
+ _(b'interactively select which chunks to apply'),
),
(
b'e',
--- a/hgext/beautifygraph.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/beautifygraph.py Thu Feb 13 10:12:12 2020 -0800
@@ -71,6 +71,8 @@
return b'\xE2\x97\x8B' # U+25CB ○
if node == b'@':
return b'\xE2\x97\x8D' # U+25CD ◍
+ if node == b'%':
+ return b'\xE2\x97\x8E' # U+25CE ◎
if node == b'*':
return b'\xE2\x88\x97' # U+2217 ∗
if node == b'x':
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastexport.py Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,218 @@
+# Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""export repositories as git fast-import stream"""
+
+# The format specification for fast-import streams can be found at
+# https://git-scm.com/docs/git-fast-import#_input_format
+
+from __future__ import absolute_import
+import re
+
+from mercurial.i18n import _
+from mercurial.node import hex, nullrev
+from mercurial.utils import stringutil
+from mercurial import (
+ error,
+ pycompat,
+ registrar,
+ scmutil,
+)
+from .convert import convcmd
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = b"ships-with-hg-core"
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+GIT_PERSON_PROHIBITED = re.compile(b'[<>\n"]')
+GIT_EMAIL_PROHIBITED = re.compile(b"[<> \n]")
+
+
+def convert_to_git_user(authormap, user, rev):
+ mapped_user = authormap.get(user, user)
+ user_person = stringutil.person(mapped_user)
+ user_email = stringutil.email(mapped_user)
+ if GIT_EMAIL_PROHIBITED.match(user_email) or GIT_PERSON_PROHIBITED.match(
+ user_person
+ ):
+ raise error.Abort(
+ _(b"Unable to parse user into person and email for revision " + rev)
+ )
+ if user_person:
+ return b'"' + user_person + b'" <' + user_email + b'>'
+ else:
+ return b"<" + user_email + b">"
+
+
+def convert_to_git_date(date):
+ timestamp, utcoff = date
+ tzsign = b"+" if utcoff < 0 else b"-"
+ if utcoff % 60 != 0:
+ raise error.Abort(
+ _(b"UTC offset in %b is not an integer number of seconds") % (date,)
+ )
+ utcoff = abs(utcoff) // 60
+ tzh = utcoff // 60
+ tzmin = utcoff % 60
+ return b"%d " % int(timestamp) + tzsign + b"%02d%02d" % (tzh, tzmin)
+
+
+def convert_to_git_ref(branch):
+ # XXX filter/map depending on git restrictions
+ return b"refs/heads/" + branch
+
+
+def write_data(buf, data, skip_newline):
+ buf.append(b"data %d\n" % len(data))
+ buf.append(data)
+ if not skip_newline or data[-1:] != b"\n":
+ buf.append(b"\n")
+
+
+def export_commit(ui, repo, rev, marks, authormap):
+ ctx = repo[rev]
+ revid = ctx.hex()
+ if revid in marks:
+ ui.warn(_(b"warning: revision %s already exported, skipped\n") % revid)
+ return
+ parents = [p for p in ctx.parents() if p.rev() != nullrev]
+ for p in parents:
+ if p.hex() not in marks:
+ ui.warn(
+ _(b"warning: parent %s of %s has not been exported, skipped\n")
+ % (p, revid)
+ )
+ return
+
+ # For all files modified by the commit, check if they have already
+ # been exported and otherwise dump the blob with the new mark.
+ for fname in ctx.files():
+ if fname not in ctx:
+ continue
+ filectx = ctx.filectx(fname)
+ filerev = hex(filectx.filenode())
+ if filerev not in marks:
+ mark = len(marks) + 1
+ marks[filerev] = mark
+ data = filectx.data()
+ buf = [b"blob\n", b"mark :%d\n" % mark]
+ write_data(buf, data, False)
+ ui.write(*buf, keepprogressbar=True)
+ del buf
+
+ # Assign a mark for the current revision for references by
+ # latter merge commits.
+ mark = len(marks) + 1
+ marks[revid] = mark
+
+ ref = convert_to_git_ref(ctx.branch())
+ buf = [
+ b"commit %s\n" % ref,
+ b"mark :%d\n" % mark,
+ b"committer %s %s\n"
+ % (
+ convert_to_git_user(authormap, ctx.user(), revid),
+ convert_to_git_date(ctx.date()),
+ ),
+ ]
+ write_data(buf, ctx.description(), True)
+ if parents:
+ buf.append(b"from :%d\n" % marks[parents[0].hex()])
+ if len(parents) == 2:
+ buf.append(b"merge :%d\n" % marks[parents[1].hex()])
+ p0ctx = repo[parents[0]]
+ files = ctx.manifest().diff(p0ctx.manifest())
+ else:
+ files = ctx.files()
+ filebuf = []
+ for fname in files:
+ if fname not in ctx:
+ filebuf.append((fname, b"D %s\n" % fname))
+ else:
+ filectx = ctx.filectx(fname)
+ filerev = filectx.filenode()
+ fileperm = b"755" if filectx.isexec() else b"644"
+ changed = b"M %s :%d %s\n" % (fileperm, marks[hex(filerev)], fname)
+ filebuf.append((fname, changed))
+ filebuf.sort()
+ buf.extend(changed for (fname, changed) in filebuf)
+ del filebuf
+ buf.append(b"\n")
+ ui.write(*buf, keepprogressbar=True)
+ del buf
+
+
+isrev = re.compile(b"^[0-9a-f]{40}$")
+
+
+@command(
+ b"fastexport",
+ [
+ (b"r", b"rev", [], _(b"revisions to export"), _(b"REV")),
+ (b"i", b"import-marks", b"", _(b"old marks file to read"), _(b"FILE")),
+ (b"e", b"export-marks", b"", _(b"new marks file to write"), _(b"FILE")),
+ (
+ b"A",
+ b"authormap",
+ b"",
+ _(b"remap usernames using this file"),
+ _(b"FILE"),
+ ),
+ ],
+ _(b"[OPTION]... [REV]..."),
+ helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
+def fastexport(ui, repo, *revs, **opts):
+ """export repository as git fast-import stream
+
+ This command lets you dump a repository as a human-readable text stream.
+ It can be piped into corresponding import routines like "git fast-import".
+ Incremental dumps can be created by using marks files.
+ """
+ opts = pycompat.byteskwargs(opts)
+
+ revs += tuple(opts.get(b"rev", []))
+ if not revs:
+ revs = scmutil.revrange(repo, [b":"])
+ else:
+ revs = scmutil.revrange(repo, revs)
+ if not revs:
+ raise error.Abort(_(b"no revisions matched"))
+ authorfile = opts.get(b"authormap")
+ if authorfile:
+ authormap = convcmd.readauthormap(ui, authorfile)
+ else:
+ authormap = {}
+
+ import_marks = opts.get(b"import_marks")
+ marks = {}
+ if import_marks:
+ with open(import_marks, "rb") as import_marks_file:
+ for line in import_marks_file:
+ line = line.strip()
+ if not isrev.match(line) or line in marks:
+ raise error.Abort(_(b"Corrupted marks file"))
+ marks[line] = len(marks) + 1
+
+ revs.sort()
+ with ui.makeprogress(
+ _(b"exporting"), unit=_(b"revisions"), total=len(revs)
+ ) as progress:
+ for rev in revs:
+ export_commit(ui, repo, rev, marks, authormap)
+ progress.increment()
+
+ export_marks = opts.get(b"export_marks")
+ if export_marks:
+ with open(export_marks, "wb") as export_marks_file:
+ output_marks = [None] * len(marks)
+ for k, v in marks.items():
+ output_marks[v - 1] = k
+ for k in output_marks:
+ export_marks_file.write(k + b"\n")
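Editor's note: the convert_to_git_date() helper added above encodes Mercurial's (timestamp, offset-in-seconds-west-of-UTC) date tuples into git's "<unix time> <+/-HHMM>" form. A minimal standalone sketch of the same arithmetic, with made-up sample values and without the non-integer-minute abort, not part of the patch:

def to_git_date(timestamp, utcoff):
    # Mercurial stores the offset in seconds *west* of UTC, so a negative
    # offset means east of UTC and becomes a "+" timezone in git.
    tzsign = "+" if utcoff < 0 else "-"
    utcoff = abs(utcoff) // 60
    return "%d %s%02d%02d" % (int(timestamp), tzsign, utcoff // 60, utcoff % 60)

print(to_git_date(1581600000, -3600))   # 1581600000 +0100 (one hour east of UTC)
print(to_git_date(1581600000, 28800))   # 1581600000 -0800 (eight hours west of UTC)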
--- a/hgext/fix.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/fix.py Thu Feb 13 10:12:12 2020 -0800
@@ -735,15 +735,7 @@
wctx = context.overlayworkingctx(repo)
wctx.setbase(repo[newp1node])
- merge.update(
- repo,
- ctx.rev(),
- branchmerge=False,
- force=True,
- ancestor=p1rev,
- mergeancestor=False,
- wc=wctx,
- )
+ merge.revert_to(ctx, wc=wctx)
copies.graftcopies(wctx, ctx, ctx.p1())
for path in filedata.keys():
--- a/hgext/histedit.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/histedit.py Thu Feb 13 10:12:12 2020 -0800
@@ -649,7 +649,7 @@
repo.ui.setconfig(
b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
)
- stats = mergemod.graft(repo, ctx, ctx.p1(), [b'local', b'histedit'])
+ stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit'])
finally:
repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
return stats
@@ -945,7 +945,7 @@
class base(histeditaction):
def run(self):
if self.repo[b'.'].node() != self.node:
- mergemod.update(self.repo, self.node, branchmerge=False, force=True)
+ mergemod.clean_update(self.repo[self.node])
return self.continueclean()
def continuedirty(self):
--- a/hgext/lfs/TODO.rst Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/lfs/TODO.rst Thu Feb 13 10:12:12 2020 -0800
@@ -38,9 +38,6 @@
* `hg diff` is similar, and probably shouldn't see the pointer file
-#. `Fix https multiplexing, and re-enable workers
- <https://www.mercurial-scm.org/pipermail/mercurial-devel/2018-January/109916.html>`_.
-
#. Show to-be-applied rules with `hg files -r 'wdir()' 'set:lfs()'`
* `debugignore` can show file + line number, so a dedicated command could be
--- a/hgext/lfs/__init__.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/lfs/__init__.py Thu Feb 13 10:12:12 2020 -0800
@@ -181,7 +181,7 @@
b'experimental', b'lfs.disableusercache', default=False,
)
eh.configitem(
- b'experimental', b'lfs.worker-enable', default=False,
+ b'experimental', b'lfs.worker-enable', default=True,
)
eh.configitem(
--- a/hgext/lfs/blobstore.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/lfs/blobstore.py Thu Feb 13 10:12:12 2020 -0800
@@ -21,6 +21,7 @@
from mercurial import (
encoding,
error,
+ httpconnection as httpconnectionmod,
node,
pathutil,
pycompat,
@@ -94,33 +95,16 @@
pass
-class filewithprogress(object):
- """a file-like object that supports __len__ and read.
-
- Useful to provide progress information for how many bytes are read.
+class lfsuploadfile(httpconnectionmod.httpsendfile):
+ """a file-like object that supports keepalive.
"""
- def __init__(self, fp, callback):
- self._fp = fp
- self._callback = callback # func(readsize)
- fp.seek(0, os.SEEK_END)
- self._len = fp.tell()
- fp.seek(0)
-
- def __len__(self):
- return self._len
+ def __init__(self, ui, filename):
+ super(lfsuploadfile, self).__init__(ui, filename, b'rb')
+ self.read = self._data.read
- def read(self, size):
- if self._fp is None:
- return b''
- data = self._fp.read(size)
- if data:
- if self._callback:
- self._callback(len(data))
- else:
- self._fp.close()
- self._fp = None
- return data
+ def _makeprogress(self):
+ return None # progress is handled by the worker client
class local(object):
@@ -144,6 +128,17 @@
def open(self, oid):
"""Open a read-only file descriptor to the named blob, in either the
usercache or the local store."""
+ return open(self.path(oid), 'rb')
+
+ def path(self, oid):
+ """Build the path for the given blob ``oid``.
+
+ If the blob exists locally, the path may point to either the usercache
+ or the local store. If it doesn't, it will point to the local store.
+ This is meant for situations where existing code that isn't LFS aware
+ needs to open a blob. Generally, prefer the ``open`` method on this
+ class.
+ """
# The usercache is the most likely place to hold the file. Commit will
# write to both it and the local store, as will anything that downloads
# the blobs. However, things like clone without an update won't
@@ -151,9 +146,9 @@
# the usercache is the only place it _could_ be. If not present, the
# missing file msg here will indicate the local repo, not the usercache.
if self.cachevfs.exists(oid):
- return self.cachevfs(oid, b'rb')
+ return self.cachevfs.join(oid)
- return self.vfs(oid, b'rb')
+ return self.vfs.join(oid)
def download(self, oid, src, content_length):
"""Read the blob from the remote source in chunks, verify the content,
@@ -495,15 +490,17 @@
_(b'detected corrupt lfs object: %s') % oid,
hint=_(b'run hg verify'),
)
- request.data = filewithprogress(localstore.open(oid), None)
- request.get_method = lambda: r'PUT'
- request.add_header('Content-Type', 'application/octet-stream')
- request.add_header('Content-Length', len(request.data))
for k, v in headers:
request.add_header(pycompat.strurl(k), pycompat.strurl(v))
try:
+ if action == b'upload':
+ request.data = lfsuploadfile(self.ui, localstore.path(oid))
+ request.get_method = lambda: 'PUT'
+ request.add_header('Content-Type', 'application/octet-stream')
+ request.add_header('Content-Length', request.data.length)
+
with contextlib.closing(self.urlopener.open(request)) as res:
contentlength = res.info().get(b"content-length")
ui = self.ui # Shorten debug lines
@@ -545,6 +542,9 @@
raise LfsRemoteError(
_(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
)
+ finally:
+ if request.data:
+ request.data.close()
def _batch(self, pointers, localstore, action):
if action not in [b'upload', b'download']:
--- a/hgext/purge.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/purge.py Thu Feb 13 10:12:12 2020 -0800
@@ -48,6 +48,7 @@
[
(b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
(b'', b'all', None, _(b'purge ignored files too')),
+ (b'i', b'ignored', None, _(b'purge only ignored files')),
(b'', b'dirs', None, _(b'purge empty directories')),
(b'', b'files', None, _(b'purge files')),
(b'p', b'print', None, _(b'print filenames instead of deleting them')),
@@ -80,7 +81,7 @@
But it will leave untouched:
- Modified and unmodified tracked files
- - Ignored files (unless --all is specified)
+ - Ignored files (unless -i or --all is specified)
- New files added to the repository (with :hg:`add`)
The --files and --dirs options can be used to direct purge to delete
@@ -96,12 +97,19 @@
option.
'''
opts = pycompat.byteskwargs(opts)
+ cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
act = not opts.get(b'print')
eol = b'\n'
if opts.get(b'print0'):
eol = b'\0'
act = False # --print0 implies --print
+ if opts.get(b'all', False):
+ ignored = True
+ unknown = True
+ else:
+ ignored = opts.get(b'ignored', False)
+ unknown = not ignored
removefiles = opts.get(b'files')
removedirs = opts.get(b'dirs')
@@ -115,7 +123,8 @@
paths = mergemod.purge(
repo,
match,
- ignored=opts.get(b'all', False),
+ unknown=unknown,
+ ignored=ignored,
removeemptydirs=removedirs,
removefiles=removefiles,
abortonerror=opts.get(b'abort_on_err'),
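Editor's note: the unknown/ignored selection introduced above reduces to a small truth table; a minimal sketch with a hypothetical helper name, not part of the patch (the --all/-i conflict itself is rejected earlier by check_at_most_one_arg):

def purge_targets(all_flag, ignored_flag):
    # hg purge        -> unknown files only
    # hg purge -i     -> ignored files only
    # hg purge --all  -> both unknown and ignored files
    if all_flag:
        return {'unknown': True, 'ignored': True}
    return {'unknown': not ignored_flag, 'ignored': ignored_flag}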
--- a/hgext/rebase.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/rebase.py Thu Feb 13 10:12:12 2020 -0800
@@ -37,6 +37,7 @@
hg,
merge as mergemod,
mergeutil,
+ node as nodemod,
obsolete,
obsutil,
patch,
@@ -177,6 +178,7 @@
# --continue or --abort)), the original repo should be used so
# visibility-dependent revsets are correct.
self.prepared = False
+ self.resume = False
self._repo = repo
self.ui = ui
@@ -366,6 +368,7 @@
_checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
+ self.resume = True
try:
self.restorestatus()
self.collapsemsg = restorecollapsemsg(self.repo, isabort)
@@ -503,7 +506,7 @@
p.complete()
ui.note(_(b'rebase merging completed\n'))
- def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
+ def _concludenode(self, rev, p1, editor, commitmsg=None):
'''Commit the wd changes with parents p1 and p2.
Reuse commit info from rev but also store useful information in extra.
@@ -527,8 +530,6 @@
if self.inmemory:
newnode = commitmemorynode(
repo,
- p1,
- p2,
wctx=self.wctx,
extra=extra,
commitmsg=commitmsg,
@@ -540,8 +541,6 @@
else:
newnode = commitnode(
repo,
- p1,
- p2,
extra=extra,
commitmsg=commitmsg,
editor=editor,
@@ -605,8 +604,9 @@
self.skipped,
self.obsoletenotrebased,
)
- if not self.inmemory and len(repo[None].parents()) == 2:
+ if self.resume and self.wctx.p1().rev() == p1:
repo.ui.debug(b'resuming interrupted rebase\n')
+ self.resume = False
else:
overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
with ui.configoverride(overrides, b'rebase'):
@@ -614,6 +614,7 @@
repo,
rev,
p1,
+ p2,
base,
self.collapsef,
dest,
@@ -635,13 +636,9 @@
editor = cmdutil.getcommiteditor(
editform=editform, **pycompat.strkwargs(opts)
)
- newnode = self._concludenode(rev, p1, p2, editor)
+ newnode = self._concludenode(rev, p1, editor)
else:
# Skip commit if we are collapsing
- if self.inmemory:
- self.wctx.setbase(repo[p1])
- else:
- repo.setparents(repo[p1].node())
newnode = None
# Update the state
if newnode is not None:
@@ -696,8 +693,9 @@
editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
revtoreuse = max(self.state)
+ self.wctx.setparents(repo[p1].node(), repo[self.external].node())
newnode = self._concludenode(
- revtoreuse, p1, self.external, editor, commitmsg=commitmsg
+ revtoreuse, p1, editor, commitmsg=commitmsg
)
if newnode is not None:
@@ -799,9 +797,7 @@
# Update away from the rebase if necessary
if shouldupdate:
- mergemod.update(
- repo, self.originalwd, branchmerge=False, force=True
- )
+ mergemod.clean_update(repo[self.originalwd])
# Strip from the first rebased revision
if rebased:
@@ -1011,10 +1007,10 @@
action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
if action:
cmdutil.check_incompatible_arguments(
- opts, action, b'confirm', b'dry_run'
+ opts, action, [b'confirm', b'dry_run']
)
cmdutil.check_incompatible_arguments(
- opts, action, b'rev', b'source', b'base', b'dest'
+ opts, action, [b'rev', b'source', b'base', b'dest']
)
cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
@@ -1028,7 +1024,7 @@
if opts.get(b'auto_orphans'):
disallowed_opts = set(opts) - {b'auto_orphans'}
cmdutil.check_incompatible_arguments(
- opts, b'auto_orphans', *disallowed_opts
+ opts, b'auto_orphans', disallowed_opts
)
userrevs = list(repo.revs(opts.get(b'auto_orphans')))
@@ -1265,8 +1261,7 @@
if not src:
ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
return None
- rebaseset = repo.revs(b'(%ld)::', src)
- assert rebaseset
+ rebaseset = repo.revs(b'(%ld)::', src) or src
else:
base = scmutil.revrange(repo, [basef or b'.'])
if not base:
@@ -1341,6 +1336,8 @@
)
return None
+ if nodemod.wdirrev in rebaseset:
+ raise error.Abort(_(b'cannot rebase the working copy'))
rebasingwcp = repo[b'.'].rev() in rebaseset
ui.log(
b"rebase",
@@ -1420,7 +1417,7 @@
)
-def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
+def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
'''Commit the memory changes with parents p1 and p2.
Return node of committed revision.'''
# Replicates the empty check in ``repo.commit``.
@@ -1433,7 +1430,6 @@
if b'branch' in extra:
branch = extra[b'branch']
- wctx.setparents(repo[p1].node(), repo[p2].node())
memctx = wctx.tomemctx(
commitmsg,
date=date,
@@ -1447,15 +1443,13 @@
return commitres
-def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
+def commitnode(repo, editor, extra, user, date, commitmsg):
'''Commit the wd changes with parents p1 and p2.
Return node of committed revision.'''
dsguard = util.nullcontextmanager()
if not repo.ui.configbool(b'rebase', b'singletransaction'):
dsguard = dirstateguard.dirstateguard(repo, b'rebase')
with dsguard:
- repo.setparents(repo[p1].node(), repo[p2].node())
-
# Commit might fail if unresolved files exist
newnode = repo.commit(
text=commitmsg, user=user, date=date, extra=extra, editor=editor
@@ -1465,7 +1459,7 @@
return newnode
-def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
+def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
"""Rebase a single revision rev on top of p1 using base as merge ancestor"""
# Merge phase
# Update to destination and merge it with local
@@ -1475,7 +1469,7 @@
else:
if repo[b'.'].rev() != p1:
repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
- mergemod.update(repo, p1, branchmerge=False, force=True)
+ mergemod.clean_update(p1ctx)
else:
repo.ui.debug(b" already in destination\n")
# This is, alas, necessary to invalidate workingctx's manifest cache,
@@ -1499,6 +1493,7 @@
labels=[b'dest', b'source'],
wc=wctx,
)
+ wctx.setparents(p1ctx.node(), repo[p2].node())
if collapse:
copies.graftcopies(wctx, ctx, repo[dest])
else:
@@ -1678,22 +1673,6 @@
elif p in state and state[p] > 0:
np = state[p]
- # "bases" only record "special" merge bases that cannot be
- # calculated from changelog DAG (i.e. isancestor(p, np) is False).
- # For example:
- #
- # B' # rebase -s B -d D, when B was rebased to B'. dest for C
- # | C # is B', but merge base for C is B, instead of
- # D | # changelog.ancestor(C, B') == A. If changelog DAG and
- # | B # "state" edges are merged (so there will be an edge from
- # |/ # B to B'), the merge base is still ancestor(C, B') in
- # A # the merged graph.
- #
- # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
- # which uses "virtual null merge" to explain this situation.
- if isancestor(p, np):
- bases[i] = nullrev
-
# If one parent becomes an ancestor of the other, drop the ancestor
for j, x in enumerate(newps[:i]):
if x == nullrev:
@@ -1739,12 +1718,6 @@
if any(p != nullrev and isancestor(rev, p) for p in newps):
raise error.Abort(_(b'source is ancestor of destination'))
- # "rebasenode" updates to new p1, use the corresponding merge base.
- if bases[0] != nullrev:
- base = bases[0]
- else:
- base = None
-
# Check if the merge will contain unwanted changes. That may happen if
# there are multiple special (non-changelog ancestor) merge bases, which
# cannot be handled well by the 3-way merge algorithm. For example:
@@ -1760,15 +1733,16 @@
# But our merge base candidates (D and E in above case) could still be
# better than the default (ancestor(F, Z) == null). Therefore still
# pick one (so choose p1 above).
- if sum(1 for b in set(bases) if b != nullrev) > 1:
+ if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
for i, base in enumerate(bases):
- if base == nullrev:
+ if base == nullrev or base in newps:
continue
# Revisions in the side (not chosen as merge base) branch that
# might contain "surprising" contents
+ other_bases = set(bases) - {base}
siderevs = list(
- repo.revs(b'((%ld-%d) %% (%d+%d))', bases, base, base, dest)
+ repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
)
# If those revisions are covered by rebaseset, the result is good.
@@ -1786,35 +1760,40 @@
)
)
- # Choose a merge base that has a minimal number of unwanted revs.
- l, i = min(
- (len(revs), i)
- for i, revs in enumerate(unwanted)
- if revs is not None
- )
- base = bases[i]
-
- # newps[0] should match merge base if possible. Currently, if newps[i]
- # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
- # the other's ancestor. In that case, it's fine to not swap newps here.
- # (see CASE-1 and CASE-2 above)
- if i != 0 and newps[i] != nullrev:
- newps[0], newps[i] = newps[i], newps[0]
+ if any(revs is not None for revs in unwanted):
+ # Choose a merge base that has a minimal number of unwanted revs.
+ l, i = min(
+ (len(revs), i)
+ for i, revs in enumerate(unwanted)
+ if revs is not None
+ )
- # The merge will include unwanted revisions. Abort now. Revisit this if
- # we have a more advanced merge algorithm that handles multiple bases.
- if l > 0:
- unwanteddesc = _(b' or ').join(
- (
- b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
- for revs in unwanted
- if revs is not None
+ # The merge will include unwanted revisions. Abort now. Revisit this if
+ # we have a more advanced merge algorithm that handles multiple bases.
+ if l > 0:
+ unwanteddesc = _(b' or ').join(
+ (
+ b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
+ for revs in unwanted
+ if revs is not None
+ )
)
- )
- raise error.Abort(
- _(b'rebasing %d:%s will include unwanted changes from %s')
- % (rev, repo[rev], unwanteddesc)
- )
+ raise error.Abort(
+ _(b'rebasing %d:%s will include unwanted changes from %s')
+ % (rev, repo[rev], unwanteddesc)
+ )
+
+ # newps[0] should match merge base if possible. Currently, if newps[i]
+ # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
+ # the other's ancestor. In that case, it's fine to not swap newps here.
+ # (see CASE-1 and CASE-2 above)
+ if i != 0:
+ if newps[i] != nullrev:
+ newps[0], newps[i] = newps[i], newps[0]
+ bases[0], bases[i] = bases[i], bases[0]
+
+ # "rebasenode" updates to new p1, use the corresponding merge base.
+ base = bases[0]
repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))
--- a/hgext/releasenotes.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/releasenotes.py Thu Feb 13 10:12:12 2020 -0800
@@ -654,7 +654,7 @@
opts = pycompat.byteskwargs(opts)
sections = releasenotessections(ui, repo)
- cmdutil.check_incompatible_arguments(opts, b'list', b'rev', b'check')
+ cmdutil.check_incompatible_arguments(opts, b'list', [b'rev', b'check'])
if opts.get(b'list'):
return _getadmonitionlist(ui, sections)
--- a/hgext/transplant.py Fri Dec 13 10:37:45 2019 +0100
+++ b/hgext/transplant.py Thu Feb 13 10:12:12 2020 -0800
@@ -761,12 +761,12 @@
def checkopts(opts, revs):
if opts.get(b'continue'):
cmdutil.check_incompatible_arguments(
- opts, b'continue', b'branch', b'all', b'merge'
+ opts, b'continue', [b'branch', b'all', b'merge']
)
return
if opts.get(b'stop'):
cmdutil.check_incompatible_arguments(
- opts, b'stop', b'branch', b'all', b'merge'
+ opts, b'stop', [b'branch', b'all', b'merge']
)
return
if not (
--- a/mercurial/archival.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/archival.py Thu Feb 13 10:12:12 2020 -0800
@@ -355,7 +355,7 @@
if match(name):
write(name, 0o644, False, lambda: buildmetadata(ctx))
- files = [f for f in ctx.manifest().matches(match)]
+ files = list(ctx.manifest().walk(match))
total = len(files)
if total:
files.sort()
--- a/mercurial/branchmap.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/branchmap.py Thu Feb 13 10:12:12 2020 -0800
@@ -291,8 +291,8 @@
% (
_branchcachedesc(repo),
pycompat.bytestr(
- inst # pytype: disable=wrong-arg-types
- ),
+ inst
+ ), # pytype: disable=wrong-arg-types
)
)
bcache = None
--- a/mercurial/changegroup.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/changegroup.py Thu Feb 13 10:12:12 2020 -0800
@@ -1149,8 +1149,8 @@
if fastpathlinkrev:
assert not tree
return (
- manifests.__getitem__ # pytype: disable=unsupported-operands
- )
+ manifests.__getitem__
+ ) # pytype: disable=unsupported-operands
def lookupmflinknode(x):
"""Callback for looking up the linknode for manifests.
--- a/mercurial/changelog.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/changelog.py Thu Feb 13 10:12:12 2020 -0800
@@ -385,6 +385,9 @@
datafile=datafile,
checkambig=True,
mmaplargeindex=True,
+ persistentnodemap=opener.options.get(
+ b'exp-persistent-nodemap', False
+ ),
)
if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
--- a/mercurial/chgserver.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/chgserver.py Thu Feb 13 10:12:12 2020 -0800
@@ -551,40 +551,6 @@
raise ValueError(b'unexpected value in setenv request')
self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
- # Python3 has some logic to "coerce" the C locale to a UTF-8 capable
- # one, and it sets LC_CTYPE in the environment to C.UTF-8 if none of
- # 'LC_CTYPE', 'LC_ALL' or 'LANG' are set (to any value). This can be
- # disabled with PYTHONCOERCECLOCALE=0 in the environment.
- #
- # When fromui is called via _inithashstate, python has already set
- # this, so that's in the environment right when we start up the hg
- # process. Then chg will call us and tell us to set the environment to
- # the one it has; this might NOT have LC_CTYPE, so we'll need to
- # carry-forward the LC_CTYPE that was coerced in these situations.
- #
- # If this is not handled, we will fail config+env validation and fail
- # to start chg. If this is just ignored instead of carried forward, we
- # may have different behavior between chg and non-chg.
- if pycompat.ispy3:
- # Rename for wordwrapping purposes
- oldenv = encoding.environ
- if not any(
- e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in [oldenv, newenv]
- ):
- keys = [b'LC_CTYPE', b'LC_ALL', b'LANG']
- old_keys = [k for k, v in oldenv.items() if k in keys and v]
- new_keys = [k for k, v in newenv.items() if k in keys and v]
- # If the user's environment (from chg) doesn't have ANY of the
- # keys that python looks for, and the environment (from
- # initialization) has ONLY LC_CTYPE and it's set to C.UTF-8,
- # carry it forward.
- if (
- not new_keys
- and old_keys == [b'LC_CTYPE']
- and oldenv[b'LC_CTYPE'] == b'C.UTF-8'
- ):
- newenv[b'LC_CTYPE'] = oldenv[b'LC_CTYPE']
-
encoding.environ.clear()
encoding.environ.update(newenv)
@@ -731,6 +697,16 @@
# environ cleaner.
if b'CHGINTERNALMARK' in encoding.environ:
del encoding.environ[b'CHGINTERNALMARK']
+ # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
+ # it thinks the current value is "C". This breaks the hash computation and
+ # causes chg to restart in a loop.
+ if b'CHGORIG_LC_CTYPE' in encoding.environ:
+ encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
+ del encoding.environ[b'CHGORIG_LC_CTYPE']
+ elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
+ if b'LC_CTYPE' in encoding.environ:
+ del encoding.environ[b'LC_CTYPE']
+ del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
if repo:
# one chgserver can serve multiple repos. drop repo information
--- a/mercurial/cmdutil.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/cmdutil.py Thu Feb 13 10:12:12 2020 -0800
@@ -170,7 +170,12 @@
diffopts = [
(b'a', b'text', None, _(b'treat all files as text')),
- (b'g', b'git', None, _(b'use git extended diff format')),
+ (
+ b'g',
+ b'git',
+ None,
+ _(b'use git extended diff format (DEFAULT: diff.git)'),
+ ),
(b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
(b'', b'nodates', None, _(b'omit dates from diff headers')),
]
@@ -209,7 +214,9 @@
b'p',
b'show-function',
None,
- _(b'show which function each change is in'),
+ _(
+ b'show which function each change is in (DEFAULT: diff.showfunc)'
+ ),
),
(b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
]
@@ -281,11 +288,11 @@
return previous
-def check_incompatible_arguments(opts, first, *others):
+def check_incompatible_arguments(opts, first, others):
"""abort if the first argument is given along with any of the others
Unlike check_at_most_one_arg(), `others` are not mutually exclusive
- among themselves.
+ among themselves, and they're passed as a single collection.
"""
for other in others:
check_at_most_one_arg(opts, first, other)
@@ -584,15 +591,8 @@
[os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
# 3a. apply filtered patch to clean repo (clean)
if backups:
- # Equivalent to hg.revert
m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
- mergemod.update(
- repo,
- repo.dirstate.p1(),
- branchmerge=False,
- force=True,
- matcher=m,
- )
+ mergemod.revert_to(repo[b'.'], matcher=m)
# 3b. (apply)
if dopatch:
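Editor's note: the check_incompatible_arguments() signature change above just wraps the trailing option names in a single collection; the call sites updated elsewhere in this patch follow this pattern:

# before
cmdutil.check_incompatible_arguments(opts, b'abort', b'rev', b'preview')
# after
cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])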
--- a/mercurial/color.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/color.py Thu Feb 13 10:12:12 2020 -0800
@@ -44,7 +44,7 @@
b'cyan': (False, curses.COLOR_CYAN, b''),
b'white': (False, curses.COLOR_WHITE, b''),
}
-except ImportError:
+except (ImportError, AttributeError):
curses = None
_baseterminfoparams = {}
--- a/mercurial/commands.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/commands.py Thu Feb 13 10:12:12 2020 -0800
@@ -1228,7 +1228,7 @@
action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list')
if action:
- cmdutil.check_incompatible_arguments(opts, action, b'rev')
+ cmdutil.check_incompatible_arguments(opts, action, [b'rev'])
elif names or rev:
action = b'add'
elif inactive:
@@ -1236,7 +1236,9 @@
else:
action = b'list'
- cmdutil.check_incompatible_arguments(opts, b'inactive', b'delete', b'list')
+ cmdutil.check_incompatible_arguments(
+ opts, b'inactive', [b'delete', b'list']
+ )
if not names and action in {b'add', b'delete'}:
raise error.Abort(_(b"bookmark name required"))
@@ -4847,6 +4849,7 @@
abort = opts.get(b'abort')
if abort and repo.dirstate.p2() == nullid:
cmdutil.wrongtooltocontinue(repo, _(b'merge'))
+ cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
if abort:
state = cmdutil.getunfinishedstate(repo)
if state and state._opname != b'merge':
@@ -4856,10 +4859,8 @@
)
if node:
raise error.Abort(_(b"cannot specify a node with --abort"))
- if opts.get(b'rev'):
- raise error.Abort(_(b"cannot specify both --rev and --abort"))
- if opts.get(b'preview'):
- raise error.Abort(_(b"cannot specify --preview with --abort"))
+ return hg.abortmerge(repo.ui, repo)
+
if opts.get(b'rev') and node:
raise error.Abort(_(b"please specify just one revision"))
if not node:
@@ -4867,8 +4868,7 @@
if node:
node = scmutil.revsingle(repo, node).node()
-
- if not node and not abort:
+ else:
if ui.configbool(b'commands', b'merge.require-rev'):
raise error.Abort(
_(
@@ -4878,9 +4878,12 @@
)
node = repo[destutil.destmerge(repo)].node()
+ if node is None:
+ raise error.Abort(_(b'merging with the working copy has no effect'))
+
if opts.get(b'preview'):
# find nodes that are ancestors of p2 but not of p1
- p1 = repo.lookup(b'.')
+ p1 = repo[b'.'].node()
p2 = node
nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
@@ -4896,12 +4899,7 @@
force = opts.get(b'force')
labels = [b'working copy', b'merge rev']
return hg.merge(
- repo,
- node,
- force=force,
- mergeforce=force,
- labels=labels,
- abort=abort,
+ repo, node, force=force, mergeforce=force, labels=labels
)
@@ -5671,7 +5669,7 @@
@command(
b'recover',
- [(b'', b'verify', True, b"run `hg verify` after successful recover"),],
+ [(b'', b'verify', False, b"run `hg verify` after successful recover"),],
helpcategory=command.CATEGORY_MAINTENANCE,
)
def recover(ui, repo, **opts):
@@ -6648,7 +6646,12 @@
(b'i', b'ignored', None, _(b'show only ignored files')),
(b'n', b'no-status', None, _(b'hide status prefix')),
(b't', b'terse', _NOTTERSE, _(b'show the terse output (EXPERIMENTAL)')),
- (b'C', b'copies', None, _(b'show source of copied files')),
+ (
+ b'C',
+ b'copies',
+ None,
+ _(b'show source of copied files (DEFAULT: ui.statuscopies)'),
+ ),
(
b'0',
b'print0',
--- a/mercurial/configitems.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/configitems.py Thu Feb 13 10:12:12 2020 -0800
@@ -406,6 +406,9 @@
b'devel', b'legacy.exchange', default=list,
)
coreconfigitem(
+ b'devel', b'persistent-nodemap', default=False,
+)
+coreconfigitem(
b'devel', b'servercafile', default=b'',
)
coreconfigitem(
@@ -660,6 +663,9 @@
b'experimental', b'rust.index', default=False,
)
coreconfigitem(
+ b'experimental', b'exp-persistent-nodemap', default=False,
+)
+coreconfigitem(
b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
)
coreconfigitem(
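Editor's note: the experimental knob declared above is read by the changelog code in the mercurial/changelog.py hunk earlier in this patch. Enabling it for local experimentation could look roughly like the sketch below; the repo object and source label are placeholders, not part of the patch:

# Equivalent of putting this in an hgrc, expressed with the ui API:
#   [experimental]
#   exp-persistent-nodemap = yes
repo.ui.setconfig(
    b'experimental', b'exp-persistent-nodemap', True, b'example'
)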
--- a/mercurial/context.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/context.py Thu Feb 13 10:12:12 2020 -0800
@@ -267,7 +267,7 @@
def _fileinfo(self, path):
if '_manifest' in self.__dict__:
try:
- return self._manifest[path], self._manifest.flags(path)
+ return self._manifest.find(path)
except KeyError:
raise error.ManifestLookupError(
self._node, path, _(b'not found in manifest')
@@ -2357,8 +2357,7 @@
# Test the other direction -- that this path from p2 isn't a directory
# in p1 (test that p1 doesn't have any paths matching `path/*`).
match = self.match([path], default=b'path')
- matches = self.p1().manifest().matches(match)
- mfiles = matches.keys()
+ mfiles = list(self.p1().manifest().walk(match))
if len(mfiles) > 0:
if len(mfiles) == 1 and mfiles[0] == path:
return
--- a/mercurial/copies.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/copies.py Thu Feb 13 10:12:12 2020 -0800
@@ -403,13 +403,15 @@
)
if x == y or not x or not y:
return {}
+ if y.rev() is None and x == y.p1():
+ if debug:
+ repo.ui.debug(b'debug.copies: search mode: dirstate\n')
+ # short-circuit to avoid issues with merge states
+ return _dirstatecopies(repo, match)
a = y.ancestor(x)
if a == x:
if debug:
repo.ui.debug(b'debug.copies: search mode: forward\n')
- if y.rev() is None and x == y.p1():
- # short-circuit to avoid issues with merge states
- return _dirstatecopies(repo, match)
copies = _forwardcopies(x, y, match=match)
elif a == y:
if debug:
@@ -452,44 +454,34 @@
```other changed <file> which local deleted```
- Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
- "dirmove".
+ Returns a tuple where:
- "copy" is a mapping from destination name -> source name,
- where source is in c1 and destination is in c2 or vice-versa.
-
- "movewithdir" is a mapping from source name -> destination name,
- where the file at source present in one context but not the other
- needs to be moved to destination by the merge process, because the
- other context moved the directory it is in.
+ "branch_copies" an instance of branch_copies.
"diverge" is a mapping of source name -> list of destination names
for divergent renames.
- "renamedelete" is a mapping of source name -> list of destination
- names for files deleted in c1 that were renamed in c2 or vice-versa.
-
- "dirmove" is a mapping of detected source dir -> destination dir renames.
- This is needed for handling changes to new files previously grafted into
- renamed directories.
-
This function calls different copytracing algorithms based on config.
"""
# avoid silly behavior for update from empty dir
if not c1 or not c2 or c1 == c2:
- return {}, {}, {}, {}, {}
+ return branch_copies(), branch_copies(), {}
narrowmatch = c1.repo().narrowmatch()
# avoid silly behavior for parent -> working dir
if c2.node() is None and c1.node() == repo.dirstate.p1():
- return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
+ return (
+ branch_copies(_dirstatecopies(repo, narrowmatch)),
+ branch_copies(),
+ {},
+ )
copytracing = repo.ui.config(b'experimental', b'copytrace')
if stringutil.parsebool(copytracing) is False:
# stringutil.parsebool() returns None when it is unable to parse the
# value, so we should rely on making sure copytracing is on such cases
- return {}, {}, {}, {}, {}
+ return branch_copies(), branch_copies(), {}
if usechangesetcentricalgo(repo):
# The heuristics don't make sense when we need changeset-centric algos
@@ -537,15 +529,45 @@
if src not in m1:
# renamed on side 1, deleted on side 2
renamedelete[src] = dsts1
+ elif src not in mb:
+ # Work around the "short-circuit to avoid issues with merge states"
+ # thing in pathcopies(): pathcopies(x, y) can return a copy where the
+ # destination doesn't exist in y.
+ pass
elif m2[src] != mb[src]:
if not _related(c2[src], base[src]):
return
# modified on side 2
for dst in dsts1:
- if dst not in m2:
- # dst not added on side 2 (handle as regular
- # "both created" case in manifestmerge otherwise)
- copy[dst] = src
+ copy[dst] = src
+
+
+class branch_copies(object):
+ """Information about copies made on one side of a merge/graft.
+
+ "copy" is a mapping from destination name -> source name,
+ where source is in c1 and destination is in c2 or vice-versa.
+
+ "movewithdir" is a mapping from source name -> destination name,
+ where the file at source present in one context but not the other
+ needs to be moved to destination by the merge process, because the
+ other context moved the directory it is in.
+
+ "renamedelete" is a mapping of source name -> list of destination
+ names for files deleted in c1 that were renamed in c2 or vice-versa.
+
+ "dirmove" is a mapping of detected source dir -> destination dir renames.
+ This is needed for handling changes to new files previously grafted into
+ renamed directories.
+ """
+
+ def __init__(
+ self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
+ ):
+ self.copy = {} if copy is None else copy
+ self.renamedelete = {} if renamedelete is None else renamedelete
+ self.dirmove = {} if dirmove is None else dirmove
+ self.movewithdir = {} if movewithdir is None else movewithdir
def _fullcopytracing(repo, c1, c2, base):
@@ -563,6 +585,9 @@
copies1 = pathcopies(base, c1)
copies2 = pathcopies(base, c2)
+ if not (copies1 or copies2):
+ return branch_copies(), branch_copies(), {}
+
inversecopies1 = {}
inversecopies2 = {}
for dst, src in copies1.items():
@@ -570,9 +595,11 @@
for dst, src in copies2.items():
inversecopies2.setdefault(src, []).append(dst)
- copy = {}
+ copy1 = {}
+ copy2 = {}
diverge = {}
- renamedelete = {}
+ renamedelete1 = {}
+ renamedelete2 = {}
allsources = set(inversecopies1) | set(inversecopies2)
for src in allsources:
dsts1 = inversecopies1.get(src)
@@ -589,7 +616,8 @@
# and 'd' and deletes 'a'.
if dsts1 & dsts2:
for dst in dsts1 & dsts2:
- copy[dst] = src
+ copy1[dst] = src
+ copy2[dst] = src
else:
diverge[src] = sorted(dsts1 | dsts2)
elif src in m1 and src in m2:
@@ -597,27 +625,21 @@
dsts1 = set(dsts1)
dsts2 = set(dsts2)
for dst in dsts1 & dsts2:
- copy[dst] = src
+ copy1[dst] = src
+ copy2[dst] = src
# TODO: Handle cases where it was renamed on one side and copied
# on the other side
elif dsts1:
# copied/renamed only on side 1
_checksinglesidecopies(
- src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
+ src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
)
elif dsts2:
# copied/renamed only on side 2
_checksinglesidecopies(
- src, dsts2, m2, m1, mb, c1, base, copy, renamedelete
+ src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
)
- renamedeleteset = set()
- divergeset = set()
- for dsts in diverge.values():
- divergeset.update(dsts)
- for dsts in renamedelete.values():
- renamedeleteset.update(dsts)
-
# find interesting file sets from manifests
addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
@@ -630,33 +652,60 @@
if u2:
repo.ui.debug(b"%s:\n %s\n" % (header % b'other', b"\n ".join(u2)))
- fullcopy = copies1.copy()
- fullcopy.update(copies2)
- if not fullcopy:
- return copy, {}, diverge, renamedelete, {}
+ if repo.ui.debugflag:
+ renamedeleteset = set()
+ divergeset = set()
+ for dsts in diverge.values():
+ divergeset.update(dsts)
+ for dsts in renamedelete1.values():
+ renamedeleteset.update(dsts)
+ for dsts in renamedelete2.values():
+ renamedeleteset.update(dsts)
- if repo.ui.debugflag:
repo.ui.debug(
b" all copies found (* = to merge, ! = divergent, "
b"% = renamed and deleted):\n"
)
- for f in sorted(fullcopy):
- note = b""
- if f in copy:
- note += b"*"
- if f in divergeset:
- note += b"!"
- if f in renamedeleteset:
- note += b"%"
- repo.ui.debug(
- b" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note)
- )
- del divergeset
+ for side, copies in ((b"local", copies1), (b"remote", copies2)):
+ if not copies:
+ continue
+ repo.ui.debug(b" on %s side:\n" % side)
+ for f in sorted(copies):
+ note = b""
+ if f in copy1 or f in copy2:
+ note += b"*"
+ if f in divergeset:
+ note += b"!"
+ if f in renamedeleteset:
+ note += b"%"
+ repo.ui.debug(
+ b" src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
+ )
+ del renamedeleteset
+ del divergeset
repo.ui.debug(b" checking for directory renames\n")
+ dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2)
+ dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1)
+
+ branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1)
+ branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2)
+
+ return branch_copies1, branch_copies2, diverge
+
+
+def _dir_renames(repo, ctx, copy, fullcopy, addedfiles):
+ """Finds moved directories and files that should move with them.
+
+ ctx: the context for one of the sides
+ copy: files copied on the same side (as ctx)
+ fullcopy: files copied on the same side (as ctx), including those that
+ merge.manifestmerge() won't care about
+ addedfiles: added files on the other side (compared to ctx)
+ """
# generate a directory move map
- d1, d2 = c1.dirs(), c2.dirs()
+ d = ctx.dirs()
invalid = set()
dirmove = {}
@@ -667,12 +716,9 @@
if dsrc in invalid:
# already seen to be uninteresting
continue
- elif dsrc in d1 and ddst in d1:
+ elif dsrc in d and ddst in d:
# directory wasn't entirely moved locally
invalid.add(dsrc)
- elif dsrc in d2 and ddst in d2:
- # directory wasn't entirely moved remotely
- invalid.add(dsrc)
elif dsrc in dirmove and dirmove[dsrc] != ddst:
# files from the same directory moved to two different places
invalid.add(dsrc)
@@ -683,10 +729,10 @@
for i in invalid:
if i in dirmove:
del dirmove[i]
- del d1, d2, invalid
+ del d, invalid
if not dirmove:
- return copy, {}, diverge, renamedelete, {}
+ return {}, {}
dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
@@ -697,7 +743,7 @@
movewithdir = {}
# check unaccounted nonoverlapping files against directory moves
- for f in u1 + u2:
+ for f in addedfiles:
if f not in fullcopy:
for d in dirmove:
if f.startswith(d):
@@ -711,7 +757,7 @@
)
break
- return copy, movewithdir, diverge, renamedelete, dirmove
+ return dirmove, movewithdir
def _heuristicscopytracing(repo, c1, c2, base):
@@ -744,8 +790,6 @@
if c2.rev() is None:
c2 = c2.p1()
- copies = {}
-
changedfiles = set()
m1 = c1.manifest()
if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
@@ -765,10 +809,11 @@
changedfiles.update(ctx.files())
ctx = ctx.p1()
+ copies2 = {}
cp = _forwardcopies(base, c2)
for dst, src in pycompat.iteritems(cp):
if src in m1:
- copies[dst] = src
+ copies2[dst] = src
# file is missing if it isn't present in the destination, but is present in
# the base and present in the source.
@@ -777,6 +822,7 @@
filt = lambda f: f not in m1 and f in base and f in c2
missingfiles = [f for f in changedfiles if filt(f)]
+ copies1 = {}
if missingfiles:
basenametofilename = collections.defaultdict(list)
dirnametofilename = collections.defaultdict(list)
@@ -818,9 +864,9 @@
# if there are a few related copies then we'll merge
# changes into all of them. This matches the behaviour
# of upstream copytracing
- copies[candidate] = f
+ copies1[candidate] = f
- return copies, {}, {}, {}, {}
+ return branch_copies(copies1), branch_copies(copies2), {}
def _related(f1, f2):
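Editor's note: to make the new return shape of the copy-tracing functions concrete, here is a hypothetical result for a merge where only the local side renamed a file and moved a directory; all names are invented for illustration:

local = branch_copies(
    copy={b'b/x': b'a/x'},             # destination -> source, local side
    dirmove={b'a/': b'b/'},            # directory a/ moved to b/ locally
    movewithdir={b'a/new': b'b/new'},  # file added remotely that follows the move
)
remote = branch_copies()               # no copies detected on the other side
diverge = {}                           # no divergent renames
# mergecopies() now returns (local, remote, diverge) instead of five dicts.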
--- a/mercurial/crecord.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/crecord.py Thu Feb 13 10:12:12 2020 -0800
@@ -63,13 +63,13 @@
import curses.ascii
curses.error
-except ImportError:
+except (ImportError, AttributeError):
# I have no idea if wcurses works with crecord...
try:
import wcurses as curses
curses.error
- except ImportError:
+ except (ImportError, AttributeError):
# wcurses is not shipped on Windows by default, or python is not
# compiled with curses
curses = False
--- a/mercurial/debugcommands.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/debugcommands.py Thu Feb 13 10:12:12 2020 -0800
@@ -13,6 +13,7 @@
import errno
import operator
import os
+import platform
import random
import re
import socket
@@ -75,6 +76,7 @@
sshpeer,
sslutil,
streamclone,
+ tags as tagsmod,
templater,
treediscovery,
upgrade,
@@ -93,7 +95,10 @@
stringutil,
)
-from .revlogutils import deltas as deltautil
+from .revlogutils import (
+ deltas as deltautil,
+ nodemap,
+)
release = lockmod.release
@@ -1487,6 +1492,11 @@
pycompat.sysexecutable or _(b"unknown"),
)
fm.write(
+ b'pythonimplementation',
+ _(b"checking Python implementation (%s)\n"),
+ pycompat.sysbytes(platform.python_implementation()),
+ )
+ fm.write(
b'pythonver',
_(b"checking Python version (%s)\n"),
(b"%d.%d.%d" % sys.version_info[:3]),
@@ -2075,6 +2085,64 @@
@command(
+ b'debugnodemap',
+ [
+ (
+ b'',
+ b'dump-new',
+ False,
+ _(b'write a (new) persistent binary nodemap to stdout'),
+ ),
+ (b'', b'dump-disk', False, _(b'dump on-disk data to stdout')),
+ (
+ b'',
+ b'check',
+ False,
+ _(b'check that the data on disk are correct.'),
+ ),
+ (
+ b'',
+ b'metadata',
+ False,
+ _(b'display the on disk meta data for the nodemap'),
+ ),
+ ],
+)
+def debugnodemap(ui, repo, **opts):
+ """write and inspect on disk nodemap
+ """
+ if opts['dump_new']:
+ unfi = repo.unfiltered()
+ cl = unfi.changelog
+ data = nodemap.persistent_data(cl.index)
+ ui.write(data)
+ elif opts['dump_disk']:
+ unfi = repo.unfiltered()
+ cl = unfi.changelog
+ nm_data = nodemap.persisted_data(cl)
+ if nm_data is not None:
+ docket, data = nm_data
+ ui.write(data)
+ elif opts['check']:
+ unfi = repo.unfiltered()
+ cl = unfi.changelog
+ nm_data = nodemap.persisted_data(cl)
+ if nm_data is not None:
+ docket, data = nm_data
+ return nodemap.check_data(ui, cl.index, data)
+ elif opts['metadata']:
+ unfi = repo.unfiltered()
+ cl = unfi.changelog
+ nm_data = nodemap.persisted_data(cl)
+ if nm_data is not None:
+ docket, data = nm_data
+ ui.write((b"uid: %s\n") % docket.uid)
+ ui.write((b"tip-rev: %d\n") % docket.tip_rev)
+ ui.write((b"data-length: %d\n") % docket.data_length)
+ ui.write((b"data-unused: %d\n") % docket.data_unused)
+
+
+@command(
b'debugobsolete',
[
(b'', b'flags', 0, _(b'markers flag')),
@@ -3423,6 +3491,17 @@
ui.write(b'\n')
+@command(b'debugtagscache', [])
+def debugtagscache(ui, repo):
+ """display the contents of .hg/cache/hgtagsfnodes1"""
+ cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
+ for r in repo:
+ node = repo[r].node()
+ tagsnode = cache.getfnode(node, computemissing=False)
+ tagsnodedisplay = hex(tagsnode) if tagsnode else b'missing/invalid'
+ ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
+
+
@command(
b'debugtemplate',
[
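Editor's note: possible invocations of the debugnodemap command added above, shown as comments; the output file name is illustrative only:

# hg debugnodemap --dump-new > nodemap.bin   # serialize the nodemap for the current index
# hg debugnodemap --metadata                 # print uid / tip-rev / data-length / data-unused
# hg debugnodemap --check                    # verify the on-disk data against the index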
--- a/mercurial/dirstate.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/dirstate.py Thu Feb 13 10:12:12 2020 -0800
@@ -1262,6 +1262,9 @@
return files in the dirstate (in whatever state) filtered by match
'''
dmap = self._map
+ if rustmod is not None:
+ dmap = self._map._rustmap
+
if match.always():
return dmap.keys()
files = match.files()
--- a/mercurial/discovery.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/discovery.py Thu Feb 13 10:12:12 2020 -0800
@@ -448,7 +448,7 @@
if branch not in (b'default', None):
errormsg = _(
b"push creates new remote head %s on branch '%s'!"
- ) % (short(dhs[0]), branch)
+ ) % (short(dhs[0]), branch,)
elif repo[dhs[0]].bookmarks():
errormsg = _(
b"push creates new remote head %s "
--- a/mercurial/dispatch.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/dispatch.py Thu Feb 13 10:12:12 2020 -0800
@@ -624,7 +624,7 @@
except error.AmbiguousCommand:
self.badalias = _(
b"alias '%s' resolves to ambiguous command '%s'"
- ) % (self.name, cmd)
+ ) % (self.name, cmd,)
def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
# confine strings to be passed to i18n.gettext()
--- a/mercurial/exchange.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/exchange.py Thu Feb 13 10:12:12 2020 -0800
@@ -3068,7 +3068,15 @@
if not prefers:
return list(entries)
- prefers = [p.split(b'=', 1) for p in prefers]
+ def _split(p):
+ if b'=' not in p:
+ hint = _(b"each comma separated item should be key=value pairs")
+ raise error.Abort(
+ _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
+ )
+ return p.split(b'=', 1)
+
+ prefers = [_split(p) for p in prefers]
items = sorted(clonebundleentry(v, prefers) for v in entries)
return [i.value for i in items]
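Editor's note: the stricter parsing above turns a malformed ui.clonebundleprefers entry into a clean abort; a small illustration, with attribute names chosen as examples rather than taken from the patch:

# ui.clonebundleprefers = "BUNDLESPEC=gzip-v2, COMPRESSION=gzip"
#     -> [[b'BUNDLESPEC', b'gzip-v2'], [b'COMPRESSION', b'gzip']]
#
# An item with no '=' (for example a bare "gzip-v2") now aborts with
# "invalid ui.clonebundleprefers item: gzip-v2" plus the hint above,
# instead of failing later with a less helpful traceback.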
--- a/mercurial/help.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/help.py Thu Feb 13 10:12:12 2020 -0800
@@ -153,7 +153,18 @@
return doc
-def optrst(header, options, verbose):
+def parsedefaultmarker(text):
+ """given a text 'abc (DEFAULT: def.ghi)',
+ returns (b'abc', [b'def', b'ghi']). Otherwise returns None"""
+ if text[-1:] == b')':
+ marker = b' (DEFAULT: '
+ pos = text.find(marker)
+ if pos >= 0:
+ item = text[pos + len(marker) : -1]
+ return text[:pos], item.split(b'.', 2)
+
+
+def optrst(header, options, verbose, ui):
data = []
multioccur = False
for option in options:
@@ -165,7 +176,14 @@
if not verbose and any(w in desc for w in _exclkeywords):
continue
-
+ defaultstrsuffix = b''
+ if default is None:
+ parseresult = parsedefaultmarker(desc)
+ if parseresult is not None:
+ (desc, (section, name)) = parseresult
+ if ui.configbool(section, name):
+ default = True
+ defaultstrsuffix = _(b' from config')
so = b''
if shortopt:
so = b'-' + shortopt
@@ -183,7 +201,7 @@
defaultstr = pycompat.bytestr(default)
if default is True:
defaultstr = _(b"on")
- desc += _(b" (default: %s)") % defaultstr
+ desc += _(b" (default: %s)") % (defaultstr + defaultstrsuffix)
if isinstance(default, list):
lo += b" %s [+]" % optlabel
@@ -714,11 +732,13 @@
# options
if not ui.quiet and entry[1]:
- rst.append(optrst(_(b"options"), entry[1], ui.verbose))
+ rst.append(optrst(_(b"options"), entry[1], ui.verbose, ui))
if ui.verbose:
rst.append(
- optrst(_(b"global options"), commands.globalopts, ui.verbose)
+ optrst(
+ _(b"global options"), commands.globalopts, ui.verbose, ui
+ )
)
if not ui.verbose:
@@ -858,7 +878,9 @@
elif ui.verbose:
rst.append(
b'\n%s\n'
- % optrst(_(b"global options"), commands.globalopts, ui.verbose)
+ % optrst(
+ _(b"global options"), commands.globalopts, ui.verbose, ui
+ )
)
if name == b'shortlist':
rst.append(
--- a/mercurial/hg.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/hg.py Thu Feb 13 10:12:12 2020 -0800
@@ -1040,10 +1040,9 @@
def clean(repo, node, show_stats=True, quietempty=False):
"""forcibly switch the working directory to node, clobbering changes"""
stats = updaterepo(repo, node, True)
- repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
+ assert stats.unresolvedcount == 0
if show_stats:
_showstats(repo, stats, quietempty)
- return stats.unresolvedcount > 0
# naming conflict in updatetotally()
@@ -1138,19 +1137,10 @@
def merge(
- repo,
- node,
- force=None,
- remind=True,
- mergeforce=False,
- labels=None,
- abort=False,
+ repo, node, force=None, remind=True, mergeforce=False, labels=None,
):
"""Branch merge with node, resolving changes. Return true if any
unresolved conflicts."""
- if abort:
- return abortmerge(repo.ui, repo)
-
stats = mergemod.update(
repo,
node,
@@ -1182,9 +1172,9 @@
node = repo[b'.'].hex()
repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
- stats = mergemod.update(repo, node, branchmerge=False, force=True)
+ stats = mergemod.clean_update(repo[node])
+ assert stats.unresolvedcount == 0
_showstats(repo, stats)
- return stats.unresolvedcount > 0
def _incoming(
--- a/mercurial/hgweb/webutil.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/hgweb/webutil.py Thu Feb 13 10:12:12 2020 -0800
@@ -936,5 +936,5 @@
def getgraphnode(repo, ctx):
return templatekw.getgraphnodecurrent(
- repo, ctx
+ repo, ctx, {}
) + templatekw.getgraphnodesymbol(ctx)
--- a/mercurial/httpconnection.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/httpconnection.py Thu Feb 13 10:12:12 2020 -0800
@@ -39,12 +39,15 @@
self.write = self._data.write
self.length = os.fstat(self._data.fileno()).st_size
self._pos = 0
+ self._progress = self._makeprogress()
+
+ def _makeprogress(self):
# We pass double the max for total because we currently have
# to send the bundle twice in the case of a server that
# requires authentication. Since we can't know until we try
# once whether authentication will be required, just lie to
# the user and maybe the push succeeds suddenly at 50%.
- self._progress = ui.makeprogress(
+ return self.ui.makeprogress(
_(b'sending'), unit=_(b'kb'), total=(self.length // 1024 * 2)
)
--- a/mercurial/interfaces/repository.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/interfaces/repository.py Thu Feb 13 10:12:12 2020 -0800
@@ -985,18 +985,9 @@
def hasdir(dir):
"""Returns a bool indicating if a directory is in this manifest."""
- def matches(match):
- """Generate a new manifest filtered through a matcher.
-
- Returns an object conforming to the ``imanifestdict`` interface.
- """
-
def walk(match):
"""Generator of paths in manifest satisfying a matcher.
- This is equivalent to ``self.matches(match).iterkeys()`` except a new
- manifest object is not created.
-
If the matcher has explicit files listed and they don't exist in
the manifest, ``match.bad()`` is called for each missing file.
"""
@@ -1027,8 +1018,8 @@
def get(path, default=None):
"""Obtain the node value for a path or a default value if missing."""
- def flags(path, default=b''):
- """Return the flags value for a path or a default value if missing."""
+ def flags(path):
+ """Return the flags value for a path (default: empty bytestring)."""
def copy():
"""Return a copy of this manifest."""
@@ -1071,14 +1062,6 @@
as part of a larger interface.
"""
- def new():
- """Obtain a new manifest instance.
-
- Returns an object conforming to the ``imanifestrevisionwritable``
- interface. The instance will be associated with the same
- ``imanifestlog`` collection as this instance.
- """
-
def copy():
"""Obtain a copy of this manifest instance.
--- a/mercurial/localrepo.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/localrepo.py Thu Feb 13 10:12:12 2020 -0800
@@ -932,6 +932,10 @@
if ui.configbool(b'experimental', b'rust.index'):
options[b'rust.index'] = True
+ if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
+ options[b'exp-persistent-nodemap'] = True
+ if ui.configbool(b'devel', b'persistent-nodemap'):
+ options[b'devel-force-nodemap'] = True
return options
--- a/mercurial/logcmdutil.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/logcmdutil.py Thu Feb 13 10:12:12 2020 -0800
@@ -1004,7 +1004,7 @@
ui, spec, defaults=templatekw.keywords, resources=tres
)
- def formatnode(repo, ctx):
+ def formatnode(repo, ctx, cache):
props = {b'ctx': ctx, b'repo': repo}
return templ.renderdefault(props)
@@ -1038,8 +1038,9 @@
# experimental config: experimental.graphshorten
state.graphshorten = ui.configbool(b'experimental', b'graphshorten')
+ formatnode_cache = {}
for rev, type, ctx, parents in dag:
- char = formatnode(repo, ctx)
+ char = formatnode(repo, ctx, formatnode_cache)
copies = getcopies(ctx) if getcopies else None
edges = edgefn(type, char, state, rev, parents)
firstedge = next(edges)
--- a/mercurial/manifest.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/manifest.py Thu Feb 13 10:12:12 2020 -0800
@@ -23,6 +23,7 @@
from . import (
encoding,
error,
+ match as matchmod,
mdiff,
pathutil,
policy,
@@ -461,7 +462,7 @@
__bool__ = __nonzero__
def __setitem__(self, key, node):
- self._lm[key] = node, self.flags(key, b'')
+ self._lm[key] = node, self.flags(key)
def __contains__(self, key):
if key is None:
@@ -482,17 +483,11 @@
def filesnotin(self, m2, match=None):
'''Set of files in this manifest that are not in the other'''
- if match:
- m1 = self.matches(match)
- m2 = m2.matches(match)
- return m1.filesnotin(m2)
- diff = self.diff(m2)
- files = set(
- filepath
- for filepath, hashflags in pycompat.iteritems(diff)
- if hashflags[1][0] is None
- )
- return files
+ if match is not None:
+ match = matchmod.badmatch(match, lambda path, msg: None)
+ sm2 = set(m2.walk(match))
+ return {f for f in self.walk(match) if f not in sm2}
+ return {f for f in self if f not in m2}
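
The rewrite above avoids building two filtered manifest copies just to diff them; it walks both sides through the matcher and takes a set difference. A toy sketch of the same idea, with plain dicts standing in for manifests (all names are illustrative, not Mercurial APIs):

m1 = {"a.txt": "n1", "b.txt": "n2", "sub/c.txt": "n3"}
m2 = {"a.txt": "n1"}

def filesnotin(m1, m2, match=None):
    """files present in m1 but not in m2, optionally restricted by match"""
    if match is not None:
        walked2 = {f for f in m2 if match(f)}
        return {f for f in m1 if match(f) and f not in walked2}
    return {f for f in m1 if f not in m2}

assert filesnotin(m1, m2) == {"b.txt", "sub/c.txt"}
assert filesnotin(m1, m2, match=lambda f: f.startswith("sub/")) == {"sub/c.txt"}
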
@propertycache
def _dirs(self):
@@ -531,7 +526,8 @@
# avoid the entire walk if we're only looking for specific files
if self._filesfastpath(match):
for fn in sorted(fset):
- yield fn
+ if fn in self:
+ yield fn
return
for fn in self:
@@ -549,7 +545,7 @@
if not self.hasdir(fn):
match.bad(fn, None)
- def matches(self, match):
+ def _matches(self, match):
'''generate a new manifest filtered by the match argument'''
if match.always():
return self.copy()
@@ -582,8 +578,8 @@
string.
'''
if match:
- m1 = self.matches(match)
- m2 = m2.matches(match)
+ m1 = self._matches(match)
+ m2 = m2._matches(match)
return m1.diff(m2, clean=clean)
return self._lm.diff(m2._lm, clean)
@@ -596,11 +592,11 @@
except KeyError:
return default
- def flags(self, key, default=b''):
+ def flags(self, key):
try:
return self._lm[key][1]
except KeyError:
- return default
+ return b''
def copy(self):
c = manifestdict()
@@ -1079,8 +1075,8 @@
def filesnotin(self, m2, match=None):
'''Set of files in this manifest that are not in the other'''
if match and not match.always():
- m1 = self.matches(match)
- m2 = m2.matches(match)
+ m1 = self._matches(match)
+ m2 = m2._matches(match)
return m1.filesnotin(m2)
files = set()
@@ -1126,9 +1122,6 @@
def walk(self, match):
'''Generates matching file names.
- Equivalent to manifest.matches(match).iterkeys(), but without creating
- an entirely new manifest.
-
It also reports nonexistent files by marking them bad with match.bad().
'''
if match.always():
@@ -1171,16 +1164,16 @@
for f in self._dirs[p]._walk(match):
yield f
- def matches(self, match):
- '''generate a new manifest filtered by the match argument'''
- if match.always():
- return self.copy()
-
- return self._matches(match)
-
def _matches(self, match):
'''recursively generate a new manifest filtered by the match argument.
'''
+ if match.always():
+ return self.copy()
+ return self._matches_inner(match)
+
+ def _matches_inner(self, match):
+ if match.always():
+ return self.copy()
visit = match.visitchildrenset(self._dir[:-1])
if visit == b'all':
@@ -1211,7 +1204,7 @@
for dir, subm in pycompat.iteritems(self._dirs):
if visit and dir[:-1] not in visit:
continue
- m = subm._matches(match)
+ m = subm._matches_inner(match)
if not m._isempty():
ret._dirs[dir] = m
@@ -1235,8 +1228,8 @@
string.
'''
if match and not match.always():
- m1 = self.matches(match)
- m2 = m2.matches(match)
+ m1 = self._matches(match)
+ m2 = m2._matches(match)
return m1.diff(m2, clean=clean)
result = {}
emptytree = treemanifest()
@@ -1923,9 +1916,6 @@
def _storage(self):
return self._manifestlog.getstorage(b'')
- def new(self):
- return memmanifestctx(self._manifestlog)
-
def copy(self):
memmf = memmanifestctx(self._manifestlog)
memmf._manifestdict = self.read().copy()
@@ -1972,9 +1962,6 @@
def node(self):
return self._node
- def new(self):
- return memmanifestctx(self._manifestlog)
-
def copy(self):
memmf = memmanifestctx(self._manifestlog)
memmf._manifestdict = self.read().copy()
@@ -2039,9 +2026,6 @@
def _storage(self):
return self._manifestlog.getstorage(b'')
- def new(self, dir=b''):
- return memtreemanifestctx(self._manifestlog, dir=dir)
-
def copy(self):
memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
memmf._treemanifest = self._treemanifest.copy()
@@ -2124,9 +2108,6 @@
def node(self):
return self._node
- def new(self, dir=b''):
- return memtreemanifestctx(self._manifestlog, dir=dir)
-
def copy(self):
memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
memmf._treemanifest = self.read().copy()
--- a/mercurial/match.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/match.py Thu Feb 13 10:12:12 2020 -0800
@@ -24,7 +24,7 @@
)
from .utils import stringutil
-rustmod = policy.importrust('filepatterns')
+rustmod = policy.importrust('dirstate')
allpatternkinds = (
b're',
@@ -1273,15 +1273,6 @@
'''Convert a (normalized) pattern of any kind into a
regular expression.
globsuffix is appended to the regexp of globs.'''
-
- if rustmod is not None:
- try:
- return rustmod.build_single_regex(kind, pat, globsuffix)
- except rustmod.PatternError:
- raise error.ProgrammingError(
- b'not a regex pattern: %s:%s' % (kind, pat)
- )
-
if not pat and kind in (b'glob', b'relpath'):
return b''
if kind == b're':
@@ -1554,18 +1545,6 @@
This is useful to debug ignore patterns.
'''
- if rustmod is not None:
- result, warnings = rustmod.read_pattern_file(
- filepath, bool(warn), sourceinfo,
- )
-
- for warning_params in warnings:
- # Can't be easily emitted from Rust, because it would require
- # a mechanism for both gettext and calling the `warn` function.
- warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params)
-
- return result
-
syntaxes = {
b're': b'relre:',
b'regexp': b'relre:',
--- a/mercurial/merge.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/merge.py Thu Feb 13 10:12:12 2020 -0800
@@ -386,18 +386,26 @@
return configmergedriver
@util.propertycache
- def localctx(self):
+ def local(self):
if self._local is None:
- msg = b"localctx accessed but self._local isn't set"
+ msg = b"local accessed but self._local isn't set"
raise error.ProgrammingError(msg)
- return self._repo[self._local]
+ return self._local
+
+ @util.propertycache
+ def localctx(self):
+ return self._repo[self.local]
+
+ @util.propertycache
+ def other(self):
+ if self._other is None:
+ msg = b"other accessed but self._other isn't set"
+ raise error.ProgrammingError(msg)
+ return self._other
@util.propertycache
def otherctx(self):
- if self._other is None:
- msg = b"otherctx accessed but self._other isn't set"
- raise error.ProgrammingError(msg)
- return self._repo[self._other]
+ return self._repo[self.other]
def active(self):
"""Whether mergestate is active.
@@ -989,11 +997,10 @@
"""
Check for case-folding collisions.
"""
-
# If the repo is narrowed, filter out files outside the narrowspec.
narrowmatch = repo.narrowmatch()
if not narrowmatch.always():
- wmf = wmf.matches(narrowmatch)
+ pmmf = set(wmf.walk(narrowmatch))
if actions:
narrowactions = {}
for m, actionsfortype in pycompat.iteritems(actions):
@@ -1002,9 +1009,9 @@
if narrowmatch(f):
narrowactions[m].append((f, args, msg))
actions = narrowactions
-
- # build provisional merged manifest up
- pmmf = set(wmf)
+ else:
+ # build provisional merged manifest up
+ pmmf = set(wmf)
if actions:
# KEEP and EXEC are no-op
@@ -1256,17 +1263,19 @@
if matcher is not None and matcher.always():
matcher = None
- copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
-
# manifests fetched in order are going to be faster, so prime the caches
[
x.manifest()
for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
]
+ branch_copies1 = copies.branch_copies()
+ branch_copies2 = copies.branch_copies()
+ diverge = {}
if followcopies:
- ret = copies.mergecopies(repo, wctx, p2, pa)
- copy, movewithdir, diverge, renamedelete, dirmove = ret
+ branch_copies1, branch_copies2, diverge = copies.mergecopies(
+ repo, wctx, p2, pa
+ )
boolbm = pycompat.bytestr(bool(branchmerge))
boolf = pycompat.bytestr(bool(force))
@@ -1278,8 +1287,10 @@
repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
- copied = set(copy.values())
- copied.update(movewithdir.values())
+ copied1 = set(branch_copies1.copy.values())
+ copied1.update(branch_copies1.movewithdir.values())
+ copied2 = set(branch_copies2.copy.values())
+ copied2.update(branch_copies2.movewithdir.values())
if b'.hgsubstate' in m1 and wctx.rev() is None:
# Check whether sub state is modified, and overwrite the manifest
@@ -1299,10 +1310,10 @@
relevantfiles = set(ma.diff(m2).keys())
# For copied and moved files, we need to add the source file too.
- for copykey, copyvalue in pycompat.iteritems(copy):
+ for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
if copyvalue in relevantfiles:
relevantfiles.add(copykey)
- for movedirkey in movewithdir:
+ for movedirkey in branch_copies1.movewithdir:
relevantfiles.add(movedirkey)
filesmatcher = scmutil.matchfiles(repo, relevantfiles)
matcher = matchmod.intersectmatchers(matcher, filesmatcher)
@@ -1313,7 +1324,10 @@
for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
if n1 and n2: # file exists on both local and remote side
if f not in ma:
- fa = copy.get(f, None)
+ # TODO: what if they're renamed from different sources?
+ fa = branch_copies1.copy.get(
+ f, None
+ ) or branch_copies2.copy.get(f, None)
if fa is not None:
actions[f] = (
ACTION_MERGE,
@@ -1356,10 +1370,12 @@
b'versions differ',
)
elif n1: # file exists only on local side
- if f in copied:
+ if f in copied2:
pass # we'll deal with it on m2 side
- elif f in movewithdir: # directory rename, move local
- f2 = movewithdir[f]
+ elif (
+ f in branch_copies1.movewithdir
+ ): # directory rename, move local
+ f2 = branch_copies1.movewithdir[f]
if f2 in m2:
actions[f2] = (
ACTION_MERGE,
@@ -1372,8 +1388,8 @@
(f, fl1),
b'remote directory rename - move from %s' % f,
)
- elif f in copy:
- f2 = copy[f]
+ elif f in branch_copies1.copy:
+ f2 = branch_copies1.copy[f]
actions[f] = (
ACTION_MERGE,
(f, f2, f2, False, pa.node()),
@@ -1397,10 +1413,10 @@
else:
actions[f] = (ACTION_REMOVE, None, b'other deleted')
elif n2: # file exists only on remote side
- if f in copied:
+ if f in copied1:
pass # we'll deal with it on m1 side
- elif f in movewithdir:
- f2 = movewithdir[f]
+ elif f in branch_copies2.movewithdir:
+ f2 = branch_copies2.movewithdir[f]
if f2 in m1:
actions[f2] = (
ACTION_MERGE,
@@ -1413,8 +1429,8 @@
(f, fl2),
b'local directory rename - get from %s' % f,
)
- elif f in copy:
- f2 = copy[f]
+ elif f in branch_copies2.copy:
+ f2 = branch_copies2.copy[f]
if f2 in m2:
actions[f] = (
ACTION_MERGE,
@@ -1451,10 +1467,10 @@
)
elif n2 != ma[f]:
df = None
- for d in dirmove:
+ for d in branch_copies1.dirmove:
if f.startswith(d):
# new file added in a directory that was moved
- df = dirmove[d] + f[len(d) :]
+ df = branch_copies1.dirmove[d] + f[len(d) :]
break
if df is not None and df in m1:
actions[df] = (
@@ -1481,6 +1497,9 @@
# Updates "actions" in place
_filternarrowactions(narrowmatch, branchmerge, actions)
+ renamedelete = branch_copies1.renamedelete
+ renamedelete.update(branch_copies2.renamedelete)
+
return actions, diverge, renamedelete
@@ -2205,6 +2224,7 @@
labels=None,
matcher=None,
mergeforce=False,
+ updatedirstate=True,
updatecheck=None,
wc=None,
):
@@ -2288,13 +2308,6 @@
),
)
)
- # If we're doing a partial update, we need to skip updating
- # the dirstate, so make a note of any partial-ness to the
- # update here.
- if matcher is None or matcher.always():
- partial = False
- else:
- partial = True
with repo.wlock():
if wc is None:
wc = repo[None]
@@ -2507,7 +2520,11 @@
### apply phase
if not branchmerge: # just jump to the new rev
fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
- if not partial and not wc.isinmemory():
+ # If we're doing a partial update, we need to skip updating
+ # the dirstate.
+ always = matcher is None or matcher.always()
+ updatedirstate = updatedirstate and always and not wc.isinmemory()
+ if updatedirstate:
repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
# note that we're in the middle of an update
repo.vfs.write(b'updatestate', p2.hex())
@@ -2553,7 +2570,6 @@
)
)
- updatedirstate = not partial and not wc.isinmemory()
wantfiledata = updatedirstate and not branchmerge
stats, getfiledata = applyupdates(
repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
@@ -2574,15 +2590,48 @@
if not branchmerge:
sparse.prunetemporaryincludes(repo)
- if not partial:
+ if updatedirstate:
repo.hook(
b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
)
return stats
+def clean_update(ctx, wc=None):
+ """Do a clean update to the given commit.
+
+ This involves updating to the commit and discarding any changes in the
+ working copy.
+ """
+ return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
+
+
+def revert_to(ctx, matcher=None, wc=None):
+ """Revert the working copy to the given commit.
+
+ The working copy will keep its current parent(s) but its content will
+ be the same as in the given commit.
+ """
+
+ return update(
+ ctx.repo(),
+ ctx.rev(),
+ branchmerge=False,
+ force=True,
+ updatedirstate=False,
+ matcher=matcher,
+ wc=wc,
+ )
+
+
def graft(
- repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
+ repo,
+ ctx,
+ base=None,
+ labels=None,
+ keepparent=False,
+ keepconflictparent=False,
+ wctx=None,
):
"""Do a graft-like merge.
@@ -2593,7 +2642,7 @@
renames/copies appropriately.
ctx - changeset to rebase
- base - merge base, usually ctx.p1()
+ base - merge base, or ctx.p1() if not specified
labels - merge labels eg ['local', 'graft']
keepparent - keep second parent if any
keepconflictparent - if unresolved, keep parent used for the merge
@@ -2605,8 +2654,9 @@
# to copy commits), and 2) informs update that the incoming changes are
# newer than the destination so it doesn't prompt about "remote changed foo
# which local deleted".
- wctx = repo[None]
+ wctx = wctx or repo[None]
pctx = wctx.p1()
+ base = base or ctx.p1()
mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
stats = update(
@@ -2617,6 +2667,7 @@
base.node(),
mergeancestor=mergeancestor,
labels=labels,
+ wc=wctx,
)
if keepconflictparent and stats.unresolvedcount:
@@ -2631,17 +2682,23 @@
if pother == pctx.node():
pother = nullid
- with repo.dirstate.parentchange():
- repo.setparents(pctx.node(), pother)
- repo.dirstate.write(repo.currenttransaction())
+ if wctx.isinmemory():
+ wctx.setparents(pctx.node(), pother)
# fix up dirstate for copies and renames
copies.graftcopies(wctx, ctx, base)
+ else:
+ with repo.dirstate.parentchange():
+ repo.setparents(pctx.node(), pother)
+ repo.dirstate.write(repo.currenttransaction())
+ # fix up dirstate for copies and renames
+ copies.graftcopies(wctx, ctx, base)
return stats
def purge(
repo,
matcher,
+ unknown=True,
ignored=False,
removeemptydirs=True,
removefiles=True,
@@ -2653,7 +2710,9 @@
``matcher`` is a matcher configured to scan the working directory -
potentially a subset.
- ``ignored`` controls whether ignored files should also be purged.
+ ``unknown`` controls whether unknown files should be purged.
+
+ ``ignored`` controls whether ignored files should be purged.
``removeemptydirs`` controls whether empty directories should be removed.
@@ -2690,7 +2749,7 @@
directories = []
matcher.traversedir = directories.append
- status = repo.status(match=matcher, ignored=ignored, unknown=True)
+ status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
if removefiles:
for f in sorted(status.unknown + status.ignored):
--- a/mercurial/pathutil.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/pathutil.py Thu Feb 13 10:12:12 2020 -0800
@@ -84,7 +84,7 @@
_(b"path contains illegal component: %s") % path
)
if b'.hg' in _lowerclean(path):
- lparts = [_lowerclean(p.lower()) for p in parts]
+ lparts = [_lowerclean(p) for p in parts]
for p in b'.hg', b'.hg.':
if p in lparts[1:]:
pos = lparts.index(p)
@@ -99,10 +99,11 @@
parts.pop()
normparts.pop()
- prefixes = []
# It's important that we check the path parts starting from the root.
- # This means we won't accidentally traverse a symlink into some other
- # filesystem (which is potentially expensive to access).
+ # We don't want to add "foo/bar/baz" to auditeddir before checking if
+ # there's a "foo/.hg" directory. This also means we won't accidentally
+ # traverse a symlink into some other filesystem (which is potentially
+ # expensive to access).
for i in range(len(parts)):
prefix = pycompat.ossep.join(parts[: i + 1])
normprefix = pycompat.ossep.join(normparts[: i + 1])
@@ -110,13 +111,11 @@
continue
if self._realfs:
self._checkfs(prefix, path)
- prefixes.append(normprefix)
+ if self._cached:
+ self.auditeddir.add(normprefix)
if self._cached:
self.audited.add(normpath)
- # only add prefixes to the cache after checking everything: we don't
- # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
- self.auditeddir.update(prefixes)
def _checkfs(self, prefix, path):
"""raise exception if a file system backed check fails"""
--- a/mercurial/posix.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/posix.py Thu Feb 13 10:12:12 2020 -0800
@@ -324,9 +324,8 @@
open(fullpath, b'w').close()
except IOError as inst:
if (
- inst[0] # pytype: disable=unsupported-operands
- == errno.EACCES
- ):
+ inst[0] == errno.EACCES
+ ): # pytype: disable=unsupported-operands
# If we can't write to cachedir, just pretend
# that the fs is readonly and by association
# that the fs won't support symlinks. This
--- a/mercurial/profiling.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/profiling.py Thu Feb 13 10:12:12 2020 -0800
@@ -186,6 +186,7 @@
self._output = None
self._fp = None
self._fpdoclose = True
+ self._flushfp = None
self._profiler = None
self._enabled = enabled
self._entered = False
@@ -246,6 +247,8 @@
else:
self._fpdoclose = False
self._fp = self._ui.ferr
+ # Ensure we've flushed fout before writing to ferr.
+ self._flushfp = self._ui.fout
if proffn is not None:
pass
@@ -265,6 +268,7 @@
def __exit__(self, exception_type, exception_value, traceback):
propagate = None
if self._profiler is not None:
+ self._uiflush()
propagate = self._profiler.__exit__(
exception_type, exception_value, traceback
)
@@ -280,3 +284,7 @@
def _closefp(self):
if self._fpdoclose and self._fp is not None:
self._fp.close()
+
+ def _uiflush(self):
+ if self._flushfp:
+ self._flushfp.flush()
--- a/mercurial/pure/parsers.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/pure/parsers.py Thu Feb 13 10:12:12 2020 -0800
@@ -141,6 +141,50 @@
self._extra = self._extra[: i - self._lgt]
+class PersistentNodeMapIndexObject(IndexObject):
+ """a Debug oriented class to test persistent nodemap
+
+ We need a simple python object to test API and higher level behavior. See
+ the Rust implementation for more serious usage. This should be used only
+ through the dedicated `devel.persistent-nodemap` config.
+ """
+
+ def nodemap_data_all(self):
+ """Return bytes containing a full serialization of a nodemap
+
+ The nodemap should be valid for the full set of revisions in the
+ index."""
+ return nodemaputil.persistent_data(self)
+
+ def nodemap_data_incremental(self):
+ """Return bytes containing a incremental update to persistent nodemap
+
+ This containst the data for an append-only update of the data provided
+ in the last call to `update_nodemap_data`.
+ """
+ if self._nm_root is None:
+ return None
+ docket = self._nm_docket
+ changed, data = nodemaputil.update_persistent_data(
+ self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
+ )
+
+ self._nm_root = self._nm_max_idx = self._nm_docket = None
+ return docket, changed, data
+
+ def update_nodemap_data(self, docket, nm_data):
+ """provide full block of persisted binary data for a nodemap
+
+ The data are expected to come from disk. See `nodemap_data_all` for a
+ produceur of such data."""
+ if nm_data is not None:
+ self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
+ if self._nm_root:
+ self._nm_docket = docket
+ else:
+ self._nm_root = self._nm_max_idx = self._nm_docket = None
+
+
class InlinedIndexObject(BaseIndexObject):
def __init__(self, data, inline=0):
self._data = data
@@ -188,6 +232,12 @@
return InlinedIndexObject(data, inline), (0, data)
+def parse_index_devel_nodemap(data, inline):
+ """like parse_index2, but alway return a PersistentNodeMapIndexObject
+ """
+ return PersistentNodeMapIndexObject(data), None
+
+
def parse_dirstate(dmap, copymap, st):
parents = [st[:20], st[20:40]]
# dereference fields so they will be local in loop
--- a/mercurial/revlog.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/revlog.py Thu Feb 13 10:12:12 2020 -0800
@@ -352,6 +352,21 @@
return p
+NodemapRevlogIO = None
+
+if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+
+ class NodemapRevlogIO(revlogio):
+ """A debug oriented IO class that return a PersistentNodeMapIndexObject
+
+ The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
+ """
+
+ def parseindex(self, data, inline):
+ index, cache = parsers.parse_index_devel_nodemap(data, inline)
+ return index, cache
+
+
class rustrevlogio(revlogio):
def parseindex(self, data, inline):
index, cache = super(rustrevlogio, self).parseindex(data, inline)
@@ -407,6 +422,7 @@
mmaplargeindex=False,
censorable=False,
upperboundcomp=None,
+ persistentnodemap=False,
):
"""
create a revlog object
@@ -418,6 +434,10 @@
self.upperboundcomp = upperboundcomp
self.indexfile = indexfile
self.datafile = datafile or (indexfile[:-2] + b".d")
+ self.nodemap_file = None
+ if persistentnodemap:
+ self.nodemap_file = indexfile[:-2] + b".n"
+
self.opener = opener
# When True, indexfile is opened with checkambig=True at writing, to
# avoid file stat ambiguity.
@@ -435,6 +455,7 @@
self._maxchainlen = None
self._deltabothparents = True
self.index = None
+ self._nodemap_docket = None
# Mapping of partial identifiers to full nodes.
self._pcache = {}
# Mapping of revision integer to full node.
@@ -524,6 +545,9 @@
indexdata = b''
self._initempty = True
try:
+ nodemap_data = nodemaputil.persisted_data(self)
+ if nodemap_data is not None:
+ self._nodemap_docket = nodemap_data[0]
with self._indexfp() as f:
if (
mmapindexthreshold is not None
@@ -591,13 +615,31 @@
self._storedeltachains = True
+ devel_nodemap = (
+ self.nodemap_file
+ and opts.get(b'devel-force-nodemap', False)
+ and NodemapRevlogIO is not None
+ )
+
self._io = revlogio()
if self.version == REVLOGV0:
self._io = revlogoldio()
+ elif devel_nodemap:
+ self._io = NodemapRevlogIO()
elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
self._io = rustrevlogio()
try:
d = self._io.parseindex(indexdata, self._inline)
+ index, _chunkcache = d
+ use_nodemap = (
+ not self._inline
+ and self.nodemap_file is not None
+ and util.safehasattr(index, 'update_nodemap_data')
+ )
+ if use_nodemap:
+ nodemap_data = nodemaputil.persisted_data(self)
+ if nodemap_data is not None:
+ index.update_nodemap_data(*nodemap_data)
except (ValueError, IndexError):
raise error.RevlogError(
_(b"index %s is corrupted") % self.indexfile
@@ -1960,6 +2002,7 @@
# manager
tr.replace(self.indexfile, trindex * self._io.size)
+ nodemaputil.setup_persistent_nodemap(tr, self)
self._chunkclear()
def _nodeduplicatecallback(self, transaction, node):
@@ -2286,6 +2329,7 @@
ifh.write(data[0])
ifh.write(data[1])
self._enforceinlinesize(transaction, ifh)
+ nodemaputil.setup_persistent_nodemap(transaction, self)
def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
"""
--- a/mercurial/revlogutils/nodemap.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/revlogutils/nodemap.py Thu Feb 13 10:12:12 2020 -0800
@@ -7,9 +7,513 @@
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
-from .. import error
+
+import os
+import re
+import struct
+
+from .. import (
+ error,
+ node as nodemod,
+ util,
+)
class NodeMap(dict):
def __missing__(self, x):
raise error.RevlogError(b'unknown node: %s' % x)
+
+
+def persisted_data(revlog):
+ """read the nodemap for a revlog from disk"""
+ if revlog.nodemap_file is None:
+ return None
+ pdata = revlog.opener.tryread(revlog.nodemap_file)
+ if not pdata:
+ return None
+ offset = 0
+ (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
+ if version != ONDISK_VERSION:
+ return None
+ offset += S_VERSION.size
+ headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
+ uid_size, tip_rev, data_length, data_unused = headers
+ offset += S_HEADER.size
+ docket = NodeMapDocket(pdata[offset : offset + uid_size])
+ docket.tip_rev = tip_rev
+ docket.data_length = data_length
+ docket.data_unused = data_unused
+
+ filename = _rawdata_filepath(revlog, docket)
+ data = revlog.opener.tryread(filename)
+ if len(data) < data_length:
+ return None
+ elif len(data) > data_length:
+ data = data[:data_length]
+ return docket, data
+
+
+def setup_persistent_nodemap(tr, revlog):
+ """Install whatever is needed transaction side to persist a nodemap on disk
+
+ (only actually persist the nodemap if this is relevant for this revlog)
+ """
+ if revlog._inline:
+ return # inlined revlogs are too small for this to be relevant
+ if revlog.nodemap_file is None:
+ return # we do not use persistent_nodemap on this revlog
+ callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
+ if tr.hasfinalize(callback_id):
+ return # no need to register again
+ tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))
+
+
+def _persist_nodemap(tr, revlog):
+ """Write nodemap data on disk for a given revlog
+ """
+ if getattr(revlog, 'filteredrevs', ()):
+ raise error.ProgrammingError(
+ "cannot persist nodemap of a filtered changelog"
+ )
+ if revlog.nodemap_file is None:
+ msg = "calling persist nodemap on a revlog without the feature enableb"
+ raise error.ProgrammingError(msg)
+
+ can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
+ ondisk_docket = revlog._nodemap_docket
+
+ data = None
+ # first attempt an incremental update of the data
+ if can_incremental and ondisk_docket is not None:
+ target_docket = revlog._nodemap_docket.copy()
+ (
+ src_docket,
+ data_changed_count,
+ data,
+ ) = revlog.index.nodemap_data_incremental()
+ if src_docket != target_docket:
+ data = None
+ else:
+ datafile = _rawdata_filepath(revlog, target_docket)
+ # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+ # store vfs
+ with revlog.opener(datafile, b'r+') as fd:
+ fd.seek(target_docket.data_length)
+ fd.write(data)
+ fd.seek(0)
+ new_data = fd.read(target_docket.data_length + len(data))
+ target_docket.data_length += len(data)
+ target_docket.data_unused += data_changed_count
+
+ if data is None:
+ # otherwise fallback to a full new export
+ target_docket = NodeMapDocket()
+ datafile = _rawdata_filepath(revlog, target_docket)
+ if util.safehasattr(revlog.index, "nodemap_data_all"):
+ data = revlog.index.nodemap_data_all()
+ else:
+ data = persistent_data(revlog.index)
+ # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+ # store vfs
+ new_data = data
+ with revlog.opener(datafile, b'w') as fd:
+ fd.write(data)
+ target_docket.data_length = len(data)
+ target_docket.tip_rev = revlog.tiprev()
+ # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+ # store vfs
+ with revlog.opener(revlog.nodemap_file, b'w', atomictemp=True) as fp:
+ fp.write(target_docket.serialize())
+ revlog._nodemap_docket = target_docket
+ if util.safehasattr(revlog.index, "update_nodemap_data"):
+ revlog.index.update_nodemap_data(target_docket, new_data)
+
+ # EXP-TODO: if the transaction aborts, we should remove the new data and
+ # reinstall the old one.
+
+ # search for old raw data files in all cases; some older process might have
+ # left one behind.
+ olds = _other_rawdata_filepath(revlog, target_docket)
+ if olds:
+ realvfs = getattr(revlog, '_realopener', revlog.opener)
+
+ def cleanup(tr):
+ for oldfile in olds:
+ realvfs.tryunlink(oldfile)
+
+ callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
+ tr.addpostclose(callback_id, cleanup)
+
+
+### Nodemap docket file
+#
+# The nodemap data are stored on disk using 2 files:
+#
+# * a raw data file containing a persistent nodemap
+# (see `Nodemap Trie` section)
+#
+# * a small "docket" file containing metadata
+#
+# While the nodemap data can be multiple tens of megabytes, the "docket" is
+# small, so it is easy to update it automatically or to duplicate its content
+# during a transaction.
+#
+# Multiple raw data files can exist at the same time (the currently valid one
+# and a new one being used by an in-progress transaction). To accommodate
+# this, the filename hosting the raw data has a variable part. The exact
+# filename is specified inside the "docket" file.
+#
+# The docket file contains information to find, qualify and validate the raw
+# data. Its content is currently very light, but it will expand as the on disk
+# nodemap gains the necessary features to be used in production.
+
+# version 0 is experimental, no BC guarantee, do not use outside of tests.
+ONDISK_VERSION = 0
+S_VERSION = struct.Struct(">B")
+S_HEADER = struct.Struct(">BQQQ")
+
+ID_SIZE = 8
+
+
+def _make_uid():
+ """return a new unique identifier.
+
+ The identifier is random and composed of ascii characters."""
+ return nodemod.hex(os.urandom(ID_SIZE))
+
+
+class NodeMapDocket(object):
+ """metadata associated with persistent nodemap data
+
+ The persistent data may come from disk or be on their way to disk.
+ """
+
+ def __init__(self, uid=None):
+ if uid is None:
+ uid = _make_uid()
+ self.uid = uid
+ self.tip_rev = None
+ self.data_length = None
+ self.data_unused = 0
+
+ def copy(self):
+ new = NodeMapDocket(uid=self.uid)
+ new.tip_rev = self.tip_rev
+ new.data_length = self.data_length
+ new.data_unused = self.data_unused
+ return new
+
+ def __cmp__(self, other):
+ if self.uid < other.uid:
+ return -1
+ if self.uid > other.uid:
+ return 1
+ elif self.data_length < other.data_length:
+ return -1
+ elif self.data_length > other.data_length:
+ return 1
+ return 0
+
+ def __eq__(self, other):
+ return self.uid == other.uid and self.data_length == other.data_length
+
+ def serialize(self):
+ """return serialized bytes for a docket using the passed uid"""
+ data = []
+ data.append(S_VERSION.pack(ONDISK_VERSION))
+ headers = (
+ len(self.uid),
+ self.tip_rev,
+ self.data_length,
+ self.data_unused,
+ )
+ data.append(S_HEADER.pack(*headers))
+ data.append(self.uid)
+ return b''.join(data)
+
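
A standalone round-trip of the docket layout defined by the constants above: one version byte, a header with uid length, tip revision, data length and unused byte count, then the uid itself. The uid also determines the raw data filename (e.g. a `00changelog.n` docket pointing at `00changelog-<uid>.nd`, see `_rawdata_filepath` below). The values in this sketch are illustrative:

import struct

S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQ")
ONDISK_VERSION = 0

def serialize(uid, tip_rev, data_length, data_unused):
    header = S_HEADER.pack(len(uid), tip_rev, data_length, data_unused)
    return S_VERSION.pack(ONDISK_VERSION) + header + uid

def parse(pdata):
    offset = S_VERSION.size
    uid_size, tip_rev, data_length, data_unused = S_HEADER.unpack(
        pdata[offset : offset + S_HEADER.size]
    )
    offset += S_HEADER.size
    uid = pdata[offset : offset + uid_size]
    return uid, tip_rev, data_length, data_unused

raw = serialize(b"1234567890abcdef", 41, 64 * 32, 0)
assert parse(raw) == (b"1234567890abcdef", 41, 2048, 0)
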
+
+def _rawdata_filepath(revlog, docket):
+ """The (vfs relative) nodemap's rawdata file for a given uid"""
+ prefix = revlog.nodemap_file[:-2]
+ return b"%s-%s.nd" % (prefix, docket.uid)
+
+
+def _other_rawdata_filepath(revlog, docket):
+ prefix = revlog.nodemap_file[:-2]
+ pattern = re.compile(b"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
+ new_file_path = _rawdata_filepath(revlog, docket)
+ new_file_name = revlog.opener.basename(new_file_path)
+ dirpath = revlog.opener.dirname(new_file_path)
+ others = []
+ for f in revlog.opener.listdir(dirpath):
+ if pattern.match(f) and f != new_file_name:
+ others.append(f)
+ return others
+
+
+### Nodemap Trie
+#
+# This is a simple reference implementation to compute and persist a nodemap
+# trie. This reference implementation is write-only. The Python version of
+# this is not expected to be actually used, since it won't provide any
+# performance improvement over the existing non-persistent C implementation.
+#
+# The nodemap is persisted as a trie using 4-bit addresses and 16-entry
+# blocks. Each revision can be addressed using its node's shortest prefix.
+#
+# The trie is stored as a sequence of blocks. Each block contains 16 entries
+# (signed 32-bit integers, big endian). Each entry can be one of the following:
+#
+# * value >= 0 -> index of sub-block
+# * value == -1 -> no value
+# * value < -1 -> a revision value: rev = -(value+2)
+#
+# The implementation focuses on simplicity, not on performance. A Rust
+# implementation should provide an efficient version of the same binary
+# persistence. This reference Python implementation is never meant to be
+# used extensively in production.
+
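
A small illustration of the entry encoding described above, using the same block packing as the `S_BLOCK`, `NO_ENTRY` and `REV_OFFSET` definitions below: -1 marks an empty slot, values >= 0 point at another block, and values below -1 encode a revision via the involution rev -> -(rev + 2). The block contents here are made up:

import struct

S_BLOCK = struct.Struct(">" + ("l" * 16))
NO_ENTRY = -1
REV_OFFSET = 2

def transform_rev(rev):
    # involution: encoding and decoding are the same operation
    return -(rev + REV_OFFSET)

assert transform_rev(0) == -2
assert transform_rev(transform_rev(5)) == 5

# a block whose nibble 0x3 stores revision 7 and whose nibble 0xa points at
# the block stored at index 1 in the file
entries = [NO_ENTRY] * 16
entries[0x3] = transform_rev(7)
entries[0xA] = 1
packed = S_BLOCK.pack(*entries)
assert len(packed) == 16 * 4  # sixteen 4-byte big-endian integers per block
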
+
+def persistent_data(index):
+ """return the persistent binary form for a nodemap for a given index
+ """
+ trie = _build_trie(index)
+ return _persist_trie(trie)
+
+
+def update_persistent_data(index, root, max_idx, last_rev):
+ """return the incremental update for persistent nodemap from a given index
+ """
+ changed_block, trie = _update_trie(index, root, last_rev)
+ return (
+ changed_block * S_BLOCK.size,
+ _persist_trie(trie, existing_idx=max_idx),
+ )
+
+
+S_BLOCK = struct.Struct(">" + ("l" * 16))
+
+NO_ENTRY = -1
+# rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
+REV_OFFSET = 2
+
+
+def _transform_rev(rev):
+ """Return the number used to represent the rev in the tree.
+
+ (or retrieve a rev number from such representation)
+
+ Note that this is an involution, a function equal to its inverse (i.e.
+ which gives the identity when applied to itself).
+ """
+ return -(rev + REV_OFFSET)
+
+
+def _to_int(hex_digit):
+ """turn an hexadecimal digit into a proper integer"""
+ return int(hex_digit, 16)
+
+
+class Block(dict):
+ """represent a block of the Trie
+
+ contains up to 16 entry indexed from 0 to 15"""
+
+ def __init__(self):
+ super(Block, self).__init__()
+ # If this block exists on disk, here is its ID
+ self.ondisk_id = None
+
+ def __iter__(self):
+ return iter(self.get(i) for i in range(16))
+
+
+def _build_trie(index):
+ """build a nodemap trie
+
+ The nodemap stores the revision number for each unique prefix.
+
+ Each block is a dictionary with keys in `[0, 15]`. Values are either
+ another block or a revision number.
+ """
+ root = Block()
+ for rev in range(len(index)):
+ hex = nodemod.hex(index[rev][7])
+ _insert_into_block(index, 0, root, rev, hex)
+ return root
+
+
+def _update_trie(index, root, last_rev):
+ """consume"""
+ changed = 0
+ for rev in range(last_rev + 1, len(index)):
+ hex = nodemod.hex(index[rev][7])
+ changed += _insert_into_block(index, 0, root, rev, hex)
+ return changed, root
+
+
+def _insert_into_block(index, level, block, current_rev, current_hex):
+ """insert a new revision in a block
+
+ index: the index we are adding revision for
+ level: the depth of the current block in the trie
+ block: the block currently being considered
+ current_rev: the revision number we are adding
+ current_hex: the hexadecimal representation of the node for that revision
+ """
+ changed = 1
+ if block.ondisk_id is not None:
+ block.ondisk_id = None
+ hex_digit = _to_int(current_hex[level : level + 1])
+ entry = block.get(hex_digit)
+ if entry is None:
+ # no entry, simply store the revision number
+ block[hex_digit] = current_rev
+ elif isinstance(entry, dict):
+ # need to recurse to an underlying block
+ changed += _insert_into_block(
+ index, level + 1, entry, current_rev, current_hex
+ )
+ else:
+ # collision with a previously unique prefix, inserting new
+ # vertices to fit both entries.
+ other_hex = nodemod.hex(index[entry][7])
+ other_rev = entry
+ new = Block()
+ block[hex_digit] = new
+ _insert_into_block(index, level + 1, new, other_rev, other_hex)
+ _insert_into_block(index, level + 1, new, current_rev, current_hex)
+ return changed
+
+
+def _persist_trie(root, existing_idx=None):
+ """turn a nodemap trie into persistent binary data
+
+ See `_build_trie` for nodemap trie structure"""
+ block_map = {}
+ if existing_idx is not None:
+ base_idx = existing_idx + 1
+ else:
+ base_idx = 0
+ chunks = []
+ for tn in _walk_trie(root):
+ if tn.ondisk_id is not None:
+ block_map[id(tn)] = tn.ondisk_id
+ else:
+ block_map[id(tn)] = len(chunks) + base_idx
+ chunks.append(_persist_block(tn, block_map))
+ return b''.join(chunks)
+
+
+def _walk_trie(block):
+ """yield all the block in a trie
+
+ Children blocks are always yield before their parent block.
+ """
+ for (_, item) in sorted(block.items()):
+ if isinstance(item, dict):
+ for sub_block in _walk_trie(item):
+ yield sub_block
+ yield block
+
+
+def _persist_block(block_node, block_map):
+ """produce persistent binary data for a single block
+
+ Children blocks are assumed to be already persisted and present in
+ block_map.
+ """
+ data = tuple(_to_value(v, block_map) for v in block_node)
+ return S_BLOCK.pack(*data)
+
+
+def _to_value(item, block_map):
+ """persist any value as an integer"""
+ if item is None:
+ return NO_ENTRY
+ elif isinstance(item, dict):
+ return block_map[id(item)]
+ else:
+ return _transform_rev(item)
+
+
+def parse_data(data):
+ """parse parse nodemap data into a nodemap Trie"""
+ if (len(data) % S_BLOCK.size) != 0:
+ msg = "nodemap data size is not a multiple of block size (%d): %d"
+ raise error.Abort(msg % (S_BLOCK.size, len(data)))
+ if not data:
+ return Block(), None
+ block_map = {}
+ new_blocks = []
+ for i in range(0, len(data), S_BLOCK.size):
+ block = Block()
+ block.ondisk_id = len(block_map)
+ block_map[block.ondisk_id] = block
+ block_data = data[i : i + S_BLOCK.size]
+ values = S_BLOCK.unpack(block_data)
+ new_blocks.append((block, values))
+ for b, values in new_blocks:
+ for idx, v in enumerate(values):
+ if v == NO_ENTRY:
+ continue
+ elif v >= 0:
+ b[idx] = block_map[v]
+ else:
+ b[idx] = _transform_rev(v)
+ return block, i // S_BLOCK.size
+
+
+# debug utility
+
+
+def check_data(ui, index, data):
+ """verify that the provided nodemap data are valid for the given idex"""
+ ret = 0
+ ui.status((b"revision in index: %d\n") % len(index))
+ root, __ = parse_data(data)
+ all_revs = set(_all_revisions(root))
+ ui.status((b"revision in nodemap: %d\n") % len(all_revs))
+ for r in range(len(index)):
+ if r not in all_revs:
+ msg = b" revision missing from nodemap: %d\n" % r
+ ui.write_err(msg)
+ ret = 1
+ else:
+ all_revs.remove(r)
+ nm_rev = _find_node(root, nodemod.hex(index[r][7]))
+ if nm_rev is None:
+ msg = b" revision node does not match any entries: %d\n" % r
+ ui.write_err(msg)
+ ret = 1
+ elif nm_rev != r:
+ msg = (
+ b" revision node does not match the expected revision: "
+ b"%d != %d\n" % (r, nm_rev)
+ )
+ ui.write_err(msg)
+ ret = 1
+
+ if all_revs:
+ for r in sorted(all_revs):
+ msg = b" extra revision in nodemap: %d\n" % r
+ ui.write_err(msg)
+ ret = 1
+ return ret
+
+
+def _all_revisions(root):
+ """return all revisions stored in a Trie"""
+ for block in _walk_trie(root):
+ for v in block:
+ if v is None or isinstance(v, Block):
+ continue
+ yield v
+
+
+def _find_node(block, node):
+ """find the revision associated with a given node"""
+ entry = block.get(_to_int(node[0:1]))
+ if isinstance(entry, dict):
+ return _find_node(entry, node[1:])
+ return entry
--- a/mercurial/revset.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/revset.py Thu Feb 13 10:12:12 2020 -0800
@@ -769,6 +769,38 @@
return subset
+@predicate(b'conflictlocal()', safe=True)
+def conflictlocal(repo, subset, x):
+ """The local side of the merge, if currently in an unresolved merge.
+
+ "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
+ """
+ getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
+ from . import merge
+
+ mergestate = merge.mergestate.read(repo)
+ if mergestate.active() and repo.changelog.hasnode(mergestate.local):
+ return subset & {repo.changelog.rev(mergestate.local)}
+
+ return baseset()
+
+
+@predicate(b'conflictother()', safe=True)
+def conflictother(repo, subset, x):
+ """The other side of the merge, if currently in an unresolved merge.
+
+ "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
+ """
+ getargs(x, 0, 0, _(b"conflictother takes no arguments"))
+ from . import merge
+
+ mergestate = merge.mergestate.read(repo)
+ if mergestate.active() and repo.changelog.hasnode(mergestate.other):
+ return subset & {repo.changelog.rev(mergestate.other)}
+
+ return baseset()
+
+
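
Both predicates follow the same pattern; here is a toy sketch with plain sets and dicts standing in for the changelog and merge state (none of these names are Mercurial APIs):

def conflict_side(subset, changelog_revs, mergestate_node):
    """restrict subset to the revision recorded for one side of the merge"""
    if mergestate_node is not None and mergestate_node in changelog_revs:
        return subset & {changelog_revs[mergestate_node]}
    return set()

changelog_revs = {b"a" * 20: 0, b"b" * 20: 1}
assert conflict_side({0, 1, 2}, changelog_revs, b"b" * 20) == {1}
assert conflict_side({0, 1, 2}, changelog_revs, None) == set()  # no merge active

In practice these would be queried from the command line while a conflicted merge, rebase or graft is paused, e.g. `hg log -r 'conflictother()'`.
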
@predicate(b'contains(pattern)', weight=100)
def contains(repo, subset, x):
"""The revision's manifest contains a file matching pattern (but might not
--- a/mercurial/shelve.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/shelve.py Thu Feb 13 10:12:12 2020 -0800
@@ -745,7 +745,7 @@
try:
checkparents(repo, state)
- merge.update(repo, state.pendingctx, branchmerge=False, force=True)
+ merge.clean_update(state.pendingctx)
if state.activebookmark and state.activebookmark in repo._bookmarks:
bookmarks.activate(repo, state.activebookmark)
mergefiles(ui, repo, state.wctx, state.pendingctx)
@@ -996,7 +996,6 @@
stats = merge.graft(
repo,
shelvectx,
- shelvectx.p1(),
labels=[b'shelve', b'working-copy'],
keepconflictparent=True,
)
--- a/mercurial/tags.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/tags.py Thu Feb 13 10:12:12 2020 -0800
@@ -720,15 +720,20 @@
self._dirtyoffset = None
- if rawlen < wantedlen:
- self._dirtyoffset = rawlen
- self._raw.extend(b'\xff' * (wantedlen - rawlen))
- elif rawlen > wantedlen:
+ rawlentokeep = min(
+ wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize
+ )
+ if rawlen > rawlentokeep:
# There's no easy way to truncate array instances. This seems
# slightly less evil than copying a potentially large array slice.
- for i in range(rawlen - wantedlen):
+ for i in range(rawlen - rawlentokeep):
self._raw.pop()
- self._dirtyoffset = len(self._raw)
+ rawlen = len(self._raw)
+ self._dirtyoffset = rawlen
+ if rawlen < wantedlen:
+ if self._dirtyoffset is None:
+ self._dirtyoffset = rawlen
+ self._raw.extend(b'\xff' * (wantedlen - rawlen))
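
The reworked truncation first rounds the kept length down to a whole number of fixed-size records, so a partially written trailing record is discarded rather than reused, and only then pads with 0xff up to the wanted length. A quick arithmetic sketch (the 24-byte record size is an assumption used here for illustration):

RECORD_SIZE = 24  # assumed: 4-byte node prefix + 20-byte filenode

def kept_length(rawlen, wantedlen):
    """bytes to keep: at most wantedlen, rounded down to whole records"""
    return min(wantedlen, (rawlen // RECORD_SIZE) * RECORD_SIZE)

assert kept_length(rawlen=100, wantedlen=96) == 96   # 100 -> 4 whole records
assert kept_length(rawlen=100, wantedlen=240) == 96  # drop the partial tail
assert kept_length(rawlen=50, wantedlen=240) == 48   # keep two whole records
# anything between the kept length and wantedlen is then padded with 0xff
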
def getfnode(self, node, computemissing=True):
"""Obtain the filenode of the .hgtags file at a specified revision.
--- a/mercurial/templatekw.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/templatekw.py Thu Feb 13 10:12:12 2020 -0800
@@ -396,26 +396,38 @@
return templateutil.compatfileslist(context, mapping, b'file', ctx.files())
-@templatekeyword(b'graphnode', requires={b'repo', b'ctx'})
+@templatekeyword(b'graphnode', requires={b'repo', b'ctx', b'cache'})
def showgraphnode(context, mapping):
"""String. The character representing the changeset node in an ASCII
revision graph."""
repo = context.resource(mapping, b'repo')
ctx = context.resource(mapping, b'ctx')
- return getgraphnode(repo, ctx)
+ cache = context.resource(mapping, b'cache')
+ return getgraphnode(repo, ctx, cache)
-def getgraphnode(repo, ctx):
- return getgraphnodecurrent(repo, ctx) or getgraphnodesymbol(ctx)
+def getgraphnode(repo, ctx, cache):
+ return getgraphnodecurrent(repo, ctx, cache) or getgraphnodesymbol(ctx)
-def getgraphnodecurrent(repo, ctx):
+def getgraphnodecurrent(repo, ctx, cache):
wpnodes = repo.dirstate.parents()
if wpnodes[1] == nullid:
wpnodes = wpnodes[:1]
if ctx.node() in wpnodes:
return b'@'
else:
+ merge_nodes = cache.get(b'merge_nodes', ())
+ if not merge_nodes:
+ from . import merge
+
+ mergestate = merge.mergestate.read(repo)
+ if mergestate.active():
+ merge_nodes = (mergestate.local, mergestate.other)
+ cache[b'merge_nodes'] = merge_nodes
+
+ if ctx.node() in merge_nodes:
+ return b'%'
return b''
--- a/mercurial/ui.py Fri Dec 13 10:37:45 2019 +0100
+++ b/mercurial/ui.py Thu Feb 13 10:12:12 2020 -0800
@@ -1939,30 +1939,6 @@
if self._progbar is not None and self._progbar.printed:
self._progbar.clear()
- def progress(self, topic, pos, item=b"", unit=b"", total=None):
- '''show a progress message
-
- By default a textual progress bar will be displayed if an operation
- takes too long. 'topic' is the current operation, 'item' is a
- non-numeric marker of the current position (i.e. the currently
- in-process file), 'pos' is the current numeric position (i.e.
- revision, bytes, etc.), unit is a corresponding unit label,
- and total is the highest expected pos.
-
- Multiple nested topics may be active at a time.
-
- All topics should be marked closed by setting pos to None at
- termination.
- '''
- self.deprecwarn(
- b"use ui.makeprogress() instead of ui.progress()", b"5.1"
- )
- progress = self.makeprogress(topic, unit, total)
- if pos is not None:
- progress.update(pos, item=item)
- else:
- progress.complete()
-
def makeprogress(self, topic, unit=b"", total=None):
"""Create a progress helper for the specified topic"""
if getattr(self._fmsgerr, 'structured', False):
--- a/relnotes/next Fri Dec 13 10:37:45 2019 +0100
+++ b/relnotes/next Thu Feb 13 10:12:12 2020 -0800
@@ -1,5 +1,17 @@
== New Features ==
+ * `hg purge`/`hg clean` can now delete ignored files instead of
+ untracked files, with the new -i flag.
+
+ * `hg log` now defaults to using an '%' symbol for commits involved
+ in unresolved merge conflicts. That includes unresolved conflicts
+ caused by e.g. `hg update --merge` and `hg graft`. '@' still takes
+ precedence, so what used to be marked '@' still is.
+
+ * New `conflictlocal()` and `conflictother()` revsets return the
+ commits that are being merged, when there are conflicts. Also works
+ for conflicts caused by e.g. `hg graft`.
+
== New Experimental Features ==
@@ -9,6 +21,19 @@
== Backwards Compatibility Changes ==
+ * When `hg rebase` pauses for merge conflict resolution, the working
+ copy will no longer have the rebased node as a second parent. You
+ can use the new `conflictother()` revset for finding the other
+ parent during a conflict.
+
== Internal API Changes ==
+ * The deprecated `ui.progress()` has now been deleted. Please use
+ `ui.makeprogress()` instead.
+
+ * `hg.merge()` has lost its `abort` argument. Please call
+ `hg.abortmerge()` directly instead.
+
+ * The `*others` argument of `cmdutil.check_incompatible_arguments()`
+ changed from being a varargs argument to being a single collection.
--- a/rust/Cargo.lock Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/Cargo.lock Thu Feb 13 10:12:12 2020 -0800
@@ -2,23 +2,28 @@
# It is not intended for manual editing.
[[package]]
name = "aho-corasick"
-version = "0.7.6"
+version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "arrayvec"
-version = "0.4.12"
+name = "ansi_term"
+version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "autocfg"
-version = "0.1.6"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "autocfg"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -33,14 +38,18 @@
[[package]]
name = "c2-chacha"
-version = "0.2.2"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
+name = "cc"
+version = "1.0.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -55,55 +64,71 @@
[[package]]
name = "cpython"
-version = "0.3.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-deque"
-version = "0.7.1"
+version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-epoch"
-version = "0.7.2"
+version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-queue"
-version = "0.1.2"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-utils"
-version = "0.6.6"
+version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
+name = "ctor"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "difference"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "either"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -115,25 +140,44 @@
[[package]]
name = "getrandom"
-version = "0.1.12"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
+name = "hermit-abi"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "hex"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "hg-core"
version = "0.1.0"
dependencies = [
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -141,9 +185,9 @@
name = "hg-cpython"
version = "0.1.0"
dependencies = [
- "cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"hg-core 0.1.0",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -151,7 +195,7 @@
version = "0.1.0"
dependencies = [
"hg-core 0.1.0",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -161,64 +205,95 @@
[[package]]
name = "libc"
-version = "0.2.64"
+version = "0.2.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "memchr"
-version = "2.2.1"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "memoffset"
-version = "0.5.1"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "nodrop"
-version = "0.1.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "num-traits"
-version = "0.2.8"
+version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num_cpus"
-version = "1.10.1"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "output_vt100"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ppv-lite86"
-version = "0.2.5"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "pretty_assertions"
+version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ctor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
[[package]]
name = "python27-sys"
-version = "0.3.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "python3-sys"
-version = "0.3.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -226,8 +301,8 @@
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -241,11 +316,11 @@
[[package]]
name = "rand"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -256,7 +331,7 @@
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -265,7 +340,7 @@
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -287,7 +362,7 @@
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -319,7 +394,7 @@
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -331,7 +406,7 @@
dependencies = [
"cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -342,7 +417,7 @@
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -356,24 +431,24 @@
[[package]]
name = "rayon"
-version = "1.2.0"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rayon-core"
-version = "1.6.0"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -385,22 +460,35 @@
]
[[package]]
+name = "redox_syscall"
+version = "0.1.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "regex"
-version = "1.3.1"
+version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
-version = "0.6.12"
+version = "0.6.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
+name = "remove_dir_all"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -409,6 +497,14 @@
]
[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "scopeguard"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -427,8 +523,31 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
+name = "syn"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
+ "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "thread_local"
-version = "0.3.6"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -439,12 +558,17 @@
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
+name = "unicode-xid"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "wasi"
-version = "0.7.0"
+version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -462,39 +586,56 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
+name = "winapi-util"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
-"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d"
-"checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9"
-"checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"
+"checksum aho-corasick 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "743ad5a418686aad3b87fd14c43badd828cf26e214a00f92a384291cf22e1811"
+"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
+"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
+"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
-"checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101"
+"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb"
+"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85532c648315aeb0829ad216a6a29aa3212cf9319bc7f6daf1404aa0bdd1485f"
-"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71"
-"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9"
-"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
-"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
+"checksum cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bfaf3847ab963e40c4f6dd8d6be279bdf74007ae2413786a0dcbb28c52139a95"
+"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca"
+"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac"
+"checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db"
+"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4"
+"checksum ctor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8ce37ad4184ab2ce004c33bf6379185d3b1c95801cab51026bd271bf68eedc"
+"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "473a1265acc8ff1e808cd0a1af8cee3c2ee5200916058a2ca113c29f2d903571"
+"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
+"checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772"
+"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e"
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c"
-"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
-"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f"
-"checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
-"checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32"
-"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273"
-"checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b"
-"checksum python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "372555e88a6bc8109eb641380240dc8d25a128fc48363ec9075664daadffdd5b"
-"checksum python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f3a8ebed3f1201fda179f3960609dbbc10cd8c75e9f2afcb03788278f367d8ea"
+"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558"
+"checksum memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223"
+"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9"
+"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
+"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
+"checksum output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
+"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
+"checksum pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427"
+"checksum proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548"
+"checksum python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67cb041de8615111bf224dd75667af5f25c6e032118251426fed7f1b70ce4c8c"
+"checksum python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90af11779515a1e530af60782d273b59ac79d33b0e253c071a728563957c76d4"
+"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe"
"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
-"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412"
+"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853"
"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
@@ -507,18 +648,25 @@
"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
-"checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123"
-"checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b"
+"checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
+"checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd"
-"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716"
+"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
+"checksum regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8"
+"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06"
+"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
-"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
+"checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5"
+"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
+"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
"checksum twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56"
-"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d"
+"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
+"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- a/rust/hg-core/Cargo.toml Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/Cargo.toml Thu Feb 13 10:12:12 2020 -0800
@@ -4,16 +4,31 @@
authors = ["Georges Racinet <gracinet@anybox.fr>"]
description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
edition = "2018"
+build = "build.rs"
[lib]
name = "hg"
[dependencies]
byteorder = "1.3.1"
+hex = "0.4.0"
lazy_static = "1.3.0"
+libc = { version = "0.2.66", optional = true }
memchr = "2.2.0"
rand = "0.6.5"
rand_pcg = "0.1.1"
-rayon = "1.2.0"
+rayon = "1.3.0"
regex = "1.1.0"
twox-hash = "1.5.0"
+same-file = "1.0.6"
+
+[dev-dependencies]
+tempfile = "3.1.0"
+pretty_assertions = "0.6.1"
+
+[build-dependencies]
+cc = { version = "1.0.48", optional = true }
+
+[features]
+default = []
+with-re2 = ["cc", "libc"]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/build.rs Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,25 @@
+// build.rs
+//
+// Copyright 2020 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+#[cfg(feature = "with-re2")]
+use cc;
+
+#[cfg(feature = "with-re2")]
+fn compile_re2() {
+ cc::Build::new()
+ .cpp(true)
+ .flag("-std=c++11")
+ .file("src/re2/rust_re2.cpp")
+ .compile("librustre.a");
+
+ println!("cargo:rustc-link-lib=re2");
+}
+
+fn main() {
+ #[cfg(feature = "with-re2")]
+ compile_re2();
+}
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs Thu Feb 13 10:12:12 2020 -0800
@@ -8,12 +8,15 @@
//! A multiset of directory names.
//!
//! Used to count the references to directories in a manifest or dirstate.
-use crate::utils::hg_path::{HgPath, HgPathBuf};
use crate::{
- dirstate::EntryState, utils::files, DirstateEntry, DirstateMapError,
- FastHashMap,
+ dirstate::EntryState,
+ utils::{
+ files,
+ hg_path::{HgPath, HgPathBuf, HgPathError},
+ },
+ DirstateEntry, DirstateMapError, FastHashMap,
};
-use std::collections::hash_map::{self, Entry};
+use std::collections::{hash_map, hash_map::Entry, HashMap, HashSet};
// could be encapsulated if we care API stability more seriously
pub type DirsMultisetIter<'a> = hash_map::Keys<'a, HgPathBuf, u32>;
@@ -75,7 +78,14 @@
if subpath.as_bytes().last() == Some(&b'/') {
// TODO Remove this once PathAuditor is certified
// as the only entrypoint for path data
- return Err(DirstateMapError::ConsecutiveSlashes);
+ let second_slash_index = subpath.len() - 1;
+
+ return Err(DirstateMapError::InvalidPath(
+ HgPathError::ConsecutiveSlashes {
+ bytes: path.as_ref().as_bytes().to_owned(),
+ second_slash_index,
+ },
+ ));
}
if let Some(val) = self.inner.get_mut(subpath) {
*val += 1;
@@ -129,6 +139,68 @@
}
}
+/// This is basically a reimplementation of `DirsMultiset` that stores the
+/// children instead of just a count of them, plus a small optional
+/// optimization to avoid some directories we don't need.
+#[derive(PartialEq, Debug)]
+pub struct DirsChildrenMultiset<'a> {
+ inner: FastHashMap<&'a HgPath, HashSet<&'a HgPath>>,
+ only_include: Option<HashSet<&'a HgPath>>,
+}
+
+impl<'a> DirsChildrenMultiset<'a> {
+ pub fn new(
+ paths: impl Iterator<Item = &'a HgPathBuf>,
+ only_include: Option<&'a HashSet<impl AsRef<HgPath> + 'a>>,
+ ) -> Self {
+ let mut new = Self {
+ inner: HashMap::default(),
+ only_include: only_include
+ .map(|s| s.iter().map(|p| p.as_ref()).collect()),
+ };
+
+ for path in paths {
+ new.add_path(path)
+ }
+
+ new
+ }
+ fn add_path(&mut self, path: &'a (impl AsRef<HgPath> + 'a)) {
+ if path.as_ref().is_empty() {
+ return;
+ }
+ for (directory, basename) in files::find_dirs_with_base(path.as_ref())
+ {
+ if !self.is_dir_included(directory) {
+ continue;
+ }
+ self.inner
+ .entry(directory)
+ .and_modify(|e| {
+ e.insert(basename);
+ })
+ .or_insert_with(|| {
+ let mut set = HashSet::new();
+ set.insert(basename);
+ set
+ });
+ }
+ }
+ fn is_dir_included(&self, dir: impl AsRef<HgPath>) -> bool {
+ match &self.only_include {
+ None => false,
+ Some(i) => i.contains(dir.as_ref()),
+ }
+ }
+
+ pub fn get(
+ &self,
+ path: impl AsRef<HgPath>,
+ ) -> Option<&HashSet<&'a HgPath>> {
+ self.inner.get(path.as_ref())
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
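
The new `DirsChildrenMultiset` above maps each kept directory to the set of its direct children. A minimal usage sketch follows; the `hg::dirstate::dirs_multiset` module path, the `to_owned()` conversion from `HgPath`, and the exact output of `files::find_dirs_with_base` are assumptions here, not guarantees from this patch.

    use std::collections::HashSet;

    use hg::dirstate::dirs_multiset::DirsChildrenMultiset;
    use hg::utils::hg_path::{HgPath, HgPathBuf};

    fn main() {
        let paths: Vec<HgPathBuf> = vec![
            HgPath::new(b"a/file1.txt").to_owned(),
            HgPath::new(b"a/b/file2.txt").to_owned(),
        ];
        // Restrict bookkeeping to the directories we actually care about.
        let mut only_include: HashSet<HgPathBuf> = HashSet::new();
        only_include.insert(HgPath::new(b"a").to_owned());

        let children =
            DirsChildrenMultiset::new(paths.iter(), Some(&only_include));
        // Direct children recorded under "a" (expected: "file1.txt" and "b");
        // directories outside `only_include` are not tracked and yield None.
        println!("{:?}", children.get(HgPath::new(b"a")));
    }
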
--- a/rust/hg-core/src/dirstate/status.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/dirstate/status.rs Thu Feb 13 10:12:12 2020 -0800
@@ -272,7 +272,7 @@
pub fn status<'a: 'c, 'b: 'c, 'c>(
dmap: &'a DirstateMap,
- matcher: &'b (impl Matcher),
+ matcher: &'b impl Matcher,
root_dir: impl AsRef<Path> + Sync + Send + Copy,
list_clean: bool,
last_normal_time: i64,
--- a/rust/hg-core/src/filepatterns.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/filepatterns.rs Thu Feb 13 10:12:12 2020 -0800
@@ -8,12 +8,18 @@
//! Handling of Mercurial-specific patterns.
use crate::{
- utils::SliceExt, FastHashMap, LineNumber, PatternError, PatternFileError,
+ utils::{
+ files::{canonical_path, get_bytes_from_path, get_path_from_bytes},
+ hg_path::{path_to_hg_path_buf, HgPathBuf, HgPathError},
+ SliceExt,
+ },
+ FastHashMap, PatternError,
};
use lazy_static::lazy_static;
use regex::bytes::{NoExpand, Regex};
use std::fs::File;
use std::io::Read;
+use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::vec::Vec;
@@ -32,19 +38,33 @@
const GLOB_REPLACEMENTS: &[(&[u8], &[u8])] =
&[(b"*/", b"(?:.*/)?"), (b"*", b".*"), (b"", b"[^/]*")];
+/// Appended to the regexp of globs
+const GLOB_SUFFIX: &[u8; 7] = b"(?:/|$)";
+
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum PatternSyntax {
+ /// A regular expression
Regexp,
/// Glob that matches at the front of the path
RootGlob,
/// Glob that matches at any suffix of the path (still anchored at
/// slashes)
Glob,
+ /// A path relative to repository root, which is matched recursively
Path,
+ /// A path relative to cwd
RelPath,
+ /// An unrooted glob (*.rs matches Rust files in all dirs)
RelGlob,
+ /// A regexp that needn't match the start of a name
RelRegexp,
+ /// A path relative to repository root, which is matched non-recursively
+ /// (will not match subdirectories)
RootFiles,
+ /// A file of patterns to read and include
+ Include,
+ /// A file of patterns to match against files under the same directory
+ SubInclude,
}
/// Transforms a glob pattern into a regex
@@ -125,16 +145,20 @@
.collect()
}
-fn parse_pattern_syntax(kind: &[u8]) -> Result<PatternSyntax, PatternError> {
+pub fn parse_pattern_syntax(
+ kind: &[u8],
+) -> Result<PatternSyntax, PatternError> {
match kind {
- b"re" => Ok(PatternSyntax::Regexp),
- b"path" => Ok(PatternSyntax::Path),
- b"relpath" => Ok(PatternSyntax::RelPath),
- b"rootfilesin" => Ok(PatternSyntax::RootFiles),
- b"relglob" => Ok(PatternSyntax::RelGlob),
- b"relre" => Ok(PatternSyntax::RelRegexp),
- b"glob" => Ok(PatternSyntax::Glob),
- b"rootglob" => Ok(PatternSyntax::RootGlob),
+ b"re:" => Ok(PatternSyntax::Regexp),
+ b"path:" => Ok(PatternSyntax::Path),
+ b"relpath:" => Ok(PatternSyntax::RelPath),
+ b"rootfilesin:" => Ok(PatternSyntax::RootFiles),
+ b"relglob:" => Ok(PatternSyntax::RelGlob),
+ b"relre:" => Ok(PatternSyntax::RelRegexp),
+ b"glob:" => Ok(PatternSyntax::Glob),
+ b"rootglob:" => Ok(PatternSyntax::RootGlob),
+ b"include:" => Ok(PatternSyntax::Include),
+ b"subinclude:" => Ok(PatternSyntax::SubInclude),
_ => Err(PatternError::UnsupportedSyntax(
String::from_utf8_lossy(kind).to_string(),
)),
@@ -144,11 +168,10 @@
/// Builds the regex that corresponds to the given pattern.
/// If within a `syntax: regexp` context, returns the pattern,
/// otherwise, returns the corresponding regex.
-fn _build_single_regex(
- syntax: PatternSyntax,
- pattern: &[u8],
- globsuffix: &[u8],
-) -> Vec<u8> {
+fn _build_single_regex(entry: &IgnorePattern) -> Vec<u8> {
+ let IgnorePattern {
+ syntax, pattern, ..
+ } = entry;
if pattern.is_empty() {
return vec![];
}
@@ -158,7 +181,7 @@
if pattern[0] == b'^' {
return pattern.to_owned();
}
- [b".*", pattern].concat()
+ [&b".*"[..], pattern].concat()
}
PatternSyntax::Path | PatternSyntax::RelPath => {
if pattern == b"." {
@@ -181,36 +204,91 @@
PatternSyntax::RelGlob => {
let glob_re = glob_to_re(pattern);
if let Some(rest) = glob_re.drop_prefix(b"[^/]*") {
- [b".*", rest, globsuffix].concat()
+ [b".*", rest, GLOB_SUFFIX].concat()
} else {
- [b"(?:|.*/)", glob_re.as_slice(), globsuffix].concat()
+ [b"(?:|.*/)", glob_re.as_slice(), GLOB_SUFFIX].concat()
}
}
PatternSyntax::Glob | PatternSyntax::RootGlob => {
- [glob_to_re(pattern).as_slice(), globsuffix].concat()
+ [glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
}
+ PatternSyntax::Include | PatternSyntax::SubInclude => unreachable!(),
}
}
const GLOB_SPECIAL_CHARACTERS: [u8; 7] =
[b'*', b'?', b'[', b']', b'{', b'}', b'\\'];
+/// TODO support other platforms
+#[cfg(unix)]
+pub fn normalize_path_bytes(bytes: &[u8]) -> Vec<u8> {
+ if bytes.is_empty() {
+ return b".".to_vec();
+ }
+ let sep = b'/';
+
+ let mut initial_slashes = bytes.iter().take_while(|b| **b == sep).count();
+ if initial_slashes > 2 {
+ // POSIX allows one or two initial slashes, but treats three or more
+ // as a single slash.
+ initial_slashes = 1;
+ }
+ let components = bytes
+ .split(|b| *b == sep)
+ .filter(|c| !(c.is_empty() || c == b"."))
+ .fold(vec![], |mut acc, component| {
+ if component != b".."
+ || (initial_slashes == 0 && acc.is_empty())
+ || (!acc.is_empty() && acc[acc.len() - 1] == b"..")
+ {
+ acc.push(component)
+ } else if !acc.is_empty() {
+ acc.pop();
+ }
+ acc
+ });
+ let mut new_bytes = components.join(&sep);
+
+ if initial_slashes > 0 {
+ let mut buf: Vec<_> = (0..initial_slashes).map(|_| sep).collect();
+ buf.extend(new_bytes);
+ new_bytes = buf;
+ }
+ if new_bytes.is_empty() {
+ b".".to_vec()
+ } else {
+ new_bytes
+ }
+}
+
/// Wrapper function to `_build_single_regex` that short-circuits 'exact' globs
/// that don't need to be transformed into a regex.
pub fn build_single_regex(
- kind: &[u8],
- pat: &[u8],
- globsuffix: &[u8],
+ entry: &IgnorePattern,
) -> Result<Vec<u8>, PatternError> {
- let enum_kind = parse_pattern_syntax(kind)?;
- if enum_kind == PatternSyntax::RootGlob
- && !pat.iter().any(|b| GLOB_SPECIAL_CHARACTERS.contains(b))
+ let IgnorePattern {
+ pattern, syntax, ..
+ } = entry;
+ let pattern = match syntax {
+ PatternSyntax::RootGlob
+ | PatternSyntax::Path
+ | PatternSyntax::RelGlob
+ | PatternSyntax::RootFiles => normalize_path_bytes(&pattern),
+ PatternSyntax::Include | PatternSyntax::SubInclude => {
+ return Err(PatternError::NonRegexPattern(entry.clone()))
+ }
+ _ => pattern.to_owned(),
+ };
+ if *syntax == PatternSyntax::RootGlob
+ && !pattern.iter().any(|b| GLOB_SPECIAL_CHARACTERS.contains(b))
{
- let mut escaped = escape_pattern(pat);
- escaped.extend(b"(?:/|$)");
+ let mut escaped = escape_pattern(&pattern);
+ escaped.extend(GLOB_SUFFIX);
Ok(escaped)
} else {
- Ok(_build_single_regex(enum_kind, pat, globsuffix))
+ let mut entry = entry.clone();
+ entry.pattern = pattern;
+ Ok(_build_single_regex(&entry))
}
}
@@ -222,24 +300,29 @@
m.insert(b"regexp".as_ref(), b"relre:".as_ref());
m.insert(b"glob".as_ref(), b"relglob:".as_ref());
m.insert(b"rootglob".as_ref(), b"rootglob:".as_ref());
- m.insert(b"include".as_ref(), b"include".as_ref());
- m.insert(b"subinclude".as_ref(), b"subinclude".as_ref());
+ m.insert(b"include".as_ref(), b"include:".as_ref());
+ m.insert(b"subinclude".as_ref(), b"subinclude:".as_ref());
m
};
}
-pub type PatternTuple = (Vec<u8>, LineNumber, Vec<u8>);
-type WarningTuple = (PathBuf, Vec<u8>);
+#[derive(Debug)]
+pub enum PatternFileWarning {
+ /// (file path, syntax bytes)
+ InvalidSyntax(PathBuf, Vec<u8>),
+ /// File path
+ NoSuchFile(PathBuf),
+}
pub fn parse_pattern_file_contents<P: AsRef<Path>>(
lines: &[u8],
file_path: P,
warn: bool,
-) -> (Vec<PatternTuple>, Vec<WarningTuple>) {
+) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
let comment_regex = Regex::new(r"((?:^|[^\\])(?:\\\\)*)#.*").unwrap();
let comment_escape_regex = Regex::new(r"\\#").unwrap();
- let mut inputs: Vec<PatternTuple> = vec![];
- let mut warnings: Vec<WarningTuple> = vec![];
+ let mut inputs: Vec<IgnorePattern> = vec![];
+ let mut warnings: Vec<PatternFileWarning> = vec![];
let mut current_syntax = b"relre:".as_ref();
@@ -267,8 +350,10 @@
if let Some(rel_syntax) = SYNTAXES.get(syntax) {
current_syntax = rel_syntax;
} else if warn {
- warnings
- .push((file_path.as_ref().to_owned(), syntax.to_owned()));
+ warnings.push(PatternFileWarning::InvalidSyntax(
+ file_path.as_ref().to_owned(),
+ syntax.to_owned(),
+ ));
}
continue;
}
@@ -288,34 +373,186 @@
}
}
- inputs.push((
- [line_syntax, line].concat(),
- line_number,
- line.to_owned(),
+ inputs.push(IgnorePattern::new(
+ parse_pattern_syntax(&line_syntax).map_err(|e| match e {
+ PatternError::UnsupportedSyntax(syntax) => {
+ PatternError::UnsupportedSyntaxInFile(
+ syntax,
+ file_path.as_ref().to_string_lossy().into(),
+ line_number,
+ )
+ }
+ _ => e,
+ })?,
+ &line,
+ &file_path,
));
}
- (inputs, warnings)
+ Ok((inputs, warnings))
}
pub fn read_pattern_file<P: AsRef<Path>>(
file_path: P,
warn: bool,
-) -> Result<(Vec<PatternTuple>, Vec<WarningTuple>), PatternFileError> {
- let mut f = File::open(file_path.as_ref())?;
+) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
+ let mut f = match File::open(file_path.as_ref()) {
+ Ok(f) => Ok(f),
+ Err(e) => match e.kind() {
+ std::io::ErrorKind::NotFound => {
+ return Ok((
+ vec![],
+ vec![PatternFileWarning::NoSuchFile(
+ file_path.as_ref().to_owned(),
+ )],
+ ))
+ }
+ _ => Err(e),
+ },
+ }?;
let mut contents = Vec::new();
f.read_to_end(&mut contents)?;
- Ok(parse_pattern_file_contents(&contents, file_path, warn))
+ Ok(parse_pattern_file_contents(&contents, file_path, warn)?)
+}
+
+/// Represents an entry in an "ignore" file.
+#[derive(Debug, Eq, PartialEq, Clone)]
+pub struct IgnorePattern {
+ pub syntax: PatternSyntax,
+ pub pattern: Vec<u8>,
+ pub source: PathBuf,
+}
+
+impl IgnorePattern {
+ pub fn new(
+ syntax: PatternSyntax,
+ pattern: &[u8],
+ source: impl AsRef<Path>,
+ ) -> Self {
+ Self {
+ syntax,
+ pattern: pattern.to_owned(),
+ source: source.as_ref().to_owned(),
+ }
+ }
+}
+
+pub type PatternResult<T> = Result<T, PatternError>;
+
+/// Wrapper for `read_pattern_file` that also recursively expands `include:`
+/// patterns.
+///
+/// `subinclude:` is not treated as a special pattern here: unraveling such patterns
+/// needs to occur in the "ignore" phase.
+pub fn get_patterns_from_file(
+ pattern_file: impl AsRef<Path>,
+ root_dir: impl AsRef<Path>,
+) -> PatternResult<(Vec<IgnorePattern>, Vec<PatternFileWarning>)> {
+ let (patterns, mut warnings) = read_pattern_file(&pattern_file, true)?;
+ let patterns = patterns
+ .into_iter()
+ .flat_map(|entry| -> PatternResult<_> {
+ let IgnorePattern {
+ syntax,
+ pattern,
+ source: _,
+ } = &entry;
+ Ok(match syntax {
+ PatternSyntax::Include => {
+ let inner_include =
+ root_dir.as_ref().join(get_path_from_bytes(&pattern));
+ let (inner_pats, inner_warnings) = get_patterns_from_file(
+ &inner_include,
+ root_dir.as_ref(),
+ )?;
+ warnings.extend(inner_warnings);
+ inner_pats
+ }
+ _ => vec![entry],
+ })
+ })
+ .flatten()
+ .collect();
+
+ Ok((patterns, warnings))
+}
+
+/// Holds all the information needed to handle a `subinclude:` pattern.
+pub struct SubInclude {
+ /// Will be used for repository (hg) paths that start with this prefix.
+ /// It is relative to the current working directory, so comparing against
+ /// repository paths is painless.
+ pub prefix: HgPathBuf,
+ /// The file itself, containing the patterns
+ pub path: PathBuf,
+ /// Folder in the filesystem where this applies
+ pub root: PathBuf,
+}
+
+impl SubInclude {
+ pub fn new(
+ root_dir: impl AsRef<Path>,
+ pattern: &[u8],
+ source: impl AsRef<Path>,
+ ) -> Result<SubInclude, HgPathError> {
+ let normalized_source =
+ normalize_path_bytes(&get_bytes_from_path(source));
+
+ let source_root = get_path_from_bytes(&normalized_source);
+ let source_root = source_root.parent().unwrap_or(source_root.deref());
+
+ let path = source_root.join(get_path_from_bytes(pattern));
+ let new_root = path.parent().unwrap_or(path.deref());
+
+ let prefix = canonical_path(&root_dir, &root_dir, new_root)?;
+
+ Ok(Self {
+ prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
+ if !p.is_empty() {
+ p.push(b'/');
+ }
+ Ok(p)
+ })?,
+ path: path.to_owned(),
+ root: new_root.to_owned(),
+ })
+ }
+}
+
+/// Separate and pre-process subincludes from other patterns for the "ignore"
+/// phase.
+pub fn filter_subincludes(
+ ignore_patterns: &[IgnorePattern],
+ root_dir: impl AsRef<Path>,
+) -> Result<(Vec<SubInclude>, Vec<&IgnorePattern>), HgPathError> {
+ let mut subincludes = vec![];
+ let mut others = vec![];
+
+ for ignore_pattern in ignore_patterns.iter() {
+ let IgnorePattern {
+ syntax,
+ pattern,
+ source,
+ } = ignore_pattern;
+ if *syntax == PatternSyntax::SubInclude {
+ subincludes.push(SubInclude::new(&root_dir, pattern, &source)?);
+ } else {
+ others.push(ignore_pattern)
+ }
+ }
+ Ok((subincludes, others))
}
#[cfg(test)]
mod tests {
use super::*;
+ use pretty_assertions::assert_eq;
#[test]
fn escape_pattern_test() {
- let untouched = br#"!"%',/0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ_`abcdefghijklmnopqrstuvwxyz"#;
+ let untouched =
+ br#"!"%',/0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ_`abcdefghijklmnopqrstuvwxyz"#;
assert_eq!(escape_pattern(untouched), untouched.to_vec());
// All escape codes
assert_eq!(
@@ -342,39 +579,78 @@
let lines = b"syntax: glob\n*.elc";
assert_eq!(
- vec![(b"relglob:*.elc".to_vec(), 2, b"*.elc".to_vec())],
parse_pattern_file_contents(lines, Path::new("file_path"), false)
+ .unwrap()
.0,
+ vec![IgnorePattern::new(
+ PatternSyntax::RelGlob,
+ b"*.elc",
+ Path::new("file_path")
+ )],
);
let lines = b"syntax: include\nsyntax: glob";
assert_eq!(
parse_pattern_file_contents(lines, Path::new("file_path"), false)
+ .unwrap()
.0,
vec![]
);
let lines = b"glob:**.o";
assert_eq!(
parse_pattern_file_contents(lines, Path::new("file_path"), false)
+ .unwrap()
.0,
- vec![(b"relglob:**.o".to_vec(), 1, b"**.o".to_vec())]
+ vec![IgnorePattern::new(
+ PatternSyntax::RelGlob,
+ b"**.o",
+ Path::new("file_path")
+ )]
+ );
+ }
+
+ #[test]
+ fn test_build_single_regex() {
+ assert_eq!(
+ build_single_regex(&IgnorePattern::new(
+ PatternSyntax::RelGlob,
+ b"rust/target/",
+ Path::new("")
+ ))
+ .unwrap(),
+ br"(?:|.*/)rust/target(?:/|$)".to_vec(),
);
}
#[test]
fn test_build_single_regex_shortcut() {
assert_eq!(
- br"(?:/|$)".to_vec(),
- build_single_regex(b"rootglob", b"", b"").unwrap()
+ build_single_regex(&IgnorePattern::new(
+ PatternSyntax::RootGlob,
+ b"",
+ Path::new("")
+ ))
+ .unwrap(),
+ br"\.(?:/|$)".to_vec(),
);
assert_eq!(
+ build_single_regex(&IgnorePattern::new(
+ PatternSyntax::RootGlob,
+ b"whatever",
+ Path::new("")
+ ))
+ .unwrap(),
br"whatever(?:/|$)".to_vec(),
- build_single_regex(b"rootglob", b"whatever", b"").unwrap()
);
assert_eq!(
- br"[^/]*\.o".to_vec(),
- build_single_regex(b"rootglob", b"*.o", b"").unwrap()
+ build_single_regex(&IgnorePattern::new(
+ PatternSyntax::RootGlob,
+ b"*.o",
+ Path::new("")
+ ))
+ .unwrap(),
+ br"[^/]*\.o(?:/|$)".to_vec(),
);
}
}
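
Pattern kinds that carry a path component (`rootglob:`, `path:`, `relglob:`, `rootfilesin:`) are now run through `normalize_path_bytes` before a regex is built, so its behaviour is worth pinning down. The following is an illustrative sketch of what the Unix-only helper above produces, mirroring Python's `os.path.normpath`; it is not part of the patch's test suite and the `hg::filepatterns` module path is assumed.

    use hg::filepatterns::normalize_path_bytes;

    fn main() {
        // An empty path normalizes to ".", matching os.path.normpath.
        assert_eq!(normalize_path_bytes(b""), b".".to_vec());
        // "." components and duplicate separators are dropped.
        assert_eq!(
            normalize_path_bytes(b"foo/./bar//baz"),
            b"foo/bar/baz".to_vec()
        );
        // Three or more leading slashes collapse to a single one.
        assert_eq!(normalize_path_bytes(b"///foo"), b"/foo".to_vec());
        // ".." pops the previous component when there is one.
        assert_eq!(normalize_path_bytes(b"foo/../bar"), b"bar".to_vec());
    }
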
--- a/rust/hg-core/src/lib.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/lib.rs Thu Feb 13 10:12:12 2020 -0800
@@ -21,11 +21,14 @@
pub mod matchers;
pub mod revlog;
pub use revlog::*;
+#[cfg(feature = "with-re2")]
+pub mod re2;
pub mod utils;
-use crate::utils::hg_path::HgPathBuf;
+use crate::utils::hg_path::{HgPathBuf, HgPathError};
pub use filepatterns::{
- build_single_regex, read_pattern_file, PatternSyntax, PatternTuple,
+ parse_pattern_syntax, read_pattern_file, IgnorePattern,
+ PatternFileWarning, PatternSyntax,
};
use std::collections::HashMap;
use twox_hash::RandomXxHashBuilder64;
@@ -79,18 +82,17 @@
pub enum DirstateMapError {
PathNotFound(HgPathBuf),
EmptyPath,
- ConsecutiveSlashes,
+ InvalidPath(HgPathError),
}
impl ToString for DirstateMapError {
fn to_string(&self) -> String {
- use crate::DirstateMapError::*;
match self {
- PathNotFound(_) => "expected a value, found none".to_string(),
- EmptyPath => "Overflow in dirstate.".to_string(),
- ConsecutiveSlashes => {
- "found invalid consecutive slashes in path".to_string()
+ DirstateMapError::PathNotFound(_) => {
+ "expected a value, found none".to_string()
}
+ DirstateMapError::EmptyPath => "Overflow in dirstate.".to_string(),
+ DirstateMapError::InvalidPath(e) => e.to_string(),
}
}
}
@@ -116,18 +118,37 @@
#[derive(Debug)]
pub enum PatternError {
+ Path(HgPathError),
UnsupportedSyntax(String),
+ UnsupportedSyntaxInFile(String, String, usize),
+ TooLong(usize),
+ IO(std::io::Error),
+ /// Needed a pattern that can be turned into a regex but got one that
+ /// can't. This should only happen through programmer error.
+ NonRegexPattern(IgnorePattern),
}
-#[derive(Debug)]
-pub enum PatternFileError {
- IO(std::io::Error),
- Pattern(PatternError, LineNumber),
-}
-
-impl From<std::io::Error> for PatternFileError {
- fn from(e: std::io::Error) -> Self {
- PatternFileError::IO(e)
+impl ToString for PatternError {
+ fn to_string(&self) -> String {
+ match self {
+ PatternError::UnsupportedSyntax(syntax) => {
+ format!("Unsupported syntax {}", syntax)
+ }
+ PatternError::UnsupportedSyntaxInFile(syntax, file_path, line) => {
+ format!(
+ "{}:{}: unsupported syntax {}",
+ file_path, line, syntax
+ )
+ }
+ PatternError::TooLong(size) => {
+ format!("matcher pattern is too long ({} bytes)", size)
+ }
+ PatternError::IO(e) => e.to_string(),
+ PatternError::Path(e) => e.to_string(),
+ PatternError::NonRegexPattern(pattern) => {
+ format!("'{:?}' cannot be turned into a regex", pattern)
+ }
+ }
}
}
@@ -142,3 +163,15 @@
DirstateError::IO(e)
}
}
+
+impl From<std::io::Error> for PatternError {
+ fn from(e: std::io::Error) -> Self {
+ PatternError::IO(e)
+ }
+}
+
+impl From<HgPathError> for PatternError {
+ fn from(e: HgPathError) -> Self {
+ PatternError::Path(e)
+ }
+}
--- a/rust/hg-core/src/matchers.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/matchers.rs Thu Feb 13 10:12:12 2020 -0800
@@ -10,7 +10,9 @@
use crate::{utils::hg_path::HgPath, DirsMultiset, DirstateMapError};
use std::collections::HashSet;
use std::iter::FromIterator;
+use std::ops::Deref;
+#[derive(Debug, PartialEq)]
pub enum VisitChildrenSet<'a> {
/// Don't visit anything
Empty,
@@ -163,12 +165,48 @@
}
fn visit_children_set(
&self,
- _directory: impl AsRef<HgPath>,
+ directory: impl AsRef<HgPath>,
) -> VisitChildrenSet {
- // TODO implement once we have `status.traverse`
- // This is useless until unknown files are taken into account
- // Which will not need to happen before the `IncludeMatcher`.
- unimplemented!()
+ if self.files.is_empty() || !self.dirs.contains(&directory) {
+ return VisitChildrenSet::Empty;
+ }
+ let dirs_as_set = self.dirs.iter().map(|k| k.deref()).collect();
+
+ let mut candidates: HashSet<&HgPath> =
+ self.files.union(&dirs_as_set).map(|k| *k).collect();
+ candidates.remove(HgPath::new(b""));
+
+ if !directory.as_ref().is_empty() {
+ let directory = [directory.as_ref().as_bytes(), b"/"].concat();
+ candidates = candidates
+ .iter()
+ .filter_map(|c| {
+ if c.as_bytes().starts_with(&directory) {
+ Some(HgPath::new(&c.as_bytes()[directory.len()..]))
+ } else {
+ None
+ }
+ })
+ .collect();
+ }
+
+ // `self.dirs` includes all of the directories, recursively, so if
+ // we're attempting to match 'foo/bar/baz.txt', it'll have '', 'foo',
+ // 'foo/bar' in it. Thus we can safely ignore a candidate that has a
+ // '/' in it, indicating it's for a subdir-of-a-subdir; the immediate
+ // subdir will be in there without a slash.
+ VisitChildrenSet::Set(
+ candidates
+ .iter()
+ .filter_map(|c| {
+ if c.bytes().all(|b| *b != b'/') {
+ Some(*c)
+ } else {
+ None
+ }
+ })
+ .collect(),
+ )
}
fn matches_everything(&self) -> bool {
false
@@ -177,3 +215,107 @@
true
}
}
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use pretty_assertions::assert_eq;
+
+ #[test]
+ fn test_filematcher_visit_children_set() {
+ // Visitchildrenset
+ let files = vec![HgPath::new(b"dir/subdir/foo.txt")];
+ let matcher = FileMatcher::new(&files).unwrap();
+
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"dir"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"")),
+ VisitChildrenSet::Set(set)
+ );
+
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"subdir"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"dir")),
+ VisitChildrenSet::Set(set)
+ );
+
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"foo.txt"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"dir/subdir")),
+ VisitChildrenSet::Set(set)
+ );
+
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"dir/subdir/x")),
+ VisitChildrenSet::Empty
+ );
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"dir/subdir/foo.txt")),
+ VisitChildrenSet::Empty
+ );
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"folder")),
+ VisitChildrenSet::Empty
+ );
+ }
+
+ #[test]
+ fn test_filematcher_visit_children_set_files_and_dirs() {
+ let files = vec![
+ HgPath::new(b"rootfile.txt"),
+ HgPath::new(b"a/file1.txt"),
+ HgPath::new(b"a/b/file2.txt"),
+ // No file in a/b/c
+ HgPath::new(b"a/b/c/d/file4.txt"),
+ ];
+ let matcher = FileMatcher::new(&files).unwrap();
+
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"a"));
+ set.insert(HgPath::new(b"rootfile.txt"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"")),
+ VisitChildrenSet::Set(set)
+ );
+
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"b"));
+ set.insert(HgPath::new(b"file1.txt"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"a")),
+ VisitChildrenSet::Set(set)
+ );
+
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"c"));
+ set.insert(HgPath::new(b"file2.txt"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"a/b")),
+ VisitChildrenSet::Set(set)
+ );
+
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"d"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"a/b/c")),
+ VisitChildrenSet::Set(set)
+ );
+ let mut set = HashSet::new();
+ set.insert(HgPath::new(b"file4.txt"));
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"a/b/c/d")),
+ VisitChildrenSet::Set(set)
+ );
+
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"a/b/c/d/e")),
+ VisitChildrenSet::Empty
+ );
+ assert_eq!(
+ matcher.visit_children_set(HgPath::new(b"folder")),
+ VisitChildrenSet::Empty
+ );
+ }
+}
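
`FileMatcher::visit_children_set` now reports, for a given directory, the set of immediate children a walk needs to look at, as the tests above exercise. Below is a caller-side sketch of how that return value might be used to prune traversal; the variants other than `Empty` and `Set` are assumed from the rest of the enum, which this hunk does not show.

    use hg::matchers::{Matcher, VisitChildrenSet};
    use hg::utils::hg_path::HgPath;

    fn should_descend(
        matcher: &impl Matcher,
        dir: &HgPath,
        child: &HgPath,
    ) -> bool {
        match matcher.visit_children_set(dir) {
            // Nothing under `dir` can match: skip the whole subtree.
            VisitChildrenSet::Empty => false,
            // Only the listed immediate children are worth visiting.
            VisitChildrenSet::Set(children) => children.contains(child),
            // Remaining variants ("visit this", "visit recursively") keep going.
            _ => true,
        }
    }
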
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/re2/mod.rs Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,21 @@
+/// re2 module
+///
+/// The Python implementation of Mercurial uses the Re2 regex engine when
+/// possible and if the bindings are installed, falling back to Python's `re`
+/// in case of unsupported syntax (Re2 is a non-backtracking engine).
+///
+/// Using it from Rust is not ideal. We need C++ bindings, a C++ compiler,
+/// Re2 needs to be installed... why not just use the `regex` crate?
+///
+/// Using Re2 from the Rust implementation guarantees backwards compatibility.
+/// We know it will work out of the box without needing to figure out the
+/// subtle differences in syntax. For example, `regex` currently does not
+/// support empty alternations (regex like `a||b`) which happens more often
+/// than we might think. Old benchmarks also showed worse performance from
+/// regex than with Re2, but the methodology and results were lost, so take
+/// this with a grain of salt.
+///
+/// The idea is to use Re2 for now as a temporary phase and then investigate
+/// how much work would be needed to use `regex`.
+mod re2;
+pub use re2::Re2;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/re2/re2.rs Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,66 @@
+/*
+re2.rs
+
+Rust FFI bindings to Re2.
+
+Copyright 2020 Valentin Gatien-Baron
+
+This software may be used and distributed according to the terms of the
+GNU General Public License version 2 or any later version.
+*/
+use libc::{c_int, c_void};
+
+type Re2Ptr = *const c_void;
+
+pub struct Re2(Re2Ptr);
+
+/// `re2.h` says:
+/// "An "RE2" object is safe for concurrent use by multiple threads."
+unsafe impl Sync for Re2 {}
+
+/// These bind to the C ABI in `rust_re2.cpp`.
+extern "C" {
+ fn rust_re2_create(data: *const u8, len: usize) -> Re2Ptr;
+ fn rust_re2_destroy(re2: Re2Ptr);
+ fn rust_re2_ok(re2: Re2Ptr) -> bool;
+ fn rust_re2_error(
+ re2: Re2Ptr,
+ outdata: *mut *const u8,
+ outlen: *mut usize,
+ ) -> bool;
+ fn rust_re2_match(
+ re2: Re2Ptr,
+ data: *const u8,
+ len: usize,
+ anchor: c_int,
+ ) -> bool;
+}
+
+impl Re2 {
+ pub fn new(pattern: &[u8]) -> Result<Re2, String> {
+ unsafe {
+ let re2 = rust_re2_create(pattern.as_ptr(), pattern.len());
+ if rust_re2_ok(re2) {
+ Ok(Re2(re2))
+ } else {
+ let mut data: *const u8 = std::ptr::null();
+ let mut len: usize = 0;
+ rust_re2_error(re2, &mut data, &mut len);
+ Err(String::from_utf8_lossy(std::slice::from_raw_parts(
+ data, len,
+ ))
+ .to_string())
+ }
+ }
+ }
+
+ pub fn is_match(&self, data: &[u8]) -> bool {
+ unsafe { rust_re2_match(self.0, data.as_ptr(), data.len(), 1) }
+ }
+}
+
+impl Drop for Re2 {
+ fn drop(&mut self) {
+ unsafe { rust_re2_destroy(self.0) }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/re2/rust_re2.cpp Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,49 @@
+/*
+rust_re2.cpp
+
+C ABI export of Re2's C++ interface for Rust FFI.
+
+Copyright 2020 Valentin Gatien-Baron
+
+This software may be used and distributed according to the terms of the
+GNU General Public License version 2 or any later version.
+*/
+
+#include <re2/re2.h>
+using namespace re2;
+
+extern "C" {
+ RE2* rust_re2_create(const char* data, size_t len) {
+ RE2::Options o;
+ o.set_encoding(RE2::Options::Encoding::EncodingLatin1);
+ o.set_log_errors(false);
+ o.set_max_mem(50000000);
+
+ return new RE2(StringPiece(data, len), o);
+ }
+
+ void rust_re2_destroy(RE2* re) {
+ delete re;
+ }
+
+ bool rust_re2_ok(RE2* re) {
+ return re->ok();
+ }
+
+ void rust_re2_error(RE2* re, const char** outdata, size_t* outlen) {
+ const std::string& e = re->error();
+ *outdata = e.data();
+ *outlen = e.length();
+ }
+
+ bool rust_re2_match(RE2* re, char* data, size_t len, int ianchor) {
+ const StringPiece sp = StringPiece(data, len);
+
+ RE2::Anchor anchor =
+ ianchor == 0 ? RE2::Anchor::UNANCHORED :
+ (ianchor == 1 ? RE2::Anchor::ANCHOR_START :
+ RE2::Anchor::ANCHOR_BOTH);
+
+ return re->Match(sp, 0, len, anchor, NULL, 0);
+ }
+}
--- a/rust/hg-core/src/revlog.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/revlog.rs Thu Feb 13 10:12:12 2020 -0800
@@ -5,6 +5,10 @@
// GNU General Public License version 2 or any later version.
//! Mercurial concepts for handling revision history
+pub mod node;
+pub mod nodemap;
+pub use node::{Node, NodeError, NodePrefix, NodePrefixRef};
+
/// Mercurial revision numbers
///
/// As noted in revlog.c, revision numbers are actually encoded in
@@ -36,3 +40,17 @@
ParentOutOfRange(Revision),
WorkingDirectoryUnsupported,
}
+
+/// The Mercurial Revlog Index
+///
+/// This is currently limited to the minimal interface that is needed for
+/// the [`nodemap`](nodemap/index.html) module
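+///
+/// A minimal sketch of an in-memory implementation (hypothetical; the
+/// nodemap unit tests use a similar `HashMap`-based index):
+///
+/// ```ignore
+/// struct VecIndex(Vec<Node>);
+///
+/// impl RevlogIndex for VecIndex {
+///     fn len(&self) -> usize {
+///         self.0.len()
+///     }
+///     fn node(&self, rev: Revision) -> Option<&Node> {
+///         // a real implementation would also special-case `NULL_REVISION`
+///         self.0.get(rev as usize)
+///     }
+/// }
+/// ```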
+pub trait RevlogIndex {
+ /// Total number of Revisions referenced in this index
+ fn len(&self) -> usize;
+
+ /// Return a reference to the Node or `None` if rev is out of bounds
+ ///
+ /// `NULL_REVISION` is not considered to be out of bounds.
+ fn node(&self, rev: Revision) -> Option<&Node>;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/node.rs Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,368 @@
+// Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Definitions and utilities for Revision nodes
+//!
+//! In Mercurial code base, it is customary to call "a node" the binary SHA
+//! of a revision.
+
+use hex::{self, FromHex, FromHexError};
+
+/// The length in bytes of a `Node`
+///
+/// This constant is meant to ease refactoring of this module, and
+/// is private so that calling code does not expect all nodes to have
+/// the same size, should we support several formats concurrently in
+/// the future.
+const NODE_BYTES_LENGTH: usize = 20;
+
+/// The length in nybbles (hexadecimal digits) of a `Node`
+///
+/// See also `NODE_BYTES_LENGTH` about it being private.
+const NODE_NYBBLES_LENGTH: usize = 2 * NODE_BYTES_LENGTH;
+
+/// Private alias for readability and to ease future change
+type NodeData = [u8; NODE_BYTES_LENGTH];
+
+/// Binary revision SHA
+///
+/// ## Future changes of hash size
+///
+/// To accommodate future changes of hash size, Rust callers
+/// should use the conversion methods at the boundaries (FFI, actual
+/// computation of hashes and I/O) only, and only if required.
+///
+/// All other callers outside of unit tests should just handle `Node` values
+/// and never make any assumptions about the actual length, using [`nybbles_len`]
+/// if they need a loop boundary.
+///
+/// All methods that create a `Node` either take a type that enforces
+/// the size or fail immediately at runtime with [`ExactLengthRequired`].
+///
+/// [`nybbles_len`]: #method.nybbles_len
+/// [`ExactLengthRequired`]: struct.NodeError#variant.ExactLengthRequired
+#[derive(Clone, Debug, PartialEq)]
+pub struct Node {
+ data: NodeData,
+}
+
+/// The node value for NULL_REVISION
+pub const NULL_NODE: Node = Node {
+ data: [0; NODE_BYTES_LENGTH],
+};
+
+impl From<NodeData> for Node {
+ fn from(data: NodeData) -> Node {
+ Node { data }
+ }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum NodeError {
+ ExactLengthRequired(usize, String),
+ PrefixTooLong(String),
+ HexError(FromHexError, String),
+}
+
+/// Low level utility function, also for prefixes
+fn get_nybble(s: &[u8], i: usize) -> u8 {
+ if i % 2 == 0 {
+ s[i / 2] >> 4
+ } else {
+ s[i / 2] & 0x0f
+ }
+}
+
+impl Node {
+ /// Retrieve the `i`th half-byte of the binary data.
+ ///
+ /// This is also the `i`th hexadecimal digit in numeric form,
+ /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
+ pub fn get_nybble(&self, i: usize) -> u8 {
+ get_nybble(&self.data, i)
+ }
+
+ /// Length of the data, in nybbles
+ pub fn nybbles_len(&self) -> usize {
+ // public exposure as an instance method only, so that we can
+ // easily support several sizes of hashes if needed in the future.
+ NODE_NYBBLES_LENGTH
+ }
+
+ /// Convert from hexadecimal string representation
+ ///
+ /// Exact length is required.
+ ///
+ /// To be used in FFI and I/O only, in order to facilitate future
+ /// changes of hash format.
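+    ///
+    /// A round-trip sketch (the 40-digit value is arbitrary test data):
+    ///
+    /// ```ignore
+    /// let hex = "12ef".repeat(10);
+    /// let node = Node::from_hex(&hex)?;
+    /// assert_eq!(node.encode_hex(), hex);
+    /// ```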
+ pub fn from_hex(hex: &str) -> Result<Node, NodeError> {
+ Ok(NodeData::from_hex(hex)
+ .map_err(|e| NodeError::from((e, hex)))?
+ .into())
+ }
+
+ /// Convert to hexadecimal string representation
+ ///
+ /// To be used in FFI and I/O only, in order to facilitate future
+ /// changes of hash format.
+ pub fn encode_hex(&self) -> String {
+ hex::encode(self.data)
+ }
+
+ /// Provide access to binary data
+ ///
+ /// This is needed by FFI layers, for instance to return expected
+ /// binary values to Python.
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.data
+ }
+}
+
+impl<T: AsRef<str>> From<(FromHexError, T)> for NodeError {
+ fn from(err_offender: (FromHexError, T)) -> Self {
+ let (err, offender) = err_offender;
+ match err {
+ FromHexError::InvalidStringLength => {
+ NodeError::ExactLengthRequired(
+ NODE_NYBBLES_LENGTH,
+ offender.as_ref().to_owned(),
+ )
+ }
+ _ => NodeError::HexError(err, offender.as_ref().to_owned()),
+ }
+ }
+}
+
+/// The beginning of a binary revision SHA.
+///
+/// Since it can potentially come from a hexadecimal representation with
+/// odd length, it needs to carry around whether the last 4 bits are relevant
+/// or not.
+#[derive(Debug, PartialEq)]
+pub struct NodePrefix {
+ buf: Vec<u8>,
+ is_odd: bool,
+}
+
+impl NodePrefix {
+ /// Convert from hexadecimal string representation
+ ///
+ /// Similarly to `hex::decode`, can be used with Unicode string types
+ /// (`String`, `&str`) as well as bytes.
+ ///
+ /// To be used in FFI and I/O only, in order to facilitate future
+ /// changes of hash format.
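+    ///
+    /// A sketch of the odd-length case (same values as
+    /// `test_prefix_from_hex` below); the trailing nybble ends up in the
+    /// high bits of an extra byte:
+    ///
+    /// ```ignore
+    /// let prefix = NodePrefix::from_hex("0e1")?;
+    /// assert_eq!(prefix.borrow().len(), 3);
+    /// assert_eq!(prefix.borrow().get_nybble(2), 1);
+    /// ```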
+ pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, NodeError> {
+ let hex = hex.as_ref();
+ let len = hex.len();
+ if len > NODE_NYBBLES_LENGTH {
+ return Err(NodeError::PrefixTooLong(
+ String::from_utf8_lossy(hex).to_owned().to_string(),
+ ));
+ }
+
+ let is_odd = len % 2 == 1;
+ let even_part = if is_odd { &hex[..len - 1] } else { hex };
+ let mut buf: Vec<u8> = Vec::from_hex(&even_part)
+ .map_err(|e| (e, String::from_utf8_lossy(hex)))?;
+
+ if is_odd {
+ let latest_char = char::from(hex[len - 1]);
+ let latest_nybble = latest_char.to_digit(16).ok_or_else(|| {
+ (
+ FromHexError::InvalidHexCharacter {
+ c: latest_char,
+ index: len - 1,
+ },
+ String::from_utf8_lossy(hex),
+ )
+ })? as u8;
+ buf.push(latest_nybble << 4);
+ }
+ Ok(NodePrefix { buf, is_odd })
+ }
+
+ pub fn borrow(&self) -> NodePrefixRef {
+ NodePrefixRef {
+ buf: &self.buf,
+ is_odd: self.is_odd,
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct NodePrefixRef<'a> {
+ buf: &'a [u8],
+ is_odd: bool,
+}
+
+impl<'a> NodePrefixRef<'a> {
+ pub fn len(&self) -> usize {
+ if self.is_odd {
+ self.buf.len() * 2 - 1
+ } else {
+ self.buf.len() * 2
+ }
+ }
+
+ pub fn is_prefix_of(&self, node: &Node) -> bool {
+ if self.is_odd {
+ let buf = self.buf;
+ let last_pos = buf.len() - 1;
+ node.data.starts_with(buf.split_at(last_pos).0)
+ && node.data[last_pos] >> 4 == buf[last_pos] >> 4
+ } else {
+ node.data.starts_with(self.buf)
+ }
+ }
+
+ /// Retrieve the `i`th half-byte from the prefix.
+ ///
+ /// This is also the `i`th hexadecimal digit in numeric form,
+ /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
+ pub fn get_nybble(&self, i: usize) -> u8 {
+ assert!(i < self.len());
+ get_nybble(self.buf, i)
+ }
+}
+
+/// A shortcut for full `Node` references
+impl<'a> From<&'a Node> for NodePrefixRef<'a> {
+ fn from(node: &'a Node) -> Self {
+ NodePrefixRef {
+ buf: &node.data,
+ is_odd: false,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn sample_node() -> Node {
+ let mut data = [0; NODE_BYTES_LENGTH];
+ data.copy_from_slice(&[
+ 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba,
+ 0x98, 0x76, 0x54, 0x32, 0x10, 0xde, 0xad, 0xbe, 0xef,
+ ]);
+ data.into()
+ }
+
+    /// Pad a hexadecimal string to reach `NODE_NYBBLES_LENGTH`
+ ///
+ /// The padding is made with zeros
+ pub fn hex_pad_right(hex: &str) -> String {
+ let mut res = hex.to_string();
+ while res.len() < NODE_NYBBLES_LENGTH {
+ res.push('0');
+ }
+ res
+ }
+
+ fn sample_node_hex() -> String {
+ hex_pad_right("0123456789abcdeffedcba9876543210deadbeef")
+ }
+
+ #[test]
+ fn test_node_from_hex() {
+ assert_eq!(Node::from_hex(&sample_node_hex()), Ok(sample_node()));
+
+ let mut short = hex_pad_right("0123");
+ short.pop();
+ short.pop();
+ assert_eq!(
+ Node::from_hex(&short),
+ Err(NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, short)),
+ );
+
+ let not_hex = hex_pad_right("012... oops");
+ assert_eq!(
+ Node::from_hex(¬_hex),
+ Err(NodeError::HexError(
+ FromHexError::InvalidHexCharacter { c: '.', index: 3 },
+ not_hex,
+ )),
+ );
+ }
+
+ #[test]
+ fn test_node_encode_hex() {
+ assert_eq!(sample_node().encode_hex(), sample_node_hex());
+ }
+
+ #[test]
+ fn test_prefix_from_hex() -> Result<(), NodeError> {
+ assert_eq!(
+ NodePrefix::from_hex("0e1")?,
+ NodePrefix {
+ buf: vec![14, 16],
+ is_odd: true
+ }
+ );
+ assert_eq!(
+ NodePrefix::from_hex("0e1a")?,
+ NodePrefix {
+ buf: vec![14, 26],
+ is_odd: false
+ }
+ );
+
+ // checking limit case
+ let node_as_vec = sample_node().data.iter().cloned().collect();
+ assert_eq!(
+ NodePrefix::from_hex(sample_node_hex())?,
+ NodePrefix {
+ buf: node_as_vec,
+ is_odd: false
+ }
+ );
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_prefix_from_hex_errors() {
+ assert_eq!(
+ NodePrefix::from_hex("testgr"),
+ Err(NodeError::HexError(
+ FromHexError::InvalidHexCharacter { c: 't', index: 0 },
+ "testgr".to_string()
+ ))
+ );
+ let mut long = NULL_NODE.encode_hex();
+ long.push('c');
+ match NodePrefix::from_hex(&long)
+ .expect_err("should be refused as too long")
+ {
+ NodeError::PrefixTooLong(s) => assert_eq!(s, long),
+ err => panic!(format!("Should have been TooLong, got {:?}", err)),
+ }
+ }
+
+ #[test]
+ fn test_is_prefix_of() -> Result<(), NodeError> {
+ let mut node_data = [0; NODE_BYTES_LENGTH];
+ node_data[0] = 0x12;
+ node_data[1] = 0xca;
+ let node = Node::from(node_data);
+ assert!(NodePrefix::from_hex("12")?.borrow().is_prefix_of(&node));
+ assert!(!NodePrefix::from_hex("1a")?.borrow().is_prefix_of(&node));
+ assert!(NodePrefix::from_hex("12c")?.borrow().is_prefix_of(&node));
+ assert!(!NodePrefix::from_hex("12d")?.borrow().is_prefix_of(&node));
+ Ok(())
+ }
+
+ #[test]
+ fn test_get_nybble() -> Result<(), NodeError> {
+ let prefix = NodePrefix::from_hex("dead6789cafe")?;
+ assert_eq!(prefix.borrow().get_nybble(0), 13);
+ assert_eq!(prefix.borrow().get_nybble(7), 9);
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+pub use tests::hex_pad_right;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/nodemap.rs Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,790 @@
+// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
+// and Mercurial contributors
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+//! Indexing facilities for fast retrieval of `Revision` from `Node`
+//!
+//! This provides a variation on the 16-ary radix tree that is
+//! provided as "nodetree" in revlog.c, ready for append-only persistence
+//! on disk.
+//!
+//! Following existing implicit conventions, the "nodemap" terminology
+//! is used in a more abstract context.
+
+use super::{
+ Node, NodeError, NodePrefix, NodePrefixRef, Revision, RevlogIndex,
+};
+
+use std::fmt;
+use std::ops::Deref;
+use std::ops::Index;
+
+#[derive(Debug, PartialEq)]
+pub enum NodeMapError {
+ MultipleResults,
+ InvalidNodePrefix(NodeError),
+ /// A `Revision` stored in the nodemap could not be found in the index
+ RevisionNotInIndex(Revision),
+}
+
+impl From<NodeError> for NodeMapError {
+ fn from(err: NodeError) -> Self {
+ NodeMapError::InvalidNodePrefix(err)
+ }
+}
+
+/// Mapping system from Mercurial nodes to revision numbers.
+///
+/// ## `RevlogIndex` and `NodeMap`
+///
+/// One way to think about their relationship is that
+/// the `NodeMap` is a prefix-oriented reverse index of the `Node` information
+/// carried by a [`RevlogIndex`].
+///
+/// Many of the methods in this trait take a `RevlogIndex` argument
+/// which is used for validation of their results. This index must naturally
+/// be the one the `NodeMap` is about, and it must be consistent.
+///
+/// Notably, the `NodeMap` must not store
+/// information about more `Revision` values than there are in the index.
+/// In these methods, if an encountered `Revision` is not in the index, a
+/// [`RevisionNotInIndex`] error is returned.
+///
+/// In insert operations, the rule is thus that the `RevlogIndex` must
+/// be updated first, and the `NodeMap` second.
+///
+/// [`RevisionNotInIndex`]: enum.NodeMapError.html#variant.RevisionNotInIndex
+/// [`RevlogIndex`]: ../trait.RevlogIndex.html
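+///
+/// A usage sketch, with `nm` any `NodeMap` implementation (such as the
+/// `NodeTree` below) and `idx` the consistent `RevlogIndex` it is about:
+///
+/// ```ignore
+/// match nm.find_hex(&idx, "1234d")? {
+///     Some(rev) => println!("unique match: {}", rev),
+///     None => println!("no match"),
+/// }
+/// ```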
+pub trait NodeMap {
+ /// Find the unique `Revision` having the given `Node`
+ ///
+ /// If no Revision matches the given `Node`, `Ok(None)` is returned.
+ fn find_node(
+ &self,
+ index: &impl RevlogIndex,
+ node: &Node,
+ ) -> Result<Option<Revision>, NodeMapError> {
+ self.find_bin(index, node.into())
+ }
+
+ /// Find the unique Revision whose `Node` starts with a given binary prefix
+ ///
+ /// If no Revision matches the given prefix, `Ok(None)` is returned.
+ ///
+ /// If several Revisions match the given prefix, a [`MultipleResults`]
+ /// error is returned.
+ fn find_bin<'a>(
+ &self,
+ idx: &impl RevlogIndex,
+ prefix: NodePrefixRef<'a>,
+ ) -> Result<Option<Revision>, NodeMapError>;
+
+ /// Find the unique Revision whose `Node` hexadecimal string representation
+ /// starts with a given prefix
+ ///
+ /// If no Revision matches the given prefix, `Ok(None)` is returned.
+ ///
+ /// If several Revisions match the given prefix, a [`MultipleResults`]
+ /// error is returned.
+ fn find_hex(
+ &self,
+ idx: &impl RevlogIndex,
+ prefix: &str,
+ ) -> Result<Option<Revision>, NodeMapError> {
+ self.find_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
+ }
+}
+
+pub trait MutableNodeMap: NodeMap {
+ fn insert<I: RevlogIndex>(
+ &mut self,
+ index: &I,
+ node: &Node,
+ rev: Revision,
+ ) -> Result<(), NodeMapError>;
+}
+
+/// Low level NodeTree [`Block`] elements
+///
+/// These are stored as-is, for instance on persistent storage.
+type RawElement = i32;
+
+/// High level representation of values in NodeTree
+/// [`Blocks`](struct.Block.html)
+///
+/// This is the high level representation that most algorithms should
+/// use.
+#[derive(Clone, Debug, Eq, PartialEq)]
+enum Element {
+ Rev(Revision),
+ Block(usize),
+ None,
+}
+
+impl From<RawElement> for Element {
+ /// Conversion from low level representation, after endianness conversion.
+ ///
+ /// See [`Block`](struct.Block.html) for explanation about the encoding.
+ fn from(raw: RawElement) -> Element {
+ if raw >= 0 {
+ Element::Block(raw as usize)
+ } else if raw == -1 {
+ Element::None
+ } else {
+ Element::Rev(-raw - 2)
+ }
+ }
+}
+
+impl From<Element> for RawElement {
+ fn from(element: Element) -> RawElement {
+ match element {
+            Element::None => -1,
+ Element::Block(i) => i as RawElement,
+ Element::Rev(rev) => -rev - 2,
+ }
+ }
+}
+
+/// A logical block of the `NodeTree`, packed with a fixed size.
+///
+/// These are always used in container types implementing
+/// `Index<usize, Output = Block>`, such as `&[Block]`.
+///
+/// As an array of integers, its ith element encodes the
+/// ith potential edge from the block, i.e. the edge followed for the
+/// hexadecimal digit (nybble) `i`, which is either:
+///
+/// - absent (value -1)
+/// - another `Block` in the same indexable container (value ≥ 0)
+/// - a `Revision` leaf (value ≤ -2)
+///
+/// Endianness has to be fixed for consistency on shared storage across
+/// different architectures.
+///
+/// A key difference with the C `nodetree` is that we need to be
+/// able to represent the [`Block`] at index 0, hence -1 is the empty marker
+/// rather than 0, and the `Revision` range has an upper limit of -2
+/// instead of -1.
+///
+/// Another related difference is that `NULL_REVISION` (-1) is not
+/// represented at all, because we want an immutable empty nodetree
+/// to be valid.
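+///
+/// A sketch of the value encoding, mirrored by `test_raw_block` below
+/// (raw values shown as seen after endianness conversion):
+///
+/// ```ignore
+/// assert_eq!(Element::from(-1), Element::None);
+/// assert_eq!(Element::from(3), Element::Block(3));
+/// assert_eq!(Element::from(-2), Element::Rev(0));
+/// ```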
+
+#[derive(Clone, PartialEq)]
+pub struct Block([RawElement; 16]);
+
+impl Block {
+ fn new() -> Self {
+ Block([-1; 16])
+ }
+
+ fn get(&self, nybble: u8) -> Element {
+ Element::from(RawElement::from_be(self.0[nybble as usize]))
+ }
+
+ fn set(&mut self, nybble: u8, element: Element) {
+ self.0[nybble as usize] = RawElement::to_be(element.into())
+ }
+}
+
+impl fmt::Debug for Block {
+ /// sparse representation for testing and debugging purposes
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_map()
+ .entries((0..16).filter_map(|i| match self.get(i) {
+ Element::None => None,
+ element => Some((i, element)),
+ }))
+ .finish()
+ }
+}
+
+/// A mutable 16-ary radix tree with the root block logically at the end
+///
+/// Because of the append only nature of our node trees, we need to
+/// keep the original untouched and store new blocks separately.
+///
+/// The mutable root `Block` is kept apart so that we don't have to rebump
+/// it on each insertion.
+pub struct NodeTree {
+ readonly: Box<dyn Deref<Target = [Block]> + Send>,
+ growable: Vec<Block>,
+ root: Block,
+}
+
+impl Index<usize> for NodeTree {
+ type Output = Block;
+
+ fn index(&self, i: usize) -> &Block {
+ let ro_len = self.readonly.len();
+ if i < ro_len {
+ &self.readonly[i]
+ } else if i == ro_len + self.growable.len() {
+ &self.root
+ } else {
+ &self.growable[i - ro_len]
+ }
+ }
+}
+
+/// Return `None` unless the `Node` for `rev` has the given prefix in `index`.
+fn has_prefix_or_none<'p>(
+ idx: &impl RevlogIndex,
+ prefix: NodePrefixRef<'p>,
+ rev: Revision,
+) -> Result<Option<Revision>, NodeMapError> {
+ idx.node(rev)
+ .ok_or_else(|| NodeMapError::RevisionNotInIndex(rev))
+ .map(|node| {
+ if prefix.is_prefix_of(node) {
+ Some(rev)
+ } else {
+ None
+ }
+ })
+}
+
+impl NodeTree {
+    /// Initialize a `NodeTree` from an immutable slice-like container
+    /// of `Block`
+ ///
+ /// We keep `readonly` and clone its root block if it isn't empty.
+ fn new(readonly: Box<dyn Deref<Target = [Block]> + Send>) -> Self {
+ let root = readonly
+ .last()
+ .map(|b| b.clone())
+ .unwrap_or_else(|| Block::new());
+ NodeTree {
+ readonly: readonly,
+ growable: Vec::new(),
+ root: root,
+ }
+ }
+
+ /// Total number of blocks
+ fn len(&self) -> usize {
+ self.readonly.len() + self.growable.len() + 1
+ }
+
+ /// Implemented for completeness
+ ///
+ /// A `NodeTree` always has at least the mutable root block.
+ #[allow(dead_code)]
+ fn is_empty(&self) -> bool {
+ false
+ }
+
+ /// Main working method for `NodeTree` searches
+ ///
+ /// This partial implementation lacks special cases for NULL_REVISION
+ fn lookup<'p>(
+ &self,
+ prefix: NodePrefixRef<'p>,
+ ) -> Result<Option<Revision>, NodeMapError> {
+ for visit_item in self.visit(prefix) {
+ if let Some(opt) = visit_item.final_revision() {
+ return Ok(opt);
+ }
+ }
+ Err(NodeMapError::MultipleResults)
+ }
+
+ fn visit<'n, 'p>(
+ &'n self,
+ prefix: NodePrefixRef<'p>,
+ ) -> NodeTreeVisitor<'n, 'p> {
+ NodeTreeVisitor {
+ nt: self,
+ prefix: prefix,
+ visit: self.len() - 1,
+ nybble_idx: 0,
+ done: false,
+ }
+ }
+ /// Return a mutable reference for `Block` at index `idx`.
+ ///
+ /// If `idx` lies in the immutable area, then the reference is to
+ /// a newly appended copy.
+ ///
+    /// Returns `(new_idx, mut_ref, glen)` where
+ ///
+ /// - `new_idx` is the index of the mutable `Block`
+ /// - `mut_ref` is a mutable reference to the mutable Block.
+ /// - `glen` is the new length of `self.growable`
+ ///
+ /// Note: the caller wouldn't be allowed to query `self.growable.len()`
+ /// itself because of the mutable borrow taken with the returned `Block`
+ fn mutable_block(&mut self, idx: usize) -> (usize, &mut Block, usize) {
+ let ro_blocks = &self.readonly;
+ let ro_len = ro_blocks.len();
+ let glen = self.growable.len();
+ if idx < ro_len {
+ // TODO OPTIM I think this makes two copies
+ self.growable.push(ro_blocks[idx].clone());
+ (glen + ro_len, &mut self.growable[glen], glen + 1)
+ } else if glen + ro_len == idx {
+ (idx, &mut self.root, glen)
+ } else {
+ (idx, &mut self.growable[idx - ro_len], glen)
+ }
+ }
+
+ /// Main insertion method
+ ///
+ /// This will dive in the node tree to find the deepest `Block` for
+ /// `node`, split it as much as needed and record `node` in there.
+ /// The method then backtracks, updating references in all the visited
+ /// blocks from the root.
+ ///
+ /// All the mutated `Block` are copied first to the growable part if
+ /// needed. That happens for those in the immutable part except the root.
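+    ///
+    /// A usage sketch, with `nt` a mutable `NodeTree` and `index`, `node`,
+    /// `rev` a consistent index, node and revision (this is what the
+    /// `TestNtIndex` helper in the tests below does):
+    ///
+    /// ```ignore
+    /// nt.insert(&index, &node, rev)?;
+    /// assert_eq!(nt.find_node(&index, &node)?, Some(rev));
+    /// ```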
+ pub fn insert<I: RevlogIndex>(
+ &mut self,
+ index: &I,
+ node: &Node,
+ rev: Revision,
+ ) -> Result<(), NodeMapError> {
+        let ro_len = self.readonly.len();
+
+ let mut visit_steps: Vec<_> = self.visit(node.into()).collect();
+ let read_nybbles = visit_steps.len();
+ // visit_steps cannot be empty, since we always visit the root block
+ let deepest = visit_steps.pop().unwrap();
+
+ let (mut block_idx, mut block, mut glen) =
+ self.mutable_block(deepest.block_idx);
+
+ if let Element::Rev(old_rev) = deepest.element {
+ let old_node = index
+ .node(old_rev)
+ .ok_or_else(|| NodeMapError::RevisionNotInIndex(old_rev))?;
+ if old_node == node {
+ return Ok(()); // avoid creating lots of useless blocks
+ }
+
+ // Looping over the tail of nybbles in both nodes, creating
+ // new blocks until we find the difference
+ let mut new_block_idx = ro_len + glen;
+ let mut nybble = deepest.nybble;
+ for nybble_pos in read_nybbles..node.nybbles_len() {
+ block.set(nybble, Element::Block(new_block_idx));
+
+ let new_nybble = node.get_nybble(nybble_pos);
+ let old_nybble = old_node.get_nybble(nybble_pos);
+
+ if old_nybble == new_nybble {
+ self.growable.push(Block::new());
+ block = &mut self.growable[glen];
+ glen += 1;
+ new_block_idx += 1;
+ nybble = new_nybble;
+ } else {
+ let mut new_block = Block::new();
+ new_block.set(old_nybble, Element::Rev(old_rev));
+ new_block.set(new_nybble, Element::Rev(rev));
+ self.growable.push(new_block);
+ break;
+ }
+ }
+ } else {
+ // Free slot in the deepest block: no splitting has to be done
+ block.set(deepest.nybble, Element::Rev(rev));
+ }
+
+ // Backtrack over visit steps to update references
+ while let Some(visited) = visit_steps.pop() {
+ let to_write = Element::Block(block_idx);
+ if visit_steps.is_empty() {
+ self.root.set(visited.nybble, to_write);
+ break;
+ }
+ let (new_idx, block, _) = self.mutable_block(visited.block_idx);
+ if block.get(visited.nybble) == to_write {
+ break;
+ }
+ block.set(visited.nybble, to_write);
+ block_idx = new_idx;
+ }
+ Ok(())
+ }
+}
+
+struct NodeTreeVisitor<'n, 'p> {
+ nt: &'n NodeTree,
+ prefix: NodePrefixRef<'p>,
+ visit: usize,
+ nybble_idx: usize,
+ done: bool,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+struct NodeTreeVisitItem {
+ block_idx: usize,
+ nybble: u8,
+ element: Element,
+}
+
+impl<'n, 'p> Iterator for NodeTreeVisitor<'n, 'p> {
+ type Item = NodeTreeVisitItem;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.done || self.nybble_idx >= self.prefix.len() {
+ return None;
+ }
+
+ let nybble = self.prefix.get_nybble(self.nybble_idx);
+ self.nybble_idx += 1;
+
+ let visit = self.visit;
+ let element = self.nt[visit].get(nybble);
+ if let Element::Block(idx) = element {
+ self.visit = idx;
+ } else {
+ self.done = true;
+ }
+
+ Some(NodeTreeVisitItem {
+ block_idx: visit,
+ nybble: nybble,
+ element: element,
+ })
+ }
+}
+
+impl NodeTreeVisitItem {
+ // Return `Some(opt)` if this item is final, with `opt` being the
+ // `Revision` that it may represent.
+ //
+ // If the item is not terminal, return `None`
+ fn final_revision(&self) -> Option<Option<Revision>> {
+ match self.element {
+ Element::Block(_) => None,
+ Element::Rev(r) => Some(Some(r)),
+ Element::None => Some(None),
+ }
+ }
+}
+
+impl From<Vec<Block>> for NodeTree {
+ fn from(vec: Vec<Block>) -> Self {
+ Self::new(Box::new(vec))
+ }
+}
+
+impl fmt::Debug for NodeTree {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let readonly: &[Block] = &*self.readonly;
+ write!(
+ f,
+ "readonly: {:?}, growable: {:?}, root: {:?}",
+ readonly, self.growable, self.root
+ )
+ }
+}
+
+impl Default for NodeTree {
+ /// Create a fully mutable empty NodeTree
+ fn default() -> Self {
+ NodeTree::new(Box::new(Vec::new()))
+ }
+}
+
+impl NodeMap for NodeTree {
+ fn find_bin<'a>(
+ &self,
+ idx: &impl RevlogIndex,
+ prefix: NodePrefixRef<'a>,
+ ) -> Result<Option<Revision>, NodeMapError> {
+ self.lookup(prefix.clone()).and_then(|opt| {
+ opt.map_or(Ok(None), |rev| has_prefix_or_none(idx, prefix, rev))
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::NodeMapError::*;
+ use super::*;
+ use crate::revlog::node::{hex_pad_right, Node};
+ use std::collections::HashMap;
+
+ /// Creates a `Block` using a syntax close to the `Debug` output
+ macro_rules! block {
+ {$($nybble:tt : $variant:ident($val:tt)),*} => (
+ {
+ let mut block = Block::new();
+ $(block.set($nybble, Element::$variant($val)));*;
+ block
+ }
+ )
+ }
+
+ #[test]
+ fn test_block_debug() {
+ let mut block = Block::new();
+ block.set(1, Element::Rev(3));
+ block.set(10, Element::Block(0));
+ assert_eq!(format!("{:?}", block), "{1: Rev(3), 10: Block(0)}");
+ }
+
+ #[test]
+ fn test_block_macro() {
+ let block = block! {5: Block(2)};
+ assert_eq!(format!("{:?}", block), "{5: Block(2)}");
+
+ let block = block! {13: Rev(15), 5: Block(2)};
+ assert_eq!(format!("{:?}", block), "{5: Block(2), 13: Rev(15)}");
+ }
+
+ #[test]
+ fn test_raw_block() {
+ let mut raw = [-1; 16];
+ raw[0] = 0;
+ raw[1] = RawElement::to_be(15);
+ raw[2] = RawElement::to_be(-2);
+ raw[3] = RawElement::to_be(-1);
+ raw[4] = RawElement::to_be(-3);
+ let block = Block(raw);
+ assert_eq!(block.get(0), Element::Block(0));
+ assert_eq!(block.get(1), Element::Block(15));
+ assert_eq!(block.get(3), Element::None);
+ assert_eq!(block.get(2), Element::Rev(0));
+ assert_eq!(block.get(4), Element::Rev(1));
+ }
+
+ type TestIndex = HashMap<Revision, Node>;
+
+ impl RevlogIndex for TestIndex {
+ fn node(&self, rev: Revision) -> Option<&Node> {
+ self.get(&rev)
+ }
+
+ fn len(&self) -> usize {
+ self.len()
+ }
+ }
+
+ /// Pad hexadecimal Node prefix with zeros on the right
+ ///
+    /// This avoids having to repeatedly write very long hexadecimal
+    /// strings for test data, and keeps the tests independent of the
+    /// actual hash size.
+ #[cfg(test)]
+ fn pad_node(hex: &str) -> Node {
+ Node::from_hex(&hex_pad_right(hex)).unwrap()
+ }
+
+ /// Pad hexadecimal Node prefix with zeros on the right, then insert
+ fn pad_insert(idx: &mut TestIndex, rev: Revision, hex: &str) {
+ idx.insert(rev, pad_node(hex));
+ }
+
+ fn sample_nodetree() -> NodeTree {
+ NodeTree::from(vec![
+ block![0: Rev(9)],
+ block![0: Rev(0), 1: Rev(9)],
+ block![0: Block(1), 1:Rev(1)],
+ ])
+ }
+
+ #[test]
+ fn test_nt_debug() {
+ let nt = sample_nodetree();
+ assert_eq!(
+ format!("{:?}", nt),
+ "readonly: \
+ [{0: Rev(9)}, {0: Rev(0), 1: Rev(9)}, {0: Block(1), 1: Rev(1)}], \
+ growable: [], \
+ root: {0: Block(1), 1: Rev(1)}",
+ );
+ }
+
+ #[test]
+ fn test_immutable_find_simplest() -> Result<(), NodeMapError> {
+ let mut idx: TestIndex = HashMap::new();
+ pad_insert(&mut idx, 1, "1234deadcafe");
+
+ let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
+ assert_eq!(nt.find_hex(&idx, "1")?, Some(1));
+ assert_eq!(nt.find_hex(&idx, "12")?, Some(1));
+ assert_eq!(nt.find_hex(&idx, "1234de")?, Some(1));
+ assert_eq!(nt.find_hex(&idx, "1a")?, None);
+ assert_eq!(nt.find_hex(&idx, "ab")?, None);
+
+ // and with full binary Nodes
+ assert_eq!(nt.find_node(&idx, idx.get(&1).unwrap())?, Some(1));
+ let unknown = Node::from_hex(&hex_pad_right("3d")).unwrap();
+ assert_eq!(nt.find_node(&idx, &unknown)?, None);
+ Ok(())
+ }
+
+ #[test]
+ fn test_immutable_find_one_jump() {
+ let mut idx = TestIndex::new();
+ pad_insert(&mut idx, 9, "012");
+ pad_insert(&mut idx, 0, "00a");
+
+ let nt = sample_nodetree();
+
+ assert_eq!(nt.find_hex(&idx, "0"), Err(MultipleResults));
+ assert_eq!(nt.find_hex(&idx, "01"), Ok(Some(9)));
+ assert_eq!(nt.find_hex(&idx, "00"), Ok(Some(0)));
+ assert_eq!(nt.find_hex(&idx, "00a"), Ok(Some(0)));
+ }
+
+ #[test]
+ fn test_mutated_find() -> Result<(), NodeMapError> {
+ let mut idx = TestIndex::new();
+ pad_insert(&mut idx, 9, "012");
+ pad_insert(&mut idx, 0, "00a");
+ pad_insert(&mut idx, 2, "cafe");
+ pad_insert(&mut idx, 3, "15");
+ pad_insert(&mut idx, 1, "10");
+
+ let nt = NodeTree {
+ readonly: sample_nodetree().readonly,
+ growable: vec![block![0: Rev(1), 5: Rev(3)]],
+ root: block![0: Block(1), 1:Block(3), 12: Rev(2)],
+ };
+ assert_eq!(nt.find_hex(&idx, "10")?, Some(1));
+ assert_eq!(nt.find_hex(&idx, "c")?, Some(2));
+ assert_eq!(nt.find_hex(&idx, "00")?, Some(0));
+ assert_eq!(nt.find_hex(&idx, "01")?, Some(9));
+ Ok(())
+ }
+
+ struct TestNtIndex {
+ index: TestIndex,
+ nt: NodeTree,
+ }
+
+ impl TestNtIndex {
+ fn new() -> Self {
+ TestNtIndex {
+ index: HashMap::new(),
+ nt: NodeTree::default(),
+ }
+ }
+
+ fn insert(
+ &mut self,
+ rev: Revision,
+ hex: &str,
+ ) -> Result<(), NodeMapError> {
+ let node = pad_node(hex);
+ self.index.insert(rev, node.clone());
+ self.nt.insert(&self.index, &node, rev)?;
+ Ok(())
+ }
+
+ fn find_hex(
+ &self,
+ prefix: &str,
+ ) -> Result<Option<Revision>, NodeMapError> {
+ self.nt.find_hex(&self.index, prefix)
+ }
+
+        /// Commit the mutable parts into a new fully read-only `NodeTree`,
+        /// keeping the index
+ fn commit(self) -> Self {
+ let mut as_vec: Vec<Block> =
+ self.nt.readonly.iter().map(|block| block.clone()).collect();
+ as_vec.extend(self.nt.growable);
+ as_vec.push(self.nt.root);
+
+ Self {
+ index: self.index,
+                nt: NodeTree::from(as_vec),
+ }
+ }
+ }
+
+ #[test]
+ fn test_insert_full_mutable() -> Result<(), NodeMapError> {
+ let mut idx = TestNtIndex::new();
+ idx.insert(0, "1234")?;
+ assert_eq!(idx.find_hex("1")?, Some(0));
+ assert_eq!(idx.find_hex("12")?, Some(0));
+
+ // let's trigger a simple split
+ idx.insert(1, "1a34")?;
+ assert_eq!(idx.nt.growable.len(), 1);
+ assert_eq!(idx.find_hex("12")?, Some(0));
+ assert_eq!(idx.find_hex("1a")?, Some(1));
+
+ // reinserting is a no_op
+ idx.insert(1, "1a34")?;
+ assert_eq!(idx.nt.growable.len(), 1);
+ assert_eq!(idx.find_hex("12")?, Some(0));
+ assert_eq!(idx.find_hex("1a")?, Some(1));
+
+ idx.insert(2, "1a01")?;
+ assert_eq!(idx.nt.growable.len(), 2);
+ assert_eq!(idx.find_hex("1a"), Err(NodeMapError::MultipleResults));
+ assert_eq!(idx.find_hex("12")?, Some(0));
+ assert_eq!(idx.find_hex("1a3")?, Some(1));
+ assert_eq!(idx.find_hex("1a0")?, Some(2));
+ assert_eq!(idx.find_hex("1a12")?, None);
+
+ // now let's make it split and create more than one additional block
+ idx.insert(3, "1a345")?;
+ assert_eq!(idx.nt.growable.len(), 4);
+ assert_eq!(idx.find_hex("1a340")?, Some(1));
+ assert_eq!(idx.find_hex("1a345")?, Some(3));
+ assert_eq!(idx.find_hex("1a341")?, None);
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_insert_extreme_splitting() -> Result<(), NodeMapError> {
+ // check that the splitting loop is long enough
+ let mut nt_idx = TestNtIndex::new();
+ let nt = &mut nt_idx.nt;
+ let idx = &mut nt_idx.index;
+
+ let node0_hex = hex_pad_right("444444");
+ let mut node1_hex = hex_pad_right("444444").clone();
+ node1_hex.pop();
+ node1_hex.push('5');
+ let node0 = Node::from_hex(&node0_hex).unwrap();
+ let node1 = Node::from_hex(&node1_hex).unwrap();
+
+ idx.insert(0, node0.clone());
+ nt.insert(idx, &node0, 0)?;
+ idx.insert(1, node1.clone());
+ nt.insert(idx, &node1, 1)?;
+
+ assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(0));
+ assert_eq!(nt.find_bin(idx, (&node1).into())?, Some(1));
+ Ok(())
+ }
+
+ #[test]
+ fn test_insert_partly_immutable() -> Result<(), NodeMapError> {
+ let mut idx = TestNtIndex::new();
+ idx.insert(0, "1234")?;
+ idx.insert(1, "1235")?;
+ idx.insert(2, "131")?;
+ idx.insert(3, "cafe")?;
+ let mut idx = idx.commit();
+ assert_eq!(idx.find_hex("1234")?, Some(0));
+ assert_eq!(idx.find_hex("1235")?, Some(1));
+ assert_eq!(idx.find_hex("131")?, Some(2));
+ assert_eq!(idx.find_hex("cafe")?, Some(3));
+
+ idx.insert(4, "123A")?;
+ assert_eq!(idx.find_hex("1234")?, Some(0));
+ assert_eq!(idx.find_hex("1235")?, Some(1));
+ assert_eq!(idx.find_hex("131")?, Some(2));
+ assert_eq!(idx.find_hex("cafe")?, Some(3));
+ assert_eq!(idx.find_hex("123A")?, Some(4));
+
+ idx.insert(5, "c0")?;
+ assert_eq!(idx.find_hex("cafe")?, Some(3));
+ assert_eq!(idx.find_hex("c0")?, Some(5));
+ assert_eq!(idx.find_hex("c1")?, None);
+ assert_eq!(idx.find_hex("1234")?, Some(0));
+
+ Ok(())
+ }
+}
--- a/rust/hg-core/src/utils.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/utils.rs Thu Feb 13 10:12:12 2020 -0800
@@ -7,8 +7,12 @@
//! Contains useful functions, traits, structs, etc. for use in core.
+use crate::utils::hg_path::HgPath;
+use std::{io::Write, ops::Deref};
+
pub mod files;
pub mod hg_path;
+pub mod path_auditor;
/// Useful until rust/issues/56345 is stable
///
@@ -111,3 +115,54 @@
}
}
}
+
+pub trait Escaped {
+ /// Return bytes escaped for display to the user
+ fn escaped_bytes(&self) -> Vec<u8>;
+}
+
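+/// A sketch of the expected output: quotes and backslashes gain a leading
+/// backslash, ordinary printable bytes are kept as-is.
+///
+/// ```ignore
+/// assert_eq!(b'a'.escaped_bytes(), b"a".to_vec());
+/// assert_eq!(b'\''.escaped_bytes(), br"\'".to_vec());
+/// ```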
+impl Escaped for u8 {
+ fn escaped_bytes(&self) -> Vec<u8> {
+ let mut acc = vec![];
+ match self {
+ c @ b'\'' | c @ b'\\' => {
+ acc.push(b'\\');
+ acc.push(*c);
+ }
+ b'\t' => {
+ acc.extend(br"\\t");
+ }
+ b'\n' => {
+ acc.extend(br"\\n");
+ }
+ b'\r' => {
+ acc.extend(br"\\r");
+ }
+ c if (*c < b' ' || *c >= 127) => {
+ write!(acc, "\\x{:x}", self).unwrap();
+ }
+ c => {
+ acc.push(*c);
+ }
+ }
+ acc
+ }
+}
+
+impl<'a, T: Escaped> Escaped for &'a [T] {
+ fn escaped_bytes(&self) -> Vec<u8> {
+ self.iter().flat_map(|item| item.escaped_bytes()).collect()
+ }
+}
+
+impl<T: Escaped> Escaped for Vec<T> {
+ fn escaped_bytes(&self) -> Vec<u8> {
+ self.deref().escaped_bytes()
+ }
+}
+
+impl<'a> Escaped for &'a HgPath {
+ fn escaped_bytes(&self) -> Vec<u8> {
+ self.as_bytes().escaped_bytes()
+ }
+}
--- a/rust/hg-core/src/utils/files.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/utils/files.rs Thu Feb 13 10:12:12 2020 -0800
@@ -9,11 +9,18 @@
//! Functions for fiddling with files.
-use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::utils::{
+ hg_path::{path_to_hg_path_buf, HgPath, HgPathBuf, HgPathError},
+ path_auditor::PathAuditor,
+ replace_slice,
+};
+use lazy_static::lazy_static;
+use same_file::is_same_file;
+use std::borrow::ToOwned;
+use std::fs::Metadata;
use std::iter::FusedIterator;
-
-use std::fs::Metadata;
-use std::path::Path;
+use std::ops::Deref;
+use std::path::{Path, PathBuf};
pub fn get_path_from_bytes(bytes: &[u8]) -> &Path {
let os_str;
@@ -62,6 +69,28 @@
impl<'a> FusedIterator for Ancestors<'a> {}
+/// An iterator over a repository path, yielding itself and its ancestors,
+/// each paired with the base name split off at that step.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct AncestorsWithBase<'a> {
+ next: Option<(&'a HgPath, &'a HgPath)>,
+}
+
+impl<'a> Iterator for AncestorsWithBase<'a> {
+ type Item = (&'a HgPath, &'a HgPath);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let next = self.next;
+ self.next = match self.next {
+ Some((s, _)) if s.is_empty() => None,
+ Some((s, _)) => Some(s.split_filename()),
+ None => None,
+ };
+ next
+ }
+}
+
+impl<'a> FusedIterator for AncestorsWithBase<'a> {}
+
/// Returns an iterator yielding ancestor directories of the given repository
/// path.
///
@@ -77,6 +106,25 @@
dirs
}
+/// Returns an iterator yielding ancestor directories of the given repository
+/// path, each paired with the name of the entry directly below it.
+///
+/// The path is separated by '/', and must not start with '/'.
+///
+/// The path itself isn't included unless it is b"" (meaning the root
+/// directory).
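+///
+/// A sketch of the yielded pairs (see `test_find_dirs_with_base_some`):
+///
+/// ```ignore
+/// let mut dirs = find_dirs_with_base(HgPath::new(b"foo/bar/baz"));
+/// assert_eq!(
+///     dirs.next(),
+///     Some((HgPath::new(b"foo/bar"), HgPath::new(b"baz")))
+/// );
+/// ```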
+pub(crate) fn find_dirs_with_base<'a>(
+ path: &'a HgPath,
+) -> AncestorsWithBase<'a> {
+ let mut dirs = AncestorsWithBase {
+ next: Some((path, HgPath::new(b""))),
+ };
+ if !path.is_empty() {
+ dirs.next(); // skip itself
+ }
+ dirs
+}
+
/// TODO more than ASCII?
pub fn normalize_case(path: &HgPath) -> HgPathBuf {
#[cfg(windows)] // NTFS compares via upper()
@@ -85,6 +133,41 @@
path.to_ascii_lowercase()
}
+lazy_static! {
+ static ref IGNORED_CHARS: Vec<Vec<u8>> = {
+ [
+ 0x200c, 0x200d, 0x200e, 0x200f, 0x202a, 0x202b, 0x202c, 0x202d,
+ 0x202e, 0x206a, 0x206b, 0x206c, 0x206d, 0x206e, 0x206f, 0xfeff,
+ ]
+ .iter()
+ .map(|code| {
+ std::char::from_u32(*code)
+ .unwrap()
+ .encode_utf8(&mut [0; 3])
+ .bytes()
+ .collect()
+ })
+ .collect()
+ };
+}
+
+fn hfs_ignore_clean(bytes: &[u8]) -> Vec<u8> {
+ let mut buf = bytes.to_owned();
+ let needs_escaping = bytes.iter().any(|b| *b == b'\xe2' || *b == b'\xef');
+ if needs_escaping {
+ for forbidden in IGNORED_CHARS.iter() {
+ replace_slice(&mut buf, forbidden, &[])
+ }
+ buf
+ } else {
+ buf
+ }
+}
+
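+/// Lower-case `bytes` (ASCII only) and strip the code points listed in
+/// `IGNORED_CHARS`.
+///
+/// A sketch of the behaviour (`\xef\xbb\xbf` is the UTF-8 encoding of
+/// U+FEFF, one of the ignored code points):
+///
+/// ```ignore
+/// assert_eq!(lower_clean(b"FOO\xef\xbb\xbfbar"), b"foobar".to_vec());
+/// ```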
+pub fn lower_clean(bytes: &[u8]) -> Vec<u8> {
+ hfs_ignore_clean(&bytes.to_ascii_lowercase())
+}
+
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
pub struct HgMetadata {
pub st_dev: u64,
@@ -111,9 +194,66 @@
}
}
+/// Returns the canonical path of `name`, given `cwd` and `root`
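+///
+/// A sketch of a typical result (mirroring `test_canonical_path` below):
+///
+/// ```ignore
+/// let p = canonical_path("/repo", "/repo/subdir", "filename")?;
+/// assert_eq!(p, PathBuf::from("subdir/filename"));
+/// ```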
+pub fn canonical_path(
+ root: impl AsRef<Path>,
+ cwd: impl AsRef<Path>,
+ name: impl AsRef<Path>,
+) -> Result<PathBuf, HgPathError> {
+ // TODO add missing normalization for other platforms
+ let root = root.as_ref();
+ let cwd = cwd.as_ref();
+ let name = name.as_ref();
+
+ let name = if !name.is_absolute() {
+ root.join(&cwd).join(&name)
+ } else {
+ name.to_owned()
+ };
+ let mut auditor = PathAuditor::new(&root);
+ if name != root && name.starts_with(&root) {
+ let name = name.strip_prefix(&root).unwrap();
+ auditor.audit_path(path_to_hg_path_buf(name)?)?;
+ return Ok(name.to_owned());
+ } else if name == root {
+ return Ok("".into());
+ } else {
+ // Determine whether `name' is in the hierarchy at or beneath `root',
+ // by iterating name=name.parent() until it returns `None` (can't
+ // check name == '/', because that doesn't work on windows).
+ let mut name = name.deref();
+ let original_name = name.to_owned();
+ loop {
+ let same = is_same_file(&name, &root).unwrap_or(false);
+ if same {
+ if name == original_name {
+ // `name` was actually the same as root (maybe a symlink)
+ return Ok("".into());
+ }
+ // `name` is a symlink to root, so `original_name` is under
+ // root
+ let rel_path = original_name.strip_prefix(&name).unwrap();
+ auditor.audit_path(path_to_hg_path_buf(&rel_path)?)?;
+ return Ok(rel_path.to_owned());
+ }
+ name = match name.parent() {
+ None => break,
+ Some(p) => p,
+ };
+ }
+ // TODO hint to the user about using --cwd
+ // Bubble up the responsibility to Python for now
+ Err(HgPathError::NotUnderRoot {
+ path: original_name.to_owned(),
+ root: root.to_owned(),
+ })
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
+ use pretty_assertions::assert_eq;
#[test]
fn find_dirs_some() {
@@ -133,4 +273,112 @@
assert_eq!(dirs.next(), None);
assert_eq!(dirs.next(), None);
}
+
+ #[test]
+ fn test_find_dirs_with_base_some() {
+ let mut dirs = super::find_dirs_with_base(HgPath::new(b"foo/bar/baz"));
+ assert_eq!(
+ dirs.next(),
+ Some((HgPath::new(b"foo/bar"), HgPath::new(b"baz")))
+ );
+ assert_eq!(
+ dirs.next(),
+ Some((HgPath::new(b"foo"), HgPath::new(b"bar")))
+ );
+ assert_eq!(dirs.next(), Some((HgPath::new(b""), HgPath::new(b"foo"))));
+ assert_eq!(dirs.next(), None);
+ assert_eq!(dirs.next(), None);
+ }
+
+ #[test]
+ fn test_find_dirs_with_base_empty() {
+ let mut dirs = super::find_dirs_with_base(HgPath::new(b""));
+ assert_eq!(dirs.next(), Some((HgPath::new(b""), HgPath::new(b""))));
+ assert_eq!(dirs.next(), None);
+ assert_eq!(dirs.next(), None);
+ }
+
+ #[test]
+ fn test_canonical_path() {
+ let root = Path::new("/repo");
+ let cwd = Path::new("/dir");
+ let name = Path::new("filename");
+ assert_eq!(
+ canonical_path(root, cwd, name),
+ Err(HgPathError::NotUnderRoot {
+ path: PathBuf::from("/dir/filename"),
+ root: root.to_path_buf()
+ })
+ );
+
+ let root = Path::new("/repo");
+ let cwd = Path::new("/");
+ let name = Path::new("filename");
+ assert_eq!(
+ canonical_path(root, cwd, name),
+ Err(HgPathError::NotUnderRoot {
+ path: PathBuf::from("/filename"),
+ root: root.to_path_buf()
+ })
+ );
+
+ let root = Path::new("/repo");
+ let cwd = Path::new("/");
+ let name = Path::new("repo/filename");
+ assert_eq!(
+ canonical_path(root, cwd, name),
+ Ok(PathBuf::from("filename"))
+ );
+
+ let root = Path::new("/repo");
+ let cwd = Path::new("/repo");
+ let name = Path::new("filename");
+ assert_eq!(
+ canonical_path(root, cwd, name),
+ Ok(PathBuf::from("filename"))
+ );
+
+ let root = Path::new("/repo");
+ let cwd = Path::new("/repo/subdir");
+ let name = Path::new("filename");
+ assert_eq!(
+ canonical_path(root, cwd, name),
+ Ok(PathBuf::from("subdir/filename"))
+ );
+ }
+
+ #[test]
+ fn test_canonical_path_not_rooted() {
+ use std::fs::create_dir;
+ use tempfile::tempdir;
+
+ let base_dir = tempdir().unwrap();
+ let base_dir_path = base_dir.path();
+ let beneath_repo = base_dir_path.join("a");
+ let root = base_dir_path.join("a/b");
+ let out_of_repo = base_dir_path.join("c");
+ let under_repo_symlink = out_of_repo.join("d");
+
+ create_dir(&beneath_repo).unwrap();
+ create_dir(&root).unwrap();
+
+ // TODO make portable
+ std::os::unix::fs::symlink(&root, &out_of_repo).unwrap();
+
+ assert_eq!(
+ canonical_path(&root, Path::new(""), out_of_repo),
+ Ok(PathBuf::from(""))
+ );
+ assert_eq!(
+ canonical_path(&root, Path::new(""), &beneath_repo),
+ Err(HgPathError::NotUnderRoot {
+ path: beneath_repo.to_owned(),
+ root: root.to_owned()
+ })
+ );
+ assert_eq!(
+ canonical_path(&root, Path::new(""), &under_repo_symlink),
+ Ok(PathBuf::from("d"))
+ );
+ }
}
--- a/rust/hg-core/src/utils/hg_path.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-core/src/utils/hg_path.rs Thu Feb 13 10:12:12 2020 -0800
@@ -15,12 +15,35 @@
pub enum HgPathError {
/// Bytes from the invalid `HgPath`
LeadingSlash(Vec<u8>),
- /// Bytes and index of the second slash
- ConsecutiveSlashes(Vec<u8>, usize),
- /// Bytes and index of the null byte
- ContainsNullByte(Vec<u8>, usize),
+ ConsecutiveSlashes {
+ bytes: Vec<u8>,
+ second_slash_index: usize,
+ },
+ ContainsNullByte {
+ bytes: Vec<u8>,
+ null_byte_index: usize,
+ },
/// Bytes
DecodeError(Vec<u8>),
+ /// The rest come from audit errors
+ EndsWithSlash(HgPathBuf),
+ ContainsIllegalComponent(HgPathBuf),
+ /// Path is inside the `.hg` folder
+ InsideDotHg(HgPathBuf),
+ IsInsideNestedRepo {
+ path: HgPathBuf,
+ nested_repo: HgPathBuf,
+ },
+ TraversesSymbolicLink {
+ path: HgPathBuf,
+ symlink: HgPathBuf,
+ },
+ NotFsCompliant(HgPathBuf),
+ /// `path` is the smallest invalid path
+ NotUnderRoot {
+ path: PathBuf,
+ root: PathBuf,
+ },
}
impl ToString for HgPathError {
@@ -29,17 +52,55 @@
HgPathError::LeadingSlash(bytes) => {
format!("Invalid HgPath '{:?}': has a leading slash.", bytes)
}
- HgPathError::ConsecutiveSlashes(bytes, pos) => format!(
- "Invalid HgPath '{:?}': consecutive slahes at pos {}.",
+ HgPathError::ConsecutiveSlashes {
+ bytes,
+ second_slash_index: pos,
+ } => format!(
+ "Invalid HgPath '{:?}': consecutive slashes at pos {}.",
bytes, pos
),
- HgPathError::ContainsNullByte(bytes, pos) => format!(
+ HgPathError::ContainsNullByte {
+ bytes,
+ null_byte_index: pos,
+ } => format!(
"Invalid HgPath '{:?}': contains null byte at pos {}.",
bytes, pos
),
HgPathError::DecodeError(bytes) => {
format!("Invalid HgPath '{:?}': could not be decoded.", bytes)
}
+ HgPathError::EndsWithSlash(path) => {
+ format!("Audit failed for '{}': ends with a slash.", path)
+ }
+ HgPathError::ContainsIllegalComponent(path) => format!(
+ "Audit failed for '{}': contains an illegal component.",
+ path
+ ),
+ HgPathError::InsideDotHg(path) => format!(
+ "Audit failed for '{}': is inside the '.hg' folder.",
+ path
+ ),
+ HgPathError::IsInsideNestedRepo {
+ path,
+ nested_repo: nested,
+ } => format!(
+ "Audit failed for '{}': is inside a nested repository '{}'.",
+ path, nested
+ ),
+ HgPathError::TraversesSymbolicLink { path, symlink } => format!(
+ "Audit failed for '{}': traverses symbolic link '{}'.",
+ path, symlink
+ ),
+ HgPathError::NotFsCompliant(path) => format!(
+ "Audit failed for '{}': cannot be turned into a \
+ filesystem path.",
+ path
+ ),
+ HgPathError::NotUnderRoot { path, root } => format!(
+ "Audit failed for '{}': not under root {}.",
+ path.display(),
+ root.display()
+ ),
}
}
}
@@ -112,10 +173,40 @@
pub fn contains(&self, other: u8) -> bool {
self.inner.contains(&other)
}
- pub fn starts_with(&self, needle: impl AsRef<HgPath>) -> bool {
+ pub fn starts_with(&self, needle: impl AsRef<Self>) -> bool {
self.inner.starts_with(needle.as_ref().as_bytes())
}
- pub fn join<T: ?Sized + AsRef<HgPath>>(&self, other: &T) -> HgPathBuf {
+ pub fn trim_trailing_slash(&self) -> &Self {
+ Self::new(if self.inner.last() == Some(&b'/') {
+ &self.inner[..self.inner.len() - 1]
+ } else {
+ &self.inner[..]
+ })
+ }
+ /// Returns a tuple of slices `(base, filename)` resulting from the split
+ /// at the rightmost `/`, if any.
+ ///
+ /// # Examples:
+ ///
+ /// ```
+ /// use hg::utils::hg_path::HgPath;
+ ///
+ /// let path = HgPath::new(b"cool/hg/path").split_filename();
+ /// assert_eq!(path, (HgPath::new(b"cool/hg"), HgPath::new(b"path")));
+ ///
+ /// let path = HgPath::new(b"pathwithoutsep").split_filename();
+ /// assert_eq!(path, (HgPath::new(b""), HgPath::new(b"pathwithoutsep")));
+ /// ```
+ pub fn split_filename(&self) -> (&Self, &Self) {
+ match &self.inner.iter().rposition(|c| *c == b'/') {
+ None => (HgPath::new(""), &self),
+ Some(size) => (
+ HgPath::new(&self.inner[..*size]),
+ HgPath::new(&self.inner[*size + 1..]),
+ ),
+ }
+ }
+ pub fn join<T: ?Sized + AsRef<Self>>(&self, other: &T) -> HgPathBuf {
let mut inner = self.inner.to_owned();
if inner.len() != 0 && inner.last() != Some(&b'/') {
inner.push(b'/');
@@ -123,21 +214,103 @@
inner.extend(other.as_ref().bytes());
HgPathBuf::from_bytes(&inner)
}
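+    /// Returns the directory part of `self`, i.e. everything up to (and
+    /// excluding) the last `/`, or the empty path if `self` contains no
+    /// `/` (see `test_parent` below).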
+ pub fn parent(&self) -> &Self {
+ let inner = self.as_bytes();
+ HgPath::new(match inner.iter().rposition(|b| *b == b'/') {
+ Some(pos) => &inner[..pos],
+ None => &[],
+ })
+ }
/// Given a base directory, returns the slice of `self` relative to the
/// base directory. If `base` is not a directory (does not end with a
/// `b'/'`), returns `None`.
- pub fn relative_to(&self, base: impl AsRef<HgPath>) -> Option<&HgPath> {
+ pub fn relative_to(&self, base: impl AsRef<Self>) -> Option<&Self> {
let base = base.as_ref();
if base.is_empty() {
return Some(self);
}
let is_dir = base.as_bytes().ends_with(b"/");
if is_dir && self.starts_with(base) {
- Some(HgPath::new(&self.inner[base.len()..]))
+ Some(Self::new(&self.inner[base.len()..]))
} else {
None
}
}
+
+ #[cfg(windows)]
+ /// Copied from the Python stdlib's `os.path.splitdrive` implementation.
+ ///
+ /// Split a pathname into drive/UNC sharepoint and relative path
+ /// specifiers. Returns a 2-tuple (drive_or_unc, path); either part may
+ /// be empty.
+ ///
+ /// If you assign
+ /// result = split_drive(p)
+ /// It is always true that:
+ /// result[0] + result[1] == p
+ ///
+ /// If the path contained a drive letter, drive_or_unc will contain
+ /// everything up to and including the colon.
+ /// e.g. split_drive("c:/dir") returns ("c:", "/dir")
+ ///
+ /// If the path contained a UNC path, the drive_or_unc will contain the
+ /// host name and share up to but not including the fourth directory
+ /// separator character.
+ /// e.g. split_drive("//host/computer/dir") returns ("//host/computer",
+ /// "/dir")
+ ///
+ /// Paths cannot contain both a drive letter and a UNC path.
+ pub fn split_drive<'a>(&self) -> (&HgPath, &HgPath) {
+ let bytes = self.as_bytes();
+ let is_sep = |b| std::path::is_separator(b as char);
+
+ if self.len() < 2 {
+ (HgPath::new(b""), &self)
+ } else if is_sep(bytes[0])
+ && is_sep(bytes[1])
+ && (self.len() == 2 || !is_sep(bytes[2]))
+ {
+ // Is a UNC path:
+ // vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
+ // \\machine\mountpoint\directory\etc\...
+            //           directory ^^^^^^^^^^^^^^^
+
+ let machine_end_index = bytes[2..].iter().position(|b| is_sep(*b));
+ let mountpoint_start_index = if let Some(i) = machine_end_index {
+ i + 2
+ } else {
+ return (HgPath::new(b""), &self);
+ };
+
+ match bytes[mountpoint_start_index + 1..]
+ .iter()
+ .position(|b| is_sep(*b))
+ {
+ // A UNC path can't have two slashes in a row
+ // (after the initial two)
+ Some(0) => (HgPath::new(b""), &self),
+ Some(i) => {
+ let (a, b) =
+ bytes.split_at(mountpoint_start_index + 1 + i);
+ (HgPath::new(a), HgPath::new(b))
+ }
+ None => (&self, HgPath::new(b"")),
+ }
+ } else if bytes[1] == b':' {
+ // Drive path c:\directory
+ let (a, b) = bytes.split_at(2);
+ (HgPath::new(a), HgPath::new(b))
+ } else {
+ (HgPath::new(b""), &self)
+ }
+ }
+
+ #[cfg(unix)]
+ /// Split a pathname into drive and path. On Posix, drive is always empty.
+ pub fn split_drive(&self) -> (&HgPath, &HgPath) {
+ (HgPath::new(b""), &self)
+ }
+
/// Checks for errors in the path, short-circuiting at the first one.
/// This generates fine-grained errors useful for debugging.
/// To simply check if the path is valid during tests, use `is_valid`.
@@ -154,17 +327,17 @@
for (index, byte) in bytes.iter().enumerate() {
match byte {
0 => {
- return Err(HgPathError::ContainsNullByte(
- bytes.to_vec(),
- index,
- ))
+ return Err(HgPathError::ContainsNullByte {
+ bytes: bytes.to_vec(),
+ null_byte_index: index,
+ })
}
b'/' => {
if previous_byte.is_some() && previous_byte == Some(b'/') {
- return Err(HgPathError::ConsecutiveSlashes(
- bytes.to_vec(),
- index,
- ));
+ return Err(HgPathError::ConsecutiveSlashes {
+ bytes: bytes.to_vec(),
+ second_slash_index: index,
+ });
}
}
_ => (),
@@ -348,6 +521,7 @@
#[cfg(test)]
mod tests {
use super::*;
+ use pretty_assertions::assert_eq;
#[test]
fn test_path_states() {
@@ -356,11 +530,17 @@
HgPath::new(b"/").check_state()
);
assert_eq!(
- Err(HgPathError::ConsecutiveSlashes(b"a/b//c".to_vec(), 4)),
+ Err(HgPathError::ConsecutiveSlashes {
+ bytes: b"a/b//c".to_vec(),
+ second_slash_index: 4
+ }),
HgPath::new(b"a/b//c").check_state()
);
assert_eq!(
- Err(HgPathError::ContainsNullByte(b"a/b/\0c".to_vec(), 4)),
+ Err(HgPathError::ContainsNullByte {
+ bytes: b"a/b/\0c".to_vec(),
+ null_byte_index: 4
+ }),
HgPath::new(b"a/b/\0c").check_state()
);
// TODO test HgPathError::DecodeError for the Windows implementation.
@@ -473,4 +653,116 @@
let base = HgPath::new(b"ends/");
assert_eq!(Some(HgPath::new(b"with/dir/")), path.relative_to(base));
}
+
+ #[test]
+ #[cfg(unix)]
+ fn test_split_drive() {
+ // Taken from the Python stdlib's tests
+ assert_eq!(
+ HgPath::new(br"/foo/bar").split_drive(),
+ (HgPath::new(b""), HgPath::new(br"/foo/bar"))
+ );
+ assert_eq!(
+ HgPath::new(br"foo:bar").split_drive(),
+ (HgPath::new(b""), HgPath::new(br"foo:bar"))
+ );
+ assert_eq!(
+ HgPath::new(br":foo:bar").split_drive(),
+ (HgPath::new(b""), HgPath::new(br":foo:bar"))
+ );
+ // Also try NT paths; should not split them
+ assert_eq!(
+ HgPath::new(br"c:\foo\bar").split_drive(),
+ (HgPath::new(b""), HgPath::new(br"c:\foo\bar"))
+ );
+ assert_eq!(
+ HgPath::new(b"c:/foo/bar").split_drive(),
+ (HgPath::new(b""), HgPath::new(br"c:/foo/bar"))
+ );
+ assert_eq!(
+ HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(),
+ (
+ HgPath::new(b""),
+ HgPath::new(br"\\conky\mountpoint\foo\bar")
+ )
+ );
+ }
+
+ #[test]
+ #[cfg(windows)]
+ fn test_split_drive() {
+ assert_eq!(
+ HgPath::new(br"c:\foo\bar").split_drive(),
+ (HgPath::new(br"c:"), HgPath::new(br"\foo\bar"))
+ );
+ assert_eq!(
+ HgPath::new(b"c:/foo/bar").split_drive(),
+ (HgPath::new(br"c:"), HgPath::new(br"/foo/bar"))
+ );
+ assert_eq!(
+ HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(),
+ (
+ HgPath::new(br"\\conky\mountpoint"),
+ HgPath::new(br"\foo\bar")
+ )
+ );
+ assert_eq!(
+ HgPath::new(br"//conky/mountpoint/foo/bar").split_drive(),
+ (
+ HgPath::new(br"//conky/mountpoint"),
+ HgPath::new(br"/foo/bar")
+ )
+ );
+ assert_eq!(
+ HgPath::new(br"\\\conky\mountpoint\foo\bar").split_drive(),
+ (
+ HgPath::new(br""),
+ HgPath::new(br"\\\conky\mountpoint\foo\bar")
+ )
+ );
+ assert_eq!(
+ HgPath::new(br"///conky/mountpoint/foo/bar").split_drive(),
+ (
+ HgPath::new(br""),
+ HgPath::new(br"///conky/mountpoint/foo/bar")
+ )
+ );
+ assert_eq!(
+ HgPath::new(br"\\conky\\mountpoint\foo\bar").split_drive(),
+ (
+ HgPath::new(br""),
+ HgPath::new(br"\\conky\\mountpoint\foo\bar")
+ )
+ );
+ assert_eq!(
+ HgPath::new(br"//conky//mountpoint/foo/bar").split_drive(),
+ (
+ HgPath::new(br""),
+ HgPath::new(br"//conky//mountpoint/foo/bar")
+ )
+ );
+ // UNC part containing U+0130
+ assert_eq!(
+ HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT/foo/bar").split_drive(),
+ (
+ HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT"),
+ HgPath::new(br"/foo/bar")
+ )
+ );
+ }
+
+ #[test]
+ fn test_parent() {
+ let path = HgPath::new(b"");
+ assert_eq!(path.parent(), path);
+
+ let path = HgPath::new(b"a");
+ assert_eq!(path.parent(), HgPath::new(b""));
+
+ let path = HgPath::new(b"a/b");
+ assert_eq!(path.parent(), HgPath::new(b"a"));
+
+ let path = HgPath::new(b"a/other/b");
+ assert_eq!(path.parent(), HgPath::new(b"a/other"));
+ }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/utils/path_auditor.rs Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,230 @@
+// path_auditor.rs
+//
+// Copyright 2020
+// Raphaël Gomès <rgomes@octobus.net>,
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::utils::{
+ files::lower_clean,
+ find_slice_in_slice,
+ hg_path::{hg_path_to_path_buf, HgPath, HgPathBuf, HgPathError},
+};
+use std::collections::HashSet;
+use std::path::{Path, PathBuf};
+
+/// Ensures that a path is valid for use in the repository, i.e. does not use
+/// any banned components, does not traverse a symlink, etc.
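+///
+/// A usage sketch (`/tmp/repo` and the audited paths are hypothetical and
+/// assumed not to exist on the filesystem):
+///
+/// ```ignore
+/// let mut auditor = PathAuditor::new("/tmp/repo");
+/// assert!(auditor.check(HgPath::new(b"this/is/ok.txt")));
+/// assert!(!auditor.check(HgPath::new(b".hg/00changelog.i")));
+/// ```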
+#[derive(Debug, Default)]
+pub struct PathAuditor {
+ audited: HashSet<HgPathBuf>,
+ audited_dirs: HashSet<HgPathBuf>,
+ root: PathBuf,
+}
+
+impl PathAuditor {
+ pub fn new(root: impl AsRef<Path>) -> Self {
+ Self {
+ root: root.as_ref().to_owned(),
+ ..Default::default()
+ }
+ }
+ pub fn audit_path(
+ &mut self,
+ path: impl AsRef<HgPath>,
+ ) -> Result<(), HgPathError> {
+ // TODO windows "localpath" normalization
+ let path = path.as_ref();
+ if path.is_empty() {
+ return Ok(());
+ }
+ // TODO case normalization
+ if self.audited.contains(path) {
+ return Ok(());
+ }
+ // AIX ignores "/" at end of path, others raise EISDIR.
+ let last_byte = path.as_bytes()[path.len() - 1];
+ if last_byte == b'/' || last_byte == b'\\' {
+ return Err(HgPathError::EndsWithSlash(path.to_owned()));
+ }
+ let parts: Vec<_> = path
+ .as_bytes()
+ .split(|b| std::path::is_separator(*b as char))
+ .collect();
+
+ let first_component = lower_clean(parts[0]);
+ let first_component = first_component.as_slice();
+ if !path.split_drive().0.is_empty()
+ || (first_component == b".hg"
+ || first_component == b".hg."
+ || first_component == b"")
+ || parts.iter().any(|c| c == b"..")
+ {
+ return Err(HgPathError::InsideDotHg(path.to_owned()));
+ }
+
+ // Windows shortname aliases
+ for part in parts.iter() {
+ if part.contains(&b'~') {
+ let mut split = part.splitn(1, |b| *b == b'~');
+ let first =
+ split.next().unwrap().to_owned().to_ascii_uppercase();
+ let last = split.next().unwrap();
+ if last.iter().all(u8::is_ascii_digit)
+ && (first == b"HG" || first == b"HG8B6C")
+ {
+ return Err(HgPathError::ContainsIllegalComponent(
+ path.to_owned(),
+ ));
+ }
+ }
+ }
+ let lower_path = lower_clean(path.as_bytes());
+ if find_slice_in_slice(&lower_path, b".hg").is_some() {
+ let lower_parts: Vec<_> = path
+ .as_bytes()
+ .split(|b| std::path::is_separator(*b as char))
+ .collect();
+ for pattern in [b".hg".to_vec(), b".hg.".to_vec()].iter() {
+ if let Some(pos) = lower_parts[1..]
+ .iter()
+ .position(|part| part == &pattern.as_slice())
+ {
+ let base = lower_parts[..=pos]
+ .iter()
+ .fold(HgPathBuf::new(), |acc, p| {
+ acc.join(HgPath::new(p))
+ });
+ return Err(HgPathError::IsInsideNestedRepo {
+ path: path.to_owned(),
+ nested_repo: base,
+ });
+ }
+ }
+ }
+
+ let parts = &parts[..parts.len().saturating_sub(1)];
+
+ // We don't want to add "foo/bar/baz" to `audited_dirs` before checking
+ // if there's a "foo/.hg" directory. This also means we won't
+ // accidentally traverse a symlink into some other filesystem (which
+ // is potentially expensive to access).
+ for index in 0..parts.len() {
+ let prefix = &parts[..index + 1].join(&b'/');
+ let prefix = HgPath::new(prefix);
+ if self.audited_dirs.contains(prefix) {
+ continue;
+ }
+ self.check_filesystem(&prefix, &path)?;
+ }
+
+ self.audited.insert(path.to_owned());
+
+ Ok(())
+ }
+
+ pub fn check_filesystem(
+ &self,
+ prefix: impl AsRef<HgPath>,
+ path: impl AsRef<HgPath>,
+ ) -> Result<(), HgPathError> {
+ let prefix = prefix.as_ref();
+ let path = path.as_ref();
+ let current_path = self.root.join(
+ hg_path_to_path_buf(prefix)
+ .map_err(|_| HgPathError::NotFsCompliant(path.to_owned()))?,
+ );
+ match std::fs::symlink_metadata(&current_path) {
+ Err(e) => {
+ // EINVAL can be raised as invalid path syntax under win32.
+ if e.kind() != std::io::ErrorKind::NotFound
+ && e.kind() != std::io::ErrorKind::InvalidInput
+ && e.raw_os_error() != Some(20)
+ {
+ // Rust does not yet have an `ErrorKind` for
+ // `NotADirectory` (errno 20)
+ // It happens if the dirstate contains `foo/bar` and
+ // foo is not a directory
+ return Err(HgPathError::NotFsCompliant(path.to_owned()));
+ }
+ }
+ Ok(meta) => {
+ if meta.file_type().is_symlink() {
+ return Err(HgPathError::TraversesSymbolicLink {
+ path: path.to_owned(),
+ symlink: prefix.to_owned(),
+ });
+ }
+ if meta.file_type().is_dir()
+ && current_path.join(".hg").is_dir()
+ {
+ return Err(HgPathError::IsInsideNestedRepo {
+ path: path.to_owned(),
+ nested_repo: prefix.to_owned(),
+ });
+ }
+ }
+ };
+
+ Ok(())
+ }
+
+ pub fn check(&mut self, path: impl AsRef<HgPath>) -> bool {
+ self.audit_path(path).is_ok()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::utils::files::get_path_from_bytes;
+ use crate::utils::hg_path::path_to_hg_path_buf;
+
+ #[test]
+ fn test_path_auditor() {
+ let mut auditor = PathAuditor::new(get_path_from_bytes(b"/tmp"));
+
+ let path = HgPath::new(b".hg/00changelog.i");
+ assert_eq!(
+ auditor.audit_path(path),
+ Err(HgPathError::InsideDotHg(path.to_owned()))
+ );
+ let path = HgPath::new(b"this/is/nested/.hg/thing.txt");
+ assert_eq!(
+ auditor.audit_path(path),
+ Err(HgPathError::IsInsideNestedRepo {
+ path: path.to_owned(),
+ nested_repo: HgPathBuf::from_bytes(b"this/is/nested")
+ })
+ );
+
+ use std::fs::{create_dir, File};
+ use tempfile::tempdir;
+
+ let base_dir = tempdir().unwrap();
+ let base_dir_path = base_dir.path();
+ let a = base_dir_path.join("a");
+ let b = base_dir_path.join("b");
+ create_dir(&a).unwrap();
+ let in_a_path = a.join("in_a");
+ File::create(in_a_path).unwrap();
+
+ // TODO make portable
+ std::os::unix::fs::symlink(&a, &b).unwrap();
+
+ let buf = b.join("in_a").components().skip(2).collect::<PathBuf>();
+ eprintln!("buf: {}", buf.display());
+ let path = path_to_hg_path_buf(buf).unwrap();
+ assert_eq!(
+ auditor.audit_path(&path),
+ Err(HgPathError::TraversesSymbolicLink {
+ path: path,
+ symlink: path_to_hg_path_buf(
+ b.components().skip(2).collect::<PathBuf>()
+ )
+ .unwrap()
+ })
+ );
+ }
+}
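
For reviewers unfamiliar with the new module, the following is a minimal usage
sketch of PathAuditor. It is illustrative only and not part of the patch; it
assumes the module is re-exported from the `hg` crate as
`hg::utils::path_auditor`, and the repository root below is a hypothetical
placeholder.

    use hg::utils::hg_path::HgPath;
    use hg::utils::path_auditor::PathAuditor;

    fn main() {
        // Hypothetical repository root; results are cached per auditor instance.
        let mut auditor = PathAuditor::new("/path/to/repo");

        // `check` is the boolean convenience wrapper around `audit_path`.
        assert!(auditor.check(HgPath::new(b"dir/file.txt")));

        // Paths whose first component is `.hg` are rejected outright...
        assert!(!auditor.check(HgPath::new(b".hg/store/00changelog.i")));

        // ...and paths crossing into a nested repository report which one.
        if let Err(e) = auditor.audit_path(HgPath::new(b"nested/.hg/hgrc")) {
            eprintln!("rejected: {:?}", e);
        }
    }
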
--- a/rust/hg-cpython/Cargo.toml Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/Cargo.toml Thu Feb 13 10:12:12 2020 -0800
@@ -10,6 +10,7 @@
[features]
default = ["python27"]
+with-re2 = ["hg-core/with-re2"]
# Features to build an extension module:
python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
@@ -21,9 +22,9 @@
python3-bin = ["cpython/python3-sys"]
[dependencies]
-hg-core = { path = "../hg-core" }
+hg-core = { path = "../hg-core"}
libc = '*'
[dependencies.cpython]
-version = "0.3"
+version = "0.4"
default-features = false
--- a/rust/hg-cpython/src/dirstate/copymap.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/src/dirstate/copymap.rs Thu Feb 13 10:12:12 2020 -0800
@@ -8,11 +8,12 @@
//! Bindings for `hg::dirstate::dirstate_map::CopyMap` provided by the
//! `hg-core` package.
-use cpython::{PyBytes, PyClone, PyDict, PyObject, PyResult, Python};
+use cpython::{
+ PyBytes, PyClone, PyDict, PyObject, PyResult, Python, UnsafePyLeaked,
+};
use std::cell::RefCell;
use crate::dirstate::dirstate_map::DirstateMap;
-use crate::ref_sharing::PyLeaked;
use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
py_class!(pub class CopyMap |py| {
@@ -104,14 +105,14 @@
py_shared_iterator!(
CopyMapKeysIterator,
- PyLeaked<CopyMapIter<'static>>,
+ UnsafePyLeaked<CopyMapIter<'static>>,
CopyMap::translate_key,
Option<PyBytes>
);
py_shared_iterator!(
CopyMapItemsIterator,
- PyLeaked<CopyMapIter<'static>>,
+ UnsafePyLeaked<CopyMapIter<'static>>,
CopyMap::translate_key_value,
Option<(PyBytes, PyBytes)>
);
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs Thu Feb 13 10:12:12 2020 -0800
@@ -13,11 +13,10 @@
use cpython::{
exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
- Python,
+ Python, UnsafePyLeaked,
};
use crate::dirstate::extract_dirstate;
-use crate::ref_sharing::{PyLeaked, PySharedRefCell};
use hg::{
utils::hg_path::{HgPath, HgPathBuf},
DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError,
@@ -25,7 +24,7 @@
};
py_class!(pub class Dirs |py| {
- data inner: PySharedRefCell<DirsMultiset>;
+ @shared data inner: DirsMultiset;
// `map` is either a `dict` or a flat iterator (usually a `set`, sometimes
// a `list`)
@@ -65,14 +64,11 @@
})?
};
- Self::create_instance(
- py,
- PySharedRefCell::new(inner),
- )
+ Self::create_instance(py, inner)
}
def addpath(&self, path: PyObject) -> PyResult<PyObject> {
- self.inner_shared(py).borrow_mut()?.add_path(
+ self.inner(py).borrow_mut().add_path(
HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
).and(Ok(py.None())).or_else(|e| {
match e {
@@ -90,7 +86,7 @@
}
def delpath(&self, path: PyObject) -> PyResult<PyObject> {
- self.inner_shared(py).borrow_mut()?.delete_path(
+ self.inner(py).borrow_mut().delete_path(
HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
)
.and(Ok(py.None()))
@@ -109,7 +105,7 @@
})
}
def __iter__(&self) -> PyResult<DirsMultisetKeysIterator> {
- let leaked_ref = self.inner_shared(py).leak_immutable();
+ let leaked_ref = self.inner(py).leak_immutable();
DirsMultisetKeysIterator::from_inner(
py,
unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -117,17 +113,15 @@
}
def __contains__(&self, item: PyObject) -> PyResult<bool> {
- Ok(self.inner_shared(py).borrow().contains(HgPath::new(
+ Ok(self.inner(py).borrow().contains(HgPath::new(
item.extract::<PyBytes>(py)?.data(py).as_ref(),
)))
}
});
-py_shared_ref!(Dirs, DirsMultiset, inner, inner_shared);
-
impl Dirs {
pub fn from_inner(py: Python, d: DirsMultiset) -> PyResult<Self> {
- Self::create_instance(py, PySharedRefCell::new(d))
+ Self::create_instance(py, d)
}
fn translate_key(
@@ -140,7 +134,7 @@
py_shared_iterator!(
DirsMultisetKeysIterator,
- PyLeaked<DirsMultisetIter<'static>>,
+ UnsafePyLeaked<DirsMultisetIter<'static>>,
Dirs::translate_key,
Option<PyBytes>
);
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Thu Feb 13 10:12:12 2020 -0800
@@ -15,13 +15,13 @@
use cpython::{
exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
PyObject, PyResult, PyString, PyTuple, Python, PythonObject, ToPyObject,
+ UnsafePyLeaked,
};
use crate::{
dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
dirstate::non_normal_entries::NonNormalEntries,
dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
- ref_sharing::{PyLeaked, PySharedRefCell},
};
use hg::{
utils::hg_path::{HgPath, HgPathBuf},
@@ -43,18 +43,15 @@
// All attributes also have to have a separate refcount data attribute for
// leaks, with all methods that go along for reference sharing.
py_class!(pub class DirstateMap |py| {
- data inner: PySharedRefCell<RustDirstateMap>;
+ @shared data inner: RustDirstateMap;
def __new__(_cls, _root: PyObject) -> PyResult<Self> {
let inner = RustDirstateMap::default();
- Self::create_instance(
- py,
- PySharedRefCell::new(inner),
- )
+ Self::create_instance(py, inner)
}
def clear(&self) -> PyResult<PyObject> {
- self.inner_shared(py).borrow_mut()?.clear();
+ self.inner(py).borrow_mut().clear();
Ok(py.None())
}
@@ -64,7 +61,7 @@
default: Option<PyObject> = None
) -> PyResult<Option<PyObject>> {
let key = key.extract::<PyBytes>(py)?;
- match self.inner_shared(py).borrow().get(HgPath::new(key.data(py))) {
+ match self.inner(py).borrow().get(HgPath::new(key.data(py))) {
Some(entry) => {
Ok(Some(make_dirstate_tuple(py, entry)?))
},
@@ -81,7 +78,7 @@
size: PyObject,
mtime: PyObject
) -> PyResult<PyObject> {
- self.inner_shared(py).borrow_mut()?.add_file(
+ self.inner(py).borrow_mut().add_file(
HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
oldstate.extract::<PyBytes>(py)?.data(py)[0]
.try_into()
@@ -109,7 +106,7 @@
oldstate: PyObject,
size: PyObject
) -> PyResult<PyObject> {
- self.inner_shared(py).borrow_mut()?
+ self.inner(py).borrow_mut()
.remove_file(
HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
oldstate.extract::<PyBytes>(py)?.data(py)[0]
@@ -133,7 +130,7 @@
f: PyObject,
oldstate: PyObject
) -> PyResult<PyBool> {
- self.inner_shared(py).borrow_mut()?
+ self.inner(py).borrow_mut()
.drop_file(
HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
oldstate.extract::<PyBytes>(py)?.data(py)[0]
@@ -164,13 +161,13 @@
))
})
.collect();
- self.inner_shared(py).borrow_mut()?
+ self.inner(py).borrow_mut()
.clear_ambiguous_times(files?, now.extract(py)?);
Ok(py.None())
}
def other_parent_entries(&self) -> PyResult<PyObject> {
- let mut inner_shared = self.inner_shared(py).borrow_mut()?;
+ let mut inner_shared = self.inner(py).borrow_mut();
let (_, other_parent) =
inner_shared.get_non_normal_other_parent_entries();
@@ -196,8 +193,8 @@
def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
let key = key.extract::<PyBytes>(py)?;
Ok(self
- .inner_shared(py)
- .borrow_mut()?
+ .inner(py)
+ .borrow_mut()
.get_non_normal_other_parent_entries().0
.as_ref()
.unwrap()
@@ -211,8 +208,8 @@
&format!(
"NonNormalEntries: {:?}",
self
- .inner_shared(py)
- .borrow_mut()?
+ .inner(py)
+ .borrow_mut()
.get_non_normal_other_parent_entries().0
.as_ref()
.unwrap().iter().map(|o| o))
@@ -223,8 +220,8 @@
def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
let key = key.extract::<PyBytes>(py)?;
self
- .inner_shared(py)
- .borrow_mut()?
+ .inner(py)
+ .borrow_mut()
.non_normal_entries_remove(HgPath::new(key.data(py)));
Ok(py.None())
}
@@ -239,21 +236,21 @@
.collect();
let res = self
- .inner_shared(py)
- .borrow_mut()?
+ .inner(py)
+ .borrow_mut()
.non_normal_entries_union(other?);
let ret = PyList::new(py, &[]);
- for (i, filename) in res.iter().enumerate() {
+ for filename in res.iter() {
let as_pystring = PyBytes::new(py, filename.as_bytes());
- ret.insert_item(py, i, as_pystring.into_object());
+ ret.append(py, as_pystring.into_object());
}
Ok(ret)
}
def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
let d = d.extract::<PyBytes>(py)?;
- Ok(self.inner_shared(py).borrow_mut()?
+ Ok(self.inner(py).borrow_mut()
.has_tracked_dir(HgPath::new(d.data(py)))
.map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -263,7 +260,7 @@
def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
let d = d.extract::<PyBytes>(py)?;
- Ok(self.inner_shared(py).borrow_mut()?
+ Ok(self.inner(py).borrow_mut()
.has_dir(HgPath::new(d.data(py)))
.map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -272,7 +269,7 @@
}
def parents(&self, st: PyObject) -> PyResult<PyTuple> {
- self.inner_shared(py).borrow_mut()?
+ self.inner(py).borrow_mut()
.parents(st.extract::<PyBytes>(py)?.data(py))
.and_then(|d| {
Ok((PyBytes::new(py, &d.p1), PyBytes::new(py, &d.p2))
@@ -290,13 +287,13 @@
let p1 = extract_node_id(py, &p1)?;
let p2 = extract_node_id(py, &p2)?;
- self.inner_shared(py).borrow_mut()?
+ self.inner(py).borrow_mut()
.set_parents(&DirstateParents { p1, p2 });
Ok(py.None())
}
def read(&self, st: PyObject) -> PyResult<Option<PyObject>> {
- match self.inner_shared(py).borrow_mut()?
+ match self.inner(py).borrow_mut()
.read(st.extract::<PyBytes>(py)?.data(py))
{
Ok(Some(parents)) => Ok(Some(
@@ -323,7 +320,7 @@
p2: extract_node_id(py, &p2)?,
};
- match self.inner_shared(py).borrow_mut()?.pack(parents, now) {
+ match self.inner(py).borrow_mut().pack(parents, now) {
Ok(packed) => Ok(PyBytes::new(py, &packed)),
Err(_) => Err(PyErr::new::<exc::OSError, _>(
py,
@@ -335,7 +332,7 @@
def filefoldmapasdict(&self) -> PyResult<PyDict> {
let dict = PyDict::new(py);
for (key, value) in
- self.inner_shared(py).borrow_mut()?.build_file_fold_map().iter()
+ self.inner(py).borrow_mut().build_file_fold_map().iter()
{
dict.set_item(py, key.as_ref().to_vec(), value.as_ref().to_vec())?;
}
@@ -343,18 +340,18 @@
}
def __len__(&self) -> PyResult<usize> {
- Ok(self.inner_shared(py).borrow().len())
+ Ok(self.inner(py).borrow().len())
}
def __contains__(&self, key: PyObject) -> PyResult<bool> {
let key = key.extract::<PyBytes>(py)?;
- Ok(self.inner_shared(py).borrow().contains_key(HgPath::new(key.data(py))))
+ Ok(self.inner(py).borrow().contains_key(HgPath::new(key.data(py))))
}
def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
let key = key.extract::<PyBytes>(py)?;
let key = HgPath::new(key.data(py));
- match self.inner_shared(py).borrow().get(key) {
+ match self.inner(py).borrow().get(key) {
Some(entry) => {
Ok(make_dirstate_tuple(py, entry)?)
},
@@ -366,7 +363,7 @@
}
def keys(&self) -> PyResult<DirstateMapKeysIterator> {
- let leaked_ref = self.inner_shared(py).leak_immutable();
+ let leaked_ref = self.inner(py).leak_immutable();
DirstateMapKeysIterator::from_inner(
py,
unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -374,7 +371,7 @@
}
def items(&self) -> PyResult<DirstateMapItemsIterator> {
- let leaked_ref = self.inner_shared(py).leak_immutable();
+ let leaked_ref = self.inner(py).leak_immutable();
DirstateMapItemsIterator::from_inner(
py,
unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -382,7 +379,7 @@
}
def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
- let leaked_ref = self.inner_shared(py).leak_immutable();
+ let leaked_ref = self.inner(py).leak_immutable();
DirstateMapKeysIterator::from_inner(
py,
unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -391,14 +388,14 @@
def getdirs(&self) -> PyResult<Dirs> {
// TODO don't copy, share the reference
- self.inner_shared(py).borrow_mut()?.set_dirs()
+ self.inner(py).borrow_mut().set_dirs()
.map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?;
Dirs::from_inner(
py,
DirsMultiset::from_dirstate(
- &self.inner_shared(py).borrow(),
+ &self.inner(py).borrow(),
Some(EntryState::Removed),
)
.map_err(|e| {
@@ -408,14 +405,14 @@
}
def getalldirs(&self) -> PyResult<Dirs> {
// TODO don't copy, share the reference
- self.inner_shared(py).borrow_mut()?.set_all_dirs()
+ self.inner(py).borrow_mut().set_all_dirs()
.map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?;
Dirs::from_inner(
py,
DirsMultiset::from_dirstate(
- &self.inner_shared(py).borrow(),
+ &self.inner(py).borrow(),
None,
).map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -426,7 +423,7 @@
// TODO all copymap* methods, see docstring above
def copymapcopy(&self) -> PyResult<PyDict> {
let dict = PyDict::new(py);
- for (key, value) in self.inner_shared(py).borrow().copy_map.iter() {
+ for (key, value) in self.inner(py).borrow().copy_map.iter() {
dict.set_item(
py,
PyBytes::new(py, key.as_ref()),
@@ -438,7 +435,7 @@
def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
let key = key.extract::<PyBytes>(py)?;
- match self.inner_shared(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
+ match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
Some(copy) => Ok(PyBytes::new(py, copy.as_ref())),
None => Err(PyErr::new::<exc::KeyError, _>(
py,
@@ -451,12 +448,12 @@
}
def copymaplen(&self) -> PyResult<usize> {
- Ok(self.inner_shared(py).borrow().copy_map.len())
+ Ok(self.inner(py).borrow().copy_map.len())
}
def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
let key = key.extract::<PyBytes>(py)?;
Ok(self
- .inner_shared(py)
+ .inner(py)
.borrow()
.copy_map
.contains_key(HgPath::new(key.data(py))))
@@ -468,7 +465,7 @@
) -> PyResult<Option<PyObject>> {
let key = key.extract::<PyBytes>(py)?;
match self
- .inner_shared(py)
+ .inner(py)
.borrow()
.copy_map
.get(HgPath::new(key.data(py)))
@@ -486,7 +483,7 @@
) -> PyResult<PyObject> {
let key = key.extract::<PyBytes>(py)?;
let value = value.extract::<PyBytes>(py)?;
- self.inner_shared(py).borrow_mut()?.copy_map.insert(
+ self.inner(py).borrow_mut().copy_map.insert(
HgPathBuf::from_bytes(key.data(py)),
HgPathBuf::from_bytes(value.data(py)),
);
@@ -499,8 +496,8 @@
) -> PyResult<Option<PyObject>> {
let key = key.extract::<PyBytes>(py)?;
match self
- .inner_shared(py)
- .borrow_mut()?
+ .inner(py)
+ .borrow_mut()
.copy_map
.remove(HgPath::new(key.data(py)))
{
@@ -510,7 +507,7 @@
}
def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
- let leaked_ref = self.inner_shared(py).leak_immutable();
+ let leaked_ref = self.inner(py).leak_immutable();
CopyMapKeysIterator::from_inner(
py,
unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -518,7 +515,7 @@
}
def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
- let leaked_ref = self.inner_shared(py).leak_immutable();
+ let leaked_ref = self.inner(py).leak_immutable();
CopyMapItemsIterator::from_inner(
py,
unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -532,7 +529,7 @@
&'a self,
py: Python<'a>,
) -> Ref<'a, RustDirstateMap> {
- self.inner_shared(py).borrow()
+ self.inner(py).borrow()
}
fn translate_key(
py: Python,
@@ -552,18 +549,16 @@
}
}
-py_shared_ref!(DirstateMap, RustDirstateMap, inner, inner_shared);
-
py_shared_iterator!(
DirstateMapKeysIterator,
- PyLeaked<StateMapIter<'static>>,
+ UnsafePyLeaked<StateMapIter<'static>>,
DirstateMap::translate_key,
Option<PyBytes>
);
py_shared_iterator!(
DirstateMapItemsIterator,
- PyLeaked<StateMapIter<'static>>,
+ UnsafePyLeaked<StateMapIter<'static>>,
DirstateMap::translate_key_value,
Option<(PyBytes, PyObject)>
);
--- a/rust/hg-cpython/src/dirstate/status.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/src/dirstate/status.rs Thu Feb 13 10:12:12 2020 -0800
@@ -33,7 +33,7 @@
let list = PyList::new(py, &[]);
for (i, path) in collection.iter().enumerate() {
- list.insert_item(
+ list.insert(
py,
i,
PyBytes::new(py, path.as_ref().as_bytes()).into_object(),
--- a/rust/hg-cpython/src/exceptions.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/src/exceptions.rs Thu Feb 13 10:12:12 2020 -0800
@@ -13,7 +13,7 @@
//!
//! [`GraphError`]: struct.GraphError.html
use cpython::{
- exc::{IOError, RuntimeError, ValueError},
+ exc::{RuntimeError, ValueError},
py_exception, PyErr, Python,
};
use hg;
@@ -39,34 +39,4 @@
}
}
-py_exception!(rustext, PatternError, RuntimeError);
-py_exception!(rustext, PatternFileError, RuntimeError);
py_exception!(rustext, HgPathPyError, RuntimeError);
-
-impl PatternError {
- pub fn pynew(py: Python, inner: hg::PatternError) -> PyErr {
- match inner {
- hg::PatternError::UnsupportedSyntax(m) => {
- PatternError::new(py, ("PatternError", m))
- }
- }
- }
-}
-
-impl PatternFileError {
- pub fn pynew(py: Python, inner: hg::PatternFileError) -> PyErr {
- match inner {
- hg::PatternFileError::IO(e) => {
- let value = (e.raw_os_error().unwrap_or(2), e.to_string());
- PyErr::new::<IOError, _>(py, value)
- }
- hg::PatternFileError::Pattern(e, l) => match e {
- hg::PatternError::UnsupportedSyntax(m) => {
- PatternFileError::new(py, ("PatternFileError", m, l))
- }
- },
- }
- }
-}
-
-py_exception!(shared_ref, AlreadyBorrowed, RuntimeError);
--- a/rust/hg-cpython/src/filepatterns.rs Fri Dec 13 10:37:45 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,133 +0,0 @@
-// filepatterns.rs
-//
-// Copyright 2019, Georges Racinet <gracinet@anybox.fr>,
-// Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-//! Bindings for the `hg::filepatterns` module provided by the
-//! `hg-core` crate. From Python, this will be seen as `rustext.filepatterns`
-//! and can be used as replacement for the the pure `filepatterns` Python
-//! module.
-use crate::exceptions::{PatternError, PatternFileError};
-use cpython::{
- PyBytes, PyDict, PyModule, PyObject, PyResult, PyTuple, Python, ToPyObject,
-};
-use hg::utils::files;
-use hg::{build_single_regex, read_pattern_file, LineNumber, PatternTuple};
-use std::path::PathBuf;
-
-/// Rust does not like functions with different return signatures.
-/// The 3-tuple version is always returned by the hg-core function,
-/// the (potential) conversion is handled at this level since it is not likely
-/// to have any measurable impact on performance.
-///
-/// The Python implementation passes a function reference for `warn` instead
-/// of a boolean that is used to emit warnings while parsing. The Rust
-/// implementation chooses to accumulate the warnings and propagate them to
-/// Python upon completion. See the `readpatternfile` function in `match.py`
-/// for more details.
-fn read_pattern_file_wrapper(
- py: Python,
- file_path: PyObject,
- warn: bool,
- source_info: bool,
-) -> PyResult<PyTuple> {
- let bytes = file_path.extract::<PyBytes>(py)?;
- let path = files::get_path_from_bytes(bytes.data(py));
- match read_pattern_file(path, warn) {
- Ok((patterns, warnings)) => {
- if source_info {
- let itemgetter = |x: &PatternTuple| {
- (PyBytes::new(py, &x.0), x.1, PyBytes::new(py, &x.2))
- };
- let results: Vec<(PyBytes, LineNumber, PyBytes)> =
- patterns.iter().map(itemgetter).collect();
- return Ok((results, warnings_to_py_bytes(py, &warnings))
- .to_py_object(py));
- }
- let itemgetter = |x: &PatternTuple| PyBytes::new(py, &x.0);
- let results: Vec<PyBytes> =
- patterns.iter().map(itemgetter).collect();
- Ok(
- (results, warnings_to_py_bytes(py, &warnings))
- .to_py_object(py),
- )
- }
- Err(e) => Err(PatternFileError::pynew(py, e)),
- }
-}
-
-fn warnings_to_py_bytes(
- py: Python,
- warnings: &[(PathBuf, Vec<u8>)],
-) -> Vec<(PyBytes, PyBytes)> {
- warnings
- .iter()
- .map(|(path, syn)| {
- (
- PyBytes::new(py, &files::get_bytes_from_path(path)),
- PyBytes::new(py, syn),
- )
- })
- .collect()
-}
-
-fn build_single_regex_wrapper(
- py: Python,
- kind: PyObject,
- pat: PyObject,
- globsuffix: PyObject,
-) -> PyResult<PyBytes> {
- match build_single_regex(
- kind.extract::<PyBytes>(py)?.data(py),
- pat.extract::<PyBytes>(py)?.data(py),
- globsuffix.extract::<PyBytes>(py)?.data(py),
- ) {
- Ok(regex) => Ok(PyBytes::new(py, &regex)),
- Err(e) => Err(PatternError::pynew(py, e)),
- }
-}
-
-pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
- let dotted_name = &format!("{}.filepatterns", package);
- let m = PyModule::new(py, dotted_name)?;
-
- m.add(py, "__package__", package)?;
- m.add(
- py,
- "__doc__",
- "Patterns files parsing - Rust implementation",
- )?;
- m.add(
- py,
- "build_single_regex",
- py_fn!(
- py,
- build_single_regex_wrapper(
- kind: PyObject,
- pat: PyObject,
- globsuffix: PyObject
- )
- ),
- )?;
- m.add(
- py,
- "read_pattern_file",
- py_fn!(
- py,
- read_pattern_file_wrapper(
- file_path: PyObject,
- warn: bool,
- source_info: bool
- )
- ),
- )?;
- m.add(py, "PatternError", py.get_type::<PatternError>())?;
- let sys = PyModule::import(py, "sys")?;
- let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
- sys_modules.set_item(py, dotted_name, &m)?;
-
- Ok(m)
-}
--- a/rust/hg-cpython/src/lib.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/src/lib.rs Thu Feb 13 10:12:12 2020 -0800
@@ -33,7 +33,6 @@
pub mod dirstate;
pub mod discovery;
pub mod exceptions;
-pub mod filepatterns;
pub mod parsers;
pub mod revlog;
pub mod utils;
@@ -53,25 +52,10 @@
m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
m.add(
py,
- "filepatterns",
- filepatterns::init_module(py, &dotted_name)?,
- )?;
- m.add(
- py,
"parsers",
parsers::init_parsers_module(py, &dotted_name)?,
)?;
m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
- m.add(
- py,
- "PatternFileError",
- py.get_type::<exceptions::PatternFileError>(),
- )?;
- m.add(
- py,
- "PatternError",
- py.get_type::<exceptions::PatternError>(),
- )?;
Ok(())
});
--- a/rust/hg-cpython/src/ref_sharing.rs Fri Dec 13 10:37:45 2019 +0100
+++ b/rust/hg-cpython/src/ref_sharing.rs Thu Feb 13 10:12:12 2020 -0800
@@ -22,413 +22,6 @@
//! Macros for use in the `hg-cpython` bridge library.
-use crate::exceptions::AlreadyBorrowed;
-use cpython::{exc, PyClone, PyErr, PyObject, PyResult, Python};
-use std::cell::{Ref, RefCell, RefMut};
-use std::ops::{Deref, DerefMut};
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-/// Manages the shared state between Python and Rust
-///
-/// `PySharedState` is owned by `PySharedRefCell`, and is shared across its
-/// derived references. The consistency of these references are guaranteed
-/// as follows:
-///
-/// - The immutability of `py_class!` object fields. Any mutation of
-/// `PySharedRefCell` is allowed only through its `borrow_mut()`.
-/// - The `py: Python<'_>` token, which makes sure that any data access is
-/// synchronized by the GIL.
-/// - The underlying `RefCell`, which prevents `PySharedRefCell` data from
-/// being directly borrowed or leaked while it is mutably borrowed.
-/// - The `borrow_count`, which is the number of references borrowed from
-/// `PyLeaked`. Just like `RefCell`, mutation is prohibited while `PyLeaked`
-/// is borrowed.
-/// - The `generation` counter, which increments on `borrow_mut()`. `PyLeaked`
-/// reference is valid only if the `current_generation()` equals to the
-/// `generation` at the time of `leak_immutable()`.
-#[derive(Debug, Default)]
-struct PySharedState {
- // The counter variable could be Cell<usize> since any operation on
- // PySharedState is synchronized by the GIL, but being "atomic" makes
- // PySharedState inherently Sync. The ordering requirement doesn't
- // matter thanks to the GIL.
- borrow_count: AtomicUsize,
- generation: AtomicUsize,
-}
-
-impl PySharedState {
- fn borrow_mut<'a, T>(
- &'a self,
- py: Python<'a>,
- pyrefmut: RefMut<'a, T>,
- ) -> PyResult<RefMut<'a, T>> {
- match self.current_borrow_count(py) {
- 0 => {
- // Note that this wraps around to the same value if mutably
- // borrowed more than usize::MAX times, which wouldn't happen
- // in practice.
- self.generation.fetch_add(1, Ordering::Relaxed);
- Ok(pyrefmut)
- }
- _ => Err(AlreadyBorrowed::new(
- py,
- "Cannot borrow mutably while immutably borrowed",
- )),
- }
- }
-
- /// Return a reference to the wrapped data and its state with an
- /// artificial static lifetime.
- /// We need to be protected by the GIL for thread-safety.
- ///
- /// # Safety
- ///
- /// This is highly unsafe since the lifetime of the given data can be
- /// extended. Do not call this function directly.
- unsafe fn leak_immutable<T>(
- &self,
- _py: Python,
- data: Ref<T>,
- ) -> (&'static T, &'static PySharedState) {
- let ptr: *const T = &*data;
- let state_ptr: *const PySharedState = self;
- (&*ptr, &*state_ptr)
- }
-
- fn current_borrow_count(&self, _py: Python) -> usize {
- self.borrow_count.load(Ordering::Relaxed)
- }
-
- fn increase_borrow_count(&self, _py: Python) {
- // Note that this wraps around if there are more than usize::MAX
- // borrowed references, which shouldn't happen due to memory limit.
- self.borrow_count.fetch_add(1, Ordering::Relaxed);
- }
-
- fn decrease_borrow_count(&self, _py: Python) {
- let prev_count = self.borrow_count.fetch_sub(1, Ordering::Relaxed);
- assert!(prev_count > 0);
- }
-
- fn current_generation(&self, _py: Python) -> usize {
- self.generation.load(Ordering::Relaxed)
- }
-}
-
-/// Helper to keep the borrow count updated while the shared object is
-/// immutably borrowed without using the `RefCell` interface.
-struct BorrowPyShared<'a> {
- py: Python<'a>,
- py_shared_state: &'a PySharedState,
-}
-
-impl<'a> BorrowPyShared<'a> {
- fn new(
- py: Python<'a>,
- py_shared_state: &'a PySharedState,
- ) -> BorrowPyShared<'a> {
- py_shared_state.increase_borrow_count(py);
- BorrowPyShared {
- py,
- py_shared_state,
- }
- }
-}
-
-impl Drop for BorrowPyShared<'_> {
- fn drop(&mut self) {
- self.py_shared_state.decrease_borrow_count(self.py);
- }
-}
-
-/// `RefCell` wrapper to be safely used in conjunction with `PySharedState`.
-///
-/// This object can be stored in a `py_class!` object as a data field. Any
-/// operation is allowed through the `PySharedRef` interface.
-#[derive(Debug)]
-pub struct PySharedRefCell<T> {
- inner: RefCell<T>,
- py_shared_state: PySharedState,
-}
-
-impl<T> PySharedRefCell<T> {
- pub fn new(value: T) -> PySharedRefCell<T> {
- Self {
- inner: RefCell::new(value),
- py_shared_state: PySharedState::default(),
- }
- }
-
- fn borrow<'a>(&'a self, _py: Python<'a>) -> Ref<'a, T> {
- // py_shared_state isn't involved since
- // - inner.borrow() would fail if self is mutably borrowed,
- // - and inner.borrow_mut() would fail while self is borrowed.
- self.inner.borrow()
- }
-
- // TODO: maybe this should be named as try_borrow_mut(), and use
- // inner.try_borrow_mut(). The current implementation panics if
- // self.inner has been borrowed, but returns error if py_shared_state
- // refuses to borrow.
- fn borrow_mut<'a>(&'a self, py: Python<'a>) -> PyResult<RefMut<'a, T>> {
- self.py_shared_state.borrow_mut(py, self.inner.borrow_mut())
- }
-}
-
-/// Sharable data member of type `T` borrowed from the `PyObject`.
-pub struct PySharedRef<'a, T> {
- py: Python<'a>,
- owner: &'a PyObject,
- data: &'a PySharedRefCell<T>,
-}
-
-impl<'a, T> PySharedRef<'a, T> {
- /// # Safety
- ///
- /// The `data` must be owned by the `owner`. Otherwise, the leak count
- /// would get wrong.
- pub unsafe fn new(
- py: Python<'a>,
- owner: &'a PyObject,
- data: &'a PySharedRefCell<T>,
- ) -> Self {
- Self { py, owner, data }
- }
-
- pub fn borrow(&self) -> Ref<'a, T> {
- self.data.borrow(self.py)
- }
-
- pub fn borrow_mut(&self) -> PyResult<RefMut<'a, T>> {
- self.data.borrow_mut(self.py)
- }
-
- /// Returns a leaked reference.
- ///
- /// # Panics
- ///
- /// Panics if this is mutably borrowed.
- pub fn leak_immutable(&self) -> PyLeaked<&'static T> {
- let state = &self.data.py_shared_state;
- // make sure self.data isn't mutably borrowed; otherwise the
- // generation number can't be trusted.
- let data_ref = self.borrow();
- unsafe {
- let (static_ref, static_state_ref) =
- state.leak_immutable(self.py, data_ref);
- PyLeaked::new(self.py, self.owner, static_ref, static_state_ref)
- }
- }
-}
-
-/// Allows a `py_class!` generated struct to share references to one of its
-/// data members with Python.
-///
-/// # Parameters
-///
-/// * `$name` is the same identifier used in for `py_class!` macro call.
-/// * `$inner_struct` is the identifier of the underlying Rust struct
-/// * `$data_member` is the identifier of the data member of `$inner_struct`
-/// that will be shared.
-/// * `$shared_accessor` is the function name to be generated, which allows
-/// safe access to the data member.
-///
-/// # Safety
-///
-/// `$data_member` must persist while the `$name` object is alive. In other
-/// words, it must be an accessor to a data field of the Python object.
-///
-/// # Example
-///
-/// ```
-/// struct MyStruct {
-/// inner: Vec<u32>;
-/// }
-///
-/// py_class!(pub class MyType |py| {
-/// data inner: PySharedRefCell<MyStruct>;
-/// });
-///
-/// py_shared_ref!(MyType, MyStruct, inner, inner_shared);
-/// ```
-macro_rules! py_shared_ref {
- (
- $name: ident,
- $inner_struct: ident,
- $data_member: ident,
- $shared_accessor: ident
- ) => {
- impl $name {
- /// Returns a safe reference to the shared `$data_member`.
- ///
- /// This function guarantees that `PySharedRef` is created with
- /// the valid `self` and `self.$data_member(py)` pair.
- fn $shared_accessor<'a>(
- &'a self,
- py: Python<'a>,
- ) -> $crate::ref_sharing::PySharedRef<'a, $inner_struct> {
- use cpython::PythonObject;
- use $crate::ref_sharing::PySharedRef;
- let owner = self.as_object();
- let data = self.$data_member(py);
- unsafe { PySharedRef::new(py, owner, data) }
- }
- }
- };
-}
-
-/// Manage immutable references to `PyObject` leaked into Python iterators.
-///
-/// This reference will be invalidated once the original value is mutably
-/// borrowed.
-pub struct PyLeaked<T> {
- inner: PyObject,
- data: Option<T>,
- py_shared_state: &'static PySharedState,
- /// Generation counter of data `T` captured when PyLeaked is created.
- generation: usize,
-}
-
-// DO NOT implement Deref for PyLeaked<T>! Dereferencing PyLeaked
-// without taking Python GIL wouldn't be safe. Also, the underling reference
-// is invalid if generation != py_shared_state.generation.
-
-impl<T> PyLeaked<T> {
- /// # Safety
- ///
- /// The `py_shared_state` must be owned by the `inner` Python object.
- fn new(
- py: Python,
- inner: &PyObject,
- data: T,
- py_shared_state: &'static PySharedState,
- ) -> Self {
- Self {
- inner: inner.clone_ref(py),
- data: Some(data),
- py_shared_state,
- generation: py_shared_state.current_generation(py),
- }
- }
-
- /// Immutably borrows the wrapped value.
- ///
- /// Borrowing fails if the underlying reference has been invalidated.
- pub fn try_borrow<'a>(
- &'a self,
- py: Python<'a>,
- ) -> PyResult<PyLeakedRef<'a, T>> {
- self.validate_generation(py)?;
- Ok(PyLeakedRef {
- _borrow: BorrowPyShared::new(py, self.py_shared_state),
- data: self.data.as_ref().unwrap(),
- })
- }
-
- /// Mutably borrows the wrapped value.
- ///
- /// Borrowing fails if the underlying reference has been invalidated.
- ///
- /// Typically `T` is an iterator. If `T` is an immutable reference,
- /// `get_mut()` is useless since the inner value can't be mutated.
- pub fn try_borrow_mut<'a>(
- &'a mut self,
- py: Python<'a>,
- ) -> PyResult<PyLeakedRefMut<'a, T>> {
- self.validate_generation(py)?;
- Ok(PyLeakedRefMut {
- _borrow: BorrowPyShared::new(py, self.py_shared_state),
- data: self.data.as_mut().unwrap(),
- })
- }
-
- /// Converts the inner value by the given function.
- ///
- /// Typically `T` is a static reference to a container, and `U` is an
- /// iterator of that container.
- ///
- /// # Panics
- ///
- /// Panics if the underlying reference has been invalidated.
- ///
- /// This is typically called immediately after the `PyLeaked` is obtained.
- /// In which case, the reference must be valid and no panic would occur.
- ///
- /// # Safety
- ///
- /// The lifetime of the object passed in to the function `f` is cheated.
- /// It's typically a static reference, but is valid only while the
- /// corresponding `PyLeaked` is alive. Do not copy it out of the
- /// function call.
- pub unsafe fn map<U>(
- mut self,
- py: Python,
- f: impl FnOnce(T) -> U,
- ) -> PyLeaked<U> {
- // Needs to test the generation value to make sure self.data reference
- // is still intact.
- self.validate_generation(py)
- .expect("map() over invalidated leaked reference");
-
- // f() could make the self.data outlive. That's why map() is unsafe.
- // In order to make this function safe, maybe we'll need a way to
- // temporarily restrict the lifetime of self.data and translate the
- // returned object back to Something<'static>.
- let new_data = f(self.data.take().unwrap());
- PyLeaked {
- inner: self.inner.clone_ref(py),
- data: Some(new_data),
- py_shared_state: self.py_shared_state,
- generation: self.generation,
- }
- }
-
- fn validate_generation(&self, py: Python) -> PyResult<()> {
- if self.py_shared_state.current_generation(py) == self.generation {
- Ok(())
- } else {
- Err(PyErr::new::<exc::RuntimeError, _>(
- py,
- "Cannot access to leaked reference after mutation",
- ))
- }
- }
-}
-
-/// Immutably borrowed reference to a leaked value.
-pub struct PyLeakedRef<'a, T> {
- _borrow: BorrowPyShared<'a>,
- data: &'a T,
-}
-
-impl<T> Deref for PyLeakedRef<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- self.data
- }
-}
-
-/// Mutably borrowed reference to a leaked value.
-pub struct PyLeakedRefMut<'a, T> {
- _borrow: BorrowPyShared<'a>,
- data: &'a mut T,
-}
-
-impl<T> Deref for PyLeakedRefMut<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- self.data
- }
-}
-
-impl<T> DerefMut for PyLeakedRefMut<'_, T> {
- fn deref_mut(&mut self) -> &mut T {
- self.data
- }
-}
-
/// Defines a `py_class!` that acts as a Python iterator over a Rust iterator.
///
/// TODO: this is a bit awkward to use, and a better (more complicated)
@@ -437,12 +30,18 @@
/// # Parameters
///
/// * `$name` is the identifier to give to the resulting Rust struct.
-/// * `$leaked` corresponds to `$leaked` in the matching `py_shared_ref!` call.
+/// * `$leaked` corresponds to `UnsafePyLeaked` in the matching `@shared data`
+/// declaration.
/// * `$iterator_type` is the type of the Rust iterator.
/// * `$success_func` is a function for processing the Rust `(key, value)`
/// tuple on iteration success, turning it into something Python understands.
/// * `$success_func` is the return type of `$success_func`
///
+/// # Safety
+///
+/// `$success_func` may take a reference, but its lifetime may be cheated.
+/// Do not copy it out of the function call.
+///
/// # Example
///
/// ```
@@ -451,7 +50,7 @@
/// }
///
/// py_class!(pub class MyType |py| {
-/// data inner: PySharedRefCell<MyStruct>;
+/// @shared data inner: MyStruct;
///
/// def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
/// let leaked_ref = self.inner_shared(py).leak_immutable();
@@ -475,11 +74,9 @@
/// }
/// }
///
-/// py_shared_ref!(MyType, MyStruct, inner, MyTypeLeakedRef);
-///
/// py_shared_iterator!(
/// MyTypeItemsIterator,
-/// PyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
+/// UnsafePyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
/// MyType::translate_key_value,
/// Option<(PyBytes, PyBytes)>
/// );
@@ -496,9 +93,10 @@
def __next__(&self) -> PyResult<$success_type> {
let mut leaked = self.inner(py).borrow_mut();
- let mut iter = leaked.try_borrow_mut(py)?;
+ let mut iter = unsafe { leaked.try_borrow_mut(py)? };
match iter.next() {
None => Ok(None),
+ // res may be a reference with a cheated 'static lifetime
Some(res) => $success_func(py, res),
}
}
@@ -521,116 +119,3 @@
}
};
}
-
-#[cfg(test)]
-#[cfg(any(feature = "python27-bin", feature = "python3-bin"))]
-mod test {
- use super::*;
- use cpython::{GILGuard, Python};
-
- py_class!(class Owner |py| {
- data string: PySharedRefCell<String>;
- });
- py_shared_ref!(Owner, String, string, string_shared);
-
- fn prepare_env() -> (GILGuard, Owner) {
- let gil = Python::acquire_gil();
- let py = gil.python();
- let owner =
- Owner::create_instance(py, PySharedRefCell::new("new".to_owned()))
- .unwrap();
- (gil, owner)
- }
-
- #[test]
- fn test_leaked_borrow() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- let leaked = owner.string_shared(py).leak_immutable();
- let leaked_ref = leaked.try_borrow(py).unwrap();
- assert_eq!(*leaked_ref, "new");
- }
-
- #[test]
- fn test_leaked_borrow_mut() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- let leaked = owner.string_shared(py).leak_immutable();
- let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
- let mut leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
- assert_eq!(leaked_ref.next(), Some('n'));
- assert_eq!(leaked_ref.next(), Some('e'));
- assert_eq!(leaked_ref.next(), Some('w'));
- assert_eq!(leaked_ref.next(), None);
- }
-
- #[test]
- fn test_leaked_borrow_after_mut() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- let leaked = owner.string_shared(py).leak_immutable();
- owner.string_shared(py).borrow_mut().unwrap().clear();
- assert!(leaked.try_borrow(py).is_err());
- }
-
- #[test]
- fn test_leaked_borrow_mut_after_mut() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- let leaked = owner.string_shared(py).leak_immutable();
- let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
- owner.string_shared(py).borrow_mut().unwrap().clear();
- assert!(leaked_iter.try_borrow_mut(py).is_err());
- }
-
- #[test]
- #[should_panic(expected = "map() over invalidated leaked reference")]
- fn test_leaked_map_after_mut() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- let leaked = owner.string_shared(py).leak_immutable();
- owner.string_shared(py).borrow_mut().unwrap().clear();
- let _leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
- }
-
- #[test]
- fn test_borrow_mut_while_leaked_ref() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- assert!(owner.string_shared(py).borrow_mut().is_ok());
- let leaked = owner.string_shared(py).leak_immutable();
- {
- let _leaked_ref = leaked.try_borrow(py).unwrap();
- assert!(owner.string_shared(py).borrow_mut().is_err());
- {
- let _leaked_ref2 = leaked.try_borrow(py).unwrap();
- assert!(owner.string_shared(py).borrow_mut().is_err());
- }
- assert!(owner.string_shared(py).borrow_mut().is_err());
- }
- assert!(owner.string_shared(py).borrow_mut().is_ok());
- }
-
- #[test]
- fn test_borrow_mut_while_leaked_ref_mut() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- assert!(owner.string_shared(py).borrow_mut().is_ok());
- let leaked = owner.string_shared(py).leak_immutable();
- let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
- {
- let _leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
- assert!(owner.string_shared(py).borrow_mut().is_err());
- }
- assert!(owner.string_shared(py).borrow_mut().is_ok());
- }
-
- #[test]
- #[should_panic(expected = "mutably borrowed")]
- fn test_leak_while_borrow_mut() {
- let (gil, owner) = prepare_env();
- let py = gil.python();
- let _mut_ref = owner.string_shared(py).borrow_mut();
- owner.string_shared(py).leak_immutable();
- }
-}
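
To summarize the migration for binding authors: the PySharedRefCell /
py_shared_ref! plumbing deleted above is superseded by rust-cpython 0.4's own
`@shared data` declaration and its `UnsafePyLeaked` type. The schematic sketch
below mirrors the macro documentation; `MyStruct`, `MyType` and the translate
function are illustrative placeholders, it assumes it lives inside the
hg-cpython crate (so the `py_class!` and `py_shared_iterator!` macros are in
scope), and it is not part of the patch.

    use cpython::{PyBytes, PyResult, Python, UnsafePyLeaked};
    use std::collections::HashMap;

    struct MyStruct {
        inner: HashMap<Vec<u8>, Vec<u8>>,
    }

    py_class!(pub class MyType |py| {
        // `@shared data` replaces `data inner: PySharedRefCell<MyStruct>` plus
        // the separate `py_shared_ref!(MyType, MyStruct, inner, inner_shared)`
        // invocation.
        @shared data inner: MyStruct;

        def __len__(&self) -> PyResult<usize> {
            // borrow()/borrow_mut() no longer return a PyResult, so the
            // trailing `?` used with PySharedRefCell goes away.
            Ok(self.inner(py).borrow().inner.len())
        }

        def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
            // leak_immutable() now yields a cpython::UnsafePyLeaked.
            let leaked_ref = self.inner(py).leak_immutable();
            MyTypeItemsIterator::from_inner(
                py,
                unsafe { leaked_ref.map(py, |o| o.inner.iter()) },
            )
        }
    });

    impl MyType {
        fn translate_key_value(
            py: Python,
            res: (&Vec<u8>, &Vec<u8>),
        ) -> PyResult<Option<(PyBytes, PyBytes)>> {
            let (key, value) = res;
            Ok(Some((PyBytes::new(py, key), PyBytes::new(py, value))))
        }
    }

    py_shared_iterator!(
        MyTypeItemsIterator,
        UnsafePyLeaked<std::collections::hash_map::Iter<'static, Vec<u8>, Vec<u8>>>,
        MyType::translate_key_value,
        Option<(PyBytes, PyBytes)>
    );
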
--- a/setup.py Fri Dec 13 10:37:45 2019 +0100
+++ b/setup.py Thu Feb 13 10:12:12 2020 -0800
@@ -323,7 +323,7 @@
# gives precedence to hg.exe in the current directory, so fall back to the
# python invocation of local hg, where pythonXY.dll can always be found.
check_cmd = ['log', '-r.', '-Ttest']
- if os.name != 'nt':
+ if os.name != 'nt' or not os.path.exists("hg.exe"):
try:
retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
except EnvironmentError:
@@ -1351,10 +1351,19 @@
env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
cargocmd = ['cargo', 'rustc', '-vv', '--release']
+
+ feature_flags = []
+
if sys.version_info[0] == 3 and self.py3_features is not None:
- cargocmd.extend(
- ('--features', self.py3_features, '--no-default-features')
- )
+ feature_flags.append(self.py3_features)
+ cargocmd.append('--no-default-features')
+
+ rust_features = env.get("HG_RUST_FEATURES")
+ if rust_features:
+ feature_flags.append(rust_features)
+
+ cargocmd.extend(('--features', " ".join(feature_flags)))
+
cargocmd.append('--')
if sys.platform == 'darwin':
cargocmd.extend(
--- a/tests/hghave.py Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/hghave.py Thu Feb 13 10:12:12 2020 -0800
@@ -307,13 +307,23 @@
return False
-def gethgversion():
+def _gethgversion():
m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
if not m:
return (0, 0)
return (int(m.group(1)), int(m.group(2)))
+_hgversion = None
+
+
+def gethgversion():
+ global _hgversion
+ if _hgversion is None:
+ _hgversion = _gethgversion()
+ return _hgversion
+
+
@checkvers(
"hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)])
)
@@ -685,7 +695,7 @@
curses.COLOR_BLUE
return matchoutput('test -x "`which tic`"', br'')
- except ImportError:
+ except (ImportError, AttributeError):
return False
--- a/tests/run-tests.py Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/run-tests.py Thu Feb 13 10:12:12 2020 -0800
@@ -555,12 +555,6 @@
help="use pure Python code instead of C extensions",
)
hgconf.add_argument(
- "-3",
- "--py3-warnings",
- action="store_true",
- help="enable Py3k warnings on Python 2.7+",
- )
- hgconf.add_argument(
"--with-chg",
metavar="CHG",
help="use specified chg wrapper in place of hg",
@@ -748,9 +742,6 @@
)
options.timeout = 0
options.slowtimeout = 0
- if options.py3_warnings:
- if PYTHON3:
- parser.error('--py3-warnings can only be used on Python 2.7')
if options.blacklist:
options.blacklist = parselistfiles(options.blacklist, 'blacklist')
@@ -909,7 +900,6 @@
timeout=None,
startport=None,
extraconfigopts=None,
- py3warnings=False,
shell=None,
hgcommand=None,
slowtimeout=None,
@@ -942,8 +932,6 @@
must have the form "key=value" (something understood by hgrc). Values
of the form "foo.key=value" will result in "[foo] key=value".
- py3warnings enables Py3k warnings.
-
shell is the shell to execute tests in.
"""
if timeout is None:
@@ -968,7 +956,6 @@
self._slowtimeout = slowtimeout
self._startport = startport
self._extraconfigopts = extraconfigopts or []
- self._py3warnings = py3warnings
self._shell = _bytespath(shell)
self._hgcommand = hgcommand or b'hg'
self._usechg = usechg
@@ -1515,9 +1502,8 @@
return os.path.join(self._testdir, b'%s.out' % self.bname)
def _run(self, env):
- py3switch = self._py3warnings and b' -3' or b''
# Quote the python(3) executable for Windows
- cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
+ cmd = b'"%s" "%s"' % (PYTHON, self.path)
vlog("# Running", cmd.decode("utf-8"))
normalizenewlines = os.name == 'nt'
result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
@@ -3366,7 +3352,6 @@
timeout=self.options.timeout,
startport=self._getport(count),
extraconfigopts=self.options.extra_config_opt,
- py3warnings=self.options.py3_warnings,
shell=self.options.shell,
hgcommand=self._hgcommand,
usechg=bool(self.options.with_chg or self.options.chg),
@@ -3512,15 +3497,6 @@
self._usecorrectpython()
- if self.options.py3_warnings and not self.options.anycoverage:
- vlog("# Updating hg command to enable Py3k Warnings switch")
- with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
- lines = [line.rstrip() for line in f]
- lines[0] += ' -3'
- with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
- for line in lines:
- f.write(line + '\n')
-
hgbat = os.path.join(self._bindir, b'hg.bat')
if os.path.isfile(hgbat):
# hg.bat expects to be put in bin/scripts while run-tests.py
--- a/tests/test-backout.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-backout.t Thu Feb 13 10:12:12 2020 -0800
@@ -86,6 +86,33 @@
commit: 1 unresolved (clean)
update: (current)
phases: 5 draft
+ $ hg log -G
+ @ changeset: 4:ed99997b793d
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:05 1970 +0000
+ | summary: ypples
+ |
+ o changeset: 3:1c2161e97c0a
+ | user: test
+ | date: Thu Jan 01 00:00:04 1970 +0000
+ | summary: Backed out changeset 22cb4f70d813
+ |
+ o changeset: 2:a8c6e511cfee
+ | user: test
+ | date: Thu Jan 01 00:00:02 1970 +0000
+ | summary: grapes
+ |
+ % changeset: 1:22cb4f70d813
+ | user: test
+ | date: Thu Jan 01 00:00:01 1970 +0000
+ | summary: chair
+ |
+ o changeset: 0:a5cb2dde5805
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: tomatoes
+
file that was removed is recreated
(this also tests that editor is not invoked if the commit message is
@@ -709,6 +736,23 @@
commit: 1 unresolved (clean)
update: (current)
phases: 3 draft
+ $ hg log -G
+ @ changeset: 2:b71750c4b0fd
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: capital ten
+ |
+ o changeset: 1:913609522437
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: capital three
+ |
+ % changeset: 0:a30dd8addae3
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: initial
+
$ hg resolve --all --debug
picked tool ':merge' for foo (binary False symlink False changedelete False)
merging foo
--- a/tests/test-check-format.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-check-format.t Thu Feb 13 10:12:12 2020 -0800
@@ -1,5 +1,5 @@
#require black
$ cd $RUNTESTDIR/..
- $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/** - "contrib/python-zstandard/**"'`
+ $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
--- a/tests/test-check-interfaces.py Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-check-interfaces.py Thu Feb 13 10:12:12 2020 -0800
@@ -252,7 +252,6 @@
checkzobject(mctx)
# Conforms to imanifestrevisionwritable.
- checkzobject(mctx.new())
checkzobject(mctx.copy())
# Conforms to imanifestdict.
--- a/tests/test-check-module-imports.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-check-module-imports.t Thu Feb 13 10:12:12 2020 -0800
@@ -24,6 +24,7 @@
> -X contrib/packaging/hg-docker \
> -X contrib/packaging/hgpackaging/ \
> -X contrib/packaging/inno/ \
+ > -X contrib/phab-clean.py \
> -X contrib/python-zstandard/ \
> -X contrib/win32/hgwebdir_wsgi.py \
> -X contrib/perf-utils/perf-revlog-write-plot.py \
--- a/tests/test-chg.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-chg.t Thu Feb 13 10:12:12 2020 -0800
@@ -382,8 +382,8 @@
YYYY/MM/DD HH:MM:SS (PID)> log -R cached
YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached (in ...s)
-Test that chg works even when python "coerces" the locale (py3.7+, which is done
-by default if none of LC_ALL, LC_CTYPE, or LANG are set in the environment)
+Test that chg works (restoring the user's actual LC_CTYPE) even when python
+"coerces" the locale (py3.7+)
$ cat > $TESTTMP/debugenv.py <<EOF
> from mercurial import encoding
@@ -397,9 +397,22 @@
> if v is not None:
> ui.write(b'%s=%s\n' % (k, encoding.environ[k]))
> EOF
+(hg keeps python's modified LC_CTYPE, chg doesn't)
+ $ (unset LC_ALL; unset LANG; LC_CTYPE= "$CHGHG" \
+ > --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+ LC_CTYPE=C.UTF-8 (py37 !)
+ LC_CTYPE= (no-py37 !)
+ $ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
+ > --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+ LC_CTYPE=
+ $ (unset LC_ALL; unset LANG; LC_CTYPE=unsupported_value chg \
+ > --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+ LC_CTYPE=unsupported_value
+ $ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
+ > --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+ LC_CTYPE=
$ LANG= LC_ALL= LC_CTYPE= chg \
> --config extensions.debugenv=$TESTTMP/debugenv.py debugenv
LC_ALL=
- LC_CTYPE=C.UTF-8 (py37 !)
- LC_CTYPE= (no-py37 !)
+ LC_CTYPE=
LANG=
--- a/tests/test-clonebundles.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-clonebundles.t Thu Feb 13 10:12:12 2020 -0800
@@ -455,6 +455,19 @@
no changes found
2 local changesets published
+Test a bad attribute list
+
+ $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
+ abort: invalid ui.clonebundleprefers item: bad
+ (each comma separated item should be key=value pairs)
+ [255]
+ $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
+ > -U http://localhost:$HGPORT bad-input
+ abort: invalid ui.clonebundleprefers item: bad
+ (each comma separated item should be key=value pairs)
+ [255]
+
+
Test interaction between clone bundles and --stream
A manifest with just a gzip bundle
--- a/tests/test-commit-unresolved.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-commit-unresolved.t Thu Feb 13 10:12:12 2020 -0800
@@ -60,7 +60,7 @@
abort: cannot specify a node with --abort
[255]
$ hg merge --abort --rev e4501
- abort: cannot specify both --rev and --abort
+ abort: cannot specify both --abort and --rev
[255]
#if abortcommand
@@ -144,7 +144,7 @@
(branch merge, don't forget to commit)
$ hg merge --preview --abort
- abort: cannot specify --preview with --abort
+ abort: cannot specify both --abort and --preview
[255]
$ hg abort
--- a/tests/test-completion.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-completion.t Thu Feb 13 10:12:12 2020 -0800
@@ -107,6 +107,7 @@
debugmanifestfulltextcache
debugmergestate
debugnamecomplete
+ debugnodemap
debugobsolete
debugp1copies
debugp2copies
@@ -128,6 +129,7 @@
debugssl
debugsub
debugsuccessorssets
+ debugtagscache
debugtemplate
debuguigetpass
debuguiprompt
@@ -289,6 +291,7 @@
debugmanifestfulltextcache: clear, add
debugmergestate:
debugnamecomplete:
+ debugnodemap: dump-new, dump-disk, check, metadata
debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
debugp1copies: rev
debugp2copies: rev
@@ -310,6 +313,7 @@
debugssl:
debugsub: rev
debugsuccessorssets: closest
+ debugtagscache:
debugtemplate: rev, define
debuguigetpass: prompt
debuguiprompt: prompt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-copies-chain-merge.t Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,619 @@
+=====================================================
+Test Copy tracing for chain of copies involving merge
+=====================================================
+
+This test file covers copies/rename cases for chains of commits where merges
+are involved. It checks that we do not have unwanted changes of behavior and
+that the different options to retrieve copies behave correctly.
+
+Setup
+=====
+
+use git diff to see rename
+
+ $ cat << EOF >> $HGRCPATH
+ > [diff]
+ > git=yes
+ > [ui]
+ > logtemplate={rev} {desc}]\n
+ > EOF
+
+ $ hg init repo-chain
+ $ cd repo-chain
+
+Add some linear renames initially
+
+ $ touch a b
+ $ hg ci -Am 'i-0 initial commit: a b'
+ adding a
+ adding b
+ $ hg mv a c
+ $ hg ci -Am 'i-1: a -move-> c'
+ $ hg mv c d
+ $ hg ci -Am 'i-2: c -move-> d'
+ $ hg log -G
+ @ 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Have a branching with nothing on one side
+
+ $ hg mv d e
+ $ hg ci -Am 'a-1: d -move-> e'
+ $ hg mv e f
+ $ hg ci -Am 'a-2: e -move-> f'
+ $ hg log -G --rev '::.'
+ @ 4 a-2: e -move-> f]
+ |
+ o 3 a-1: d -move-> e]
+ |
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+And have another branch with renames on the other side
+
+ $ hg up 'desc("i-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo foo > b
+ $ hg ci -m 'b-1: b update'
+ created new head
+ $ hg log -G --rev '::.'
+ @ 5 b-1: b update]
+ |
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+
+Merge the two branches we just defined (in both directions)
+- one with a change to an unrelated file
+- one with renames in it
+
+ $ hg up 'desc("b-1")'
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("a-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mBAm-0 simple merge - one way'
+ $ hg up 'desc("a-2")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("b-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mABm-0 simple merge - the other way'
+ created new head
+ $ hg log -G --rev '::(desc("mABm")+desc("mBAm"))'
+ @ 7 mABm-0 simple merge - the other way]
+ |\
+ +---o 6 mBAm-0 simple merge - one way]
+ | |/
+ | o 5 b-1: b update]
+ | |
+ o | 4 a-2: e -move-> f]
+ | |
+ o | 3 a-1: d -move-> e]
+ |/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Create a branch that deletes a previously renamed file
+
+ $ hg up 'desc("i-2")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg rm d
+ $ hg ci -m 'c-1 delete d'
+ created new head
+ $ hg log -G --rev '::.'
+ @ 8 c-1 delete d]
+ |
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Merge:
+- one with a change to an unrelated file
+- one deleting the change
+and recreate an unrelated file after the merge
+
+ $ hg up 'desc("b-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("c-1")'
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mBCm-0 simple merge - one way'
+ $ echo bar > d
+ $ hg add d
+ $ hg ci -m 'mBCm-1 re-add d'
+ $ hg up 'desc("c-1")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("b-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mCBm-0 simple merge - the other way'
+ created new head
+ $ echo bar > d
+ $ hg add d
+ $ hg ci -m 'mCBm-1 re-add d'
+ $ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
+ @ 12 mCBm-1 re-add d]
+ |
+ o 11 mCBm-0 simple merge - the other way]
+ |\
+ | | o 10 mBCm-1 re-add d]
+ | | |
+ +---o 9 mBCm-0 simple merge - one way]
+ | |/
+ | o 8 c-1 delete d]
+ | |
+ o | 5 b-1: b update]
+ |/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Create a branch that deletes a previously renamed file and recreates it
+
+ $ hg up 'desc("i-2")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg rm d
+ $ hg ci -m 'd-1 delete d'
+ created new head
+ $ echo bar > d
+ $ hg add d
+ $ hg ci -m 'd-2 re-add d'
+ $ hg log -G --rev '::.'
+ @ 14 d-2 re-add d]
+ |
+ o 13 d-1 delete d]
+ |
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Merge:
+- one side with a change to an unrelated file
+- one side deleting and recreating the file
+
+Note:
+| In this case, the merge gets conflicting information: on one side we have a
+| "brand new" d, and on the other one we have "d renamed from c (itself renamed
+| from a)".
+|
+| The current code arbitrarily picks one side.
+
+ $ hg up 'desc("b-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("d-2")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mBDm-0 simple merge - one way'
+ $ hg up 'desc("d-2")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("b-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mDBm-0 simple merge - the other way'
+ created new head
+ $ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
+ @ 16 mDBm-0 simple merge - the other way]
+ |\
+ +---o 15 mBDm-0 simple merge - one way]
+ | |/
+ | o 14 d-2 re-add d]
+ | |
+ | o 13 d-1 delete d]
+ | |
+ o | 5 b-1: b update]
+ |/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Have another branch that renames a different file to the same filename as an existing rename target
+
+ $ hg up 'desc("i-2")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg mv b g
+ $ hg ci -m 'e-1 b -move-> g'
+ created new head
+ $ hg mv g f
+ $ hg ci -m 'e-2 g -move-> f'
+ $ hg log -G --rev '::.'
+ @ 18 e-2 g -move-> f]
+ |
+ o 17 e-1 b -move-> g]
+ |
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Merge:
+- one side renaming d to f (via e)
+- the other side renaming b to f (via g)
+
+ $ hg up 'desc("a-2")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("e-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mAEm-0 simple merge - one way'
+ $ hg up 'desc("e-2")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("a-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'mEAm-0 simple merge - the other way'
+ created new head
+ $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
+ @ 20 mEAm-0 simple merge - the other way]
+ |\
+ +---o 19 mAEm-0 simple merge - one way]
+ | |/
+ | o 18 e-2 g -move-> f]
+ | |
+ | o 17 e-1 b -move-> g]
+ | |
+ o | 4 a-2: e -move-> f]
+ | |
+ o | 3 a-1: d -move-> e]
+ |/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Note:
+| In this case, the merge gets conflicting information since each side has a
+| different way to reach 'f'.
+
+final summary
+
+ $ hg log -G
+ @ 20 mEAm-0 simple merge - the other way]
+ |\
+ +---o 19 mAEm-0 simple merge - one way]
+ | |/
+ | o 18 e-2 g -move-> f]
+ | |
+ | o 17 e-1 b -move-> g]
+ | |
+ | | o 16 mDBm-0 simple merge - the other way]
+ | | |\
+ | | +---o 15 mBDm-0 simple merge - one way]
+ | | | |/
+ | | | o 14 d-2 re-add d]
+ | | | |
+ | +---o 13 d-1 delete d]
+ | | |
+ | | | o 12 mCBm-1 re-add d]
+ | | | |
+ | | | o 11 mCBm-0 simple merge - the other way]
+ | | |/|
+ | | | | o 10 mBCm-1 re-add d]
+ | | | | |
+ | | +---o 9 mBCm-0 simple merge - one way]
+ | | | |/
+ | +---o 8 c-1 delete d]
+ | | |
+ +-----o 7 mABm-0 simple merge - the other way]
+ | | |/
+ +-----o 6 mBAm-0 simple merge - one way]
+ | | |/
+ | | o 5 b-1: b update]
+ | |/
+ o | 4 a-2: e -move-> f]
+ | |
+ o | 3 a-1: d -move-> e]
+ |/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+Check results
+=============
+
+merging with an unrelated change does not interfere with the renames
+---------------------------------------------------------------------
+
+- rename on one side
+- unrelated change on the other side
+
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mABm")'
+ A f
+ d
+ R d
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBAm")'
+ A f
+ d
+ R d
+ $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mABm")'
+ M b
+ $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mBAm")'
+ M b
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mABm")'
+ M b
+ A f
+ d
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBAm")'
+ M b
+ A f
+ d
+ R d
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mABm")'
+ M b
+ A f
+ a
+ R a
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBAm")'
+ M b
+ A f
+ a
+ R a
+
+merging with the side having a delete
+-------------------------------------
+
+case summary:
+- one side with a change to an unrelated file
+- one side deleting the renamed file
+and recreating an unrelated file (with the same name) after the merge
+
+checks:
+- comparing from the merge
+
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBCm-0")'
+ R d
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mCBm-0")'
+ R d
+ $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mBCm-0")'
+ M b
+ $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCBm-0")'
+ M b
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBCm-0")'
+ M b
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mCBm-0")'
+ M b
+ R d
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-0")'
+ M b
+ R a
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-0")'
+ M b
+ R a
+
+- comparing with the merge children re-adding the file
+
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBCm-1")'
+ M d
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mCBm-1")'
+ M d
+ $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mBCm-1")'
+ M b
+ A d
+ $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCBm-1")'
+ M b
+ A d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBCm-1")'
+ M b
+ M d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mCBm-1")'
+ M b
+ M d
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-1")'
+ M b
+ A d
+ R a
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-1")'
+ M b
+ A d
+ R a
+
+Comparing with a merge re-adding the file afterward
+---------------------------------------------------
+
+Merge:
+- one side with a change to an unrelated file
+- one side deleting and recreating the file
+
+Note:
+| In this case, the merge gets conflicting information: on one side we have a
+| "brand new" d, and on the other one we have "d renamed from c (itself renamed
+| from a)".
+|
+| The current code arbitrarily picks one side.
+
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBDm-0")'
+ M d
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mDBm-0")'
+ M d
+ $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mBDm-0")'
+ M b
+ M d
+ $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mDBm-0")'
+ M b
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mBDm-0")'
+ M b
+ M d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mDBm-0")'
+ M b
+ M d
+
+The recorded copy is different depending on where we started the merge from:
+
+ $ hg manifest --debug --rev 'desc("mBDm-0")' | grep '644 d'
+ 0bb5445dc4d02f4e0d86cf16f9f3a411d0f17744 644 d
+ $ hg manifest --debug --rev 'desc("mDBm-0")' | grep '644 d'
+ b004912a8510032a0350a74daa2803dadfb00e12 644 d
+
+This second value, b004912a8510032a0350a74daa2803dadfb00e12, seems wrong: we should probably have recorded a merge.
+ $ hg manifest --debug --rev 'desc("d-2")' | grep '644 d'
+ b004912a8510032a0350a74daa2803dadfb00e12 644 d
+ $ hg manifest --debug --rev 'desc("b-1")' | grep '644 d'
+ 01c2f5eabdc4ce2bdee42b5f86311955e6c8f573 644 d
+ $ hg debugindex d
+ rev linkrev nodeid p1 p2
+ 0 2 01c2f5eabdc4 000000000000 000000000000
+ 1 10 b004912a8510 000000000000 000000000000
+ 2 15 0bb5445dc4d0 01c2f5eabdc4 b004912a8510
+
+ $ hg log -Gfr 'desc("mBDm-0")' d
+ o 15 mBDm-0 simple merge - one way]
+ |\
+ o : 14 d-2 re-add d]
+ :/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+(That output seems wrong: if we had the opportunity to record the merge, we
+should probably have recorded it.)
+
+ $ hg log -Gfr 'desc("mDBm-0")' d
+ o 14 d-2 re-add d]
+ |
+ ~
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBDm-0")'
+ M b
+ A d
+ a
+ R a
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDBm-0")'
+ M b
+ A d
+ R a
+
+Comparing with a merge of two branches renaming different files to 'f'
+----------------------------------------------------------------------
+
+- one side renaming d to f (via e)
+- the other side renaming b to f (via g)
+
+ $ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644 f'
+ eb806e34ef6be4c264effd5933d31004ad15a793 644 f
+ $ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644 f'
+ eb806e34ef6be4c264effd5933d31004ad15a793 644 f
+ $ hg manifest --debug --rev 'desc("a-2")' | grep '644 f'
+ 0dd616bc7ab1a111921d95d76f69cda5c2ac539c 644 f
+ $ hg manifest --debug --rev 'desc("e-2")' | grep '644 f'
+ 6da5a2eecb9c833f830b67a4972366d49a9a142c 644 f
+ $ hg debugindex f
+ rev linkrev nodeid p1 p2
+ 0 4 0dd616bc7ab1 000000000000 000000000000
+ 1 18 6da5a2eecb9c 000000000000 000000000000
+ 2 19 eb806e34ef6b 0dd616bc7ab1 6da5a2eecb9c
+ $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAEm-0")'
+ M f
+ R b
+ $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEAm-0")'
+ M f
+ R b
+ $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAEm-0")'
+ M f
+ R d
+ $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEAm-0")'
+ M f
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
+ A f
+ d
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("e-2")'
+ A f
+ b
+ R b
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mAEm-0")'
+ A f
+ d
+ R b
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mEAm-0")'
+ A f
+ d
+ R b
+ R d
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm-0")'
+ A f
+ a
+ R a
+ R b
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm-0")'
+ A f
+ a
+ R a
+ R b
+
+ $ hg log -Gfr 'desc("mAEm-0")' f
+ o 19 mAEm-0 simple merge - one way]
+ |\
+ | o 18 e-2 g -move-> f]
+ | |
+ | o 17 e-1 b -move-> g]
+ | |
+ o | 4 a-2: e -move-> f]
+ | |
+ o | 3 a-1: d -move-> e]
+ |/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
+
+ $ hg log -Gfr 'desc("mEAm-0")' f
+ @ 20 mEAm-0 simple merge - the other way]
+ |\
+ | o 18 e-2 g -move-> f]
+ | |
+ | o 17 e-1 b -move-> g]
+ | |
+ o | 4 a-2: e -move-> f]
+ | |
+ o | 3 a-1: d -move-> e]
+ |/
+ o 2 i-2: c -move-> d]
+ |
+ o 1 i-1: a -move-> c]
+ |
+ o 0 i-0 initial commit: a b]
+
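
For reference, the checks in the new test above rely on two standard ways of
querying copy information. A minimal sketch of the command forms only, with
output omitted and using the revision selectors from the test:

  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mABm")'
  $ hg log -G --follow f

`hg status --copies` lists, under each added file, the source it was copied or
renamed from, while `--follow` walks a file's history across the renames
recorded in its filelog.
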
--- a/tests/test-copy-move-merge.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-copy-move-merge.t Thu Feb 13 10:12:12 2020 -0800
@@ -1,6 +1,19 @@
Test for the full copytracing algorithm
=======================================
+
+Initial Setup
+=============
+
+Use git diff to see renames
+
+ $ cat << EOF >> $HGRCPATH
+ > [diff]
+ > git=yes
+ > EOF
+
+Set up a history where one side copies and renames a file (and updates it) while the other side updates it.
+
$ hg init t
$ cd t
@@ -22,13 +35,67 @@
$ hg ci -qAm "other"
+ $ hg log -G --patch
+ @ changeset: 2:add3f11052fa
+ | tag: tip
+ | parent: 0:b8bf91eeebbc
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: other
+ |
+ | diff --git a/a b/a
+ | --- a/a
+ | +++ b/a
+ | @@ -1,1 +1,2 @@
+ | +0
+ | 1
+ |
+ | o changeset: 1:17c05bb7fcb6
+ |/ user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: second
+ |
+ | diff --git a/a b/b
+ | rename from a
+ | rename to b
+ | --- a/a
+ | +++ b/b
+ | @@ -1,1 +1,2 @@
+ | 1
+ | +2
+ | diff --git a/a b/c
+ | copy from a
+ | copy to c
+ | --- a/a
+ | +++ b/c
+ | @@ -1,1 +1,2 @@
+ | 1
+ | +2
+ |
+ o changeset: 0:b8bf91eeebbc
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: first
+
+ diff --git a/a b/a
+ new file mode 100644
+ --- /dev/null
+ +++ b/a
+ @@ -0,0 +1,1 @@
+ +1
+
+
+Test Simple Merge
+=================
+
$ hg merge --debug
unmatched files in other:
b
c
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
- src: 'a' -> dst: 'c' *
+ on remote side:
+ src: 'a' -> dst: 'b' *
+ src: 'a' -> dst: 'c' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -63,8 +130,10 @@
2
Test disabling copy tracing
+===========================
-- first verify copy metadata was kept
+first verify copy metadata was kept
+-----------------------------------
$ hg up -qC 2
$ hg rebase --keep -d 1 -b 2 --config extensions.rebase=
@@ -77,7 +146,8 @@
1
2
-- next verify copy metadata is lost when disabled
+next verify copy metadata is lost when disabled
+------------------------------------------------
$ hg strip -r . --config extensions.strip=
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -98,6 +168,7 @@
$ cd ..
Verify disabling copy tracing still keeps copies from rebase source
+-------------------------------------------------------------------
$ hg init copydisable
$ cd copydisable
@@ -130,7 +201,14 @@
$ cd ../
-Verify we duplicate existing copies, instead of detecting them
+
+test storage preservation
+-------------------------
+
+Verify that rebase does not discard recorded copy data when copy tracing is
+disabled.
+
+Setup
$ hg init copydisable3
$ cd copydisable3
@@ -153,6 +231,12 @@
|/
o 0 add a
+
+Actual Test
+
+A file is copied on one side and has been moved twice on the other side. The
+file is copied from `0:a`, so the file history of `3:b` should trace directly
+to `0:a`.
+
$ hg rebase -d 2 -s 3 --config extensions.rebase= --config experimental.copytrace=off
rebasing 3:47e1a9e6273b "copy a->b (2)" (tip)
saved backup bundle to $TESTTMP/copydisable3/.hg/strip-backup/47e1a9e6273b-2d099c59-rebase.hg
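
For reference, the knob exercised above can be set either per invocation or
persistently; a minimal sketch (same configuration as used in this test):

  $ hg rebase -d 2 -s 3 --config extensions.rebase= --config experimental.copytrace=off

  [experimental]
  copytrace = off

Even with tracing disabled, copy metadata that was already recorded in the
rebased changesets is preserved, which is what the `test storage preservation`
block above verifies.
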
--- a/tests/test-double-merge.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-double-merge.t Thu Feb 13 10:12:12 2020 -0800
@@ -29,7 +29,8 @@
unmatched files in other:
bar
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'foo' -> dst: 'bar' *
+ on remote side:
+ src: 'foo' -> dst: 'bar' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastexport.t Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,855 @@
+ $ cat >> $HGRCPATH << EOF
+ > [extensions]
+ > fastexport=
+ > EOF
+
+ $ hg init
+
+ $ hg debugbuilddag -mon '+2:tbase @name1 +3:thead1 <tbase @name2 +4:thead2 @both /thead1 +2:tmaintip'
+
+ $ hg up -r 10
+ 13 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg rm nf10
+ $ hg commit -u debugbuilddag --date 'Thu Jan 01 00:00:12 1970 +0000' -m r12
+ created new head
+ $ hg up -r 11
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 12
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg commit -m debugbuilddag --date 'Thu Jan 01 00:00:13 1970 +0000'
+
+ $ hg log -G
+ @ changeset: 13:e5c379648af4
+ |\ branch: both
+ | | tag: tip
+ | | parent: 11:2cbd52c10e88
+ | | parent: 12:4f31c9604af6
+ | | user: test
+ | | date: Thu Jan 01 00:00:13 1970 +0000
+ | | summary: debugbuilddag
+ | |
+ | o changeset: 12:4f31c9604af6
+ | | branch: both
+ | | parent: 10:9220596cb068
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:12 1970 +0000
+ | | summary: r12
+ | |
+ o | changeset: 11:2cbd52c10e88
+ |/ branch: both
+ | tag: tmaintip
+ | user: debugbuilddag
+ | date: Thu Jan 01 00:00:11 1970 +0000
+ | summary: r11
+ |
+ o changeset: 10:9220596cb068
+ | branch: both
+ | user: debugbuilddag
+ | date: Thu Jan 01 00:00:10 1970 +0000
+ | summary: r10
+ |
+ o changeset: 9:0767d147d86e
+ |\ branch: both
+ | | parent: 8:0d0219415f18
+ | | parent: 4:e8bc3a6ab9ae
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:09 1970 +0000
+ | | summary: r9
+ | |
+ | o changeset: 8:0d0219415f18
+ | | branch: name2
+ | | tag: thead2
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:08 1970 +0000
+ | | summary: r8
+ | |
+ | o changeset: 7:82c6c8b3ac68
+ | | branch: name2
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:07 1970 +0000
+ | | summary: r7
+ | |
+ | o changeset: 6:94093a13175f
+ | | branch: name2
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:06 1970 +0000
+ | | summary: r6
+ | |
+ | o changeset: 5:4baee2f72e9e
+ | | branch: name2
+ | | parent: 1:bf4022f1addd
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:05 1970 +0000
+ | | summary: r5
+ | |
+ o | changeset: 4:e8bc3a6ab9ae
+ | | branch: name1
+ | | tag: thead1
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:04 1970 +0000
+ | | summary: r4
+ | |
+ o | changeset: 3:46148e496a8a
+ | | branch: name1
+ | | user: debugbuilddag
+ | | date: Thu Jan 01 00:00:03 1970 +0000
+ | | summary: r3
+ | |
+ o | changeset: 2:29863c4219cd
+ |/ branch: name1
+ | user: debugbuilddag
+ | date: Thu Jan 01 00:00:02 1970 +0000
+ | summary: r2
+ |
+ o changeset: 1:bf4022f1addd
+ | tag: tbase
+ | user: debugbuilddag
+ | date: Thu Jan 01 00:00:01 1970 +0000
+ | summary: r1
+ |
+ o changeset: 0:ae6ae30a671b
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: r0
+
+
+ $ hg fastexport --export-marks fastexport.marks
+ blob
+ mark :1
+ data 65
+ 0 r0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :2
+ data 3
+ r0
+
+ commit refs/heads/default
+ mark :3
+ committer "debugbuilddag" <debugbuilddag> 0 -0000
+ data 2
+ r0
+ M 644 :1 mf
+ M 644 :2 nf0
+ M 644 :2 of
+
+ blob
+ mark :4
+ data 68
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :5
+ data 3
+ r1
+
+ blob
+ mark :6
+ data 3
+ r1
+
+ commit refs/heads/default
+ mark :7
+ committer "debugbuilddag" <debugbuilddag> 1 -0000
+ data 2
+ r1
+ from :3
+ M 644 :4 mf
+ M 644 :5 nf1
+ M 644 :6 of
+
+ blob
+ mark :8
+ data 71
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4 r2
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :9
+ data 3
+ r2
+
+ blob
+ mark :10
+ data 3
+ r2
+
+ commit refs/heads/name1
+ mark :11
+ committer "debugbuilddag" <debugbuilddag> 2 -0000
+ data 2
+ r2
+ from :7
+ M 644 :8 mf
+ M 644 :9 nf2
+ M 644 :10 of
+
+ blob
+ mark :12
+ data 74
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4 r2
+ 5
+ 6 r3
+ 7
+ 8
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :13
+ data 3
+ r3
+
+ blob
+ mark :14
+ data 3
+ r3
+
+ commit refs/heads/name1
+ mark :15
+ committer "debugbuilddag" <debugbuilddag> 3 -0000
+ data 2
+ r3
+ from :11
+ M 644 :12 mf
+ M 644 :13 nf3
+ M 644 :14 of
+
+ blob
+ mark :16
+ data 77
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4 r2
+ 5
+ 6 r3
+ 7
+ 8 r4
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :17
+ data 3
+ r4
+
+ blob
+ mark :18
+ data 3
+ r4
+
+ commit refs/heads/name1
+ mark :19
+ committer "debugbuilddag" <debugbuilddag> 4 -0000
+ data 2
+ r4
+ from :15
+ M 644 :16 mf
+ M 644 :17 nf4
+ M 644 :18 of
+
+ blob
+ mark :20
+ data 71
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10 r5
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :21
+ data 3
+ r5
+
+ blob
+ mark :22
+ data 3
+ r5
+
+ commit refs/heads/name2
+ mark :23
+ committer "debugbuilddag" <debugbuilddag> 5 -0000
+ data 2
+ r5
+ from :7
+ M 644 :20 mf
+ M 644 :21 nf5
+ M 644 :22 of
+
+ blob
+ mark :24
+ data 74
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10 r5
+ 11
+ 12 r6
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :25
+ data 3
+ r6
+
+ blob
+ mark :26
+ data 3
+ r6
+
+ commit refs/heads/name2
+ mark :27
+ committer "debugbuilddag" <debugbuilddag> 6 -0000
+ data 2
+ r6
+ from :23
+ M 644 :24 mf
+ M 644 :25 nf6
+ M 644 :26 of
+
+ blob
+ mark :28
+ data 77
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10 r5
+ 11
+ 12 r6
+ 13
+ 14 r7
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :29
+ data 3
+ r7
+
+ blob
+ mark :30
+ data 3
+ r7
+
+ commit refs/heads/name2
+ mark :31
+ committer "debugbuilddag" <debugbuilddag> 7 -0000
+ data 2
+ r7
+ from :27
+ M 644 :28 mf
+ M 644 :29 nf7
+ M 644 :30 of
+
+ blob
+ mark :32
+ data 80
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10 r5
+ 11
+ 12 r6
+ 13
+ 14 r7
+ 15
+ 16 r8
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :33
+ data 3
+ r8
+
+ blob
+ mark :34
+ data 3
+ r8
+
+ commit refs/heads/name2
+ mark :35
+ committer "debugbuilddag" <debugbuilddag> 8 -0000
+ data 2
+ r8
+ from :31
+ M 644 :32 mf
+ M 644 :33 nf8
+ M 644 :34 of
+
+ blob
+ mark :36
+ data 92
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4 r2
+ 5
+ 6 r3
+ 7
+ 8 r4
+ 9
+ 10 r5
+ 11
+ 12 r6
+ 13
+ 14 r7
+ 15
+ 16 r8
+ 17
+ 18 r9
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :37
+ data 3
+ r9
+
+ blob
+ mark :38
+ data 3
+ r9
+
+ commit refs/heads/both
+ mark :39
+ committer "debugbuilddag" <debugbuilddag> 9 -0000
+ data 2
+ r9
+ from :35
+ merge :19
+ M 644 :36 mf
+ M 644 :9 nf2
+ M 644 :13 nf3
+ M 644 :17 nf4
+ M 644 :37 nf9
+ M 644 :38 of
+
+ blob
+ mark :40
+ data 96
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4 r2
+ 5
+ 6 r3
+ 7
+ 8 r4
+ 9
+ 10 r5
+ 11
+ 12 r6
+ 13
+ 14 r7
+ 15
+ 16 r8
+ 17
+ 18 r9
+ 19
+ 20 r10
+ 21
+ 22
+ 23
+
+ blob
+ mark :41
+ data 4
+ r10
+
+ blob
+ mark :42
+ data 4
+ r10
+
+ commit refs/heads/both
+ mark :43
+ committer "debugbuilddag" <debugbuilddag> 10 -0000
+ data 3
+ r10
+ from :39
+ M 644 :40 mf
+ M 644 :41 nf10
+ M 644 :42 of
+
+ blob
+ mark :44
+ data 100
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4 r2
+ 5
+ 6 r3
+ 7
+ 8 r4
+ 9
+ 10 r5
+ 11
+ 12 r6
+ 13
+ 14 r7
+ 15
+ 16 r8
+ 17
+ 18 r9
+ 19
+ 20 r10
+ 21
+ 22 r11
+ 23
+
+ blob
+ mark :45
+ data 4
+ r11
+
+ blob
+ mark :46
+ data 4
+ r11
+
+ commit refs/heads/both
+ mark :47
+ committer "debugbuilddag" <debugbuilddag> 11 -0000
+ data 3
+ r11
+ from :43
+ M 644 :44 mf
+ M 644 :45 nf11
+ M 644 :46 of
+
+ commit refs/heads/both
+ mark :48
+ committer "debugbuilddag" <debugbuilddag> 12 -0000
+ data 3
+ r12
+ from :43
+ D nf10
+
+ commit refs/heads/both
+ mark :49
+ committer "test" <test> 13 -0000
+ data 13
+ debugbuilddag
+ from :47
+ merge :48
+ D nf10
+
+ $ cat fastexport.marks
+ e1767c7564f83127d75331428473dd0512b36cc6
+ 2c436e3f677d989438ddd9a7e5e4d56e016dfd35
+ ae6ae30a671be09096aaaf51217b3691eec0eee0
+ 016f8fd6128ac4bd19ec5a6ae128dadc3873b13f
+ a0e6fc91007068df3bc60f46ce0a893a73189b54
+ 1a085e1daf625e186ee0064c64ff41731a901f24
+ bf4022f1addd28523fb1122ac6166a29da58d34c
+ 2c45ad1c720111830380baa89a6a16cae1bef688
+ 180506669a19f4b8317009fc6fa0043966d1ffb4
+ 1ebc486e6a5c2c8ca7e531cf0b63dfcc071ec324
+ 29863c4219cd68e0f57aecd5ffc12ba83313f26b
+ d20e5eeac6991189eefad45cd8ea0f6a32ce8122
+ 710c4580a600b8aadc63fa3d7bb0fab71b127c04
+ fa27314b56d7b6f90c1caeebb2a74730b3747574
+ 46148e496a8a75fde9e203b1ded69ec99289af27
+ e5548c667d7eeb6c326e723c579888341329c9fe
+ 3c1407305701051cbed9f9cb9a68bdfb5997c235
+ e2ed51893b0a54bd7fef5a406a0c489d668f19c3
+ e8bc3a6ab9aef589f5db504f401953449a3c3a10
+ 558f3a23efc0a1a972e14d5314a65918791b77be
+ 0dbd89c185f53a1727c54cd1ce256482fa23968e
+ f84faeb138605b36d74324c6d0ea76a9099c3567
+ 4baee2f72e9eeae2aef5b9e1ec416020090672ef
+ 412c5793886eaaabb31debd36695f6215a719865
+ a0eafc60760d32b690564b8588ba042cc63e0c74
+ a53842517de32d2f926c38a170c29dc90ae3348a
+ 94093a13175f1cfcbbfddaa0ceafbd3a89784b91
+ d2f0d76af0be0da17ec88190215eadb8706689ab
+ 639939af794373d6c2ab12c2ef637cd220174389
+ cc8921e2b19a88147643ea825459ffa140e3d704
+ 82c6c8b3ac6873fadd9083323b02cc6a53659130
+ c6cc0b14a3e6e61906242d6fce28b9510c9f9208
+ 093593169cb4716f94e52ed7561bb84b36b7eb9d
+ 034df75dc138e7507e061d26170b4c44321a5d92
+ 0d0219415f18c43636163fff4160f41600951a25
+ f13693f6e6052eeb189521945fef56892e812fdb
+ 1239c633b8a7a7283825dba9171bf285e5790852
+ 34b655bd51e8573b8e85c1c1476a94d8573babef
+ 0767d147d86e1546593bda50f1e11276c0ac8f1a
+ 284ca43bbbe82e89c0f1d977e8ac6cfb969c05ec
+ 15315ab9e272ec81ae8d847996e5bdecd5635b0b
+ 78c10aaf21f49d518c7ccb8318c29abb5d4e5db7
+ 9220596cb068dfc73e2f7e695dc8ad0858a936db
+ 32abd0da49b7c7ee756298fc46a15584d6aedc99
+ 33fbc651630ffa7ccbebfe4eb91320a873e7291c
+ 868d828870663d075cdcff502d26cf8445ce068e
+ 2cbd52c10e88ce604402dc83a869ec4f07765b3d
+ 4f31c9604af676986343d775b05695f535e8db5e
+ e5c379648af4c9fa3b5546ab7ee6e61a36082830
+
+ $ hg fastexport --export-marks fastexport.marks2 -r 0
+ blob
+ mark :1
+ data 65
+ 0 r0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :2
+ data 3
+ r0
+
+ commit refs/heads/default
+ mark :3
+ committer "debugbuilddag" <debugbuilddag> 0 -0000
+ data 2
+ r0
+ M 644 :1 mf
+ M 644 :2 nf0
+ M 644 :2 of
+
+ $ cat fastexport.marks2
+ e1767c7564f83127d75331428473dd0512b36cc6
+ 2c436e3f677d989438ddd9a7e5e4d56e016dfd35
+ ae6ae30a671be09096aaaf51217b3691eec0eee0
+ $ hg fastexport --import-marks fastexport.marks2 -r 1
+ blob
+ mark :4
+ data 68
+ 0 r0
+ 1
+ 2 r1
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ 21
+ 22
+ 23
+
+ blob
+ mark :5
+ data 3
+ r1
+
+ blob
+ mark :6
+ data 3
+ r1
+
+ commit refs/heads/default
+ mark :7
+ committer "debugbuilddag" <debugbuilddag> 1 -0000
+ data 2
+ r1
+ from :3
+ M 644 :4 mf
+ M 644 :5 nf1
+ M 644 :6 of
+
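
Since the emitted stream follows the git fast-import format, a plausible way
to consume it (not exercised by this test, and assuming a git binary is
available) is to pipe it straight into `git fast-import`, reusing the marks
file for incremental exports as shown above; a minimal sketch:

  $ git init --bare ../mirror.git
  $ hg fastexport --export-marks ../hg.marks | GIT_DIR=../mirror.git git fast-import

The test itself only checks the generated stream and the
`--export-marks`/`--import-marks` round-trip.
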
--- a/tests/test-fncache.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-fncache.t Thu Feb 13 10:12:12 2020 -0800
@@ -356,7 +356,7 @@
$ cat .hg/store/fncache | sort
data/y.i
data/z.i
- $ hg recover
+ $ hg recover --verify
rolling back interrupted transaction
checking changesets
checking manifests
--- a/tests/test-graft-interrupted.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-graft-interrupted.t Thu Feb 13 10:12:12 2020 -0800
@@ -431,7 +431,7 @@
$ hg log -GT "{rev}:{node|short} {desc}"
@ 6:6ec71c037d94 added x
|
- | o 5:36b793615f78 added foo to c
+ | % 5:36b793615f78 added foo to c
| |
| | o 4:863a25e1a9ea added x
| |/
@@ -622,7 +622,7 @@
$ hg log -GT "{rev}:{node|short} {desc}\n"
@ 4:2aa9ad1006ff B in file a
|
- | o 3:09e253b87e17 A in file a
+ | % 3:09e253b87e17 A in file a
| |
| o 2:d36c0562f908 c
| |
@@ -669,7 +669,7 @@
$ hg log -GT "{rev}:{node|short} {desc}\n"
@ 4:2aa9ad1006ff B in file a
|
- | o 3:09e253b87e17 A in file a
+ | % 3:09e253b87e17 A in file a
| |
| o 2:d36c0562f908 c
| |
@@ -712,7 +712,7 @@
$ hg log -GT "{rev}:{node|short} {desc}\n"
@ 4:2aa9ad1006ff B in file a
|
- | o 3:09e253b87e17 A in file a
+ | % 3:09e253b87e17 A in file a
| |
| o 2:d36c0562f908 c
| |
--- a/tests/test-graft.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-graft.t Thu Feb 13 10:12:12 2020 -0800
@@ -204,7 +204,8 @@
unmatched files in local:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: True, partial: False
@@ -223,7 +224,8 @@
updating the branch cache
grafting 5:97f8bfe72746 "5"
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'c' -> dst: 'b'
+ on local side:
+ src: 'c' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: True, partial: False
@@ -239,7 +241,8 @@
scanning for duplicate grafts
grafting 4:9c233e8e184d "4"
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'c' -> dst: 'b'
+ on local side:
+ src: 'c' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: True, partial: False
@@ -746,7 +749,10 @@
scanning for duplicate grafts
grafting 13:7a4785234d87 "2"
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on local side:
+ src: 'a' -> dst: 'b' *
+ on remote side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: True, partial: False
--- a/tests/test-help.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-help.t Thu Feb 13 10:12:12 2020 -0800
@@ -364,6 +364,7 @@
eol automatically manage newlines in repository files
extdiff command to allow external programs to compare revisions
factotum http authentication with factotum
+ fastexport export repositories as git fast-import stream
githelp try mapping git commands to Mercurial commands
gpg commands to sign and verify changesets
hgk browse the repository in a graphical way
@@ -787,6 +788,12 @@
(use 'hg help extensions' for information on enabling extensions)
[255]
+Checking that help adapts based on the config:
+
+ $ hg help diff --config ui.tweakdefaults=true | egrep -e '^ *(-g|config)'
+ -g --[no-]git use git extended diff format (default: on from
+ config)
+
Make sure that we don't run afoul of the help system thinking that
this is a section and erroring out weirdly.
@@ -1017,6 +1024,7 @@
print merge state
debugnamecomplete
complete "names" - tags, open branch names, bookmark names
+ debugnodemap write and inspect on disk nodemap
debugobsolete
create arbitrary obsolete marker
debugoptADV (no help text available)
@@ -1054,6 +1062,8 @@
debugsub (no help text available)
debugsuccessorssets
show set of successors for revision
+ debugtagscache
+ display the contents of .hg/cache/hgtagsfnodes1
debugtemplate
parse and apply a template
debuguigetpass
--- a/tests/test-install.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-install.t Thu Feb 13 10:12:12 2020 -0800
@@ -2,6 +2,7 @@
$ hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
+ checking Python implementation (*) (glob)
checking Python version (2.*) (glob) (no-py3 !)
checking Python version (3.*) (glob) (py3 !)
checking Python lib (.*[Ll]ib.*)... (re)
@@ -43,6 +44,7 @@
"hgverextra": "*", (glob)
"problems": 0,
"pythonexe": "*", (glob)
+ "pythonimplementation": "*", (glob)
"pythonlib": "*", (glob)
"pythonsecurity": [*], (glob)
"pythonver": "*.*.*", (glob)
@@ -58,6 +60,7 @@
$ HGUSER= hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
+ checking Python implementation (*) (glob)
checking Python version (2.*) (glob) (no-py3 !)
checking Python version (3.*) (glob) (py3 !)
checking Python lib (.*[Ll]ib.*)... (re)
@@ -103,6 +106,7 @@
$ HGEDITOR="~/tools/testeditor.exe" hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
+ checking Python implementation (*) (glob)
checking Python version (2.*) (glob) (no-py3 !)
checking Python version (3.*) (glob) (py3 !)
checking Python lib (.*[Ll]ib.*)... (re)
@@ -128,6 +132,7 @@
$ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
+ checking Python implementation (*) (glob)
checking Python version (2.*) (glob) (no-py3 !)
checking Python version (3.*) (glob) (py3 !)
checking Python lib (.*[Ll]ib.*)... (re)
@@ -185,6 +190,7 @@
$ ./installenv/*/hg debuginstall || cat pip.log
checking encoding (ascii)...
checking Python executable (*) (glob)
+ checking Python implementation (*) (glob)
checking Python version (3.*) (glob)
checking Python lib (*)... (glob)
checking Python security support (*) (glob)
@@ -221,6 +227,7 @@
$ ./installenv/*/hg debuginstall || cat pip.log
checking encoding (ascii)...
checking Python executable (*) (glob)
+ checking Python implementation (*) (glob)
checking Python version (2.*) (glob)
checking Python lib (*)... (glob)
checking Python security support (*) (glob)
--- a/tests/test-issue1802.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-issue1802.t Thu Feb 13 10:12:12 2020 -0800
@@ -52,8 +52,6 @@
Simulate a Windows merge:
$ hg --config extensions.n=$TESTTMP/noexec.py merge --debug
- unmatched files in local:
- b
resolving manifests
branchmerge: True, force: False, partial: False
ancestor: a03b0deabf2b, local: d6fa54f68ae1+, remote: 2d8bcf2dda39
--- a/tests/test-issue522.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-issue522.t Thu Feb 13 10:12:12 2020 -0800
@@ -25,8 +25,6 @@
$ hg ci -qAm 'add bar'
$ hg merge --debug
- unmatched files in local:
- bar
resolving manifests
branchmerge: True, force: False, partial: False
ancestor: bbd179dfa0a7, local: 71766447bdbb+, remote: 4d9e78aaceee
--- a/tests/test-issue672.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-issue672.t Thu Feb 13 10:12:12 2020 -0800
@@ -28,7 +28,8 @@
unmatched files in other:
1a
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: '1' -> dst: '1a'
+ on remote side:
+ src: '1' -> dst: '1a'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -56,7 +57,8 @@
unmatched files in local:
1a
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: '1' -> dst: '1a' *
+ on local side:
+ src: '1' -> dst: '1a' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -78,7 +80,8 @@
unmatched files in other:
1a
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: '1' -> dst: '1a' *
+ on remote side:
+ src: '1' -> dst: '1a' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
--- a/tests/test-journal-exists.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-journal-exists.t Thu Feb 13 10:12:12 2020 -0800
@@ -15,11 +15,7 @@
$ hg recover
rolling back interrupted transaction
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 1 changesets with 1 changes to 1 files
+ (verify step skipped, run `hg verify` to check your repository content)
recover, explicit verify
--- a/tests/test-lfs-serve-access.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-lfs-serve-access.t Thu Feb 13 10:12:12 2020 -0800
@@ -17,6 +17,7 @@
$ hg init server
$ hg --config "lfs.usercache=$TESTTMP/servercache" \
> --config experimental.lfs.serve=False -R server serve -d \
+ > --config experimental.lfs.worker-enable=False \
> -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
$ cat hg.pid >> $DAEMON_PIDS
--- a/tests/test-lfs-serve.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-lfs-serve.t Thu Feb 13 10:12:12 2020 -0800
@@ -65,6 +65,7 @@
> debugprocessors = $TESTTMP/debugprocessors.py
> [experimental]
> lfs.disableusercache = True
+ > lfs.worker-enable = False
> [lfs]
> threshold=10
> [web]
--- a/tests/test-lfs-test-server.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-lfs-test-server.t Thu Feb 13 10:12:12 2020 -0800
@@ -40,6 +40,8 @@
#endif
$ cat >> $HGRCPATH <<EOF
+ > [experimental]
+ > lfs.worker-enable = False
> [extensions]
> lfs=
> [lfs]
--- a/tests/test-manifest.py Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-manifest.py Thu Feb 13 10:12:12 2020 -0800
@@ -171,7 +171,7 @@
self.assertEqual(want, m[b'foo'])
# make sure the suffix survives a copy
match = matchmod.match(util.localpath(b'/repo'), b'', [b're:foo'])
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual(want, m2[b'foo'])
self.assertEqual(1, len(m2))
m2 = m.copy()
@@ -196,7 +196,7 @@
match.matchfn = filt
with self.assertRaises(AssertionError):
- m.matches(match)
+ m._matches(match)
def testRemoveItem(self):
m = self.parsemanifest(A_SHORT_MANIFEST)
@@ -300,7 +300,7 @@
m = self.parsemanifest(A_HUGE_MANIFEST)
match = matchmod.exact([b'file1', b'file200', b'file300'])
- m2 = m.matches(match)
+ m2 = m._matches(match)
w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % (
HASH_2,
@@ -318,7 +318,7 @@
match = matchmod.exact(
[b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
)
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual(
[b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys()
@@ -332,7 +332,7 @@
match = matchmod.match(
util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
)
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual([], m2.keys())
@@ -343,7 +343,7 @@
flist = m.keys()[80:300]
match = matchmod.exact(flist)
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual(flist, m2.keys())
@@ -352,7 +352,7 @@
m = self.parsemanifest(A_DEEPER_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual(m.keys(), m2.keys())
@@ -364,7 +364,7 @@
match = matchmod.match(
util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
)
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual(
[
@@ -388,7 +388,7 @@
m = self.parsemanifest(A_DEEPER_MANIFEST)
match = matchmod.exact([b'a/b'])
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual([], m2.keys())
@@ -400,7 +400,7 @@
match = matchmod.match(
util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
)
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual(
[
@@ -423,7 +423,7 @@
m = self.parsemanifest(A_DEEPER_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
- m2 = m.matches(match)
+ m2 = m._matches(match)
self.assertEqual(
[b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys()
--- a/tests/test-merge-criss-cross.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-merge-criss-cross.t Thu Feb 13 10:12:12 2020 -0800
@@ -410,11 +410,6 @@
note: merging c0ef19750a22+ and 6ca01f7342b9 using bids from ancestors 11b5b303e36c and 154e6000f54e
calculating bids for ancestor 11b5b303e36c
- unmatched files in local:
- d1/a
- d1/b
- unmatched files in other:
- d2/b
resolving manifests
branchmerge: True, force: False, partial: False
ancestor: 11b5b303e36c, local: c0ef19750a22+, remote: 6ca01f7342b9
@@ -424,7 +419,8 @@
unmatched files in other:
d2/b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'd1/b' -> dst: 'd2/b'
+ on remote side:
+ src: 'd1/b' -> dst: 'd2/b'
checking for directory renames
discovered dir src: 'd1/' -> dst: 'd2/'
resolving manifests
--- a/tests/test-merge2.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-merge2.t Thu Feb 13 10:12:12 2020 -0800
@@ -50,4 +50,8 @@
adding b
created new head
+ $ hg merge 'wdir()'
+ abort: merging with the working copy has no effect
+ [255]
+
$ cd ..
--- a/tests/test-merge4.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-merge4.t Thu Feb 13 10:12:12 2020 -0800
@@ -23,3 +23,37 @@
abort: cannot commit merge with missing files
[255]
+
+Test conflict*() revsets
+
+# Bad usage
+ $ hg log -r 'conflictlocal(foo)'
+ hg: parse error: conflictlocal takes no arguments
+ [255]
+ $ hg log -r 'conflictother(foo)'
+ hg: parse error: conflictother takes no arguments
+ [255]
+ $ hg co -C .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+# No merge parents when not merging
+ $ hg log -r 'conflictlocal() + conflictother()'
+# No merge parents when there is no conflict
+ $ hg merge 1
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg log -r 'conflictlocal() + conflictother()'
+ $ hg co -C .
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo conflict > b
+ $ hg ci -Aqm 'conflicting change to b'
+ $ hg merge 1
+ merging b
+ warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ [1]
+# Shows merge parents when there is a conflict
+ $ hg log -r 'conflictlocal()' -T '{rev} {desc}\n'
+ 3 conflicting change to b
+ $ hg log -r 'conflictother()' -T '{rev} {desc}\n'
+ 1 commit #1
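
The `conflictlocal()` and `conflictother()` revsets tested above resolve to
the local and other parents of a conflicted merge, and to nothing when no
conflict is pending, so they can be combined with any command that accepts a
revset; a minimal sketch, assuming an unresolved merge like the one set up
above:

  $ hg log -r 'conflictlocal()' -T '{rev} {desc}\n'
  $ hg diff -r 'conflictother()'
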
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-persistent-nodemap.t Thu Feb 13 10:12:12 2020 -0800
@@ -0,0 +1,86 @@
+===================================
+Test the persistent on-disk nodemap
+===================================
+
+
+ $ hg init test-repo
+ $ cd test-repo
+ $ cat << EOF >> .hg/hgrc
+ > [experimental]
+ > exp-persistent-nodemap=yes
+ > [devel]
+ > persistent-nodemap=yes
+ > EOF
+ $ hg debugbuilddag .+5000
+ $ hg debugnodemap --metadata
+ uid: ???????????????? (glob)
+ tip-rev: 5000
+ data-length: 122880
+ data-unused: 0
+ $ f --size .hg/store/00changelog.n
+ .hg/store/00changelog.n: size=42
+ $ f --sha256 .hg/store/00changelog-*.nd
+ .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
+ $ hg debugnodemap --dump-new | f --sha256 --size
+ size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+ $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
+ size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+ 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
+ 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
+ 0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
+ 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
+ 0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
+ 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
+ 00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
+ 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ $ hg debugnodemap --check
+ revision in index: 5001
+ revision in nodemap: 5001
+
+add a new commit
+
+ $ hg up
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo foo > foo
+ $ hg add foo
+ $ hg ci -m 'foo'
+
+#if pure
+ $ hg debugnodemap --metadata
+ uid: ???????????????? (glob)
+ tip-rev: 5001
+ data-length: 123072
+ data-unused: 192
+#else
+ $ hg debugnodemap --metadata
+ uid: ???????????????? (glob)
+ tip-rev: 5001
+ data-length: 122880
+ data-unused: 0
+#endif
+ $ f --size .hg/store/00changelog.n
+ .hg/store/00changelog.n: size=42
+
+(The pure code uses the debug code that performs an incremental update, while
+the C code re-encodes from scratch.)
+
+#if pure
+ $ f --sha256 .hg/store/00changelog-*.nd --size
+ .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
+
+#else
+ $ f --sha256 .hg/store/00changelog-*.nd --size
+ .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
+
+#endif
+
+ $ hg debugnodemap --check
+ revision in index: 5002
+ revision in nodemap: 5002
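
For reference, the persistent nodemap is opt-in at this point; a minimal
sketch of the configuration used by this test (both keys appear in the setup
above):

  [experimental]
  exp-persistent-nodemap = yes
  [devel]
  persistent-nodemap = yes

`hg debugnodemap --metadata` then reports the docket's uid, tip-rev,
data-length and data-unused fields, and `--check` compares the revision count
in the nodemap against the revlog index.
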
--- a/tests/test-purge.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-purge.t Thu Feb 13 10:12:12 2020 -0800
@@ -120,19 +120,32 @@
directory/untracked_file
$ rm directory/untracked_file
-skip ignored files if --all not specified
+skip ignored files if -i or --all not specified
$ touch ignored
$ hg purge -p
$ hg purge -v
+ $ touch untracked_file
$ ls
directory
ignored
r1
+ untracked_file
+ $ hg purge -p -i
+ ignored
+ $ hg purge -v -i
+ removing file ignored
+ $ ls
+ directory
+ r1
+ untracked_file
+ $ touch ignored
$ hg purge -p --all
ignored
+ untracked_file
$ hg purge -v --all
removing file ignored
+ removing file untracked_file
$ ls
directory
r1
--- a/tests/test-rebase-abort.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-abort.t Thu Feb 13 10:12:12 2020 -0800
@@ -236,7 +236,7 @@
[1]
$ hg tglog
- @ 4:draft 'C1'
+ % 4:draft 'C1'
|
o 3:draft 'B bis'
|
--- a/tests/test-rebase-collapse.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-collapse.t Thu Feb 13 10:12:12 2020 -0800
@@ -486,61 +486,6 @@
abort: cannot collapse multiple named branches
[255]
- $ repeatchange() {
- > hg checkout $1
- > hg cp d z
- > echo blah >> z
- > hg commit -Am "$2" --user "$3"
- > }
- $ repeatchange 3 "E" "user1"
- 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ repeatchange 3 "E" "user2"
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- created new head
- $ hg tglog
- @ 5: fbfb97b1089a 'E'
- |
- | o 4: f338eb3c2c7c 'E'
- |/
- o 3: 41acb9dca9eb 'D'
- |
- | o 2: 8ac4a08debf1 'C' two
- | |
- | o 1: 1ba175478953 'B' one
- |/
- o 0: 1994f17a630e 'A'
-
- $ hg rebase -s 5 -d 4
- rebasing 5:fbfb97b1089a "E" (tip)
- note: not rebasing 5:fbfb97b1089a "E" (tip), its destination already has all its changes
- saved backup bundle to $TESTTMP/e/.hg/strip-backup/fbfb97b1089a-553e1d85-rebase.hg
- $ hg tglog
- @ 4: f338eb3c2c7c 'E'
- |
- o 3: 41acb9dca9eb 'D'
- |
- | o 2: 8ac4a08debf1 'C' two
- | |
- | o 1: 1ba175478953 'B' one
- |/
- o 0: 1994f17a630e 'A'
-
- $ hg export tip
- # HG changeset patch
- # User user1
- # Date 0 0
- # Thu Jan 01 00:00:00 1970 +0000
- # Node ID f338eb3c2c7cc5b5915676a2376ba7ac558c5213
- # Parent 41acb9dca9eb976e84cd21fcb756b4afa5a35c09
- E
-
- diff -r 41acb9dca9eb -r f338eb3c2c7c z
- --- /dev/null Thu Jan 01 00:00:00 1970 +0000
- +++ b/z Thu Jan 01 00:00:00 1970 +0000
- @@ -0,0 +1,2 @@
- +d
- +blah
-
$ cd ..
Rebase, collapse and copies
@@ -767,7 +712,7 @@
|
| @ 2: 82b8abf9c185 'D'
| |
- @ | 1: f899f3910ce7 'B'
+ % | 1: f899f3910ce7 'B'
|/
o 0: 4a2df7238c3b 'A'
@@ -791,7 +736,7 @@
unresolved conflicts (see hg resolve, then hg rebase --continue)
[1]
$ hg tglog
- @ 3: 63668d570d21 'C'
+ % 3: 63668d570d21 'C'
|
| @ 2: 82b8abf9c185 'D'
| |
@@ -817,7 +762,7 @@
abort: edit failed: false exited with status 1
[255]
$ hg tglog
- o 3: 63668d570d21 'C'
+ % 3: 63668d570d21 'C'
|
| @ 2: 82b8abf9c185 'D'
| |
--- a/tests/test-rebase-conflicts.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-conflicts.t Thu Feb 13 10:12:12 2020 -0800
@@ -429,3 +429,72 @@
|/
o 0:draft 'A'
+
+Test where the conflict happens when rebasing a merge commit
+
+ $ cd $TESTTMP
+ $ hg init conflict-in-merge
+ $ cd conflict-in-merge
+ $ hg debugdrawdag <<'EOS'
+ > F # F/conflict = foo\n
+ > |\
+ > D E
+ > |/
+ > C B # B/conflict = bar\n
+ > |/
+ > A
+ > EOS
+
+ $ hg co F
+ 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg rebase -d B
+ rebasing 2:dc0947a82db8 "C" (C)
+ rebasing 3:e7b3f00ed42e "D" (D)
+ rebasing 4:03ca77807e91 "E" (E)
+ rebasing 5:9a6b91dc2044 "F" (F tip)
+ merging conflict
+ warning: conflicts while merging conflict! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see hg resolve, then hg rebase --continue)
+ [1]
+ $ hg tglog
+ @ 8:draft 'E'
+ |
+ | @ 7:draft 'D'
+ |/
+ o 6:draft 'C'
+ |
+ | % 5:draft 'F'
+ | |\
+ | | o 4:draft 'E'
+ | | |
+ | o | 3:draft 'D'
+ | |/
+ | o 2:draft 'C'
+ | |
+ o | 1:draft 'B'
+ |/
+ o 0:draft 'A'
+
+ $ echo baz > conflict
+ $ hg resolve -m
+ (no more unresolved files)
+ continue: hg rebase --continue
+ $ hg rebase -c
+ already rebased 2:dc0947a82db8 "C" (C) as 0199610c343e
+ already rebased 3:e7b3f00ed42e "D" (D) as f0dd538aaa63
+ already rebased 4:03ca77807e91 "E" (E) as cbf25af8347d
+ rebasing 5:9a6b91dc2044 "F" (F)
+ saved backup bundle to $TESTTMP/conflict-in-merge/.hg/strip-backup/dc0947a82db8-ca7e7d5b-rebase.hg
+ $ hg tglog
+ @ 5:draft 'F'
+ |\
+ | o 4:draft 'E'
+ | |
+ o | 3:draft 'D'
+ |/
+ o 2:draft 'C'
+ |
+ o 1:draft 'B'
+ |
+ o 0:draft 'A'
+
--- a/tests/test-rebase-dest.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-dest.t Thu Feb 13 10:12:12 2020 -0800
@@ -256,7 +256,7 @@
> EOS
rebasing 3:a4256619d830 "B" (B)
rebasing 6:8e139e245220 "C" (C tip)
- o 8: 51e2ce92e06a C
+ o 8: d7d1169e9b1c C
|\
| o 7: 2ed0c8546285 B
| |\
--- a/tests/test-rebase-interruptions.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-interruptions.t Thu Feb 13 10:12:12 2020 -0800
@@ -294,7 +294,7 @@
$ hg tglogp
@ 7: 401ccec5e39f secret 'C'
|
- | @ 6: a0b2430ebfb8 secret 'F'
+ | o 6: a0b2430ebfb8 secret 'F'
| |
o | 5: 45396c49d53b public 'B'
| |
@@ -345,7 +345,7 @@
$ hg tglogp
@ 7: 401ccec5e39f secret 'C'
|
- | @ 6: a0b2430ebfb8 secret 'F'
+ | o 6: a0b2430ebfb8 secret 'F'
| |
o | 5: 45396c49d53b public 'B'
| |
@@ -395,7 +395,7 @@
$ hg tglogp
@ 7: 401ccec5e39f secret 'C'
|
- | @ 6: a0b2430ebfb8 secret 'F'
+ | o 6: a0b2430ebfb8 secret 'F'
| |
o | 5: 45396c49d53b public 'B'
| |
--- a/tests/test-rebase-newancestor.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-newancestor.t Thu Feb 13 10:12:12 2020 -0800
@@ -68,11 +68,6 @@
that is mixed up with the actual merge stuff and there is in general no way to
separate them.
-Note: The dev branch contains _no_ changes to f-default. It might be unclear
-how rebasing of ancestor merges should be handled, but the current behavior
-with spurious prompts for conflicts in files that didn't change seems very
-wrong.
-
$ hg init ancestor-merge
$ cd ancestor-merge
@@ -133,16 +128,11 @@
note: not rebasing 1:1d1a643d390e "dev: create branch", its destination already has all its changes
rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
rebasing 4:4b019212aaf6 "dev: merge default"
- file 'f-default' was deleted in local [dest] but was modified in other [source].
- You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
- What do you want to do? c
+ note: not rebasing 4:4b019212aaf6 "dev: merge default", its destination already has all its changes
rebasing 6:010ced67e558 "dev: merge default"
+ note: not rebasing 6:010ced67e558 "dev: merge default", its destination already has all its changes
saved backup bundle to $TESTTMP/ancestor-merge/.hg/strip-backup/1d1a643d390e-4a6f6d17-rebase.hg
$ hg tglog
- o 6: de147e4f69cf 'dev: merge default'
- |
- o 5: eda7b7f46f5d 'dev: merge default'
- |
o 4: 3e075b1c0a40 'dev: f-dev stuff'
|
@ 3: e08089805d82 'default: f-other stuff'
@@ -163,28 +153,8 @@
> EOF
rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
rebasing 4:4b019212aaf6 "dev: merge default"
- file 'f-default' was deleted in local [dest] but was modified in other [source].
- You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
- What do you want to do? c
- rebasing 6:010ced67e558 "dev: merge default"
- saved backup bundle to $TESTTMP/ancestor-merge-2/.hg/strip-backup/ec2c14fb2984-827d7a44-rebase.hg
- $ hg tglog
- o 7: de147e4f69cf 'dev: merge default'
- |
- o 6: eda7b7f46f5d 'dev: merge default'
- |
- o 5: 3e075b1c0a40 'dev: f-dev stuff'
- |
- o 4: e08089805d82 'default: f-other stuff'
- |
- o 3: 462860db70a1 'default: remove f-default'
- |
- o 2: f157ecfd2b6b 'default: f-default stuff'
- |
- | o 1: 1d1a643d390e 'dev: create branch' dev
- |/
- o 0: e90e8eb90b6f 'default: create f-default'
-
+ abort: rebasing 4:4b019212aaf6 will include unwanted changes from 1:1d1a643d390e
+ [255]
$ cd ..
@@ -284,18 +254,7 @@
rebasing 6:4c5f12f25ebe "merge rebase ancestors" (tip)
resolving manifests
removing other
- note: merging f9daf77ffe76+ and 4c5f12f25ebe using bids from ancestors a60552eb93fb and f59da8fc0fcf
-
- calculating bids for ancestor a60552eb93fb
resolving manifests
-
- calculating bids for ancestor f59da8fc0fcf
- resolving manifests
-
- auction for merging merge bids
- other: consensus for g
- end of auction
-
getting other
committing files:
other
--- a/tests/test-rebase-obsolete.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-obsolete.t Thu Feb 13 10:12:12 2020 -0800
@@ -1795,19 +1795,15 @@
$ hg log -G
@ 2:b18e25de2cf5 D
|
- | @ 1:2ec65233581b B (pruned using prune)
- |/
o 0:426bada5c675 A
$ hg summary
parent: 2:b18e25de2cf5 tip
D
- parent: 1:2ec65233581b (obsolete)
- B
branch: default
- commit: 2 modified, 1 unknown, 1 unresolved (merge)
+ commit: 1 modified, 1 added, 1 unknown, 1 unresolved
update: (current)
- phases: 3 draft
+ phases: 2 draft
rebase: 0 rebased, 2 remaining (rebase --continue)
$ hg rebase --abort
--- a/tests/test-rebase-parameters.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-parameters.t Thu Feb 13 10:12:12 2020 -0800
@@ -92,6 +92,14 @@
empty "rev" revision set - nothing to rebase
[1]
+ $ hg rebase --rev 'wdir()' --dest 6
+ abort: cannot rebase the working copy
+ [255]
+
+ $ hg rebase --source 'wdir()' --dest 6
+ abort: cannot rebase the working copy
+ [255]
+
$ hg rebase --source '1 & !1' --dest 8
empty "source" revision set - nothing to rebase
[1]
@@ -473,11 +481,9 @@
$ hg summary
parent: 1:56daeba07f4b
c2
- parent: 2:e4e3f3546619 tip
- c2b
branch: default
- commit: 1 modified, 1 unresolved (merge)
- update: (current)
+ commit: 1 unresolved (clean)
+ update: 1 new changesets, 2 branch heads (merge)
phases: 3 draft
rebase: 0 rebased, 1 remaining (rebase --continue)
--- a/tests/test-rebase-rename.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-rename.t Thu Feb 13 10:12:12 2020 -0800
@@ -108,6 +108,62 @@
+ $ repeatchange() {
+ > hg checkout $1
+ > hg cp a z
+ > echo blah >> z
+ > hg commit -Am "$2" --user "$3"
+ > }
+ $ repeatchange 1 "E" "user1"
+ 2 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ created new head
+ $ repeatchange 1 "E" "user2"
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ created new head
+ $ hg tglog
+ @ 5: af8ad1f97097 'E'
+ |
+ | o 4: 60f545c27784 'E'
+ |/
+ | o 3: 032a9b75e83b 'rename A'
+ | |
+ | o 2: 220d0626d185 'rename B'
+ |/
+ o 1: 3ab5da9a5c01 'B'
+ |
+ o 0: 1994f17a630e 'A'
+
+ $ hg rebase -s 5 -d 4
+ rebasing 5:af8ad1f97097 "E" (tip)
+ note: not rebasing 5:af8ad1f97097 "E" (tip), its destination already has all its changes
+ saved backup bundle to $TESTTMP/a/.hg/strip-backup/af8ad1f97097-c3e90708-rebase.hg
+ $ hg tglog
+ @ 4: 60f545c27784 'E'
+ |
+ | o 3: 032a9b75e83b 'rename A'
+ | |
+ | o 2: 220d0626d185 'rename B'
+ |/
+ o 1: 3ab5da9a5c01 'B'
+ |
+ o 0: 1994f17a630e 'A'
+
+ $ hg export tip
+ # HG changeset patch
+ # User user1
+ # Date 0 0
+ # Thu Jan 01 00:00:00 1970 +0000
+ # Node ID 60f545c277846e6bad309919bae3ae106f59cb39
+ # Parent 3ab5da9a5c01faa02c20f2ec4870a4f689c92da6
+ E
+
+ diff -r 3ab5da9a5c01 -r 60f545c27784 z
+ --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+ +++ b/z Thu Jan 01 00:00:00 1970 +0000
+ @@ -0,0 +1,2 @@
+ +a
+ +blah
+
$ cd ..
--- a/tests/test-rebase-transaction.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rebase-transaction.t Thu Feb 13 10:12:12 2020 -0800
@@ -114,7 +114,7 @@
|
| @ 4: Z
| |
- @ | 3: C
+ % | 3: C
| |
| o 2: Y
| |
@@ -123,9 +123,9 @@
o 0: A
$ hg st
- M C
M conflict
A B
+ A C
? conflict.orig
$ echo resolved > conflict
$ hg resolve -m
--- a/tests/test-rename-dir-merge.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rename-dir-merge.t Thu Feb 13 10:12:12 2020 -0800
@@ -30,8 +30,9 @@
b/a
b/b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a/a' -> dst: 'b/a'
- src: 'a/b' -> dst: 'b/b'
+ on remote side:
+ src: 'a/a' -> dst: 'b/a'
+ src: 'a/b' -> dst: 'b/b'
checking for directory renames
discovered dir src: 'a/' -> dst: 'b/'
pending file src: 'a/c' -> dst: 'b/c'
@@ -75,8 +76,9 @@
unmatched files in other:
a/c
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a/a' -> dst: 'b/a'
- src: 'a/b' -> dst: 'b/b'
+ on local side:
+ src: 'a/a' -> dst: 'b/a'
+ src: 'a/b' -> dst: 'b/b'
checking for directory renames
discovered dir src: 'a/' -> dst: 'b/'
pending file src: 'a/c' -> dst: 'b/c'
--- a/tests/test-rename-merge1.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rename-merge1.t Thu Feb 13 10:12:12 2020 -0800
@@ -28,9 +28,11 @@
b
b2
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
- src: 'a2' -> dst: 'b2' !
- src: 'a2' -> dst: 'c2' !
+ on local side:
+ src: 'a2' -> dst: 'c2' !
+ on remote side:
+ src: 'a' -> dst: 'b' *
+ src: 'a2' -> dst: 'b2' !
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -170,7 +172,8 @@
unmatched files in other:
newfile
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'file' -> dst: 'newfile' %
+ on remote side:
+ src: 'file' -> dst: 'newfile' %
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -184,3 +187,50 @@
$ hg status
M newfile
$ cd ..
+
+Create x and y, then modify y and rename x to z on one side of the merge,
+and modify x and rename y to z on the other side.
+ $ hg init conflicting-target
+ $ cd conflicting-target
+ $ echo x > x
+ $ echo y > y
+ $ hg ci -Aqm 'add x and y'
+ $ hg mv x z
+ $ echo foo >> y
+ $ hg ci -qm 'modify y, rename x to z'
+ $ hg co -q 0
+ $ hg mv y z
+ $ echo foo >> x
+ $ hg ci -qm 'modify x, rename y to z'
+# We should probably tell the user about the conflicting rename sources.
+# Depending on which side they pick, we should take that rename and get
+# the changes to the source from the other side. The unchanged file should
+# remain.
+ $ hg merge --debug 1 -t :merge3
+ all copies found (* = to merge, ! = divergent, % = renamed and deleted):
+ on local side:
+ src: 'y' -> dst: 'z' *
+ on remote side:
+ src: 'x' -> dst: 'z' *
+ checking for directory renames
+ resolving manifests
+ branchmerge: True, force: False, partial: False
+ ancestor: 5151c134577e, local: 07fcbc9a74ed+, remote: f21419739508
+ preserving z for resolve of z
+ starting 4 threads for background file closing (?)
+ z: both renamed from y -> m (premerge)
+ picked tool ':merge3' for z (binary False symlink False changedelete False)
+ merging z
+ my z@07fcbc9a74ed+ other z@f21419739508 ancestor y@5151c134577e
+ premerge successful
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ ls
+ x
+ z
+ $ cat x
+ x
+ foo
+# 'z' should have had the added 'foo' line
+ $ cat z
+ x
--- a/tests/test-rename-merge2.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rename-merge2.t Thu Feb 13 10:12:12 2020 -0800
@@ -79,7 +79,8 @@
unmatched files in other:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on remote side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -117,7 +118,8 @@
unmatched files in local:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -156,7 +158,8 @@
unmatched files in other:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on remote side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -194,7 +197,8 @@
unmatched files in local:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -231,7 +235,8 @@
unmatched files in other:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on remote side:
+ src: 'a' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -263,7 +268,8 @@
unmatched files in local:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on local side:
+ src: 'a' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -294,7 +300,8 @@
unmatched files in other:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on remote side:
+ src: 'a' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -327,7 +334,8 @@
unmatched files in local:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on local side:
+ src: 'a' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -355,7 +363,10 @@
test L:um a b R:um a b W: - 9 do merge with ancestor in a
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on local side:
+ src: 'a' -> dst: 'b' *
+ on remote side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -400,8 +411,10 @@
unmatched files in other:
c
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' !
- src: 'a' -> dst: 'c' !
+ on local side:
+ src: 'a' -> dst: 'b' !
+ on remote side:
+ src: 'a' -> dst: 'c' !
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -434,7 +447,8 @@
test L:nc a b R:up b W: - 12 merge b no ancestor
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on local side:
+ src: 'a' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -473,7 +487,8 @@
test L:up b R:nm a b W: - 13 merge b no ancestor
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on remote side:
+ src: 'a' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -513,7 +528,8 @@
test L:nc a b R:up a b W: - 14 merge b no ancestor
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -522,19 +538,15 @@
preserving rev for resolve of rev
a: remote is newer -> g
getting a
- b: both created -> m (premerge)
+ b: both renamed from a -> m (premerge)
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
- my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
+ my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
+ premerge successful
rev: versions differ -> m (premerge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
rev: versions differ -> m (merge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
@@ -553,7 +565,8 @@
test L:up b R:nm a b W: - 15 merge b no ancestor, remove a
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on remote side:
+ src: 'a' -> dst: 'b'
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -593,7 +606,8 @@
test L:nc a b R:up a b W: - 16 get a, merge b no ancestor
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -602,19 +616,15 @@
preserving rev for resolve of rev
a: remote is newer -> g
getting a
- b: both created -> m (premerge)
+ b: both renamed from a -> m (premerge)
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
- my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
+ my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
+ premerge successful
rev: versions differ -> m (premerge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@86a2aa42fc76+ other b@8dbce441892a ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
rev: versions differ -> m (merge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
@@ -633,7 +643,8 @@
test L:up a b R:nc a b W: - 17 keep a, merge b no ancestor
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on remote side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -641,19 +652,15 @@
preserving b for resolve of b
preserving rev for resolve of rev
starting 4 threads for background file closing (?)
- b: both created -> m (premerge)
+ b: both renamed from a -> m (premerge)
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
- my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor b@000000000000
+ my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor a@924404dff337
+ premerge successful
rev: versions differ -> m (premerge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
rev: versions differ -> m (merge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
@@ -672,7 +679,8 @@
test L:nm a b R:up a b W: - 18 merge b no ancestor
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -680,35 +688,24 @@
preserving b for resolve of b
preserving rev for resolve of rev
starting 4 threads for background file closing (?)
- a: prompt deleted/changed -> m (premerge)
- picked tool ':prompt' for a (binary False symlink False changedelete True)
- file 'a' was deleted in local [working copy] but was modified in other [merge rev].
- You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
- What do you want to do? u
- b: both created -> m (premerge)
+ b: both renamed from a -> m (premerge)
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
- my b@02963e448370+ other b@8dbce441892a ancestor b@000000000000
+ my b@02963e448370+ other b@8dbce441892a ancestor a@924404dff337
+ premerge successful
rev: versions differ -> m (premerge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@02963e448370+ other b@8dbce441892a ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
rev: versions differ -> m (merge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
- 0 files updated, 2 files merged, 0 files removed, 1 files unresolved
- use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
--------------
- M a
M b
- abort: unresolved merge conflicts (see 'hg help resolve')
--------------
$ tm "up a b" "nm a b" " " "19 merge b no ancestor, prompt remove a"
@@ -717,44 +714,34 @@
test L:up a b R:nm a b W: - 19 merge b no ancestor, prompt remove a
--------------
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b'
+ on remote side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
ancestor: 924404dff337, local: 0b76e65c8289+, remote: bdb19105162a
- preserving a for resolve of a
preserving b for resolve of b
preserving rev for resolve of rev
+ b: both renamed from a -> m (premerge)
starting 4 threads for background file closing (?)
- a: prompt changed/deleted -> m (premerge)
- picked tool ':prompt' for a (binary False symlink False changedelete True)
- file 'a' was deleted in other [merge rev] but was modified in local [working copy].
- You can use (c)hanged version, (d)elete, or leave (u)nresolved.
- What do you want to do? u
- b: both created -> m (premerge)
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
- my b@0b76e65c8289+ other b@bdb19105162a ancestor b@000000000000
+ my b@0b76e65c8289+ other b@bdb19105162a ancestor a@924404dff337
+ premerge successful
rev: versions differ -> m (premerge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@0b76e65c8289+ other b@bdb19105162a ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
rev: versions differ -> m (merge)
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
- 0 files updated, 2 files merged, 0 files removed, 1 files unresolved
- use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
--------------
M b
C a
- abort: unresolved merge conflicts (see 'hg help resolve')
--------------
$ tm "up a " "um a b" " " "20 merge a and b to b, remove a"
@@ -765,7 +752,8 @@
unmatched files in other:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on remote side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -807,7 +795,8 @@
unmatched files in local:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -853,7 +842,8 @@
unmatched files in other:
c
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: 'a' -> dst: 'b' *
+ on local side:
+ src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: False, partial: False
@@ -936,11 +926,14 @@
4/g
7/f
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- src: '1/f' -> dst: '1/g' *
- src: '3/f' -> dst: '3/g' *
- src: '4/f' -> dst: '4/g' *
- src: '5/f' -> dst: '5/g' *
- src: '6/f' -> dst: '6/g' *
+ on local side:
+ src: '1/f' -> dst: '1/g' *
+ src: '5/f' -> dst: '5/g' *
+ src: '6/f' -> dst: '6/g' *
+ on remote side:
+ src: '1/f' -> dst: '1/g' *
+ src: '3/f' -> dst: '3/g' *
+ src: '4/f' -> dst: '4/g' *
checking for directory renames
$ hg mani
0/f
--- a/tests/test-repair-strip.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-repair-strip.t Thu Feb 13 10:12:12 2020 -0800
@@ -25,7 +25,9 @@
> else
> echo "(no journal)"
> fi
- > ls .hg/store/journal >/dev/null 2>&1 && hg recover
+ > if ls .hg/store/journal >/dev/null 2>&1; then
+ > hg recover --verify
+ > fi
> ls .hg/strip-backup/* >/dev/null 2>&1 && hg unbundle -q .hg/strip-backup/*
> rm -rf .hg/strip-backup
> }
--- a/tests/test-repo-filters-tiptoe.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-repo-filters-tiptoe.t Thu Feb 13 10:12:12 2020 -0800
@@ -67,6 +67,12 @@
R a
! b
+ $ hg status --copies
+ M c
+ A d
+ R a
+ ! b
+
Getting data about the working copy parent
$ hg log -r '.' -T "{node}\n{date}\n"
--- a/tests/test-rollback.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-rollback.t Thu Feb 13 10:12:12 2020 -0800
@@ -190,7 +190,7 @@
corrupt journal test
$ echo "foo" > .hg/store/journal
- $ hg recover
+ $ hg recover --verify
rolling back interrupted transaction
couldn't read journal entry 'foo\n'!
checking changesets
--- a/tests/test-shelve.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-shelve.t Thu Feb 13 10:12:12 2020 -0800
@@ -171,6 +171,8 @@
$ hg mv b b.rename
moving b/b to b.rename/b
$ hg cp c c.copy
+ $ hg mv d ghost
+ $ rm ghost
$ hg status -C
M a/a
A b.rename/b
@@ -178,12 +180,15 @@
A c.copy
c
R b/b
+ R d
+ ! ghost
+ d
the common case - no options or filenames
$ hg shelve
shelved as default-01
- 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
$ hg status -C
ensure that our shelved changes exist
@@ -254,6 +259,7 @@
A c.copy
c
R b/b
+ R d
$ hg shelve -l
(both of default.hg and default-1.hg should be still kept, because it
@@ -287,6 +293,7 @@
A c.copy
c
R b/b
+ R d
$ HGEDITOR=cat hg shelve -q -n wibble -m wat -e a
wat
@@ -306,6 +313,7 @@
A c.copy
c
R b/b
+ R d
$ hg shelve -l --stat
wibble (*) wat (glob)
a/a | 1 +
@@ -323,6 +331,7 @@
A c.copy
c
R b/b
+ R d
ensure old shelve backups are being deleted automatically
@@ -363,6 +372,7 @@
M b.rename/b
M c.copy
R b/b
+ R d
? a/a.orig
# The repository is in an unfinished *unshelve* state.
@@ -401,6 +411,7 @@
M b.rename/b
M c.copy
R b/b
+ R d
? a/a.orig
$ hg diff
diff --git a/a/a b/a/a
@@ -412,13 +423,19 @@
c
+=======
+a
- +>>>>>>> working-copy: a68ec3400638 - shelve: changes to: [mq]: second.patch
+ +>>>>>>> working-copy: 203c9f771d2b - shelve: changes to: [mq]: second.patch
diff --git a/b/b b/b.rename/b
rename from b/b
rename to b.rename/b
diff --git a/c b/c.copy
copy from c
copy to c.copy
+ diff --git a/d b/d
+ deleted file mode 100644
+ --- a/d
+ +++ /dev/null
+ @@ -1,1 +0,0 @@
+ -d
$ hg resolve -l
U a/a
@@ -434,6 +451,7 @@
M b.rename/b
M c.copy
R b/b
+ R d
? a/a.orig
$ hg unshelve -a
unshelve of 'default' aborted
@@ -512,6 +530,7 @@
c
A foo/foo
R b/b
+ R d
? a/a.orig
there should be no shelves left
--- a/tests/test-strip.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-strip.t Thu Feb 13 10:12:12 2020 -0800
@@ -591,6 +591,18 @@
phases: 2 draft
mq: 3 unapplied
+ $ hg log --graph
+ @ changeset: 1:76dcf9fab855
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: b
+ |
+ % changeset: 0:9ab35a2d17cb
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: a
+
$ echo c > b
$ hg strip tip
abort: uncommitted changes
--- a/tests/test-tags.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-tags.t Thu Feb 13 10:12:12 2020 -0800
@@ -103,6 +103,9 @@
0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
+ $ hg debugtagscache
+ 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing/invalid
+ 1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
Repeat with cold tag cache:
@@ -368,6 +371,20 @@
1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
+On junk data combined with missing cache entries, hg also overwrites the junk.
+
+ $ rm -f .hg/cache/tags2-visible
+ $ truncate .hg/cache/hgtagsfnodes1 -s -10
+ $ hg debugtagscache | tail -2
+ 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+ 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing/invalid
+ $ hg tags
+ tip 5:8dbfe60eff30
+ bar 1:78391a272241
+ $ hg debugtagscache | tail -2
+ 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+ 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+
#if unix-permissions no-root
Errors writing to .hgtags fnodes cache are silently ignored
--- a/tests/test-uncommit.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-uncommit.t Thu Feb 13 10:12:12 2020 -0800
@@ -489,7 +489,7 @@
$ hg add b
$ hg status
A b
- $ hg unc a
+ $ hg uncommit a
note: keeping empty commit
$ cat a
super critical info!
@@ -503,11 +503,11 @@
$ hg ci -Am 'add b'
$ echo 'foo bar' > b
- $ hg unc b
+ $ hg uncommit b
abort: uncommitted changes
(requires --allow-dirty-working-copy to uncommit)
[255]
- $ hg unc --allow-dirty-working-copy b
+ $ hg uncommit --allow-dirty-working-copy b
$ hg log
changeset: 3:30fa958635b2
tag: tip
--- a/tests/test-up-local-change.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-up-local-change.t Thu Feb 13 10:12:12 2020 -0800
@@ -40,8 +40,6 @@
summary: 1
$ hg --debug up
- unmatched files in other:
- b
resolving manifests
branchmerge: False, force: False, partial: False
ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb
@@ -91,8 +89,6 @@
summary: 1
$ hg --debug up
- unmatched files in other:
- b
resolving manifests
branchmerge: False, force: False, partial: False
ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb
--- a/tests/test-update-branches.t Fri Dec 13 10:37:45 2019 +0100
+++ b/tests/test-update-branches.t Thu Feb 13 10:12:12 2020 -0800
@@ -249,6 +249,19 @@
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges
[1]
+ $ hg log -G --template '{rev}:{node|short} {parents} {branches}\n'
+ o 5:ff252e8273df b1
+ |
+ @ 4:d047485b3896 0:60829823a42a b1
+ |
+ | % 3:6efa171f091b 1:0786582aa4b1
+ | |
+ | | o 2:bd10386d478c
+ | |/
+ | o 1:0786582aa4b1
+ |/
+ o 0:60829823a42a
+
$ hg st
M a
? a.orig