--- a/contrib/automation/hgautomation/aws.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/automation/hgautomation/aws.py Mon Jun 07 17:10:35 2021 -0400
@@ -925,10 +925,15 @@
     requirements3_path = (
         pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.txt'
     )
+    requirements35_path = (
+        pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.5.txt'
+    )
     with requirements2_path.open('r', encoding='utf-8') as fh:
         requirements2 = fh.read()
     with requirements3_path.open('r', encoding='utf-8') as fh:
         requirements3 = fh.read()
+    with requirements35_path.open('r', encoding='utf-8') as fh:
+        requirements35 = fh.read()
 
     # Compute a deterministic fingerprint to determine whether image needs to
     # be regenerated.
@@ -938,6 +943,7 @@
             'bootstrap_script': BOOTSTRAP_DEBIAN,
             'requirements_py2': requirements2,
             'requirements_py3': requirements3,
+            'requirements_py35': requirements35,
         }
     )
@@ -979,6 +985,10 @@
         fh.write(requirements3)
         fh.chmod(0o0700)
 
+    with sftp.open('%s/requirements-py3.5.txt' % home, 'wb') as fh:
+        fh.write(requirements35)
+        fh.chmod(0o0700)
+
     print('executing bootstrap')
     chan, stdin, stdout = ssh_exec_command(
         client, '%s/bootstrap' % home
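The hunk above folds the new Python 3.5 requirements file into the image
fingerprint, so a cached AMI is invalidated whenever that file changes. A
minimal sketch of the fingerprinting idea, assuming a hypothetical helper
(the module's real implementation may differ):

```python
# Hypothetical sketch: hash a canonical serialization of every input
# that should force an image rebuild when it changes.
import hashlib
import json

def image_fingerprint(inputs):
    # sort_keys makes the serialization deterministic across runs
    payload = json.dumps(inputs, sort_keys=True).encode('utf-8')
    return hashlib.sha256(payload).hexdigest()

# Adding 'requirements_py35' to the inputs dict changes the digest,
# which triggers regeneration of the dev AMI.
```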
--- a/contrib/automation/hgautomation/linux.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/automation/hgautomation/linux.py Mon Jun 07 17:10:35 2021 -0400
@@ -26,11 +26,11 @@
 INSTALL_PYTHONS = r'''
 PYENV2_VERSIONS="2.7.17 pypy2.7-7.2.0"
-PYENV3_VERSIONS="3.5.10 3.6.12 3.7.9 3.8.6 3.9.0 pypy3.5-7.0.0 pypy3.6-7.3.0"
+PYENV3_VERSIONS="3.5.10 3.6.13 3.7.10 3.8.10 3.9.5 pypy3.5-7.0.0 pypy3.6-7.3.3 pypy3.7-7.3.3"
 
 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
 pushd /hgdev/pyenv
-git checkout 8ac91b4fd678a8c04356f5ec85cfcd565c265e9a
+git checkout 328fd42c3a2fbf14ae46dae2021a087fe27ba7e2
 popd
 
 export PYENV_ROOT="/hgdev/pyenv"
@@ -56,7 +56,20 @@
 for v in ${PYENV3_VERSIONS}; do
     pyenv install -v ${v}
     ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
-    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
+
+    case ${v} in
+        3.5.*)
+            REQUIREMENTS=requirements-py3.5.txt
+            ;;
+        pypy3.5*)
+            REQUIREMENTS=requirements-py3.5.txt
+            ;;
+        *)
+            REQUIREMENTS=requirements-py3.txt
+            ;;
+    esac
+
+    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/${REQUIREMENTS}
 done
 
 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
@@ -64,6 +77,18 @@
     '\r\n', '\n'
 )
 
+INSTALL_PYOXIDIZER = r'''
+PYOXIDIZER_VERSION=0.16.0
+PYOXIDIZER_SHA256=8875471c270312fbb934007fd30f65f1904cc0f5da6188d61c90ed2129b9f9c1
+PYOXIDIZER_URL=https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F${PYOXIDIZER_VERSION}/pyoxidizer-${PYOXIDIZER_VERSION}-linux_x86_64.zip
+
+wget -O pyoxidizer.zip --progress dot:mega ${PYOXIDIZER_URL}
+echo "${PYOXIDIZER_SHA256} pyoxidizer.zip" | sha256sum --check -
+
+unzip pyoxidizer.zip
+chmod +x pyoxidizer
+sudo mv pyoxidizer /usr/local/bin/pyoxidizer
+'''
 
 INSTALL_RUST = r'''
 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
@@ -72,10 +97,8 @@
 chmod +x rustup-init
 sudo -H -u hg -g hg ./rustup-init -y
-sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.46.0
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.41.1 1.52.0
 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
-
-sudo -H -u hg -g hg /home/hg/.cargo/bin/cargo install --version 0.10.3 pyoxidizer
 '''
@@ -306,9 +329,9 @@
 sudo chown `whoami` /hgdev
 
 {install_rust}
+{install_pyoxidizer}
 
-cp requirements-py2.txt /hgdev/requirements-py2.txt
-cp requirements-py3.txt /hgdev/requirements-py3.txt
+cp requirements-*.txt /hgdev/
 
 # Disable the pip version check because it uses the network and can
 # be annoying.
@@ -332,6 +355,7 @@
 '''.lstrip()
     .format(
         install_rust=INSTALL_RUST,
+        install_pyoxidizer=INSTALL_PYOXIDIZER,
         install_pythons=INSTALL_PYTHONS,
         bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV,
     )
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/automation/linux-requirements-py3.5.txt Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,194 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.5.txt contrib/automation/linux-requirements.txt.in
+#
+astroid==2.4.2 \
+ --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
+ --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386
+ # via pylint
+docutils==0.17.1 \
+ --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
+ --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
+ # via -r contrib/automation/linux-requirements.txt.in
+fuzzywuzzy==0.18.0 \
+ --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
+ --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
+ # via -r contrib/automation/linux-requirements.txt.in
+idna==3.1 \
+ --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
+ --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
+ # via yarl
+isort==4.3.21 \
+ --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
+ --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # pylint
+lazy-object-proxy==1.4.3 \
+ --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
+ --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
+ --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
+ --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
+ --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
+ --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
+ --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
+ --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
+ --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
+ --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
+ --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
+ --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
+ --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
+ --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
+ --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
+ --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
+ --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
+ --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
+ --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
+ --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
+ --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0
+ # via astroid
+mccabe==0.6.1 \
+ --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
+ --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
+ # via pylint
+multidict==5.0.2 \
+ --hash=sha256:060d68ae3e674c913ec41a464916f12c4d7ff17a3a9ebbf37ba7f2c681c2b33e \
+ --hash=sha256:06f39f0ddc308dab4e5fa282d145f90cd38d7ed75390fc83335636909a9ec191 \
+ --hash=sha256:17847fede1aafdb7e74e01bb34ab47a1a1ea726e8184c623c45d7e428d2d5d34 \
+ --hash=sha256:1cd102057b09223b919f9447c669cf2efabeefb42a42ae6233f25ffd7ee31a79 \
+ --hash=sha256:20cc9b2dd31761990abff7d0e63cd14dbfca4ebb52a77afc917b603473951a38 \
+ --hash=sha256:2576e30bbec004e863d87216bc34abe24962cc2e964613241a1c01c7681092ab \
+ --hash=sha256:2ab9cad4c5ef5c41e1123ed1f89f555aabefb9391d4e01fd6182de970b7267ed \
+ --hash=sha256:359ea00e1b53ceef282232308da9d9a3f60d645868a97f64df19485c7f9ef628 \
+ --hash=sha256:3e61cc244fd30bd9fdfae13bdd0c5ec65da51a86575ff1191255cae677045ffe \
+ --hash=sha256:43c7a87d8c31913311a1ab24b138254a0ee89142983b327a2c2eab7a7d10fea9 \
+ --hash=sha256:4a3f19da871befa53b48dd81ee48542f519beffa13090dc135fffc18d8fe36db \
+ --hash=sha256:4df708ef412fd9b59b7e6c77857e64c1f6b4c0116b751cb399384ec9a28baa66 \
+ --hash=sha256:59182e975b8c197d0146a003d0f0d5dc5487ce4899502061d8df585b0f51fba2 \
+ --hash=sha256:6128d2c0956fd60e39ec7d1c8f79426f0c915d36458df59ddd1f0cff0340305f \
+ --hash=sha256:6168839491a533fa75f3f5d48acbb829475e6c7d9fa5c6e245153b5f79b986a3 \
+ --hash=sha256:62abab8088704121297d39c8f47156cb8fab1da731f513e59ba73946b22cf3d0 \
+ --hash=sha256:653b2bbb0bbf282c37279dd04f429947ac92713049e1efc615f68d4e64b1dbc2 \
+ --hash=sha256:6566749cd78cb37cbf8e8171b5cd2cbfc03c99f0891de12255cf17a11c07b1a3 \
+ --hash=sha256:76cbdb22f48de64811f9ce1dd4dee09665f84f32d6a26de249a50c1e90e244e0 \
+ --hash=sha256:8efcf070d60fd497db771429b1c769a3783e3a0dd96c78c027e676990176adc5 \
+ --hash=sha256:8fa4549f341a057feec4c3139056ba73e17ed03a506469f447797a51f85081b5 \
+ --hash=sha256:9380b3f2b00b23a4106ba9dd022df3e6e2e84e1788acdbdd27603b621b3288df \
+ --hash=sha256:9ed9b280f7778ad6f71826b38a73c2fdca4077817c64bc1102fdada58e75c03c \
+ --hash=sha256:a7b8b5bd16376c8ac2977748bd978a200326af5145d8d0e7f799e2b355d425b6 \
+ --hash=sha256:af271c2540d1cd2a137bef8d95a8052230aa1cda26dd3b2c73d858d89993d518 \
+ --hash=sha256:b561e76c9e21402d9a446cdae13398f9942388b9bff529f32dfa46220af54d00 \
+ --hash=sha256:b82400ef848bbac6b9035a105ac6acaa1fb3eea0d164e35bbb21619b88e49fed \
+ --hash=sha256:b98af08d7bb37d3456a22f689819ea793e8d6961b9629322d7728c4039071641 \
+ --hash=sha256:c58e53e1c73109fdf4b759db9f2939325f510a8a5215135330fe6755921e4886 \
+ --hash=sha256:cbabfc12b401d074298bfda099c58dfa5348415ae2e4ec841290627cb7cb6b2e \
+ --hash=sha256:d4a6fb98e9e9be3f7d70fd3e852369c00a027bd5ed0f3e8ade3821bcad257408 \
+ --hash=sha256:d99da85d6890267292065e654a329e1d2f483a5d2485e347383800e616a8c0b1 \
+ --hash=sha256:e58db0e0d60029915f7fc95a8683fa815e204f2e1990f1fb46a7778d57ca8c35 \
+ --hash=sha256:e5bf89fe57f702a046c7ec718fe330ed50efd4bcf74722940db2eb0919cddb1c \
+ --hash=sha256:f612e8ef8408391a4a3366e3508bab8ef97b063b4918a317cb6e6de4415f01af \
+ --hash=sha256:f65a2442c113afde52fb09f9a6276bbc31da71add99dc76c3adf6083234e07c6 \
+ --hash=sha256:fa0503947a99a1be94f799fac89d67a5e20c333e78ddae16e8534b151cdc588a
+ # via yarl
+pyflakes==2.3.1 \
+ --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
+ --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
+ # via -r contrib/automation/linux-requirements.txt.in
+pygments==2.9.0 \
+ --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
+ --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
+ # via -r contrib/automation/linux-requirements.txt.in
+pylint==2.6.2 \
+ --hash=sha256:718b74786ea7ed07aa0c58bf572154d4679f960d26e9641cc1de204a30b87fc9 \
+ --hash=sha256:e71c2e9614a4f06e36498f310027942b0f4f2fde20aebb01655b31edc63b9eaf
+ # via -r contrib/automation/linux-requirements.txt.in
+python-levenshtein==0.12.2 \
+ --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
+ # via -r contrib/automation/linux-requirements.txt.in
+pyyaml==5.3.1 \
+ --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
+ --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
+ --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
+ --hash=sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e \
+ --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
+ --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
+ --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
+ --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
+ --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
+ --hash=sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a \
+ --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
+ --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
+ --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a
+ # via vcrpy
+six==1.16.0 \
+ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+ # via
+ # astroid
+ # vcrpy
+toml==0.10.2 \
+ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+ # via pylint
+typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+ --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
+ --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
+ --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
+ --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
+ --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
+ --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
+ --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
+ --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
+ --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
+ --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
+ --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
+ --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
+ --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
+ --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
+ --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
+ --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
+ --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
+ --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
+ --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
+ --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
+ --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
+ --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
+ --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
+ --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
+ --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
+ --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
+ --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
+ --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
+ --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
+ --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # astroid
+vcrpy==4.1.1 \
+ --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
+ --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
+ # via -r contrib/automation/linux-requirements.txt.in
+wrapt==1.12.1 \
+ --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
+ # via
+ # astroid
+ # vcrpy
+yarl==1.3.0 \
+ --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
+ --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
+ --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
+ --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
+ --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
+ --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
+ --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
+ --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
+ --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
+ --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
+ --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1
+ # via vcrpy
+
+# WARNING: The following packages were not pinned, but pip requires them to be
+# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+# setuptools
--- a/contrib/automation/linux-requirements-py3.txt Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/automation/linux-requirements-py3.txt Mon Jun 07 17:10:35 2021 -0400
@@ -6,208 +6,299 @@
#
appdirs==1.4.4 \
--hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \
- --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 \
+ --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128
# via black
-astroid==2.4.2 \
- --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
- --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386 \
+astroid==2.5.6 \
+ --hash=sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e \
+ --hash=sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975
# via pylint
-attrs==20.2.0 \
- --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \
- --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc \
+attrs==21.1.0 \
+ --hash=sha256:3901be1cb7c2a780f14668691474d9252c070a756be0a9ead98cfeabfa11aeb8 \
+ --hash=sha256:8ee1e5f5a1afc5b19bdfae4fdf0c35ed324074bdce3500c939842c8f818645d9
# via black
black==19.10b0 ; python_version >= "3.6" and platform_python_implementation != "PyPy" \
--hash=sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b \
- --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539 \
+ --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539
# via -r contrib/automation/linux-requirements.txt.in
click==7.1.2 \
--hash=sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a \
- --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc \
+ --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc
# via black
-docutils==0.16 \
- --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
- --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+docutils==0.17.1 \
+ --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
+ --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
# via -r contrib/automation/linux-requirements.txt.in
fuzzywuzzy==0.18.0 \
--hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
- --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993 \
+ --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
# via -r contrib/automation/linux-requirements.txt.in
-idna==2.10 \
- --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
- --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 \
+idna==3.1 \
+ --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
+ --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
# via yarl
isort==4.3.21 \
--hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
- --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
- # via -r contrib/automation/linux-requirements.txt.in, pylint
-lazy-object-proxy==1.4.3 \
- --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
- --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
- --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
- --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
- --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
- --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
- --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
- --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
- --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
- --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
- --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
- --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
- --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
- --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
- --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
- --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
- --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
- --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
- --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
- --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
- --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0 \
+ --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # pylint
+lazy-object-proxy==1.6.0 \
+ --hash=sha256:17e0967ba374fc24141738c69736da90e94419338fd4c7c7bef01ee26b339653 \
+ --hash=sha256:1fee665d2638491f4d6e55bd483e15ef21f6c8c2095f235fef72601021e64f61 \
+ --hash=sha256:22ddd618cefe54305df49e4c069fa65715be4ad0e78e8d252a33debf00f6ede2 \
+ --hash=sha256:24a5045889cc2729033b3e604d496c2b6f588c754f7a62027ad4437a7ecc4837 \
+ --hash=sha256:410283732af311b51b837894fa2f24f2c0039aa7f220135192b38fcc42bd43d3 \
+ --hash=sha256:4732c765372bd78a2d6b2150a6e99d00a78ec963375f236979c0626b97ed8e43 \
+ --hash=sha256:489000d368377571c6f982fba6497f2aa13c6d1facc40660963da62f5c379726 \
+ --hash=sha256:4f60460e9f1eb632584c9685bccea152f4ac2130e299784dbaf9fae9f49891b3 \
+ --hash=sha256:5743a5ab42ae40caa8421b320ebf3a998f89c85cdc8376d6b2e00bd12bd1b587 \
+ --hash=sha256:85fb7608121fd5621cc4377a8961d0b32ccf84a7285b4f1d21988b2eae2868e8 \
+ --hash=sha256:9698110e36e2df951c7c36b6729e96429c9c32b3331989ef19976592c5f3c77a \
+ --hash=sha256:9d397bf41caad3f489e10774667310d73cb9c4258e9aed94b9ec734b34b495fd \
+ --hash=sha256:b579f8acbf2bdd9ea200b1d5dea36abd93cabf56cf626ab9c744a432e15c815f \
+ --hash=sha256:b865b01a2e7f96db0c5d12cfea590f98d8c5ba64ad222300d93ce6ff9138bcad \
+ --hash=sha256:bf34e368e8dd976423396555078def5cfc3039ebc6fc06d1ae2c5a65eebbcde4 \
+ --hash=sha256:c6938967f8528b3668622a9ed3b31d145fab161a32f5891ea7b84f6b790be05b \
+ --hash=sha256:d1c2676e3d840852a2de7c7d5d76407c772927addff8d742b9808fe0afccebdf \
+ --hash=sha256:d7124f52f3bd259f510651450e18e0fd081ed82f3c08541dffc7b94b883aa981 \
+ --hash=sha256:d900d949b707778696fdf01036f58c9876a0d8bfe116e8d220cfd4b15f14e741 \
+ --hash=sha256:ebfd274dcd5133e0afae738e6d9da4323c3eb021b3e13052d8cbd0e457b1256e \
+ --hash=sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93 \
+ --hash=sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b
# via astroid
mccabe==0.6.1 \
--hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
- --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
+ --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
# via pylint
-multidict==4.7.6 \
- --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \
- --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \
- --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \
- --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \
- --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \
- --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \
- --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \
- --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \
- --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \
- --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \
- --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \
- --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \
- --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \
- --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \
- --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \
- --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \
- --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d \
+multidict==5.1.0 \
+ --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
+ --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
+ --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
+ --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
+ --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
+ --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
+ --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
+ --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \
+ --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
+ --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
+ --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
+ --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
+ --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
+ --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
+ --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
+ --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
+ --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
+ --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
+ --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
+ --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
+ --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
+ --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
+ --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
+ --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
+ --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
+ --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
+ --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
+ --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
+ --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
+ --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
+ --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
+ --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
+ --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
+ --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
+ --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
+ --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
+ --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80
# via yarl
-pathspec==0.8.0 \
- --hash=sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0 \
- --hash=sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061 \
+pathspec==0.8.1 \
+ --hash=sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd \
+ --hash=sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d
# via black
-pyflakes==2.2.0 \
- --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \
- --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8 \
+pyflakes==2.3.1 \
+ --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
+ --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
# via -r contrib/automation/linux-requirements.txt.in
-pygments==2.7.1 \
- --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
- --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
+pygments==2.9.0 \
+ --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
+ --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
# via -r contrib/automation/linux-requirements.txt.in
-pylint==2.6.0 \
- --hash=sha256:bb4a908c9dadbc3aac18860550e870f58e1a02c9f2c204fdf5693d73be061210 \
- --hash=sha256:bfe68f020f8a0fece830a22dd4d5dddb4ecc6137db04face4c3420a46a52239f \
+pylint==2.8.2 \
+ --hash=sha256:586d8fa9b1891f4b725f587ef267abe2a1bad89d6b184520c7f07a253dd6e217 \
+ --hash=sha256:f7e2072654a6b6afdf5e2fb38147d3e2d2d43c89f648637baab63e026481279b
+ # via -r contrib/automation/linux-requirements.txt.in
+python-levenshtein==0.12.2 \
+ --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
# via -r contrib/automation/linux-requirements.txt.in
-python-levenshtein==0.12.0 \
- --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 \
- # via -r contrib/automation/linux-requirements.txt.in
-pyyaml==5.3.1 \
- --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
- --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
- --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
- --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
- --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
- --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
- --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
- --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
- --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
- --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
- --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a \
+pyyaml==5.4.1 \
+ --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
+ --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
+ --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
+ --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
+ --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
+ --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
+ --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
+ --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
+ --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
+ --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
+ --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
+ --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
+ --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
+ --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
+ --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
+ --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
+ --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
+ --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
+ --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
+ --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
+ --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
+ --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
+ --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
+ --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
+ --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
+ --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
+ --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
+ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
+ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0
# via vcrpy
-regex==2020.9.27 \
- --hash=sha256:088afc8c63e7bd187a3c70a94b9e50ab3f17e1d3f52a32750b5b77dbe99ef5ef \
- --hash=sha256:1fe0a41437bbd06063aa184c34804efa886bcc128222e9916310c92cd54c3b4c \
- --hash=sha256:3d20024a70b97b4f9546696cbf2fd30bae5f42229fbddf8661261b1eaff0deb7 \
- --hash=sha256:41bb65f54bba392643557e617316d0d899ed5b4946dccee1cb6696152b29844b \
- --hash=sha256:4318d56bccfe7d43e5addb272406ade7a2274da4b70eb15922a071c58ab0108c \
- --hash=sha256:4707f3695b34335afdfb09be3802c87fa0bc27030471dbc082f815f23688bc63 \
- --hash=sha256:49f23ebd5ac073765ecbcf046edc10d63dcab2f4ae2bce160982cb30df0c0302 \
- --hash=sha256:5533a959a1748a5c042a6da71fe9267a908e21eded7a4f373efd23a2cbdb0ecc \
- --hash=sha256:5d892a4f1c999834eaa3c32bc9e8b976c5825116cde553928c4c8e7e48ebda67 \
- --hash=sha256:5f18875ac23d9aa2f060838e8b79093e8bb2313dbaaa9f54c6d8e52a5df097be \
- --hash=sha256:60b0e9e6dc45683e569ec37c55ac20c582973841927a85f2d8a7d20ee80216ab \
- --hash=sha256:816064fc915796ea1f26966163f6845de5af78923dfcecf6551e095f00983650 \
- --hash=sha256:84cada8effefe9a9f53f9b0d2ba9b7b6f5edf8d2155f9fdbe34616e06ececf81 \
- --hash=sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19 \
- --hash=sha256:8d69cef61fa50c8133382e61fd97439de1ae623fe943578e477e76a9d9471637 \
- --hash=sha256:9a02d0ae31d35e1ec12a4ea4d4cca990800f66a917d0fb997b20fbc13f5321fc \
- --hash=sha256:9bc13e0d20b97ffb07821aa3e113f9998e84994fe4d159ffa3d3a9d1b805043b \
- --hash=sha256:a6f32aea4260dfe0e55dc9733ea162ea38f0ea86aa7d0f77b15beac5bf7b369d \
- --hash=sha256:ae91972f8ac958039920ef6e8769277c084971a142ce2b660691793ae44aae6b \
- --hash=sha256:c570f6fa14b9c4c8a4924aaad354652366577b4f98213cf76305067144f7b100 \
- --hash=sha256:c9443124c67b1515e4fe0bb0aa18df640965e1030f468a2a5dc2589b26d130ad \
- --hash=sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3 \
- --hash=sha256:eaf548d117b6737df379fdd53bdde4f08870e66d7ea653e230477f071f861121 \
- --hash=sha256:ebbe29186a3d9b0c591e71b7393f1ae08c83cb2d8e517d2a822b8f7ec99dfd8b \
- --hash=sha256:eda4771e0ace7f67f58bc5b560e27fb20f32a148cbc993b0c3835970935c2707 \
- --hash=sha256:f1b3afc574a3db3b25c89161059d857bd4909a1269b0b3cb3c904677c8c4a3f7 \
- --hash=sha256:f2388013e68e750eaa16ccbea62d4130180c26abb1d8e5d584b9baf69672b30f \
+regex==2021.4.4 \
+ --hash=sha256:01afaf2ec48e196ba91b37451aa353cb7eda77efe518e481707e0515025f0cd5 \
+ --hash=sha256:11d773d75fa650cd36f68d7ca936e3c7afaae41b863b8c387a22aaa78d3c5c79 \
+ --hash=sha256:18c071c3eb09c30a264879f0d310d37fe5d3a3111662438889ae2eb6fc570c31 \
+ --hash=sha256:1e1c20e29358165242928c2de1482fb2cf4ea54a6a6dea2bd7a0e0d8ee321500 \
+ --hash=sha256:281d2fd05555079448537fe108d79eb031b403dac622621c78944c235f3fcf11 \
+ --hash=sha256:314d66636c494ed9c148a42731b3834496cc9a2c4251b1661e40936814542b14 \
+ --hash=sha256:32e65442138b7b76dd8173ffa2cf67356b7bc1768851dded39a7a13bf9223da3 \
+ --hash=sha256:339456e7d8c06dd36a22e451d58ef72cef293112b559010db3d054d5560ef439 \
+ --hash=sha256:3916d08be28a1149fb97f7728fca1f7c15d309a9f9682d89d79db75d5e52091c \
+ --hash=sha256:3a9cd17e6e5c7eb328517969e0cb0c3d31fd329298dd0c04af99ebf42e904f82 \
+ --hash=sha256:47bf5bf60cf04d72bf6055ae5927a0bd9016096bf3d742fa50d9bf9f45aa0711 \
+ --hash=sha256:4c46e22a0933dd783467cf32b3516299fb98cfebd895817d685130cc50cd1093 \
+ --hash=sha256:4c557a7b470908b1712fe27fb1ef20772b78079808c87d20a90d051660b1d69a \
+ --hash=sha256:52ba3d3f9b942c49d7e4bc105bb28551c44065f139a65062ab7912bef10c9afb \
+ --hash=sha256:563085e55b0d4fb8f746f6a335893bda5c2cef43b2f0258fe1020ab1dd874df8 \
+ --hash=sha256:598585c9f0af8374c28edd609eb291b5726d7cbce16be6a8b95aa074d252ee17 \
+ --hash=sha256:619d71c59a78b84d7f18891fe914446d07edd48dc8328c8e149cbe0929b4e000 \
+ --hash=sha256:67bdb9702427ceddc6ef3dc382455e90f785af4c13d495f9626861763ee13f9d \
+ --hash=sha256:6d1b01031dedf2503631d0903cb563743f397ccaf6607a5e3b19a3d76fc10480 \
+ --hash=sha256:741a9647fcf2e45f3a1cf0e24f5e17febf3efe8d4ba1281dcc3aa0459ef424dc \
+ --hash=sha256:7c2a1af393fcc09e898beba5dd59196edaa3116191cc7257f9224beaed3e1aa0 \
+ --hash=sha256:7d9884d86dd4dd489e981d94a65cd30d6f07203d90e98f6f657f05170f6324c9 \
+ --hash=sha256:90f11ff637fe8798933fb29f5ae1148c978cccb0452005bf4c69e13db951e765 \
+ --hash=sha256:919859aa909429fb5aa9cf8807f6045592c85ef56fdd30a9a3747e513db2536e \
+ --hash=sha256:96fcd1888ab4d03adfc9303a7b3c0bd78c5412b2bfbe76db5b56d9eae004907a \
+ --hash=sha256:97f29f57d5b84e73fbaf99ab3e26134e6687348e95ef6b48cfd2c06807005a07 \
+ --hash=sha256:980d7be47c84979d9136328d882f67ec5e50008681d94ecc8afa8a65ed1f4a6f \
+ --hash=sha256:a91aa8619b23b79bcbeb37abe286f2f408d2f2d6f29a17237afda55bb54e7aac \
+ --hash=sha256:ade17eb5d643b7fead300a1641e9f45401c98eee23763e9ed66a43f92f20b4a7 \
+ --hash=sha256:b9c3db21af35e3b3c05764461b262d6f05bbca08a71a7849fd79d47ba7bc33ed \
+ --hash=sha256:bd28bc2e3a772acbb07787c6308e00d9626ff89e3bfcdebe87fa5afbfdedf968 \
+ --hash=sha256:bf5824bfac591ddb2c1f0a5f4ab72da28994548c708d2191e3b87dd207eb3ad7 \
+ --hash=sha256:c0502c0fadef0d23b128605d69b58edb2c681c25d44574fc673b0e52dce71ee2 \
+ --hash=sha256:c38c71df845e2aabb7fb0b920d11a1b5ac8526005e533a8920aea97efb8ec6a4 \
+ --hash=sha256:ce15b6d103daff8e9fee13cf7f0add05245a05d866e73926c358e871221eae87 \
+ --hash=sha256:d3029c340cfbb3ac0a71798100ccc13b97dddf373a4ae56b6a72cf70dfd53bc8 \
+ --hash=sha256:e512d8ef5ad7b898cdb2d8ee1cb09a8339e4f8be706d27eaa180c2f177248a10 \
+ --hash=sha256:e8e5b509d5c2ff12f8418006d5a90e9436766133b564db0abaec92fd27fcee29 \
+ --hash=sha256:ee54ff27bf0afaf4c3b3a62bcd016c12c3fdb4ec4f413391a90bd38bc3624605 \
+ --hash=sha256:fa4537fb4a98fe8fde99626e4681cc644bdcf2a795038533f9f711513a862ae6 \
+ --hash=sha256:fd45ff9293d9274c5008a2054ecef86a9bfe819a67c7be1afb65e69b405b3042
# via black
-six==1.15.0 \
- --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
- --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced \
- # via astroid, vcrpy
-toml==0.10.1 \
- --hash=sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f \
- --hash=sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88 \
- # via black, pylint
-typed-ast==1.4.1 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
- --hash=sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355 \
- --hash=sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919 \
- --hash=sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa \
- --hash=sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652 \
- --hash=sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75 \
- --hash=sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01 \
- --hash=sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d \
- --hash=sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1 \
- --hash=sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907 \
- --hash=sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c \
- --hash=sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3 \
- --hash=sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b \
- --hash=sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614 \
- --hash=sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb \
- --hash=sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b \
- --hash=sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41 \
- --hash=sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6 \
- --hash=sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34 \
- --hash=sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe \
- --hash=sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4 \
- --hash=sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7 \
- # via -r contrib/automation/linux-requirements.txt.in, astroid, black
-typing-extensions==3.7.4.3 \
- --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \
- --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \
- --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f \
+six==1.16.0 \
+ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+ # via vcrpy
+toml==0.10.2 \
+ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+ # via
+ # black
+ # pylint
+typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+ --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
+ --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
+ --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
+ --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
+ --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
+ --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
+ --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
+ --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
+ --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
+ --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
+ --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
+ --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
+ --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
+ --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
+ --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
+ --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
+ --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
+ --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
+ --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
+ --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
+ --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
+ --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
+ --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
+ --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
+ --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
+ --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
+ --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
+ --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
+ --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
+ --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # astroid
+ # black
+typing-extensions==3.10.0.0 \
+ --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
+ --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
+ --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84
# via yarl
-vcrpy==4.1.0 \
- --hash=sha256:4138e79eb35981ad391406cbb7227bce7eba8bad788dcf1a89c2e4a8b740debe \
- --hash=sha256:d833248442bbc560599add895c9ab0ef518676579e8dc72d8b0933bdb3880253 \
+vcrpy==4.1.1 \
+ --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
+ --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
# via -r contrib/automation/linux-requirements.txt.in
wrapt==1.12.1 \
- --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
- # via astroid, vcrpy
-yarl==1.6.0 \
- --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \
- --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \
- --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \
- --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \
- --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \
- --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \
- --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \
- --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \
- --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \
- --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \
- --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \
- --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \
- --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \
- --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \
- --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \
- --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \
- --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a \
+ --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
+ # via
+ # astroid
+ # vcrpy
+yarl==1.6.3 \
+ --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
+ --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
+ --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
+ --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
+ --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
+ --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
+ --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
+ --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
+ --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
+ --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
+ --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
+ --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
+ --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
+ --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
+ --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
+ --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
+ --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
+ --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
+ --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
+ --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \
+ --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
+ --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
+ --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
+ --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
+ --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
+ --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
+ --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
+ --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
+ --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
+ --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
+ --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
+ --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
+ --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
+ --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
+ --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
+ --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
+ --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71
# via vcrpy
 # WARNING: The following packages were not pinned, but pip requires them to be
 # pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
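Because every pin in these requirements files carries `--hash` entries, pip
installs them in hash-checking mode and rejects anything unpinned. A sketch of
how such a file is consumed, assuming a stock pip invocation
(`--require-hashes` is implied once any requirement has a hash):

```python
# Sketch: install a hash-pinned requirements file; pip verifies every
# downloaded artifact against the listed sha256 digests.
import subprocess
import sys

subprocess.run(
    [sys.executable, '-m', 'pip', 'install',
     '--require-hashes', '-r', 'linux-requirements-py3.txt'],
    check=True,
)
```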
--- a/contrib/chg/chg.c Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/chg/chg.c Mon Jun 07 17:10:35 2021 -0400
@@ -240,13 +240,8 @@
 	const char *hgcmd = gethgcmd();
 
 	const char *baseargv[] = {
-	    hgcmd,
-	    "serve",
-	    "--cmdserver",
-	    "chgunix",
-	    "--address",
-	    opts->initsockname,
-	    "--daemon-postexec",
+	    hgcmd, "serve", "--no-profile", "--cmdserver",
+	    "chgunix", "--address", opts->initsockname, "--daemon-postexec",
 	    "chdir:/",
 	};
 	size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
--- a/contrib/dumprevlog Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/dumprevlog Mon Jun 07 17:10:35 2021 -0400
@@ -13,6 +13,10 @@
 )
 from mercurial.utils import procutil
 
+from mercurial.revlogutils import (
+    constants as revlog_constants,
+)
+
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
@@ -32,7 +36,16 @@
 for f in sys.argv[1:]:
-    r = revlog.revlog(binopen, encoding.strtolocal(f))
+    localf = encoding.strtolocal(f)
+    if not localf.endswith(b'.i'):
+        print("file:", f, file=sys.stderr)
+        print("  invalid filename", file=sys.stderr)
+
+    r = revlog.revlog(
+        binopen,
+        target=(revlog_constants.KIND_OTHER, b'dump-revlog'),
+        radix=localf[:-2],
+    )
     print("file:", f)
     for i in r:
         n = r.node(i)
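With the reworked revlog API, a revlog is addressed by a `target` describing
what it stores plus a `radix`, the index path minus its `.i` suffix. The slice
below is plain bytes arithmetic showing what `localf[:-2]` computes, not
Mercurial API:

```python
# The radix is the index filename with the two-byte b'.i' suffix
# stripped; revlog derives the .i/.d file names from it.
localf = b'data/foo.txt.i'
radix = localf[:-2]
assert radix == b'data/foo.txt'
```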
--- a/contrib/fuzz/mpatch_corpus.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/fuzz/mpatch_corpus.py Mon Jun 07 17:10:35 2021 -0400
@@ -1,10 +1,15 @@
 from __future__ import absolute_import, print_function
 
 import argparse
+import os
 import struct
 import sys
 import zipfile
 
+# Add ../.. to sys.path as an absolute path so we can import hg modules
+hgloc = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
+sys.path[0:0] = [hgloc]
+
 from mercurial import (
     hg,
     ui as uimod,
--- a/contrib/import-checker.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/import-checker.py Mon Jun 07 17:10:35 2021 -0400
@@ -23,7 +23,7 @@
 # Whitelist of modules that symbols can be directly imported from.
 allowsymbolimports = (
     '__future__',
-    'bzrlib',
+    'breezy',
     'hgclient',
     'mercurial',
     'mercurial.hgweb.common',
--- a/contrib/install-windows-dependencies.ps1 Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/install-windows-dependencies.ps1 Mon Jun 07 17:10:35 2021 -0400
@@ -32,15 +32,15 @@
 $PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9-amd64.exe"
 $PYTHON37_x64_SHA256 = "e69ed52afb5a722e5c56f6c21d594e85c17cb29f12f18bb69751cf1714e0f987"
 
-$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6.exe"
-$PYTHON38_x86_SHA256 = "287d5df01ff22ff09e6a487ae018603ee19eade71d462ec703850c96f1d5e8a0"
-$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6-amd64.exe"
-$PYTHON38_x64_SHA256 = "328a257f189cb500606bb26ab0fbdd298ed0e05d8c36540a322a1744f489a0a0"
+$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10.exe"
+$PYTHON38_x86_SHA256 = "ad07633a1f0cd795f3bf9da33729f662281df196b4567fa795829f3bb38a30ac"
+$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe"
+$PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a"
 
-$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0.exe"
-$PYTHON39_x86_SHA256 = "a4c65917f4225d1543959342f0615c813a4e9e7ff1137c4394ff6a5290ac1913"
-$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0-amd64.exe"
-$PYTHON39_x64_SHA256 = "fd2e2c6612d43bb6b213b72fc53f07d73d99059fa72c96e44bde12e7815073ae"
+$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5.exe"
+$PYTHON39_x86_SHA256 = "505129081a839b699a6ab9064b441ad922ef03767b5dd4241fd0c2166baf64de"
+$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5-amd64.exe"
+$PYTHON39_x64_SHA256 = "84d5243088ba00c11e51905c704dbe041040dfff044f4e1ce5476844ee2e6eac"
 
 # PIP 19.2.3.
 $PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py"
@@ -62,6 +62,9 @@
 $RUSTUP_INIT_URL = "https://static.rust-lang.org/rustup/archive/1.21.1/x86_64-pc-windows-gnu/rustup-init.exe"
 $RUSTUP_INIT_SHA256 = "d17df34ba974b9b19cf5c75883a95475aa22ddc364591d75d174090d55711c72"
 
+$PYOXIDIZER_URL = "https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F0.16.0/PyOxidizer-0.16.0-x64.msi"
+$PYOXIDIZER_SHA256 = "2a9c58add9161c272c418d5e6dec13fbe648f624b5d26770190357e4d664f24e"
+
 # Writing progress slows down downloads substantially. So disable it.
 $progressPreference = 'silentlyContinue'
@@ -121,11 +124,8 @@
     Invoke-Process "${prefix}\assets\rustup-init.exe" "-y --default-host x86_64-pc-windows-msvc"
     Invoke-Process "${prefix}\cargo\bin\rustup.exe" "target add i686-pc-windows-msvc"
-    Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.46.0"
+    Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.52.0"
     Invoke-Process "${prefix}\cargo\bin\rustup.exe" "component add clippy"
-
-    # Install PyOxidizer for packaging.
-    Invoke-Process "${prefix}\cargo\bin\cargo.exe" "install --version 0.10.3 pyoxidizer"
 }
function Install-Dependencies($prefix) {
@@ -151,6 +151,7 @@
     Secure-Download $MINGW_BIN_URL ${prefix}\assets\mingw-get-bin.zip $MINGW_BIN_SHA256
     Secure-Download $MERCURIAL_WHEEL_URL ${prefix}\assets\${MERCURIAL_WHEEL_FILENAME} $MERCURIAL_WHEEL_SHA256
     Secure-Download $RUSTUP_INIT_URL ${prefix}\assets\rustup-init.exe $RUSTUP_INIT_SHA256
+    Secure-Download $PYOXIDIZER_URL ${prefix}\assets\PyOxidizer.msi $PYOXIDIZER_SHA256
 
     Write-Output "installing Python 2.7 32-bit"
     Invoke-Process msiexec.exe "/i ${prefix}\assets\python27-x86.msi /l* ${prefix}\assets\python27-x86.log /q TARGETDIR=${prefix}\python27-x86 ALLUSERS="
@@ -172,6 +173,9 @@
Write-Output "installing Visual Studio 2017 Build Tools and SDKs"
Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
+ Write-Output "installing PyOxidizer"
+ Invoke-Process msiexec.exe "/i ${prefix}\assets\PyOxidizer.msi /l* ${prefix}\assets\PyOxidizer.log /quiet"
+
Install-Rust ${prefix}
Write-Output "installing Visual C++ 9.0 for Python 2.7"
--- a/contrib/packaging/hgpackaging/inno.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/packaging/hgpackaging/inno.py Mon Jun 07 17:10:35 2021 -0400
@@ -18,7 +18,7 @@
build_py2exe,
stage_install,
)
-from .pyoxidizer import run_pyoxidizer
+from .pyoxidizer import create_pyoxidizer_install_layout
from .util import (
find_legacy_vc_runtime_files,
normalize_windows_version,
@@ -136,7 +136,9 @@
staging_dir = inno_build_dir / "stage"
inno_build_dir.mkdir(parents=True, exist_ok=True)
- run_pyoxidizer(source_dir, inno_build_dir, staging_dir, target_triple)
+ create_pyoxidizer_install_layout(
+ source_dir, inno_build_dir, staging_dir, target_triple
+ )
process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)
--- a/contrib/packaging/hgpackaging/pyoxidizer.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/packaging/hgpackaging/pyoxidizer.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,6 +12,7 @@
import shutil
import subprocess
import sys
+import typing
from .downloads import download_entry
from .util import (
@@ -53,17 +54,36 @@
]
+def build_docs_html(source_dir: pathlib.Path):
+ """Ensures HTML documentation is built.
+
+ This will fail if docutils isn't available.
+
+ (The HTML docs aren't built as part of `pip install`, so we need to build them
+ out of band.)
+ """
+ subprocess.run(
+ [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"],
+ cwd=str(source_dir),
+ check=True,
+ )
+
+
def run_pyoxidizer(
source_dir: pathlib.Path,
build_dir: pathlib.Path,
- out_dir: pathlib.Path,
target_triple: str,
-):
- """Build Mercurial with PyOxidizer and copy additional files into place.
+ build_vars: typing.Optional[typing.Dict[str, str]] = None,
+ target: typing.Optional[str] = None,
+) -> pathlib.Path:
+ """Run `pyoxidizer` in an environment with access to build dependencies.
- After successful completion, ``out_dir`` contains files constituting a
- Mercurial install.
+ Returns the output directory that pyoxidizer would have used for build
+ artifacts. Actual build artifacts are likely in a sub-directory with the
+ name of the pyoxidizer build target that was built.
"""
+ build_vars = build_vars or {}
+
# We need to make gettext binaries available for compiling i18n files.
gettext_pkg, gettext_entry = download_entry('gettext', build_dir)
gettext_dep_pkg = download_entry('gettext-dep', build_dir)[0]
@@ -91,8 +111,31 @@
target_triple,
]
+ for k, v in sorted(build_vars.items()):
+ args.extend(["--var", k, v])
+
+ if target:
+ args.append(target)
+
subprocess.run(args, env=env, check=True)
+ return source_dir / "build" / "pyoxidizer" / target_triple / "release"
+
+
+def create_pyoxidizer_install_layout(
+ source_dir: pathlib.Path,
+ build_dir: pathlib.Path,
+ out_dir: pathlib.Path,
+ target_triple: str,
+):
+ """Build Mercurial with PyOxidizer and copy additional files into place.
+
+ After successful completion, ``out_dir`` contains files constituting a
+ Mercurial install.
+ """
+
+ run_pyoxidizer(source_dir, build_dir, target_triple)
+
if "windows" in target_triple:
target = "app_windows"
else:
@@ -113,14 +156,7 @@
# is taught to use the importlib APIs for reading resources.
process_install_rules(STAGING_RULES_APP, build_dir, out_dir)
- # We also need to run setup.py build_doc to produce html files,
- # as they aren't built as part of ``pip install``.
- # This will fail if docutils isn't installed.
- subprocess.run(
- [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"],
- cwd=str(source_dir),
- check=True,
- )
+ build_docs_html(source_dir)
if "windows" in target_triple:
process_install_rules(STAGING_RULES_WINDOWS, source_dir, out_dir)
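
run_pyoxidizer() now forwards optional build variables and a build target to
the `pyoxidizer` CLI as `--var NAME VALUE` pairs, sorted so the command line
is deterministic. A standalone sketch of that argument assembly (the base
command is whatever the caller supplies; nothing here is pyoxidizer-specific):

    import subprocess

    def run_with_vars(base_args, build_vars, target=None):
        # Append --var pairs in sorted key order, then the optional build
        # target, and fail loudly if the tool exits non-zero.
        args = list(base_args)
        for k, v in sorted(build_vars.items()):
            args.extend(["--var", k, v])
        if target:
            args.append(target)
        subprocess.run(args, check=True)
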
--- a/contrib/packaging/hgpackaging/wix.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/packaging/hgpackaging/wix.py Mon Jun 07 17:10:35 2021 -0400
@@ -22,7 +22,11 @@
build_py2exe,
stage_install,
)
-from .pyoxidizer import run_pyoxidizer
+from .pyoxidizer import (
+ build_docs_html,
+ create_pyoxidizer_install_layout,
+ run_pyoxidizer,
+)
from .util import (
extract_zip_to_directory,
normalize_windows_version,
@@ -386,36 +390,66 @@
"""Build a WiX MSI installer using PyOxidizer."""
hg_build_dir = source_dir / "build"
build_dir = hg_build_dir / ("wix-%s" % target_triple)
- staging_dir = build_dir / "stage"
-
- arch = "x64" if "x86_64" in target_triple else "x86"
build_dir.mkdir(parents=True, exist_ok=True)
- run_pyoxidizer(source_dir, build_dir, staging_dir, target_triple)
+
+ # Need to ensure docs HTML is built because this isn't done as part of
+ # `pip install Mercurial`.
+ build_docs_html(source_dir)
+
+ build_vars = {}
- # We also install some extra files.
- process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)
+ if msi_name:
+ build_vars["MSI_NAME"] = msi_name
+
+ if version:
+ build_vars["VERSION"] = version
+
+ if extra_features:
+ build_vars["EXTRA_MSI_FEATURES"] = ";".join(extra_features)
- # And remove some files we don't want.
- for f in STAGING_REMOVE_FILES:
- p = staging_dir / f
- if p.exists():
- print('removing %s' % p)
- p.unlink()
+ if signing_info:
+ if signing_info["cert_path"]:
+ build_vars["SIGNING_PFX_PATH"] = signing_info["cert_path"]
+ if signing_info["cert_password"]:
+ build_vars["SIGNING_PFX_PASSWORD"] = signing_info["cert_password"]
+ if signing_info["subject_name"]:
+ build_vars["SIGNING_SUBJECT_NAME"] = signing_info["subject_name"]
+ if signing_info["timestamp_url"]:
+ build_vars["TIME_STAMP_SERVER_URL"] = signing_info["timestamp_url"]
- return run_wix_packaging(
+ if extra_wxs:
+ raise Exception(
+ "support for extra .wxs files has been temporarily dropped"
+ )
+
+ out_dir = run_pyoxidizer(
source_dir,
build_dir,
- staging_dir,
- arch,
- version,
- python2=False,
- msi_name=msi_name,
- extra_wxs=extra_wxs,
- extra_features=extra_features,
- signing_info=signing_info,
+ target_triple,
+ build_vars=build_vars,
+ target="msi",
)
+ msi_dir = out_dir / "msi"
+ msi_files = [f for f in os.listdir(msi_dir) if f.endswith(".msi")]
+
+ if len(msi_files) != 1:
+ raise Exception("expected exactly 1 .msi file; got %d" % len(msi_files))
+
+ msi_filename = msi_files[0]
+
+ msi_path = msi_dir / msi_filename
+ dist_path = source_dir / "dist" / msi_filename
+
+ dist_path.parent.mkdir(parents=True, exist_ok=True)
+
+ shutil.copyfile(msi_path, dist_path)
+
+ return {
+ "msi_path": dist_path,
+ }
+
def run_wix_packaging(
source_dir: pathlib.Path,
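
After the `msi` target builds, exactly one installer is expected in the
output's msi/ directory and gets copied into dist/. A sketch of that
collection step (function name and signature are illustrative):

    import os
    import pathlib
    import shutil

    def copy_single_msi(msi_dir: pathlib.Path, dist_dir: pathlib.Path):
        # Exactly one installer should exist; anything else means the
        # build produced something unexpected, so abort rather than guess.
        msi_files = [f for f in os.listdir(msi_dir) if f.endswith(".msi")]
        if len(msi_files) != 1:
            raise Exception(
                "expected exactly 1 .msi file; got %d" % len(msi_files)
            )
        dist_dir.mkdir(parents=True, exist_ok=True)
        dest = dist_dir / msi_files[0]
        shutil.copyfile(msi_dir / msi_files[0], dest)
        return dest
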
--- a/contrib/packaging/wix/mercurial.wxs Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/packaging/wix/mercurial.wxs Mon Jun 07 17:10:35 2021 -0400
@@ -135,9 +135,13 @@
<UIRef Id="WixUI_FeatureTree" />
<UIRef Id="WixUI_ErrorProgressText" />
+ <?ifdef PyOxidizer?>
+ <WixVariable Id="WixUILicenseRtf" Value="COPYING.rtf" />
+ <Icon Id="hgIcon.ico" SourceFile="mercurial.ico" />
+ <?else?>
<WixVariable Id="WixUILicenseRtf" Value="contrib\packaging\wix\COPYING.rtf" />
-
<Icon Id="hgIcon.ico" SourceFile="contrib/win32/mercurial.ico" />
+ <?endif?>
<Upgrade Id='$(var.ProductUpgradeCode)'>
<UpgradeVersion
--- a/contrib/perf.py Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/perf.py Mon Jun 07 17:10:35 2021 -0400
@@ -66,6 +66,8 @@
import tempfile
import threading
import time
+
+import mercurial.revlog
from mercurial import (
changegroup,
cmdutil,
@@ -76,7 +78,6 @@
hg,
mdiff,
merge,
- revlog,
util,
)
@@ -119,6 +120,21 @@
except ImportError:
profiling = None
+try:
+ from mercurial.revlogutils import constants as revlog_constants
+
+ perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
+
+ def revlog(opener, *args, **kwargs):
+ return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
+
+
+except (ImportError, AttributeError):
+ perf_rl_kind = None
+
+ def revlog(opener, *args, **kwargs):
+ return mercurial.revlog.revlog(opener, *args, **kwargs)
+
def identity(a):
return a
@@ -1809,7 +1825,11 @@
mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
n = scmutil.revsingle(repo, rev).node()
- cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
+
+ try:
+ cl = revlog(getsvfs(repo), radix=b"00changelog")
+ except TypeError:
+ cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
def d():
cl.rev(n)
@@ -2592,17 +2612,25 @@
rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
opener = getattr(rl, 'opener') # trick linter
- indexfile = rl.indexfile
+ # compat with hg <= 5.8: 'radix' only exists on modern revlogs
+ radix = getattr(rl, 'radix', None)
+ indexfile = getattr(rl, '_indexfile', None)
+ if indexfile is None:
+ # compatibility with <= hg-5.8
+ indexfile = getattr(rl, 'indexfile')
data = opener.read(indexfile)
header = struct.unpack(b'>I', data[0:4])[0]
version = header & 0xFFFF
if version == 1:
- revlogio = revlog.revlogio()
inline = header & (1 << 16)
else:
raise error.Abort(b'unsupported revlog version: %d' % version)
+ parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
+ if parse_index_v1 is None:
+ parse_index_v1 = mercurial.revlog.revlogio().parseindex
+
rllen = len(rl)
node0 = rl.node(0)
@@ -2617,33 +2645,35 @@
allnodesrev = list(reversed(allnodes))
def constructor():
- revlog.revlog(opener, indexfile)
+ if radix is not None:
+ revlog(opener, radix=radix)
+ else:
+ # hg <= 5.8
+ revlog(opener, indexfile=indexfile)
def read():
with opener(indexfile) as fh:
fh.read()
def parseindex():
- revlogio.parseindex(data, inline)
+ parse_index_v1(data, inline)
def getentry(revornode):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
index[revornode]
def getentries(revs, count=1):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
for i in range(count):
for rev in revs:
index[rev]
def resolvenode(node):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
rev = getattr(index, 'rev', None)
if rev is None:
- nodemap = getattr(
- revlogio.parseindex(data, inline)[0], 'nodemap', None
- )
+ nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
# This only works for the C code.
if nodemap is None:
return
@@ -2655,12 +2685,10 @@
pass
def resolvenodes(nodes, count=1):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
rev = getattr(index, 'rev', None)
if rev is None:
- nodemap = getattr(
- revlogio.parseindex(data, inline)[0], 'nodemap', None
- )
+ nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
# This only works for the C code.
if nodemap is None:
return
@@ -3015,10 +3043,17 @@
if util.safehasattr(orig, k):
revlogkwargs[k] = getattr(orig, k)
- origindexpath = orig.opener.join(orig.indexfile)
- origdatapath = orig.opener.join(orig.datafile)
- indexname = 'revlog.i'
- dataname = 'revlog.d'
+ indexfile = getattr(orig, '_indexfile', None)
+ if indexfile is None:
+ # compatibility with <= hg-5.8
+ indexfile = getattr(orig, 'indexfile')
+ origindexpath = orig.opener.join(indexfile)
+
+ datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
+ origdatapath = orig.opener.join(datafile)
+ radix = b'revlog'
+ indexname = b'revlog.i'
+ dataname = b'revlog.d'
tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
try:
@@ -3043,9 +3078,12 @@
vfs = vfsmod.vfs(tmpdir)
vfs.options = getattr(orig.opener, 'options', None)
- dest = revlog.revlog(
- vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
- )
+ try:
+ dest = revlog(vfs, radix=radix, **revlogkwargs)
+ except TypeError:
+ dest = revlog(
+ vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
+ )
if dest._inline:
raise error.Abort('not supporting inline revlog (yet)')
# make sure internals are initialized
@@ -3111,9 +3149,14 @@
def rlfh(rl):
if rl._inline:
- return getsvfs(repo)(rl.indexfile)
+ indexfile = getattr(rl, '_indexfile', None)
+ if indexfile is None:
+ # compatibility with <= hg-5.8
+ indexfile = getattr(rl, 'indexfile')
+ return getsvfs(repo)(indexfile)
else:
- return getsvfs(repo)(rl.datafile)
+ datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
+ return getsvfs(repo)(datafile)
def doread():
rl.clearcaches()
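
The perf.py changes keep one code base working across hg versions with two
compatibility idioms: probe renamed attributes with getattr(), and catch the
TypeError raised when an older constructor rejects a new keyword. A minimal
sketch of the constructor probe (`make_revlog` stands in for whichever revlog
factory is in scope; it is not a real API):

    def open_changelog(make_revlog, vfs):
        # Modern revlogs are addressed by radix; constructors from
        # hg <= 5.8 reject the keyword with TypeError, so retry with the
        # old indexfile spelling.
        try:
            return make_revlog(vfs, radix=b'00changelog')
        except TypeError:
            return make_revlog(vfs, indexfile=b'00changelog.i')
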
--- a/contrib/undumprevlog Sun Jun 06 01:24:30 2021 +0200
+++ b/contrib/undumprevlog Mon Jun 07 17:10:35 2021 -0400
@@ -15,6 +15,10 @@
)
from mercurial.utils import procutil
+from mercurial.revlogutils import (
+ constants as revlog_constants,
+)
+
for fp in (sys.stdin, sys.stdout, sys.stderr):
procutil.setbinary(fp)
@@ -28,7 +32,12 @@
break
if l.startswith("file:"):
f = encoding.strtolocal(l[6:-1])
- r = revlog.revlog(opener, f)
+ assert f.endswith(b'.i')
+ r = revlog.revlog(
+ opener,
+ target=(revlog_constants.KIND_OTHER, b'undump-revlog'),
+ radix=f[:-2],
+ )
procutil.stdout.write(b'%s\n' % f)
elif l.startswith("node:"):
n = bin(l[6:-1])
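
The assert-and-slice above is the whole radix convention: a revlog's radix is
its index filename minus the b'.i' suffix. As a tiny self-contained check:

    def radix_from_index_path(path):
        # New-style revlogs are opened by radix, i.e. the index filename
        # with its b'.i' suffix stripped.
        assert path.endswith(b'.i'), path
        return path[:-2]

    assert radix_from_index_path(b'00changelog.i') == b'00changelog'
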
--- a/hgext/absorb.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/absorb.py Mon Jun 07 17:10:35 2021 -0400
@@ -38,7 +38,6 @@
from mercurial.i18n import _
from mercurial.node import (
hex,
- nullid,
short,
)
from mercurial import (
@@ -109,7 +108,7 @@
return b''
def node(self):
- return nullid
+ return self._repo.nullid
def uniq(lst):
@@ -927,7 +926,7 @@
the commit is a clone from ctx, with an (optionally) different p1, and
different file contents replaced by memworkingcopy.
"""
- parents = p1 and (p1, nullid)
+ parents = p1 and (p1, self.repo.nullid)
extra = ctx.extra()
if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
extra[b'absorb_source'] = ctx.hex()
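
A recurring theme in these hunks: the module-level nullid constant gives way
to repo.nullid, so the null node tracks the repository's hash function
instead of assuming SHA-1. A sketch of the shape of that lookup (SHA1_NULLID
and real_parents are illustrative names, not Mercurial APIs):

    # The SHA-1 null node, which repo.nullid evaluates to on today's
    # repositories; a repo with another hash function supplies its own.
    SHA1_NULLID = b'\0' * 20

    def real_parents(repo_like, parents):
        # Filter out null parents by asking the repository object for its
        # null id, the way the rewritten callers now do.
        nullid = getattr(repo_like, 'nullid', SHA1_NULLID)
        return [p for p in parents if p != nullid]
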
--- a/hgext/convert/bzr.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/convert/bzr.py Mon Jun 07 17:10:35 2021 -0400
@@ -5,8 +5,9 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-# This module is for handling 'bzr', that was formerly known as Bazaar-NG;
-# it cannot access 'bar' repositories, but they were never used very much
+# This module is for handling Breezy imports (`brz`), but it is also
+# compatible with Bazaar (`bzr`), which was formerly known as Bazaar-NG;
+# it cannot access `bar` repositories, but they were never used very much.
from __future__ import absolute_import
import os
@@ -19,31 +20,32 @@
)
from . import common
+
# these do not work with demandimport, blacklist
demandimport.IGNORES.update(
[
- b'bzrlib.transactions',
- b'bzrlib.urlutils',
+ b'breezy.transactions',
+ b'breezy.urlutils',
b'ElementPath',
]
)
try:
# bazaar imports
- import bzrlib.bzrdir
- import bzrlib.errors
- import bzrlib.revision
- import bzrlib.revisionspec
+ import breezy.bzr.bzrdir
+ import breezy.errors
+ import breezy.revision
+ import breezy.revisionspec
- bzrdir = bzrlib.bzrdir
- errors = bzrlib.errors
- revision = bzrlib.revision
- revisionspec = bzrlib.revisionspec
+ bzrdir = breezy.bzr.bzrdir
+ errors = breezy.errors
+ revision = breezy.revision
+ revisionspec = breezy.revisionspec
revisionspec.RevisionSpec
except ImportError:
pass
-supportedkinds = (b'file', b'symlink')
+supportedkinds = ('file', 'symlink')
class bzr_source(common.converter_source):
@@ -58,7 +60,7 @@
)
try:
- # access bzrlib stuff
+ # access breezy stuff
bzrdir
except NameError:
raise common.NoRepo(_(b'Bazaar modules could not be loaded'))
@@ -66,7 +68,8 @@
path = os.path.abspath(path)
self._checkrepotype(path)
try:
- self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
+ bzr_dir = bzrdir.BzrDir.open(path.decode())
+ self.sourcerepo = bzr_dir.open_repository()
except errors.NoRepositoryPresent:
raise common.NoRepo(
_(b'%s does not look like a Bazaar repository') % path
@@ -78,7 +81,7 @@
# Lightweight checkouts detection is informational but probably
# fragile at API level. It should not terminate the conversion.
try:
- dir = bzrdir.BzrDir.open_containing(path)[0]
+ dir = bzrdir.BzrDir.open_containing(path.decode())[0]
try:
tree = dir.open_workingtree(recommend_upgrade=False)
branch = tree.branch
@@ -87,8 +90,8 @@
branch = dir.open_branch()
if (
tree is not None
- and tree.bzrdir.root_transport.base
- != branch.bzrdir.root_transport.base
+ and tree.controldir.root_transport.base
+ != branch.controldir.root_transport.base
):
self.ui.warn(
_(
@@ -127,7 +130,8 @@
revid = None
for branch in self._bzrbranches():
try:
- r = revisionspec.RevisionSpec.from_string(self.revs[0])
+ revspec = self.revs[0].decode()
+ r = revisionspec.RevisionSpec.from_string(revspec)
info = r.in_history(branch)
except errors.BzrError:
pass
@@ -142,24 +146,26 @@
return heads
def getfile(self, name, rev):
+ name = name.decode()
revtree = self.sourcerepo.revision_tree(rev)
- fileid = revtree.path2id(name.decode(self.encoding or b'utf-8'))
- kind = None
- if fileid is not None:
- kind = revtree.kind(fileid)
+
+ try:
+ kind = revtree.kind(name)
+ except breezy.errors.NoSuchFile:
+ return None, None
if kind not in supportedkinds:
# the file is not available anymore - was deleted
return None, None
- mode = self._modecache[(name, rev)]
- if kind == b'symlink':
- target = revtree.get_symlink_target(fileid)
+ mode = self._modecache[(name.encode(), rev)]
+ if kind == 'symlink':
+ target = revtree.get_symlink_target(name)
if target is None:
raise error.Abort(
_(b'%s.%s symlink has no target') % (name, rev)
)
- return target, mode
+ return target.encode(), mode
else:
- sio = revtree.get_file(fileid)
+ sio = revtree.get_file(name)
return sio.read(), mode
def getchanges(self, version, full):
@@ -184,15 +190,15 @@
parents = self._filterghosts(rev.parent_ids)
self._parentids[version] = parents
- branch = self.recode(rev.properties.get(b'branch-nick', u'default'))
- if branch == b'trunk':
- branch = b'default'
+ branch = rev.properties.get('branch-nick', 'default')
+ if branch == 'trunk':
+ branch = 'default'
return common.commit(
parents=parents,
date=b'%d %d' % (rev.timestamp, -rev.timezone),
author=self.recode(rev.committer),
desc=self.recode(rev.message),
- branch=branch,
+ branch=branch.encode('utf8'),
rev=version,
saverev=self._saverev,
)
@@ -234,35 +240,32 @@
# Process the entries by reverse lexicographic name order to
# handle nested renames correctly, most specific first.
+
+ def key(c):
+ return c.path[0] or c.path[1] or ""
+
curchanges = sorted(
current.iter_changes(origin),
- key=lambda c: c[1][0] or c[1][1],
+ key=key,
reverse=True,
)
- for (
- fileid,
- paths,
- changed_content,
- versioned,
- parent,
- name,
- kind,
- executable,
- ) in curchanges:
-
+ for change in curchanges:
+ paths = change.path
+ kind = change.kind
+ executable = change.executable
if paths[0] == u'' or paths[1] == u'':
# ignore changes to tree root
continue
# bazaar tracks directories, mercurial does not, so
# we have to rename the directory contents
- if kind[1] == b'directory':
- if kind[0] not in (None, b'directory'):
+ if kind[1] == 'directory':
+ if kind[0] not in (None, 'directory'):
# Replacing 'something' with a directory, record it
# so it can be removed.
changes.append((self.recode(paths[0]), revid))
- if kind[0] == b'directory' and None not in paths:
+ if kind[0] == 'directory' and None not in paths:
renaming = paths[0] != paths[1]
# neither an add nor a delete - a move
# rename all directory contents manually
@@ -270,9 +273,9 @@
# get all child-entries of the directory
for name, entry in inventory.iter_entries(subdir):
# hg does not track directory renames
- if entry.kind == b'directory':
+ if entry.kind == 'directory':
continue
- frompath = self.recode(paths[0] + b'/' + name)
+ frompath = self.recode(paths[0] + '/' + name)
if frompath in seen:
# Already handled by a more specific change entry
# This is important when you have:
@@ -283,14 +286,14 @@
seen.add(frompath)
if not renaming:
continue
- topath = self.recode(paths[1] + b'/' + name)
+ topath = self.recode(paths[1] + '/' + name)
# register the files as changed
changes.append((frompath, revid))
changes.append((topath, revid))
# add to mode cache
mode = (
(entry.executable and b'x')
- or (entry.kind == b'symlink' and b's')
+ or (entry.kind == 'symlink' and b's')
or b''
)
self._modecache[(topath, revid)] = mode
@@ -320,7 +323,7 @@
# populate the mode cache
kind, executable = [e[1] for e in (kind, executable)]
- mode = (executable and b'x') or (kind == b'symlink' and b'l') or b''
+ mode = (executable and b'x') or (kind == 'symlink' and b'l') or b''
self._modecache[(topath, revid)] = mode
changes.append((topath, revid))
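
The converter now drives Breezy, whose API traffics in str, while Mercurial
internals stay bytes; values are decoded on the way in and re-encoded on the
way out, right at the API boundary. A sketch of that convention as a
decorator (the converter does it by hand; this wrapper is purely
illustrative):

    def bytes_boundary(func):
        # Decode bytes arguments for a str-based API and re-encode str
        # results before handing them back to byte-oriented callers.
        def wrapper(*args):
            decoded = [
                a.decode('utf-8') if isinstance(a, bytes) else a
                for a in args
            ]
            result = func(*decoded)
            if isinstance(result, str):
                result = result.encode('utf-8')
            return result
        return wrapper
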
--- a/hgext/convert/git.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/convert/git.py Mon Jun 07 17:10:35 2021 -0400
@@ -9,7 +9,7 @@
import os
from mercurial.i18n import _
-from mercurial.node import nullhex
+from mercurial.node import sha1nodeconstants
from mercurial import (
config,
error,
@@ -192,7 +192,7 @@
return heads
def catfile(self, rev, ftype):
- if rev == nullhex:
+ if rev == sha1nodeconstants.nullhex:
raise IOError
self.catfilepipe[0].write(rev + b'\n')
self.catfilepipe[0].flush()
@@ -214,7 +214,7 @@
return data
def getfile(self, name, rev):
- if rev == nullhex:
+ if rev == sha1nodeconstants.nullhex:
return None, None
if name == b'.hgsub':
data = b'\n'.join([m.hgsub() for m in self.submoditer()])
@@ -228,7 +228,7 @@
return data, mode
def submoditer(self):
- null = nullhex
+ null = sha1nodeconstants.nullhex
for m in sorted(self.submodules, key=lambda p: p.path):
if m.node != null:
yield m
@@ -317,7 +317,7 @@
subexists[0] = True
if entry[4] == b'D' or renamesource:
subdeleted[0] = True
- changes.append((b'.hgsub', nullhex))
+ changes.append((b'.hgsub', sha1nodeconstants.nullhex))
else:
changes.append((b'.hgsub', b''))
elif entry[1] == b'160000' or entry[0] == b':160000':
@@ -325,7 +325,7 @@
subexists[0] = True
else:
if renamesource:
- h = nullhex
+ h = sha1nodeconstants.nullhex
self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b""
changes.append((f, h))
@@ -362,7 +362,7 @@
if subexists[0]:
if subdeleted[0]:
- changes.append((b'.hgsubstate', nullhex))
+ changes.append((b'.hgsubstate', sha1nodeconstants.nullhex))
else:
self.retrievegitmodules(version)
changes.append((b'.hgsubstate', b''))
--- a/hgext/convert/hg.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/convert/hg.py Mon Jun 07 17:10:35 2021 -0400
@@ -27,8 +27,7 @@
from mercurial.node import (
bin,
hex,
- nullhex,
- nullid,
+ sha1nodeconstants,
)
from mercurial import (
bookmarks,
@@ -160,7 +159,7 @@
continue
revid = revmap.get(source.lookuprev(s[0]))
if not revid:
- if s[0] == nullhex:
+ if s[0] == sha1nodeconstants.nullhex:
revid = s[0]
else:
# missing, but keep for hash stability
@@ -179,7 +178,7 @@
revid = s[0]
subpath = s[1]
- if revid != nullhex:
+ if revid != sha1nodeconstants.nullhex:
revmap = self.subrevmaps.get(subpath)
if revmap is None:
revmap = mapfile(
@@ -304,9 +303,9 @@
parent = parents[0]
if len(parents) < 2:
- parents.append(nullid)
+ parents.append(self.repo.nullid)
if len(parents) < 2:
- parents.append(nullid)
+ parents.append(self.repo.nullid)
p2 = parents.pop(0)
text = commit.desc
@@ -356,7 +355,7 @@
p2 = parents.pop(0)
p1ctx = self.repo[p1]
p2ctx = None
- if p2 != nullid:
+ if p2 != self.repo.nullid:
p2ctx = self.repo[p2]
fileset = set(files)
if full:
@@ -421,7 +420,7 @@
def puttags(self, tags):
tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
- tagparent = tagparent or nullid
+ tagparent = tagparent or self.repo.nullid
oldlines = set()
for branch, heads in pycompat.iteritems(self.repo.branchmap()):
--- a/hgext/git/dirstate.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/git/dirstate.py Mon Jun 07 17:10:35 2021 -0400
@@ -4,7 +4,7 @@
import errno
import os
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
from mercurial import (
error,
extensions,
@@ -81,14 +81,16 @@
except pygit2.GitError:
# Typically happens when peeling HEAD fails, as in an
# empty repository.
- return nullid
+ return sha1nodeconstants.nullid
def p2(self):
# TODO: MERGE_HEAD? something like that, right?
- return nullid
+ return sha1nodeconstants.nullid
- def setparents(self, p1, p2=nullid):
- assert p2 == nullid, b'TODO merging support'
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = sha1nodeconstants.nullid
+ assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
self.git.head.set_target(gitutil.togitnode(p1))
@util.propertycache
@@ -102,7 +104,7 @@
def parents(self):
# TODO how on earth do we find p2 if a merge is in flight?
- return self.p1(), nullid
+ return self.p1(), sha1nodeconstants.nullid
def __iter__(self):
return (pycompat.fsencode(f.path) for f in self.git.index)
--- a/hgext/git/gitlog.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/git/gitlog.py Mon Jun 07 17:10:35 2021 -0400
@@ -5,11 +5,8 @@
from mercurial.node import (
bin,
hex,
- nullhex,
- nullid,
nullrev,
sha1nodeconstants,
- wdirhex,
)
from mercurial import (
ancestor,
@@ -47,7 +44,7 @@
)
def rev(self, n):
- if n == nullid:
+ if n == sha1nodeconstants.nullid:
return -1
t = self._db.execute(
'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
@@ -58,7 +55,7 @@
def node(self, r):
if r == nullrev:
- return nullid
+ return sha1nodeconstants.nullid
t = self._db.execute(
'SELECT node FROM changelog WHERE rev = ?', (r,)
).fetchone()
@@ -135,7 +132,7 @@
bin(v[0]): v[1]
for v in self._db.execute('SELECT node, rev FROM changelog')
}
- r[nullid] = nullrev
+ r[sha1nodeconstants.nullid] = nullrev
return r
def tip(self):
@@ -144,7 +141,7 @@
).fetchone()
if t:
return bin(t[0])
- return nullid
+ return sha1nodeconstants.nullid
def revs(self, start=0, stop=None):
if stop is None:
@@ -167,7 +164,7 @@
return -1
def _partialmatch(self, id):
- if wdirhex.startswith(id):
+ if sha1nodeconstants.wdirhex.startswith(id):
raise error.WdirUnsupported
candidates = [
bin(x[0])
@@ -176,8 +173,8 @@
(pycompat.sysstr(id + b'%'),),
)
]
- if nullhex.startswith(id):
- candidates.append(nullid)
+ if sha1nodeconstants.nullhex.startswith(id):
+ candidates.append(sha1nodeconstants.nullid)
if len(candidates) > 1:
raise error.AmbiguousPrefixLookupError(
id, b'00changelog.i', _(b'ambiguous identifier')
@@ -223,8 +220,10 @@
n = nodeorrev
extra = {b'branch': b'default'}
# handle looking up nullid
- if n == nullid:
- return hgchangelog._changelogrevision(extra=extra, manifest=nullid)
+ if n == sha1nodeconstants.nullid:
+ return hgchangelog._changelogrevision(
+ extra=extra, manifest=sha1nodeconstants.nullid
+ )
hn = gitutil.togitnode(n)
# We've got a real commit!
files = [
@@ -301,7 +300,7 @@
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [sha1nodeconstants.nullid]
if heads is None:
heads = self.heads()
@@ -400,9 +399,9 @@
):
parents = []
hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
parents.append(hp1)
- if p2 and p2 != nullid:
+ if p2 and p2 != sha1nodeconstants.nullid:
parents.append(hp2)
assert date is not None
timestamp, tz = date
@@ -435,7 +434,7 @@
return self.get(b'', node)
def get(self, relpath, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
# TODO: this should almost certainly be a memgittreemanifestctx
return manifest.memtreemanifestctx(self, relpath)
commit = self.gitrepo[gitutil.togitnode(node)]
@@ -454,9 +453,10 @@
super(filelog, self).__init__(gr, db)
assert isinstance(path, bytes)
self.path = path
+ self.nullid = sha1nodeconstants.nullid
def read(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return b''
return self.gitrepo[gitutil.togitnode(node)].data
--- a/hgext/git/gitutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/git/gitutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -1,7 +1,7 @@
"""utilities to assist in working with pygit2"""
from __future__ import absolute_import
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex, sha1nodeconstants
from mercurial import pycompat
@@ -50,4 +50,4 @@
return bin(n)
-nullgit = togitnode(nullid)
+nullgit = togitnode(sha1nodeconstants.nullid)
--- a/hgext/git/index.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/git/index.py Mon Jun 07 17:10:35 2021 -0400
@@ -5,9 +5,7 @@
import sqlite3
from mercurial.i18n import _
-from mercurial.node import (
- nullid,
-)
+from mercurial.node import sha1nodeconstants
from mercurial import (
encoding,
@@ -317,7 +315,9 @@
)
new_files = (p.delta.new_file for p in patchgen)
files = {
- nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
+ nf.path: nf.id.hex
+ for nf in new_files
+ if nf.id.raw != sha1nodeconstants.nullid
}
for p, n in files.items():
# We intentionally set NULLs for any file parentage
--- a/hgext/gpg.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/gpg.py Mon Jun 07 17:10:35 2021 -0400
@@ -14,7 +14,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
short,
)
from mercurial import (
@@ -314,7 +313,9 @@
if revs:
nodes = [repo.lookup(n) for n in revs]
else:
- nodes = [node for node in repo.dirstate.parents() if node != nullid]
+ nodes = [
+ node for node in repo.dirstate.parents() if node != repo.nullid
+ ]
if len(nodes) > 1:
raise error.Abort(
_(b'uncommitted merge - please provide a specific revision')
--- a/hgext/hgk.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/hgk.py Mon Jun 07 17:10:35 2021 -0400
@@ -40,7 +40,6 @@
from mercurial.i18n import _
from mercurial.node import (
- nullid,
nullrev,
short,
)
@@ -95,7 +94,7 @@
mmap2 = repo[node2].manifest()
m = scmutil.match(repo[node1], files)
st = repo.status(node1, node2, m)
- empty = short(nullid)
+ empty = short(repo.nullid)
for f in st.modified:
# TODO get file permissions
@@ -317,9 +316,9 @@
parentstr = b""
if parents:
pp = repo.changelog.parents(n)
- if pp[0] != nullid:
+ if pp[0] != repo.nullid:
parentstr += b" " + short(pp[0])
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
parentstr += b" " + short(pp[1])
if not full:
ui.write(b"%s%s\n" % (short(n), parentstr))
--- a/hgext/journal.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/journal.py Mon Jun 07 17:10:35 2021 -0400
@@ -22,7 +22,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
)
from mercurial import (
@@ -117,8 +116,8 @@
new = list(new)
if util.safehasattr(dirstate, 'journalstorage'):
# only record two hashes if there was a merge
- oldhashes = old[:1] if old[1] == nullid else old
- newhashes = new[:1] if new[1] == nullid else new
+ oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
+ newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
dirstate.journalstorage.record(
wdirparenttype, b'.', oldhashes, newhashes
)
@@ -131,7 +130,7 @@
if util.safehasattr(repo, 'journal'):
oldmarks = bookmarks.bmstore(repo)
for mark, value in pycompat.iteritems(store):
- oldvalue = oldmarks.get(mark, nullid)
+ oldvalue = oldmarks.get(mark, repo.nullid)
if value != oldvalue:
repo.journal.record(bookmarktype, mark, oldvalue, value)
return orig(store, fp)
--- a/hgext/largefiles/basestore.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/largefiles/basestore.py Mon Jun 07 17:10:35 2021 -0400
@@ -11,7 +11,8 @@
from mercurial.i18n import _
-from mercurial import node, util
+from mercurial.node import short
+from mercurial import util
from mercurial.utils import (
urlutil,
)
@@ -137,7 +138,7 @@
filestocheck = [] # list of (cset, filename, expectedhash)
for rev in revs:
cctx = self.repo[rev]
- cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node()))
+ cset = b"%d:%s" % (cctx.rev(), short(cctx.node()))
for standin in cctx:
filename = lfutil.splitstandin(standin)
--- a/hgext/largefiles/lfcommands.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/largefiles/lfcommands.py Mon Jun 07 17:10:35 2021 -0400
@@ -17,7 +17,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
)
from mercurial import (
@@ -115,7 +114,7 @@
rsrc[ctx]
for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
)
- revmap = {nullid: nullid}
+ revmap = {rsrc.nullid: rdst.nullid}
if tolfile:
# Lock destination to prevent modification while it is converted to.
# Don't need to lock src because we are just reading from its
@@ -340,7 +339,7 @@
# Generate list of changed files
def _getchangedfiles(ctx, parents):
files = set(ctx.files())
- if nullid not in parents:
+ if ctx.repo().nullid not in parents:
mc = ctx.manifest()
for pctx in ctx.parents():
for fn in pctx.manifest().diff(mc):
@@ -354,7 +353,7 @@
for p in ctx.parents():
parents.append(revmap[p.node()])
while len(parents) < 2:
- parents.append(nullid)
+ parents.append(ctx.repo().nullid)
return parents
--- a/hgext/largefiles/lfutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/largefiles/lfutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -15,10 +15,7 @@
import stat
from mercurial.i18n import _
-from mercurial.node import (
- hex,
- nullid,
-)
+from mercurial.node import hex
from mercurial.pycompat import open
from mercurial import (
@@ -28,6 +25,7 @@
httpconnection,
match as matchmod,
pycompat,
+ requirements,
scmutil,
sparse,
util,
@@ -200,6 +198,7 @@
vfs = repo.vfs
lfstoredir = longname
opener = vfsmod.vfs(vfs.join(lfstoredir))
+ use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
lfdirstate = largefilesdirstate(
opener,
ui,
@@ -207,6 +206,7 @@
repo.dirstate._validate,
lambda: sparse.matcher(repo),
repo.nodeconstants,
+ use_dirstate_v2,
)
# If the largefiles dirstate does not exist, populate and create
@@ -613,7 +613,7 @@
) as progress:
for i, n in enumerate(missing):
progress.update(i)
- parents = [p for p in repo[n].parents() if p != nullid]
+ parents = [p for p in repo[n].parents() if p != repo.nullid]
with lfstatus(repo, value=False):
ctx = repo[n]
--- a/hgext/lfs/wrapper.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/lfs/wrapper.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,7 +10,7 @@
import hashlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid, short
+from mercurial.node import bin, hex, short
from mercurial.pycompat import (
getattr,
setattr,
@@ -158,7 +158,7 @@
rev = rlog.rev(node)
else:
node = rlog.node(rev)
- if node == nullid:
+ if node == rlog.nullid:
return False
flags = rlog.flags(rev)
return bool(flags & revlog.REVIDX_EXTSTORED)
--- a/hgext/mq.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/mq.py Mon Jun 07 17:10:35 2021 -0400
@@ -73,7 +73,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -908,13 +907,13 @@
"""
if rev is None:
(p1, p2) = repo.dirstate.parents()
- if p2 == nullid:
+ if p2 == repo.nullid:
return p1
if not self.applied:
return None
return self.applied[-1].node
p1, p2 = repo.changelog.parents(rev)
- if p2 != nullid and p2 in [x.node for x in self.applied]:
+ if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
return p2
return p1
@@ -1591,7 +1590,7 @@
for hs in repo.branchmap().iterheads():
heads.extend(hs)
if not heads:
- heads = [nullid]
+ heads = [repo.nullid]
if repo.dirstate.p1() not in heads and not exact:
self.ui.status(_(b"(working directory not at a head)\n"))
@@ -1857,7 +1856,7 @@
fctx = ctx[f]
repo.wwrite(f, fctx.data(), fctx.flags())
repo.dirstate.normal(f)
- repo.setparents(qp, nullid)
+ repo.setparents(qp, repo.nullid)
for patch in reversed(self.applied[start:end]):
self.ui.status(_(b"popping %s\n") % patch.name)
del self.applied[start:end]
--- a/hgext/narrow/narrowbundle2.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/narrow/narrowbundle2.py Mon Jun 07 17:10:35 2021 -0400
@@ -11,7 +11,6 @@
import struct
from mercurial.i18n import _
-from mercurial.node import nullid
from mercurial import (
bundle2,
changegroup,
@@ -94,7 +93,7 @@
raise error.Abort(_(b'depth must be positive, got %d') % depth)
heads = set(heads or repo.heads())
- common = set(common or [nullid])
+ common = set(common or [repo.nullid])
visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
repo, common, heads, set(), match, depth=depth
@@ -128,7 +127,7 @@
common,
known,
):
- common = set(common or [nullid])
+ common = set(common or [repo.nullid])
# Steps:
# 1. Send kill for "$known & ::common"
#
--- a/hgext/narrow/narrowcommands.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/narrow/narrowcommands.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,7 +12,6 @@
from mercurial.i18n import _
from mercurial.node import (
hex,
- nullid,
short,
)
from mercurial import (
@@ -193,7 +192,7 @@
kwargs[b'known'] = [
hex(ctx.node())
for ctx in repo.set(b'::%ln', pullop.common)
- if ctx.node() != nullid
+ if ctx.node() != repo.nullid
]
if not kwargs[b'known']:
# Mercurial serializes an empty list as '' and deserializes it as
@@ -228,10 +227,17 @@
unfi = repo.unfiltered()
outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
ui.status(_(b'looking for local changes to affected paths\n'))
+ progress = ui.makeprogress(
+ topic=_(b'changesets'),
+ unit=_(b'changesets'),
+ total=len(outgoing.missing) + len(outgoing.excluded),
+ )
localnodes = []
- for n in itertools.chain(outgoing.missing, outgoing.excluded):
- if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
- localnodes.append(n)
+ with progress:
+ for n in itertools.chain(outgoing.missing, outgoing.excluded):
+ progress.increment()
+ if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
+ localnodes.append(n)
revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
hiddenrevs = repoview.filterrevs(repo, b'visible')
visibletostrip = list(
@@ -275,6 +281,10 @@
)
hg.clean(repo, urev)
overrides = {(b'devel', b'strip-obsmarkers'): False}
+ if backup:
+ ui.status(_(b'moving unwanted changesets to backup\n'))
+ else:
+ ui.status(_(b'deleting unwanted changesets\n'))
with ui.configoverride(overrides, b'narrow'):
repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
@@ -310,6 +320,7 @@
util.unlinkpath(repo.svfs.join(f))
repo.store.markremoved(f)
+ ui.status(_(b'deleting unwanted files from working copy\n'))
narrowspec.updateworkingcopy(repo, assumeclean=True)
narrowspec.copytoworkingcopy(repo)
@@ -370,7 +381,7 @@
ds = repo.dirstate
p1, p2 = ds.p1(), ds.p2()
with ds.parentchange():
- ds.setparents(nullid, nullid)
+ ds.setparents(repo.nullid, repo.nullid)
if isoldellipses:
with wrappedextraprepare:
exchange.pull(repo, remote, heads=common)
@@ -380,7 +391,7 @@
known = [
ctx.node()
for ctx in repo.set(b'::%ln', common)
- if ctx.node() != nullid
+ if ctx.node() != repo.nullid
]
with remote.commandexecutor() as e:
bundle = e.callcommand(
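
The new progress reporting above follows the standard ui.makeprogress()
shape: the total is declared up front, increment() ticks once per item, and
leaving the `with` block completes the bar. A toy stand-in that mimics that
interface (print-based, purely illustrative):

    class progress_sketch:
        def __init__(self, topic, total):
            self.topic, self.total, self.pos = topic, total, 0

        def __enter__(self):
            return self

        def increment(self):
            # One tick per processed item, like progress.increment().
            self.pos += 1
            print('%s: %d/%d' % (self.topic, self.pos, self.total))

        def __exit__(self, *exc):
            print('%s: done' % self.topic)

    items = ['a', 'b', 'c']
    with progress_sketch('changesets', total=len(items)) as progress:
        for n in items:
            progress.increment()
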
--- a/hgext/phabricator.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/phabricator.py Mon Jun 07 17:10:35 2021 -0400
@@ -69,7 +69,7 @@
import re
import time
-from mercurial.node import bin, nullid, short
+from mercurial.node import bin, short
from mercurial.i18n import _
from mercurial.pycompat import getattr
from mercurial.thirdparty import attr
@@ -586,7 +586,7 @@
tags.tag(
repo,
tagname,
- nullid,
+ repo.nullid,
message=None,
user=None,
date=None,
@@ -1606,7 +1606,7 @@
tags.tag(
repo,
tagname,
- nullid,
+ repo.nullid,
message=None,
user=None,
date=None,
--- a/hgext/rebase.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/rebase.py Mon Jun 07 17:10:35 2021 -0400
@@ -446,8 +446,15 @@
rebaseset = set(destmap.keys())
rebaseset -= set(self.obsolete_with_successor_in_destination)
rebaseset -= self.obsolete_with_successor_in_rebase_set
+ # We have our own divergence-checking in the rebase extension
+ overrides = {}
+ if obsolete.isenabled(self.repo, obsolete.createmarkersopt):
+ overrides = {
+ (b'experimental', b'evolution.allowdivergence'): b'true'
+ }
try:
- rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
+ with self.ui.configoverride(overrides):
+ rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
except error.Abort as e:
if e.hint is None:
e.hint = _(b'use --keep to keep original changesets')
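
ui.configoverride() scopes the allowdivergence override to just the
precheck() call, restoring the previous value even if precheck raises. A
dict-based sketch of that scoping behavior (not the real ui implementation):

    import contextlib

    @contextlib.contextmanager
    def configoverride_sketch(config, overrides):
        # Layer the overrides on for the duration of the block and restore
        # the prior values afterwards, error or not.
        saved = dict(config)
        config.update(overrides)
        try:
            yield config
        finally:
            config.clear()
            config.update(saved)

    key = ('experimental', 'evolution.allowdivergence')
    cfg = {key: 'false'}
    with configoverride_sketch(cfg, {key: 'true'}):
        assert cfg[key] == 'true'
    assert cfg[key] == 'false'
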
--- a/hgext/remotefilelog/contentstore.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/contentstore.py Mon Jun 07 17:10:35 2021 -0400
@@ -2,7 +2,10 @@
import threading
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial.pycompat import getattr
from mercurial import (
mdiff,
@@ -55,7 +58,7 @@
"""
chain = self.getdeltachain(name, node)
- if chain[-1][ChainIndicies.BASENODE] != nullid:
+ if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
# If we didn't receive a full chain, throw
raise KeyError((name, hex(node)))
@@ -92,7 +95,7 @@
deltabasenode.
"""
chain = self._getpartialchain(name, node)
- while chain[-1][ChainIndicies.BASENODE] != nullid:
+ while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
x, x, deltabasename, deltabasenode, x = chain[-1]
try:
morechain = self._getpartialchain(deltabasename, deltabasenode)
@@ -187,7 +190,12 @@
# Since remotefilelog content stores only contain full texts, just
# return that.
revision = self.get(name, node)
- return revision, name, nullid, self.getmeta(name, node)
+ return (
+ revision,
+ name,
+ sha1nodeconstants.nullid,
+ self.getmeta(name, node),
+ )
def getdeltachain(self, name, node):
# Since remotefilelog content stores just contain full texts, we return
@@ -195,7 +203,7 @@
# The nullid in the deltabasenode slot indicates that the revision is a
# fulltext.
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, sha1nodeconstants.nullid, revision)]
def getmeta(self, name, node):
self._sanitizemetacache()
@@ -237,7 +245,12 @@
def getdelta(self, name, node):
revision = self.get(name, node)
- return revision, name, nullid, self._shared.getmeta(name, node)
+ return (
+ revision,
+ name,
+ sha1nodeconstants.nullid,
+ self._shared.getmeta(name, node),
+ )
def getdeltachain(self, name, node):
# Since our remote content stores just contain full texts, we return a
@@ -245,7 +258,7 @@
# The nullid in the deltabasenode slot indicates that the revision is a
# fulltext.
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, sha1nodeconstants.nullid, revision)]
def getmeta(self, name, node):
self._fileservice.prefetch(
@@ -268,7 +281,7 @@
self._store = repo.store
self._svfs = repo.svfs
self._revlogs = dict()
- self._cl = revlog.revlog(self._svfs, b'00changelog.i')
+ self._cl = revlog.revlog(self._svfs, radix=b'00changelog')
self._repackstartlinkrev = 0
def get(self, name, node):
@@ -276,11 +289,11 @@
def getdelta(self, name, node):
revision = self.get(name, node)
- return revision, name, nullid, self.getmeta(name, node)
+ return revision, name, self._cl.nullid, self.getmeta(name, node)
def getdeltachain(self, name, node):
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, self._cl.nullid, revision)]
def getmeta(self, name, node):
rl = self._revlog(name)
@@ -304,9 +317,9 @@
missing.discard(ancnode)
p1, p2 = rl.parents(ancnode)
- if p1 != nullid and p1 not in known:
+ if p1 != self._cl.nullid and p1 not in known:
missing.add(p1)
- if p2 != nullid and p2 not in known:
+ if p2 != self._cl.nullid and p2 not in known:
missing.add(p2)
linknode = self._cl.node(rl.linkrev(ancrev))
@@ -328,10 +341,10 @@
def _revlog(self, name):
rl = self._revlogs.get(name)
if rl is None:
- revlogname = b'00manifesttree.i'
+ revlogname = b'00manifesttree'
if name != b'':
- revlogname = b'meta/%s/00manifest.i' % name
- rl = revlog.revlog(self._svfs, revlogname)
+ revlogname = b'meta/%s/00manifest' % name
+ rl = revlog.revlog(self._svfs, radix=revlogname)
self._revlogs[name] = rl
return rl
@@ -352,7 +365,7 @@
if options and options.get(constants.OPTION_PACKSONLY):
return
treename = b''
- rl = revlog.revlog(self._svfs, b'00manifesttree.i')
+ rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
startlinkrev = self._repackstartlinkrev
endlinkrev = self._repackendlinkrev
for rev in pycompat.xrange(len(rl) - 1, -1, -1):
@@ -369,9 +382,9 @@
if path[:5] != b'meta/' or path[-2:] != b'.i':
continue
- treename = path[5 : -len(b'/00manifest.i')]
+ treename = path[5 : -len(b'/00manifest')]
- rl = revlog.revlog(self._svfs, path)
+ rl = revlog.revlog(self._svfs, indexfile=path[:-2])
for rev in pycompat.xrange(len(rl) - 1, -1, -1):
linkrev = rl.linkrev(rev)
if linkrev < startlinkrev:
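
Because remotefilelog content stores hold only full texts, every revision is
modelled as a one-entry delta chain whose base is the null node, and
consumers treat a non-null final base as an incomplete chain. A compact
sketch of that contract (names are illustrative):

    SHA1_NULLID = b'\0' * 20

    def fulltext_chain(name, node, text):
        # One entry: (name, node, deltabasename, deltabasenode, data).
        # A null delta base marks the entry as a full text.
        return [(name, node, None, SHA1_NULLID, text)]

    def assemble(chain):
        # A complete chain must bottom out at the null base; anything
        # else means the store returned only part of the history.
        if chain[-1][3] != SHA1_NULLID:
            raise KeyError('incomplete delta chain')
        return chain[-1][4]

    assert assemble(fulltext_chain(b'f', b'n' * 20, b'text')) == b'text'
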
--- a/hgext/remotefilelog/datapack.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/datapack.py Mon Jun 07 17:10:35 2021 -0400
@@ -3,7 +3,10 @@
import struct
import zlib
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial.i18n import _
from mercurial import (
pycompat,
@@ -458,7 +461,7 @@
rawindex = b''
fmt = self.INDEXFORMAT
for node, deltabase, offset, size in entries:
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
deltabaselocation = FULLTEXTINDEXMARK
else:
# Instead of storing the deltabase node in the index, let's
--- a/hgext/remotefilelog/debugcommands.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/debugcommands.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,7 +12,7 @@
from mercurial.node import (
bin,
hex,
- nullid,
+ sha1nodeconstants,
short,
)
from mercurial.i18n import _
@@ -57,9 +57,9 @@
_(b"%s => %s %s %s %s\n")
% (short(node), short(p1), short(p2), short(linknode), copyfrom)
)
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
queue.append(p1)
- if p2 != nullid:
+ if p2 != sha1nodeconstants.nullid:
queue.append(p2)
@@ -152,7 +152,7 @@
try:
pp = r.parents(node)
except Exception:
- pp = [nullid, nullid]
+ pp = [repo.nullid, repo.nullid]
ui.write(
b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
% (
@@ -197,7 +197,7 @@
node = r.node(i)
pp = r.parents(node)
ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write(b"}\n")
@@ -212,7 +212,7 @@
filepath = os.path.join(root, file)
size, firstnode, mapping = parsefileblob(filepath, decompress)
for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping):
- if linknode == nullid:
+ if linknode == sha1nodeconstants.nullid:
actualpath = os.path.relpath(root, path)
key = fileserverclient.getcachekey(
b"reponame", actualpath, file
@@ -371,7 +371,7 @@
current = node
deltabase = bases[current]
- while deltabase != nullid:
+ while deltabase != sha1nodeconstants.nullid:
if deltabase not in nodes:
ui.warn(
(
@@ -397,7 +397,7 @@
deltabase = bases[current]
# Since ``node`` begins a valid chain, reset/memoize its base to nullid
# so we don't traverse it again.
- bases[node] = nullid
+ bases[node] = sha1nodeconstants.nullid
return failures
--- a/hgext/remotefilelog/fileserverclient.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/fileserverclient.py Mon Jun 07 17:10:35 2021 -0400
@@ -14,7 +14,7 @@
import zlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial import (
error,
pycompat,
@@ -272,7 +272,7 @@
def _getfiles_threaded(
remote, receivemissing, progresstick, missed, idmap, step
):
- remote._callstream(b"getfiles")
+ remote._callstream(b"x_rfl_getfiles")
pipeo = remote._pipeo
pipei = remote._pipei
@@ -599,9 +599,13 @@
# partition missing nodes into nullid and not-nullid so we can
# warn about this filtering potentially shadowing bugs.
- nullids = len([None for unused, id in missingids if id == nullid])
+ nullids = len(
+ [None for unused, id in missingids if id == self.repo.nullid]
+ )
if nullids:
- missingids = [(f, id) for f, id in missingids if id != nullid]
+ missingids = [
+ (f, id) for f, id in missingids if id != self.repo.nullid
+ ]
repo.ui.develwarn(
(
b'remotefilelog not fetching %d null revs'
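
Null nodes are filtered out before fetching, and the develwarn above exists
so that filtering can never silently mask a bug. The partitioning itself is
simple (sketch; names are illustrative):

    SHA1_NULLID = b'\0' * 20

    def split_null_revs(missingids, nullid=SHA1_NULLID):
        # Separate fetchable ids from null ones; the caller warns when any
        # were dropped.
        fetchable = [(f, id) for f, id in missingids if id != nullid]
        dropped = len(missingids) - len(fetchable)
        return fetchable, dropped
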
--- a/hgext/remotefilelog/historypack.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/historypack.py Mon Jun 07 17:10:35 2021 -0400
@@ -2,7 +2,10 @@
import struct
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial import (
pycompat,
util,
@@ -147,9 +150,9 @@
pending.remove(ancnode)
p1node = entry[ANC_P1NODE]
p2node = entry[ANC_P2NODE]
- if p1node != nullid and p1node not in known:
+ if p1node != sha1nodeconstants.nullid and p1node not in known:
pending.add(p1node)
- if p2node != nullid and p2node not in known:
+ if p2node != sha1nodeconstants.nullid and p2node not in known:
pending.add(p2node)
yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)
@@ -457,9 +460,9 @@
def parentfunc(node):
x, p1, p2, x, x, x = entrymap[node]
parents = []
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
parents.append(p1)
- if p2 != nullid:
+ if p2 != sha1nodeconstants.nullid:
parents.append(p2)
return parents
--- a/hgext/remotefilelog/metadatastore.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/metadatastore.py Mon Jun 07 17:10:35 2021 -0400
@@ -1,6 +1,9 @@
from __future__ import absolute_import
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from . import (
basestore,
shallowutil,
@@ -51,9 +54,9 @@
missing.append((name, node))
continue
p1, p2, linknode, copyfrom = value
- if p1 != nullid and p1 not in known:
+ if p1 != sha1nodeconstants.nullid and p1 not in known:
queue.append((copyfrom or curname, p1))
- if p2 != nullid and p2 not in known:
+ if p2 != sha1nodeconstants.nullid and p2 not in known:
queue.append((curname, p2))
return missing
--- a/hgext/remotefilelog/remotefilectx.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/remotefilectx.py Mon Jun 07 17:10:35 2021 -0400
@@ -9,7 +9,7 @@
import collections
import time
-from mercurial.node import bin, hex, nullid, nullrev
+from mercurial.node import bin, hex, nullrev
from mercurial import (
ancestor,
context,
@@ -35,7 +35,7 @@
ancestormap=None,
):
if fileid == nullrev:
- fileid = nullid
+ fileid = repo.nullid
if fileid and len(fileid) == 40:
fileid = bin(fileid)
super(remotefilectx, self).__init__(
@@ -78,7 +78,7 @@
@propertycache
def _linkrev(self):
- if self._filenode == nullid:
+ if self._filenode == self._repo.nullid:
return nullrev
ancestormap = self.ancestormap()
@@ -174,7 +174,7 @@
p1, p2, linknode, copyfrom = ancestormap[self._filenode]
results = []
- if p1 != nullid:
+ if p1 != repo.nullid:
path = copyfrom or self._path
flog = repo.file(path)
p1ctx = remotefilectx(
@@ -183,7 +183,7 @@
p1ctx._descendantrev = self.rev()
results.append(p1ctx)
- if p2 != nullid:
+ if p2 != repo.nullid:
path = self._path
flog = repo.file(path)
p2ctx = remotefilectx(
@@ -504,25 +504,25 @@
if renamed:
p1 = renamed
else:
- p1 = (path, pcl[0]._manifest.get(path, nullid))
+ p1 = (path, pcl[0]._manifest.get(path, self._repo.nullid))
- p2 = (path, nullid)
+ p2 = (path, self._repo.nullid)
if len(pcl) > 1:
- p2 = (path, pcl[1]._manifest.get(path, nullid))
+ p2 = (path, pcl[1]._manifest.get(path, self._repo.nullid))
m = {}
- if p1[1] != nullid:
+ if p1[1] != self._repo.nullid:
p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
m.update(p1ctx.filelog().ancestormap(p1[1]))
- if p2[1] != nullid:
+ if p2[1] != self._repo.nullid:
p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
m.update(p2ctx.filelog().ancestormap(p2[1]))
copyfrom = b''
if renamed:
copyfrom = renamed[0]
- m[None] = (p1[1], p2[1], nullid, copyfrom)
+ m[None] = (p1[1], p2[1], self._repo.nullid, copyfrom)
self._ancestormap = m
return self._ancestormap
--- a/hgext/remotefilelog/remotefilelog.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/remotefilelog.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,12 +10,7 @@
import collections
import os
-from mercurial.node import (
- bin,
- nullid,
- wdirfilenodeids,
- wdirid,
-)
+from mercurial.node import bin
from mercurial.i18n import _
from mercurial import (
ancestor,
@@ -100,7 +95,7 @@
pancestors = {}
queue = []
- if realp1 != nullid:
+ if realp1 != self.repo.nullid:
p1flog = self
if copyfrom:
p1flog = remotefilelog(self.opener, copyfrom, self.repo)
@@ -108,7 +103,7 @@
pancestors.update(p1flog.ancestormap(realp1))
queue.append(realp1)
visited.add(realp1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
pancestors.update(self.ancestormap(p2))
queue.append(p2)
visited.add(p2)
@@ -129,10 +124,10 @@
pacopyfrom,
)
- if pa1 != nullid and pa1 not in visited:
+ if pa1 != self.repo.nullid and pa1 not in visited:
queue.append(pa1)
visited.add(pa1)
- if pa2 != nullid and pa2 not in visited:
+ if pa2 != self.repo.nullid and pa2 not in visited:
queue.append(pa2)
visited.add(pa2)
@@ -238,7 +233,7 @@
returns True if text is different than what is stored.
"""
- if node == nullid:
+ if node == self.repo.nullid:
return True
nodetext = self.read(node)
@@ -275,13 +270,13 @@
return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
def parents(self, node):
- if node == nullid:
- return nullid, nullid
+ if node == self.repo.nullid:
+ return self.repo.nullid, self.repo.nullid
ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
p1, p2, linknode, copyfrom = ancestormap[node]
if copyfrom:
- p1 = nullid
+ p1 = self.repo.nullid
return p1, p2
@@ -317,8 +312,8 @@
if prevnode is None:
basenode = prevnode = p1
if basenode == node:
- basenode = nullid
- if basenode != nullid:
+ basenode = self.repo.nullid
+ if basenode != self.repo.nullid:
revision = None
delta = self.revdiff(basenode, node)
else:
@@ -336,6 +331,8 @@
delta=delta,
# Sidedata is not supported yet
sidedata=None,
+ # Protocol flags are not used yet
+ protocol_flags=0,
)
def revdiff(self, node1, node2):
@@ -380,13 +377,16 @@
this is generally only used for bundling and communicating with vanilla
hg clients.
"""
- if node == nullid:
+ if node == self.repo.nullid:
return b""
if len(node) != 20:
raise error.LookupError(
node, self.filename, _(b'invalid revision input')
)
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.repo.nodeconstants.wdirid
+ or node in self.repo.nodeconstants.wdirfilenodeids
+ ):
raise error.WdirUnsupported
store = self.repo.contentstore
@@ -432,8 +432,8 @@
return self.repo.metadatastore.getancestors(self.filename, node)
def ancestor(self, a, b):
- if a == nullid or b == nullid:
- return nullid
+ if a == self.repo.nullid or b == self.repo.nullid:
+ return self.repo.nullid
revmap, parentfunc = self._buildrevgraph(a, b)
nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -442,13 +442,13 @@
if ancs:
# choose a consistent winner when there's a tie
return min(map(nodemap.__getitem__, ancs))
- return nullid
+ return self.repo.nullid
def commonancestorsheads(self, a, b):
"""calculate all the heads of the common ancestors of nodes a and b"""
- if a == nullid or b == nullid:
- return nullid
+ if a == self.repo.nullid or b == self.repo.nullid:
+ return self.repo.nullid
revmap, parentfunc = self._buildrevgraph(a, b)
nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -472,10 +472,10 @@
p1, p2, linknode, copyfrom = pdata
# Don't follow renames (copyfrom).
# remotefilectx.ancestor does that.
- if p1 != nullid and not copyfrom:
+ if p1 != self.repo.nullid and not copyfrom:
parents.append(p1)
allparents.add(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
parents.append(p2)
allparents.add(p2)
--- a/hgext/remotefilelog/remotefilelogserver.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/remotefilelogserver.py Mon Jun 07 17:10:35 2021 -0400
@@ -13,7 +13,7 @@
import zlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial.pycompat import open
from mercurial import (
changegroup,
@@ -242,7 +242,7 @@
filecachepath = os.path.join(cachepath, path, hex(node))
if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
filectx = repo.filectx(path, fileid=node)
- if filectx.node() == nullid:
+ if filectx.node() == repo.nullid:
repo.changelog = changelog.changelog(repo.svfs)
filectx = repo.filectx(path, fileid=node)
@@ -284,7 +284,7 @@
"""A server api for requesting a filelog's heads"""
flog = repo.file(path)
heads = flog.heads()
- return b'\n'.join((hex(head) for head in heads if head != nullid))
+ return b'\n'.join((hex(head) for head in heads if head != repo.nullid))
def getfile(repo, proto, file, node):
@@ -302,7 +302,7 @@
if not cachepath:
cachepath = os.path.join(repo.path, b"remotefilelogcache")
node = bin(node.strip())
- if node == nullid:
+ if node == repo.nullid:
return b'0\0'
return b'0\0' + _loadfileblob(repo, cachepath, file, node)
@@ -327,7 +327,7 @@
break
node = bin(request[:40])
- if node == nullid:
+ if node == repo.nullid:
yield b'0\n'
continue
@@ -380,8 +380,8 @@
ancestortext = b""
for ancestorctx in ancestors:
parents = ancestorctx.parents()
- p1 = nullid
- p2 = nullid
+ p1 = repo.nullid
+ p2 = repo.nullid
if len(parents) > 0:
p1 = parents[0].filenode()
if len(parents) > 1:
--- a/hgext/remotefilelog/repack.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/repack.py Mon Jun 07 17:10:35 2021 -0400
@@ -4,10 +4,7 @@
import time
from mercurial.i18n import _
-from mercurial.node import (
- nullid,
- short,
-)
+from mercurial.node import short
from mercurial import (
encoding,
error,
@@ -586,7 +583,7 @@
# Create one contiguous chain and reassign deltabases.
for i, node in enumerate(orphans):
if i == 0:
- deltabases[node] = (nullid, 0)
+ deltabases[node] = (self.repo.nullid, 0)
else:
parent = orphans[i - 1]
deltabases[node] = (parent, deltabases[parent][1] + 1)
@@ -676,8 +673,8 @@
# of immediate child
deltatuple = deltabases.get(node, None)
if deltatuple is None:
- deltabase, chainlen = nullid, 0
- deltabases[node] = (nullid, 0)
+ deltabase, chainlen = self.repo.nullid, 0
+ deltabases[node] = (self.repo.nullid, 0)
nobase.add(node)
else:
deltabase, chainlen = deltatuple
@@ -692,7 +689,7 @@
# file was copied from elsewhere. So don't attempt to do any
# deltas with the other file.
if copyfrom:
- p1 = nullid
+ p1 = self.repo.nullid
if chainlen < maxchainlen:
# Record this child as the delta base for its parents.
@@ -700,9 +697,9 @@
# many children, and this will only choose the last one.
# TODO: record all children and try all deltas to find
# best
- if p1 != nullid:
+ if p1 != self.repo.nullid:
deltabases[p1] = (node, chainlen + 1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
deltabases[p2] = (node, chainlen + 1)
# experimental config: repack.chainorphansbysize
@@ -719,7 +716,7 @@
# TODO: Optimize the deltachain fetching. Since we're
# iterating over the different version of the file, we may
# be fetching the same deltachain over and over again.
- if deltabase != nullid:
+ if deltabase != self.repo.nullid:
deltaentry = self.data.getdelta(filename, node)
delta, deltabasename, origdeltabase, meta = deltaentry
size = meta.get(constants.METAKEYSIZE)
@@ -791,9 +788,9 @@
# If copyfrom == filename, it means the copy history
# went to some other file, then came back to this one, so we
# should continue processing it.
- if p1 != nullid and copyfrom != filename:
+ if p1 != self.repo.nullid and copyfrom != filename:
dontprocess.add(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
dontprocess.add(p2)
continue
@@ -814,9 +811,9 @@
def parentfunc(node):
p1, p2, linknode, copyfrom = ancestors[node]
parents = []
- if p1 != nullid:
+ if p1 != self.repo.nullid:
parents.append(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
parents.append(p2)
return parents
--- a/hgext/remotefilelog/shallowbundle.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/shallowbundle.py Mon Jun 07 17:10:35 2021 -0400
@@ -7,7 +7,7 @@
from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial import (
bundlerepo,
changegroup,
@@ -143,7 +143,7 @@
def nodechunk(self, revlog, node, prevnode, linknode):
prefix = b''
- if prevnode == nullid:
+ if prevnode == revlog.nullid:
delta = revlog.rawdata(node)
prefix = mdiff.trivialdiffheader(len(delta))
else:
@@ -225,7 +225,17 @@
chain = None
while True:
- # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
+ # returns: None or (
+ # node,
+ # p1,
+ # p2,
+ # cs,
+ # deltabase,
+ # delta,
+ # flags,
+ # sidedata,
+ # proto_flags
+ # )
revisiondata = source.deltachunk(chain)
if not revisiondata:
break
@@ -245,7 +255,7 @@
processed = set()
def available(f, node, depf, depnode):
- if depnode != nullid and (depf, depnode) not in processed:
+ if depnode != repo.nullid and (depf, depnode) not in processed:
if not (depf, depnode) in revisiondatas:
# It's not in the changegroup, assume it's already
# in the repo
@@ -263,11 +273,11 @@
prefetchfiles = []
for f, node in queue:
revisiondata = revisiondatas[(f, node)]
- # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
+ # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl)
dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
for dependent in dependents:
- if dependent == nullid or (f, dependent) in revisiondatas:
+ if dependent == repo.nullid or (f, dependent) in revisiondatas:
continue
prefetchfiles.append((f, hex(dependent)))
@@ -287,8 +297,18 @@
fl = repo.file(f)
revisiondata = revisiondatas[(f, node)]
- # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
- node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata
+ # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl)
+ (
+ node,
+ p1,
+ p2,
+ linknode,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ proto_flags,
+ ) = revisiondata
if not available(f, node, f, deltabase):
continue
@@ -306,7 +326,7 @@
continue
for p in [p1, p2]:
- if p != nullid:
+ if p != repo.nullid:
if not available(f, node, f, p):
continue
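
Alongside the nullid change, shallowbundle adapts to the widened changegroup tuples: `deltachunk()` now yields nine fields, with `sidedata` and `proto_flags` at the end. A consumer sketch under that assumption (the driver function itself is hypothetical):

    def drain_deltas(source):
        # source.deltachunk(chain) returns the 9-tuple documented above,
        # or a falsy value at end of stream.
        chain = None
        while True:
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                return
            (node, p1, p2, cs, deltabase, delta, flags,
             sidedata, proto_flags) = revisiondata
            chain = node
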
--- a/hgext/remotefilelog/shallowrepo.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/remotefilelog/shallowrepo.py Mon Jun 07 17:10:35 2021 -0400
@@ -9,7 +9,7 @@
import os
from mercurial.i18n import _
-from mercurial.node import hex, nullid, nullrev
+from mercurial.node import hex, nullrev
from mercurial import (
encoding,
error,
@@ -206,8 +206,8 @@
m1 = ctx.p1().manifest()
files = []
for f in ctx.modified() + ctx.added():
- fparent1 = m1.get(f, nullid)
- if fparent1 != nullid:
+ fparent1 = m1.get(f, self.nullid)
+ if fparent1 != self.nullid:
files.append((f, hex(fparent1)))
self.fileservice.prefetch(files)
return super(shallowrepository, self).commitctx(
--- a/hgext/sqlitestore.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/sqlitestore.py Mon Jun 07 17:10:35 2021 -0400
@@ -52,7 +52,6 @@
from mercurial.i18n import _
from mercurial.node import (
- nullid,
nullrev,
sha1nodeconstants,
short,
@@ -290,6 +289,7 @@
revision = attr.ib()
delta = attr.ib()
sidedata = attr.ib()
+ protocol_flags = attr.ib()
linknode = attr.ib(default=None)
@@ -366,12 +366,12 @@
)
if p1rev == nullrev:
- p1node = nullid
+ p1node = sha1nodeconstants.nullid
else:
p1node = self._revtonode[p1rev]
if p2rev == nullrev:
- p2node = nullid
+ p2node = sha1nodeconstants.nullid
else:
p2node = self._revtonode[p2rev]
@@ -400,7 +400,7 @@
return iter(pycompat.xrange(len(self._revisions)))
def hasnode(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return False
return node in self._nodetorev
@@ -411,8 +411,8 @@
)
def parents(self, node):
- if node == nullid:
- return nullid, nullid
+ if node == sha1nodeconstants.nullid:
+ return sha1nodeconstants.nullid, sha1nodeconstants.nullid
if node not in self._revisions:
raise error.LookupError(node, self._path, _(b'no node'))
@@ -431,7 +431,7 @@
return entry.p1rev, entry.p2rev
def rev(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return nullrev
if node not in self._nodetorev:
@@ -441,7 +441,7 @@
def node(self, rev):
if rev == nullrev:
- return nullid
+ return sha1nodeconstants.nullid
if rev not in self._revtonode:
raise IndexError(rev)
@@ -485,7 +485,7 @@
def heads(self, start=None, stop=None):
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [sha1nodeconstants.nullid]
startrev = self.rev(start) if start is not None else nullrev
stoprevs = {self.rev(n) for n in stop or []}
@@ -529,7 +529,7 @@
return len(self.revision(node))
def revision(self, node, raw=False, _verifyhash=True):
- if node in (nullid, nullrev):
+ if node in (sha1nodeconstants.nullid, nullrev):
return b''
if isinstance(node, int):
@@ -596,7 +596,7 @@
b'unhandled value for nodesorder: %s' % nodesorder
)
- nodes = [n for n in nodes if n != nullid]
+ nodes = [n for n in nodes if n != sha1nodeconstants.nullid]
if not nodes:
return
@@ -705,12 +705,12 @@
raise SQLiteStoreError(b'unhandled revision flag')
if maybemissingparents:
- if p1 != nullid and not self.hasnode(p1):
- p1 = nullid
+ if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
+ p1 = sha1nodeconstants.nullid
storeflags |= FLAG_MISSING_P1
- if p2 != nullid and not self.hasnode(p2):
- p2 = nullid
+ if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
+ p2 = sha1nodeconstants.nullid
storeflags |= FLAG_MISSING_P2
baserev = self.rev(deltabase)
@@ -736,7 +736,10 @@
# Possibly reset parents to make them proper.
entry = self._revisions[node]
- if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
+ if (
+ entry.flags & FLAG_MISSING_P1
+ and p1 != sha1nodeconstants.nullid
+ ):
entry.p1node = p1
entry.p1rev = self._nodetorev[p1]
entry.flags &= ~FLAG_MISSING_P1
@@ -746,7 +749,10 @@
(self._nodetorev[p1], entry.flags, entry.rid),
)
- if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
+ if (
+ entry.flags & FLAG_MISSING_P2
+ and p2 != sha1nodeconstants.nullid
+ ):
entry.p2node = p2
entry.p2rev = self._nodetorev[p2]
entry.flags &= ~FLAG_MISSING_P2
@@ -761,7 +767,7 @@
empty = False
continue
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
text = mdiff.patch(b'', delta)
storedelta = None
else:
@@ -1012,7 +1018,7 @@
assert revisiondata is not None
deltabase = p1
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
delta = revisiondata
else:
delta = mdiff.textdiff(
@@ -1021,7 +1027,7 @@
# File index stores a pointer to its delta and the parent delta.
# The parent delta is stored via a pointer to the fileindex PK.
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
baseid = None
else:
baseid = self._revisions[deltabase].rid
@@ -1055,12 +1061,12 @@
rev = len(self)
- if p1 == nullid:
+ if p1 == sha1nodeconstants.nullid:
p1rev = nullrev
else:
p1rev = self._nodetorev[p1]
- if p2 == nullid:
+ if p2 == sha1nodeconstants.nullid:
p2rev = nullrev
else:
p2rev = self._nodetorev[p2]
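
The SQLite store holds no repository reference, so it cannot use `repo.nullid`; it reaches for `sha1nodeconstants` from `mercurial.node` instead. A small demonstration of the same choice, assuming a store object with a `parents()` method:

    from mercurial.node import sha1nodeconstants

    def parents_or_null(store, node):
        # Repository-less stores compare against the SHA-1 constants
        # directly instead of a repo-scoped null node.
        if node == sha1nodeconstants.nullid:
            return sha1nodeconstants.nullid, sha1nodeconstants.nullid
        return store.parents(node)
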
--- a/hgext/transplant.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/transplant.py Mon Jun 07 17:10:35 2021 -0400
@@ -22,7 +22,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
short,
)
from mercurial import (
@@ -134,6 +133,7 @@
class transplanter(object):
def __init__(self, ui, repo, opts):
self.ui = ui
+ self.repo = repo
self.path = repo.vfs.join(b'transplant')
self.opener = vfsmod.vfs(self.path)
self.transplants = transplants(
@@ -221,7 +221,7 @@
exchange.pull(repo, source.peer(), heads=[node])
skipmerge = False
- if parents[1] != nullid:
+ if parents[1] != repo.nullid:
if not opts.get(b'parent'):
self.ui.note(
_(b'skipping merge changeset %d:%s\n')
@@ -516,7 +516,7 @@
def parselog(self, fp):
parents = []
message = []
- node = nullid
+ node = self.repo.nullid
inmsg = False
user = None
date = None
@@ -568,7 +568,7 @@
def matchfn(node):
if self.applied(repo, node, root):
return False
- if source.changelog.parents(node)[1] != nullid:
+ if source.changelog.parents(node)[1] != repo.nullid:
return False
extra = source.changelog.read(node)[5]
cnode = extra.get(b'transplant_source')
@@ -804,7 +804,7 @@
tp = transplanter(ui, repo, opts)
p1 = repo.dirstate.p1()
- if len(repo) > 0 and p1 == nullid:
+ if len(repo) > 0 and p1 == repo.nullid:
raise error.Abort(_(b'no revision checked out'))
if opts.get(b'continue'):
if not tp.canresume():
--- a/hgext/uncommit.py Sun Jun 06 01:24:30 2021 +0200
+++ b/hgext/uncommit.py Mon Jun 07 17:10:35 2021 -0400
@@ -20,7 +20,6 @@
from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial.node import nullid
from mercurial import (
cmdutil,
@@ -113,7 +112,7 @@
new = context.memctx(
repo,
- parents=[base.node(), nullid],
+ parents=[base.node(), repo.nullid],
text=message,
files=files,
filectxfn=filectxfn,
--- a/mercurial/bookmarks.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/bookmarks.py Mon Jun 07 17:10:35 2021 -0400
@@ -15,7 +15,6 @@
bin,
hex,
short,
- wdirid,
)
from .pycompat import getattr
from . import (
@@ -601,11 +600,12 @@
# if an @pathalias already exists, we overwrite (update) it
if path.startswith(b"file:"):
path = urlutil.url(path).path
- for p, u in ui.configitems(b"paths"):
- if u.startswith(b"file:"):
- u = urlutil.url(u).path
- if path == u:
- return b'%s@%s' % (b, p)
+ for name, p in urlutil.list_paths(ui):
+ loc = p.rawloc
+ if loc.startswith(b"file:"):
+ loc = urlutil.url(loc).path
+ if path == loc:
+ return b'%s@%s' % (b, name)
# assign a unique "@number" suffix newly
for x in range(1, 100):
@@ -642,7 +642,7 @@
binarydata = []
for book, node in bookmarks:
if not node: # None or ''
- node = wdirid
+ node = repo.nodeconstants.wdirid
binarydata.append(_binaryentry.pack(node, len(book)))
binarydata.append(book)
return b''.join(binarydata)
@@ -674,7 +674,7 @@
if len(bookmark) < length:
if entry:
raise error.Abort(_(b'bad bookmark stream'))
- if node == wdirid:
+ if node == repo.nodeconstants.wdirid:
node = None
books.append((bookmark, node))
return books
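
In the binary bookmark encoding, "no node" is represented by the working-directory id, now taken from `repo.nodeconstants` so it has the right width for the repository. A round-trip sketch of just the sentinel handling (helper names are illustrative; the `20s` layout assumes SHA-1 nodes):

    import struct

    _binaryentry = struct.Struct(b'20sH')  # node, bookmark name length

    def encode_one(repo, book, node):
        if not node:  # None or b'' means "no node"
            node = repo.nodeconstants.wdirid
        return _binaryentry.pack(node, len(book)) + book

    def decode_node(repo, node):
        # The sentinel decodes back to None on the receiving side.
        return None if node == repo.nodeconstants.wdirid else node
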
--- a/mercurial/branchmap.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/branchmap.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,7 +12,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from . import (
@@ -189,7 +188,7 @@
self,
repo,
entries=(),
- tipnode=nullid,
+ tipnode=None,
tiprev=nullrev,
filteredhash=None,
closednodes=None,
@@ -200,7 +199,10 @@
has a given node or not. If it's not provided, we assume that every node
we have exists in changelog"""
self._repo = repo
- self.tipnode = tipnode
+ if tipnode is None:
+ self.tipnode = repo.nullid
+ else:
+ self.tipnode = tipnode
self.tiprev = tiprev
self.filteredhash = filteredhash
# closednodes is a set of nodes that close their branch. If the branch
@@ -536,7 +538,7 @@
if not self.validfor(repo):
# cache key are not valid anymore
- self.tipnode = nullid
+ self.tipnode = repo.nullid
self.tiprev = nullrev
for heads in self.iterheads():
tiprev = max(cl.rev(node) for node in heads)
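
The branchcache constructor can no longer default `tipnode` to a module constant, because the correct null node is only known once `repo` is in hand; the default becomes `None` and is resolved in the body. The general shape of that idiom:

    class cache(object):
        def __init__(self, repo, tipnode=None):
            # Defaults that depend on the instance are resolved in the
            # body, not in the signature.
            if tipnode is None:
                self.tipnode = repo.nullid
            else:
                self.tipnode = tipnode
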
--- a/mercurial/bundle2.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/bundle2.py Mon Jun 07 17:10:35 2021 -0400
@@ -158,7 +158,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
from . import (
@@ -181,6 +180,7 @@
stringutil,
urlutil,
)
+from .interfaces import repository
urlerr = util.urlerr
urlreq = util.urlreq
@@ -1730,8 +1730,8 @@
part.addparam(
b'targetphase', b'%d' % phases.secret, mandatory=False
)
- if b'exp-sidedata-flag' in repo.requirements:
- part.addparam(b'exp-sidedata', b'1')
+ if repository.REPO_FEATURE_SIDE_DATA in repo.features:
+ part.addparam(b'exp-sidedata', b'1')
if opts.get(b'streamv2', False):
addpartbundlestream2(bundler, repo, stream=True)
@@ -2014,13 +2014,6 @@
)
scmutil.writereporequirements(op.repo)
- bundlesidedata = bool(b'exp-sidedata' in inpart.params)
- reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
- if reposidedata and not bundlesidedata:
- msg = b"repository is using sidedata but the bundle source do not"
- hint = b'this is currently unsupported'
- raise error.Abort(msg, hint=hint)
-
extrakwargs = {}
targetphase = inpart.params.get(b'targetphase')
if targetphase is not None:
@@ -2576,7 +2569,7 @@
fullnodes=commonnodes,
)
cgdata = packer.generate(
- {nullid},
+ {repo.nullid},
list(commonnodes),
False,
b'narrow_widen',
@@ -2587,9 +2580,9 @@
part.addparam(b'version', cgversion)
if scmutil.istreemanifest(repo):
part.addparam(b'treemanifest', b'1')
- if b'exp-sidedata-flag' in repo.requirements:
- part.addparam(b'exp-sidedata', b'1')
- wanted = format_remote_wanted_sidedata(repo)
- part.addparam(b'exp-wanted-sidedata', wanted)
+ if repository.REPO_FEATURE_SIDE_DATA in repo.features:
+ part.addparam(b'exp-sidedata', b'1')
+ wanted = format_remote_wanted_sidedata(repo)
+ part.addparam(b'exp-wanted-sidedata', wanted)
return bundler
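
Sidedata support is now advertised through the repository's feature set instead of a requirements string, so any storage backend that declares the feature is covered. A minimal sketch, assuming `repo.features` is a set of `repository.REPO_FEATURE_*` constants as in this series:

    from mercurial.interfaces import repository

    def wants_sidedata_param(repo):
        # Feature sets describe what the store can do; requirements
        # describe how that capability is enabled on disk.
        return repository.REPO_FEATURE_SIDE_DATA in repo.features
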
--- a/mercurial/bundlecaches.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/bundlecaches.py Mon Jun 07 17:10:35 2021 -0400
@@ -167,6 +167,8 @@
# Generaldelta repos require v2.
if requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements:
version = b'v2'
+ elif requirementsmod.REVLOGV2_REQUIREMENT in repo.requirements:
+ version = b'v2'
# Modern compression engines require v2.
if compression not in _bundlespecv1compengines:
version = b'v2'
--- a/mercurial/bundlerepo.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/bundlerepo.py Mon Jun 07 17:10:35 2021 -0400
@@ -19,7 +19,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
@@ -40,6 +39,7 @@
phases,
pycompat,
revlog,
+ revlogutils,
util,
vfs as vfsmod,
)
@@ -47,9 +47,13 @@
urlutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
class bundlerevlog(revlog.revlog):
- def __init__(self, opener, indexfile, cgunpacker, linkmapper):
+ def __init__(self, opener, target, radix, cgunpacker, linkmapper):
# How it works:
# To retrieve a revision, we need to know the offset of the revision in
# the bundle (an unbundle object). We store this offset in the index
@@ -58,7 +62,7 @@
# To differentiate a rev in the bundle from a rev in the revlog, we
# check revision against repotiprev.
opener = vfsmod.readonlyvfs(opener)
- revlog.revlog.__init__(self, opener, indexfile)
+ revlog.revlog.__init__(self, opener, target=target, radix=radix)
self.bundle = cgunpacker
n = len(self)
self.repotiprev = n - 1
@@ -81,25 +85,25 @@
for p in (p1, p2):
if not self.index.has_node(p):
raise error.LookupError(
- p, self.indexfile, _(b"unknown parent")
+ p, self.display_id, _(b"unknown parent")
)
if not self.index.has_node(deltabase):
raise LookupError(
- deltabase, self.indexfile, _(b'unknown delta base')
+ deltabase, self.display_id, _(b'unknown delta base')
)
baserev = self.rev(deltabase)
- # start, size, full unc. size, base (unused), link, p1, p2, node
- e = (
- revlog.offset_type(start, flags),
- size,
- -1,
- baserev,
- linkrev,
- self.rev(p1),
- self.rev(p2),
- node,
+ # start, size, full unc. size, base (unused), link, p1, p2, node,
+ # sidedata_offset (unused), sidedata_size (unused)
+ e = revlogutils.entry(
+ flags=flags,
+ data_offset=start,
+ data_compressed_length=size,
+ data_delta_base=baserev,
+ link_rev=linkrev,
+ parent_rev_1=self.rev(p1),
+ parent_rev_2=self.rev(p2),
+ node_id=node,
)
self.index.append(e)
self.bundlerevs.add(n)
@@ -172,7 +176,12 @@
changelog.changelog.__init__(self, opener)
linkmapper = lambda x: x
bundlerevlog.__init__(
- self, opener, self.indexfile, cgunpacker, linkmapper
+ self,
+ opener,
+ (revlog_constants.KIND_CHANGELOG, None),
+ self.radix,
+ cgunpacker,
+ linkmapper,
)
@@ -188,7 +197,12 @@
):
manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
bundlerevlog.__init__(
- self, opener, self.indexfile, cgunpacker, linkmapper
+ self,
+ opener,
+ (revlog_constants.KIND_MANIFESTLOG, dir),
+ self._revlog.radix,
+ cgunpacker,
+ linkmapper,
)
if dirlogstarts is None:
dirlogstarts = {}
@@ -215,7 +229,12 @@
def __init__(self, opener, path, cgunpacker, linkmapper):
filelog.filelog.__init__(self, opener, path)
self._revlog = bundlerevlog(
- opener, self.indexfile, cgunpacker, linkmapper
+ opener,
+ # XXX should use the unencoded path
+ target=(revlog_constants.KIND_FILELOG, path),
+ radix=self._revlog.radix,
+ cgunpacker=cgunpacker,
+ linkmapper=linkmapper,
)
@@ -447,7 +466,9 @@
return encoding.getcwd() # always outside the repo
# Check if parents exist in localrepo before setting
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = self.nullid
p1rev = self.changelog.rev(p1)
p2rev = self.changelog.rev(p2)
msg = _(b"setting parent to node %s that only exists in the bundle\n")
--- a/mercurial/cext/manifest.c Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/cext/manifest.c Mon Jun 07 17:10:35 2021 -0400
@@ -28,6 +28,7 @@
typedef struct {
PyObject_HEAD
PyObject *pydata;
+ Py_ssize_t nodelen;
line *lines;
int numlines; /* number of line entries */
int livelines; /* number of non-deleted lines */
@@ -49,12 +50,11 @@
}
/* get the node value of a single line */
-static PyObject *nodeof(line *l, char *flag)
+static PyObject *nodeof(Py_ssize_t nodelen, line *l, char *flag)
{
char *s = l->start;
Py_ssize_t llen = pathlen(l);
Py_ssize_t hlen = l->len - llen - 2;
- Py_ssize_t hlen_raw;
PyObject *hash;
if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */
PyErr_SetString(PyExc_ValueError, "manifest line too short");
@@ -73,36 +73,29 @@
break;
}
- switch (hlen) {
- case 40: /* sha1 */
- hlen_raw = 20;
- break;
- case 64: /* new hash */
- hlen_raw = 32;
- break;
- default:
+ if (hlen != 2 * nodelen) {
PyErr_SetString(PyExc_ValueError, "invalid node length in manifest");
return NULL;
}
- hash = unhexlify(s + llen + 1, hlen_raw * 2);
+ hash = unhexlify(s + llen + 1, nodelen * 2);
if (!hash) {
return NULL;
}
if (l->hash_suffix != '\0') {
char newhash[33];
- memcpy(newhash, PyBytes_AsString(hash), hlen_raw);
+ memcpy(newhash, PyBytes_AsString(hash), nodelen);
Py_DECREF(hash);
- newhash[hlen_raw] = l->hash_suffix;
- hash = PyBytes_FromStringAndSize(newhash, hlen_raw+1);
+ newhash[nodelen] = l->hash_suffix;
+ hash = PyBytes_FromStringAndSize(newhash, nodelen + 1);
}
return hash;
}
/* get the node hash and flags of a line as a tuple */
-static PyObject *hashflags(line *l)
+static PyObject *hashflags(Py_ssize_t nodelen, line *l)
{
char flag;
- PyObject *hash = nodeof(l, &flag);
+ PyObject *hash = nodeof(nodelen, l, &flag);
PyObject *flags;
PyObject *tup;
@@ -190,17 +183,23 @@
static int lazymanifest_init(lazymanifest *self, PyObject *args)
{
char *data;
- Py_ssize_t len;
+ Py_ssize_t nodelen, len;
int err, ret;
PyObject *pydata;
lazymanifest_init_early(self);
- if (!PyArg_ParseTuple(args, "S", &pydata)) {
+ if (!PyArg_ParseTuple(args, "nS", &nodelen, &pydata)) {
return -1;
}
- err = PyBytes_AsStringAndSize(pydata, &data, &len);
+ if (nodelen != 20 && nodelen != 32) {
+ /* See fixed buffer in nodeof */
+ PyErr_Format(PyExc_ValueError, "Unsupported node length");
+ return -1;
+ }
+ self->nodelen = nodelen;
+ self->dirty = false;
- self->dirty = false;
+ err = PyBytes_AsStringAndSize(pydata, &data, &len);
if (err == -1)
return -1;
self->pydata = pydata;
@@ -291,17 +290,18 @@
static PyObject *lmiter_iterentriesnext(PyObject *o)
{
+ lmIter *self = (lmIter *)o;
Py_ssize_t pl;
line *l;
char flag;
PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL;
- l = lmiter_nextline((lmIter *)o);
+ l = lmiter_nextline(self);
if (!l) {
goto done;
}
pl = pathlen(l);
path = PyBytes_FromStringAndSize(l->start, pl);
- hash = nodeof(l, &flag);
+ hash = nodeof(self->m->nodelen, l, &flag);
if (!path || !hash) {
goto done;
}
@@ -471,7 +471,7 @@
PyErr_Format(PyExc_KeyError, "No such manifest entry.");
return NULL;
}
- return hashflags(hit);
+ return hashflags(self->nodelen, hit);
}
static int lazymanifest_delitem(lazymanifest *self, PyObject *key)
@@ -568,13 +568,13 @@
pyhash = PyTuple_GetItem(value, 0);
if (!PyBytes_Check(pyhash)) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20 or 32 bytes string");
+ "node must be a %zi bytes string", self->nodelen);
return -1;
}
hlen = PyBytes_Size(pyhash);
- if (hlen != 20 && hlen != 32) {
+ if (hlen != self->nodelen) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20 or 32 bytes string");
+ "node must be a %zi bytes string", self->nodelen);
return -1;
}
hash = PyBytes_AsString(pyhash);
@@ -739,6 +739,7 @@
goto nomem;
}
lazymanifest_init_early(copy);
+ copy->nodelen = self->nodelen;
copy->numlines = self->numlines;
copy->livelines = self->livelines;
copy->dirty = false;
@@ -777,6 +778,7 @@
goto nomem;
}
lazymanifest_init_early(copy);
+ copy->nodelen = self->nodelen;
copy->dirty = true;
copy->lines = malloc(self->maxlines * sizeof(line));
if (!copy->lines) {
@@ -872,7 +874,7 @@
if (!key)
goto nomem;
if (result < 0) {
- PyObject *l = hashflags(left);
+ PyObject *l = hashflags(self->nodelen, left);
if (!l) {
goto nomem;
}
@@ -885,7 +887,7 @@
Py_DECREF(outer);
sneedle++;
} else if (result > 0) {
- PyObject *r = hashflags(right);
+ PyObject *r = hashflags(self->nodelen, right);
if (!r) {
goto nomem;
}
@@ -902,12 +904,12 @@
if (left->len != right->len
|| memcmp(left->start, right->start, left->len)
|| left->hash_suffix != right->hash_suffix) {
- PyObject *l = hashflags(left);
+ PyObject *l = hashflags(self->nodelen, left);
PyObject *r;
if (!l) {
goto nomem;
}
- r = hashflags(right);
+ r = hashflags(self->nodelen, right);
if (!r) {
Py_DECREF(l);
goto nomem;
--- a/mercurial/cext/parsers.c Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/cext/parsers.c Mon Jun 07 17:10:35 2021 -0400
@@ -668,7 +668,7 @@
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);
-static const int version = 17;
+static const int version = 20;
static void module_init(PyObject *mod)
{
--- a/mercurial/cext/parsers.pyi Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/cext/parsers.pyi Mon Jun 07 17:10:35 2021 -0400
@@ -29,7 +29,7 @@
# From manifest.c
class lazymanifest:
- def __init__(self, data: bytes): ...
+ def __init__(self, nodelen: int, data: bytes): ...
def __iter__(self) -> Iterator[bytes]: ...
def __len__(self) -> int: ...
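
Callers of the C `lazymanifest` now pass the node length up front, and only 20 (SHA-1) and 32 are accepted, matching the fixed buffer in `nodeof`. Usage sketch, assuming the C extension is built:

    from mercurial.cext import parsers

    # SHA-1 manifests: 20-byte nodes, 40 hex digits per line.
    lm = parsers.lazymanifest(20, b'')
    assert len(lm) == 0
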
--- a/mercurial/cext/revlog.c Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/cext/revlog.c Mon Jun 07 17:10:35 2021 -0400
@@ -99,7 +99,12 @@
int ntlookups; /* # lookups */
int ntmisses; /* # lookups that miss the cache */
int inlined;
- long hdrsize; /* size of index headers. Differs in v1 v.s. v2 format */
+ long entry_size; /* size of an index entry. Differs in v1 vs. v2
+ formats */
+ long rust_ext_compat; /* compatibility with being used in rust
+ extensions */
+ char format_version; /* format version of the index (v1 or v2) */
};
static Py_ssize_t index_length(const indexObject *self)
@@ -115,18 +120,21 @@
static int index_find_node(indexObject *self, const char *node);
#if LONG_MAX == 0x7fffffffL
-static const char *const v1_tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
-static const char *const v2_tuple_format = PY23("Kiiiiiis#Ki", "Kiiiiiiy#Ki");
+static const char *const tuple_format = PY23("Kiiiiiis#KiBB", "Kiiiiiiy#KiBB");
#else
-static const char *const v1_tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
-static const char *const v2_tuple_format = PY23("kiiiiiis#ki", "kiiiiiiy#ki");
+static const char *const tuple_format = PY23("kiiiiiis#kiBB", "kiiiiiiy#kiBB");
#endif
/* A RevlogNG v1 index entry is 64 bytes long. */
-static const long v1_hdrsize = 64;
+static const long v1_entry_size = 64;
/* A Revlogv2 index entry is 96 bytes long. */
-static const long v2_hdrsize = 96;
+static const long v2_entry_size = 96;
+
+static const long format_v1 = 1; /* Internal only, could be any number */
+static const long format_v2 = 2; /* Internal only, could be any number */
+
+static const char comp_mode_inline = 2;
static void raise_revlog_error(void)
{
@@ -164,7 +172,7 @@
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
if (pos >= self->length)
- return self->added + (pos - self->length) * self->hdrsize;
+ return self->added + (pos - self->length) * self->entry_size;
if (self->inlined && pos > 0) {
if (self->offsets == NULL) {
@@ -181,7 +189,7 @@
return self->offsets[pos];
}
- return (const char *)(self->buf.buf) + pos * self->hdrsize;
+ return (const char *)(self->buf.buf) + pos * self->entry_size;
}
/*
@@ -290,6 +298,7 @@
uint64_t offset_flags, sidedata_offset;
int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
sidedata_comp_len;
+ char data_comp_mode, sidedata_comp_mode;
const char *c_node_id;
const char *data;
Py_ssize_t length = index_length(self);
@@ -328,19 +337,70 @@
parent_2 = getbe32(data + 28);
c_node_id = data + 32;
- if (self->hdrsize == v1_hdrsize) {
- return Py_BuildValue(v1_tuple_format, offset_flags, comp_len,
- uncomp_len, base_rev, link_rev, parent_1,
- parent_2, c_node_id, self->nodelen);
+ if (self->format_version == format_v1) {
+ sidedata_offset = 0;
+ sidedata_comp_len = 0;
+ data_comp_mode = comp_mode_inline;
+ sidedata_comp_mode = comp_mode_inline;
} else {
sidedata_offset = getbe64(data + 64);
sidedata_comp_len = getbe32(data + 72);
-
- return Py_BuildValue(v2_tuple_format, offset_flags, comp_len,
- uncomp_len, base_rev, link_rev, parent_1,
- parent_2, c_node_id, self->nodelen,
- sidedata_offset, sidedata_comp_len);
+ data_comp_mode = data[76] & 3;
+ sidedata_comp_mode = ((data[76] >> 2) & 3);
+ }
+
+ return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
+ base_rev, link_rev, parent_1, parent_2, c_node_id,
+ self->nodelen, sidedata_offset, sidedata_comp_len,
+ data_comp_mode, sidedata_comp_mode);
+}
+/*
+ * Pack header information in binary
+ */
+static PyObject *index_pack_header(indexObject *self, PyObject *args)
+{
+ unsigned int header;
+ char out[4];
+ if (!PyArg_ParseTuple(args, "I", &header)) {
+ return NULL;
+ }
+ if (self->format_version != format_v1) {
+ PyErr_Format(PyExc_RuntimeError,
+ "version header should go in the docket, not the "
+ "index: %lu",
+ header);
+ return NULL;
}
+ putbe32(header, out);
+ return PyBytes_FromStringAndSize(out, 4);
+}
+/*
+ * Return the raw binary string representing a revision
+ */
+static PyObject *index_entry_binary(indexObject *self, PyObject *value)
+{
+ long rev;
+ const char *data;
+ Py_ssize_t length = index_length(self);
+
+ if (!pylong_to_long(value, &rev)) {
+ return NULL;
+ }
+ if (rev < 0 || rev >= length) {
+ PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
+ rev);
+ return NULL;
+ }
+
+ data = index_deref(self, rev);
+ if (data == NULL)
+ return NULL;
+ if (rev == 0 && self->format_version == format_v1) {
+ /* the header is eating the start of the first entry */
+ return PyBytes_FromStringAndSize(data + 4,
+ self->entry_size - 4);
+ }
+ return PyBytes_FromStringAndSize(data, self->entry_size);
}
/*
@@ -393,46 +453,53 @@
{
uint64_t offset_flags, sidedata_offset;
int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
+ char data_comp_mode, sidedata_comp_mode;
Py_ssize_t c_node_id_len, sidedata_comp_len;
const char *c_node_id;
+ char comp_field;
char *data;
- if (self->hdrsize == v1_hdrsize) {
- if (!PyArg_ParseTuple(obj, v1_tuple_format, &offset_flags,
- &comp_len, &uncomp_len, &base_rev,
- &link_rev, &parent_1, &parent_2,
- &c_node_id, &c_node_id_len)) {
- PyErr_SetString(PyExc_TypeError, "8-tuple required");
- return NULL;
- }
- } else {
- if (!PyArg_ParseTuple(obj, v2_tuple_format, &offset_flags,
- &comp_len, &uncomp_len, &base_rev,
- &link_rev, &parent_1, &parent_2,
- &c_node_id, &c_node_id_len,
- &sidedata_offset, &sidedata_comp_len)) {
- PyErr_SetString(PyExc_TypeError, "10-tuple required");
- return NULL;
- }
+ if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len,
+ &uncomp_len, &base_rev, &link_rev, &parent_1,
+ &parent_2, &c_node_id, &c_node_id_len,
+ &sidedata_offset, &sidedata_comp_len,
+ &data_comp_mode, &sidedata_comp_mode)) {
+ PyErr_SetString(PyExc_TypeError, "11-tuple required");
+ return NULL;
}
if (c_node_id_len != self->nodelen) {
PyErr_SetString(PyExc_TypeError, "invalid node");
return NULL;
}
+ if (self->format_version == format_v1) {
+ if (data_comp_mode != comp_mode_inline) {
+ PyErr_Format(PyExc_ValueError,
+ "invalid data compression mode: %i",
+ data_comp_mode);
+ return NULL;
+ }
+ if (sidedata_comp_mode != comp_mode_inline) {
+ PyErr_Format(PyExc_ValueError,
+ "invalid sidedata compression mode: %i",
+ sidedata_comp_mode);
+ return NULL;
+ }
+ }
if (self->new_length == self->added_length) {
size_t new_added_length =
self->added_length ? self->added_length * 2 : 4096;
- void *new_added = PyMem_Realloc(self->added, new_added_length *
- self->hdrsize);
+ void *new_added = PyMem_Realloc(
+ self->added, new_added_length * self->entry_size);
if (!new_added)
return PyErr_NoMemory();
self->added = new_added;
self->added_length = new_added_length;
}
rev = self->length + self->new_length;
- data = self->added + self->hdrsize * self->new_length++;
+ data = self->added + self->entry_size * self->new_length++;
putbe32(offset_flags >> 32, data);
putbe32(offset_flags & 0xffffffffU, data + 4);
putbe32(comp_len, data + 8);
@@ -444,11 +511,14 @@
memcpy(data + 32, c_node_id, c_node_id_len);
/* Padding since SHA-1 is only 20 bytes for now */
memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);
- if (self->hdrsize != v1_hdrsize) {
+ if (self->format_version == format_v2) {
putbe64(sidedata_offset, data + 64);
putbe32(sidedata_comp_len, data + 72);
+ comp_field = data_comp_mode & 3;
+ comp_field = comp_field | (sidedata_comp_mode & 3) << 2;
+ data[76] = comp_field;
/* Padding for 96 bytes alignment */
- memset(data + 76, 0, self->hdrsize - 76);
+ memset(data + 77, 0, self->entry_size - 77);
}
if (self->ntinitialized)
@@ -463,17 +533,18 @@
inside the transaction that creates the given revision. */
static PyObject *index_replace_sidedata_info(indexObject *self, PyObject *args)
{
- uint64_t sidedata_offset;
+ uint64_t offset_flags, sidedata_offset;
int rev;
+ char comp_mode;
Py_ssize_t sidedata_comp_len;
char *data;
#if LONG_MAX == 0x7fffffffL
- const char *const sidedata_format = PY23("nKi", "nKi");
+ const char *const sidedata_format = PY23("nKiKB", "nKiKB");
#else
- const char *const sidedata_format = PY23("nki", "nki");
+ const char *const sidedata_format = PY23("nkikB", "nkikB");
#endif
- if (self->hdrsize == v1_hdrsize || self->inlined) {
+ if (self->entry_size == v1_entry_size || self->inlined) {
/*
There is a bug in the transaction handling when going from an
inline revlog to a separate index and data file. Turn it off until
@@ -485,7 +556,7 @@
}
if (!PyArg_ParseTuple(args, sidedata_format, &rev, &sidedata_offset,
- &sidedata_comp_len))
+ &sidedata_comp_len, &offset_flags, &comp_mode))
return NULL;
if (rev < 0 || rev >= index_length(self)) {
@@ -501,9 +572,11 @@
/* Find the newly added node, offset from the "already on-disk" length
*/
- data = self->added + self->hdrsize * (rev - self->length);
+ data = self->added + self->entry_size * (rev - self->length);
+ putbe64(offset_flags, data);
putbe64(sidedata_offset, data + 64);
putbe32(sidedata_comp_len, data + 72);
+ data[76] = (data[76] & ~(3 << 2)) | ((comp_mode & 3) << 2);
Py_RETURN_NONE;
}
@@ -2652,17 +2725,17 @@
const char *data = (const char *)self->buf.buf;
Py_ssize_t pos = 0;
Py_ssize_t end = self->buf.len;
- long incr = self->hdrsize;
+ long incr = self->entry_size;
Py_ssize_t len = 0;
- while (pos + self->hdrsize <= end && pos >= 0) {
+ while (pos + self->entry_size <= end && pos >= 0) {
uint32_t comp_len, sidedata_comp_len = 0;
/* 3rd element of header is length of compressed inline data */
comp_len = getbe32(data + pos + 8);
- if (self->hdrsize == v2_hdrsize) {
+ if (self->entry_size == v2_entry_size) {
sidedata_comp_len = getbe32(data + pos + 72);
}
- incr = self->hdrsize + comp_len + sidedata_comp_len;
+ incr = self->entry_size + comp_len + sidedata_comp_len;
if (offsets)
offsets[len] = data + pos;
len++;
@@ -2699,6 +2772,7 @@
self->offsets = NULL;
self->nodelen = 20;
self->nullentry = NULL;
+ self->rust_ext_compat = 1;
revlogv2 = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist,
@@ -2715,20 +2789,16 @@
}
if (revlogv2 && PyObject_IsTrue(revlogv2)) {
- self->hdrsize = v2_hdrsize;
+ self->format_version = format_v2;
+ self->entry_size = v2_entry_size;
} else {
- self->hdrsize = v1_hdrsize;
+ self->format_version = format_v1;
+ self->entry_size = v1_entry_size;
}
- if (self->hdrsize == v1_hdrsize) {
- self->nullentry =
- Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
- -1, -1, -1, nullid, self->nodelen);
- } else {
- self->nullentry =
- Py_BuildValue(PY23("iiiiiiis#ii", "iiiiiiiy#ii"), 0, 0, 0,
- -1, -1, -1, -1, nullid, self->nodelen, 0, 0);
- }
+ self->nullentry = Py_BuildValue(
+ PY23("iiiiiiis#iiBB", "iiiiiiiy#iiBB"), 0, 0, 0, -1, -1, -1, -1,
+ nullid, self->nodelen, 0, 0, comp_mode_inline, comp_mode_inline);
if (!self->nullentry)
return -1;
@@ -2751,11 +2821,11 @@
goto bail;
self->length = len;
} else {
- if (size % self->hdrsize) {
+ if (size % self->entry_size) {
PyErr_SetString(PyExc_ValueError, "corrupt index file");
goto bail;
}
- self->length = size / self->hdrsize;
+ self->length = size / self->entry_size;
}
return 0;
@@ -2860,6 +2930,10 @@
{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
"find length of shortest hex nodeid of a binary ID"},
{"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
+ {"entry_binary", (PyCFunction)index_entry_binary, METH_O,
+ "return an entry in binary form"},
+ {"pack_header", (PyCFunction)index_pack_header, METH_VARARGS,
+ "pack the revlog header information into binary"},
{NULL} /* Sentinel */
};
@@ -2869,7 +2943,9 @@
};
static PyMemberDef index_members[] = {
- {"entry_size", T_LONG, offsetof(indexObject, hdrsize), 0,
+ {"entry_size", T_LONG, offsetof(indexObject, entry_size), 0,
+ "size of an index entry"},
+ {"rust_ext_compat", T_LONG, offsetof(indexObject, rust_ext_compat), 0,
"size of an index entry"},
{NULL} /* Sentinel */
};
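
In a v2 index entry the two compression modes share the byte at offset 76: bits 0-1 carry the data mode and bits 2-3 the sidedata mode, exactly as the C code above packs and unpacks them. The same arithmetic in Python, for reference:

    COMP_MODE_INLINE = 2  # matches comp_mode_inline in revlog.c

    def pack_comp_field(data_comp_mode, sidedata_comp_mode):
        # bits 0-1: data compression mode; bits 2-3: sidedata mode
        return (data_comp_mode & 3) | ((sidedata_comp_mode & 3) << 2)

    def unpack_comp_field(byte):
        return byte & 3, (byte >> 2) & 3

    assert unpack_comp_field(pack_comp_field(2, 1)) == (2, 1)
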
--- a/mercurial/changegroup.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/changegroup.py Mon Jun 07 17:10:35 2021 -0400
@@ -7,7 +7,6 @@
from __future__ import absolute_import
-import collections
import os
import struct
import weakref
@@ -15,7 +14,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
)
@@ -34,10 +32,13 @@
from .interfaces import repository
from .revlogutils import sidedata as sidedatamod
+from .revlogutils import constants as revlog_constants
+from .utils import storageutil
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
+_CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b">B20s20s20s20s20sH")
LFS_REQUIREMENT = b'lfs'
@@ -194,19 +195,20 @@
else:
deltabase = prevnode
flags = 0
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
def deltachunk(self, prevnode):
+ # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags)
l = self._chunklength()
if not l:
return {}
headerdata = readexactly(self._stream, self.deltaheadersize)
header = self.deltaheader.unpack(headerdata)
delta = readexactly(self._stream, l - self.deltaheadersize)
- node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
- # cg4 forward-compat
- sidedata = {}
- return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
+ header = self._deltaheader(header, prevnode)
+ node, p1, p2, deltabase, cs, flags, protocol_flags = header
+ return node, p1, p2, cs, deltabase, delta, flags, {}, protocol_flags
def getchunks(self):
"""returns all the chunks contains in the bundle
@@ -293,8 +295,16 @@
# Only useful if we're adding sidedata categories. If both peers have
# the same categories, then we simply don't do anything.
- if self.version == b'04' and srctype == b'pull':
- sidedata_helpers = get_sidedata_helpers(
+ adding_sidedata = (
+ (
+ requirements.REVLOGV2_REQUIREMENT in repo.requirements
+ or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
+ )
+ and self.version == b'04'
+ and srctype == b'pull'
+ )
+ if adding_sidedata:
+ sidedata_helpers = sidedatamod.get_sidedata_helpers(
repo,
sidedata_categories or set(),
pull=True,
@@ -386,15 +396,16 @@
_(b'manifests'), unit=_(b'chunks'), total=changesets
)
on_manifest_rev = None
- if sidedata_helpers and b'manifest' in sidedata_helpers[1]:
+ if sidedata_helpers:
+ if revlog_constants.KIND_MANIFESTLOG in sidedata_helpers[1]:
- def on_manifest_rev(manifest, rev):
- range = touched_manifests.get(manifest)
- if not range:
- touched_manifests[manifest] = (rev, rev)
- else:
- assert rev == range[1] + 1
- touched_manifests[manifest] = (range[0], rev)
+ def on_manifest_rev(manifest, rev):
+ range = touched_manifests.get(manifest)
+ if not range:
+ touched_manifests[manifest] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_manifests[manifest] = (range[0], rev)
self._unpackmanifests(
repo,
@@ -417,15 +428,16 @@
needfiles.setdefault(f, set()).add(n)
on_filelog_rev = None
- if sidedata_helpers and b'filelog' in sidedata_helpers[1]:
+ if sidedata_helpers:
+ if revlog_constants.KIND_FILELOG in sidedata_helpers[1]:
- def on_filelog_rev(filelog, rev):
- range = touched_filelogs.get(filelog)
- if not range:
- touched_filelogs[filelog] = (rev, rev)
- else:
- assert rev == range[1] + 1
- touched_filelogs[filelog] = (range[0], rev)
+ def on_filelog_rev(filelog, rev):
+ range = touched_filelogs.get(filelog)
+ if not range:
+ touched_filelogs[filelog] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_filelogs[filelog] = (range[0], rev)
# process the files
repo.ui.status(_(b"adding file changes\n"))
@@ -440,12 +452,14 @@
)
if sidedata_helpers:
- if b'changelog' in sidedata_helpers[1]:
- cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1)
+ if revlog_constants.KIND_CHANGELOG in sidedata_helpers[1]:
+ cl.rewrite_sidedata(
+ trp, sidedata_helpers, clstart, clend - 1
+ )
for mf, (startrev, endrev) in touched_manifests.items():
- mf.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+ mf.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
for fl, (startrev, endrev) in touched_filelogs.items():
- fl.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+ fl.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
# making sure the value exists
tr.changes.setdefault(b'changegroup-count-changesets', 0)
@@ -570,8 +584,8 @@
"""
chain = None
for chunkdata in iter(lambda: self.deltachunk(chain), {}):
- # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
- yield chunkdata
+ # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags)
+ yield chunkdata[:8]
chain = chunkdata[0]
@@ -590,7 +604,8 @@
def _deltaheader(self, headertuple, prevnode):
node, p1, p2, deltabase, cs = headertuple
flags = 0
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
class cg3unpacker(cg2unpacker):
@@ -608,7 +623,8 @@
def _deltaheader(self, headertuple, prevnode):
node, p1, p2, deltabase, cs, flags = headertuple
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
super(cg3unpacker, self)._unpackmanifests(
@@ -631,21 +647,48 @@
cg4 streams add support for exchanging sidedata.
"""
+ deltaheader = _CHANGEGROUPV4_DELTA_HEADER
+ deltaheadersize = deltaheader.size
version = b'04'
+ def _deltaheader(self, headertuple, prevnode):
+ protocol_flags, node, p1, p2, deltabase, cs, flags = headertuple
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
+
def deltachunk(self, prevnode):
res = super(cg4unpacker, self).deltachunk(prevnode)
if not res:
return res
- (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res
+ (
+ node,
+ p1,
+ p2,
+ cs,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ protocol_flags,
+ ) = res
+ assert not sidedata
- sidedata_raw = getchunk(self._stream)
sidedata = {}
- if len(sidedata_raw) > 0:
+ if protocol_flags & storageutil.CG_FLAG_SIDEDATA:
+ sidedata_raw = getchunk(self._stream)
sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
- return node, p1, p2, cs, deltabase, delta, flags, sidedata
+ return (
+ node,
+ p1,
+ p2,
+ cs,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ protocol_flags,
+ )
class headerlessfixup(object):
@@ -673,7 +716,7 @@
if delta.delta is not None:
prefix, data = b'', delta.delta
- elif delta.basenode == nullid:
+ elif delta.basenode == repo.nullid:
data = delta.revision
prefix = mdiff.trivialdiffheader(len(data))
else:
@@ -688,10 +731,10 @@
yield prefix
yield data
- sidedata = delta.sidedata
- if sidedata is not None:
+ if delta.protocol_flags & storageutil.CG_FLAG_SIDEDATA:
# Need a separate chunk for sidedata to be able to differentiate
# "raw delta" length and sidedata length
+ sidedata = delta.sidedata
yield chunkheader(len(sidedata))
yield sidedata
@@ -787,9 +830,15 @@
return i
# We failed to resolve a parent for this node, so
# we crash the changegroup construction.
+ if util.safehasattr(store, 'target'):
+ target = store.display_id
+ else:
+ # some stores are not revlogs themselves but wrap one
+ target = store._revlog.display_id
+
raise error.Abort(
b"unable to resolve parent while packing '%s' %r"
- b' for changeset %r' % (store.indexfile, rev, clrev)
+ b' for changeset %r' % (target, rev, clrev)
)
return nullrev
@@ -828,7 +877,8 @@
If topic is not None, progress detail will be generated using this
topic name (e.g. changesets, manifests, etc).
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+ See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
if not nodes:
return
@@ -1056,7 +1106,9 @@
# TODO a better approach would be for the strip bundle to
# correctly advertise its sidedata categories directly.
remote_sidedata = repo._wanted_sidedata
- sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata)
+ sidedata_helpers = sidedatamod.get_sidedata_helpers(
+ repo, remote_sidedata
+ )
clstate, deltas = self._generatechangelog(
cl,
@@ -1194,7 +1246,8 @@
if generate is False, the state will be fully populated and no chunk
stream will be yielded
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+ See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
clrevorder = {}
manifests = {}
@@ -1299,7 +1352,8 @@
`source` is unused here, but is used by extensions like remotefilelog to
change what is sent based in pulls vs pushes, etc.
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+ See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
repo = self._repo
mfl = repo.manifestlog
@@ -1633,11 +1687,18 @@
fullnodes=None,
remote_sidedata=None,
):
- # Same header func as cg3. Sidedata is in a separate chunk from the delta to
- # differenciate "raw delta" and sidedata.
- builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
- d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
- )
+ # Sidedata is in a separate chunk from the delta to differentiate
+ # "raw delta" and sidedata.
+ def builddeltaheader(d):
+ return _CHANGEGROUPV4_DELTA_HEADER.pack(
+ d.protocol_flags,
+ d.node,
+ d.p1node,
+ d.p2node,
+ d.basenode,
+ d.linknode,
+ d.flags,
+ )
return cgpacker(
repo,
@@ -1682,11 +1743,15 @@
#
# (or even to push subset of history)
needv03 = True
- has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
- if not has_revlogv2:
- versions.discard(b'04')
if not needv03:
versions.discard(b'03')
+ want_v4 = (
+ repo.ui.configbool(b'experimental', b'changegroup4')
+ or requirements.REVLOGV2_REQUIREMENT in repo.requirements
+ or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
+ )
+ if not want_v4:
+ versions.discard(b'04')
return versions
@@ -1913,25 +1978,3 @@
)
return revisions, files
-
-
-def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
- # Computers for computing sidedata on-the-fly
- sd_computers = collections.defaultdict(list)
- # Computers for categories to remove from sidedata
- sd_removers = collections.defaultdict(list)
-
- to_generate = remote_sd_categories - repo._wanted_sidedata
- to_remove = repo._wanted_sidedata - remote_sd_categories
- if pull:
- to_generate, to_remove = to_remove, to_generate
-
- for revlog_kind, computers in repo._sidedata_computers.items():
- for category, computer in computers.items():
- if category in to_generate:
- sd_computers[revlog_kind].append(computer)
- if category in to_remove:
- sd_removers[revlog_kind].append(computer)
-
- sidedata_helpers = (repo, sd_computers, sd_removers)
- return sidedata_helpers
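
The cg4 delta header leads with a one-byte `protocol_flags` field ahead of the five nodes and the flags word; sidedata then travels in its own chunk, gated on `storageutil.CG_FLAG_SIDEDATA`. A packing sketch built on the struct defined above (`d` stands for any object carrying the attributes that `builddeltaheader` reads):

    import struct

    _CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b'>B20s20s20s20s20sH')

    def pack_v4_header(d):
        return _CHANGEGROUPV4_DELTA_HEADER.pack(
            d.protocol_flags,
            d.node,
            d.p1node,
            d.p2node,
            d.basenode,
            d.linknode,
            d.flags,
        )
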
--- a/mercurial/changelog.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/changelog.py Mon Jun 07 17:10:35 2021 -0400
@@ -11,7 +11,6 @@
from .node import (
bin,
hex,
- nullid,
)
from .thirdparty import attr
@@ -26,7 +25,10 @@
dateutil,
stringutil,
)
-from .revlogutils import flagutil
+from .revlogutils import (
+ constants as revlog_constants,
+ flagutil,
+)
_defaultextra = {b'branch': b'default'}
@@ -221,7 +223,7 @@
def __new__(cls, cl, text, sidedata, cpsd):
if not text:
- return _changelogrevision(extra=_defaultextra, manifest=nullid)
+ return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
self = super(changelogrevision, cls).__new__(cls)
# We could return here and implement the following as an __init__.
@@ -393,27 +395,22 @@
``concurrencychecker`` will be passed to the revlog init function, see
the documentation there.
"""
- if trypending and opener.exists(b'00changelog.i.a'):
- indexfile = b'00changelog.i.a'
- else:
- indexfile = b'00changelog.i'
-
- datafile = b'00changelog.d'
revlog.revlog.__init__(
self,
opener,
- indexfile,
- datafile=datafile,
+ target=(revlog_constants.KIND_CHANGELOG, None),
+ radix=b'00changelog',
checkambig=True,
mmaplargeindex=True,
persistentnodemap=opener.options.get(b'persistent-nodemap', False),
concurrencychecker=concurrencychecker,
+ trypending=trypending,
)
- if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
+ if self._initempty and (self._format_version == revlog.REVLOGV1):
# changelogs don't benefit from generaldelta.
- self.version &= ~revlog.FLAG_GENERALDELTA
+ self._format_flags &= ~revlog.FLAG_GENERALDELTA
self._generaldelta = False
# Delta chains for changelogs tend to be very small because entries
@@ -428,7 +425,6 @@
self._filteredrevs = frozenset()
self._filteredrevs_hashcache = {}
self._copiesstorage = opener.options.get(b'copies-storage')
- self.revlog_kind = b'changelog'
@property
def filteredrevs(self):
@@ -441,19 +437,22 @@
self._filteredrevs = val
self._filteredrevs_hashcache = {}
+ def _write_docket(self, tr):
+ if not self._delayed:
+ super(changelog, self)._write_docket(tr)
+
def delayupdate(self, tr):
"""delay visibility of index updates to other readers"""
-
- if not self._delayed:
+ if self._docket is None and not self._delayed:
if len(self) == 0:
self._divert = True
- if self._realopener.exists(self.indexfile + b'.a'):
- self._realopener.unlink(self.indexfile + b'.a')
- self.opener = _divertopener(self._realopener, self.indexfile)
+ if self._realopener.exists(self._indexfile + b'.a'):
+ self._realopener.unlink(self._indexfile + b'.a')
+ self.opener = _divertopener(self._realopener, self._indexfile)
else:
self._delaybuf = []
self.opener = _delayopener(
- self._realopener, self.indexfile, self._delaybuf
+ self._realopener, self._indexfile, self._delaybuf
)
self._delayed = True
tr.addpending(b'cl-%i' % id(self), self._writepending)
@@ -464,14 +463,16 @@
self._delayed = False
self.opener = self._realopener
# move redirected index data back into place
- if self._divert:
+ if self._docket is not None:
+ self._write_docket(tr)
+ elif self._divert:
assert not self._delaybuf
- tmpname = self.indexfile + b".a"
+ tmpname = self._indexfile + b".a"
nfile = self.opener.open(tmpname)
nfile.close()
- self.opener.rename(tmpname, self.indexfile, checkambig=True)
+ self.opener.rename(tmpname, self._indexfile, checkambig=True)
elif self._delaybuf:
- fp = self.opener(self.indexfile, b'a', checkambig=True)
+ fp = self.opener(self._indexfile, b'a', checkambig=True)
fp.write(b"".join(self._delaybuf))
fp.close()
self._delaybuf = None
@@ -482,10 +483,12 @@
def _writepending(self, tr):
"""create a file containing the unfinalized state for
pretxnchangegroup"""
+ if self._docket:
+ return self._docket.write(tr, pending=True)
if self._delaybuf:
# make a temporary copy of the index
- fp1 = self._realopener(self.indexfile)
- pendingfilename = self.indexfile + b".a"
+ fp1 = self._realopener(self._indexfile)
+ pendingfilename = self._indexfile + b".a"
# register as a temp file to ensure cleanup on failure
tr.registertmp(pendingfilename)
# write existing data
@@ -497,16 +500,16 @@
# switch modes so finalize can simply rename
self._delaybuf = None
self._divert = True
- self.opener = _divertopener(self._realopener, self.indexfile)
+ self.opener = _divertopener(self._realopener, self._indexfile)
if self._divert:
return True
return False
- def _enforceinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr):
if not self._delayed:
- revlog.revlog._enforceinlinesize(self, tr, fp)
+ revlog.revlog._enforceinlinesize(self, tr)
def read(self, nodeorrev):
"""Obtain data from a parsed changelog revision.
@@ -524,15 +527,16 @@
``changelogrevision`` instead, as it is faster for partial object
access.
"""
- d, s = self._revisiondata(nodeorrev)
- c = changelogrevision(
- self, d, s, self._copiesstorage == b'changeset-sidedata'
- )
+ d = self._revisiondata(nodeorrev)
+ sidedata = self.sidedata(nodeorrev)
+ copy_sd = self._copiesstorage == b'changeset-sidedata'
+ c = changelogrevision(self, d, sidedata, copy_sd)
return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
def changelogrevision(self, nodeorrev):
"""Obtain a ``changelogrevision`` for a node or revision."""
- text, sidedata = self._revisiondata(nodeorrev)
+ text = self._revisiondata(nodeorrev)
+ sidedata = self.sidedata(nodeorrev)
return changelogrevision(
self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
)
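
Docket-based changelogs write a pending docket instead of the classic `00changelog.i.a` copy, but the delayed-visibility idea is unchanged: ordinary readers keep seeing the committed index while pretxn hooks read an appended copy. A generic sketch of that pending-file idea, using plain file I/O rather than Mercurial's vfs layer:

    def write_pending(index_path, delaybuf):
        # Copy the committed index, append the buffered entries, and
        # expose the result under a separate name so only pretxn hooks
        # pick it up.
        pending = index_path + b'.a'
        with open(index_path, 'rb') as src, open(pending, 'wb') as dst:
            dst.write(src.read())
            dst.write(b''.join(delaybuf))
        return pending
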
--- a/mercurial/chgserver.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/chgserver.py Mon Jun 07 17:10:35 2021 -0400
@@ -515,11 +515,9 @@
if inst.hint:
self.ui.error(_(b"(%s)\n") % inst.hint)
errorraised = True
- except error.Abort as inst:
- if isinstance(inst, error.InputError):
- detailed_exit_code = 10
- elif isinstance(inst, error.ConfigError):
- detailed_exit_code = 30
+ except error.Error as inst:
+ if inst.detailed_exit_code is not None:
+ detailed_exit_code = inst.detailed_exit_code
self.ui.error(inst.format())
errorraised = True
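
Exit-code selection now reads `detailed_exit_code` off the exception itself instead of testing exception classes, so new error types get the right code without touching this handler. The dispatch in isolation (helper name is illustrative):

    def detailed_code(inst, default=255):
        # Any error carrying its own detailed exit code wins; otherwise
        # fall back to the generic value.
        code = getattr(inst, 'detailed_exit_code', None)
        return code if code is not None else default
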
--- a/mercurial/cmdutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/cmdutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -15,7 +15,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
)
@@ -62,6 +61,10 @@
stringutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
if pycompat.TYPE_CHECKING:
from typing import (
Any,
@@ -998,11 +1001,6 @@
_(b"a branch of the same name already exists")
)
- if repo.revs(b'obsolete() and %ld', revs):
- raise error.InputError(
- _(b"cannot change branch of a obsolete changeset")
- )
-
# make sure only topological heads
if repo.revs(b'heads(%ld) - head()', revs):
raise error.InputError(
@@ -1097,7 +1095,7 @@
'hint' is the usual hint given to Abort exception.
"""
- if merge and repo.dirstate.p2() != nullid:
+ if merge and repo.dirstate.p2() != repo.nullid:
raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)
st = repo.status()
if st.modified or st.added or st.removed or st.deleted:
@@ -1434,8 +1432,12 @@
raise error.CommandError(cmd, _(b'invalid arguments'))
if not os.path.isfile(file_):
raise error.InputError(_(b"revlog '%s' not found") % file_)
+
+ target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
r = revlog.revlog(
- vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
+ vfsmod.vfs(encoding.getcwd(), audit=False),
+ target=target,
+ radix=file_[:-2],
)
return r
@@ -1849,7 +1851,10 @@
continue
copylist.append((tfn(pat, dest, srcs), srcs))
if not copylist:
- raise error.InputError(_(b'no files to copy'))
+ hint = None
+ if rename:
+ hint = _(b'maybe you meant to use --after --at-rev=.')
+ raise error.InputError(_(b'no files to copy'), hint=hint)
errors = 0
for targetpath, srcs in copylist:
@@ -2104,7 +2109,7 @@
if parents:
prev = parents[0]
else:
- prev = nullid
+ prev = repo.nullid
fm.context(ctx=ctx)
fm.plain(b'# HG changeset patch\n')
@@ -2967,7 +2972,7 @@
ms.reset()
# Reroute the working copy parent to the new changeset
- repo.setparents(newid, nullid)
+ repo.setparents(newid, repo.nullid)
# Fixing the dirstate because localrepo.commitctx does not update
# it. This is rather convenient because we did not need to update
@@ -3322,7 +3327,7 @@
# in case of merge, files that are actually added can be reported as
# modified, we need to post process the result
- if p2 != nullid:
+ if p2 != repo.nullid:
mergeadd = set(dsmodified)
for path in dsmodified:
if path in mf:
@@ -3593,7 +3598,7 @@
# We're reverting to our parent. If possible, we'd like status
# to report the file as clean. We have to use normallookup for
# merges to avoid losing information about merged/dirty files.
- if p2 != nullid:
+ if p2 != repo.nullid:
normal = repo.dirstate.normallookup
else:
normal = repo.dirstate.normal
@@ -3690,7 +3695,7 @@
repo.dirstate.add(f)
normal = repo.dirstate.normallookup
- if node == parent and p2 == nullid:
+ if node == parent and p2 == repo.nullid:
normal = repo.dirstate.normal
for f in actions[b'undelete'][0]:
if interactive:
--- a/mercurial/commands.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/commands.py Mon Jun 07 17:10:35 2021 -0400
@@ -15,10 +15,8 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
- wdirhex,
wdirrev,
)
from .pycompat import open
@@ -486,7 +484,7 @@
return b'%d ' % rev
def formathex(h):
- if h == wdirhex:
+ if h == repo.nodeconstants.wdirhex:
return b'%s+' % shorthex(hex(ctx.p1().node()))
else:
return b'%s ' % shorthex(h)
@@ -809,9 +807,9 @@
)
p1, p2 = repo.changelog.parents(node)
- if p1 == nullid:
+ if p1 == repo.nullid:
raise error.InputError(_(b'cannot backout a change with no parents'))
- if p2 != nullid:
+ if p2 != repo.nullid:
if not opts.get(b'parent'):
raise error.InputError(_(b'cannot backout a merge changeset'))
p = repo.lookup(opts[b'parent'])
@@ -1085,7 +1083,7 @@
)
else:
node, p2 = repo.dirstate.parents()
- if p2 != nullid:
+ if p2 != repo.nullid:
raise error.StateError(_(b'current bisect revision is a merge'))
if rev:
if not nodes:
@@ -2204,6 +2202,7 @@
(b'u', b'untrusted', None, _(b'show untrusted configuration options')),
(b'e', b'edit', None, _(b'edit user config')),
(b'l', b'local', None, _(b'edit repository config')),
+ (b'', b'source', None, _(b'show source of configuration value')),
(
b'',
b'shared',
@@ -2234,7 +2233,7 @@
--global, edit the system-wide config file. With --local, edit the
repository-level config file.
- With --debug, the source (filename and line number) is printed
+ With --source, the source (filename and line number) is printed
for each config item.
See :hg:`help config` for more information about config files.
@@ -2337,6 +2336,7 @@
selentries = set(selentries)
matched = False
+ show_source = ui.debugflag or opts.get(b'source')
for section, name, value in ui.walkconfig(untrusted=untrusted):
source = ui.configsource(section, name, untrusted)
value = pycompat.bytestr(value)
@@ -2348,7 +2348,7 @@
if values and not (section in selsections or entryname in selentries):
continue
fm.startitem()
- fm.condwrite(ui.debugflag, b'source', b'%s: ', source)
+ fm.condwrite(show_source, b'source', b'%s: ', source)
if uniquesel:
fm.data(name=entryname)
fm.write(b'value', b'%s\n', value)
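
Note: the new --source flag exposes what previously required --debug. A
minimal illustration of the intended usage; the file path, line number and
value are invented for the example::

    $ hg config ui --source
    /home/alice/.hgrc:3: ui.username=Alice <alice@example.com>

Without --source (and without --debug), the condwrite() above skips the
"file:line: " prefix and only the configuration values are printed.
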
@@ -4847,7 +4847,7 @@
opts = pycompat.byteskwargs(opts)
abort = opts.get(b'abort')
- if abort and repo.dirstate.p2() == nullid:
+ if abort and repo.dirstate.p2() == repo.nullid:
cmdutil.wrongtooltocontinue(repo, _(b'merge'))
cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
if abort:
@@ -5072,7 +5072,7 @@
displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for n in p:
- if n != nullid:
+ if n != repo.nullid:
displayer.show(repo[n])
displayer.close()
@@ -5128,15 +5128,9 @@
"""
opts = pycompat.byteskwargs(opts)
+
+ pathitems = urlutil.list_paths(ui, search)
ui.pager(b'paths')
- if search:
- pathitems = [
- (name, path)
- for name, path in pycompat.iteritems(ui.paths)
- if name == search
- ]
- else:
- pathitems = sorted(pycompat.iteritems(ui.paths))
fm = ui.formatter(b'paths', opts)
if fm.isplain():
@@ -5157,6 +5151,11 @@
assert subopt not in (b'name', b'url')
if showsubopts:
fm.plain(b'%s:%s = ' % (name, subopt))
+ if isinstance(value, bool):
+ if value:
+ value = b'yes'
+ else:
+ value = b'no'
fm.condwrite(showsubopts, subopt, b'%s\n', value)
fm.end()
@@ -6105,7 +6104,7 @@
with repo.wlock():
ms = mergestatemod.mergestate.read(repo)
- if not (ms.active() or repo.dirstate.p2() != nullid):
+ if not (ms.active() or repo.dirstate.p2() != repo.nullid):
raise error.StateError(
_(b'resolve command not applicable when not merging')
)
@@ -6223,7 +6222,7 @@
raise
ms.commit()
- branchmerge = repo.dirstate.p2() != nullid
+ branchmerge = repo.dirstate.p2() != repo.nullid
mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
if not didwork and pats:
@@ -6315,7 +6314,7 @@
opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
parent, p2 = repo.dirstate.parents()
- if not opts.get(b'rev') and p2 != nullid:
+ if not opts.get(b'rev') and p2 != repo.nullid:
# revert after merge is a trap for new users (issue2915)
raise error.InputError(
_(b'uncommitted merge with no revision specified'),
@@ -6335,7 +6334,7 @@
or opts.get(b'interactive')
):
msg = _(b"no files or directories specified")
- if p2 != nullid:
+ if p2 != repo.nullid:
hint = _(
b"uncommitted merge, use --all to discard all changes,"
b" or 'hg update -C .' to abort the merge"
@@ -7396,7 +7395,7 @@
for n in names:
if repo.tagtype(n) == b'global':
alltags = tagsmod.findglobaltags(ui, repo)
- if alltags[n][0] == nullid:
+ if alltags[n][0] == repo.nullid:
raise error.InputError(
_(b"tag '%s' is already removed") % n
)
@@ -7423,7 +7422,7 @@
)
if not opts.get(b'local'):
p1, p2 = repo.dirstate.parents()
- if p2 != nullid:
+ if p2 != repo.nullid:
raise error.StateError(_(b'uncommitted merge'))
bheads = repo.branchheads()
if not opts.get(b'force') and bheads and p1 not in bheads:
--- a/mercurial/commit.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/commit.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,7 +10,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
@@ -277,10 +276,10 @@
"""
fname = fctx.path()
- fparent1 = manifest1.get(fname, nullid)
- fparent2 = manifest2.get(fname, nullid)
+ fparent1 = manifest1.get(fname, repo.nullid)
+ fparent2 = manifest2.get(fname, repo.nullid)
touched = None
- if fparent1 == fparent2 == nullid:
+ if fparent1 == fparent2 == repo.nullid:
touched = 'added'
if isinstance(fctx, context.filectx):
@@ -291,9 +290,11 @@
if node in [fparent1, fparent2]:
repo.ui.debug(b'reusing %s filelog entry\n' % fname)
if (
- fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+ fparent1 != repo.nullid
+ and manifest1.flags(fname) != fctx.flags()
) or (
- fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+ fparent2 != repo.nullid
+ and manifest2.flags(fname) != fctx.flags()
):
touched = 'modified'
return node, touched
@@ -327,7 +328,9 @@
newfparent = fparent2
if manifest2: # branch merge
- if fparent2 == nullid or cnode is None: # copied on remote side
+ if (
+ fparent2 == repo.nullid or cnode is None
+ ): # copied on remote side
if cfname in manifest2:
cnode = manifest2[cfname]
newfparent = fparent1
@@ -346,7 +349,7 @@
if includecopymeta:
meta[b"copy"] = cfname
meta[b"copyrev"] = hex(cnode)
- fparent1, fparent2 = nullid, newfparent
+ fparent1, fparent2 = repo.nullid, newfparent
else:
repo.ui.warn(
_(
@@ -356,20 +359,20 @@
% (fname, cfname)
)
- elif fparent1 == nullid:
- fparent1, fparent2 = fparent2, nullid
- elif fparent2 != nullid:
+ elif fparent1 == repo.nullid:
+ fparent1, fparent2 = fparent2, repo.nullid
+ elif fparent2 != repo.nullid:
if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other':
- fparent1, fparent2 = fparent2, nullid
+ fparent1, fparent2 = fparent2, repo.nullid
elif ms.active() and ms.extras(fname).get(b'merged') != b'yes':
- fparent1, fparent2 = fparent1, nullid
+ fparent1, fparent2 = fparent1, repo.nullid
# is one parent an ancestor of the other?
else:
fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
if fparent1 in fparentancestors:
- fparent1, fparent2 = fparent2, nullid
+ fparent1, fparent2 = fparent2, repo.nullid
elif fparent2 in fparentancestors:
- fparent2 = nullid
+ fparent2 = repo.nullid
force_new_node = False
# The file might have been deleted by merge code and user explicitly choose
@@ -384,9 +387,14 @@
force_new_node = True
# is the file changed?
text = fctx.data()
- if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
+ if (
+ fparent2 != repo.nullid
+ or meta
+ or flog.cmp(fparent1, text)
+ or force_new_node
+ ):
if touched is None: # do not overwrite added
- if fparent2 == nullid:
+ if fparent2 == repo.nullid:
touched = 'modified'
else:
touched = 'merged'
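
Note: the elif cascade above normalizes the two file parents so that
fparent2 ends up null whenever the revision effectively has a single
parent. Summarized as a sketch, with `n` standing for repo.nullid::

    # (fparent1, fparent2) -> normalized result
    # (n,  p2) -> (p2, n)   only the "other" parent exists
    # (p1, p2) -> (p2, n)   merge took the other side's filenode wholesale
    # (p1, p2) -> (p1, n)   merge state says the file was never really merged
    # (p1, p2) -> (p2, n)   p1 is an ancestor of p2: nothing new on this side
    # (p1, p2) -> (p1, n)   p2 is an ancestor of p1: nothing new on that side
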
--- a/mercurial/config.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/config.py Mon Jun 07 17:10:35 2021 -0400
@@ -258,93 +258,3 @@
self.parse(
path, fp.read(), sections=sections, remap=remap, include=include
)
-
-
-def parselist(value):
- """parse a configuration value as a list of comma/space separated strings
-
- >>> parselist(b'this,is "a small" ,test')
- ['this', 'is', 'a small', 'test']
- """
-
- def _parse_plain(parts, s, offset):
- whitespace = False
- while offset < len(s) and (
- s[offset : offset + 1].isspace() or s[offset : offset + 1] == b','
- ):
- whitespace = True
- offset += 1
- if offset >= len(s):
- return None, parts, offset
- if whitespace:
- parts.append(b'')
- if s[offset : offset + 1] == b'"' and not parts[-1]:
- return _parse_quote, parts, offset + 1
- elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\':
- parts[-1] = parts[-1][:-1] + s[offset : offset + 1]
- return _parse_plain, parts, offset + 1
- parts[-1] += s[offset : offset + 1]
- return _parse_plain, parts, offset + 1
-
- def _parse_quote(parts, s, offset):
- if offset < len(s) and s[offset : offset + 1] == b'"': # ""
- parts.append(b'')
- offset += 1
- while offset < len(s) and (
- s[offset : offset + 1].isspace()
- or s[offset : offset + 1] == b','
- ):
- offset += 1
- return _parse_plain, parts, offset
-
- while offset < len(s) and s[offset : offset + 1] != b'"':
- if (
- s[offset : offset + 1] == b'\\'
- and offset + 1 < len(s)
- and s[offset + 1 : offset + 2] == b'"'
- ):
- offset += 1
- parts[-1] += b'"'
- else:
- parts[-1] += s[offset : offset + 1]
- offset += 1
-
- if offset >= len(s):
- real_parts = _configlist(parts[-1])
- if not real_parts:
- parts[-1] = b'"'
- else:
- real_parts[0] = b'"' + real_parts[0]
- parts = parts[:-1]
- parts.extend(real_parts)
- return None, parts, offset
-
- offset += 1
- while offset < len(s) and s[offset : offset + 1] in [b' ', b',']:
- offset += 1
-
- if offset < len(s):
- if offset + 1 == len(s) and s[offset : offset + 1] == b'"':
- parts[-1] += b'"'
- offset += 1
- else:
- parts.append(b'')
- else:
- return None, parts, offset
-
- return _parse_plain, parts, offset
-
- def _configlist(s):
- s = s.rstrip(b' ,')
- if not s:
- return []
- parser, parts, offset = _parse_plain, [b''], 0
- while parser:
- parser, parts, offset = parser(parts, s, offset)
- return parts
-
- if value is not None and isinstance(value, bytes):
- result = _configlist(value.lstrip(b' ,\n'))
- else:
- result = value
- return result or []
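
Note: parselist() leaves this module wholesale; its new home is outside
this hunk, so it is not named here. For reference, the contract pinned down
by the removed code -- the original doctest case plus two edge cases derived
from the branches above (the extra cases are illustrative)::

    >>> parselist(b'this,is "a small" ,test')
    ['this', 'is', 'a small', 'test']
    >>> parselist(b'')    # _configlist() returns [] for an empty string
    []
    >>> parselist(None)   # non-bytes input falls through to `result or []`
    []
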
--- a/mercurial/configitems.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/configitems.py Mon Jun 07 17:10:35 2021 -0400
@@ -904,6 +904,11 @@
)
coreconfigitem(
b'experimental',
+ b'changegroup4',
+ default=False,
+)
+coreconfigitem(
+ b'experimental',
b'cleanup-as-archived',
default=False,
)
@@ -954,6 +959,11 @@
)
coreconfigitem(
b'experimental',
+ b'dirstate-tree.in-memory',
+ default=False,
+)
+coreconfigitem(
+ b'experimental',
b'editortmpinhg',
default=False,
)
@@ -1138,6 +1148,27 @@
b'revisions.prefixhexnode',
default=False,
)
+# "out of experimental" todo list.
+#
+# * include management of a persistent nodemap in the main docket
+# * enforce a "no-truncate" policy for mmap safety
+# - for censoring operation
+# - for stripping operation
+# - for rollback operation
+# * proper streaming (race free) of the docket file
+# * track garbage data to eventually allow rewriting -existing- sidedata.
+# * Exchange-wise, we will also need to do something more efficient than
+# keeping references to the affected revlogs, especially memory-wise when
+# rewriting sidedata.
+# * introduce a proper solution to reduce the number of filelog related files.
+# * use caching for reading sidedata (similar to what we do for data).
+# * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
+# * Improvement to consider
+# - avoid compression header in chunk using the default compression?
+# - forbid "inline" compression mode entirely?
+# - split the data offset and flag field (the 2 bytes saved are mostly trouble)
+# - keep track of uncompressed -chunk- size (to preallocate memory better)
+# - keep track of chain base or size (probably not that useful anymore)
coreconfigitem(
b'experimental',
b'revlogv2',
@@ -1272,6 +1303,14 @@
experimental=True,
)
coreconfigitem(
+ # Enable this dirstate format *when creating a new repository*.
+ # Which format to use for existing repos is controlled by .hg/requires
+ b'format',
+ b'exp-dirstate-v2',
+ default=False,
+ experimental=True,
+)
+coreconfigitem(
b'format',
b'dotencode',
default=True,
@@ -1310,6 +1349,20 @@
default=lambda: [b'zstd', b'zlib'],
alias=[(b'experimental', b'format.compression')],
)
+# Experimental TODOs:
+#
+# * Same as for revlogv2 (but for the reduction of the number of files)
+# * Improvement to investigate
+# - storing .hgtags fnode
+# - storing `rank` of changesets
+# - storing branch related identifier
+
+coreconfigitem(
+ b'format',
+ b'exp-use-changelog-v2',
+ default=None,
+ experimental=True,
+)
coreconfigitem(
b'format',
b'usefncache',
@@ -1342,20 +1395,6 @@
b'use-persistent-nodemap',
default=_persistent_nodemap_default,
)
-# TODO needs to grow a docket file to at least store the last offset of the data
-# file when rewriting sidedata.
-# Will also need a way of dealing with garbage data if we allow rewriting
-# *existing* sidedata.
-# Exchange-wise, we will also need to do something more efficient than keeping
-# references to the affected revlogs, especially memory-wise when rewriting
-# sidedata.
-# Also... compress the sidedata? (this should be coming very soon)
-coreconfigitem(
- b'format',
- b'exp-revlogv2.2',
- default=False,
- experimental=True,
-)
coreconfigitem(
b'format',
b'exp-use-copies-side-data-changeset',
@@ -1364,12 +1403,6 @@
)
coreconfigitem(
b'format',
- b'exp-use-side-data',
- default=False,
- experimental=True,
-)
-coreconfigitem(
- b'format',
b'use-share-safe',
default=False,
)
--- a/mercurial/context.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/context.py Mon Jun 07 17:10:35 2021 -0400
@@ -14,14 +14,9 @@
from .i18n import _
from .node import (
- addednodeid,
hex,
- modifiednodeid,
- nullid,
nullrev,
short,
- wdirfilenodeids,
- wdirhex,
)
from .pycompat import (
getattr,
@@ -140,7 +135,7 @@
removed.append(fn)
elif flag1 != flag2:
modified.append(fn)
- elif node2 not in wdirfilenodeids:
+ elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
# When comparing files between two commits, we save time by
# not comparing the file contents when the nodeids differ.
# Note that this means we incorrectly report a reverted change
@@ -737,7 +732,7 @@
n2 = c2._parents[0]._node
cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
if not cahs:
- anc = nullid
+ anc = self._repo.nodeconstants.nullid
elif len(cahs) == 1:
anc = cahs[0]
else:
@@ -1132,7 +1127,11 @@
_path = self._path
fl = self._filelog
parents = self._filelog.parents(self._filenode)
- pl = [(_path, node, fl) for node in parents if node != nullid]
+ pl = [
+ (_path, node, fl)
+ for node in parents
+ if node != self._repo.nodeconstants.nullid
+ ]
r = fl.renamed(self._filenode)
if r:
@@ -1393,6 +1392,9 @@
def __bytes__(self):
return bytes(self._parents[0]) + b"+"
+ def hex(self):
+ return self._repo.nodeconstants.wdirhex
+
__str__ = encoding.strmethod(__bytes__)
def __nonzero__(self):
@@ -1556,12 +1558,12 @@
return self._repo.dirstate[key] not in b"?r"
def hex(self):
- return wdirhex
+ return self._repo.nodeconstants.wdirhex
@propertycache
def _parents(self):
p = self._repo.dirstate.parents()
- if p[1] == nullid:
+ if p[1] == self._repo.nodeconstants.nullid:
p = p[:-1]
# use unfiltered repo to delay/avoid loading obsmarkers
unfi = self._repo.unfiltered()
@@ -1572,7 +1574,9 @@
for n in p
]
- def setparents(self, p1node, p2node=nullid):
+ def setparents(self, p1node, p2node=None):
+ if p2node is None:
+ p2node = self._repo.nodeconstants.nullid
dirstate = self._repo.dirstate
with dirstate.parentchange():
copies = dirstate.setparents(p1node, p2node)
@@ -1584,7 +1588,7 @@
for f in copies:
if f not in pctx and copies[f] in pctx:
dirstate.copy(copies[f], f)
- if p2node == nullid:
+ if p2node == self._repo.nodeconstants.nullid:
for f, s in sorted(dirstate.copies().items()):
if f not in pctx and s not in pctx:
dirstate.copy(None, f)
@@ -1836,7 +1840,7 @@
def _poststatusfixup(self, status, fixup):
"""update dirstate for files that are actually clean"""
poststatus = self._repo.postdsstatus()
- if fixup or poststatus:
+ if fixup or poststatus or self._repo.dirstate._dirty:
try:
oldid = self._repo.dirstate.identity()
@@ -1944,8 +1948,8 @@
ff = self._flagfunc
for i, l in (
- (addednodeid, status.added),
- (modifiednodeid, status.modified),
+ (self._repo.nodeconstants.addednodeid, status.added),
+ (self._repo.nodeconstants.modifiednodeid, status.modified),
):
for f in l:
man[f] = i
@@ -2070,13 +2074,18 @@
path = self.copysource()
if not path:
return None
- return path, self._changectx._parents[0]._manifest.get(path, nullid)
+ return (
+ path,
+ self._changectx._parents[0]._manifest.get(
+ path, self._repo.nodeconstants.nullid
+ ),
+ )
def parents(self):
'''return parent filectxs, following copies if necessary'''
def filenode(ctx, path):
- return ctx._manifest.get(path, nullid)
+ return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
path = self._path
fl = self._filelog
@@ -2094,7 +2103,7 @@
return [
self._parentfilectx(p, fileid=n, filelog=l)
for p, n, l in pl
- if n != nullid
+ if n != self._repo.nodeconstants.nullid
]
def children(self):
@@ -2222,7 +2231,9 @@
# ``overlayworkingctx`` (e.g. with --collapse).
util.clearcachedproperty(self, b'_manifest')
- def setparents(self, p1node, p2node=nullid):
+ def setparents(self, p1node, p2node=None):
+ if p2node is None:
+ p2node = self._repo.nodeconstants.nullid
assert p1node == self._wrappedctx.node()
self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
@@ -2248,10 +2259,10 @@
flag = self._flagfunc
for path in self.added():
- man[path] = addednodeid
+ man[path] = self._repo.nodeconstants.addednodeid
man.setflag(path, flag(path))
for path in self.modified():
- man[path] = modifiednodeid
+ man[path] = self._repo.nodeconstants.modifiednodeid
man.setflag(path, flag(path))
for path in self.removed():
del man[path]
@@ -2827,7 +2838,7 @@
)
self._rev = None
self._node = None
- parents = [(p or nullid) for p in parents]
+ parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
p1, p2 = parents
self._parents = [self._repo[p] for p in (p1, p2)]
files = sorted(set(files))
@@ -2866,10 +2877,10 @@
man = pctx.manifest().copy()
for f in self._status.modified:
- man[f] = modifiednodeid
+ man[f] = self._repo.nodeconstants.modifiednodeid
for f in self._status.added:
- man[f] = addednodeid
+ man[f] = self._repo.nodeconstants.addednodeid
for f in self._status.removed:
if f in man:
@@ -3006,12 +3017,12 @@
# sanity check to ensure that the reused manifest parents are
# manifests of our commit parents
mp1, mp2 = self.manifestctx().parents
- if p1 != nullid and p1.manifestnode() != mp1:
+ if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
raise RuntimeError(
r"can't reuse the manifest: its p1 "
r"doesn't match the new ctx p1"
)
- if p2 != nullid and p2.manifestnode() != mp2:
+ if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
raise RuntimeError(
r"can't reuse the manifest: "
r"its p2 doesn't match the new ctx p2"
--- a/mercurial/copies.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/copies.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,10 +12,7 @@
import os
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
match as matchmod,
@@ -579,7 +576,7 @@
parents = fctx._filelog.parents(fctx._filenode)
nb_parents = 0
for n in parents:
- if n != nullid:
+ if n != repo.nullid:
nb_parents += 1
return nb_parents >= 2
--- a/mercurial/debugcommands.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/debugcommands.py Mon Jun 07 17:10:35 2021 -0400
@@ -30,7 +30,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -92,6 +91,7 @@
wireprotoserver,
wireprotov2peer,
)
+from .interfaces import repository
from .utils import (
cborutil,
compression,
@@ -794,7 +794,7 @@
index = r.index
start = r.start
length = r.length
- generaldelta = r.version & revlog.FLAG_GENERALDELTA
+ generaldelta = r._generaldelta
withsparseread = getattr(r, '_withsparseread', False)
def revinfo(rev):
@@ -941,6 +941,7 @@
),
(b'', b'dates', True, _(b'display the saved mtime')),
(b'', b'datesort', None, _(b'sort by saved mtime')),
+ (b'', b'dirs', False, _(b'display directories')),
],
_(b'[OPTION]...'),
)
@@ -956,7 +957,11 @@
keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
else:
keyfunc = None # sort by filename
- for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
+ entries = list(pycompat.iteritems(repo.dirstate))
+ if opts['dirs']:
+ entries.extend(repo.dirstate.directories())
+ entries.sort(key=keyfunc)
+ for file_, ent in entries:
if ent[3] == -1:
timestr = b'unset '
elif nodates:
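
Note: the new --dirs flag only yields extra entries with the Rust map; the
pure-Python dirstatemap.directories() added in mercurial/dirstate.py below
returns an empty list. Invocation sketch -- the output shape is guessed from
the tuple fields printed above, not taken from a real run::

    $ hg debugdirstate --dirs
    n 644         12 2021-06-07 17:10:35 some/file.txt
    d   0          0 unset               some/directory
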
@@ -1667,7 +1672,7 @@
node = r.node(i)
pp = r.parents(node)
ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write(b"}\n")
@@ -1675,7 +1680,7 @@
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
"""show stats related to the changelog index"""
- repo.changelog.shortest(nullid, 1)
+ repo.changelog.shortest(repo.nullid, 1)
index = repo.changelog.index
if not util.safehasattr(index, b'stats'):
raise error.Abort(_(b'debugindexstats only works with native code'))
@@ -2425,7 +2430,7 @@
# arbitrary node identifiers, possibly not present in the
# local repository.
n = bin(s)
- if len(n) != len(nullid):
+ if len(n) != repo.nodeconstants.nodelen:
raise TypeError()
return n
except TypeError:
@@ -2973,8 +2978,8 @@
)
return 0
- v = r.version
- format = v & 0xFFFF
+ format = r._format_version
+ v = r._format_flags
flags = []
gdelta = False
if v & revlog.FLAG_INLINE_DATA:
@@ -3328,7 +3333,7 @@
try:
pp = r.parents(node)
except Exception:
- pp = [nullid, nullid]
+ pp = [repo.nullid, repo.nullid]
if ui.verbose:
ui.write(
b"% 6d % 9d % 7d % 7d %s %s %s\n"
@@ -3742,7 +3747,9 @@
for n in chlist:
if limit is not None and count >= limit:
break
- parents = [True for p in other.changelog.parents(n) if p != nullid]
+ parents = [
+ True for p in other.changelog.parents(n) if p != repo.nullid
+ ]
if opts.get(b"no_merges") and len(parents) == 2:
continue
count += 1
@@ -4046,7 +4053,7 @@
def debugupdatecaches(ui, repo, *pats, **opts):
"""warm all known caches in the repository"""
with repo.wlock(), repo.lock():
- repo.updatecaches(full=True)
+ repo.updatecaches(caches=repository.CACHES_ALL)
@command(
--- a/mercurial/dirstate.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/dirstate.py Mon Jun 07 17:10:35 2021 -0400
@@ -14,7 +14,6 @@
import stat
from .i18n import _
-from .node import nullid
from .pycompat import delattr
from hgdemandimport import tracing
@@ -40,6 +39,8 @@
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')
+SUPPORTS_DIRSTATE_V2 = rustmod is not None
+
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7FFFFFFF
@@ -74,7 +75,14 @@
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
def __init__(
- self, opener, ui, root, validate, sparsematchfn, nodeconstants
+ self,
+ opener,
+ ui,
+ root,
+ validate,
+ sparsematchfn,
+ nodeconstants,
+ use_dirstate_v2,
):
"""Create a new dirstate object.
@@ -82,6 +90,7 @@
dirstate file; root is the root of the directory tracked by
the dirstate.
"""
+ self._use_dirstate_v2 = use_dirstate_v2
self._nodeconstants = nodeconstants
self._opener = opener
self._validate = validate
@@ -140,7 +149,11 @@
def _map(self):
"""Return the dirstate contents (see documentation for dirstatemap)."""
self._map = self._mapcls(
- self._ui, self._opener, self._root, self._nodeconstants
+ self._ui,
+ self._opener,
+ self._root,
+ self._nodeconstants,
+ self._use_dirstate_v2,
)
return self._map
@@ -302,6 +315,9 @@
iteritems = items
+ def directories(self):
+ return self._map.directories()
+
def parents(self):
return [self._validate(p) for p in self._pl]
@@ -314,7 +330,7 @@
def branch(self):
return encoding.tolocal(self._branch)
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, 'm' merged entries a
@@ -323,6 +339,8 @@
See localrepo.setparents()
"""
+ if p2 is None:
+ p2 = self._nodeconstants.nullid
if self._parentwriters == 0:
raise ValueError(
b"cannot set dirstate parent outside of "
@@ -335,10 +353,12 @@
self._origpl = self._pl
self._map.setparents(p1, p2)
copies = {}
- if oldp2 != nullid and p2 == nullid:
- candidatefiles = self._map.nonnormalset.union(
- self._map.otherparentset
- )
+ if (
+ oldp2 != self._nodeconstants.nullid
+ and p2 == self._nodeconstants.nullid
+ ):
+ candidatefiles = self._map.non_normal_or_other_parent_paths()
+
for f in candidatefiles:
s = self._map.get(f)
if s is None:
@@ -459,7 +479,7 @@
def normallookup(self, f):
'''Mark a file normal, but possibly dirty.'''
- if self._pl[1] != nullid:
+ if self._pl[1] != self._nodeconstants.nullid:
# if there is a merge going on and the file was either
# in state 'm' (-1) or coming from other parent (-2) before
# being removed, restore that state.
@@ -481,7 +501,7 @@
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
- if self._pl[1] == nullid:
+ if self._pl[1] == self._nodeconstants.nullid:
raise error.Abort(
_(b"setting %r to other parent only allowed in merges") % f
)
@@ -503,7 +523,7 @@
self._dirty = True
oldstate = self[f]
size = 0
- if self._pl[1] != nullid:
+ if self._pl[1] != self._nodeconstants.nullid:
entry = self._map.get(f)
if entry is not None:
# backup the previous state
@@ -519,7 +539,7 @@
def merge(self, f):
'''Mark a file merged.'''
- if self._pl[1] == nullid:
+ if self._pl[1] == self._nodeconstants.nullid:
return self.normallookup(f)
return self.otherparent(f)
@@ -638,7 +658,7 @@
if self._origpl is None:
self._origpl = self._pl
- self._map.setparents(parent, nullid)
+ self._map.setparents(parent, self._nodeconstants.nullid)
for f in to_lookup:
self.normallookup(f)
@@ -1120,6 +1140,7 @@
warnings,
bad,
traversed,
+ dirty,
) = rustmod.status(
self._map._rustmap,
matcher,
@@ -1133,6 +1154,8 @@
bool(matcher.traversedir),
)
+ self._dirty |= dirty
+
if matcher.traversedir:
for dir in traversed:
matcher.traversedir(dir)
@@ -1430,13 +1453,16 @@
denormalized form that they appear as in the dirstate.
"""
- def __init__(self, ui, opener, root, nodeconstants):
+ def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
self._ui = ui
self._opener = opener
self._root = root
self._filename = b'dirstate'
self._nodelen = 20
self._nodeconstants = nodeconstants
+ assert (
+ not use_dirstate_v2
+ ), "should have detected unsupported requirement"
self._parents = None
self._dirtyparents = False
@@ -1456,10 +1482,14 @@
self._map
return self.copymap
+ def directories(self):
+ # Rust / dirstate-v2 only
+ return []
+
def clear(self):
self._map.clear()
self.copymap.clear()
- self.setparents(nullid, nullid)
+ self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
util.clearcachedproperty(self, b"_dirs")
util.clearcachedproperty(self, b"_alldirs")
util.clearcachedproperty(self, b"filefoldmap")
@@ -1636,7 +1666,10 @@
st[self._nodelen : 2 * self._nodelen],
)
elif l == 0:
- self._parents = (nullid, nullid)
+ self._parents = (
+ self._nodeconstants.nullid,
+ self._nodeconstants.nullid,
+ )
else:
raise error.Abort(
_(b'working directory state appears damaged!')
@@ -1718,6 +1751,9 @@
self.nonnormalset = nonnorm
return otherparents
+ def non_normal_or_other_parent_paths(self):
+ return self.nonnormalset.union(self.otherparentset)
+
@propertycache
def identity(self):
self._map
@@ -1735,18 +1771,26 @@
if rustmod is not None:
class dirstatemap(object):
- def __init__(self, ui, opener, root, nodeconstants):
+ def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
+ self._use_dirstate_v2 = use_dirstate_v2
self._nodeconstants = nodeconstants
self._ui = ui
self._opener = opener
self._root = root
self._filename = b'dirstate'
+ self._nodelen = 20 # Also update Rust code when changing this!
self._parents = None
self._dirtyparents = False
# for consistent view between _pl() and _read() invocations
self._pendingmode = None
+ self._use_dirstate_tree = self._ui.configbool(
+ b"experimental",
+ b"dirstate-tree.in-memory",
+ False,
+ )
+
def addfile(self, *args, **kwargs):
return self._rustmap.addfile(*args, **kwargs)
@@ -1765,36 +1809,21 @@
def get(self, *args, **kwargs):
return self._rustmap.get(*args, **kwargs)
- @propertycache
- def _rustmap(self):
- """
- Fills the Dirstatemap when called.
- Use `self._inner_rustmap` if reading the dirstate is not necessary.
- """
- self._rustmap = self._inner_rustmap
- self.read()
- return self._rustmap
-
- @propertycache
- def _inner_rustmap(self):
- """
- Does not fill the Dirstatemap when called. This allows for
- optimizations where only setting/getting the parents is needed.
- """
- self._inner_rustmap = rustmod.DirstateMap(self._root)
- return self._inner_rustmap
-
@property
def copymap(self):
return self._rustmap.copymap()
+ def directories(self):
+ return self._rustmap.directories()
+
def preload(self):
self._rustmap
def clear(self):
self._rustmap.clear()
- self._inner_rustmap.clear()
- self.setparents(nullid, nullid)
+ self.setparents(
+ self._nodeconstants.nullid, self._nodeconstants.nullid
+ )
util.clearcachedproperty(self, b"_dirs")
util.clearcachedproperty(self, b"_alldirs")
util.clearcachedproperty(self, b"dirfoldmap")
@@ -1833,15 +1862,19 @@
return fp
def setparents(self, p1, p2):
- self._rustmap.setparents(p1, p2)
self._parents = (p1, p2)
self._dirtyparents = True
def parents(self):
if not self._parents:
+ if self._use_dirstate_v2:
+ offset = len(rustmod.V2_FORMAT_MARKER)
+ else:
+ offset = 0
+ read_len = offset + self._nodelen * 2
try:
fp = self._opendirstatefile()
- st = fp.read(40)
+ st = fp.read(read_len)
fp.close()
except IOError as err:
if err.errno != errno.ENOENT:
@@ -1849,16 +1882,30 @@
# File doesn't exist, so the current state is empty
st = b''
- try:
- self._parents = self._inner_rustmap.parents(st)
- except ValueError:
+ l = len(st)
+ if l == read_len:
+ st = st[offset:]
+ self._parents = (
+ st[: self._nodelen],
+ st[self._nodelen : 2 * self._nodelen],
+ )
+ elif l == 0:
+ self._parents = (
+ self._nodeconstants.nullid,
+ self._nodeconstants.nullid,
+ )
+ else:
raise error.Abort(
_(b'working directory state appears damaged!')
)
return self._parents
- def read(self):
+ @propertycache
+ def _rustmap(self):
+ """
+ Fills the Dirstatemap when called.
+ """
# ignore HG_PENDING because identity is used only for writing
self.identity = util.filestat.frompath(
self._opener.join(self._filename)
@@ -1873,22 +1920,26 @@
except IOError as err:
if err.errno != errno.ENOENT:
raise
- return
- if not st:
- return
+ st = b''
- parse_dirstate = util.nogc(self._rustmap.read)
- parents = parse_dirstate(st)
+ self._rustmap, parents = rustmod.DirstateMap.new(
+ self._use_dirstate_tree, self._use_dirstate_v2, st
+ )
+
if parents and not self._dirtyparents:
self.setparents(*parents)
self.__contains__ = self._rustmap.__contains__
self.__getitem__ = self._rustmap.__getitem__
self.get = self._rustmap.get
+ return self._rustmap
def write(self, st, now):
parents = self.parents()
- st.write(self._rustmap.write(parents[0], parents[1], now))
+ packed = self._rustmap.write(
+ self._use_dirstate_v2, parents[0], parents[1], now
+ )
+ st.write(packed)
st.close()
self._dirtyparents = False
@@ -1930,6 +1981,9 @@
otherparents = self._rustmap.other_parent_entries()
return otherparents
+ def non_normal_or_other_parent_paths(self):
+ return self._rustmap.non_normal_or_other_parent_paths()
+
@propertycache
def dirfoldmap(self):
f = {}
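
Note: with dirstate-v2 the parents still live at a fixed location, merely
shifted past the format marker. A standalone sketch of the parsing
introduced above; the marker is treated as an opaque prefix::

    NODELEN = 20  # SHA-1 node length, also hard-coded on the Rust side

    def read_parents(data, marker_len, nullid):
        # marker_len is len(rustmod.V2_FORMAT_MARKER) for v2, 0 for v1
        read_len = marker_len + 2 * NODELEN
        header = data[:read_len]
        if len(header) == 0:
            return (nullid, nullid)  # missing or empty dirstate file
        if len(header) < read_len:
            raise ValueError('working directory state appears damaged!')
        header = header[marker_len:]
        return (header[:NODELEN], header[NODELEN : 2 * NODELEN])
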
--- a/mercurial/discovery.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/discovery.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,7 +12,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
@@ -107,7 +106,7 @@
if missingroots:
discbases = []
for n in missingroots:
- discbases.extend([p for p in cl.parents(n) if p != nullid])
+ discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
# TODO remove call to nodesbetween.
# TODO populate attributes on outgoing instance instead of setting
# discbases.
@@ -116,7 +115,7 @@
ancestorsof = heads
commonheads = [n for n in discbases if n not in included]
elif not commonheads:
- commonheads = [nullid]
+ commonheads = [repo.nullid]
self.commonheads = commonheads
self.ancestorsof = ancestorsof
self._revlog = cl
@@ -381,7 +380,7 @@
# - a local outgoing head descended from update
# - a remote head that's known locally and not
# ancestral to an outgoing head
- if remoteheads == [nullid]:
+ if remoteheads == [repo.nullid]:
# remote is empty, nothing to check.
return
--- a/mercurial/dispatch.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/dispatch.py Mon Jun 07 17:10:35 2021 -0400
@@ -1064,6 +1064,16 @@
if req.earlyoptions[b'profile']:
for ui_ in uis:
ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
+ elif req.earlyoptions[b'profile'] is False:
+ # Check for it being set already, so that we don't pollute the config
+ # with this when using chg in the very common case that it's not
+ # enabled.
+ if lui.configbool(b'profiling', b'enabled'):
+ # Only do this on lui so that `chg foo` with a user config setting
+ # profiling.enabled=1 still shows profiling information (chg will
+ # specify `--no-profile` when `hg serve` is starting up, we don't
+ # want that to propagate to every later invocation).
+ lui.setconfig(b'profiling', b'enabled', b'false', b'--no-profile')
profile = lui.configbool(b'profiling', b'enabled')
with profiling.profile(lui, enabled=profile) as profiler:
--- a/mercurial/error.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/error.py Mon Jun 07 17:10:35 2021 -0400
@@ -51,13 +51,52 @@
super(Hint, self).__init__(*args, **kw)
-class StorageError(Hint, Exception):
+class Error(Hint, Exception):
+ """Base class for Mercurial errors."""
+
+ coarse_exit_code = None
+ detailed_exit_code = None
+
+ def __init__(self, message, hint=None):
+ # type: (bytes, Optional[bytes]) -> None
+ self.message = message
+ self.hint = hint
+ # Pass the message into the Exception constructor to help extensions
+ # that look for exc.args[0].
+ Exception.__init__(self, message)
+
+ def __bytes__(self):
+ return self.message
+
+ if pycompat.ispy3:
+
+ def __str__(self):
+ # the output would be unreadable if the message was translated,
+ # but do not replace it with encoding.strfromlocal(), which
+ # may raise another exception.
+ return pycompat.sysstr(self.__bytes__())
+
+ def format(self):
+ # type: () -> bytes
+ from .i18n import _
+
+ message = _(b"abort: %s\n") % self.message
+ if self.hint:
+ message += _(b"(%s)\n") % self.hint
+ return message
+
+
+class Abort(Error):
+ """Raised if a command needs to print an error and exit."""
+
+
+class StorageError(Error):
"""Raised when an error occurs in a storage layer.
Usually subclassed by a storage-specific exception.
"""
- __bytes__ = _tobytes
+ detailed_exit_code = 50
class RevlogError(StorageError):
@@ -159,10 +198,20 @@
__bytes__ = _tobytes
-class InterventionRequired(Hint, Exception):
+class InterventionRequired(Abort):
"""Exception raised when a command requires human intervention."""
- __bytes__ = _tobytes
+ coarse_exit_code = 1
+ detailed_exit_code = 240
+
+ def format(self):
+ # type: () -> bytes
+ from .i18n import _
+
+ message = _(b"%s\n") % self.message
+ if self.hint:
+ message += _(b"(%s)\n") % self.hint
+ return message
class ConflictResolutionRequired(InterventionRequired):
@@ -182,44 +231,14 @@
)
-class Abort(Hint, Exception):
- """Raised if a command needs to print an error and exit."""
-
- def __init__(self, message, hint=None):
- # type: (bytes, Optional[bytes]) -> None
- self.message = message
- self.hint = hint
- # Pass the message into the Exception constructor to help extensions
- # that look for exc.args[0].
- Exception.__init__(self, message)
-
- def __bytes__(self):
- return self.message
-
- if pycompat.ispy3:
-
- def __str__(self):
- # the output would be unreadable if the message was translated,
- # but do not replace it with encoding.strfromlocal(), which
- # may raise another exception.
- return pycompat.sysstr(self.__bytes__())
-
- def format(self):
- # type: () -> bytes
- from .i18n import _
-
- message = _(b"abort: %s\n") % self.message
- if self.hint:
- message += _(b"(%s)\n") % self.hint
- return message
-
-
class InputError(Abort):
"""Indicates that the user made an error in their input.
Examples: Invalid command, invalid flags, invalid revision.
"""
+ detailed_exit_code = 10
+
class StateError(Abort):
"""Indicates that the operation might work if retried in a different state.
@@ -227,6 +246,8 @@
Examples: Unresolved merge conflicts, unfinished operations.
"""
+ detailed_exit_code = 20
+
class CanceledError(Abort):
"""Indicates that the user canceled the operation.
@@ -234,6 +255,8 @@
Examples: Close commit editor with error status, quit chistedit.
"""
+ detailed_exit_code = 250
+
class SecurityError(Abort):
"""Indicates that some aspect of security failed.
@@ -242,6 +265,8 @@
filesystem, mismatched GPG signature, DoS protection.
"""
+ detailed_exit_code = 150
+
class HookLoadError(Abort):
"""raised when loading a hook fails, aborting an operation
@@ -254,10 +279,14 @@
Exists to allow more specialized catching."""
+ detailed_exit_code = 40
+
class ConfigError(Abort):
"""Exception raised when parsing config files"""
+ detailed_exit_code = 30
+
def __init__(self, message, location=None, hint=None):
# type: (bytes, Optional[bytes], Optional[bytes]) -> None
super(ConfigError, self).__init__(message, hint=hint)
@@ -307,6 +336,8 @@
class RemoteError(Abort):
"""Exception raised when interacting with a remote repo fails"""
+ detailed_exit_code = 100
+
class OutOfBandError(RemoteError):
"""Exception raised when a remote repo reports failure"""
@@ -325,6 +356,8 @@
class ParseError(Abort):
"""Raised when parsing config files and {rev,file}sets (msg[, pos])"""
+ detailed_exit_code = 10
+
def __init__(self, message, location=None, hint=None):
# type: (bytes, Optional[Union[bytes, int]], Optional[bytes]) -> None
super(ParseError, self).__init__(message, hint=hint)
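
Note: after this refactoring every Abort subclass carries its exit policy
as class attributes: InputError/ParseError 10, StateError 20, ConfigError
30, HookLoadError 40, StorageError 50, RemoteError 100, SecurityError 150,
InterventionRequired 240, CanceledError 250. A hedged sketch of a consumer;
`use_detailed` and `run` are placeholders, and the real dispatch wiring is
not part of this hunk::

    import sys

    from mercurial.error import Error, InputError

    use_detailed = True  # stand-in for whatever selects the new codes

    def run():
        raise InputError(b'no files to copy')  # any Abort subclass

    try:
        run()
    except Error as err:
        sys.stderr.write(err.format().decode('utf-8', 'replace'))
        if use_detailed and err.detailed_exit_code is not None:
            sys.exit(err.detailed_exit_code)
        if err.coarse_exit_code is not None:
            sys.exit(err.coarse_exit_code)
        sys.exit(255)  # the historical catch-all code for aborts
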
--- a/mercurial/exchange.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/exchange.py Mon Jun 07 17:10:35 2021 -0400
@@ -13,7 +13,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
from . import (
@@ -44,6 +43,7 @@
stringutil,
urlutil,
)
+from .interfaces import repository
urlerr = util.urlerr
urlreq = util.urlreq
@@ -164,7 +164,7 @@
hasnode = cl.hasnode
common = [n for n in common if hasnode(n)]
else:
- common = [nullid]
+ common = [repo.nullid]
if not heads:
heads = cl.heads()
return discovery.outgoing(repo, common, heads)
@@ -894,7 +894,7 @@
cgpart.addparam(b'version', version)
if scmutil.istreemanifest(pushop.repo):
cgpart.addparam(b'treemanifest', b'1')
- if b'exp-sidedata-flag' in pushop.repo.requirements:
+ if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
cgpart.addparam(b'exp-sidedata', b'1')
def handlereply(op):
@@ -1839,7 +1839,7 @@
if (
pullop.remote.capable(b'clonebundles')
and pullop.heads is None
- and list(pullop.common) == [nullid]
+ and list(pullop.common) == [pullop.repo.nullid]
):
kwargs[b'cbattempted'] = pullop.clonebundleattempted
@@ -1849,7 +1849,7 @@
pullop.repo.ui.status(_(b"no changes found\n"))
pullop.cgresult = 0
else:
- if pullop.heads is None and list(pullop.common) == [nullid]:
+ if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
pullop.repo.ui.status(_(b"requesting all changes\n"))
if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
@@ -1920,7 +1920,7 @@
pullop.cgresult = 0
return
tr = pullop.gettransaction()
- if pullop.heads is None and list(pullop.common) == [nullid]:
+ if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
pullop.repo.ui.status(_(b"requesting all changes\n"))
elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
# issue1320, avoid a race if remote changed after discovery
@@ -2428,7 +2428,7 @@
if scmutil.istreemanifest(repo):
part.addparam(b'treemanifest', b'1')
- if b'exp-sidedata-flag' in repo.requirements:
+ if repository.REPO_FEATURE_SIDE_DATA in repo.features:
part.addparam(b'exp-sidedata', b'1')
sidedata = bundle2.format_remote_wanted_sidedata(repo)
part.addparam(b'exp-wanted-sidedata', sidedata)
--- a/mercurial/exchangev2.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/exchangev2.py Mon Jun 07 17:10:35 2021 -0400
@@ -11,10 +11,7 @@
import weakref
from .i18n import _
-from .node import (
- nullid,
- short,
-)
+from .node import short
from . import (
bookmarks,
error,
@@ -304,7 +301,7 @@
if set(remoteheads).issubset(common):
fetch = []
- common.discard(nullid)
+ common.discard(repo.nullid)
return common, fetch, remoteheads
@@ -413,7 +410,7 @@
# Linknode is always itself for changesets.
cset[b'node'],
# We always send full revisions. So delta base is not set.
- nullid,
+ repo.nullid,
mdiff.trivialdiffheader(len(data)) + data,
# Flags not yet supported.
0,
@@ -478,7 +475,7 @@
basenode = manifest[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
@@ -610,7 +607,7 @@
basenode = filerevision[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
@@ -705,7 +702,7 @@
basenode = filerevision[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
--- a/mercurial/exewrapper.c Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/exewrapper.c Mon Jun 07 17:10:35 2021 -0400
@@ -48,7 +48,7 @@
int(__cdecl * Py_Main)(int argc, TCHAR *argv[]);
#if PY_MAJOR_VERSION >= 3
- Py_LegacyWindowsStdioFlag = 1;
+ _wputenv(L"PYTHONLEGACYWINDOWSSTDIO=1");
#endif
if (GetModuleFileName(NULL, pyscript, _countof(pyscript)) == 0) {
--- a/mercurial/filelog.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/filelog.py Mon Jun 07 17:10:35 2021 -0400
@@ -8,10 +8,7 @@
from __future__ import absolute_import
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
revlog,
@@ -21,18 +18,24 @@
util as interfaceutil,
)
from .utils import storageutil
+from .revlogutils import (
+ constants as revlog_constants,
+)
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
def __init__(self, opener, path):
self._revlog = revlog.revlog(
- opener, b'/'.join((b'data', path + b'.i')), censorable=True
+ opener,
+ # XXX should use the unencoded path
+ target=(revlog_constants.KIND_FILELOG, path),
+ radix=b'/'.join((b'data', path)),
+ censorable=True,
)
# Full name of the user visible file, relative to the repository root.
# Used by LFS.
self._revlog.filename = path
- self._revlog.revlog_kind = b'filelog'
self.nullid = self._revlog.nullid
def __len__(self):
@@ -42,7 +45,7 @@
return self._revlog.__iter__()
def hasnode(self, node):
- if node in (nullid, nullrev):
+ if node in (self.nullid, nullrev):
return False
try:
@@ -68,7 +71,7 @@
def lookup(self, node):
return storageutil.fileidlookup(
- self._revlog, node, self._revlog.indexfile
+ self._revlog, node, self._revlog.display_id
)
def linkrev(self, rev):
@@ -225,18 +228,6 @@
storedsize=storedsize,
)
- # TODO these aren't part of the interface and aren't internal methods.
- # Callers should be fixed to not use them.
-
- # Used by bundlefilelog, unionfilelog.
- @property
- def indexfile(self):
- return self._revlog.indexfile
-
- @indexfile.setter
- def indexfile(self, value):
- self._revlog.indexfile = value
-
# Used by repo upgrade.
def clone(self, tr, destrevlog, **kwargs):
if not isinstance(destrevlog, filelog):
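
Note: revlogs are now constructed from a (kind, entity) target plus an
on-disk radix instead of an explicit index filename; the '.i'/'.d' suffixes
are derived from the radix. Both call sites in this patch follow the same
shape, e.g. for a filelog (`opener` is a vfs and `path` the tracked file)::

    from mercurial import revlog
    from mercurial.revlogutils import constants as revlog_constants

    rl = revlog.revlog(
        opener,
        target=(revlog_constants.KIND_FILELOG, path),
        radix=b'/'.join((b'data', path)),
        censorable=True,
    )
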
--- a/mercurial/filemerge.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/filemerge.py Mon Jun 07 17:10:35 2021 -0400
@@ -15,7 +15,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
from .pycompat import (
@@ -111,7 +110,7 @@
return None
def filenode(self):
- return nullid
+ return self._ctx.repo().nullid
_customcmp = True
--- a/mercurial/help.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/help.py Mon Jun 07 17:10:35 2021 -0400
@@ -540,6 +540,12 @@
TOPIC_CATEGORY_CONCEPTS,
),
(
+ [b"evolution"],
+ _(b"Safely rewriting history (EXPERIMENTAL)"),
+ loaddoc(b'evolution'),
+ TOPIC_CATEGORY_CONCEPTS,
+ ),
+ (
[b'scripting'],
_(b'Using Mercurial from scripts and automation'),
loaddoc(b'scripting'),
--- a/mercurial/helptext/config.txt Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/helptext/config.txt Mon Jun 07 17:10:35 2021 -0400
@@ -5,7 +5,7 @@
===============
If you're having problems with your configuration,
-:hg:`config --debug` can help you understand what is introducing
+:hg:`config --source` can help you understand what is introducing
a setting into your environment.
See :hg:`help config.syntax` and :hg:`help config.files`
@@ -1718,6 +1718,12 @@
The following sub-options can be defined:
+``multi-urls``
+ A boolean option. When enabled, the value of the `[paths]` entry will be
+ parsed as a list and the alias will resolve to multiple destinations. If
+ some of the list entries use the `path://` syntax, their sub-options will
+ be inherited individually.
+
``pushurl``
The URL to use for push operations. If not defined, the location
defined by the path's main entry is used.
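
Note: a concrete example of the ``multi-urls`` sub-option described above;
the host names are invented::

    [paths]
    default = https://one.example/repo https://two.example/repo
    default:multi-urls = yes

With this configuration the `default` alias resolves to both URLs rather
than to a single location containing a space.
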
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/evolution.txt Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,56 @@
+Obsolescence markers make it possible to mark changesets that have been
+deleted or superseded in a new version of the changeset.
+
+Unlike the previous way of handling such changes, by stripping the old
+changesets from the repository, obsolescence markers can be propagated
+between repositories. This allows for a safe and simple way of exchanging
+mutable history and altering it after the fact. Changeset phases are
+respected, such that only draft and secret changesets can be altered (see
+:hg:`help phases` for details).
+
+Obsolescence is tracked using "obsolescence markers", a piece of metadata
+tracking which changesets have been made obsolete, potential successors for
+a given changeset, the moment the changeset was marked as obsolete, and the
+user who performed the rewriting operation. The markers are stored
+separately from standard changeset data and can be exchanged without any of the
+precursor changesets, preventing unnecessary exchange of obsolescence data.
+
+The complete set of obsolescence markers describes a history of changeset
+modifications that is orthogonal to the repository history of file
+modifications. This changeset history allows for detection and automatic
+resolution of edge cases arising from multiple users rewriting the same part
+of history concurrently.
+
+Current feature status
+======================
+
+This feature is still in development.
+
+Instability
+===========
+
+Rewriting changesets might introduce instability.
+
+There are two main kinds of instability: orphaning and diverging.
+
+Orphans are changesets left behind when their ancestors are rewritten.
+Divergence has two variants:
+
+* Content-divergence occurs when independent rewrites of the same changesets
+ lead to different results.
+
+* Phase-divergence occurs when the old (obsolete) version of a changeset
+ becomes public.
+
+It is possible to prevent local creation of orphans by using the following config::
+
+ [experimental]
+ evolution.createmarkers = true
+ evolution.exchange = true
+
+To explicitly allow rewriting operations that may create unstable
+changesets, enable evolution.allowunstable as well::
+
+ [experimental]
+ evolution.createmarkers = true
+ evolution.exchange = true
+ evolution.allowunstable = true
--- a/mercurial/helptext/internals/changegroups.txt Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/helptext/internals/changegroups.txt Mon Jun 07 17:10:35 2021 -0400
@@ -2,12 +2,13 @@
the changelog data, root/flat manifest data, treemanifest data, and
filelogs.
-There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
+There are 4 versions of changegroups: ``1``, ``2``, ``3`` and ``4``. From a
high-level, versions ``1`` and ``2`` are almost exactly the same, with the
only difference being an additional item in the *delta header*. Version
``3`` adds support for storage flags in the *delta header* and optionally
exchanging treemanifests (enabled by setting an option on the
-``changegroup`` part in the bundle2).
+``changegroup`` part in the bundle2). Version ``4`` adds support for exchanging
+sidedata (additional revision metadata not part of the digest).
Changegroups when not exchanging treemanifests consist of 3 logical
segments::
@@ -74,8 +75,8 @@
entry (either that the recipient already has, or previously specified in the
bundle/changegroup).
-The *delta header* is different between versions ``1``, ``2``, and
-``3`` of the changegroup format.
+The *delta header* is different between versions ``1``, ``2``, ``3`` and ``4``
+of the changegroup format.
Version 1 (headerlen=80)::
@@ -104,6 +105,15 @@
|            |             |             |            |            |           |
+------------------------------------------------------------------------------+
+Version 4 (headerlen=103)::
+
+  +----------+------------------------------------------------------------------------------+
+  |          |            |             |             |            |            |           |
+  |  pflags  |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
+  | (1 byte) | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
+  |          |            |             |             |            |            |           |
+  +----------+------------------------------------------------------------------------------+
+
The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain a
series of *delta*s, densely packed (no separators). These deltas describe a diff
from an existing entry (either that the recipient already has, or previously
@@ -140,12 +150,24 @@
Externally stored. The revision fulltext contains ``key:value`` ``\n``
delimited metadata defining an object stored elsewhere. Used by the LFS
extension.
+4096
+ Contains copy information. This revision changes files in a way that could
+ affect copy tracing. This does *not* affect changegroup handling, but is
+ relevant for other parts of Mercurial.
For historical reasons, the integer values are identical to revlog version 1
per-revision storage flags and correspond to bits being set in this 2-byte
field. Bits were allocated starting from the most-significant bit, hence the
reverse ordering and allocation of these flags.
+The *pflags* (protocol flags) field holds bitwise flags affecting the protocol
+itself. They are first in the header since they may affect the handling of the
+rest of the fields in a future version. They are defined as such:
+
+1 indicates whether to read a chunk of sidedata (of variable length) right
+ after the revision flags.
+
+
Changeset Segment
=================
@@ -166,9 +188,9 @@
Treemanifests Segment
---------------------
-The *treemanifests segment* only exists in changegroup version ``3``, and
-only if the 'treemanifest' param is part of the bundle2 changegroup part
-(it is not possible to use changegroup version 3 outside of bundle2).
+The *treemanifests segment* only exists in changegroup version ``3`` and ``4``,
+and only if the 'treemanifest' param is part of the bundle2 changegroup part
+(it is not possible to use changegroup version 3 or 4 outside of bundle2).
Aside from the filenames in the *treemanifests segment* containing a
trailing ``/`` character, it behaves identically to the *filelogs segment*
(see below). The final sub-segment is followed by an *empty chunk* (logically,
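
Note: taking the pflags-first layout literally, the version-4 delta header
unpacks with a single struct format (1 + 5*20 + 2 = 103 bytes, matching the
headerlen above). A sketch, not necessarily the parser Mercurial itself
uses::

    import struct

    # protocol flags first, then five 20-byte nodes, then 2 bytes of
    # storage flags
    CG4_DELTA_HEADER = struct.Struct(">B20s20s20s20s20sH")
    assert CG4_DELTA_HEADER.size == 103

    header = bytes(103)  # stand-in for 103 bytes read off the wire
    pflags, node, p1, p2, base, link, flags = CG4_DELTA_HEADER.unpack(header)
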
--- a/mercurial/hg.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/hg.py Mon Jun 07 17:10:35 2021 -0400
@@ -16,8 +16,7 @@
from .i18n import _
from .node import (
hex,
- nullhex,
- nullid,
+ sha1nodeconstants,
short,
)
from .pycompat import getattr
@@ -53,6 +52,7 @@
verify as verifymod,
vfs as vfsmod,
)
+from .interfaces import repository as repositorymod
from .utils import (
hashutil,
stringutil,
@@ -772,7 +772,7 @@
},
).result()
- if rootnode != nullid:
+ if rootnode != sha1nodeconstants.nullid:
sharepath = os.path.join(sharepool, hex(rootnode))
else:
ui.status(
@@ -883,7 +883,9 @@
# we need to re-init the repo after manually copying the data
# into it
destpeer = peer(srcrepo, peeropts, dest)
- srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
+ srcrepo.hook(
+ b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
+ )
else:
try:
# only pass ui when no srcrepo
@@ -1053,7 +1055,7 @@
# as the only "bad" outcome would be some slowness. That potential
# slowness already affect reader.
with destrepo.lock():
- destrepo.updatecaches(full=b"post-clone")
+ destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
finally:
release(srclock, destlock)
if cleandir is not None:
@@ -1329,7 +1331,9 @@
for n in chlist:
if limit is not None and count >= limit:
break
- parents = [p for p in other.changelog.parents(n) if p != nullid]
+ parents = [
+ p for p in other.changelog.parents(n) if p != repo.nullid
+ ]
if opts.get(b'no_merges') and len(parents) == 2:
continue
count += 1
@@ -1406,7 +1410,7 @@
for n in revs:
if limit is not None and count >= limit:
break
- parents = [p for p in cl.parents(n) if p != nullid]
+ parents = [p for p in cl.parents(n) if p != repo.nullid]
if no_merges and len(parents) == 2:
continue
count += 1
--- a/mercurial/hgweb/server.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/hgweb/server.py Mon Jun 07 17:10:35 2021 -0400
@@ -344,7 +344,7 @@
try:
import threading
- threading.activeCount() # silence pyflakes and bypass demandimport
+ threading.active_count() # silence pyflakes and bypass demandimport
_mixin = socketserver.ThreadingMixIn
except ImportError:
if util.safehasattr(os, b"fork"):
--- a/mercurial/hgweb/webutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/hgweb/webutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -14,7 +14,7 @@
import re
from ..i18n import _
-from ..node import hex, nullid, short
+from ..node import hex, short
from ..pycompat import setattr
from .common import (
@@ -220,7 +220,7 @@
def _siblings(siblings=None, hiderev=None):
if siblings is None:
siblings = []
- siblings = [s for s in siblings if s.node() != nullid]
+ siblings = [s for s in siblings if s.node() != s.repo().nullid]
if len(siblings) == 1 and siblings[0].rev() == hiderev:
siblings = []
return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
@@ -316,12 +316,16 @@
yield {name: t}
-def showtag(repo, t1, node=nullid):
+def showtag(repo, t1, node=None):
+ if node is None:
+ node = repo.nullid
args = (repo.nodetags, node, b'tag')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
-def showbookmark(repo, t1, node=nullid):
+def showbookmark(repo, t1, node=None):
+ if node is None:
+ node = repo.nullid
args = (repo.nodebookmarks, node, b'bookmark')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
--- a/mercurial/interfaces/dirstate.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/interfaces/dirstate.py Mon Jun 07 17:10:35 2021 -0400
@@ -2,13 +2,19 @@
import contextlib
-from .. import node as nodemod
-
from . import util as interfaceutil
class idirstate(interfaceutil.Interface):
- def __init__(opener, ui, root, validate, sparsematchfn, nodeconstants):
+ def __init__(
+ opener,
+ ui,
+ root,
+ validate,
+ sparsematchfn,
+ nodeconstants,
+ use_dirstate_v2,
+ ):
"""Create a new dirstate object.
opener is an open()-like callable that can be used to open the
@@ -97,7 +103,7 @@
def branch():
pass
- def setparents(p1, p2=nodemod.nullid):
+ def setparents(p1, p2=None):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, 'm' merged entries a
--- a/mercurial/interfaces/repository.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/interfaces/repository.py Mon Jun 07 17:10:35 2021 -0400
@@ -1,4 +1,5 @@
# repository.py - Interfaces and base classes for repositories and peers.
+# coding: utf-8
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
@@ -21,20 +22,20 @@
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
+# Repository supports (at least) some sidedata to be stored
+REPO_FEATURE_SIDE_DATA = b'side-data'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
-REVISION_FLAG_SIDEDATA = 1 << 12
-REVISION_FLAG_HASCOPIESINFO = 1 << 11
+REVISION_FLAG_HASCOPIESINFO = 1 << 12
REVISION_FLAGS_KNOWN = (
REVISION_FLAG_CENSORED
| REVISION_FLAG_ELLIPSIS
| REVISION_FLAG_EXTSTORED
- | REVISION_FLAG_SIDEDATA
| REVISION_FLAG_HASCOPIESINFO
)
@@ -44,6 +45,54 @@
CG_DELTAMODE_P1 = b'p1'
+## Cache related constants:
+#
+# Used to control which cache should be warmed in a repo.updatecaches(…) call.
+
+# Warm branchmaps of all known repoview's filter-level
+CACHE_BRANCHMAP_ALL = b"branchmap-all"
+# Warm branchmaps of repoview's filter-level used by server
+CACHE_BRANCHMAP_SERVED = b"branchmap-served"
+# Warm internal changelog cache (eg: persistent nodemap)
+CACHE_CHANGELOG_CACHE = b"changelog-cache"
+# Warm full manifest cache
+CACHE_FULL_MANIFEST = b"full-manifest"
+# Warm file-node-tags cache
+CACHE_FILE_NODE_TAGS = b"file-node-tags"
+# Warm internal manifestlog cache (eg: persistent nodemap)
+CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
+# Warm rev branch cache
+CACHE_REV_BRANCH = b"rev-branch-cache"
+# Warm tags' cache for the default repoview
+CACHE_TAGS_DEFAULT = b"tags-default"
+# Warm tags' cache for repoview's filter-level used by server
+CACHE_TAGS_SERVED = b"tags-served"
+
+# the cache to warm by default after a simple transaction
+# (this is a mutable set to let extension update it)
+CACHES_DEFAULT = {
+ CACHE_BRANCHMAP_SERVED,
+}
+
+# the caches to warm when warming all of them
+# (this is a mutable set to let extension update it)
+CACHES_ALL = {
+ CACHE_BRANCHMAP_SERVED,
+ CACHE_BRANCHMAP_ALL,
+ CACHE_CHANGELOG_CACHE,
+ CACHE_FILE_NODE_TAGS,
+ CACHE_FULL_MANIFEST,
+ CACHE_MANIFESTLOG_CACHE,
+ CACHE_TAGS_DEFAULT,
+ CACHE_TAGS_SERVED,
+}
+
+# the caches to warm by default after a clone
+# (this is a mutable set to let extensions update it)
+CACHES_POST_CLONE = CACHES_ALL.copy()
+CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
+
+
class ipeerconnection(interfaceutil.Interface):
"""Represents a "connection" to a repository.
@@ -457,6 +506,13 @@
"""Raw sidedata bytes for the given revision."""
)
+ protocol_flags = interfaceutil.Attribute(
+ """Single byte of integer flags that can influence the protocol.
+
+ This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
+ """
+ )
+
class ifilerevisionssequence(interfaceutil.Interface):
"""Contains index data for all revisions of a file.
@@ -1162,13 +1218,6 @@
"""An ``ifilerevisionssequence`` instance."""
)
- indexfile = interfaceutil.Attribute(
- """Path of revlog index file.
-
- TODO this is revlog specific and should not be exposed.
- """
- )
-
opener = interfaceutil.Attribute(
"""VFS opener to use to access underlying files used for storage.
@@ -1176,13 +1225,6 @@
"""
)
- version = interfaceutil.Attribute(
- """Revlog version number.
-
- TODO this is revlog specific and should not be exposed.
- """
- )
-
_generaldelta = interfaceutil.Attribute(
"""Whether generaldelta storage is being used.
@@ -1851,7 +1893,9 @@
def savecommitmessage(text):
pass
- def register_sidedata_computer(kind, category, keys, computer):
+ def register_sidedata_computer(
+ kind, category, keys, computer, flags, replace=False
+ ):
pass
def register_wanted_sidedata(category):
--- a/mercurial/localrepo.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/localrepo.py Mon Jun 07 17:10:35 2021 -0400
@@ -19,7 +19,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
sha1nodeconstants,
short,
@@ -50,7 +49,6 @@
match as matchmod,
mergestate as mergestatemod,
mergeutil,
- metadata as metadatamod,
namespaces,
narrowspec,
obsolete,
@@ -91,6 +89,7 @@
from .revlogutils import (
concurrency_checker as revlogchecker,
constants as revlogconst,
+ sidedata as sidedatamod,
)
release = lockmod.release
@@ -738,6 +737,14 @@
storevfs = store.vfs
storevfs.options = resolvestorevfsoptions(ui, requirements, features)
+ if (
+ requirementsmod.REVLOGV2_REQUIREMENT in requirements
+ or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
+ ):
+ features.add(repository.REPO_FEATURE_SIDE_DATA)
+ # the revlogv2 docket introduced a race condition that we need to fix
+ features.discard(repository.REPO_FEATURE_STREAM_CLONE)
+
# The cache vfs is used to manage cache files.
cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
cachevfs.createmode = store.createmode
@@ -880,6 +887,9 @@
# Start with all requirements supported by this file.
supported = set(localrepository._basesupported)
+ if dirstate.SUPPORTS_DIRSTATE_V2:
+ supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
+
# Execute ``featuresetupfuncs`` entries if they belong to an extension
# relevant to this ui instance.
modules = {m.__name__ for n, m in extensions.extensions(ui)}
@@ -1017,6 +1027,8 @@
options[b'revlogv1'] = True
if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
options[b'revlogv2'] = True
+ if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
+ options[b'changelogv2'] = True
if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
options[b'generaldelta'] = True
@@ -1064,9 +1076,6 @@
if sparserevlog:
options[b'generaldelta'] = True
- sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
- options[b'side-data'] = sidedata
-
maxchainlen = None
if sparserevlog:
maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
@@ -1219,7 +1228,7 @@
requirementsmod.TREEMANIFEST_REQUIREMENT,
requirementsmod.COPIESSDC_REQUIREMENT,
requirementsmod.REVLOGV2_REQUIREMENT,
- requirementsmod.SIDEDATA_REQUIREMENT,
+ requirementsmod.CHANGELOGV2_REQUIREMENT,
requirementsmod.SPARSEREVLOG_REQUIREMENT,
requirementsmod.NODEMAP_REQUIREMENT,
bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
@@ -1408,7 +1417,7 @@
self._wanted_sidedata = set()
self._sidedata_computers = {}
- metadatamod.set_sidedata_spec_for_repo(self)
+ sidedatamod.set_sidedata_spec_for_repo(self)
def _getvfsward(self, origfunc):
"""build a ward for self.vfs"""
@@ -1681,6 +1690,8 @@
def _makedirstate(self):
"""Extension point for wrapping the dirstate per-repo."""
sparsematchfn = lambda: sparse.matcher(self)
+ v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
+ use_dirstate_v2 = v2_req in self.requirements
return dirstate.dirstate(
self.vfs,
@@ -1689,6 +1700,7 @@
self._dirstatevalidate,
sparsematchfn,
self.nodeconstants,
+ use_dirstate_v2,
)
def _dirstatevalidate(self, node):
@@ -1702,7 +1714,7 @@
_(b"warning: ignoring unknown working parent %s!\n")
% short(node)
)
- return nullid
+ return self.nullid
@storecache(narrowspec.FILENAME)
def narrowpats(self):
@@ -1753,9 +1765,9 @@
@unfilteredpropertycache
def _quick_access_changeid_null(self):
return {
- b'null': (nullrev, nullid),
- nullrev: (nullrev, nullid),
- nullid: (nullrev, nullid),
+ b'null': (nullrev, self.nodeconstants.nullid),
+ nullrev: (nullrev, self.nodeconstants.nullid),
+ self.nullid: (nullrev, self.nullid),
}
@unfilteredpropertycache
@@ -1765,7 +1777,7 @@
quick = self._quick_access_changeid_null.copy()
cl = self.unfiltered().changelog
for node in self.dirstate.parents():
- if node == nullid:
+ if node == self.nullid:
continue
rev = cl.index.get_rev(node)
if rev is None:
@@ -1785,7 +1797,7 @@
quick[r] = pair
quick[n] = pair
p1node = self.dirstate.p1()
- if p1node != nullid:
+ if p1node != self.nullid:
quick[b'.'] = quick[p1node]
return quick
@@ -1841,7 +1853,7 @@
# when we know that '.' won't be hidden
node = self.dirstate.p1()
rev = self.unfiltered().changelog.rev(node)
- elif len(changeid) == 20:
+ elif len(changeid) == self.nodeconstants.nodelen:
try:
node = changeid
rev = self.changelog.rev(changeid)
@@ -1862,7 +1874,7 @@
changeid = hex(changeid) # for the error message
raise
- elif len(changeid) == 40:
+ elif len(changeid) == 2 * self.nodeconstants.nodelen:
node = bin(changeid)
rev = self.changelog.rev(node)
else:
@@ -2037,7 +2049,7 @@
# local encoding.
tags = {}
for (name, (node, hist)) in pycompat.iteritems(alltags):
- if node != nullid:
+ if node != self.nullid:
tags[encoding.tolocal(name)] = node
tags[b'tip'] = self.changelog.tip()
tagtypes = {
@@ -2161,7 +2173,9 @@
def wjoin(self, f, *insidef):
return self.vfs.reljoin(self.root, f, *insidef)
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = self.nullid
self[None].setparents(p1, p2)
self._quick_access_changeid_invalidate()
@@ -2718,7 +2732,7 @@
return updater
@unfilteredmethod
- def updatecaches(self, tr=None, full=False):
+ def updatecaches(self, tr=None, full=False, caches=None):
"""warm appropriate caches
If this function is called after a transaction has closed, the transaction
@@ -2738,40 +2752,61 @@
# later call to `destroyed` will refresh them.
return
- if tr is None or tr.changes[b'origrepolen'] < len(self):
- # accessing the 'served' branchmap should refresh all the others,
- self.ui.debug(b'updating the branch cache\n')
- self.filtered(b'served').branchmap()
- self.filtered(b'served.hidden').branchmap()
+ unfi = self.unfiltered()
if full:
- unfi = self.unfiltered()
-
+ msg = (
+ "`full` argument for `repo.updatecaches` is deprecated\n"
+ "(use `caches=repository.CACHE_ALL` instead)"
+ )
+ self.ui.deprecwarn(msg, "5.9")
+ caches = repository.CACHES_ALL
+ if full == b"post-clone":
+ caches = repository.CACHES_POST_CLONE
+ elif caches is None:
+ caches = repository.CACHES_DEFAULT
+
+ if repository.CACHE_BRANCHMAP_SERVED in caches:
+ if tr is None or tr.changes[b'origrepolen'] < len(self):
+ # accessing the 'served' branchmap should refresh all the others,
+ self.ui.debug(b'updating the branch cache\n')
+ self.filtered(b'served').branchmap()
+ self.filtered(b'served.hidden').branchmap()
+
+ if repository.CACHE_CHANGELOG_CACHE in caches:
self.changelog.update_caches(transaction=tr)
+
+ if repository.CACHE_MANIFESTLOG_CACHE in caches:
self.manifestlog.update_caches(transaction=tr)
+ if repository.CACHE_REV_BRANCH in caches:
rbc = unfi.revbranchcache()
for r in unfi.changelog:
rbc.branchinfo(r)
rbc.write()
+ if repository.CACHE_FULL_MANIFEST in caches:
# ensure the working copy parents are in the manifestfulltextcache
for ctx in self[b'.'].parents():
ctx.manifest() # accessing the manifest is enough
- if not full == b"post-clone":
- # accessing fnode cache warms the cache
- tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
+ if repository.CACHE_FILE_NODE_TAGS in caches:
+ # accessing fnode cache warms the cache
+ tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
+
+ if repository.CACHE_TAGS_DEFAULT in caches:
# accessing tags warm the cache
self.tags()
+ if repository.CACHE_TAGS_SERVED in caches:
self.filtered(b'served').tags()
- # The `full` arg is documented as updating even the lazily-loaded
- # caches immediately, so we're forcing a write to cause these caches
- # to be warmed up even if they haven't explicitly been requested
- # yet (if they've never been used by hg, they won't ever have been
- # written, even if they're a subset of another kind of cache that
- # *has* been used).
+ if repository.CACHE_BRANCHMAP_ALL in caches:
+ # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
+ # so we're forcing a write to cause these caches to be warmed up
+ # even if they haven't explicitly been requested yet (if they've
+ # never been used by hg, they won't ever have been written, even if
+ # they're a subset of another kind of cache that *has* been used).
for filt in repoview.filtertable.keys():
filtered = self.filtered(filt)
filtered.branchmap().write(filtered)
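A usage sketch for the reworked entry point (the helper below is illustrative): callers now name the caches they want warmed instead of passing the deprecated `full` flag:

    from mercurial.interfaces import repository

    def warm_server_caches(repo):  # hypothetical helper
        # warm only what a hosting server typically serves from
        repo.updatecaches(
            caches={
                repository.CACHE_BRANCHMAP_SERVED,
                repository.CACHE_TAGS_SERVED,
            }
        )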
@@ -3100,7 +3135,7 @@
subrepoutil.writestate(self, newstate)
p1, p2 = self.dirstate.parents()
- hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
+ hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
try:
self.hook(
b"precommit", throw=True, parent1=hookp1, parent2=hookp2
@@ -3273,7 +3308,7 @@
t = n
while True:
p = self.changelog.parents(n)
- if p[1] != nullid or p[0] == nullid:
+ if p[1] != self.nullid or p[0] == self.nullid:
b.append((t, n, p[0], p[1]))
break
n = p[0]
@@ -3286,7 +3321,7 @@
n, l, i = top, [], 0
f = 1
- while n != bottom and n != nullid:
+ while n != bottom and n != self.nullid:
p = self.changelog.parents(n)[0]
if i == f:
l.append(n)
@@ -3370,20 +3405,32 @@
return self.pathto(fp.name[len(self.root) + 1 :])
def register_wanted_sidedata(self, category):
+ if repository.REPO_FEATURE_SIDE_DATA not in self.features:
+ # Only revlogv2 repos can want sidedata.
+ return
self._wanted_sidedata.add(pycompat.bytestr(category))
- def register_sidedata_computer(self, kind, category, keys, computer):
- if kind not in (b"changelog", b"manifest", b"filelog"):
+ def register_sidedata_computer(
+ self, kind, category, keys, computer, flags, replace=False
+ ):
+ if kind not in revlogconst.ALL_KINDS:
msg = _(b"unexpected revlog kind '%s'.")
raise error.ProgrammingError(msg % kind)
category = pycompat.bytestr(category)
- if category in self._sidedata_computers.get(kind, []):
+ already_registered = category in self._sidedata_computers.get(kind, [])
+ if already_registered and not replace:
msg = _(
b"cannot register a sidedata computer twice for category '%s'."
)
raise error.ProgrammingError(msg % category)
+ if replace and not already_registered:
+ msg = _(
+ b"cannot replace a sidedata computer that isn't registered "
+ b"for category '%s'."
+ )
+ raise error.ProgrammingError(msg % category)
self._sidedata_computers.setdefault(kind, {})
- self._sidedata_computers[kind][category] = (keys, computer)
+ self._sidedata_computers[kind][category] = (keys, computer, flags)
# used to avoid circular references so destructors work
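A sketch of the extended registration contract (the category and computer below are hypothetical): computers now return flag adjustments alongside the sidedata, and `replace=True` is required to override a previously registered computer instead of hitting the ProgrammingError above:

    from mercurial.revlogutils import constants as revlog_constants

    def my_computer(repo, revlog, rev, existing_sidedata):  # hypothetical
        sidedata = {}  # category -> bytes to store for this revision
        flags_to_add = 0
        flags_to_remove = 0
        return sidedata, (flags_to_add, flags_to_remove)

    def setup(repo):
        repo.register_sidedata_computer(
            revlog_constants.KIND_CHANGELOG,  # must be in ALL_KINDS
            b'my-category',                   # hypothetical category
            (b'my-category',),                # keys this computer writes
            my_computer,
            0,  # revision flags the computed data may imply
            replace=True,
        )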
@@ -3507,25 +3554,33 @@
if ui.configbool(b'format', b'sparse-revlog'):
requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
- # experimental config: format.exp-use-side-data
- if ui.configbool(b'format', b'exp-use-side-data'):
- requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
- requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
- requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
+ # experimental config: format.exp-dirstate-v2
+ # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
+ if ui.configbool(b'format', b'exp-dirstate-v2'):
+ if dirstate.SUPPORTS_DIRSTATE_V2:
+ requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
+ else:
+ raise error.Abort(
+ _(
+ b"dirstate v2 format requested by config "
+ b"but not supported (requires Rust extensions)"
+ )
+ )
+
# experimental config: format.exp-use-copies-side-data-changeset
if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
- requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
- requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
- requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
+ requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
if ui.configbool(b'experimental', b'treemanifest'):
requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
+ changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
+ if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
+ requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
+
revlogv2 = ui.config(b'experimental', b'revlogv2')
if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
- # generaldelta is implied by revlogv2.
- requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
# experimental config: format.internal-phase
if ui.configbool(b'format', b'internal-phase'):
--- a/mercurial/logcmdutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/logcmdutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,12 +12,7 @@
import posixpath
from .i18n import _
-from .node import (
- nullid,
- nullrev,
- wdirid,
- wdirrev,
-)
+from .node import nullrev, wdirrev
from .thirdparty import attr
@@ -357,7 +352,7 @@
if self.ui.debugflag:
mnode = ctx.manifestnode()
if mnode is None:
- mnode = wdirid
+ mnode = self.repo.nodeconstants.wdirid
mrev = wdirrev
else:
mrev = self.repo.manifestlog.rev(mnode)
@@ -505,7 +500,11 @@
)
if self.ui.debugflag or b'manifest' in datahint:
- fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
+ fm.data(
+ manifest=fm.hexfunc(
+ ctx.manifestnode() or self.repo.nodeconstants.wdirid
+ )
+ )
if self.ui.debugflag or b'extra' in datahint:
fm.data(extra=fm.formatdict(ctx.extra()))
@@ -991,7 +990,7 @@
"""Return the initial set of revisions to be filtered or followed"""
if wopts.revspec:
revs = scmutil.revrange(repo, wopts.revspec)
- elif wopts.follow and repo.dirstate.p1() == nullid:
+ elif wopts.follow and repo.dirstate.p1() == repo.nullid:
revs = smartset.baseset()
elif wopts.follow:
revs = repo.revs(b'.')
--- a/mercurial/manifest.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/manifest.py Mon Jun 07 17:10:35 2021 -0400
@@ -16,7 +16,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from .pycompat import getattr
@@ -35,6 +34,9 @@
repository,
util as interfaceutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
parsers = policy.importmod('parsers')
propertycache = util.propertycache
@@ -43,7 +45,7 @@
FASTDELTA_TEXTDIFF_THRESHOLD = 1000
-def _parse(data):
+def _parse(nodelen, data):
# This method does a little bit of excessive-looking
# precondition checking. This is so that the behavior of this
# class exactly matches its C counterpart to try and help
@@ -64,7 +66,7 @@
nl -= 1
else:
flags = b''
- if nl not in (40, 64):
+ if nl != 2 * nodelen:
raise ValueError(b'Invalid manifest line')
yield f, bin(n), flags
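A self-contained sketch of the line format being validated here: each manifest line is "<path>\0<hex-node>[flag]", and once an optional one-byte flag is stripped, the hex node must be exactly 2 * nodelen characters (40 for sha1):

    import binascii

    def parse_manifest_line(line, nodelen=20):
        path, rest = line.split(b'\0', 1)
        if rest[-1:] in (b'l', b'x', b't'):  # symlink / exec / tree flag
            hexnode, flag = rest[:-1], rest[-1:]
        else:
            hexnode, flag = rest, b''
        if len(hexnode) != 2 * nodelen:
            raise ValueError(b'Invalid manifest line')
        return path, binascii.unhexlify(hexnode), flag

    assert parse_manifest_line(b'foo/bar.txt\0' + b'aa' * 20) == (
        b'foo/bar.txt',
        b'\xaa' * 20,
        b'',
    )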
@@ -132,7 +134,7 @@
else:
hlen = nlpos - zeropos - 1
flags = b''
- if hlen not in (40, 64):
+ if hlen != 2 * self.lm._nodelen:
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(
data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
@@ -177,12 +179,14 @@
def __init__(
self,
+ nodelen,
data,
positions=None,
extrainfo=None,
extradata=None,
hasremovals=False,
):
+ self._nodelen = nodelen
if positions is None:
self.positions = self.findlines(data)
self.extrainfo = [0] * len(self.positions)
@@ -289,7 +293,7 @@
hlen -= 1
else:
flags = b''
- if hlen not in (40, 64):
+ if hlen != 2 * self._nodelen:
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
return (hashval, flags)
@@ -345,6 +349,7 @@
def copy(self):
# XXX call _compact like in C?
return _lazymanifest(
+ self._nodelen,
self.data,
self.positions,
self.extrainfo,
@@ -455,7 +460,7 @@
def filtercopy(self, filterfn):
# XXX should be optimized
- c = _lazymanifest(b'')
+ c = _lazymanifest(self._nodelen, b'')
for f, n, fl in self.iterentries():
if filterfn(f):
c[f] = n, fl
@@ -470,8 +475,9 @@
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
- def __init__(self, data=b''):
- self._lm = _lazymanifest(data)
+ def __init__(self, nodelen, data=b''):
+ self._nodelen = nodelen
+ self._lm = _lazymanifest(nodelen, data)
def __getitem__(self, key):
return self._lm[key][0]
@@ -579,14 +585,14 @@
return self.copy()
if self._filesfastpath(match):
- m = manifestdict()
+ m = manifestdict(self._nodelen)
lm = self._lm
for fn in match.files():
if fn in lm:
m._lm[fn] = lm[fn]
return m
- m = manifestdict()
+ m = manifestdict(self._nodelen)
m._lm = self._lm.filtercopy(match)
return m
@@ -629,7 +635,7 @@
return b''
def copy(self):
- c = manifestdict()
+ c = manifestdict(self._nodelen)
c._lm = self._lm.copy()
return c
@@ -795,7 +801,8 @@
def __init__(self, nodeconstants, dir=b'', text=b''):
self._dir = dir
self.nodeconstants = nodeconstants
- self._node = nullid
+ self._node = self.nodeconstants.nullid
+ self._nodelen = self.nodeconstants.nodelen
self._loadfunc = _noop
self._copyfunc = _noop
self._dirty = False
@@ -1323,7 +1330,7 @@
def parse(self, text, readsubtree):
selflazy = self._lazydirs
- for f, n, fl in _parse(text):
+ for f, n, fl in _parse(self._nodelen, text):
if fl == b't':
f = f + b'/'
# False below means "doesn't need to be copied" and can use the
@@ -1391,7 +1398,7 @@
continue
subp1 = getnode(m1, d)
subp2 = getnode(m2, d)
- if subp1 == nullid:
+ if subp1 == self.nodeconstants.nullid:
subp1, subp2 = subp2, subp1
writesubtree(subm, subp1, subp2, match)
@@ -1560,7 +1567,6 @@
opener,
tree=b'',
dirlogcache=None,
- indexfile=None,
treemanifest=False,
):
"""Constructs a new manifest revlog
@@ -1591,10 +1597,9 @@
if tree:
assert self._treeondisk, b'opts is %r' % opts
- if indexfile is None:
- indexfile = b'00manifest.i'
- if tree:
- indexfile = b"meta/" + tree + indexfile
+ radix = b'00manifest'
+ if tree:
+ radix = b"meta/" + tree + radix
self.tree = tree
@@ -1606,7 +1611,8 @@
self._revlog = revlog.revlog(
opener,
- indexfile,
+ target=(revlog_constants.KIND_MANIFESTLOG, self.tree),
+ radix=radix,
# only root indexfile is cached
checkambig=not bool(tree),
mmaplargeindex=True,
@@ -1615,9 +1621,7 @@
)
self.index = self._revlog.index
- self.version = self._revlog.version
self._generaldelta = self._revlog._generaldelta
- self._revlog.revlog_kind = b'manifest'
def _setupmanifestcachehooks(self, repo):
"""Persist the manifestfulltextcache on lock release"""
@@ -1901,14 +1905,6 @@
)
@property
- def indexfile(self):
- return self._revlog.indexfile
-
- @indexfile.setter
- def indexfile(self, value):
- self._revlog.indexfile = value
-
- @property
def opener(self):
return self._revlog.opener
@@ -1994,7 +1990,7 @@
else:
m = manifestctx(self, node)
- if node != nullid:
+ if node != self.nodeconstants.nullid:
mancache = self._dirmancache.get(tree)
if not mancache:
mancache = util.lrucachedict(self._cachesize)
@@ -2020,7 +2016,7 @@
class memmanifestctx(object):
def __init__(self, manifestlog):
self._manifestlog = manifestlog
- self._manifestdict = manifestdict()
+ self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
def _storage(self):
return self._manifestlog.getstorage(b'')
@@ -2082,8 +2078,9 @@
def read(self):
if self._data is None:
- if self._node == nullid:
- self._data = manifestdict()
+ nc = self._manifestlog.nodeconstants
+ if self._node == nc.nullid:
+ self._data = manifestdict(nc.nodelen)
else:
store = self._storage()
if self._node in store.fulltextcache:
@@ -2092,7 +2089,7 @@
text = store.revision(self._node)
arraytext = bytearray(text)
store.fulltextcache[self._node] = arraytext
- self._data = manifestdict(text)
+ self._data = manifestdict(nc.nodelen, text)
return self._data
def readfast(self, shallow=False):
@@ -2119,7 +2116,7 @@
store = self._storage()
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
- return manifestdict(d)
+ return manifestdict(store.nodeconstants.nodelen, d)
def find(self, key):
return self.read().find(key)
@@ -2188,7 +2185,7 @@
def read(self):
if self._data is None:
store = self._storage()
- if self._node == nullid:
+ if self._node == self._manifestlog.nodeconstants.nullid:
self._data = treemanifest(self._manifestlog.nodeconstants)
# TODO accessing non-public API
elif store._treeondisk:
@@ -2245,7 +2242,7 @@
if shallow:
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
- return manifestdict(d)
+ return manifestdict(store.nodeconstants.nodelen, d)
else:
# Need to perform a slow delta
r0 = store.deltaparent(store.rev(self._node))
@@ -2274,7 +2271,9 @@
return self.readdelta(shallow=shallow)
if shallow:
- return manifestdict(store.revision(self._node))
+ return manifestdict(
+ store.nodeconstants.nodelen, store.revision(self._node)
+ )
else:
return self.read()
--- a/mercurial/merge.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/merge.py Mon Jun 07 17:10:35 2021 -0400
@@ -13,12 +13,7 @@
import struct
from .i18n import _
-from .node import (
- addednodeid,
- modifiednodeid,
- nullid,
- nullrev,
-)
+from .node import nullrev
from .thirdparty import attr
from .utils import stringutil
from . import (
@@ -779,7 +774,7 @@
# to flag the change. If wctx is a committed revision, we shouldn't
# care for the dirty state of the working directory.
if any(wctx.sub(s).dirty() for s in wctx.substate):
- m1[b'.hgsubstate'] = modifiednodeid
+ m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
# Don't use m2-vs-ma optimization if:
# - ma is the same as m1 or m2, which we're just going to diff again later
@@ -944,7 +939,7 @@
mresult.addcommitinfo(
f, b'merge-removal-candidate', b'yes'
)
- elif n1 == addednodeid:
+ elif n1 == repo.nodeconstants.addednodeid:
# This file was locally added. We should forget it instead of
# deleting it.
mresult.addfile(
@@ -1729,20 +1724,13 @@
removed += msremoved
extraactions = ms.actions()
- if extraactions:
- for k, acts in pycompat.iteritems(extraactions):
- for a in acts:
- mresult.addfile(a[0], k, *a[1:])
- if k == mergestatemod.ACTION_GET and wantfiledata:
- # no filedata until mergestate is updated to provide it
- for a in acts:
- getfiledata[a[0]] = None
progress.complete()
- assert len(getfiledata) == (
- mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
+ return (
+ updateresult(updated, merged, removed, unresolved),
+ getfiledata,
+ extraactions,
)
- return updateresult(updated, merged, removed, unresolved), getfiledata
def _advertisefsmonitor(repo, num_gets, p1node):
@@ -1785,7 +1773,7 @@
if (
fsmonitorwarning
and not fsmonitorenabled
- and p1node == nullid
+ and p1node == repo.nullid
and num_gets >= fsmonitorthreshold
and pycompat.sysplatform.startswith((b'linux', b'darwin'))
):
@@ -1913,7 +1901,7 @@
else:
if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
- pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
+ pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
else:
pas = [p1.ancestor(p2, warn=branchmerge)]
@@ -2112,7 +2100,7 @@
### apply phase
if not branchmerge: # just jump to the new rev
- fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
+ fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
# If we're doing a partial update, we need to skip updating
# the dirstate.
always = matcher is None or matcher.always()
@@ -2127,7 +2115,7 @@
)
wantfiledata = updatedirstate and not branchmerge
- stats, getfiledata = applyupdates(
+ stats, getfiledata, extraactions = applyupdates(
repo,
mresult,
wc,
@@ -2138,6 +2126,18 @@
)
if updatedirstate:
+ if extraactions:
+ for k, acts in pycompat.iteritems(extraactions):
+ for a in acts:
+ mresult.addfile(a[0], k, *a[1:])
+ if k == mergestatemod.ACTION_GET and wantfiledata:
+ # no filedata until mergestate is updated to provide it
+ for a in acts:
+ getfiledata[a[0]] = None
+
+ assert len(getfiledata) == (
+ mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
+ )
with repo.dirstate.parentchange():
repo.setparents(fp1, fp2)
mergestatemod.recordupdates(
@@ -2281,14 +2281,14 @@
if keepconflictparent and stats.unresolvedcount:
pother = ctx.node()
else:
- pother = nullid
+ pother = repo.nullid
parents = ctx.parents()
if keepparent and len(parents) == 2 and base in parents:
parents.remove(base)
pother = parents[0].node()
# Never set both parents equal to each other
if pother == pctx.node():
- pother = nullid
+ pother = repo.nullid
if wctx.isinmemory():
wctx.setparents(pctx.node(), pother)
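A sketch of the new division of labour shown in the hunks above, assuming the surrounding merge.py namespace (e.g. mergestatemod): applyupdates() now returns the mergestate's extra actions, and the caller folds them in only on the dirstate-updating path:

    def fold_extra_actions(mresult, getfiledata, extraactions, wantfiledata):
        # the same bookkeeping the update() hunk above performs inline
        for k, acts in extraactions.items():
            for a in acts:
                mresult.addfile(a[0], k, *a[1:])
            if k == mergestatemod.ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None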
--- a/mercurial/mergestate.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/mergestate.py Mon Jun 07 17:10:35 2021 -0400
@@ -9,7 +9,6 @@
from .node import (
bin,
hex,
- nullhex,
nullrev,
)
from . import (
@@ -32,7 +31,7 @@
def _filectxorabsent(hexnode, ctx, f):
- if hexnode == nullhex:
+ if hexnode == ctx.repo().nodeconstants.nullhex:
return filemerge.absentfilectx(ctx, f)
else:
return ctx[f]
@@ -248,7 +247,7 @@
note: also write the local version to the `.hg/merge` directory.
"""
if fcl.isabsent():
- localkey = nullhex
+ localkey = self._repo.nodeconstants.nullhex
else:
localkey = mergestate.getlocalkey(fcl.path())
self._make_backup(fcl, localkey)
@@ -354,7 +353,7 @@
flags = flo
if preresolve:
# restore local
- if localkey != nullhex:
+ if localkey != self._repo.nodeconstants.nullhex:
self._restore_backup(wctx[dfile], localkey, flags)
else:
wctx[dfile].remove(ignoremissing=True)
@@ -658,7 +657,10 @@
records.append(
(RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
)
- elif v[1] == nullhex or v[6] == nullhex:
+ elif (
+ v[1] == self._repo.nodeconstants.nullhex
+ or v[6] == self._repo.nodeconstants.nullhex
+ ):
# Change/Delete or Delete/Change conflicts. These are stored in
# 'C' records. v[1] is the local file, and is nullhex when the
# file is deleted locally ('dc'). v[6] is the remote file, and
--- a/mercurial/metadata.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/metadata.py Mon Jun 07 17:10:35 2021 -0400
@@ -11,14 +11,9 @@
import multiprocessing
import struct
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
- pycompat,
- requirements as requirementsmod,
util,
)
@@ -617,7 +612,7 @@
if f in ctx:
fctx = ctx[f]
parents = fctx._filelog.parents(fctx._filenode)
- if parents[1] != nullid:
+ if parents[1] != ctx.repo().nullid:
merged.append(f)
return merged
@@ -822,26 +817,9 @@
def copies_sidedata_computer(repo, revlog, rev, existing_sidedata):
- return _getsidedata(repo, rev)[0]
-
-
-def set_sidedata_spec_for_repo(repo):
- if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
- repo.register_wanted_sidedata(sidedatamod.SD_FILES)
- repo.register_sidedata_computer(
- b"changelog",
- sidedatamod.SD_FILES,
- (sidedatamod.SD_FILES,),
- copies_sidedata_computer,
- )
-
-
-def getsidedataadder(srcrepo, destrepo):
- use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
- if pycompat.iswindows or not use_w:
- return _get_simple_sidedata_adder(srcrepo, destrepo)
- else:
- return _get_worker_sidedata_adder(srcrepo, destrepo)
+ sidedata, has_copies_info = _getsidedata(repo, rev)
+ flags_to_add = sidedataflag.REVIDX_HASCOPIESINFO if has_copies_info else 0
+ return sidedata, (flags_to_add, 0)
def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
@@ -910,57 +888,21 @@
# received, we shelve 43 for later use.
staging = {}
- def sidedata_companion(revlog, rev):
- data = {}, False
- if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
- # Is the data previously shelved ?
- data = staging.pop(rev, None)
- if data is None:
- # look at the queued result until we find the one we are lookig
- # for (shelve the other ones)
+ def sidedata_companion(repo, revlog, rev, old_sidedata):
+ # Is the data previously shelved ?
+ data = staging.pop(rev, None)
+ if data is None:
+ # look at the queued result until we find the one we are looking
+ # for (shelve the other ones)
+ r, data = sidedataq.get()
+ while r != rev:
+ staging[r] = data
r, data = sidedataq.get()
- while r != rev:
- staging[r] = data
- r, data = sidedataq.get()
- tokens.release()
+ tokens.release()
sidedata, has_copies_info = data
new_flag = 0
if has_copies_info:
new_flag = sidedataflag.REVIDX_HASCOPIESINFO
- return False, (), sidedata, new_flag, 0
+ return sidedata, (new_flag, 0)
return sidedata_companion
-
-
-def _get_simple_sidedata_adder(srcrepo, destrepo):
- """The simple version of the sidedata computation
-
- It just compute it in the same thread on request"""
-
- def sidedatacompanion(revlog, rev):
- sidedata, has_copies_info = {}, False
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- sidedata, has_copies_info = _getsidedata(srcrepo, rev)
- new_flag = 0
- if has_copies_info:
- new_flag = sidedataflag.REVIDX_HASCOPIESINFO
-
- return False, (), sidedata, new_flag, 0
-
- return sidedatacompanion
-
-
-def getsidedataremover(srcrepo, destrepo):
- def sidedatacompanion(revlog, rev):
- f = ()
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
- f = (
- sidedatamod.SD_P1COPIES,
- sidedatamod.SD_P2COPIES,
- sidedatamod.SD_FILESADDED,
- sidedatamod.SD_FILESREMOVED,
- )
- return False, f, {}, 0, sidedataflag.REVIDX_HASCOPIESINFO
-
- return sidedatacompanion
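A sketch of the return convention these rewrites converge on: both sidedata computers and upgrade companions now hand back the sidedata mapping plus a (flags_to_add, flags_to_remove) pair, replacing the old five-element protocol:

    def example_companion(repo, revlog, rev, old_sidedata):  # hypothetical
        sidedata = {}     # nothing to store for this revision
        flags_to_add = 0  # e.g. sidedataflag.REVIDX_HASCOPIESINFO
        flags_to_remove = 0
        return sidedata, (flags_to_add, flags_to_remove)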
--- a/mercurial/narrowspec.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/narrowspec.py Mon Jun 07 17:10:35 2021 -0400
@@ -346,6 +346,9 @@
ds.drop(f)
pctx = repo[b'.']
+
+ # only update added files that are in the sparse checkout
+ addedmatch = matchmod.intersectmatchers(addedmatch, sparse.matcher(repo))
newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
for f in newfiles:
ds.normallookup(f)
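A sketch of the intersectmatchers() semantics relied on above, with illustrative patterns; the combined matcher only accepts paths that both operands accept, so added files outside the sparse checkout are left alone:

    from mercurial import match as matchmod

    def intersect_example(root):
        inside_sparse = matchmod.match(root, b'', [b'path:src'])
        added = matchmod.match(root, b'', [b'glob:**.py'])
        both = matchmod.intersectmatchers(inside_sparse, added)
        return both(b'src/a.py'), both(b'doc/a.py')  # (True, False)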
--- a/mercurial/obsolete.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/obsolete.py Mon Jun 07 17:10:35 2021 -0400
@@ -73,11 +73,14 @@
import struct
from .i18n import _
+from .node import (
+ bin,
+ hex,
+)
from .pycompat import getattr
from .node import (
bin,
hex,
- nullid,
)
from . import (
encoding,
@@ -103,6 +106,7 @@
# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
+allowdivergenceopt = b'allowdivergence'
exchangeopt = b'exchange'
@@ -141,10 +145,13 @@
createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
unstablevalue = _getoptionvalue(repo, allowunstableopt)
+ divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
exchangevalue = _getoptionvalue(repo, exchangeopt)
# createmarkers must be enabled if other options are enabled
- if (unstablevalue or exchangevalue) and not createmarkersvalue:
+ if (
+ unstablevalue or divergencevalue or exchangevalue
+ ) and not createmarkersvalue:
raise error.Abort(
_(
b"'createmarkers' obsolete option must be enabled "
@@ -155,6 +162,7 @@
return {
createmarkersopt: createmarkersvalue,
allowunstableopt: unstablevalue,
+ allowdivergenceopt: divergencevalue,
exchangeopt: exchangevalue,
}
@@ -526,14 +534,14 @@
children.setdefault(p, set()).add(mark)
-def _checkinvalidmarkers(markers):
+def _checkinvalidmarkers(repo, markers):
"""search for marker with invalid data and raise error if needed
Exists as a separate function to allow the evolve extension to provide
more subtle handling.
"""
for mark in markers:
- if nullid in mark[1]:
+ if repo.nullid in mark[1]:
raise error.Abort(
_(
b'bad obsolescence marker detected: '
@@ -727,7 +735,7 @@
return []
self._version, markers = _readmarkers(data)
markers = list(markers)
- _checkinvalidmarkers(markers)
+ _checkinvalidmarkers(self.repo, markers)
return markers
@propertycache
@@ -761,7 +769,7 @@
_addpredecessors(self.predecessors, markers)
if self._cached('children'):
_addchildren(self.children, markers)
- _checkinvalidmarkers(markers)
+ _checkinvalidmarkers(self.repo, markers)
def relevantmarkers(self, nodes):
"""return a set of all obsolescence markers relevant to a set of nodes.
--- a/mercurial/patch.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/patch.py Mon Jun 07 17:10:35 2021 -0400
@@ -20,7 +20,7 @@
from .i18n import _
from .node import (
hex,
- nullhex,
+ sha1nodeconstants,
short,
)
from .pycompat import open
@@ -3100,8 +3100,8 @@
ctx1, fctx1, path1, flag1, content1, date1 = data1
ctx2, fctx2, path2, flag2, content2, date2 = data2
- index1 = _gitindex(content1) if path1 in ctx1 else nullhex
- index2 = _gitindex(content2) if path2 in ctx2 else nullhex
+ index1 = _gitindex(content1) if path1 in ctx1 else sha1nodeconstants.nullhex
+ index2 = _gitindex(content2) if path2 in ctx2 else sha1nodeconstants.nullhex
if binary and opts.git and not opts.nobinary:
text = mdiff.b85diff(content1, content2)
if text:
--- a/mercurial/phases.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/phases.py Mon Jun 07 17:10:35 2021 -0400
@@ -109,7 +109,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
wdirrev,
@@ -862,7 +861,7 @@
node = bin(nhex)
phase = int(phase)
if phase == public:
- if node != nullid:
+ if node != repo.nullid:
repo.ui.warn(
_(
b'ignoring inconsistent public root'
@@ -919,10 +918,10 @@
rev = cl.index.get_rev
if not roots:
return heads
- if not heads or heads == [nullid]:
+ if not heads or heads == [repo.nullid]:
return []
# The logic operated on revisions, convert arguments early for convenience
- new_heads = {rev(n) for n in heads if n != nullid}
+ new_heads = {rev(n) for n in heads if n != repo.nullid}
roots = [rev(n) for n in roots]
# compute the area we need to remove
affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
--- a/mercurial/policy.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/policy.py Mon Jun 07 17:10:35 2021 -0400
@@ -80,7 +80,7 @@
('cext', 'bdiff'): 3,
('cext', 'mpatch'): 1,
('cext', 'osutil'): 4,
- ('cext', 'parsers'): 17,
+ ('cext', 'parsers'): 20,
}
# map import request to other package or module
--- a/mercurial/pure/parsers.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/pure/parsers.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,9 +10,14 @@
import struct
import zlib
-from ..node import nullid, nullrev
+from ..node import (
+ nullrev,
+ sha1nodeconstants,
+)
from .. import (
+ error,
pycompat,
+ revlogutils,
util,
)
@@ -38,11 +43,9 @@
return int(q & 0xFFFF)
-def offset_type(offset, type):
- return int(int(offset) << 16 | type)
-
-
class BaseIndexObject(object):
+ # Can I be passed to an algorithm implemented in Rust?
+ rust_ext_compat = 0
# Format of an index entry according to Python's `struct` language
index_format = revlog_constants.INDEX_ENTRY_V1
# Size of a C unsigned long long int, platform independent
@@ -50,7 +53,20 @@
# Size of a C long int, platform independent
int_size = struct.calcsize(b'>i')
# An empty index entry, used as a default value to be overridden, or nullrev
- null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+ null_item = (
+ 0,
+ 0,
+ 0,
+ -1,
+ -1,
+ -1,
+ -1,
+ sha1nodeconstants.nullid,
+ 0,
+ 0,
+ revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.COMP_MODE_INLINE,
+ )
@util.propertycache
def entry_size(self):
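For reference, an annotation of the twelve tuple slots used by null_item and the packing helpers below (paraphrasing the layout this series documents in revlogutils/constants.py; the summaries are descriptive, not normative):

    #  [0] data-file offset (high bits) and flags (low 16 bits), one int
    #  [1] compressed size of the revision data
    #  [2] uncompressed size of the revision data
    #  [3] base revision of the delta chain
    #  [4] link revision
    #  [5] first parent revision
    #  [6] second parent revision
    #  [7] node id
    #  [8] sidedata offset
    #  [9] sidedata size
    # [10] data compression mode     (COMP_MODE_* constant)
    # [11] sidedata compression mode (COMP_MODE_* constant)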
@@ -64,7 +80,7 @@
@util.propertycache
def _nodemap(self):
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
+ nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
for r in range(0, len(self)):
n = self[r][7]
nodemap[n] = r
@@ -101,9 +117,14 @@
def append(self, tup):
if '_nodemap' in vars(self):
self._nodemap[tup[7]] = len(self)
- data = self.index_format.pack(*tup)
+ data = self._pack_entry(len(self), tup)
self._extra.append(data)
+ def _pack_entry(self, rev, entry):
+ assert entry[8] == 0
+ assert entry[9] == 0
+ return self.index_format.pack(*entry[:8])
+
def _check_index(self, i):
if not isinstance(i, int):
raise TypeError(b"expecting int indexes")
@@ -119,15 +140,43 @@
else:
index = self._calculate_index(i)
data = self._data[index : index + self.entry_size]
- r = self.index_format.unpack(data)
+ r = self._unpack_entry(i, data)
if self._lgt and i == 0:
- r = (offset_type(0, gettype(r[0])),) + r[1:]
+ offset = revlogutils.offset_type(0, gettype(r[0]))
+ r = (offset,) + r[1:]
+ return r
+
+ def _unpack_entry(self, rev, data):
+ r = self.index_format.unpack(data)
+ r = r + (
+ 0,
+ 0,
+ revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.COMP_MODE_INLINE,
+ )
return r
+ def pack_header(self, header):
+ """pack header information as binary"""
+ v_fmt = revlog_constants.INDEX_HEADER
+ return v_fmt.pack(header)
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
+ if rev == 0:
+ p = p[revlog_constants.INDEX_HEADER.size :]
+ return p
+
class IndexObject(BaseIndexObject):
def __init__(self, data):
- assert len(data) % self.entry_size == 0
+ assert len(data) % self.entry_size == 0, (
+ len(data),
+ self.entry_size,
+ len(data) % self.entry_size,
+ )
self._data = data
self._lgt = len(data) // self.entry_size
self._extra = []
@@ -240,64 +289,92 @@
if not inline:
cls = IndexObject2 if revlogv2 else IndexObject
return cls(data), None
- cls = InlinedIndexObject2 if revlogv2 else InlinedIndexObject
+ cls = InlinedIndexObject
return cls(data, inline), (0, data)
-class Index2Mixin(object):
+def parse_index_cl_v2(data):
+ return IndexChangelogV2(data), None
+
+
+class IndexObject2(IndexObject):
index_format = revlog_constants.INDEX_ENTRY_V2
- null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
- def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
+ def replace_sidedata_info(
+ self,
+ rev,
+ sidedata_offset,
+ sidedata_length,
+ offset_flags,
+ compression_mode,
+ ):
"""
Replace an existing index entry's sidedata offset and length with new
ones.
This cannot be used outside of the context of sidedata rewriting,
- inside the transaction that creates the revision `i`.
+ inside the transaction that creates the revision `rev`.
"""
- if i < 0:
+ if rev < 0:
raise KeyError
- self._check_index(i)
- sidedata_format = b">Qi"
- packed_size = struct.calcsize(sidedata_format)
- if i >= self._lgt:
- packed = _pack(sidedata_format, sidedata_offset, sidedata_length)
- old = self._extra[i - self._lgt]
- new = old[:64] + packed + old[64 + packed_size :]
- self._extra[i - self._lgt] = new
- else:
+ self._check_index(rev)
+ if rev < self._lgt:
msg = b"cannot rewrite entries outside of this transaction"
raise KeyError(msg)
+ else:
+ entry = list(self[rev])
+ entry[0] = offset_flags
+ entry[8] = sidedata_offset
+ entry[9] = sidedata_length
+ entry[11] = compression_mode
+ entry = tuple(entry)
+ new = self._pack_entry(rev, entry)
+ self._extra[rev - self._lgt] = new
+ def _unpack_entry(self, rev, data):
+ data = self.index_format.unpack(data)
+ entry = data[:10]
+ data_comp = data[10] & 3
+ sidedata_comp = (data[10] & (3 << 2)) >> 2
+ return entry + (data_comp, sidedata_comp)
-class IndexObject2(Index2Mixin, IndexObject):
- pass
+ def _pack_entry(self, rev, entry):
+ data = entry[:10]
+ data_comp = entry[10] & 3
+ sidedata_comp = (entry[11] & 3) << 2
+ data += (data_comp | sidedata_comp,)
+
+ return self.index_format.pack(*data)
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ return self._pack_entry(rev, entry)
+
+ def pack_header(self, header):
+ """pack header information as binary"""
+ msg = 'version header should go in the docket, not the index: %d'
+ msg %= header
+ raise error.ProgrammingError(msg)
-class InlinedIndexObject2(Index2Mixin, InlinedIndexObject):
- def _inline_scan(self, lgt):
- sidedata_length_pos = 72
- off = 0
- if lgt is not None:
- self._offsets = [0] * lgt
- count = 0
- while off <= len(self._data) - self.entry_size:
- start = off + self.big_int_size
- (data_size,) = struct.unpack(
- b'>i',
- self._data[start : start + self.int_size],
- )
- start = off + sidedata_length_pos
- (side_data_size,) = struct.unpack(
- b'>i', self._data[start : start + self.int_size]
- )
- if lgt is not None:
- self._offsets[count] = off
- count += 1
- off += self.entry_size + data_size + side_data_size
- if off != len(self._data):
- raise ValueError(b"corrupted data")
- return count
+class IndexChangelogV2(IndexObject2):
+ index_format = revlog_constants.INDEX_ENTRY_CL_V2
+
+ def _unpack_entry(self, rev, data, r=True):
+ items = self.index_format.unpack(data)
+ entry = items[:3] + (rev, rev) + items[3:8]
+ data_comp = items[8] & 3
+ sidedata_comp = (items[8] >> 2) & 3
+ return entry + (data_comp, sidedata_comp)
+
+ def _pack_entry(self, rev, entry):
+ assert entry[3] == rev, entry[3]
+ assert entry[4] == rev, entry[4]
+ data = entry[:3] + entry[5:10]
+ data_comp = entry[10] & 3
+ sidedata_comp = (entry[11] & 3) << 2
+ data += (data_comp | sidedata_comp,)
+ return self.index_format.pack(*data)
def parse_index_devel_nodemap(data, inline):
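A worked example of the compression-mode packing shared by _pack_entry and _unpack_entry above: the two 2-bit modes share a single byte, the data mode in bits 0-1 and the sidedata mode in bits 2-3 (the COMP_MODE_INLINE value is assumed for illustration):

    COMP_MODE_INLINE = 2  # assumed value, for illustration only

    data_comp = COMP_MODE_INLINE & 3
    sidedata_comp = (COMP_MODE_INLINE & 3) << 2
    packed = data_comp | sidedata_comp  # 0b1010

    assert packed & 3 == COMP_MODE_INLINE
    assert (packed >> 2) & 3 == COMP_MODE_INLINE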
--- a/mercurial/repoview.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/repoview.py Mon Jun 07 17:10:35 2021 -0400
@@ -333,7 +333,7 @@
r = super(filteredchangelogmixin, self).rev(node)
if r in self.filteredrevs:
raise error.FilteredLookupError(
- hex(node), self.indexfile, _(b'filtered node')
+ hex(node), self.display_id, _(b'filtered node')
)
return r
--- a/mercurial/requirements.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/requirements.py Mon Jun 07 17:10:35 2021 -0400
@@ -12,6 +12,8 @@
STORE_REQUIREMENT = b'store'
FNCACHE_REQUIREMENT = b'fncache'
+DIRSTATE_V2_REQUIREMENT = b'exp-dirstate-v2'
+
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'
@@ -30,6 +32,10 @@
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
+CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
+
+# Increment the sub-version when the revlog v2 format changes to lock out old
+# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
# A repository with the sparserevlog feature will have delta chains that
@@ -41,10 +47,6 @@
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
-# A repository with the sidedataflag requirement will allow to store extra
-# information for revision without altering their original hashes.
-SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
-
# A repository with the the copies-sidedata-changeset requirement will store
# copies related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
@@ -74,9 +76,12 @@
# repo. Hence both of them should be stored in working copy
# * SHARESAFE_REQUIREMENT needs to be stored in working dir to mark that rest of
# the requirements are stored in store's requires
+# * DIRSTATE_V2_REQUIREMENT affects .hg/dirstate, of which there is one per
+# working directory.
WORKING_DIR_REQUIREMENTS = {
SPARSE_REQUIREMENT,
SHARED_REQUIREMENT,
RELATIVE_SHARED_REQUIREMENT,
SHARESAFE_REQUIREMENT,
+ DIRSTATE_V2_REQUIREMENT,
}
--- a/mercurial/revlog.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revlog.py Mon Jun 07 17:10:35 2021 -0400
@@ -1,4 +1,5 @@
# revlog.py - storage back-end for mercurial
+# coding: utf8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
@@ -26,25 +27,24 @@
from .node import (
bin,
hex,
- nullhex,
- nullid,
nullrev,
sha1nodeconstants,
short,
- wdirfilenodeids,
- wdirhex,
- wdirid,
wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
+ ALL_KINDS,
+ CHANGELOGV2,
+ COMP_MODE_DEFAULT,
+ COMP_MODE_INLINE,
+ COMP_MODE_PLAIN,
+ FEATURES_BY_VERSION,
FLAG_GENERALDELTA,
FLAG_INLINE_DATA,
- INDEX_ENTRY_V0,
- INDEX_ENTRY_V1,
- INDEX_ENTRY_V2,
INDEX_HEADER,
+ KIND_CHANGELOG,
REVLOGV0,
REVLOGV1,
REVLOGV1_FLAGS,
@@ -53,6 +53,7 @@
REVLOG_DEFAULT_FLAGS,
REVLOG_DEFAULT_FORMAT,
REVLOG_DEFAULT_VERSION,
+ SUPPORTED_FLAGS,
)
from .revlogutils.flagutil import (
REVIDX_DEFAULT_FLAGS,
@@ -62,7 +63,6 @@
REVIDX_HASCOPIESINFO,
REVIDX_ISCENSORED,
REVIDX_RAWTEXT_CHANGING_FLAGS,
- REVIDX_SIDEDATA,
)
from .thirdparty import attr
from . import (
@@ -72,6 +72,7 @@
mdiff,
policy,
pycompat,
+ revlogutils,
templatefilters,
util,
)
@@ -80,9 +81,12 @@
util as interfaceutil,
)
from .revlogutils import (
+ censor,
deltas as deltautil,
+ docket as docketutil,
flagutil,
nodemap as nodemaputil,
+ revlogv0,
sidedata as sidedatautil,
)
from .utils import (
@@ -92,6 +96,7 @@
# blanked usage of all the name to prevent pyflakes constraints
# We need these name available in the module for extensions.
+
REVLOGV0
REVLOGV1
REVLOGV2
@@ -104,7 +109,6 @@
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
-REVIDX_SIDEDATA
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
@@ -143,20 +147,6 @@
)
-def getoffset(q):
- return int(q >> 16)
-
-
-def gettype(q):
- return int(q & 0xFFFF)
-
-
-def offset_type(offset, type):
- if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
- raise ValueError(b'unknown revlog index flags')
- return int(int(offset) << 16 | type)
-
-
def _verify_revision(rl, skipflags, state, node):
"""Verify the integrity of the given revlog ``node`` while providing a hook
point for extensions to influence the operation."""
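The offset/type helpers deleted here move into the new revlogutils package; a worked example of the bit layout (simplified from the removed code, omitting its unknown-flag validation):

    def offset_type(offset, type):
        return int(int(offset) << 16 | type)

    def getoffset(q):
        return int(q >> 16)

    def gettype(q):
        return int(q & 0xFFFF)

    q = offset_type(1024, 1 << 15)  # offset 1024, REVISION_FLAG_CENSORED
    assert getoffset(q) == 1024
    assert gettype(q) == 1 << 15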
@@ -177,27 +167,6 @@
)
-@attr.s(slots=True, frozen=True)
-class _revisioninfo(object):
- """Information about a revision that allows building its fulltext
- node: expected hash of the revision
- p1, p2: parent revs of the revision
- btext: built text cache consisting of a one-element list
- cachedelta: (baserev, uncompressed_delta) or None
- flags: flags associated to the revision storage
-
- One of btext[0] or cachedelta must be set.
- """
-
- node = attr.ib()
- p1 = attr.ib()
- p2 = attr.ib()
- btext = attr.ib()
- textlen = attr.ib()
- cachedelta = attr.ib()
- flags = attr.ib()
-
-
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
@@ -210,6 +179,7 @@
revision = attr.ib()
delta = attr.ib()
sidedata = attr.ib()
+ protocol_flags = attr.ib()
linknode = attr.ib(default=None)
@@ -221,161 +191,55 @@
node = attr.ib(default=None)
-class revlogoldindex(list):
- entry_size = INDEX_ENTRY_V0.size
-
- @property
- def nodemap(self):
- msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
- util.nouideprecwarn(msg, b'5.3', stacklevel=2)
- return self._nodemap
-
- @util.propertycache
- def _nodemap(self):
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
- for r in range(0, len(self)):
- n = self[r][7]
- nodemap[n] = r
- return nodemap
-
- def has_node(self, node):
- """return True if the node exist in the index"""
- return node in self._nodemap
-
- def rev(self, node):
- """return a revision for a node
-
- If the node is unknown, raise a RevlogError"""
- return self._nodemap[node]
-
- def get_rev(self, node):
- """return a revision for a node
-
- If the node is unknown, return None"""
- return self._nodemap.get(node)
-
- def append(self, tup):
- self._nodemap[tup[7]] = len(self)
- super(revlogoldindex, self).append(tup)
-
- def __delitem__(self, i):
- if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
- raise ValueError(b"deleting slices only supports a:-1 with step 1")
- for r in pycompat.xrange(i.start, len(self)):
- del self._nodemap[self[r][7]]
- super(revlogoldindex, self).__delitem__(i)
-
- def clearcaches(self):
- self.__dict__.pop('_nodemap', None)
-
- def __getitem__(self, i):
- if i == -1:
- return (0, 0, 0, -1, -1, -1, -1, nullid)
- return list.__getitem__(self, i)
-
-
-class revlogoldio(object):
- def parseindex(self, data, inline):
- s = INDEX_ENTRY_V0.size
- index = []
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
- n = off = 0
- l = len(data)
- while off + s <= l:
- cur = data[off : off + s]
- off += s
- e = INDEX_ENTRY_V0.unpack(cur)
- # transform to revlogv1 format
- e2 = (
- offset_type(e[0], 0),
- e[1],
- -1,
- e[2],
- e[3],
- nodemap.get(e[4], nullrev),
- nodemap.get(e[5], nullrev),
- e[6],
- )
- index.append(e2)
- nodemap[e[6]] = n
- n += 1
-
- index = revlogoldindex(index)
- return index, None
-
- def packentry(self, entry, node, version, rev):
- """return the binary representation of an entry
-
- entry: a tuple containing all the values (see index.__getitem__)
- node: a callback to convert a revision to nodeid
- version: the changelog version
- rev: the revision number
- """
- if gettype(entry[0]):
- raise error.RevlogError(
- _(b'index entry flags need revlog version 1')
- )
- e2 = (
- getoffset(entry[0]),
- entry[1],
- entry[3],
- entry[4],
- node(entry[5]),
- node(entry[6]),
- entry[7],
- )
- return INDEX_ENTRY_V0.pack(*e2)
+def parse_index_v1(data, inline):
+ # call the C implementation to parse the index data
+ index, cache = parsers.parse_index2(data, inline)
+ return index, cache
+
+
+def parse_index_v2(data, inline):
+ # call the C implementation to parse the index data
+ index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+ return index, cache
+
+
+def parse_index_cl_v2(data, inline):
+ # call the C implementation to parse the index data
+ assert not inline
+ from .pure.parsers import parse_index_cl_v2
+
+ index, cache = parse_index_cl_v2(data)
+ return index, cache
+
+
+if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+
+ def parse_index_v1_nodemap(data, inline):
+ index, cache = parsers.parse_index_devel_nodemap(data, inline)
+ return index, cache
+
+
+else:
+ parse_index_v1_nodemap = None
+
+
+def parse_index_v1_mixed(data, inline):
+ index, cache = parse_index_v1(data, inline)
+ return rustrevlog.MixedIndex(index), cache
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF
-
-class revlogio(object):
- def parseindex(self, data, inline):
- # call the C implementation to parse the index data
- index, cache = parsers.parse_index2(data, inline)
- return index, cache
-
- def packentry(self, entry, node, version, rev):
- p = INDEX_ENTRY_V1.pack(*entry)
- if rev == 0:
- p = INDEX_HEADER.pack(version) + p[4:]
- return p
-
-
-class revlogv2io(object):
- def parseindex(self, data, inline):
- index, cache = parsers.parse_index2(data, inline, revlogv2=True)
- return index, cache
-
- def packentry(self, entry, node, version, rev):
- p = INDEX_ENTRY_V2.pack(*entry)
- if rev == 0:
- p = INDEX_HEADER.pack(version) + p[4:]
- return p
-
-
-NodemapRevlogIO = None
-
-if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
-
- class NodemapRevlogIO(revlogio):
- """A debug oriented IO class that return a PersistentNodeMapIndexObject
-
- The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
- """
-
- def parseindex(self, data, inline):
- index, cache = parsers.parse_index_devel_nodemap(data, inline)
- return index, cache
-
-
-class rustrevlogio(revlogio):
- def parseindex(self, data, inline):
- index, cache = super(rustrevlogio, self).parseindex(data, inline)
- return rustrevlog.MixedIndex(index), cache
+PARTIAL_READ_MSG = _(
+ b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
+)
+
+FILE_TOO_SHORT_MSG = _(
+ b'cannot read from revlog %s;'
+ b' expected %d bytes from offset %d, data size is %d'
+)
class revlog(object):
@@ -419,6 +283,9 @@
file handle, a filename, and an expected position. It should check whether
the current position in the file handle is valid, and log/warn/fail (by
raising).
+
+ See mercurial/revlogutils/constants.py for details about the content of an
+ index entry.
"""
_flagserrorclass = error.RevlogError
@@ -426,14 +293,16 @@
def __init__(
self,
opener,
- indexfile,
- datafile=None,
+ target,
+ radix,
+ postfix=None, # only exists for `tmpcensored` now
checkambig=False,
mmaplargeindex=False,
censorable=False,
upperboundcomp=None,
persistentnodemap=False,
concurrencychecker=None,
+ trypending=False,
):
"""
create a revlog object
@@ -441,17 +310,31 @@
opener is a function that abstracts the file opening operation
and can be used to implement COW semantics or the like.
+ `target`: a (KIND, ID) tuple that identifies the content stored in
+ this revlog. It helps the rest of the code understand what the revlog
+ is about without having to resort to heuristics and index filename
+ analysis. Note that this must reliably be set by normal code, but
+ that test, debug, or performance measurement code might not set it to
+ an accurate value.
"""
self.upperboundcomp = upperboundcomp
- self.indexfile = indexfile
- self.datafile = datafile or (indexfile[:-2] + b".d")
- self.nodemap_file = None
+
+ self.radix = radix
+
+ self._docket_file = None
+ self._indexfile = None
+ self._datafile = None
+ self._sidedatafile = None
+ self._nodemap_file = None
+ self.postfix = postfix
+ self._trypending = trypending
+ self.opener = opener
if persistentnodemap:
- self.nodemap_file = nodemaputil.get_nodemap_file(
- opener, self.indexfile
- )
-
- self.opener = opener
+ self._nodemap_file = nodemaputil.get_nodemap_file(self)
+
+ assert target[0] in ALL_KINDS
+ assert len(target) == 2
+ self.target = target
# When True, indexfile is opened with checkambig=True at writing, to
# avoid file stat ambiguity.
self._checkambig = checkambig
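A construction sketch for the new contract (the helper and radix below are illustrative; compare the manifest hunk earlier, which passes KIND_MANIFESTLOG and b'00manifest'): callers identify a revlog by a (kind, identifier) target plus a filename radix, and the index/data paths are derived internally:

    from mercurial import revlog
    from mercurial.revlogutils import constants as revlog_constants

    def open_filelog(opener, path):  # hypothetical helper
        return revlog.revlog(
            opener,
            target=(revlog_constants.KIND_FILELOG, path),
            radix=b'data/' + path,
            censorable=True,
        )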
@@ -468,6 +351,7 @@
self._maxchainlen = None
self._deltabothparents = True
self.index = None
+ self._docket = None
self._nodemap_docket = None
# Mapping of partial identifiers to full nodes.
self._pcache = {}
@@ -477,6 +361,7 @@
self._maxdeltachainspan = -1
self._withsparseread = False
self._sparserevlog = False
+ self.hassidedata = False
self._srdensitythreshold = 0.50
self._srmingapsize = 262144
@@ -484,27 +369,46 @@
# custom flags.
self._flagprocessors = dict(flagutil.flagprocessors)
- # 2-tuple of file handles being used for active writing.
+ # 3-tuple of file handles being used for active writing.
self._writinghandles = None
+ # prevent nesting of addgroup
+ self._adding_group = None
self._loadindex()
self._concurrencychecker = concurrencychecker
- def _loadindex(self):
+ def _init_opts(self):
+ """process options (from above/config) to setup associated default revlog mode
+
+ These values might be affected when actually reading on disk information.
+
+ The relevant values are returned for use in _loadindex().
+
+ * newversionflags:
+ version header to use if we need to create a new revlog
+
+ * mmapindexthreshold:
+ minimal index size at which to start using mmap
+
+ * force_nodemap:
+ force the usage of a "development" version of the nodemap code
+ """
mmapindexthreshold = None
opts = self.opener.options
- if b'revlogv2' in opts:
- newversionflags = REVLOGV2 | FLAG_INLINE_DATA
+ if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
+ new_header = CHANGELOGV2
+ elif b'revlogv2' in opts:
+ new_header = REVLOGV2
elif b'revlogv1' in opts:
- newversionflags = REVLOGV1 | FLAG_INLINE_DATA
+ new_header = REVLOGV1 | FLAG_INLINE_DATA
if b'generaldelta' in opts:
- newversionflags |= FLAG_GENERALDELTA
+ new_header |= FLAG_GENERALDELTA
elif b'revlogv0' in self.opener.options:
- newversionflags = REVLOGV0
+ new_header = REVLOGV0
else:
- newversionflags = REVLOG_DEFAULT_VERSION
+ new_header = REVLOG_DEFAULT_VERSION
if b'chunkcachesize' in opts:
self._chunkcachesize = opts[b'chunkcachesize']
@@ -526,7 +430,6 @@
self._maxdeltachainspan = opts[b'maxdeltachainspan']
if self._mmaplargeindex and b'mmapindexthreshold' in opts:
mmapindexthreshold = opts[b'mmapindexthreshold']
- self.hassidedata = bool(opts.get(b'side-data', False))
self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
withsparseread = bool(opts.get(b'with-sparse-read', False))
# sparse-revlog forces sparse-read
@@ -554,75 +457,118 @@
_(b'revlog chunk cache size %r is not a power of 2')
% self._chunkcachesize
)
-
- indexdata = b''
- self._initempty = True
+ force_nodemap = opts.get(b'devel-force-nodemap', False)
+ return new_header, mmapindexthreshold, force_nodemap
+
+ def _get_data(self, filepath, mmap_threshold, size=None):
+ """return a file content with or without mmap
+
+ If the file is missing return the empty string"""
try:
- with self._indexfp() as f:
- if (
- mmapindexthreshold is not None
- and self.opener.fstat(f).st_size >= mmapindexthreshold
- ):
- # TODO: should .close() to release resources without
- # relying on Python GC
- indexdata = util.buffer(util.mmapread(f))
+ with self.opener(filepath) as fp:
+ if mmap_threshold is not None:
+ file_size = self.opener.fstat(fp).st_size
+ if file_size >= mmap_threshold:
+ if size is not None:
+ # avoid a potential mmap crash
+ size = min(file_size, size)
+ # TODO: should .close() to release resources without
+ # relying on Python GC
+ if size is None:
+ return util.buffer(util.mmapread(fp))
+ else:
+ return util.buffer(util.mmapread(fp, size))
+ if size is None:
+ return fp.read()
else:
- indexdata = f.read()
- if len(indexdata) > 0:
- versionflags = INDEX_HEADER.unpack(indexdata[:4])[0]
- self._initempty = False
- else:
- versionflags = newversionflags
+ return fp.read(size)
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
-
- versionflags = newversionflags
-
- self.version = versionflags
-
- flags = versionflags & ~0xFFFF
- fmt = versionflags & 0xFFFF
-
- if fmt == REVLOGV0:
- if flags:
- raise error.RevlogError(
- _(b'unknown flags (%#04x) in version %d revlog %s')
- % (flags >> 16, fmt, self.indexfile)
+ return b''
+
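
For review purposes, the policy `_get_data` implements can be restated as a self-contained sketch over the stdlib instead of the vfs opener; the function and parameter names below are illustrative only, not part of this series:

import mmap
import os


def read_maybe_mmap(path, mmap_threshold=None, size=None):
    """Return the content of `path`, memory-mapping it past a threshold.

    Returns b'' when the file is missing, mirroring _get_data() above.
    """
    try:
        with open(path, 'rb') as fp:
            if mmap_threshold is not None:
                file_size = os.fstat(fp.fileno()).st_size
                if file_size >= mmap_threshold and file_size > 0:
                    if size is not None:
                        # never map past the end of the file: mapping more
                        # than the file holds can crash on access
                        size = min(file_size, size)
                    length = file_size if size is None else size
                    with mmap.mmap(
                        fp.fileno(), length, access=mmap.ACCESS_READ
                    ) as mm:
                        # copied out for simplicity; the real code keeps a
                        # buffer over the still-open map instead
                        return mm[:]
            return fp.read() if size is None else fp.read(size)
    except FileNotFoundError:
        return b''
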
+ def _loadindex(self, docket=None):
+
+ new_header, mmapindexthreshold, force_nodemap = self._init_opts()
+
+ if self.postfix is not None:
+ entry_point = b'%s.i.%s' % (self.radix, self.postfix)
+ elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
+ entry_point = b'%s.i.a' % self.radix
+ else:
+ entry_point = b'%s.i' % self.radix
+
+ if docket is not None:
+ self._docket = docket
+ self._docket_file = entry_point
+ else:
+ entry_data = b''
+ self._initempty = True
+ entry_data = self._get_data(entry_point, mmapindexthreshold)
+ if len(entry_data) > 0:
+ header = INDEX_HEADER.unpack(entry_data[:4])[0]
+ self._initempty = False
+ else:
+ header = new_header
+
+ self._format_flags = header & ~0xFFFF
+ self._format_version = header & 0xFFFF
+
+ supported_flags = SUPPORTED_FLAGS.get(self._format_version)
+ if supported_flags is None:
+ msg = _(b'unknown version (%d) in revlog %s')
+ msg %= (self._format_version, self.display_id)
+ raise error.RevlogError(msg)
+ elif self._format_flags & ~supported_flags:
+ msg = _(b'unknown flags (%#04x) in version %d revlog %s')
+ display_flag = self._format_flags >> 16
+ msg %= (display_flag, self._format_version, self.display_id)
+ raise error.RevlogError(msg)
+
+ features = FEATURES_BY_VERSION[self._format_version]
+ self._inline = features[b'inline'](self._format_flags)
+ self._generaldelta = features[b'generaldelta'](self._format_flags)
+ self.hassidedata = features[b'sidedata']
+
+ if not features[b'docket']:
+ self._indexfile = entry_point
+ index_data = entry_data
+ else:
+ self._docket_file = entry_point
+ if self._initempty:
+ self._docket = docketutil.default_docket(self, header)
+ else:
+ self._docket = docketutil.parse_docket(
+ self, entry_data, use_pending=self._trypending
+ )
+
+ if self._docket is not None:
+ self._indexfile = self._docket.index_filepath()
+ index_data = b''
+ index_size = self._docket.index_end
+ if index_size > 0:
+ index_data = self._get_data(
+ self._indexfile, mmapindexthreshold, size=index_size
)
-
- self._inline = False
- self._generaldelta = False
-
- elif fmt == REVLOGV1:
- if flags & ~REVLOGV1_FLAGS:
- raise error.RevlogError(
- _(b'unknown flags (%#04x) in version %d revlog %s')
- % (flags >> 16, fmt, self.indexfile)
- )
-
- self._inline = versionflags & FLAG_INLINE_DATA
- self._generaldelta = versionflags & FLAG_GENERALDELTA
-
- elif fmt == REVLOGV2:
- if flags & ~REVLOGV2_FLAGS:
- raise error.RevlogError(
- _(b'unknown flags (%#04x) in version %d revlog %s')
- % (flags >> 16, fmt, self.indexfile)
- )
-
- # There is a bug in the transaction handling when going from an
- # inline revlog to a separate index and data file. Turn it off until
- # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
- # See issue6485
+ if len(index_data) < index_size:
+ msg = _(b'not enough index data for %s: got %d, expected %d')
+ msg %= (self.display_id, len(index_data), index_size)
+ raise error.RevlogError(msg)
+
self._inline = False
# generaldelta implied by version 2 revlogs.
self._generaldelta = True
-
+ # the logic for persistent nodemap will be dealt with within the
+ # main docket, so disable it for now.
+ self._nodemap_file = None
+
+ if self._docket is not None:
+ self._datafile = self._docket.data_filepath()
+ self._sidedatafile = self._docket.sidedata_filepath()
+ elif self.postfix is None:
+ self._datafile = b'%s.d' % self.radix
else:
- raise error.RevlogError(
- _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
- )
+ self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
self.nodeconstants = sha1nodeconstants
self.nullid = self.nodeconstants.nullid
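
The version handling above hinges on the 32-bit header stored at the start of the entry point: the low 16 bits carry the format version, the high 16 bits the feature flags. A self-contained round trip of that split, with constant values mirroring `mercurial.revlogutils.constants`:

import struct

INDEX_HEADER = struct.Struct(b">I")
REVLOGV1 = 1
FLAG_INLINE_DATA = 1 << 16
FLAG_GENERALDELTA = 2 << 16

header = REVLOGV1 | FLAG_INLINE_DATA | FLAG_GENERALDELTA
packed = INDEX_HEADER.pack(header)

unpacked = INDEX_HEADER.unpack(packed[:4])[0]
format_flags = unpacked & ~0xFFFF    # 0x00030000: the two feature flags
format_version = unpacked & 0xFFFF   # 1: REVLOGV1
assert format_version == REVLOGV1
assert format_flags & FLAG_INLINE_DATA and format_flags & FLAG_GENERALDELTA
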
@@ -634,33 +580,35 @@
self._storedeltachains = True
devel_nodemap = (
- self.nodemap_file
- and opts.get(b'devel-force-nodemap', False)
- and NodemapRevlogIO is not None
+ self._nodemap_file
+ and force_nodemap
+ and parse_index_v1_nodemap is not None
)
use_rust_index = False
if rustrevlog is not None:
- if self.nodemap_file is not None:
+ if self._nodemap_file is not None:
use_rust_index = True
else:
use_rust_index = self.opener.options.get(b'rust.index')
- self._io = revlogio()
- if self.version == REVLOGV0:
- self._io = revlogoldio()
- elif fmt == REVLOGV2:
- self._io = revlogv2io()
+ self._parse_index = parse_index_v1
+ if self._format_version == REVLOGV0:
+ self._parse_index = revlogv0.parse_index_v0
+ elif self._format_version == REVLOGV2:
+ self._parse_index = parse_index_v2
+ elif self._format_version == CHANGELOGV2:
+ self._parse_index = parse_index_cl_v2
elif devel_nodemap:
- self._io = NodemapRevlogIO()
+ self._parse_index = parse_index_v1_nodemap
elif use_rust_index:
- self._io = rustrevlogio()
+ self._parse_index = parse_index_v1_mixed
try:
- d = self._io.parseindex(indexdata, self._inline)
+ d = self._parse_index(index_data, self._inline)
index, _chunkcache = d
use_nodemap = (
not self._inline
- and self.nodemap_file is not None
+ and self._nodemap_file is not None
and util.safehasattr(index, 'update_nodemap_data')
)
if use_nodemap:
@@ -676,7 +624,7 @@
index.update_nodemap_data(*nodemap_data)
except (ValueError, IndexError):
raise error.RevlogError(
- _(b"index %s is corrupted") % self.indexfile
+ _(b"index %s is corrupted") % self.display_id
)
self.index, self._chunkcache = d
if not self._chunkcache:
@@ -687,22 +635,78 @@
self._decompressors = {}
@util.propertycache
+ def revlog_kind(self):
+ return self.target[0]
+
+ @util.propertycache
+ def display_id(self):
+ """The public facing "ID" of the revlog that we use in message"""
+ # Maybe we should build a user facing representation of
+ # revlog.target instead of using `self.radix`
+ return self.radix
+
+ def _get_decompressor(self, t):
+ try:
+ compressor = self._decompressors[t]
+ except KeyError:
+ try:
+ engine = util.compengines.forrevlogheader(t)
+ compressor = engine.revlogcompressor(self._compengineopts)
+ self._decompressors[t] = compressor
+ except KeyError:
+ raise error.RevlogError(
+ _(b'unknown compression type %s') % binascii.hexlify(t)
+ )
+ return compressor
+
+ @util.propertycache
def _compressor(self):
engine = util.compengines[self._compengine]
return engine.revlogcompressor(self._compengineopts)
- def _indexfp(self, mode=b'r'):
+ @util.propertycache
+ def _decompressor(self):
+ """the default decompressor"""
+ if self._docket is None:
+ return None
+ t = self._docket.default_compression_header
+ c = self._get_decompressor(t)
+ return c.decompress
+
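
`_get_decompressor` caches one compressor object per chunk-header byte, while `_decompressor` binds the docket's default engine once as a plain callable. Roughly, with `rl` a hypothetical docket-based revlog and `chunk` a raw chunk (`b'x'` being the historical zlib chunk header):

# resolved once, then served from the self._decompressors cache
comp = rl._get_decompressor(b'x')
text = comp.decompress(chunk)    # COMP_MODE_INLINE chunks carry a header byte

# the docket names a default engine; COMP_MODE_DEFAULT chunks carry no
# header byte and go straight through the pre-resolved callable
text = rl._decompressor(chunk)
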
+ def _indexfp(self):
"""file object for the revlog's index file"""
- args = {'mode': mode}
- if mode != b'r':
- args['checkambig'] = self._checkambig
- if mode == b'w':
- args['atomictemp'] = True
- return self.opener(self.indexfile, **args)
+ return self.opener(self._indexfile, mode=b"r")
+
+ def __index_write_fp(self):
+ # You should not use this directly and use `_writing` instead
+ try:
+ f = self.opener(
+ self._indexfile, mode=b"r+", checkambig=self._checkambig
+ )
+ if self._docket is None:
+ f.seek(0, os.SEEK_END)
+ else:
+ f.seek(self._docket.index_end, os.SEEK_SET)
+ return f
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ return self.opener(
+ self._indexfile, mode=b"w+", checkambig=self._checkambig
+ )
+
+ def __index_new_fp(self):
+ # You should not use this unless you are upgrading from inline revlog
+ return self.opener(
+ self._indexfile,
+ mode=b"w",
+ checkambig=self._checkambig,
+ atomictemp=True,
+ )
def _datafp(self, mode=b'r'):
"""file object for the revlog's data file"""
- return self.opener(self.datafile, mode=mode)
+ return self.opener(self._datafile, mode=mode)
@contextlib.contextmanager
def _datareadfp(self, existingfp=None):
@@ -730,6 +734,15 @@
with func() as fp:
yield fp
+ @contextlib.contextmanager
+ def _sidedatareadfp(self):
+ """file object suitable to read sidedata"""
+ if self._writinghandles:
+ yield self._writinghandles[2]
+ else:
+ with self.opener(self._sidedatafile) as fp:
+ yield fp
+
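
Reading through the active write handle is what makes sidedata appended earlier in the same transaction visible before any flush; a fresh file object opened through the vfs would not yet see those bytes. The usage shape (hypothetical offset/size values; `os` is already imported in this module):

with rl._sidedatareadfp() as sdf:
    sdf.seek(sidedata_offset, os.SEEK_SET)
    raw = sdf.read(sidedata_size)
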
def tiprev(self):
return len(self.index) - 1
@@ -785,7 +798,7 @@
return True
def update_caches(self, transaction):
- if self.nodemap_file is not None:
+ if self._nodemap_file is not None:
if transaction is None:
nodemaputil.update_persistent_nodemap(self)
else:
@@ -802,7 +815,7 @@
# end up having to refresh it here.
use_nodemap = (
not self._inline
- and self.nodemap_file is not None
+ and self._nodemap_file is not None
and util.safehasattr(self.index, 'update_nodemap_data')
)
if use_nodemap:
@@ -818,9 +831,12 @@
raise
except error.RevlogError:
# parsers.c radix tree lookup failed
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.nodeconstants.wdirid
+ or node in self.nodeconstants.wdirfilenodeids
+ ):
raise error.WdirUnsupported
- raise error.LookupError(node, self.indexfile, _(b'no node'))
+ raise error.LookupError(node, self.display_id, _(b'no node'))
# Accessors for index entries.
@@ -829,6 +845,23 @@
def start(self, rev):
return int(self.index[rev][0] >> 16)
+ def sidedata_cut_off(self, rev):
+ sd_cut_off = self.index[rev][8]
+ if sd_cut_off != 0:
+ return sd_cut_off
+ # This is some annoying dance, because entries without sidedata
+ # currently use 0 as their offset (instead of previous-offset +
+ # previous-size).
+ #
+ # We should reconsider this sidedata → 0 sidedata_offset policy.
+ # In the meantime, we need this workaround.
+ while 0 <= rev:
+ e = self.index[rev]
+ if e[9] != 0:
+ return e[8] + e[9]
+ rev -= 1
+ return 0
+
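
A worked example of the backward scan, with the index reduced to its two sidedata columns (offset, compressed length); `cut_off` is a hypothetical standalone rendering of the method above:

def cut_off(entries, rev):
    """entries: one (sidedata_offset, sidedata_size) pair per revision."""
    offset, size = entries[rev]
    if offset != 0:
        # the stripped revision owns sidedata: cut right before it
        return offset
    # offset 0 usually means "no sidedata": walk back to the end of the
    # last revision that actually stored some
    while rev >= 0:
        offset, size = entries[rev]
        if size != 0:
            return offset + size
        rev -= 1
    return 0

# rev 0 stored 40 bytes of sidedata, revs 1-2 stored none:
assert cut_off([(0, 40), (0, 0), (0, 0)], 2) == 40
# rev 1 owns sidedata starting at byte 40: stripping it cuts the file there
assert cut_off([(0, 40), (40, 24)], 1) == 40
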
def flags(self, rev):
return self.index[rev][0] & 0xFFFF
@@ -836,7 +869,7 @@
return self.index[rev][1]
def sidedata_length(self, rev):
- if self.version & 0xFFFF != REVLOGV2:
+ if not self.hassidedata:
return 0
return self.index[rev][9]
@@ -909,7 +942,7 @@
i = self.index
d = i[self.rev(node)]
# inline node() to avoid function call overhead
- if d[5] == nullid:
+ if d[5] == self.nullid:
return i[d[6]][7], i[d[5]][7]
else:
return i[d[5]][7], i[d[6]][7]
@@ -1002,7 +1035,7 @@
checkrev(r)
# and we're sure ancestors aren't filtered as well
- if rustancestor is not None:
+ if rustancestor is not None and self.index.rust_ext_compat:
lazyancestors = rustancestor.LazyAncestors
arg = self.index
else:
@@ -1027,7 +1060,7 @@
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [self.nullid]
if heads is None:
heads = self.heads()
@@ -1089,7 +1122,7 @@
if common is None:
common = [nullrev]
- if rustancestor is not None:
+ if rustancestor is not None and self.index.rust_ext_compat:
return rustancestor.MissingAncestors(self.index, common)
return ancestor.incrementalmissingancestors(self.parentrevs, common)
@@ -1133,7 +1166,7 @@
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [self.nullid]
if heads is None:
heads = self.heads()
@@ -1171,11 +1204,15 @@
return nonodes
lowestrev = min([self.rev(n) for n in roots])
else:
- roots = [nullid] # Everybody's a descendant of nullid
+ roots = [self.nullid] # Everybody's a descendant of nullid
lowestrev = nullrev
if (lowestrev == nullrev) and (heads is None):
# We want _all_ the nodes!
- return ([self.node(r) for r in self], [nullid], list(self.heads()))
+ return (
+ [self.node(r) for r in self],
+ [self.nullid],
+ list(self.heads()),
+ )
if heads is None:
# All nodes are ancestors, so the latest ancestor is the last
# node.
@@ -1201,7 +1238,7 @@
# grab a node to tag
n = nodestotag.pop()
# Never tag nullid
- if n == nullid:
+ if n == self.nullid:
continue
# A node's revision number represents its place in a
# topologically sorted list of nodes.
@@ -1213,7 +1250,7 @@
ancestors.add(n) # Mark as ancestor
# Add non-nullid parents to list of nodes to tag.
nodestotag.update(
- [p for p in self.parents(n) if p != nullid]
+ [p for p in self.parents(n) if p != self.nullid]
)
elif n in heads: # We've seen it before, is it a fake head?
# So it is, real heads should not be the ancestors of
@@ -1241,7 +1278,7 @@
# We are descending from nullid, and don't need to care about
# any other roots.
lowestrev = nullrev
- roots = [nullid]
+ roots = [self.nullid]
# Transform our roots list into a set.
descendants = set(roots)
# Also, keep the original roots so we can filter out roots that aren't
@@ -1305,7 +1342,7 @@
return self.index.headrevs()
except AttributeError:
return self._headrevs()
- if rustdagop is not None:
+ if rustdagop is not None and self.index.rust_ext_compat:
return rustdagop.headrevs(self.index, revs)
return dagop.headrevs(revs, self._uncheckedparentrevs)
@@ -1335,7 +1372,7 @@
"""
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [self.nullid]
return [self.node(r) for r in self.headrevs()]
if start is None:
@@ -1425,13 +1462,13 @@
if ancs:
# choose a consistent winner when there's a tie
return min(map(self.node, ancs))
- return nullid
+ return self.nullid
def _match(self, id):
if isinstance(id, int):
# rev
return self.node(id)
- if len(id) == 20:
+ if len(id) == self.nodeconstants.nodelen:
# possibly a binary node
# odds of a binary node being all hex in ASCII are 1 in 10**25
try:
@@ -1452,7 +1489,7 @@
return self.node(rev)
except (ValueError, OverflowError):
pass
- if len(id) == 40:
+ if len(id) == 2 * self.nodeconstants.nodelen:
try:
# a full hex nodeid?
node = bin(id)
@@ -1463,29 +1500,34 @@
def _partialmatch(self, id):
# we don't care wdirfilenodeids as they should be always full hash
- maybewdir = wdirhex.startswith(id)
+ maybewdir = self.nodeconstants.wdirhex.startswith(id)
+ ambiguous = False
try:
partial = self.index.partialmatch(id)
if partial and self.hasnode(partial):
if maybewdir:
# single 'ff...' match in radix tree, ambiguous with wdir
- raise error.RevlogError
- return partial
- if maybewdir:
+ ambiguous = True
+ else:
+ return partial
+ elif maybewdir:
# no 'ff...' match in radix tree, wdir identified
raise error.WdirUnsupported
- return None
+ else:
+ return None
except error.RevlogError:
# parsers.c radix tree lookup gave multiple matches
# fast path: for unfiltered changelog, radix tree is accurate
if not getattr(self, 'filteredrevs', None):
- raise error.AmbiguousPrefixLookupError(
- id, self.indexfile, _(b'ambiguous identifier')
- )
+ ambiguous = True
# fall through to slow path that filters hidden revisions
except (AttributeError, ValueError):
# we are pure python, or key was too short to search radix tree
pass
+ if ambiguous:
+ raise error.AmbiguousPrefixLookupError(
+ id, self.display_id, _(b'ambiguous identifier')
+ )
if id in self._pcache:
return self._pcache[id]
@@ -1499,14 +1541,14 @@
nl = [
n for n in nl if hex(n).startswith(id) and self.hasnode(n)
]
- if nullhex.startswith(id):
- nl.append(nullid)
+ if self.nodeconstants.nullhex.startswith(id):
+ nl.append(self.nullid)
if len(nl) > 0:
if len(nl) == 1 and not maybewdir:
self._pcache[id] = nl[0]
return nl[0]
raise error.AmbiguousPrefixLookupError(
- id, self.indexfile, _(b'ambiguous identifier')
+ id, self.display_id, _(b'ambiguous identifier')
)
if maybewdir:
raise error.WdirUnsupported
@@ -1526,7 +1568,7 @@
if n:
return n
- raise error.LookupError(id, self.indexfile, _(b'no match found'))
+ raise error.LookupError(id, self.display_id, _(b'no match found'))
def shortest(self, node, minlength=1):
"""Find the shortest unambiguous prefix that matches node."""
@@ -1540,7 +1582,7 @@
# single 'ff...' match
return True
if matchednode is None:
- raise error.LookupError(node, self.indexfile, _(b'no node'))
+ raise error.LookupError(node, self.display_id, _(b'no node'))
return True
def maybewdir(prefix):
@@ -1560,13 +1602,15 @@
length = max(self.index.shortest(node), minlength)
return disambiguate(hexnode, length)
except error.RevlogError:
- if node != wdirid:
- raise error.LookupError(node, self.indexfile, _(b'no node'))
+ if node != self.nodeconstants.wdirid:
+ raise error.LookupError(
+ node, self.display_id, _(b'no node')
+ )
except AttributeError:
# Fall through to pure code
pass
- if node == wdirid:
+ if node == self.nodeconstants.wdirid:
for length in range(minlength, len(hexnode) + 1):
prefix = hexnode[:length]
if isvalid(prefix):
@@ -1626,34 +1670,17 @@
if offset != realoffset or reallength != length:
startoffset = offset - realoffset
if len(d) - startoffset < length:
- raise error.RevlogError(
- _(
- b'partial read of revlog %s; expected %d bytes from '
- b'offset %d, got %d'
- )
- % (
- self.indexfile if self._inline else self.datafile,
- length,
- realoffset,
- len(d) - startoffset,
- )
- )
-
+ filename = self._indexfile if self._inline else self._datafile
+ got = len(d) - startoffset
+ m = PARTIAL_READ_MSG % (filename, length, offset, got)
+ raise error.RevlogError(m)
return util.buffer(d, startoffset, length)
if len(d) < length:
- raise error.RevlogError(
- _(
- b'partial read of revlog %s; expected %d bytes from offset '
- b'%d, got %d'
- )
- % (
- self.indexfile if self._inline else self.datafile,
- length,
- offset,
- len(d),
- )
- )
+ filename = self._indexfile if self._inline else self._datafile
+ got = len(d)
+ m = PARTIAL_READ_MSG % (filename, length, offset, got)
+ raise error.RevlogError(m)
return d
@@ -1724,7 +1751,18 @@
Returns a str holding uncompressed data for the requested revision.
"""
- return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
+ compression_mode = self.index[rev][10]
+ data = self._getsegmentforrevs(rev, rev, df=df)[1]
+ if compression_mode == COMP_MODE_PLAIN:
+ return data
+ elif compression_mode == COMP_MODE_DEFAULT:
+ return self._decompressor(data)
+ elif compression_mode == COMP_MODE_INLINE:
+ return self.decompress(data)
+ else:
+ msg = b'unknown compression mode %d'
+ msg %= compression_mode
+ raise error.RevlogError(msg)
def _chunks(self, revs, df=None, targetsize=None):
"""Obtain decompressed chunks for the specified revisions.
@@ -1772,12 +1810,25 @@
return [self._chunk(rev, df=df) for rev in revschunk]
decomp = self.decompress
+ # self._decompressor might be None, but will not be used in that case
+ def_decomp = self._decompressor
for rev in revschunk:
chunkstart = start(rev)
if inline:
chunkstart += (rev + 1) * iosize
chunklength = length(rev)
- ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
+ comp_mode = self.index[rev][10]
+ c = buffer(data, chunkstart - offset, chunklength)
+ if comp_mode == COMP_MODE_PLAIN:
+ ladd(c)
+ elif comp_mode == COMP_MODE_INLINE:
+ ladd(decomp(c))
+ elif comp_mode == COMP_MODE_DEFAULT:
+ ladd(def_decomp(c))
+ else:
+ msg = b'unknown compression mode %d'
+ msg %= comp_mode
+ raise error.RevlogError(msg)
return l
@@ -1860,7 +1911,7 @@
b'use revlog.rawdata(...)'
)
util.nouideprecwarn(msg, b'5.2', stacklevel=2)
- return self._revisiondata(nodeorrev, _df, raw=raw)[0]
+ return self._revisiondata(nodeorrev, _df, raw=raw)
def sidedata(self, nodeorrev, _df=None):
"""a map of extra data related to the changeset but not part of the hash
@@ -1869,7 +1920,12 @@
mapping object will likely be used in the future for a more
efficient/lazy code.
"""
- return self._revisiondata(nodeorrev, _df)[1]
+ # deal with <nodeorrev> argument type
+ if isinstance(nodeorrev, int):
+ rev = nodeorrev
+ else:
+ rev = self.rev(nodeorrev)
+ return self._sidedata(rev)
def _revisiondata(self, nodeorrev, _df=None, raw=False):
# deal with <nodeorrev> argument type
@@ -1881,24 +1937,17 @@
rev = None
# fast path the special `nullid` rev
- if node == nullid:
- return b"", {}
+ if node == self.nullid:
+ return b""
# ``rawtext`` is the text as stored inside the revlog. Might be the
# revision or might need to be processed to retrieve the revision.
rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
- if self.version & 0xFFFF == REVLOGV2:
- if rev is None:
- rev = self.rev(node)
- sidedata = self._sidedata(rev)
- else:
- sidedata = {}
-
if raw and validated:
# if we don't want to process the raw text and that raw
# text is cached, we can exit early.
- return rawtext, sidedata
+ return rawtext
if rev is None:
rev = self.rev(node)
# the revlog's flag for this revision
@@ -1907,7 +1956,7 @@
if validated and flags == REVIDX_DEFAULT_FLAGS:
# no extra flags set, no flag processor runs, text = rawtext
- return rawtext, sidedata
+ return rawtext
if raw:
validatehash = flagutil.processflagsraw(self, rawtext, flags)
@@ -1920,7 +1969,7 @@
if not validated:
self._revisioncache = (node, rev, rawtext)
- return text, sidedata
+ return text
def _rawtext(self, node, rev, _df=None):
"""return the possibly unvalidated rawtext for a revision
@@ -1976,7 +2025,39 @@
if sidedata_size == 0:
return {}
- segment = self._getsegment(sidedata_offset, sidedata_size)
+ # XXX this needs caching, as we do for data
+ with self._sidedatareadfp() as sdf:
+ if self._docket.sidedata_end < sidedata_offset + sidedata_size:
+ filename = self._sidedatafile
+ end = self._docket.sidedata_end
+ offset = sidedata_offset
+ length = sidedata_size
+ m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
+ raise error.RevlogError(m)
+
+ sdf.seek(sidedata_offset, os.SEEK_SET)
+ comp_segment = sdf.read(sidedata_size)
+
+ if len(comp_segment) < sidedata_size:
+ filename = self._sidedatafile
+ length = sidedata_size
+ offset = sidedata_offset
+ got = len(comp_segment)
+ m = PARTIAL_READ_MSG % (filename, length, offset, got)
+ raise error.RevlogError(m)
+
+ comp = self.index[rev][11]
+ if comp == COMP_MODE_PLAIN:
+ segment = comp_segment
+ elif comp == COMP_MODE_DEFAULT:
+ segment = self._decompressor(comp_segment)
+ elif comp == COMP_MODE_INLINE:
+ segment = self.decompress(comp_segment)
+ else:
+ msg = b'unknown compression mode %d'
+ msg %= comp
+ raise error.RevlogError(msg)
+
sidedata = sidedatautil.deserialize_sidedata(segment)
return sidedata
@@ -1985,7 +2066,7 @@
_df - an existing file handle to read from. (internal-only)
"""
- return self._revisiondata(nodeorrev, _df, raw=True)[0]
+ return self._revisiondata(nodeorrev, _df, raw=True)
def hash(self, text, p1, p2):
"""Compute a node hash.
@@ -2019,14 +2100,14 @@
revornode = templatefilters.short(hex(node))
raise error.RevlogError(
_(b"integrity check failed on %s:%s")
- % (self.indexfile, pycompat.bytestr(revornode))
+ % (self.display_id, pycompat.bytestr(revornode))
)
except error.RevlogError:
if self._censorable and storageutil.iscensoredtext(text):
- raise error.CensoredNodeError(self.indexfile, node, text)
+ raise error.CensoredNodeError(self.display_id, node, text)
raise
- def _enforceinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr):
"""Check if the revlog is too big for inline and convert if so.
This should be called after revisions are added to the revlog. If the
@@ -2034,51 +2115,149 @@
to use multiple index and data files.
"""
tiprev = len(self) - 1
- if (
- not self._inline
- or (self.start(tiprev) + self.length(tiprev)) < _maxinline
- ):
+ total_size = self.start(tiprev) + self.length(tiprev)
+ if not self._inline or total_size < _maxinline:
return
- troffset = tr.findoffset(self.indexfile)
+ troffset = tr.findoffset(self._indexfile)
if troffset is None:
raise error.RevlogError(
- _(b"%s not found in the transaction") % self.indexfile
+ _(b"%s not found in the transaction") % self._indexfile
)
trindex = 0
- tr.add(self.datafile, 0)
-
- if fp:
+ tr.add(self._datafile, 0)
+
+ existing_handles = False
+ if self._writinghandles is not None:
+ existing_handles = True
+ fp = self._writinghandles[0]
fp.flush()
fp.close()
# We can't use the cached file handle after close(). So prevent
# its usage.
self._writinghandles = None
- with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
- for r in self:
- dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
- if troffset <= self.start(r):
- trindex = r
-
- with self._indexfp(b'w') as fp:
- self.version &= ~FLAG_INLINE_DATA
- self._inline = False
- io = self._io
- for i in self:
- e = io.packentry(self.index[i], self.node, self.version, i)
- fp.write(e)
-
- # the temp file replace the real index when we exit the context
- # manager
-
- tr.replace(self.indexfile, trindex * self.index.entry_size)
- nodemaputil.setup_persistent_nodemap(tr, self)
- self._chunkclear()
+ new_dfh = self._datafp(b'w+')
+ new_dfh.truncate(0) # drop any potentially existing data
+ try:
+ with self._indexfp() as read_ifh:
+ for r in self:
+ new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
+ if troffset <= self.start(r) + r * self.index.entry_size:
+ trindex = r
+ new_dfh.flush()
+
+ with self.__index_new_fp() as fp:
+ self._format_flags &= ~FLAG_INLINE_DATA
+ self._inline = False
+ for i in self:
+ e = self.index.entry_binary(i)
+ if i == 0 and self._docket is None:
+ header = self._format_flags | self._format_version
+ header = self.index.pack_header(header)
+ e = header + e
+ fp.write(e)
+ if self._docket is not None:
+ self._docket.index_end = fp.tell()
+
+ # There is a small transactional race here. If the rename of
+ # the index fails, we should remove the datafile. It is more
+ # important to ensure that the data file is not truncated
+ # when the index is replaced as otherwise data is lost.
+ tr.replace(self._datafile, self.start(trindex))
+
+ # the temp file replaces the real index when we exit the context
+ # manager
+
+ tr.replace(self._indexfile, trindex * self.index.entry_size)
+ nodemaputil.setup_persistent_nodemap(tr, self)
+ self._chunkclear()
+
+ if existing_handles:
+ # switched from inline to conventional; reopen the index
+ ifh = self.__index_write_fp()
+ self._writinghandles = (ifh, new_dfh, None)
+ new_dfh = None
+ finally:
+ if new_dfh is not None:
+ new_dfh.close()
def _nodeduplicatecallback(self, transaction, node):
"""called when trying to add a node already stored."""
+ @contextlib.contextmanager
+ def _writing(self, transaction):
+ if self._trypending:
+ msg = b'trying to write in a `trypending` revlog: %s'
+ msg %= self.display_id
+ raise error.ProgrammingError(msg)
+ if self._writinghandles is not None:
+ yield
+ else:
+ ifh = dfh = sdfh = None
+ try:
+ r = len(self)
+ # opening the data file.
+ dsize = 0
+ if r:
+ dsize = self.end(r - 1)
+ dfh = None
+ if not self._inline:
+ try:
+ dfh = self._datafp(b"r+")
+ if self._docket is None:
+ dfh.seek(0, os.SEEK_END)
+ else:
+ dfh.seek(self._docket.data_end, os.SEEK_SET)
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ dfh = self._datafp(b"w+")
+ transaction.add(self._datafile, dsize)
+ if self._sidedatafile is not None:
+ try:
+ sdfh = self.opener(self._sidedatafile, mode=b"r+")
+ sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ sdfh = self.opener(self._sidedatafile, mode=b"w+")
+ transaction.add(
+ self._sidedatafile, self._docket.sidedata_end
+ )
+
+ # opening the index file.
+ isize = r * self.index.entry_size
+ ifh = self.__index_write_fp()
+ if self._inline:
+ transaction.add(self._indexfile, dsize + isize)
+ else:
+ transaction.add(self._indexfile, isize)
+ # exposing all file handle for writing.
+ self._writinghandles = (ifh, dfh, sdfh)
+ yield
+ if self._docket is not None:
+ self._write_docket(transaction)
+ finally:
+ self._writinghandles = None
+ if dfh is not None:
+ dfh.close()
+ if sdfh is not None:
+ sdfh.close()
+ # closing the index file last to avoid exposing references to
+ # potentially unflushed data content.
+ if ifh is not None:
+ ifh.close()
+
+ def _write_docket(self, transaction):
+ """write the current docket on disk
+
+ Exists as a method to help the changelog implement its transaction logic.
+
+ We could also imagine using the same transaction logic for all revlogs,
+ since dockets are cheap."""
+ self._docket.write(transaction)
+
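
All writes now funnel through the `_writing` context manager: it opens the index, data and sidedata handles once, positions them from the docket, exposes them as `_writinghandles`, and writes the docket back on a successful exit. The call shape, as `addrawrevision()` below uses it (a sketch, with `rl` and the arguments hypothetical):

with rl._writing(transaction):
    # rl._writinghandles is now (ifh, dfh, sdfh); nested entries reuse it
    rl._addrevision(
        node, rawtext, transaction, link, p1, p2, flags, cachedelta,
        sidedata=sidedata,
    )
# on exit: docket written (if any), data/sidedata handles closed first,
# index handle closed last
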
def addrevision(
self,
text,
@@ -2108,12 +2287,12 @@
"""
if link == nullrev:
raise error.RevlogError(
- _(b"attempted to add linkrev -1 to %s") % self.indexfile
+ _(b"attempted to add linkrev -1 to %s") % self.display_id
)
if sidedata is None:
sidedata = {}
- elif not self.hassidedata:
+ elif sidedata and not self.hassidedata:
raise error.ProgrammingError(
_(b"trying to add sidedata to a revlog who don't support them")
)
@@ -2133,7 +2312,7 @@
_(
b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
)
- % (self.indexfile, len(rawtext))
+ % (self.display_id, len(rawtext))
)
node = node or self.hash(rawtext, p1, p2)
@@ -2174,11 +2353,7 @@
useful when reusing a revision not stored in this revlog (ex: received
over wire, or read from an external bundle).
"""
- dfh = None
- if not self._inline:
- dfh = self._datafp(b"a+")
- ifh = self._indexfp(b"a+")
- try:
+ with self._writing(transaction):
return self._addrevision(
node,
rawtext,
@@ -2188,15 +2363,9 @@
p2,
flags,
cachedelta,
- ifh,
- dfh,
deltacomputer=deltacomputer,
sidedata=sidedata,
)
- finally:
- if dfh:
- dfh.close()
- ifh.close()
def compress(self, data):
"""Generate a possibly-compressed representation of data."""
@@ -2259,17 +2428,7 @@
elif t == b'u':
return util.buffer(data, 1)
- try:
- compressor = self._decompressors[t]
- except KeyError:
- try:
- engine = util.compengines.forrevlogheader(t)
- compressor = engine.revlogcompressor(self._compengineopts)
- self._decompressors[t] = compressor
- except KeyError:
- raise error.RevlogError(
- _(b'unknown compression type %s') % binascii.hexlify(t)
- )
+ compressor = self._get_decompressor(t)
return compressor.decompress(data)
@@ -2283,8 +2442,6 @@
p2,
flags,
cachedelta,
- ifh,
- dfh,
alwayscache=False,
deltacomputer=None,
sidedata=None,
@@ -2302,19 +2459,25 @@
- rawtext is optional (can be None); if not set, cachedelta must be set.
if both are set, they must correspond to each other.
"""
- if node == nullid:
+ if node == self.nullid:
raise error.RevlogError(
- _(b"%s: attempt to add null revision") % self.indexfile
+ _(b"%s: attempt to add null revision") % self.display_id
)
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.nodeconstants.wdirid
+ or node in self.nodeconstants.wdirfilenodeids
+ ):
raise error.RevlogError(
- _(b"%s: attempt to add wdir revision") % self.indexfile
+ _(b"%s: attempt to add wdir revision") % self.display_id
)
+ if self._writinghandles is None:
+ msg = b'adding revision outside `revlog._writing` context'
+ raise error.ProgrammingError(msg)
if self._inline:
- fh = ifh
+ fh = self._writinghandles[0]
else:
- fh = dfh
+ fh = self._writinghandles[1]
btext = [rawtext]
@@ -2324,18 +2487,20 @@
offset = self._get_data_offset(prev)
if self._concurrencychecker:
+ ifh, dfh, sdfh = self._writinghandles
+ # XXX no checking for the sidedata file
if self._inline:
# offset is "as if" it were in the .d file, so we need to add on
# the size of the entry metadata.
self._concurrencychecker(
- ifh, self.indexfile, offset + curr * self.index.entry_size
+ ifh, self._indexfile, offset + curr * self.index.entry_size
)
else:
# Entries in the .i are a consistent size.
self._concurrencychecker(
- ifh, self.indexfile, curr * self.index.entry_size
+ ifh, self._indexfile, curr * self.index.entry_size
)
- self._concurrencychecker(dfh, self.datafile, offset)
+ self._concurrencychecker(dfh, self._datafile, offset)
p1r, p2r = self.rev(p1), self.rev(p2)
@@ -2354,13 +2519,57 @@
if deltacomputer is None:
deltacomputer = deltautil.deltacomputer(self)
- revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
+ revinfo = revlogutils.revisioninfo(
+ node,
+ p1,
+ p2,
+ btext,
+ textlen,
+ cachedelta,
+ flags,
+ )
deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
- if sidedata:
+ compression_mode = COMP_MODE_INLINE
+ if self._docket is not None:
+ h, d = deltainfo.data
+ if not h and not d:
+ # no data to store at all... declare them uncompressed
+ compression_mode = COMP_MODE_PLAIN
+ elif not h:
+ t = d[0:1]
+ if t == b'\0':
+ compression_mode = COMP_MODE_PLAIN
+ elif t == self._docket.default_compression_header:
+ compression_mode = COMP_MODE_DEFAULT
+ elif h == b'u':
+ # we have a more efficient way to declare uncompressed
+ h = b''
+ compression_mode = COMP_MODE_PLAIN
+ deltainfo = deltautil.drop_u_compression(deltainfo)
+
+ sidedata_compression_mode = COMP_MODE_INLINE
+ if sidedata and self.hassidedata:
+ sidedata_compression_mode = COMP_MODE_PLAIN
serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
- sidedata_offset = offset + deltainfo.deltalen
+ sidedata_offset = self._docket.sidedata_end
+ h, comp_sidedata = self.compress(serialized_sidedata)
+ if (
+ h != b'u'
+ and comp_sidedata[0:1] != b'\0'
+ and len(comp_sidedata) < len(serialized_sidedata)
+ ):
+ assert not h
+ if (
+ comp_sidedata[0:1]
+ == self._docket.default_compression_header
+ ):
+ sidedata_compression_mode = COMP_MODE_DEFAULT
+ serialized_sidedata = comp_sidedata
+ else:
+ sidedata_compression_mode = COMP_MODE_INLINE
+ serialized_sidedata = comp_sidedata
else:
serialized_sidedata = b""
# Don't store the offset if the sidedata is empty, that way
@@ -2368,33 +2577,36 @@
# than ones we manually add.
sidedata_offset = 0
- e = (
- offset_type(offset, flags),
- deltainfo.deltalen,
- textlen,
- deltainfo.base,
- link,
- p1r,
- p2r,
- node,
- sidedata_offset,
- len(serialized_sidedata),
+ e = revlogutils.entry(
+ flags=flags,
+ data_offset=offset,
+ data_compressed_length=deltainfo.deltalen,
+ data_uncompressed_length=textlen,
+ data_compression_mode=compression_mode,
+ data_delta_base=deltainfo.base,
+ link_rev=link,
+ parent_rev_1=p1r,
+ parent_rev_2=p2r,
+ node_id=node,
+ sidedata_offset=sidedata_offset,
+ sidedata_compressed_length=len(serialized_sidedata),
+ sidedata_compression_mode=sidedata_compression_mode,
)
- if self.version & 0xFFFF != REVLOGV2:
- e = e[:8]
-
self.index.append(e)
- entry = self._io.packentry(e, self.node, self.version, curr)
+ entry = self.index.entry_binary(curr)
+ if curr == 0 and self._docket is None:
+ header = self._format_flags | self._format_version
+ header = self.index.pack_header(header)
+ entry = header + entry
self._writeentry(
transaction,
- ifh,
- dfh,
entry,
deltainfo.data,
link,
offset,
serialized_sidedata,
+ sidedata_offset,
)
rawtext = btext[0]
@@ -2416,19 +2628,13 @@
to `n - 1`'s sidedata being written after `n`'s data.
TODO cache this in a docket file before getting out of experimental."""
- if self.version & 0xFFFF != REVLOGV2:
+ if self._docket is None:
return self.end(prev)
-
- offset = 0
- for rev, entry in enumerate(self.index):
- sidedata_end = entry[8] + entry[9]
- # Sidedata for a previous rev has potentially been written after
- # this rev's end, so take the max.
- offset = max(self.end(rev), offset, sidedata_end)
- return offset
+ else:
+ return self._docket.data_end
def _writeentry(
- self, transaction, ifh, dfh, entry, data, link, offset, sidedata
+ self, transaction, entry, data, link, offset, sidedata, sidedata_offset
):
# Files opened in a+ mode have inconsistent behavior on various
# platforms. Windows requires that a file positioning call be made
@@ -2442,29 +2648,47 @@
# Note: This is likely not necessary on Python 3. However, because
# the file handle is reused for reads and may be seeked there, we need
# to be careful before changing this.
- ifh.seek(0, os.SEEK_END)
+ if self._writinghandles is None:
+ msg = b'adding revision outside `revlog._writing` context'
+ raise error.ProgrammingError(msg)
+ ifh, dfh, sdfh = self._writinghandles
+ if self._docket is None:
+ ifh.seek(0, os.SEEK_END)
+ else:
+ ifh.seek(self._docket.index_end, os.SEEK_SET)
if dfh:
- dfh.seek(0, os.SEEK_END)
+ if self._docket is None:
+ dfh.seek(0, os.SEEK_END)
+ else:
+ dfh.seek(self._docket.data_end, os.SEEK_SET)
+ if sdfh:
+ sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
curr = len(self) - 1
if not self._inline:
- transaction.add(self.datafile, offset)
- transaction.add(self.indexfile, curr * len(entry))
+ transaction.add(self._datafile, offset)
+ if self._sidedatafile:
+ transaction.add(self._sidedatafile, sidedata_offset)
+ transaction.add(self._indexfile, curr * len(entry))
if data[0]:
dfh.write(data[0])
dfh.write(data[1])
if sidedata:
- dfh.write(sidedata)
+ sdfh.write(sidedata)
ifh.write(entry)
else:
offset += curr * self.index.entry_size
- transaction.add(self.indexfile, offset)
+ transaction.add(self._indexfile, offset)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
- if sidedata:
- ifh.write(sidedata)
- self._enforceinlinesize(transaction, ifh)
+ assert not sidedata
+ self._enforceinlinesize(transaction)
+ if self._docket is not None:
+ self._docket.index_end = self._writinghandles[0].tell()
+ self._docket.data_end = self._writinghandles[1].tell()
+ self._docket.sidedata_end = self._writinghandles[2].tell()
+
nodemaputil.setup_persistent_nodemap(transaction, self)
def addgroup(
@@ -2487,115 +2711,93 @@
this revlog and the node that was added.
"""
- if self._writinghandles:
+ if self._adding_group:
raise error.ProgrammingError(b'cannot nest addgroup() calls')
- r = len(self)
- end = 0
- if r:
- end = self.end(r - 1)
- ifh = self._indexfp(b"a+")
- isize = r * self.index.entry_size
- if self._inline:
- transaction.add(self.indexfile, end + isize)
- dfh = None
- else:
- transaction.add(self.indexfile, isize)
- transaction.add(self.datafile, end)
- dfh = self._datafp(b"a+")
-
- def flush():
- if dfh:
- dfh.flush()
- ifh.flush()
-
- self._writinghandles = (ifh, dfh)
+ self._adding_group = True
empty = True
-
try:
- deltacomputer = deltautil.deltacomputer(self)
- # loop through our set of deltas
- for data in deltas:
- node, p1, p2, linknode, deltabase, delta, flags, sidedata = data
- link = linkmapper(linknode)
- flags = flags or REVIDX_DEFAULT_FLAGS
-
- rev = self.index.get_rev(node)
- if rev is not None:
- # this can happen if two branches make the same change
- self._nodeduplicatecallback(transaction, rev)
- if duplicaterevisioncb:
- duplicaterevisioncb(self, rev)
- empty = False
- continue
-
- for p in (p1, p2):
- if not self.index.has_node(p):
+ with self._writing(transaction):
+ deltacomputer = deltautil.deltacomputer(self)
+ # loop through our set of deltas
+ for data in deltas:
+ (
+ node,
+ p1,
+ p2,
+ linknode,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ ) = data
+ link = linkmapper(linknode)
+ flags = flags or REVIDX_DEFAULT_FLAGS
+
+ rev = self.index.get_rev(node)
+ if rev is not None:
+ # this can happen if two branches make the same change
+ self._nodeduplicatecallback(transaction, rev)
+ if duplicaterevisioncb:
+ duplicaterevisioncb(self, rev)
+ empty = False
+ continue
+
+ for p in (p1, p2):
+ if not self.index.has_node(p):
+ raise error.LookupError(
+ p, self.radix, _(b'unknown parent')
+ )
+
+ if not self.index.has_node(deltabase):
raise error.LookupError(
- p, self.indexfile, _(b'unknown parent')
+ deltabase, self.display_id, _(b'unknown delta base')
)
- if not self.index.has_node(deltabase):
- raise error.LookupError(
- deltabase, self.indexfile, _(b'unknown delta base')
+ baserev = self.rev(deltabase)
+
+ if baserev != nullrev and self.iscensored(baserev):
+ # if base is censored, delta must be full replacement in a
+ # single patch operation
+ hlen = struct.calcsize(b">lll")
+ oldlen = self.rawsize(baserev)
+ newlen = len(delta) - hlen
+ if delta[:hlen] != mdiff.replacediffheader(
+ oldlen, newlen
+ ):
+ raise error.CensoredBaseError(
+ self.display_id, self.node(baserev)
+ )
+
+ if not flags and self._peek_iscensored(baserev, delta):
+ flags |= REVIDX_ISCENSORED
+
+ # We assume consumers of addrevisioncb will want to retrieve
+ # the added revision, which will require a call to
+ # revision(). revision() will fast path if there is a cache
+ # hit. So, we tell _addrevision() to always cache in this case.
+ # We're only using addgroup() in the context of changegroup
+ # generation so the revision data can always be handled as raw
+ # by the flagprocessor.
+ rev = self._addrevision(
+ node,
+ None,
+ transaction,
+ link,
+ p1,
+ p2,
+ flags,
+ (baserev, delta),
+ alwayscache=alwayscache,
+ deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
- baserev = self.rev(deltabase)
-
- if baserev != nullrev and self.iscensored(baserev):
- # if base is censored, delta must be full replacement in a
- # single patch operation
- hlen = struct.calcsize(b">lll")
- oldlen = self.rawsize(baserev)
- newlen = len(delta) - hlen
- if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
- raise error.CensoredBaseError(
- self.indexfile, self.node(baserev)
- )
-
- if not flags and self._peek_iscensored(baserev, delta, flush):
- flags |= REVIDX_ISCENSORED
-
- # We assume consumers of addrevisioncb will want to retrieve
- # the added revision, which will require a call to
- # revision(). revision() will fast path if there is a cache
- # hit. So, we tell _addrevision() to always cache in this case.
- # We're only using addgroup() in the context of changegroup
- # generation so the revision data can always be handled as raw
- # by the flagprocessor.
- rev = self._addrevision(
- node,
- None,
- transaction,
- link,
- p1,
- p2,
- flags,
- (baserev, delta),
- ifh,
- dfh,
- alwayscache=alwayscache,
- deltacomputer=deltacomputer,
- sidedata=sidedata,
- )
-
- if addrevisioncb:
- addrevisioncb(self, rev)
- empty = False
-
- if not dfh and not self._inline:
- # addrevision switched from inline to conventional
- # reopen the index
- ifh.close()
- dfh = self._datafp(b"a+")
- ifh = self._indexfp(b"a+")
- self._writinghandles = (ifh, dfh)
+ if addrevisioncb:
+ addrevisioncb(self, rev)
+ empty = False
finally:
- self._writinghandles = None
-
- if dfh:
- dfh.close()
- ifh.close()
+ self._adding_group = False
return not empty
def iscensored(self, rev):
@@ -2605,7 +2807,7 @@
return self.flags(rev) & REVIDX_ISCENSORED
- def _peek_iscensored(self, baserev, delta, flush):
+ def _peek_iscensored(self, baserev, delta):
"""Quickly check if a delta produces a censored revision."""
if not self._censorable:
return False
@@ -2648,14 +2850,25 @@
return
# first truncate the files on disk
- end = self.start(rev)
+ data_end = self.start(rev)
if not self._inline:
- transaction.add(self.datafile, end)
+ transaction.add(self._datafile, data_end)
end = rev * self.index.entry_size
else:
- end += rev * self.index.entry_size
-
- transaction.add(self.indexfile, end)
+ end = data_end + (rev * self.index.entry_size)
+
+ if self._sidedatafile:
+ sidedata_end = self.sidedata_cut_off(rev)
+ transaction.add(self._sidedatafile, sidedata_end)
+
+ transaction.add(self._indexfile, end)
+ if self._docket is not None:
+ # XXX we could leverage the docket while stripping. However, it is
+ # not powerful enough at the time of this comment
+ self._docket.index_end = end
+ self._docket.data_end = data_end
+ self._docket.sidedata_end = sidedata_end
+ self._docket.write(transaction, stripping=True)
# then reset internal state in memory to forget those revisions
self._revisioncache = None
@@ -2688,7 +2901,7 @@
dd = 0
try:
- f = self.opener(self.indexfile)
+ f = self.opener(self._indexfile)
f.seek(0, io.SEEK_END)
actual = f.tell()
f.close()
@@ -2709,9 +2922,9 @@
return (dd, di)
def files(self):
- res = [self.indexfile]
+ res = [self._indexfile]
if not self._inline:
- res.append(self.datafile)
+ res.append(self._datafile)
return res
def emitrevisions(
@@ -2768,7 +2981,7 @@
addrevisioncb=None,
deltareuse=DELTAREUSESAMEREVS,
forcedeltabothparents=None,
- sidedatacompanion=None,
+ sidedata_helpers=None,
):
"""Copy this revlog to another, possibly with format changes.
@@ -2811,21 +3024,8 @@
argument controls whether to force compute deltas against both parents
for merges. By default, the current default is used.
- If not None, the `sidedatacompanion` is callable that accept two
- arguments:
-
- (srcrevlog, rev)
-
- and return a quintet that control changes to sidedata content from the
- old revision to the new clone result:
-
- (dropall, filterout, update, new_flags, dropped_flags)
-
- * if `dropall` is True, all sidedata should be dropped
- * `filterout` is a set of sidedata keys that should be dropped
- * `update` is a mapping of additionnal/new key -> value
- * new_flags is a bitfields of new flags that the revision should get
- * dropped_flags is a bitfields of new flags that the revision shoudl not longer have
+ See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
if deltareuse not in self.DELTAREUSEALL:
raise ValueError(
@@ -2865,7 +3065,7 @@
addrevisioncb,
deltareuse,
forcedeltabothparents,
- sidedatacompanion,
+ sidedata_helpers,
)
finally:
@@ -2880,7 +3080,7 @@
addrevisioncb,
deltareuse,
forcedeltabothparents,
- sidedatacompanion,
+ sidedata_helpers,
):
"""perform the core duty of `revlog.clone` after parameter processing"""
deltacomputer = deltautil.deltacomputer(destrevlog)
@@ -2896,31 +3096,19 @@
p2 = index[entry[6]][7]
node = entry[7]
- sidedataactions = (False, [], {}, 0, 0)
- if sidedatacompanion is not None:
- sidedataactions = sidedatacompanion(self, rev)
-
# (Possibly) reuse the delta from the revlog if allowed and
# the revlog chunk is a delta.
cachedelta = None
rawtext = None
- if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
- dropall = sidedataactions[0]
- filterout = sidedataactions[1]
- update = sidedataactions[2]
- new_flags = sidedataactions[3]
- dropped_flags = sidedataactions[4]
- text, sidedata = self._revisiondata(rev)
- if dropall:
- sidedata = {}
- for key in filterout:
- sidedata.pop(key, None)
- sidedata.update(update)
- if not sidedata:
- sidedata = None
-
- flags |= new_flags
- flags &= ~dropped_flags
+ if deltareuse == self.DELTAREUSEFULLADD:
+ text = self._revisiondata(rev)
+ sidedata = self.sidedata(rev)
+
+ if sidedata_helpers is not None:
+ (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+ self, sidedata_helpers, sidedata, rev
+ )
+ flags = (flags | new_flags[0]) & ~new_flags[1]
destrevlog.addrevision(
text,
@@ -2940,16 +3128,20 @@
if dp != nullrev:
cachedelta = (dp, bytes(self._chunk(rev)))
+ sidedata = None
if not cachedelta:
- rawtext = self.rawdata(rev)
-
- ifh = destrevlog.opener(
- destrevlog.indexfile, b'a+', checkambig=False
- )
- dfh = None
- if not destrevlog._inline:
- dfh = destrevlog.opener(destrevlog.datafile, b'a+')
- try:
+ rawtext = self._revisiondata(rev)
+ sidedata = self.sidedata(rev)
+ if sidedata is None:
+ sidedata = self.sidedata(rev)
+
+ if sidedata_helpers is not None:
+ (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+ self, sidedata_helpers, sidedata, rev
+ )
+ flags = (flags | new_flags[0]) & ~new_flags[1]
+
+ with destrevlog._writing(tr):
destrevlog._addrevision(
node,
rawtext,
@@ -2959,101 +3151,28 @@
p2,
flags,
cachedelta,
- ifh,
- dfh,
deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
- finally:
- if dfh:
- dfh.close()
- ifh.close()
if addrevisioncb:
addrevisioncb(self, rev, node)
def censorrevision(self, tr, censornode, tombstone=b''):
- if (self.version & 0xFFFF) == REVLOGV0:
+ if self._format_version == REVLOGV0:
raise error.RevlogError(
- _(b'cannot censor with version %d revlogs') % self.version
- )
-
- censorrev = self.rev(censornode)
- tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
-
- if len(tombstone) > self.rawsize(censorrev):
- raise error.Abort(
- _(b'censor tombstone must be no longer than censored data')
+ _(b'cannot censor with version %d revlogs')
+ % self._format_version
)
-
- # Rewriting the revlog in place is hard. Our strategy for censoring is
- # to create a new revlog, copy all revisions to it, then replace the
- # revlogs on transaction close.
-
- newindexfile = self.indexfile + b'.tmpcensored'
- newdatafile = self.datafile + b'.tmpcensored'
-
- # This is a bit dangerous. We could easily have a mismatch of state.
- newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
- newrl.version = self.version
- newrl._generaldelta = self._generaldelta
- newrl._io = self._io
-
- for rev in self.revs():
- node = self.node(rev)
- p1, p2 = self.parents(node)
-
- if rev == censorrev:
- newrl.addrawrevision(
- tombstone,
- tr,
- self.linkrev(censorrev),
- p1,
- p2,
- censornode,
- REVIDX_ISCENSORED,
- )
-
- if newrl.deltaparent(rev) != nullrev:
- raise error.Abort(
- _(
- b'censored revision stored as delta; '
- b'cannot censor'
- ),
- hint=_(
- b'censoring of revlogs is not '
- b'fully implemented; please report '
- b'this bug'
- ),
- )
- continue
-
- if self.iscensored(rev):
- if self.deltaparent(rev) != nullrev:
- raise error.Abort(
- _(
- b'cannot censor due to censored '
- b'revision having delta stored'
- )
- )
- rawtext = self._chunk(rev)
- else:
- rawtext = self.rawdata(rev)
-
- newrl.addrawrevision(
- rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
+ elif self._format_version == REVLOGV1:
+ censor.v1_censor(self, tr, censornode, tombstone)
+ else:
+ # revlog v2
+ raise error.RevlogError(
+ _(b'cannot censor with version %d revlogs')
+ % self._format_version
)
- tr.addbackup(self.indexfile, location=b'store')
- if not self._inline:
- tr.addbackup(self.datafile, location=b'store')
-
- self.opener.rename(newrl.indexfile, self.indexfile)
- if not self._inline:
- self.opener.rename(newrl.datafile, self.datafile)
-
- self.clearcaches()
- self._loadindex()
-
def verifyintegrity(self, state):
"""Verifies the integrity of the revlog.
@@ -3066,13 +3185,13 @@
if di:
yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
- version = self.version & 0xFFFF
+ version = self._format_version
# The verifier tells us what version revlog we should be.
if version != state[b'expectedversion']:
yield revlogproblem(
warning=_(b"warning: '%s' uses revlog format %d; expected %d")
- % (self.indexfile, version, state[b'expectedversion'])
+ % (self.display_id, version, state[b'expectedversion'])
)
state[b'skipread'] = set()
@@ -3170,9 +3289,9 @@
d = {}
if exclusivefiles:
- d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
+ d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
if not self._inline:
- d[b'exclusivefiles'].append((self.opener, self.datafile))
+ d[b'exclusivefiles'].append((self.opener, self._datafile))
if sharedfiles:
d[b'sharedfiles'] = []
@@ -3190,12 +3309,10 @@
return d
- def rewrite_sidedata(self, helpers, startrev, endrev):
- if self.version & 0xFFFF != REVLOGV2:
+ def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
+ if not self.hassidedata:
return
- # inline are not yet supported because they suffer from an issue when
- # rewriting them (since it's not an append-only operation).
- # See issue6485.
+ # revlog formats with sidedata support do not support inline
assert not self._inline
if not helpers[1] and not helpers[2]:
# Nothing to generate or remove
@@ -3203,13 +3320,14 @@
new_entries = []
# append the new sidedata
- with self._datafp(b'a+') as fp:
- # Maybe this bug still exists, see revlog._writeentry
- fp.seek(0, os.SEEK_END)
- current_offset = fp.tell()
+ with self._writing(transaction):
+ ifh, dfh, sdfh = self._writinghandles
+ sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
+
+ current_offset = sdfh.tell()
for rev in range(startrev, endrev + 1):
entry = self.index[rev]
- new_sidedata = storageutil.run_sidedata_helpers(
+ new_sidedata, flags = sidedatautil.run_sidedata_helpers(
store=self,
sidedata_helpers=helpers,
sidedata={},
@@ -3219,24 +3337,58 @@
serialized_sidedata = sidedatautil.serialize_sidedata(
new_sidedata
)
+
+ sidedata_compression_mode = COMP_MODE_INLINE
+ if serialized_sidedata and self.hassidedata:
+ sidedata_compression_mode = COMP_MODE_PLAIN
+ h, comp_sidedata = self.compress(serialized_sidedata)
+ if (
+ h != b'u'
+ and comp_sidedata[0:1] != b'\0'
+ and len(comp_sidedata) < len(serialized_sidedata)
+ ):
+ assert not h
+ if (
+ comp_sidedata[0:1]
+ == self._docket.default_compression_header
+ ):
+ sidedata_compression_mode = COMP_MODE_DEFAULT
+ serialized_sidedata = comp_sidedata
+ else:
+ sidedata_compression_mode = COMP_MODE_INLINE
+ serialized_sidedata = comp_sidedata
if entry[8] != 0 or entry[9] != 0:
# rewriting entries that already have sidedata is not
# supported yet, because it introduces garbage data in the
# revlog.
- msg = b"Rewriting existing sidedata is not supported yet"
+ msg = b"rewriting existing sidedata is not supported yet"
raise error.Abort(msg)
- entry = entry[:8]
- entry += (current_offset, len(serialized_sidedata))
-
- fp.write(serialized_sidedata)
- new_entries.append(entry)
+
+ # Apply (potential) flags to add and to remove after running
+ # the sidedata helpers
+ new_offset_flags = (entry[0] | flags[0]) & ~flags[1]
+ entry_update = (
+ current_offset,
+ len(serialized_sidedata),
+ new_offset_flags,
+ sidedata_compression_mode,
+ )
+
+ # the sidedata computation might have moved the file cursors around
+ sdfh.seek(current_offset, os.SEEK_SET)
+ sdfh.write(serialized_sidedata)
+ new_entries.append(entry_update)
current_offset += len(serialized_sidedata)
-
- # rewrite the new index entries
- with self._indexfp(b'w+') as fp:
- fp.seek(startrev * self.index.entry_size)
- for i, entry in enumerate(new_entries):
+ self._docket.sidedata_end = sdfh.tell()
+
+ # rewrite the new index entries
+ ifh.seek(startrev * self.index.entry_size)
+ for i, e in enumerate(new_entries):
rev = startrev + i
- self.index.replace_sidedata_info(rev, entry[8], entry[9])
- packed = self._io.packentry(entry, self.node, self.version, rev)
- fp.write(packed)
+ self.index.replace_sidedata_info(rev, *e)
+ packed = self.index.entry_binary(rev)
+ if rev == 0 and self._docket is None:
+ header = self._format_flags | self._format_version
+ header = self.index.pack_header(header)
+ packed = header + packed
+ ifh.write(packed)
--- a/mercurial/revlogutils/__init__.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revlogutils/__init__.py Mon Jun 07 17:10:35 2021 -0400
@@ -6,3 +6,75 @@
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
+
+from ..thirdparty import attr
+from ..interfaces import repository
+
+# See mercurial.revlogutils.constants for doc
+COMP_MODE_INLINE = 2
+
+
+def offset_type(offset, type):
+ if (type & ~repository.REVISION_FLAGS_KNOWN) != 0:
+ raise ValueError(b'unknown revlog index flags: %d' % type)
+ return int(int(offset) << 16 | type)
+
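
The first index field packs the data offset and the 16-bit flag field together. A quick round trip (`REVIDX_ISCENSORED` is the existing censor flag, bit 15 of the flag field):

REVIDX_ISCENSORED = 1 << 15

field = offset_type(4096, REVIDX_ISCENSORED)
assert field >> 16 == 4096                  # recover the offset
assert field & 0xFFFF == REVIDX_ISCENSORED  # recover the flags
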
+
+def entry(
+ data_offset,
+ data_compressed_length,
+ data_delta_base,
+ link_rev,
+ parent_rev_1,
+ parent_rev_2,
+ node_id,
+ flags=0,
+ data_uncompressed_length=-1,
+ data_compression_mode=COMP_MODE_INLINE,
+ sidedata_offset=0,
+ sidedata_compressed_length=0,
+ sidedata_compression_mode=COMP_MODE_INLINE,
+):
+ """Build one entry from symbolic name
+
+ This is useful to abstract the actual detail of how we build the entry
+ tuple for caller who don't care about it.
+
+ This should always be called using keyword arguments. Some arguments have
+ default value, this match the value used by index version that does not store such data.
+ """
+ return (
+ offset_type(data_offset, flags),
+ data_compressed_length,
+ data_uncompressed_length,
+ data_delta_base,
+ link_rev,
+ parent_rev_1,
+ parent_rev_2,
+ node_id,
+ sidedata_offset,
+ sidedata_compressed_length,
+ data_compression_mode,
+ sidedata_compression_mode,
+ )
+
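
Called with only the mandatory fields, every sidedata- and compression-related item keeps its "not stored" default. A usage sketch (the null node id stands in for a real hash):

e = entry(
    flags=0,
    data_offset=0,
    data_compressed_length=11,
    data_uncompressed_length=11,
    data_delta_base=0,
    link_rev=0,
    parent_rev_1=-1,
    parent_rev_2=-1,
    node_id=b'\x00' * 20,
)
# trailing fields: sidedata offset/length 0, both compression modes INLINE
assert e[8:] == (0, 0, COMP_MODE_INLINE, COMP_MODE_INLINE)
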
+
+@attr.s(slots=True, frozen=True)
+class revisioninfo(object):
+ """Information about a revision that allows building its fulltext
+ node: expected hash of the revision
+ p1, p2: parent revs of the revision
+ btext: built text cache consisting of a one-element list
+ cachedelta: (baserev, uncompressed_delta) or None
+ textlen: expected length of the revision fulltext
+ flags: flags associated with the revision storage
+
+ One of btext[0] or cachedelta must be set.
+ """
+
+ node = attr.ib()
+ p1 = attr.ib()
+ p2 = attr.ib()
+ btext = attr.ib()
+ textlen = attr.ib()
+ cachedelta = attr.ib()
+ flags = attr.ib()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/censor.py Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,97 @@
+# censor code related to censoring revision
+#
+# Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
+# Copyright 2015 Google, Inc <martinvonz@google.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from ..node import (
+ nullrev,
+)
+from ..i18n import _
+from .. import (
+ error,
+)
+from ..utils import (
+ storageutil,
+)
+from . import constants
+
+
+def v1_censor(rl, tr, censornode, tombstone=b''):
+ """censors a revision in a "version 1" revlog"""
+ assert rl._format_version == constants.REVLOGV1, rl._format_version
+
+ # avoid cycle
+ from .. import revlog
+
+ censorrev = rl.rev(censornode)
+ tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
+
+ # Rewriting the revlog in place is hard. Our strategy for censoring is
+ # to create a new revlog, copy all revisions to it, then replace the
+ # revlogs on transaction close.
+ #
+ # This is a bit dangerous. We could easily have a mismatch of state.
+ newrl = revlog.revlog(
+ rl.opener,
+ target=rl.target,
+ radix=rl.radix,
+ postfix=b'tmpcensored',
+ censorable=True,
+ )
+ newrl._format_version = rl._format_version
+ newrl._format_flags = rl._format_flags
+ newrl._generaldelta = rl._generaldelta
+ newrl._parse_index = rl._parse_index
+
+ for rev in rl.revs():
+ node = rl.node(rev)
+ p1, p2 = rl.parents(node)
+
+ if rev == censorrev:
+ newrl.addrawrevision(
+ tombstone,
+ tr,
+ rl.linkrev(censorrev),
+ p1,
+ p2,
+ censornode,
+ constants.REVIDX_ISCENSORED,
+ )
+
+ if newrl.deltaparent(rev) != nullrev:
+ m = _(b'censored revision stored as delta; cannot censor')
+ h = _(
+ b'censoring of revlogs is not fully implemented;'
+ b' please report this bug'
+ )
+ raise error.Abort(m, hint=h)
+ continue
+
+ if rl.iscensored(rev):
+ if rl.deltaparent(rev) != nullrev:
+ m = _(
+ b'cannot censor due to censored '
+ b'revision having delta stored'
+ )
+ raise error.Abort(m)
+ rawtext = rl._chunk(rev)
+ else:
+ rawtext = rl.rawdata(rev)
+
+ newrl.addrawrevision(
+ rawtext, tr, rl.linkrev(rev), p1, p2, node, rl.flags(rev)
+ )
+
+ tr.addbackup(rl._indexfile, location=b'store')
+ if not rl._inline:
+ tr.addbackup(rl._datafile, location=b'store')
+
+ rl.opener.rename(newrl._indexfile, rl._indexfile)
+ if not rl._inline:
+ rl.opener.rename(newrl._datafile, rl._datafile)
+
+ rl.clearcaches()
+ rl._loadindex()
--- a/mercurial/revlogutils/constants.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revlogutils/constants.py Mon Jun 07 17:10:35 2021 -0400
@@ -1,4 +1,4 @@
-# revlogdeltas.py - constant used for revlog logic
+# constants.py - constants used for revlog logic.
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2018 Octobus <contact@octobus.net>
@@ -12,16 +12,110 @@
import struct
from ..interfaces import repository
+from .. import revlogutils
+
+### Internal utility constants
+
+KIND_CHANGELOG = 1001  # over 256 so it cannot be mistaken for a bytes value
+KIND_MANIFESTLOG = 1002
+KIND_FILELOG = 1003
+KIND_OTHER = 1004
+
+ALL_KINDS = {
+ KIND_CHANGELOG,
+ KIND_MANIFESTLOG,
+ KIND_FILELOG,
+ KIND_OTHER,
+}
+
+### Index entry key
+#
+#
+# Internal details
+# ----------------
+#
+# A large part of the revlog logic deals with revisions' "index entries", tuple
+# objects that contain the same "items" whatever the revlog version.
+# Different versions will have different ways of storing these items (sometimes
+# not having them at all), but the tuple will always be the same. New fields
+# are usually added at the end to avoid breaking existing code that relies
+# on the existing order. The fields are defined as follows:
+
+# [0] offset:
+#     The byte index of the start of the revision's data chunk.
+#     That value is shifted up by 16 bits; use "offset = field >> 16" to
+#     retrieve it.
+#
+# flags:
+# A flag field that carries special information or changes the behavior
+# of the revision. (see `REVIDX_*` constants for details)
+# The flag field only occupies the first 16 bits of this field,
+# use "flags = field & 0xFFFF" to retrieve the value.
+ENTRY_DATA_OFFSET = 0
+
+# [1] compressed length:
+# The size, in bytes, of the chunk on disk
+ENTRY_DATA_COMPRESSED_LENGTH = 1
+
+# [2] uncompressed length:
+# The size, in bytes, of the full revision once reconstructed.
+ENTRY_DATA_UNCOMPRESSED_LENGTH = 2
+
+# [3] base rev:
+# Either the base of the revision delta chain (without general
+# delta), or the base of the delta (stored in the data chunk)
+# with general delta.
+ENTRY_DELTA_BASE = 3
+
+# [4] link rev:
+# Changelog revision number of the changeset introducing this
+# revision.
+ENTRY_LINK_REV = 4
+
+# [5] parent 1 rev:
+# Revision number of the first parent
+ENTRY_PARENT_1 = 5
+
+# [6] parent 2 rev:
+# Revision number of the second parent
+ENTRY_PARENT_2 = 6
+
+# [7] node id:
+# The node id of the current revision
+ENTRY_NODE_ID = 7
+
+# [8] sidedata offset:
+# The byte index of the start of the revision's side-data chunk.
+ENTRY_SIDEDATA_OFFSET = 8
+
+# [9] sidedata chunk length:
+# The size, in bytes, of the revision's side-data chunk.
+ENTRY_SIDEDATA_COMPRESSED_LENGTH = 9
+
+# [10] data compression mode:
+# two bits that detail the way the data chunk is compressed on disk.
+# (see "COMP_MODE_*" constants for details). For revlog version 0 and
+# 1 this will always be COMP_MODE_INLINE.
+ENTRY_DATA_COMPRESSION_MODE = 10
+
+# [11] side-data compression mode:
+# two bits that detail the way the sidedata chunk is compressed on disk.
+# (see "COMP_MODE_*" constants for details)
+ENTRY_SIDEDATA_COMPRESSION_MODE = 11
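
A short sketch of how these constants are meant to be used; `index` and `rev`
are hypothetical stand-ins for a parsed revlog index and a revision number:

    # read the documented fields back out of one 12-item entry tuple
    e = index[rev]
    offset = e[ENTRY_DATA_OFFSET] >> 16
    flags = e[ENTRY_DATA_OFFSET] & 0xFFFF
    p1, p2 = e[ENTRY_PARENT_1], e[ENTRY_PARENT_2]
    node_id = e[ENTRY_NODE_ID]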
### main revlog header
-INDEX_HEADER = struct.Struct(b">I")
+# We cannot rely on Struct.format: it is inconsistent between Python <= 3.6
+# and later versions
+INDEX_HEADER_FMT = b">I"
+INDEX_HEADER = struct.Struct(INDEX_HEADER_FMT)
## revlog version
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
REVLOGV2 = 0xDEAD
+# Dummy value until file format is finalized.
+CHANGELOGV2 = 0xD34D
## global revlog header flags
# Shared across v1 and v2.
@@ -31,8 +125,10 @@
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
+REVLOGV0_FLAGS = 0
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = FLAG_INLINE_DATA
+CHANGELOGV2_FLAGS = 0
### individual entry
@@ -70,9 +166,24 @@
# 32 bytes: nodeid
# 8 bytes: sidedata offset
# 4 bytes: sidedata compressed length
-# 20 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
-INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQi20x")
-assert INDEX_ENTRY_V2.size == 32 * 3
+# 1 byte: compression mode (the 2 lower bits are data_compression_mode)
+# 19 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQiB19x")
+assert INDEX_ENTRY_V2.size == 32 * 3, INDEX_ENTRY_V2.size
+
+# 6 bytes: offset
+# 2 bytes: flags
+# 4 bytes: compressed length
+# 4 bytes: uncompressed length
+# 4 bytes: parent 1 rev
+# 4 bytes: parent 2 rev
+# 32 bytes: nodeid
+# 8 bytes: sidedata offset
+# 4 bytes: sidedata compressed length
+# 1 byte: compression mode (the 2 lower bits are data_compression_mode)
+# 27 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiB27x")
+assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_CL_V2.size
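
Both 96-byte layouts can be cross-checked with a standalone sketch:

    import struct

    # 8 + 6 * 4 + 20 + 12 + 8 + 4 + 1 + 19 == 96  (INDEX_ENTRY_V2)
    assert struct.calcsize(b'>Qiiiiii20s12xQiB19x') == 32 * 3
    # 8 + 4 * 4 + 20 + 12 + 8 + 4 + 1 + 27 == 96  (INDEX_ENTRY_CL_V2)
    assert struct.calcsize(b'>Qiiii20s12xQiB27x') == 32 * 3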
# revlog index flags
@@ -85,8 +196,6 @@
REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
# revision data is stored externally
REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
-# revision data contains extra metadata not part of the official digest
-REVIDX_SIDEDATA = repository.REVISION_FLAG_SIDEDATA
# revision changes files in a way that could affect copy tracing.
REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
REVIDX_DEFAULT_FLAGS = 0
@@ -95,13 +204,79 @@
REVIDX_ISCENSORED,
REVIDX_ELLIPSIS,
REVIDX_EXTSTORED,
- REVIDX_SIDEDATA,
REVIDX_HASCOPIESINFO,
]
# bitmark for flags that could cause rawdata content change
-REVIDX_RAWTEXT_CHANGING_FLAGS = (
- REVIDX_ISCENSORED | REVIDX_EXTSTORED | REVIDX_SIDEDATA
-)
+REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
+
+## chunk compression mode constants:
+# These constants are used in revlog version >=2 to denote the compression used
+# for a chunk.
+
+# The chunk uses no compression; the data stored on disk can be used directly
+# as the chunk value, without any header information prefixed.
+COMP_MODE_PLAIN = 0
+
+# The chunk uses the "default compression" for the revlog (usually defined in the
+# revlog docket). A header is still used.
+#
+# XXX: keeping a header is probably not useful and we should probably drop it.
+#
+# XXX: The value of allowing mixed types of compression in the revlog is unclear
+# and we should consider making PLAIN/DEFAULT the only available mode for
+# revlog v2, disallowing INLINE mode.
+COMP_MODE_DEFAULT = 1
+
+# The chunk uses a compression mode stored "inline" at the start of the chunk
+# itself. This is the mode always used for revlog versions "0" and "1".
+COMP_MODE_INLINE = revlogutils.COMP_MODE_INLINE
+
+SUPPORTED_FLAGS = {
+ REVLOGV0: REVLOGV0_FLAGS,
+ REVLOGV1: REVLOGV1_FLAGS,
+ REVLOGV2: REVLOGV2_FLAGS,
+ CHANGELOGV2: CHANGELOGV2_FLAGS,
+}
+
+_no = lambda flags: False
+_yes = lambda flags: True
+
+
+def _from_flag(flag):
+ return lambda flags: bool(flags & flag)
+
+
+FEATURES_BY_VERSION = {
+ REVLOGV0: {
+ b'inline': _no,
+ b'generaldelta': _no,
+ b'sidedata': False,
+ b'docket': False,
+ },
+ REVLOGV1: {
+ b'inline': _from_flag(FLAG_INLINE_DATA),
+ b'generaldelta': _from_flag(FLAG_GENERALDELTA),
+ b'sidedata': False,
+ b'docket': False,
+ },
+ REVLOGV2: {
+ # The point of inline-revlog is to reduce the number of files used in
+        # the store. Using a docket defeats this purpose, so we need other
+        # means to reduce the number of files for revlogv2.
+ b'inline': _no,
+ b'generaldelta': _yes,
+ b'sidedata': True,
+ b'docket': True,
+ },
+ CHANGELOGV2: {
+ b'inline': _no,
+        # General delta is useless for the changelog since we don't do any deltas
+ b'generaldelta': _no,
+ b'sidedata': True,
+ b'docket': True,
+ },
+}
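
Note the mixed value types: b'inline' and b'generaldelta' map to callables
taking the header flags, while b'sidedata' and b'docket' are plain booleans.
A quick sketch of querying the table:

    # features of a v1 revlog whose header has both v1 flags set
    features = FEATURES_BY_VERSION[REVLOGV1]
    header_flags = FLAG_INLINE_DATA | FLAG_GENERALDELTA
    assert features[b'inline'](header_flags)
    assert features[b'generaldelta'](header_flags)
    assert features[b'sidedata'] is False  # plain boolean, not a callable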
+
SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
--- a/mercurial/revlogutils/deltas.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revlogutils/deltas.py Mon Jun 07 17:10:35 2021 -0400
@@ -553,6 +553,24 @@
snapshotdepth = attr.ib()
+def drop_u_compression(delta):
+ """turn into a "u" (no-compression) into no-compression without header
+
+ This is useful for revlog format that has better compression method.
+ """
+ assert delta.data[0] == b'u', delta.data[0]
+ return _deltainfo(
+ delta.distance,
+ delta.deltalen - 1,
+ (b'', delta.data[1]),
+ delta.base,
+ delta.chainbase,
+ delta.chainlen,
+ delta.compresseddeltalen,
+ delta.snapshotdepth,
+ )
+
+
def isgooddeltainfo(revlog, deltainfo, revinfo):
"""Returns True if the given delta is good. Good means that it is within
the disk span, disk size, and chain length bounds that we know to be
@@ -914,7 +932,7 @@
def buildtext(self, revinfo, fh):
"""Builds a fulltext version of a revision
- revinfo: _revisioninfo instance that contains all needed info
+ revinfo: revisioninfo instance that contains all needed info
fh: file handle to either the .i or the .d revlog file,
depending on whether it is inlined or not
"""
@@ -1032,7 +1050,7 @@
snapshotdepth,
)
- def finddeltainfo(self, revinfo, fh):
+ def finddeltainfo(self, revinfo, fh, excluded_bases=None):
"""Find an acceptable delta against a candidate revision
revinfo: information about the revision (instance of _revisioninfo)
@@ -1044,10 +1062,17 @@
If no suitable deltabase is found, we return delta info for a full
snapshot.
+
+    `excluded_bases` is an optional set of revisions that cannot be used as
+    a delta base. Use this to recompute deltas suitable in a censor or strip
+    context.
"""
if not revinfo.textlen:
return self._fullsnapshotinfo(fh, revinfo)
+ if excluded_bases is None:
+ excluded_bases = set()
+
# no delta for flag processor revision (see "candelta" for why)
# not calling candelta since only one revision needs test, also to
# avoid overhead fetching flags again.
@@ -1072,6 +1097,8 @@
# challenge it against refined candidates
nominateddeltas.append(deltainfo)
for candidaterev in candidaterevs:
+ if candidaterev in excluded_bases:
+ continue
candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
if candidatedelta is not None:
if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/docket.py Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,333 @@
+# docket - code related to revlog "docket"
+#
+# Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+### Revlog docket file
+#
+# The revlog is stored on disk using multiple files:
+#
+# * a small docket file, containing metadata and a pointer,
+#
+# * an index file, containing fixed width information about revisions,
+#
+# * a data file, containing variable width data for these revisions,
+
+from __future__ import absolute_import
+
+import errno
+import os
+import random
+import struct
+
+from .. import (
+ encoding,
+ error,
+ node,
+ pycompat,
+ util,
+)
+
+from . import (
+ constants,
+)
+
+
+def make_uid(id_size=8):
+ """return a new unique identifier.
+
+ The identifier is random and composed of ascii characters."""
+    # Since we "hex" the result, we only need half as many random bytes to
+    # end up with a final uid of size id_size
+ return node.hex(os.urandom(id_size // 2))
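
As a small sketch, the default 8-character uid is the hex of 4 random bytes:

    uid = make_uid()  # default id_size=8
    assert len(uid) == 8
    assert all(c in b'0123456789abcdef' for c in uid)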
+
+
+# some special test logic to avoid annoying random output in the tests
+stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')
+
+if stable_docket_file:
+
+ def make_uid(id_size=8):
+ try:
+ with open(stable_docket_file, mode='rb') as f:
+ seed = f.read().strip()
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+            seed = b'04'  # chosen by a fair dice roll. guaranteed to be random
+ if pycompat.ispy3:
+ iter_seed = iter(seed)
+ else:
+ iter_seed = (ord(c) for c in seed)
+ # some basic circular sum hashing on 64 bits
+ int_seed = 0
+ low_mask = int('1' * 35, 2)
+ for i in iter_seed:
+ high_part = int_seed >> 35
+ low_part = (int_seed & low_mask) << 28
+ int_seed = high_part + low_part + i
+ r = random.Random()
+ if pycompat.ispy3:
+ r.seed(int_seed, version=1)
+ else:
+ r.seed(int_seed)
+ # once we drop python 3.8 support we can simply use r.randbytes
+ raw = r.getrandbits(id_size * 4)
+ assert id_size == 8
+ p = struct.pack('>L', raw)
+ new = node.hex(p)
+ with open(stable_docket_file, 'wb') as f:
+ f.write(new)
+ return new
+
+
+# Docket format
+#
+# * 4 bytes: revlog version
+# |   This is mandatory as the docket must be compatible with the previous
+# |   revlog index header.
+# * 1 byte: size of index uuid
+# * 1 byte: size of data uuid
+# * 1 byte: size of sidedata uuid
+# * 4 bytes: size of index data
+# * 4 bytes: pending size of index data
+# * 4 bytes: size of data
+# * 4 bytes: pending size of data
+# * 4 bytes: size of sidedata
+# * 4 bytes: pending size of sidedata
+# * 1 byte: default compression header
+S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBLLLLLLc')
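
A standalone sketch of the fixed-width part, assuming 8-character uuids and
b'x' (zlib) as the default compression header:

    import struct

    # same layout as above: b'>I' (version) + b'BBBLLLLLLc'
    S_HEADER = struct.Struct(b'>IBBBLLLLLLc')
    fixed = S_HEADER.pack(0xDEAD, 8, 8, 8, 0, 0, 0, 0, 0, 0, b'x')
    assert len(fixed) == S_HEADER.size
    # the three variable-width uuids are appended right after this part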
+
+
+class RevlogDocket(object):
+ """metadata associated with revlog"""
+
+ def __init__(
+ self,
+ revlog,
+ use_pending=False,
+ version_header=None,
+ index_uuid=None,
+ data_uuid=None,
+ sidedata_uuid=None,
+ index_end=0,
+ pending_index_end=0,
+ data_end=0,
+ pending_data_end=0,
+ sidedata_end=0,
+ pending_sidedata_end=0,
+ default_compression_header=None,
+ ):
+ self._version_header = version_header
+ self._read_only = bool(use_pending)
+ self._dirty = False
+ self._radix = revlog.radix
+ self._path = revlog._docket_file
+ self._opener = revlog.opener
+ self._index_uuid = index_uuid
+ self._data_uuid = data_uuid
+ self._sidedata_uuid = sidedata_uuid
+        # these asserts should be True as long as we have a single index filename
+ assert index_end <= pending_index_end
+ assert data_end <= pending_data_end
+ assert sidedata_end <= pending_sidedata_end
+ self._initial_index_end = index_end
+ self._pending_index_end = pending_index_end
+ self._initial_data_end = data_end
+ self._pending_data_end = pending_data_end
+ self._initial_sidedata_end = sidedata_end
+ self._pending_sidedata_end = pending_sidedata_end
+ if use_pending:
+ self._index_end = self._pending_index_end
+ self._data_end = self._pending_data_end
+ self._sidedata_end = self._pending_sidedata_end
+ else:
+ self._index_end = self._initial_index_end
+ self._data_end = self._initial_data_end
+ self._sidedata_end = self._initial_sidedata_end
+ self.default_compression_header = default_compression_header
+
+ def index_filepath(self):
+ """file path to the current index file associated to this docket"""
+ # very simplistic version at first
+ if self._index_uuid is None:
+ self._index_uuid = make_uid()
+ return b"%s-%s.idx" % (self._radix, self._index_uuid)
+
+ def data_filepath(self):
+ """file path to the current data file associated to this docket"""
+ # very simplistic version at first
+ if self._data_uuid is None:
+ self._data_uuid = make_uid()
+ return b"%s-%s.dat" % (self._radix, self._data_uuid)
+
+ def sidedata_filepath(self):
+ """file path to the current sidedata file associated to this docket"""
+ # very simplistic version at first
+ if self._sidedata_uuid is None:
+ self._sidedata_uuid = make_uid()
+ return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)
+
+ @property
+ def index_end(self):
+ return self._index_end
+
+ @index_end.setter
+ def index_end(self, new_size):
+ if new_size != self._index_end:
+ self._index_end = new_size
+ self._dirty = True
+
+ @property
+ def data_end(self):
+ return self._data_end
+
+ @data_end.setter
+ def data_end(self, new_size):
+ if new_size != self._data_end:
+ self._data_end = new_size
+ self._dirty = True
+
+ @property
+ def sidedata_end(self):
+ return self._sidedata_end
+
+ @sidedata_end.setter
+ def sidedata_end(self, new_size):
+ if new_size != self._sidedata_end:
+ self._sidedata_end = new_size
+ self._dirty = True
+
+ def write(self, transaction, pending=False, stripping=False):
+ """write the modification of disk if any
+
+ This make the new content visible to all process"""
+ if not self._dirty:
+ return False
+ else:
+ if self._read_only:
+ msg = b'writing read-only docket: %s'
+ msg %= self._path
+ raise error.ProgrammingError(msg)
+ if not stripping:
+                # XXX we could leverage the docket while stripping. However it
+                # is not powerful enough at the time of this comment
+ transaction.addbackup(self._path, location=b'store')
+ with self._opener(self._path, mode=b'w', atomictemp=True) as f:
+ f.write(self._serialize(pending=pending))
+            # if pending, we still need to write the final data eventually
+ self._dirty = pending
+ return True
+
+ def _serialize(self, pending=False):
+ if pending:
+ official_index_end = self._initial_index_end
+ official_data_end = self._initial_data_end
+ official_sidedata_end = self._initial_sidedata_end
+ else:
+ official_index_end = self._index_end
+ official_data_end = self._data_end
+ official_sidedata_end = self._sidedata_end
+
+        # these asserts should be True as long as we have a single index filename
+ assert official_data_end <= self._data_end
+ assert official_sidedata_end <= self._sidedata_end
+ data = (
+ self._version_header,
+ len(self._index_uuid),
+ len(self._data_uuid),
+ len(self._sidedata_uuid),
+ official_index_end,
+ self._index_end,
+ official_data_end,
+ self._data_end,
+ official_sidedata_end,
+ self._sidedata_end,
+ self.default_compression_header,
+ )
+ s = []
+ s.append(S_HEADER.pack(*data))
+ s.append(self._index_uuid)
+ s.append(self._data_uuid)
+ s.append(self._sidedata_uuid)
+ return b''.join(s)
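
A hedged sketch of the two-phase visibility this provides; `docket` and `tr`
stand for a v2 docket and an open transaction:

    docket.index_end = 1234         # the setter marks the docket dirty
    docket.write(tr, pending=True)  # official sizes stay at initial values
    # readers created with use_pending=True already see index_end == 1234
    docket.write(tr)                # final write: official sizes catch up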
+
+
+def default_docket(revlog, version_header):
+ """given a revlog version a new docket object for the given revlog"""
+ rl_version = version_header & 0xFFFF
+ if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
+ return None
+ comp = util.compengines[revlog._compengine].revlogheader()
+ docket = RevlogDocket(
+ revlog,
+ version_header=version_header,
+ default_compression_header=comp,
+ )
+ docket._dirty = True
+ return docket
+
+
+def parse_docket(revlog, data, use_pending=False):
+ """given some docket data return a docket object for the given revlog"""
+ header = S_HEADER.unpack(data[: S_HEADER.size])
+
+ # this is a mutable closure capture used in `get_data`
+ offset = [S_HEADER.size]
+
+ def get_data(size):
+ """utility closure to access the `size` next bytes"""
+ if offset[0] + size > len(data):
+ # XXX better class
+ msg = b"docket is too short, expected %d got %d"
+ msg %= (offset[0] + size, len(data))
+ raise error.Abort(msg)
+ raw = data[offset[0] : offset[0] + size]
+ offset[0] += size
+ return raw
+
+ iheader = iter(header)
+
+ version_header = next(iheader)
+
+ index_uuid_size = next(iheader)
+ index_uuid = get_data(index_uuid_size)
+
+ data_uuid_size = next(iheader)
+ data_uuid = get_data(data_uuid_size)
+
+ sidedata_uuid_size = next(iheader)
+ sidedata_uuid = get_data(sidedata_uuid_size)
+
+ index_size = next(iheader)
+
+ pending_index_size = next(iheader)
+
+ data_size = next(iheader)
+
+ pending_data_size = next(iheader)
+
+ sidedata_size = next(iheader)
+
+ pending_sidedata_size = next(iheader)
+
+ default_compression_header = next(iheader)
+
+ docket = RevlogDocket(
+ revlog,
+ use_pending=use_pending,
+ version_header=version_header,
+ index_uuid=index_uuid,
+ data_uuid=data_uuid,
+ sidedata_uuid=sidedata_uuid,
+ index_end=index_size,
+ pending_index_end=pending_index_size,
+ data_end=data_size,
+ pending_data_end=pending_data_size,
+ sidedata_end=sidedata_size,
+ pending_sidedata_end=pending_sidedata_size,
+ default_compression_header=default_compression_header,
+ )
+ return docket
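
The one-element `offset` list above stands in for `nonlocal`, which Python 2
lacks. A minimal standalone illustration of the same pattern:

    def make_reader(data):
        offset = [0]  # mutable cell the closure can rebind through

        def read(size):
            raw = data[offset[0] : offset[0] + size]
            offset[0] += size
            return raw

        return read

    read = make_reader(b'abcdef')
    assert read(2) == b'ab'
    assert read(3) == b'cde'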
--- a/mercurial/revlogutils/flagutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revlogutils/flagutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -18,7 +18,6 @@
REVIDX_HASCOPIESINFO,
REVIDX_ISCENSORED,
REVIDX_RAWTEXT_CHANGING_FLAGS,
- REVIDX_SIDEDATA,
)
from .. import error, util
@@ -28,7 +27,6 @@
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
-REVIDX_SIDEDATA
REVIDX_HASCOPIESINFO,
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
--- a/mercurial/revlogutils/nodemap.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revlogutils/nodemap.py Mon Jun 07 17:10:35 2021 -0400
@@ -9,7 +9,6 @@
from __future__ import absolute_import
import errno
-import os
import re
import struct
@@ -19,6 +18,7 @@
error,
util,
)
+from . import docket as docket_mod
class NodeMap(dict):
@@ -28,9 +28,9 @@
def persisted_data(revlog):
"""read the nodemap for a revlog from disk"""
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
return None
- pdata = revlog.opener.tryread(revlog.nodemap_file)
+ pdata = revlog.opener.tryread(revlog._nodemap_file)
if not pdata:
return None
offset = 0
@@ -77,11 +77,11 @@
"""
if revlog._inline:
return # inlined revlog are too small for this to be relevant
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
return # we do not use persistent_nodemap on this revlog
# we need to happen after the changelog finalization, in that use "cl-"
- callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file
+ callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog._nodemap_file
if tr.hasfinalize(callback_id):
return # no need to register again
tr.addpending(
@@ -123,7 +123,7 @@
"""
if revlog._inline:
return # inlined revlog are too small for this to be relevant
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
return # we do not use persistent_nodemap on this revlog
notr = _NoTransaction()
@@ -133,11 +133,11 @@
def delete_nodemap(tr, repo, revlog):
- """Delete nodemap data on disk for a given revlog"""
- if revlog.nodemap_file is None:
+ """ Delete nodemap data on disk for a given revlog"""
+ if revlog._nodemap_file is None:
msg = "calling persist nodemap on a revlog without the feature enabled"
raise error.ProgrammingError(msg)
- repo.svfs.unlink(revlog.nodemap_file)
+ repo.svfs.unlink(revlog._nodemap_file)
def persist_nodemap(tr, revlog, pending=False, force=False):
@@ -146,11 +146,9 @@
raise error.ProgrammingError(
"cannot persist nodemap of a filtered changelog"
)
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
if force:
- revlog.nodemap_file = get_nodemap_file(
- revlog.opener, revlog.indexfile
- )
+ revlog._nodemap_file = get_nodemap_file(revlog)
else:
msg = "calling persist nodemap on a revlog without the feature enabled"
raise error.ProgrammingError(msg)
@@ -227,7 +225,7 @@
target_docket.tip_node = revlog.node(target_docket.tip_rev)
# EXP-TODO: if this is a cache, this should use a cache vfs, not a
# store vfs
- file_path = revlog.nodemap_file
+ file_path = revlog._nodemap_file
if pending:
file_path += b'.a'
tr.registertmp(file_path)
@@ -250,7 +248,7 @@
for oldfile in olds:
realvfs.tryunlink(oldfile)
- callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
+ callback_id = b"revlog-cleanup-nodemap-%s" % revlog._nodemap_file
tr.addpostclose(callback_id, cleanup)
@@ -280,15 +278,6 @@
S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQQ")
-ID_SIZE = 8
-
-
-def _make_uid():
- """return a new unique identifier.
-
- The identifier is random and composed of ascii characters."""
- return hex(os.urandom(ID_SIZE))
-
class NodeMapDocket(object):
"""metadata associated with persistent nodemap data
@@ -298,7 +287,7 @@
def __init__(self, uid=None):
if uid is None:
- uid = _make_uid()
+ uid = docket_mod.make_uid()
# a unique identifier for the data file:
# - When new data are appended, it is preserved.
# - When a new data file is created, a new identifier is generated.
@@ -365,15 +354,12 @@
def _rawdata_filepath(revlog, docket):
"""The (vfs relative) nodemap's rawdata file for a given uid"""
- if revlog.nodemap_file.endswith(b'.n.a'):
- prefix = revlog.nodemap_file[:-4]
- else:
- prefix = revlog.nodemap_file[:-2]
+ prefix = revlog.radix
return b"%s-%s.nd" % (prefix, docket.uid)
def _other_rawdata_filepath(revlog, docket):
- prefix = revlog.nodemap_file[:-2]
+ prefix = revlog.radix
pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
new_file_path = _rawdata_filepath(revlog, docket)
new_file_name = revlog.opener.basename(new_file_path)
@@ -653,12 +639,9 @@
return entry
-def get_nodemap_file(opener, indexfile):
- if indexfile.endswith(b'.a'):
- pending_path = indexfile[:-4] + b".n.a"
- if opener.exists(pending_path):
+def get_nodemap_file(revlog):
+ if revlog._trypending:
+ pending_path = revlog.radix + b".n.a"
+ if revlog.opener.exists(pending_path):
return pending_path
- else:
- return indexfile[:-4] + b".n"
- else:
- return indexfile[:-2] + b".n"
+ return revlog.radix + b".n"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/revlogv0.py Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,147 @@
+# revlogv0 - code related to revlog format "V0"
+#
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+
+from ..node import sha1nodeconstants
+from .constants import (
+ INDEX_ENTRY_V0,
+)
+from ..i18n import _
+
+from .. import (
+ error,
+ node,
+ pycompat,
+ revlogutils,
+ util,
+)
+
+from . import (
+ nodemap as nodemaputil,
+)
+
+
+def getoffset(q):
+ return int(q >> 16)
+
+
+def gettype(q):
+ return int(q & 0xFFFF)
+
+
+class revlogoldindex(list):
+ rust_ext_compat = 0
+ entry_size = INDEX_ENTRY_V0.size
+ null_item = revlogutils.entry(
+ data_offset=0,
+ data_compressed_length=0,
+ data_delta_base=node.nullrev,
+ link_rev=node.nullrev,
+ parent_rev_1=node.nullrev,
+ parent_rev_2=node.nullrev,
+ node_id=sha1nodeconstants.nullid,
+ )
+
+ @property
+ def nodemap(self):
+ msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
+ util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+ return self._nodemap
+
+ @util.propertycache
+ def _nodemap(self):
+ nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
+ for r in range(0, len(self)):
+ n = self[r][7]
+ nodemap[n] = r
+ return nodemap
+
+ def has_node(self, node):
+ """return True if the node exist in the index"""
+ return node in self._nodemap
+
+ def rev(self, node):
+ """return a revision for a node
+
+ If the node is unknown, raise a RevlogError"""
+ return self._nodemap[node]
+
+ def get_rev(self, node):
+ """return a revision for a node
+
+ If the node is unknown, return None"""
+ return self._nodemap.get(node)
+
+ def append(self, tup):
+ self._nodemap[tup[7]] = len(self)
+ super(revlogoldindex, self).append(tup)
+
+ def __delitem__(self, i):
+ if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
+ raise ValueError(b"deleting slices only supports a:-1 with step 1")
+ for r in pycompat.xrange(i.start, len(self)):
+ del self._nodemap[self[r][7]]
+ super(revlogoldindex, self).__delitem__(i)
+
+ def clearcaches(self):
+ self.__dict__.pop('_nodemap', None)
+
+ def __getitem__(self, i):
+ if i == -1:
+ return self.null_item
+ return list.__getitem__(self, i)
+
+ def pack_header(self, header):
+ """pack header information in binary"""
+ return b''
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ if gettype(entry[0]):
+ raise error.RevlogError(
+ _(b'index entry flags need revlog version 1')
+ )
+ e2 = (
+ getoffset(entry[0]),
+ entry[1],
+ entry[3],
+ entry[4],
+ self[entry[5]][7],
+ self[entry[6]][7],
+ entry[7],
+ )
+ return INDEX_ENTRY_V0.pack(*e2)
+
+
+def parse_index_v0(data, inline):
+ s = INDEX_ENTRY_V0.size
+ index = []
+ nodemap = nodemaputil.NodeMap({node.nullid: node.nullrev})
+ n = off = 0
+ l = len(data)
+ while off + s <= l:
+ cur = data[off : off + s]
+ off += s
+ e = INDEX_ENTRY_V0.unpack(cur)
+ # transform to revlogv1 format
+ e2 = revlogutils.entry(
+ data_offset=e[0],
+ data_compressed_length=e[1],
+ data_delta_base=e[2],
+ link_rev=e[3],
+ parent_rev_1=nodemap.get(e[4], node.nullrev),
+ parent_rev_2=nodemap.get(e[5], node.nullrev),
+ node_id=e[6],
+ )
+ index.append(e2)
+ nodemap[e[6]] = n
+ n += 1
+
+ index = revlogoldindex(index)
+ return index, None
--- a/mercurial/revlogutils/sidedata.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revlogutils/sidedata.py Mon Jun 07 17:10:35 2021 -0400
@@ -32,9 +32,11 @@
from __future__ import absolute_import
+import collections
import struct
-from .. import error
+from .. import error, requirements as requirementsmod
+from ..revlogutils import constants, flagutil
from ..utils import hashutil
## sidedata type constant
@@ -91,3 +93,83 @@
sidedata[key] = entrytext
dataoffset = nextdataoffset
return sidedata
+
+
+def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
+ """
+ Returns a dictionary mapping revlog types to tuples of
+ `(repo, computers, removers)`:
+ * `repo` is used as an argument for computers
+      * `computers` is a list of `(keys, computer, flags)` tuples that
+ compute the missing sidedata categories that were asked:
+ * `category` is the sidedata category
+ * `keys` are the sidedata keys to be affected
+ * `flags` is a bitmask (an integer) of flags to remove when
+ removing the category.
+ * `computer` is the function `(repo, store, rev, sidedata)` that
+ returns a tuple of
+ `(new sidedata dict, (flags to add, flags to remove))`.
+ For example, it will return `({}, (0, 1 << 15))` to return no
+ sidedata, with no flags to add and one flag to remove.
+ * `removers` will remove the keys corresponding to the categories
+ that are present, but not needed.
+ If both `computers` and `removers` are empty, sidedata will simply not
+ be transformed.
+ """
+ # Computers for computing sidedata on-the-fly
+ sd_computers = collections.defaultdict(list)
+ # Computers for categories to remove from sidedata
+ sd_removers = collections.defaultdict(list)
+ to_generate = remote_sd_categories - repo._wanted_sidedata
+ to_remove = repo._wanted_sidedata - remote_sd_categories
+ if pull:
+ to_generate, to_remove = to_remove, to_generate
+
+ for revlog_kind, computers in repo._sidedata_computers.items():
+ for category, computer in computers.items():
+ if category in to_generate:
+ sd_computers[revlog_kind].append(computer)
+ if category in to_remove:
+ sd_removers[revlog_kind].append(computer)
+
+ sidedata_helpers = (repo, sd_computers, sd_removers)
+ return sidedata_helpers
+
+
+def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
+ """Returns the sidedata for the given revision after running through
+ the given helpers.
+ - `store`: the revlog this applies to (changelog, manifest, or filelog
+ instance)
+ - `sidedata_helpers`: see `get_sidedata_helpers`
+ - `sidedata`: previous sidedata at the given rev, if any
+ - `rev`: affected rev of `store`
+ """
+ repo, sd_computers, sd_removers = sidedata_helpers
+ kind = store.revlog_kind
+ flags_to_add = 0
+ flags_to_remove = 0
+ for _keys, sd_computer, _flags in sd_computers.get(kind, []):
+ sidedata, flags = sd_computer(repo, store, rev, sidedata)
+ flags_to_add |= flags[0]
+ flags_to_remove |= flags[1]
+ for keys, _computer, flags in sd_removers.get(kind, []):
+ for key in keys:
+ sidedata.pop(key, None)
+ flags_to_remove |= flags
+ return sidedata, (flags_to_add, flags_to_remove)
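
A hypothetical end-to-end use; `repo`, `old_flags` and the category set are
stand-ins, not values mandated by the patch:

    helpers = get_sidedata_helpers(repo, remote_sd_categories={SD_FILES})
    sidedata, (to_add, to_remove) = run_sidedata_helpers(
        store=repo.changelog,
        sidedata_helpers=helpers,
        sidedata={},
        rev=0,
    )
    new_flags = (old_flags | to_add) & ~to_remove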
+
+
+def set_sidedata_spec_for_repo(repo):
+ # prevent cycle metadata -> revlogutils.sidedata -> metadata
+ from .. import metadata
+
+ if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
+ repo.register_wanted_sidedata(SD_FILES)
+ repo.register_sidedata_computer(
+ constants.KIND_CHANGELOG,
+ SD_FILES,
+ (SD_FILES,),
+ metadata.copies_sidedata_computer,
+ flagutil.REVIDX_HASCOPIESINFO,
+ )
--- a/mercurial/revset.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/revset.py Mon Jun 07 17:10:35 2021 -0400
@@ -1724,7 +1724,7 @@
def _node(repo, n):
"""process a node input"""
rn = None
- if len(n) == 40:
+ if len(n) == 2 * repo.nodeconstants.nodelen:
try:
rn = repo.changelog.rev(bin(n))
except error.WdirUnsupported:
@@ -1842,6 +1842,9 @@
def outgoing(repo, subset, x):
"""Changesets not found in the specified destination repository, or the
default push location.
+
+    If the location resolves to multiple repositories, the union of all
+    outgoing changesets will be used.
"""
# Avoid cycles.
from . import (
--- a/mercurial/rewriteutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/rewriteutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -17,16 +17,38 @@
from . import (
error,
+ node,
obsolete,
obsutil,
revset,
scmutil,
+ util,
)
NODE_RE = re.compile(br'\b[0-9a-f]{6,64}\b')
+def _formatrevs(repo, revs, maxrevs=4):
+ """returns a string summarizing revisions in a decent size
+
+ If there are few enough revisions, we list them all. Otherwise we display a
+ summary of the form:
+
+ 1ea73414a91b and 5 others
+ """
+ tonode = repo.changelog.node
+ numrevs = len(revs)
+ if numrevs < maxrevs:
+ shorts = [node.short(tonode(r)) for r in revs]
+ summary = b', '.join(shorts)
+ else:
+ first = revs.first()
+ summary = _(b'%s and %d others')
+ summary %= (node.short(tonode(first)), numrevs - 1)
+ return summary
+
+
def precheck(repo, revs, action=b'rewrite'):
"""check if revs can be rewritten
action is used to control the error message.
@@ -34,22 +56,75 @@
Make sure this function is called after taking the lock.
"""
if nullrev in revs:
- msg = _(b"cannot %s null changeset") % action
+ msg = _(b"cannot %s the null revision") % action
hint = _(b"no changeset checked out")
raise error.InputError(msg, hint=hint)
+ if any(util.safehasattr(r, 'rev') for r in revs):
+ repo.ui.develwarn(b"rewriteutil.precheck called with ctx not revs")
+ revs = (r.rev() for r in revs)
+
if len(repo[None].parents()) > 1:
- raise error.StateError(_(b"cannot %s while merging") % action)
+ raise error.StateError(
+ _(b"cannot %s changesets while merging") % action
+ )
publicrevs = repo.revs(b'%ld and public()', revs)
if publicrevs:
- msg = _(b"cannot %s public changesets") % action
+ summary = _formatrevs(repo, publicrevs)
+ msg = _(b"cannot %s public changesets: %s") % (action, summary)
hint = _(b"see 'hg help phases' for details")
raise error.InputError(msg, hint=hint)
newunstable = disallowednewunstable(repo, revs)
if newunstable:
- raise error.InputError(_(b"cannot %s changeset with children") % action)
+ hint = _(b"see 'hg help evolution.instability'")
+ raise error.InputError(
+ _(b"cannot %s changeset, as that will orphan %d descendants")
+ % (action, len(newunstable)),
+ hint=hint,
+ )
+
+ if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
+ new_divergence = _find_new_divergence(repo, revs)
+ if new_divergence:
+ local_ctx, other_ctx, base_ctx = new_divergence
+ msg = _(
+ b'cannot %s %s, as that creates content-divergence with %s'
+ ) % (
+ action,
+ local_ctx,
+ other_ctx,
+ )
+ if local_ctx.rev() != base_ctx.rev():
+ msg += _(b', from %s') % base_ctx
+ if repo.ui.verbose:
+ if local_ctx.rev() != base_ctx.rev():
+ msg += _(
+                        b'\n    changeset %s is a successor of changeset %s'
+ ) % (local_ctx, base_ctx)
+ msg += _(
+ b'\n changeset %s already has a successor in '
+ b'changeset %s\n'
+ b' rewriting changeset %s would create '
+ b'"content-divergence"\n'
+ b' set experimental.evolution.allowdivergence=True to '
+ b'skip this check'
+ ) % (base_ctx, other_ctx, local_ctx)
+ raise error.InputError(
+ msg,
+ hint=_(
+ b"see 'hg help evolution.instability' for details on content-divergence"
+ ),
+ )
+ else:
+ raise error.InputError(
+ msg,
+ hint=_(
+ b"add --verbose for details or see "
+ b"'hg help evolution.instability'"
+ ),
+ )
def disallowednewunstable(repo, revs):
@@ -65,6 +140,40 @@
return repo.revs(b"(%ld::) - %ld", revs, revs)
+def _find_new_divergence(repo, revs):
+ obsrevs = repo.revs(b'%ld and obsolete()', revs)
+ for r in obsrevs:
+ div = find_new_divergence_from(repo, repo[r])
+ if div:
+ return (repo[r], repo[div[0]], repo[div[1]])
+ return None
+
+
+def find_new_divergence_from(repo, ctx):
+ """return divergent revision if rewriting an obsolete cset (ctx) will
+ create divergence
+
+ Returns (<other node>, <common ancestor node>) or None
+ """
+ if not ctx.obsolete():
+ return None
+ # We need to check two cases that can cause divergence:
+ # case 1: the rev being rewritten has a non-obsolete successor (easily
+ # detected by successorssets)
+ sset = obsutil.successorssets(repo, ctx.node())
+ if sset:
+ return (sset[0][0], ctx.node())
+ else:
+ # case 2: one of the precursors of the rev being revived has a
+ # non-obsolete successor (we need divergentsets for this)
+ divsets = obsutil.divergentsets(repo, ctx)
+ if divsets:
+ nsuccset = divsets[0][b'divergentnodes']
+ prec = divsets[0][b'commonpredecessor']
+ return (nsuccset[0], prec)
+ return None
+
+
def skip_empty_successor(ui, command):
empty_successor = ui.config(b'rewrite', b'empty-successor')
if empty_successor == b'skip':
--- a/mercurial/scmutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/scmutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -19,10 +19,8 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
- wdirid,
wdirrev,
)
from .pycompat import getattr
@@ -200,34 +198,13 @@
ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
except error.CensoredNodeError as inst:
ui.error(_(b"abort: file censored %s\n") % inst)
- except error.StorageError as inst:
- ui.error(_(b"abort: %s\n") % inst)
- if inst.hint:
- ui.error(_(b"(%s)\n") % inst.hint)
- detailed_exit_code = 50
- except error.InterventionRequired as inst:
- ui.error(b"%s\n" % inst)
- if inst.hint:
- ui.error(_(b"(%s)\n") % inst.hint)
- detailed_exit_code = 240
- coarse_exit_code = 1
except error.WdirUnsupported:
ui.error(_(b"abort: working directory revision cannot be specified\n"))
- except error.Abort as inst:
- if isinstance(inst, (error.InputError, error.ParseError)):
- detailed_exit_code = 10
- elif isinstance(inst, error.StateError):
- detailed_exit_code = 20
- elif isinstance(inst, error.ConfigError):
- detailed_exit_code = 30
- elif isinstance(inst, error.HookAbort):
- detailed_exit_code = 40
- elif isinstance(inst, error.RemoteError):
- detailed_exit_code = 100
- elif isinstance(inst, error.SecurityError):
- detailed_exit_code = 150
- elif isinstance(inst, error.CanceledError):
- detailed_exit_code = 250
+ except error.Error as inst:
+ if inst.detailed_exit_code is not None:
+ detailed_exit_code = inst.detailed_exit_code
+ if inst.coarse_exit_code is not None:
+ coarse_exit_code = inst.coarse_exit_code
ui.error(inst.format())
except error.WorkerError as inst:
# Don't print a message -- the worker already should have
@@ -450,7 +427,7 @@
"""Return binary node id for a given basectx"""
node = ctx.node()
if node is None:
- return wdirid
+ return ctx.repo().nodeconstants.wdirid
return node
@@ -645,7 +622,7 @@
except (ValueError, OverflowError, IndexError):
pass
- if len(symbol) == 40:
+ if len(symbol) == 2 * repo.nodeconstants.nodelen:
try:
node = bin(symbol)
rev = repo.changelog.rev(node)
@@ -1108,7 +1085,7 @@
if roots:
newnode = roots[0].node()
else:
- newnode = nullid
+ newnode = repo.nullid
else:
newnode = newnodes[0]
moves[oldnode] = newnode
@@ -1506,7 +1483,7 @@
oldctx = repo[b'.']
ds = repo.dirstate
copies = dict(ds.copies())
- ds.setparents(newctx.node(), nullid)
+ ds.setparents(newctx.node(), repo.nullid)
s = newctx.status(oldctx, match=match)
for f in s.modified:
if ds[f] == b'r':
--- a/mercurial/setdiscovery.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/setdiscovery.py Mon Jun 07 17:10:35 2021 -0400
@@ -46,10 +46,7 @@
import random
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
policy,
@@ -277,6 +274,8 @@
return sample
+pure_partialdiscovery = partialdiscovery
+
partialdiscovery = policy.importrust(
'discovery', member='PartialDiscovery', default=partialdiscovery
)
@@ -391,9 +390,9 @@
audit[b'total-roundtrips'] = 1
if cl.tiprev() == nullrev:
- if srvheadhashes != [nullid]:
- return [nullid], True, srvheadhashes
- return [nullid], False, []
+ if srvheadhashes != [cl.nullid]:
+ return [cl.nullid], True, srvheadhashes
+ return [cl.nullid], False, []
else:
# we still need the remote head for the function return
with remote.commandexecutor() as e:
@@ -406,7 +405,7 @@
knownsrvheads = [] # revnos of remote heads that are known locally
for node in srvheadhashes:
- if node == nullid:
+ if node == cl.nullid:
continue
try:
@@ -437,9 +436,11 @@
hard_limit_sample = not (dynamic_sample or remote.limitedarguments)
randomize = ui.configbool(b'devel', b'discovery.randomize')
- disco = partialdiscovery(
- local, ownheads, hard_limit_sample, randomize=randomize
- )
+ if cl.index.rust_ext_compat:
+ pd = partialdiscovery
+ else:
+ pd = pure_partialdiscovery
+ disco = pd(local, ownheads, hard_limit_sample, randomize=randomize)
if initial_head_exchange:
# treat remote heads (and maybe own heads) as a first implicit sample
# response
@@ -503,17 +504,17 @@
if audit is not None:
audit[b'total-roundtrips'] = roundtrips
- if not result and srvheadhashes != [nullid]:
+ if not result and srvheadhashes != [cl.nullid]:
if abortwhenunrelated:
raise error.Abort(_(b"repository is unrelated"))
else:
ui.warn(_(b"warning: repository is unrelated\n"))
return (
- {nullid},
+ {cl.nullid},
True,
srvheadhashes,
)
- anyincoming = srvheadhashes != [nullid]
+ anyincoming = srvheadhashes != [cl.nullid]
result = {clnode(r) for r in result}
return result, anyincoming, srvheadhashes
--- a/mercurial/shelve.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/shelve.py Mon Jun 07 17:10:35 2021 -0400
@@ -31,7 +31,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from . import (
@@ -822,7 +821,7 @@
pendingctx = state.pendingctx
with repo.dirstate.parentchange():
- repo.setparents(state.pendingctx.node(), nullid)
+ repo.setparents(state.pendingctx.node(), repo.nullid)
repo.dirstate.write(repo.currenttransaction())
targetphase = phases.internal
@@ -831,7 +830,7 @@
overrides = {(b'phases', b'new-commit'): targetphase}
with repo.ui.configoverride(overrides, b'unshelve'):
with repo.dirstate.parentchange():
- repo.setparents(state.parents[0], nullid)
+ repo.setparents(state.parents[0], repo.nullid)
newnode, ispartialunshelve = _createunshelvectx(
ui, repo, shelvectx, basename, interactive, opts
)
@@ -1027,7 +1026,7 @@
raise error.ConflictResolutionRequired(b'unshelve')
with repo.dirstate.parentchange():
- repo.setparents(tmpwctx.node(), nullid)
+ repo.setparents(tmpwctx.node(), repo.nullid)
newnode, ispartialunshelve = _createunshelvectx(
ui, repo, shelvectx, basename, interactive, opts
)
--- a/mercurial/sparse.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/sparse.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,10 +10,7 @@
import os
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from . import (
error,
match as matchmod,
@@ -177,7 +174,7 @@
revs = [
repo.changelog.rev(node)
for node in repo.dirstate.parents()
- if node != nullid
+ if node != repo.nullid
]
allincludes = set()
@@ -321,7 +318,7 @@
revs = [
repo.changelog.rev(node)
for node in repo.dirstate.parents()
- if node != nullid
+ if node != repo.nullid
]
signature = configsignature(repo, includetemp=includetemp)
--- a/mercurial/statichttprepo.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/statichttprepo.py Mon Jun 07 17:10:35 2021 -0400
@@ -177,6 +177,7 @@
self.filtername = None
self._extrafilterid = None
self._wanted_sidedata = set()
+ self.features = set()
try:
requirements = set(self.vfs.read(b'requires').splitlines())
--- a/mercurial/store.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/store.py Mon Jun 07 17:10:35 2021 -0400
@@ -389,7 +389,15 @@
]
REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
-REVLOG_FILES_OTHER_EXT = (b'.d', b'.n', b'.nd', b'd.tmpcensored')
+REVLOG_FILES_OTHER_EXT = (
+ b'.idx',
+ b'.d',
+ b'.dat',
+ b'.n',
+ b'.nd',
+ b'.sda',
+ b'd.tmpcensored',
+)
# files that are "volatile" and might change between listing and streaming
#
# note: the ".nd" file are nodemap data and won't "change" but they might be
@@ -397,7 +405,7 @@
REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
# some exception to the above matching
-EXCLUDED = re.compile(b'.*undo\.[^/]+\.nd?$')
+EXCLUDED = re.compile(b'.*undo\.[^/]+\.(nd?|i)$')
def is_revlog(f, kind, st):
@@ -407,13 +415,14 @@
def revlog_type(f):
- if f.endswith(REVLOG_FILES_MAIN_EXT):
+ if f.endswith(REVLOG_FILES_MAIN_EXT) and EXCLUDED.match(f) is None:
return FILEFLAGS_REVLOG_MAIN
elif f.endswith(REVLOG_FILES_OTHER_EXT) and EXCLUDED.match(f) is None:
t = FILETYPE_FILELOG_OTHER
if f.endswith(REVLOG_FILES_VOLATILE_EXT):
t |= FILEFLAGS_VOLATILE
return t
+ return None
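
For instance, a sketch (each classification follows from the extension tuples
and the EXCLUDED pattern above):

    assert revlog_type(b'data/foo.i') == FILEFLAGS_REVLOG_MAIN
    assert revlog_type(b'undo.00changelog.i') is None  # EXCLUDED wins
    assert revlog_type(b'00changelog.n') & FILEFLAGS_VOLATILE  # nodemap data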
# the file is part of changelog data
@@ -706,7 +715,7 @@
# do not trigger a fncache load when adding a file that already is
# known to exist.
notload = self.fncache.entries is None and self.vfs.exists(encoded)
- if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
+ if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
# when appending to an existing file, if the file has size zero,
# it should be considered as missing. Such zero-size files are
# the result of truncation when a transaction is aborted.
@@ -753,6 +762,7 @@
ef = self.encode(f)
try:
t = revlog_type(f)
+ assert t is not None, f
t |= FILEFLAGS_FILELOG
yield t, f, ef, self.getsize(ef)
except OSError as err:
--- a/mercurial/strip.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/strip.py Mon Jun 07 17:10:35 2021 -0400
@@ -2,7 +2,6 @@
from .i18n import _
from .pycompat import getattr
-from .node import nullid
from . import (
bookmarks as bookmarksmod,
cmdutil,
@@ -39,7 +38,7 @@
if (
util.safehasattr(repo, b'mq')
- and p2 != nullid
+ and p2 != repo.nullid
and p2 in [x.node for x in repo.mq.applied]
):
unode = p2
@@ -218,7 +217,7 @@
# if one of the wdir parent is stripped we'll need
# to update away to an earlier revision
update = any(
- p != nullid and cl.rev(p) in strippedrevs
+ p != repo.nullid and cl.rev(p) in strippedrevs
for p in repo.dirstate.parents()
)
--- a/mercurial/subrepo.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/subrepo.py Mon Jun 07 17:10:35 2021 -0400
@@ -21,7 +21,6 @@
from .node import (
bin,
hex,
- nullid,
short,
)
from . import (
@@ -686,7 +685,7 @@
# we can't fully delete the repository as it may contain
# local-only history
self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
- hg.clean(self._repo, nullid, False)
+ hg.clean(self._repo, self._repo.nullid, False)
def _get(self, state):
source, revision, kind = state
--- a/mercurial/tagmerge.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/tagmerge.py Mon Jun 07 17:10:35 2021 -0400
@@ -74,9 +74,6 @@
from __future__ import absolute_import
from .i18n import _
-from .node import (
- nullhex,
-)
from . import (
tags as tagsmod,
util,
@@ -243,8 +240,8 @@
pnlosttagset = basetagset - pntagset
for t in pnlosttagset:
pntags[t] = basetags[t]
- if pntags[t][-1][0] != nullhex:
- pntags[t].append([nullhex, None])
+ if pntags[t][-1][0] != repo.nodeconstants.nullhex:
+ pntags[t].append([repo.nodeconstants.nullhex, None])
conflictedtags = [] # for reporting purposes
mergedtags = util.sortdict(p1tags)
--- a/mercurial/tags.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/tags.py Mon Jun 07 17:10:35 2021 -0400
@@ -18,7 +18,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -96,12 +95,12 @@
return fnodes
-def _nulltonone(value):
+def _nulltonone(repo, value):
"""convert nullid to None
For tag value, nullid means "deleted". This small utility function helps
translating that to None."""
- if value == nullid:
+ if value == repo.nullid:
return None
return value
@@ -123,14 +122,14 @@
# list of (tag, old, new): None means missing
entries = []
for tag, (new, __) in newtags.items():
- new = _nulltonone(new)
+ new = _nulltonone(repo, new)
old, __ = oldtags.pop(tag, (None, None))
- old = _nulltonone(old)
+ old = _nulltonone(repo, old)
if old != new:
entries.append((tag, old, new))
# handle deleted tags
for tag, (old, __) in oldtags.items():
- old = _nulltonone(old)
+ old = _nulltonone(repo, old)
if old is not None:
entries.append((tag, old, None))
entries.sort()
@@ -452,7 +451,7 @@
repoheads = repo.heads()
# Case 2 (uncommon): empty repo; get out quickly and don't bother
# writing an empty cache.
- if repoheads == [nullid]:
+ if repoheads == [repo.nullid]:
return ([], {}, valid, {}, False)
# Case 3 (uncommon): cache file missing or empty.
@@ -499,7 +498,7 @@
for node in nodes:
fnode = fnodescache.getfnode(node)
flog = repo.file(b'.hgtags')
- if fnode != nullid:
+ if fnode != repo.nullid:
if fnode not in validated_fnodes:
if flog.hasnode(fnode):
validated_fnodes.add(fnode)
@@ -510,7 +509,7 @@
if unknown_entries:
fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
for node, fnode in pycompat.iteritems(fixed_nodemap):
- if fnode != nullid:
+ if fnode != repo.nullid:
cachefnode[node] = fnode
fnodescache.write()
@@ -632,7 +631,7 @@
m = name
if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
- old = repo.tags().get(name, nullid)
+ old = repo.tags().get(name, repo.nullid)
fp.write(b'%s %s\n' % (hex(old), m))
fp.write(b'%s %s\n' % (hex(node), m))
fp.close()
@@ -762,8 +761,8 @@
If an .hgtags does not exist at the specified revision, nullid is
returned.
"""
- if node == nullid:
- return nullid
+ if node == self._repo.nullid:
+ return node
ctx = self._repo[node]
rev = ctx.rev()
@@ -826,7 +825,7 @@
fnode = ctx.filenode(b'.hgtags')
except error.LookupError:
# No .hgtags file on this revision.
- fnode = nullid
+ fnode = self._repo.nullid
return fnode
def setfnode(self, node, fnode):
--- a/mercurial/templatefuncs.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/templatefuncs.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,10 +10,7 @@
import re
from .i18n import _
-from .node import (
- bin,
- wdirid,
-)
+from .node import bin
from . import (
color,
dagop,
@@ -767,9 +764,10 @@
)
repo = context.resource(mapping, b'repo')
- if len(hexnode) > 40:
+ hexnodelen = 2 * repo.nodeconstants.nodelen
+ if len(hexnode) > hexnodelen:
return hexnode
- elif len(hexnode) == 40:
+ elif len(hexnode) == hexnodelen:
try:
node = bin(hexnode)
except TypeError:
@@ -778,7 +776,7 @@
try:
node = scmutil.resolvehexnodeidprefix(repo, hexnode)
except error.WdirUnsupported:
- node = wdirid
+ node = repo.nodeconstants.wdirid
except error.LookupError:
return hexnode
if not node:
--- a/mercurial/templatekw.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/templatekw.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,8 +10,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
- wdirid,
wdirrev,
)
@@ -29,7 +27,10 @@
templateutil,
util,
)
-from .utils import stringutil
+from .utils import (
+ stringutil,
+ urlutil,
+)
_hybrid = templateutil.hybrid
hybriddict = templateutil.hybriddict
@@ -412,7 +413,7 @@
def getgraphnodecurrent(repo, ctx, cache):
wpnodes = repo.dirstate.parents()
- if wpnodes[1] == nullid:
+ if wpnodes[1] == repo.nullid:
wpnodes = wpnodes[:1]
if ctx.node() in wpnodes:
return b'@'
@@ -525,11 +526,12 @@
ctx = context.resource(mapping, b'ctx')
mnode = ctx.manifestnode()
if mnode is None:
- mnode = wdirid
+ mnode = repo.nodeconstants.wdirid
mrev = wdirrev
+ mhex = repo.nodeconstants.wdirhex
else:
mrev = repo.manifestlog.rev(mnode)
- mhex = hex(mnode)
+ mhex = hex(mnode)
mapping = context.overlaymap(mapping, {b'rev': mrev, b'node': mhex})
f = context.process(b'manifest', mapping)
return templateutil.hybriditem(
@@ -661,17 +663,29 @@
repo = context.resource(mapping, b'repo')
# see commands.paths() for naming of dictionary keys
paths = repo.ui.paths
- urls = util.sortdict(
- (k, p.rawloc) for k, p in sorted(pycompat.iteritems(paths))
- )
+ all_paths = urlutil.list_paths(repo.ui)
+ urls = util.sortdict((k, p.rawloc) for k, p in all_paths)
def makemap(k):
- p = paths[k]
- d = {b'name': k, b'url': p.rawloc}
- d.update((o, v) for o, v in sorted(pycompat.iteritems(p.suboptions)))
+ ps = paths[k]
+ d = {b'name': k}
+ if len(ps) == 1:
+ d[b'url'] = ps[0].rawloc
+ sub_opts = pycompat.iteritems(ps[0].suboptions)
+ sub_opts = util.sortdict(sorted(sub_opts))
+ d.update(sub_opts)
+ path_dict = util.sortdict()
+ for p in ps:
+ sub_opts = util.sortdict(sorted(pycompat.iteritems(p.suboptions)))
+ path_dict[b'url'] = p.rawloc
+ path_dict.update(sub_opts)
+ d[b'urls'] = [path_dict]
return d
- return _hybrid(None, urls, makemap, lambda k: b'%s=%s' % (k, urls[k]))
+ def format_one(k):
+ return b'%s=%s' % (k, urls[k])
+
+ return _hybrid(None, urls, makemap, format_one)
@templatekeyword(b"predecessors", requires={b'repo', b'ctx'})
--- a/mercurial/testing/storage.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/testing/storage.py Mon Jun 07 17:10:35 2021 -0400
@@ -11,7 +11,6 @@
from ..node import (
hex,
- nullid,
nullrev,
)
from ..pycompat import getattr
@@ -51,7 +50,7 @@
self.assertFalse(f.hasnode(None))
self.assertFalse(f.hasnode(0))
self.assertFalse(f.hasnode(nullrev))
- self.assertFalse(f.hasnode(nullid))
+ self.assertFalse(f.hasnode(f.nullid))
self.assertFalse(f.hasnode(b'0'))
self.assertFalse(f.hasnode(b'a' * 20))
@@ -64,8 +63,8 @@
self.assertEqual(list(f.revs(start=20)), [])
- # parents() and parentrevs() work with nullid/nullrev.
- self.assertEqual(f.parents(nullid), (nullid, nullid))
+ # parents() and parentrevs() work with f.nullid/nullrev.
+ self.assertEqual(f.parents(f.nullid), (f.nullid, f.nullid))
self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
with self.assertRaises(error.LookupError):
@@ -78,9 +77,9 @@
with self.assertRaises(IndexError):
f.parentrevs(i)
- # nullid/nullrev lookup always works.
- self.assertEqual(f.rev(nullid), nullrev)
- self.assertEqual(f.node(nullrev), nullid)
+ # f.nullid/nullrev lookup always works.
+ self.assertEqual(f.rev(f.nullid), nullrev)
+ self.assertEqual(f.node(nullrev), f.nullid)
with self.assertRaises(error.LookupError):
f.rev(b'\x01' * 20)
@@ -92,16 +91,16 @@
with self.assertRaises(IndexError):
f.node(i)
- self.assertEqual(f.lookup(nullid), nullid)
- self.assertEqual(f.lookup(nullrev), nullid)
- self.assertEqual(f.lookup(hex(nullid)), nullid)
- self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
+ self.assertEqual(f.lookup(f.nullid), f.nullid)
+ self.assertEqual(f.lookup(nullrev), f.nullid)
+ self.assertEqual(f.lookup(hex(f.nullid)), f.nullid)
+ self.assertEqual(f.lookup(b'%d' % nullrev), f.nullid)
with self.assertRaises(error.LookupError):
f.lookup(b'badvalue')
with self.assertRaises(error.LookupError):
- f.lookup(hex(nullid)[0:12])
+ f.lookup(hex(f.nullid)[0:12])
with self.assertRaises(error.LookupError):
f.lookup(b'-2')
@@ -140,19 +139,19 @@
with self.assertRaises(IndexError):
f.iscensored(i)
- self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
+ self.assertEqual(list(f.commonancestorsheads(f.nullid, f.nullid)), [])
with self.assertRaises(ValueError):
self.assertEqual(list(f.descendants([])), [])
self.assertEqual(list(f.descendants([nullrev])), [])
- self.assertEqual(f.heads(), [nullid])
- self.assertEqual(f.heads(nullid), [nullid])
- self.assertEqual(f.heads(None, [nullid]), [nullid])
- self.assertEqual(f.heads(nullid, [nullid]), [nullid])
+ self.assertEqual(f.heads(), [f.nullid])
+ self.assertEqual(f.heads(f.nullid), [f.nullid])
+ self.assertEqual(f.heads(None, [f.nullid]), [f.nullid])
+ self.assertEqual(f.heads(f.nullid, [f.nullid]), [f.nullid])
- self.assertEqual(f.children(nullid), [])
+ self.assertEqual(f.children(f.nullid), [])
with self.assertRaises(error.LookupError):
f.children(b'\x01' * 20)
@@ -160,7 +159,7 @@
def testsinglerevision(self):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node = f.add(b'initial', None, tr, 0, nullid, nullid)
+ node = f.add(b'initial', None, tr, 0, f.nullid, f.nullid)
self.assertEqual(len(f), 1)
self.assertEqual(list(f), [0])
@@ -174,7 +173,7 @@
self.assertTrue(f.hasnode(node))
self.assertFalse(f.hasnode(hex(node)))
self.assertFalse(f.hasnode(nullrev))
- self.assertFalse(f.hasnode(nullid))
+ self.assertFalse(f.hasnode(f.nullid))
self.assertFalse(f.hasnode(node[0:12]))
self.assertFalse(f.hasnode(hex(node)[0:20]))
@@ -188,7 +187,7 @@
self.assertEqual(list(f.revs(1, 0)), [1, 0])
self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
- self.assertEqual(f.parents(node), (nullid, nullid))
+ self.assertEqual(f.parents(node), (f.nullid, f.nullid))
self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
with self.assertRaises(error.LookupError):
@@ -209,7 +208,7 @@
self.assertEqual(f.lookup(node), node)
self.assertEqual(f.lookup(0), node)
- self.assertEqual(f.lookup(-1), nullid)
+ self.assertEqual(f.lookup(-1), f.nullid)
self.assertEqual(f.lookup(b'0'), node)
self.assertEqual(f.lookup(hex(node)), node)
@@ -256,9 +255,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
self.assertEqual(len(f), 3)
self.assertEqual(list(f), [0, 1, 2])
@@ -284,9 +283,9 @@
# TODO this is wrong
self.assertEqual(list(f.revs(3, 2)), [3, 2])
- self.assertEqual(f.parents(node0), (nullid, nullid))
- self.assertEqual(f.parents(node1), (node0, nullid))
- self.assertEqual(f.parents(node2), (node1, nullid))
+ self.assertEqual(f.parents(node0), (f.nullid, f.nullid))
+ self.assertEqual(f.parents(node1), (node0, f.nullid))
+ self.assertEqual(f.parents(node2), (node1, f.nullid))
self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
self.assertEqual(f.parentrevs(1), (0, nullrev))
@@ -330,7 +329,7 @@
with self.assertRaises(IndexError):
f.iscensored(3)
- self.assertEqual(f.commonancestorsheads(node1, nullid), [])
+ self.assertEqual(f.commonancestorsheads(node1, f.nullid), [])
self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
@@ -364,12 +363,12 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 1, node0, nullid)
- node2 = f.add(b'2', None, tr, 2, node1, nullid)
- node3 = f.add(b'3', None, tr, 3, node0, nullid)
- node4 = f.add(b'4', None, tr, 4, node3, nullid)
- node5 = f.add(b'5', None, tr, 5, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+ node2 = f.add(b'2', None, tr, 2, node1, f.nullid)
+ node3 = f.add(b'3', None, tr, 3, node0, f.nullid)
+ node4 = f.add(b'4', None, tr, 4, node3, f.nullid)
+ node5 = f.add(b'5', None, tr, 5, node0, f.nullid)
self.assertEqual(len(f), 6)
@@ -427,24 +426,24 @@
with self.assertRaises(IndexError):
f.size(i)
- self.assertEqual(f.revision(nullid), b'')
- self.assertEqual(f.rawdata(nullid), b'')
+ self.assertEqual(f.revision(f.nullid), b'')
+ self.assertEqual(f.rawdata(f.nullid), b'')
with self.assertRaises(error.LookupError):
f.revision(b'\x01' * 20)
- self.assertEqual(f.read(nullid), b'')
+ self.assertEqual(f.read(f.nullid), b'')
with self.assertRaises(error.LookupError):
f.read(b'\x01' * 20)
- self.assertFalse(f.renamed(nullid))
+ self.assertFalse(f.renamed(f.nullid))
with self.assertRaises(error.LookupError):
f.read(b'\x01' * 20)
- self.assertTrue(f.cmp(nullid, b''))
- self.assertTrue(f.cmp(nullid, b'foo'))
+ self.assertTrue(f.cmp(f.nullid, b''))
+ self.assertTrue(f.cmp(f.nullid, b'foo'))
with self.assertRaises(error.LookupError):
f.cmp(b'\x01' * 20, b'irrelevant')
@@ -455,7 +454,7 @@
next(gen)
# Emitting null node yields nothing.
- gen = f.emitrevisions([nullid])
+ gen = f.emitrevisions([f.nullid])
with self.assertRaises(StopIteration):
next(gen)
@@ -468,7 +467,7 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node = f.add(fulltext, None, tr, 0, nullid, nullid)
+ node = f.add(fulltext, None, tr, 0, f.nullid, f.nullid)
self.assertEqual(f.storageinfo(), {})
self.assertEqual(
@@ -496,10 +495,10 @@
rev = next(gen)
self.assertEqual(rev.node, node)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertIsNone(rev.delta)
@@ -512,10 +511,10 @@
rev = next(gen)
self.assertEqual(rev.node, node)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext)
self.assertIsNone(rev.delta)
@@ -534,9 +533,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
self.assertEqual(f.storageinfo(), {})
self.assertEqual(
@@ -596,10 +595,10 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext0)
self.assertIsNone(rev.delta)
@@ -608,7 +607,7 @@
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
@@ -622,7 +621,7 @@
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
@@ -641,10 +640,10 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext0)
self.assertIsNone(rev.delta)
@@ -653,7 +652,7 @@
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
@@ -667,7 +666,7 @@
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
@@ -700,16 +699,16 @@
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext2)
self.assertIsNone(rev.delta)
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
# Delta behavior is storage dependent, so we can't easily test it.
with self.assertRaises(StopIteration):
@@ -722,8 +721,8 @@
rev = next(gen)
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext1)
self.assertIsNone(rev.delta)
@@ -731,7 +730,7 @@
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
@@ -751,7 +750,7 @@
rev = next(gen)
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
@@ -768,9 +767,9 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertEqual(
@@ -789,9 +788,9 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertEqual(
@@ -802,7 +801,7 @@
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node0)
with self.assertRaises(StopIteration):
@@ -841,11 +840,11 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, meta1, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, meta2, tr, 2, f.nullid, f.nullid)
- # Metadata header isn't recognized when parent isn't nullid.
+ # Metadata header isn't recognized when parent isn't f.nullid.
self.assertEqual(f.size(1), len(stored1))
self.assertEqual(f.size(2), len(fulltext2))
@@ -886,8 +885,8 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
+ node0 = f.add(fulltext0, {}, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, meta1, tr, 1, f.nullid, f.nullid)
# TODO this is buggy.
self.assertEqual(f.size(0), len(fulltext0) + 4)
@@ -916,15 +915,15 @@
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
self.assertEqual(len(f), 2)
- self.assertEqual(f.parents(node1), (node0, nullid))
+ self.assertEqual(f.parents(node1), (node0, f.nullid))
# revision() raises since it performs hash verification.
with self.assertRaises(error.StorageError):
@@ -951,11 +950,11 @@
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
@@ -973,11 +972,11 @@
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
@@ -994,22 +993,22 @@
fulltext2 = fulltext1 + b'baz\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
f.read(node1)
- node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
+ node2 = storageutil.hashrevisionsha1(fulltext2, node1, f.nullid)
with self._maketransactionfn() as tr:
delta = mdiff.textdiff(fulltext1, fulltext2)
self._addrawrevisionfn(
- f, tr, node2, node1, nullid, 2, delta=(1, delta)
+ f, tr, node2, node1, f.nullid, 2, delta=(1, delta)
)
self.assertEqual(len(f), 3)
@@ -1029,13 +1028,13 @@
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
self.assertTrue(f.iscensored(1))
@@ -1063,13 +1062,13 @@
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
with self.assertRaises(error.CensoredNodeError):
@@ -1088,10 +1087,10 @@
def testaddnoop(self):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
- node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# Varying by linkrev shouldn't impact hash.
- node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
+ node2 = f.add(b'foo', None, tr, 1, f.nullid, f.nullid)
self.assertEqual(node1, node0)
self.assertEqual(node2, node0)
@@ -1102,7 +1101,9 @@
with self._maketransactionfn() as tr:
# Adding a revision with bad node value fails.
with self.assertRaises(error.StorageError):
- f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
+ f.addrevision(
+ b'foo', tr, 0, f.nullid, f.nullid, node=b'\x01' * 20
+ )
def testaddrevisionunknownflag(self):
f = self._makefilefn()
@@ -1113,7 +1114,7 @@
break
with self.assertRaises(error.StorageError):
- f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
+ f.addrevision(b'foo', tr, 0, f.nullid, f.nullid, flags=flags)
def testaddgroupsimple(self):
f = self._makefilefn()
@@ -1153,12 +1154,12 @@
delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
f = self._makefilefn()
deltas = [
- (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
+ (node0, f.nullid, f.nullid, f.nullid, f.nullid, delta0, 0, {}),
]
with self._maketransactionfn() as tr:
@@ -1207,7 +1208,7 @@
nodes = []
with self._maketransactionfn() as tr:
for fulltext in fulltexts:
- nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
+ nodes.append(f.add(fulltext, None, tr, 0, f.nullid, f.nullid))
f = self._makefilefn()
deltas = []
@@ -1215,7 +1216,7 @@
delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
deltas.append(
- (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
+ (nodes[i], f.nullid, f.nullid, f.nullid, f.nullid, delta, 0, {})
)
with self._maketransactionfn() as tr:
@@ -1254,18 +1255,18 @@
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
deltas = [
- (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
+ (b'\xcc' * 20, node1, f.nullid, b'\x01' * 20, node1, delta, 0, {})
]
with self._maketransactionfn() as tr:
@@ -1276,9 +1277,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
- node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
- node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
+ node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, f.nullid)
+ node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, f.nullid)
with self._maketransactionfn() as tr:
f.censorrevision(tr, node1)
@@ -1298,7 +1299,7 @@
with self._maketransactionfn() as tr:
for rev in range(10):
- f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
+ f.add(b'%d' % rev, None, tr, rev, f.nullid, f.nullid)
for rev in range(10):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1308,10 +1309,10 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- p1 = nullid
+ p1 = f.nullid
for rev in range(10):
- f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+ f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
for rev in range(10):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1320,11 +1321,11 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 1, node0, nullid)
- f.add(b'2', None, tr, 2, node1, nullid)
- f.add(b'3', None, tr, 3, node0, nullid)
- f.add(b'4', None, tr, 4, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+ f.add(b'2', None, tr, 2, node1, f.nullid)
+ f.add(b'3', None, tr, 3, node0, f.nullid)
+ f.add(b'4', None, tr, 4, node0, f.nullid)
for rev in range(5):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1333,9 +1334,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- f.add(b'1', None, tr, 10, node0, nullid)
- f.add(b'2', None, tr, 5, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ f.add(b'1', None, tr, 10, node0, f.nullid)
+ f.add(b'2', None, tr, 5, node0, f.nullid)
self.assertEqual(f.getstrippoint(0), (0, set()))
self.assertEqual(f.getstrippoint(1), (1, set()))
@@ -1362,9 +1363,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- p1 = nullid
+ p1 = f.nullid
for rev in range(10):
- p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+ p1 = f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
self.assertEqual(len(f), 10)
@@ -1377,9 +1378,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 5, nullid, nullid)
- node2 = f.add(b'2', None, tr, 10, nullid, nullid)
+ f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 5, f.nullid, f.nullid)
+ node2 = f.add(b'2', None, tr, 10, f.nullid, f.nullid)
self.assertEqual(len(f), 3)
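
The mechanical `nullid` -> `f.nullid` rewrite above is groundwork for hash-agnostic storage: the null node becomes a property of the store instead of a module-level constant. A minimal sketch of the invariant the tests now rely on, assuming a SHA-1 backed store:

    from mercurial.node import sha1nodeconstants

    # the classic 20-byte SHA-1 null node
    assert sha1nodeconstants.nullid == b'\x00' * 20

    # any SHA-1 backed file store `f` exposes the same constant, which is
    # why s/nullid/f.nullid/ is behavior-preserving in these tests
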
--- a/mercurial/transaction.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/transaction.py Mon Jun 07 17:10:35 2021 -0400
@@ -56,7 +56,7 @@
unlink=True,
checkambigfiles=None,
):
- for f, o in entries:
+ for f, o in sorted(dict(entries).items()):
if o or not unlink:
checkambig = checkambigfiles and (f, b'') in checkambigfiles
try:
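
Worth noting on the hunk above: `dict(entries)` collapses duplicate journal entries to their last recorded offset, and `sorted(...)` makes replay order deterministic instead of journal-order. A small illustration with hypothetical journal data:

    entries = [(b'data/foo.i', 1024), (b'00changelog.i', 512), (b'data/foo.i', 0)]

    # later entries win, and iteration becomes lexicographic by file name
    assert sorted(dict(entries).items()) == [
        (b'00changelog.i', 512),
        (b'data/foo.i', 0),
    ]
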
@@ -720,9 +720,8 @@
entries = []
backupentries = []
- fp = opener.open(file)
- lines = fp.readlines()
- fp.close()
+ with opener.open(file) as fp:
+ lines = fp.readlines()
for l in lines:
try:
f, o = l.split(b'\0')
--- a/mercurial/treediscovery.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/treediscovery.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,10 +10,7 @@
import collections
from .i18n import _
-from .node import (
- nullid,
- short,
-)
+from .node import short
from . import (
error,
pycompat,
@@ -44,11 +41,11 @@
if audit is not None:
audit[b'total-roundtrips'] = 1
- if repo.changelog.tip() == nullid:
- base.add(nullid)
- if heads != [nullid]:
- return [nullid], [nullid], list(heads)
- return [nullid], [], heads
+ if repo.changelog.tip() == repo.nullid:
+ base.add(repo.nullid)
+ if heads != [repo.nullid]:
+ return [repo.nullid], [repo.nullid], list(heads)
+ return [repo.nullid], [], heads
# assume we're closer to the tip than the root
# and start by examining the heads
@@ -84,7 +81,7 @@
continue
repo.ui.debug(b"examining %s:%s\n" % (short(n[0]), short(n[1])))
- if n[0] == nullid: # found the end of the branch
+ if n[0] == repo.nullid: # found the end of the branch
pass
elif n in seenbranch:
repo.ui.debug(b"branch already found\n")
@@ -170,7 +167,7 @@
raise error.RepoError(_(b"already have changeset ") + short(f[:4]))
base = list(base)
- if base == [nullid]:
+ if base == [repo.nullid]:
if force:
repo.ui.warn(_(b"warning: repository is unrelated\n"))
else:
--- a/mercurial/ui.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/ui.py Mon Jun 07 17:10:35 2021 -0400
@@ -233,6 +233,8 @@
self._trustusers = set()
self._trustgroups = set()
self.callhooks = True
+ # hold the root to use for each [paths] entry
+ self._path_to_root = {}
# Insecure server connections requested.
self.insecureconnections = False
# Blocked time
@@ -264,6 +266,7 @@
self._trustgroups = src._trustgroups.copy()
self.environ = src.environ
self.callhooks = src.callhooks
+ self._path_to_root = src._path_to_root
self.insecureconnections = src.insecureconnections
self._colormode = src._colormode
self._terminfoparams = src._terminfoparams.copy()
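
A hedged sketch of what the new mapping holds once `fixconfig` has run: the key is `(name, raw value, config source)` and the value is the root the path should be resolved against (all concrete values below are hypothetical):

    ui._path_to_root = {
        (b'default', b'../upstream', b'/home/alice/.hgrc:12'): b'/home/alice',
    }
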
@@ -545,22 +548,26 @@
root = root or encoding.getcwd()
for c in self._tcfg, self._ucfg, self._ocfg:
for n, p in c.items(b'paths'):
+ old_p = p
+ s = self.configsource(b'paths', n) or b'none'
+ root_key = (n, p, s)
+ if root_key not in self._path_to_root:
+ self._path_to_root[root_key] = root
# Ignore sub-options.
if b':' in n:
continue
if not p:
continue
if b'%%' in p:
- s = self.configsource(b'paths', n) or b'none'
self.warn(
_(b"(deprecated '%%' in path %s=%s from %s)\n")
% (n, p, s)
)
p = p.replace(b'%%', b'%')
- p = util.expandpath(p)
- if not urlutil.hasscheme(p) and not os.path.isabs(p):
- p = os.path.normpath(os.path.join(root, p))
- c.alter(b"paths", n, p)
+ if p != old_p:
+ c.alter(b"paths", n, p)
if section in (None, b'ui'):
# update ui options
@@ -886,10 +893,10 @@
"""
# default is not always a list
v = self.configwith(
- config.parselist, section, name, default, b'list', untrusted
+ stringutil.parselist, section, name, default, b'list', untrusted
)
if isinstance(v, bytes):
- return config.parselist(v)
+ return stringutil.parselist(v)
elif v is None:
return []
return v
@@ -1057,6 +1064,8 @@
This method exists because `getpath` needs a ui for potential warning messages.
"""
+ msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
+ self.deprecwarn(msg, b'6.0')
return self.paths.getpath(self, *args, **kwargs)
@property
--- a/mercurial/unionrepo.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/unionrepo.py Mon Jun 07 17:10:35 2021 -0400
@@ -31,9 +31,13 @@
vfs as vfsmod,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
class unionrevlog(revlog.revlog):
- def __init__(self, opener, indexfile, revlog2, linkmapper):
+ def __init__(self, opener, radix, revlog2, linkmapper):
# How it works:
# To retrieve a revision, we just need to know the node id so we can
# look it up in revlog2.
@@ -41,7 +45,11 @@
# To differentiate a rev in the second revlog from a rev in the revlog,
# we check revision against repotiprev.
opener = vfsmod.readonlyvfs(opener)
- revlog.revlog.__init__(self, opener, indexfile)
+ target = getattr(revlog2, 'target', None)
+ if target is None:
+ # a revlog wrapper, e.g. a manifestlog that is not an actual revlog
+ target = revlog2._revlog.target
+ revlog.revlog.__init__(self, opener, target=target, radix=radix)
self.revlog2 = revlog2
n = len(self)
@@ -50,7 +58,20 @@
for rev2 in self.revlog2:
rev = self.revlog2.index[rev2]
# rev numbers - in revlog2, very different from self.rev
- _start, _csize, rsize, base, linkrev, p1rev, p2rev, node = rev
+ (
+ _start,
+ _csize,
+ rsize,
+ base,
+ linkrev,
+ p1rev,
+ p2rev,
+ node,
+ _sdo,
+ _sds,
+ _dcm,
+ _sdcm,
+ ) = rev
flags = _start & 0xFFFF
if linkmapper is None: # link is to same revlog
@@ -82,6 +103,10 @@
self.rev(p1node),
self.rev(p2node),
node,
+ 0, # sidedata offset
+ 0, # sidedata size
+ revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.COMP_MODE_INLINE,
)
self.index.append(e)
self.bundlerevs.add(n)
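
The long unpacking above reflects the index entry growing from 8 to 12 fields for revlog-v2; the four new trailing fields carry sidedata and compression information, which is also why the synthetic entry `e` pads them with zeros and `COMP_MODE_INLINE`. A descriptive view of the layout (field names here are illustrative, not the ones used by the index implementation):

    (
        offset_and_flags,  # start offset, with revision flags in the low 16 bits
        compressed_size,
        uncompressed_size,
        delta_base_rev,
        link_rev,
        p1_rev,
        p2_rev,
        node,
        sidedata_offset,
        sidedata_size,
        data_compression_mode,      # e.g. revlog_constants.COMP_MODE_INLINE
        sidedata_compression_mode,
    ) = entry
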
@@ -147,9 +172,7 @@
changelog.changelog.__init__(self, opener)
linkmapper = None
changelog2 = changelog.changelog(opener2)
- unionrevlog.__init__(
- self, opener, self.indexfile, changelog2, linkmapper
- )
+ unionrevlog.__init__(self, opener, self.radix, changelog2, linkmapper)
class unionmanifest(unionrevlog, manifest.manifestrevlog):
@@ -157,7 +180,7 @@
manifest.manifestrevlog.__init__(self, nodeconstants, opener)
manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
unionrevlog.__init__(
- self, opener, self.indexfile, manifest2, linkmapper
+ self, opener, self._revlog.radix, manifest2, linkmapper
)
@@ -166,7 +189,7 @@
filelog.filelog.__init__(self, opener, path)
filelog2 = filelog.filelog(opener2, path)
self._revlog = unionrevlog(
- opener, self.indexfile, filelog2._revlog, linkmapper
+ opener, self._revlog.radix, filelog2._revlog, linkmapper
)
self._repo = repo
self.repotiprev = self._revlog.repotiprev
--- a/mercurial/upgrade_utils/actions.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/upgrade_utils/actions.py Mon Jun 07 17:10:35 2021 -0400
@@ -30,6 +30,8 @@
RECLONES_REQUIREMENTS = {
requirements.GENERALDELTA_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
+ requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
}
@@ -42,92 +44,16 @@
class improvement(object):
- """Represents an improvement that can be made as part of an upgrade.
-
- The following attributes are defined on each instance:
-
- name
- Machine-readable string uniquely identifying this improvement. It
- will be mapped to an action later in the upgrade process.
-
- type
- Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
- A format variant is where we change the storage format. Not all format
- variant changes are an obvious problem.
- An optimization is an action (sometimes optional) that
- can be taken to further improve the state of the repository.
-
- description
- Message intended for humans explaining the improvement in more detail,
- including the implications of it. For ``FORMAT_VARIANT`` types, should be
- worded in the present tense. For ``OPTIMISATION`` types, should be
- worded in the future tense.
+ """Represents an improvement that can be made as part of an upgrade."""
- upgrademessage
- Message intended for humans explaining what an upgrade addressing this
- issue will do. Should be worded in the future tense.
-
- postupgrademessage
- Message intended for humans which will be shown post an upgrade
- operation when the improvement will be added
-
- postdowngrademessage
- Message intended for humans which will be shown post an upgrade
- operation in which this improvement was removed
-
- touches_filelogs (bool)
- Whether this improvement touches filelogs
-
- touches_manifests (bool)
- Whether this improvement touches manifests
-
- touches_changelog (bool)
- Whether this improvement touches changelog
+ ### The following attributes should be defined for each subclass:
- touches_requirements (bool)
- Whether this improvement changes repository requirements
- """
-
- def __init__(self, name, type, description, upgrademessage):
- self.name = name
- self.type = type
- self.description = description
- self.upgrademessage = upgrademessage
- self.postupgrademessage = None
- self.postdowngrademessage = None
- # By default for now, we assume every improvement touches
- # all the things
- self.touches_filelogs = True
- self.touches_manifests = True
- self.touches_changelog = True
- self.touches_requirements = True
-
- def __eq__(self, other):
- if not isinstance(other, improvement):
- # This is what python tell use to do
- return NotImplemented
- return self.name == other.name
-
- def __ne__(self, other):
- return not (self == other)
-
- def __hash__(self):
- return hash(self.name)
-
-
-allformatvariant = [] # type: List[Type['formatvariant']]
-
-
-def registerformatvariant(cls):
- allformatvariant.append(cls)
- return cls
-
-
-class formatvariant(improvement):
- """an improvement subclass dedicated to repository format"""
-
- type = FORMAT_VARIANT
- ### The following attributes should be defined for each class:
+ # Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
+ # A format variant is where we change the storage format. Not all format
+ # variant changes are an obvious problem.
+ # An optimization is an action (sometimes optional) that
+ # can be taken to further improve the state of the repository.
+ type = None
# machine-readable string uniquely identifying this improvement. it will be
# mapped to an action later in the upgrade process.
@@ -154,14 +80,36 @@
# operation in which this improvement was removed
postdowngrademessage = None
- # By default for now, we assume every improvement touches all the things
+ # By default we assume that every improvement touches requirements and all revlogs
+
+ # Whether this improvement touches filelogs
touches_filelogs = True
+
+ # Whether this improvement touches manifests
touches_manifests = True
+
+ # Whether this improvement touches changelog
touches_changelog = True
+
+ # Whether this improvement changes repository requirements
touches_requirements = True
- def __init__(self):
- raise NotImplementedError()
+ # Whether this improvement touches the dirstate
+ touches_dirstate = False
+
+
+allformatvariant = [] # type: List[Type['formatvariant']]
+
+
+def registerformatvariant(cls):
+ allformatvariant.append(cls)
+ return cls
+
+
+class formatvariant(improvement):
+ """an improvement subclass dedicated to repository format"""
+
+ type = FORMAT_VARIANT
@staticmethod
def fromrepo(repo):
@@ -222,6 +170,27 @@
@registerformatvariant
+class dirstatev2(requirementformatvariant):
+ name = b'dirstate-v2'
+ _requirement = requirements.DIRSTATE_V2_REQUIREMENT
+
+ default = False
+
+ description = _(
+ b'version 1 of the dirstate file format requires '
+ b'reading and parsing it all at once.'
+ )
+
+ upgrademessage = _(b'"hg status" will be faster')
+
+ touches_filelogs = False
+ touches_manifests = False
+ touches_changelog = False
+ touches_requirements = True
+ touches_dirstate = True
+
+
+@registerformatvariant
class dotencode(requirementformatvariant):
name = b'dotencode'
@@ -372,6 +341,15 @@
@registerformatvariant
+class changelogv2(requirementformatvariant):
+ name = b'changelog-v2'
+ _requirement = requirements.CHANGELOGV2_REQUIREMENT
+ default = False
+ description = _(b'An iteration of the revlog focused on changelog needs.')
+ upgrademessage = _(b'quite experimental')
+
+
+@registerformatvariant
class removecldeltachain(formatvariant):
name = b'plain-cl-delta'
@@ -534,87 +512,100 @@
return obj
-register_optimization(
- improvement(
- name=b're-delta-parent',
- type=OPTIMISATION,
- description=_(
- b'deltas within internal storage will be recalculated to '
- b'choose an optimal base revision where this was not '
- b'already done; the size of the repository may shrink and '
- b'various operations may become faster; the first time '
- b'this optimization is performed could slow down upgrade '
- b'execution considerably; subsequent invocations should '
- b'not run noticeably slower'
- ),
- upgrademessage=_(
- b'deltas within internal storage will choose a new '
- b'base revision if needed'
- ),
+class optimization(improvement):
+ """an improvement subclass dedicated to optimizations"""
+
+ type = OPTIMISATION
+
+
+@register_optimization
+class redeltaparents(optimization):
+ name = b're-delta-parent'
+
+ type = OPTIMISATION
+
+ description = _(
+ b'deltas within internal storage will be recalculated to '
+ b'choose an optimal base revision where this was not '
+ b'already done; the size of the repository may shrink and '
+ b'various operations may become faster; the first time '
+ b'this optimization is performed could slow down upgrade '
+ b'execution considerably; subsequent invocations should '
+ b'not run noticeably slower'
)
-)
+
+ upgrademessage = _(
+ b'deltas within internal storage will choose a new '
+ b'base revision if needed'
+ )
+
+
+@register_optimization
+class redeltamultibase(optimization):
+ name = b're-delta-multibase'
+
+ type = OPTIMISATION
+
+ description = _(
+ b'deltas within internal storage will be recalculated '
+ b'against multiple base revision and the smallest '
+ b'difference will be used; the size of the repository may '
+ b'shrink significantly when there are many merges; this '
+ b'optimization will slow down execution in proportion to '
+ b'the number of merges in the repository and the amount '
+ b'of files in the repository; this slow down should not '
+ b'be significant unless there are tens of thousands of '
+ b'files and thousands of merges'
+ )
-register_optimization(
- improvement(
- name=b're-delta-multibase',
- type=OPTIMISATION,
- description=_(
- b'deltas within internal storage will be recalculated '
- b'against multiple base revision and the smallest '
- b'difference will be used; the size of the repository may '
- b'shrink significantly when there are many merges; this '
- b'optimization will slow down execution in proportion to '
- b'the number of merges in the repository and the amount '
- b'of files in the repository; this slow down should not '
- b'be significant unless there are tens of thousands of '
- b'files and thousands of merges'
- ),
- upgrademessage=_(
- b'deltas within internal storage will choose an '
- b'optimal delta by computing deltas against multiple '
- b'parents; may slow down execution time '
- b'significantly'
- ),
+ upgrademessage = _(
+ b'deltas within internal storage will choose an '
+ b'optimal delta by computing deltas against multiple '
+ b'parents; may slow down execution time '
+ b'significantly'
)
-)
+
+
+@register_optimization
+class redeltaall(optimization):
+ name = b're-delta-all'
+
+ type = OPTIMISATION
+
+ description = _(
+ b'deltas within internal storage will always be '
+ b'recalculated without reusing prior deltas; this will '
+ b'likely make execution run several times slower; this '
+ b'optimization is typically not needed'
+ )
-register_optimization(
- improvement(
- name=b're-delta-all',
- type=OPTIMISATION,
- description=_(
- b'deltas within internal storage will always be '
- b'recalculated without reusing prior deltas; this will '
- b'likely make execution run several times slower; this '
- b'optimization is typically not needed'
- ),
- upgrademessage=_(
- b'deltas within internal storage will be fully '
- b'recomputed; this will likely drastically slow down '
- b'execution time'
- ),
+ upgrademessage = _(
+ b'deltas within internal storage will be fully '
+ b'recomputed; this will likely drastically slow down '
+ b'execution time'
)
-)
+
+
+@register_optimization
+class redeltafulladd(optimization):
+ name = b're-delta-fulladd'
+
+ type = OPTIMISATION
-register_optimization(
- improvement(
- name=b're-delta-fulladd',
- type=OPTIMISATION,
- description=_(
- b'every revision will be re-added as if it was new '
- b'content. It will go through the full storage '
- b'mechanism giving extensions a chance to process it '
- b'(eg. lfs). This is similar to "re-delta-all" but even '
- b'slower since more logic is involved.'
- ),
- upgrademessage=_(
- b'each revision will be added as new content to the '
- b'internal storage; this will likely drastically slow '
- b'down execution time, but some extensions might need '
- b'it'
- ),
+ description = _(
+ b'every revision will be re-added as if it was new '
+ b'content. It will go through the full storage '
+ b'mechanism giving extensions a chance to process it '
+ b'(eg. lfs). This is similar to "re-delta-all" but even '
+ b'slower since more logic is involved.'
)
-)
+
+ upgrademessage = _(
+ b'each revision will be added as new content to the '
+ b'internal storage; this will likely drastically slow '
+ b'down execution time, but some extensions might need '
+ b'it'
+ )
def findoptimizations(repo):
@@ -677,7 +668,6 @@
self.current_requirements = current_requirements
# list of upgrade actions the operation will perform
self.upgrade_actions = upgrade_actions
- self._upgrade_actions_names = set([a.name for a in upgrade_actions])
self.removed_actions = removed_actions
self.revlogs_to_process = revlogs_to_process
# requirements which will be added by the operation
@@ -700,41 +690,42 @@
]
# delta reuse mode of this upgrade operation
+ upgrade_actions_names = self.upgrade_actions_names
self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
- if b're-delta-all' in self._upgrade_actions_names:
+ if b're-delta-all' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
- elif b're-delta-parent' in self._upgrade_actions_names:
+ elif b're-delta-parent' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
- elif b're-delta-multibase' in self._upgrade_actions_names:
+ elif b're-delta-multibase' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
- elif b're-delta-fulladd' in self._upgrade_actions_names:
+ elif b're-delta-fulladd' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
# should this operation force re-delta of both parents
self.force_re_delta_both_parents = (
- b're-delta-multibase' in self._upgrade_actions_names
+ b're-delta-multibase' in upgrade_actions_names
)
# should this operation create a backup of the store
self.backup_store = backup_store
- # whether the operation touches different revlogs at all or not
- self.touches_filelogs = self._touches_filelogs()
- self.touches_manifests = self._touches_manifests()
- self.touches_changelog = self._touches_changelog()
- # whether the operation touches requirements file or not
- self.touches_requirements = self._touches_requirements()
- self.touches_store = (
- self.touches_filelogs
- or self.touches_manifests
- or self.touches_changelog
- )
+ @property
+ def upgrade_actions_names(self):
+ return set([a.name for a in self.upgrade_actions])
+
+ @property
+ def requirements_only(self):
# does the operation only touch repository requirements
- self.requirements_only = (
- self.touches_requirements and not self.touches_store
+ return (
+ self.touches_requirements
+ and not self.touches_filelogs
+ and not self.touches_manifests
+ and not self.touches_changelog
+ and not self.touches_dirstate
)
- def _touches_filelogs(self):
+ @property
+ def touches_filelogs(self):
for a in self.upgrade_actions:
# in optimisations, we re-process the revlogs again
if a.type == OPTIMISATION:
@@ -746,7 +737,8 @@
return True
return False
- def _touches_manifests(self):
+ @property
+ def touches_manifests(self):
for a in self.upgrade_actions:
# in optimisations, we re-process the revlogs again
if a.type == OPTIMISATION:
@@ -758,7 +750,8 @@
return True
return False
- def _touches_changelog(self):
+ @property
+ def touches_changelog(self):
for a in self.upgrade_actions:
# in optimisations, we re-process the revlogs again
if a.type == OPTIMISATION:
@@ -770,7 +763,8 @@
return True
return False
- def _touches_requirements(self):
+ @property
+ def touches_requirements(self):
for a in self.upgrade_actions:
# optimisations are used to re-process revlogs and do not result
# in a requirement being added or removed
@@ -782,6 +776,18 @@
if a.touches_requirements:
return True
+ @property
+ def touches_dirstate(self):
+ for a in self.upgrade_actions:
+ # revlog optimisations do not affect the dirstate
+ if a.type == OPTIMISATION:
+ pass
+ elif a.touches_dirstate:
+ return True
+ for a in self.removed_actions:
+ if a.touches_dirstate:
+ return True
+
return False
def _write_labeled(self, l, label):
@@ -935,12 +941,13 @@
"""
supported = {
requirements.SPARSEREVLOG_REQUIREMENT,
- requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
requirements.REVLOGV1_REQUIREMENT,
+ requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -966,11 +973,12 @@
requirements.REVLOGV1_REQUIREMENT, # allowed in case of downgrade
requirements.STORE_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
- requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
+ requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -996,12 +1004,13 @@
requirements.FNCACHE_REQUIREMENT,
requirements.GENERALDELTA_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
- requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
requirements.REVLOGV1_REQUIREMENT,
requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
+ requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
--- a/mercurial/upgrade_utils/engine.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/upgrade_utils/engine.py Mon Jun 07 17:10:35 2021 -0400
@@ -19,13 +19,33 @@
metadata,
pycompat,
requirements,
- revlog,
scmutil,
store,
util,
vfs as vfsmod,
)
-from ..revlogutils import nodemap
+from ..revlogutils import (
+ constants as revlogconst,
+ flagutil,
+ nodemap,
+ sidedata as sidedatamod,
+)
+from . import actions as upgrade_actions
+
+
+def get_sidedata_helpers(srcrepo, dstrepo):
+ use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+ sequential = pycompat.iswindows or not use_w
+ if not sequential:
+ srcrepo.register_sidedata_computer(
+ revlogconst.KIND_CHANGELOG,
+ sidedatamod.SD_FILES,
+ (sidedatamod.SD_FILES,),
+ metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
+ flagutil.REVIDX_HASCOPIESINFO,
+ replace=True,
+ )
+ return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
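
For context, `register_sidedata_computer` wires a callable into the helpers that `storageutil.emitrevisions` consumes. A hedged sketch of the computer contract as this series uses it (names are illustrative; the authoritative signature lives in `revlogutils.sidedata`):

    def example_computer(repo, store, rev, sidedata):
        # return the (possibly updated) sidedata dict plus a pair of
        # (flags to add, flags to remove) for that revision
        sidedata = dict(sidedata)
        sidedata[sidedatamod.SD_FILES] = b'...'
        return sidedata, (flagutil.REVIDX_HASCOPIESINFO, 0)
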
def _revlogfrompath(repo, rl_type, path):
@@ -61,16 +81,16 @@
oldvfs = oldrl.opener
newvfs = newrl.opener
- oldindex = oldvfs.join(oldrl.indexfile)
- newindex = newvfs.join(newrl.indexfile)
- olddata = oldvfs.join(oldrl.datafile)
- newdata = newvfs.join(newrl.datafile)
+ oldindex = oldvfs.join(oldrl._indexfile)
+ newindex = newvfs.join(newrl._indexfile)
+ olddata = oldvfs.join(oldrl._datafile)
+ newdata = newvfs.join(newrl._datafile)
- with newvfs(newrl.indexfile, b'w'):
+ with newvfs(newrl._indexfile, b'w'):
pass # create all the directories
util.copyfile(oldindex, newindex)
- copydata = oldrl.opener.exists(oldrl.datafile)
+ copydata = oldrl.opener.exists(oldrl._datafile)
if copydata:
util.copyfile(olddata, newdata)
@@ -89,25 +109,6 @@
)
-def getsidedatacompanion(srcrepo, dstrepo):
- sidedatacompanion = None
- removedreqs = srcrepo.requirements - dstrepo.requirements
- addedreqs = dstrepo.requirements - srcrepo.requirements
- if requirements.SIDEDATA_REQUIREMENT in removedreqs:
-
- def sidedatacompanion(rl, rev):
- rl = getattr(rl, '_revlog', rl)
- if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
- return True, (), {}, 0, 0
- return False, (), {}, 0, 0
-
- elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
- sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
- elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
- sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
- return sidedatacompanion
-
-
def matchrevlog(revlogfilter, rl_type):
"""check if a revlog is selected for cloning.
@@ -131,7 +132,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
):
"""returns the new revlog object created"""
@@ -147,7 +148,7 @@
addrevisioncb=oncopiedrevision,
deltareuse=upgrade_op.delta_reuse_mode,
forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
- sidedatacompanion=sidedatacompanion,
+ sidedata_helpers=sidedata_helpers,
)
else:
msg = _(b'blindly copying %s containing %i revisions\n')
@@ -257,7 +258,7 @@
def oncopiedrevision(rl, rev, node):
progress.increment()
- sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
+ sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)
# Migrating filelogs
ui.status(
@@ -282,7 +283,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -322,7 +323,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -361,7 +362,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -458,6 +459,19 @@
)
)
+ if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions:
+ ui.status(_(b'upgrading to dirstate-v2 from v1\n'))
+ upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2')
+ upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2)
+
+ if upgrade_actions.dirstatev2 in upgrade_op.removed_actions:
+ ui.status(_(b'downgrading from dirstate-v2 to v1\n'))
+ upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
+ upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)
+
+ if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
+ return
+
if upgrade_op.requirements_only:
ui.status(_(b'upgrading repository requirements\n'))
scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
@@ -466,7 +480,7 @@
# through the whole cloning process
elif (
len(upgrade_op.upgrade_actions) == 1
- and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
+ and b'persistent-nodemap' in upgrade_op.upgrade_actions_names
and not upgrade_op.removed_actions
):
ui.status(
@@ -591,3 +605,28 @@
backupvfs.unlink(b'store/lock')
return backuppath
+
+
+def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new):
+ if upgrade_op.backup_store:
+ backuppath = pycompat.mkdtemp(
+ prefix=b'upgradebackup.', dir=srcrepo.path
+ )
+ ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
+ backupvfs = vfsmod.vfs(backuppath)
+ util.copyfile(
+ srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
+ )
+ util.copyfile(
+ srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate')
+ )
+
+ assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
+ srcrepo.dirstate._map._use_dirstate_tree = True
+ srcrepo.dirstate._map.preload()
+ srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
+ srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
+ srcrepo.dirstate._dirty = True
+ srcrepo.dirstate.write(None)
+
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
--- a/mercurial/util.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/util.py Mon Jun 07 17:10:35 2021 -0400
@@ -34,6 +34,7 @@
import traceback
import warnings
+from .node import hex
from .thirdparty import attr
from .pycompat import (
delattr,
@@ -1908,7 +1909,9 @@
}
-def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
+def copyfile(
+ src, dest, hardlink=False, copystat=False, checkambig=False, nb_bytes=None
+):
"""copy a file, preserving mode and optionally other stat info like
atime/mtime
@@ -1917,6 +1920,8 @@
repo.wlock).
copystat and checkambig should be exclusive.
+
+ nb_bytes: if set only copy the first `nb_bytes` of the source file.
"""
assert not (copystat and checkambig)
oldstat = None
@@ -1936,6 +1941,9 @@
if hardlink:
try:
oslink(src, dest)
+ if nb_bytes is not None:
+ m = "the `nb_bytes` argument is incompatible with `hardlink`"
+ raise error.ProgrammingError(m)
return
except (IOError, OSError):
pass # fall back to normal copy
@@ -1943,6 +1951,9 @@
os.symlink(os.readlink(src), dest)
# copytime is ignored for symlinks, but in general copytime isn't needed
# for them anyway
+ if nb_bytes is not None:
+ m = "cannot use `nb_bytes` on a symlink"
+ raise error.ProgrammingError(m)
else:
try:
shutil.copyfile(src, dest)
@@ -1959,6 +1970,10 @@
oldstat.stat[stat.ST_MTIME] + 1
) & 0x7FFFFFFF
os.utime(dest, (advanced, advanced))
+ # We could do something smarter using a `copy_file_range` call or similar
+ if nb_bytes is not None:
+ with open(dest, mode='r+') as f:
+ f.truncate(nb_bytes)
except shutil.Error as inst:
raise error.Abort(stringutil.forcebytestr(inst))
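
A short usage sketch for the new parameter (file names are hypothetical); the guards above make it a programming error to combine `nb_bytes` with hardlinks or symlinks:

    from mercurial import util

    # copy only the first 4096 bytes of the source file
    util.copyfile(b'journal', b'journal.partial', nb_bytes=4096)
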
--- a/mercurial/utils/storageutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/utils/storageutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -13,8 +13,8 @@
from ..i18n import _
from ..node import (
bin,
- nullid,
nullrev,
+ sha1nodeconstants,
)
from .. import (
dagop,
@@ -26,7 +26,11 @@
from ..revlogutils import sidedata as sidedatamod
from ..utils import hashutil
-_nullhash = hashutil.sha1(nullid)
+_nullhash = hashutil.sha1(sha1nodeconstants.nullid)
+
+# revision data contains extra metadata that is not part of the official digest
+# Only used in changegroup >= v4.
+CG_FLAG_SIDEDATA = 1
def hashrevisionsha1(text, p1, p2):
@@ -37,7 +41,7 @@
content in the revision graph.
"""
# As of now, if one of the parent nodes is null, p2 is null
- if p2 == nullid:
+ if p2 == sha1nodeconstants.nullid:
# deep copy of a hash is faster than creating one
s = _nullhash.copy()
s.update(p1)
@@ -107,7 +111,7 @@
Returns ``False`` if the file has no copy metadata. Otherwise a
2-tuple of the source filename and node.
"""
- if store.parents(node)[0] != nullid:
+ if store.parents(node)[0] != sha1nodeconstants.nullid:
return False
meta = parsemeta(store.revision(node))[0]
@@ -360,19 +364,7 @@
``assumehaveparentrevisions``
``sidedata_helpers`` (optional)
If not None, means that sidedata should be included.
- A dictionary of revlog type to tuples of `(repo, computers, removers)`:
- * `repo` is used as an argument for computers
- * `computers` is a list of `(category, (keys, computer)` that
- compute the missing sidedata categories that were asked:
- * `category` is the sidedata category
- * `keys` are the sidedata keys to be affected
- * `computer` is the function `(repo, store, rev, sidedata)` that
- returns a new sidedata dict.
- * `removers` will remove the keys corresponding to the categories
- that are present, but not needed.
- If both `computers` and `removers` are empty, sidedata are simply not
- transformed.
- Revlog types are `changelog`, `manifest` or `filelog`.
+ See `revlogutils.sidedata.get_sidedata_helpers`.
"""
fnode = store.node
@@ -486,51 +478,48 @@
available.add(rev)
- sidedata = None
+ serialized_sidedata = None
+ sidedata_flags = (0, 0)
if sidedata_helpers:
- sidedata = store.sidedata(rev)
- sidedata = run_sidedata_helpers(
- store=store,
- sidedata_helpers=sidedata_helpers,
- sidedata=sidedata,
- rev=rev,
- )
- sidedata = sidedatamod.serialize_sidedata(sidedata)
+ try:
+ old_sidedata = store.sidedata(rev)
+ except error.CensoredNodeError:
+ # skip any potential sidedata of the censored revision
+ sidedata = {}
+ else:
+ sidedata, sidedata_flags = sidedatamod.run_sidedata_helpers(
+ store=store,
+ sidedata_helpers=sidedata_helpers,
+ sidedata=old_sidedata,
+ rev=rev,
+ )
+ if sidedata:
+ serialized_sidedata = sidedatamod.serialize_sidedata(sidedata)
+
+ flags = flagsfn(rev) if flagsfn else 0
+ protocol_flags = 0
+ if serialized_sidedata:
+ # Advertise that sidedata exists to the other side
+ protocol_flags |= CG_FLAG_SIDEDATA
+ # Computers and removers can return flags to add and/or remove
+ flags = flags | sidedata_flags[0] & ~sidedata_flags[1]
yield resultcls(
node=node,
p1node=fnode(p1rev),
p2node=fnode(p2rev),
basenode=fnode(baserev),
- flags=flagsfn(rev) if flagsfn else 0,
+ flags=flags,
baserevisionsize=baserevisionsize,
revision=revision,
delta=delta,
- sidedata=sidedata,
+ sidedata=serialized_sidedata,
+ protocol_flags=protocol_flags,
)
prevrev = rev
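
One subtlety in the flag merging above: `&` binds tighter than `|` in Python, so the expression adds `sidedata_flags[0] & ~sidedata_flags[1]` to the existing flags rather than also clearing the `sidedata_flags[1]` bits from them. The two readings differ:

    flags = 0b0101
    to_add, to_remove = 0b0010, 0b0100

    assert flags | to_add & ~to_remove == 0b0111    # what the code computes
    assert (flags | to_add) & ~to_remove == 0b0011  # if removal also applied to flags
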
-def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
- """Returns the sidedata for the given revision after running through
- the given helpers.
- - `store`: the revlog this applies to (changelog, manifest, or filelog
- instance)
- - `sidedata_helpers`: see `storageutil.emitrevisions`
- - `sidedata`: previous sidedata at the given rev, if any
- - `rev`: affected rev of `store`
- """
- repo, sd_computers, sd_removers = sidedata_helpers
- kind = store.revlog_kind
- for _keys, sd_computer in sd_computers.get(kind, []):
- sidedata = sd_computer(repo, store, rev, sidedata)
- for keys, _computer in sd_removers.get(kind, []):
- for key in keys:
- sidedata.pop(key, None)
- return sidedata
-
-
def deltaiscensored(delta, baserev, baselenfn):
"""Determine if a delta represents censored revision data.
--- a/mercurial/utils/stringutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/utils/stringutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -868,6 +868,96 @@
return _booleans.get(s.lower(), None)
+def parselist(value):
+ """parse a configuration value as a list of comma/space separated strings
+
+ >>> parselist(b'this,is "a small" ,test')
+ ['this', 'is', 'a small', 'test']
+ """
+
+ def _parse_plain(parts, s, offset):
+ whitespace = False
+ while offset < len(s) and (
+ s[offset : offset + 1].isspace() or s[offset : offset + 1] == b','
+ ):
+ whitespace = True
+ offset += 1
+ if offset >= len(s):
+ return None, parts, offset
+ if whitespace:
+ parts.append(b'')
+ if s[offset : offset + 1] == b'"' and not parts[-1]:
+ return _parse_quote, parts, offset + 1
+ elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\':
+ parts[-1] = parts[-1][:-1] + s[offset : offset + 1]
+ return _parse_plain, parts, offset + 1
+ parts[-1] += s[offset : offset + 1]
+ return _parse_plain, parts, offset + 1
+
+ def _parse_quote(parts, s, offset):
+ if offset < len(s) and s[offset : offset + 1] == b'"': # ""
+ parts.append(b'')
+ offset += 1
+ while offset < len(s) and (
+ s[offset : offset + 1].isspace()
+ or s[offset : offset + 1] == b','
+ ):
+ offset += 1
+ return _parse_plain, parts, offset
+
+ while offset < len(s) and s[offset : offset + 1] != b'"':
+ if (
+ s[offset : offset + 1] == b'\\'
+ and offset + 1 < len(s)
+ and s[offset + 1 : offset + 2] == b'"'
+ ):
+ offset += 1
+ parts[-1] += b'"'
+ else:
+ parts[-1] += s[offset : offset + 1]
+ offset += 1
+
+ if offset >= len(s):
+ real_parts = _configlist(parts[-1])
+ if not real_parts:
+ parts[-1] = b'"'
+ else:
+ real_parts[0] = b'"' + real_parts[0]
+ parts = parts[:-1]
+ parts.extend(real_parts)
+ return None, parts, offset
+
+ offset += 1
+ while offset < len(s) and s[offset : offset + 1] in [b' ', b',']:
+ offset += 1
+
+ if offset < len(s):
+ if offset + 1 == len(s) and s[offset : offset + 1] == b'"':
+ parts[-1] += b'"'
+ offset += 1
+ else:
+ parts.append(b'')
+ else:
+ return None, parts, offset
+
+ return _parse_plain, parts, offset
+
+ def _configlist(s):
+ s = s.rstrip(b' ,')
+ if not s:
+ return []
+ parser, parts, offset = _parse_plain, [b''], 0
+ while parser:
+ parser, parts, offset = parser(parts, s, offset)
+ return parts
+
+ if value is not None and isinstance(value, bytes):
+ result = _configlist(value.lstrip(b' ,\n'))
+ else:
+ result = value
+ return result or []
+
+
def evalpythonliteral(s):
"""Evaluate a string containing a Python literal expression"""
# We could backport our tokenizer hack to rewrite '' to u'' if we want
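
Since `ui.configlist` now routes through this helper (see the ui.py hunk above), a quick self-contained check of the documented behavior; on Python 3 the elements come back as bytes:

    from mercurial.utils import stringutil

    parts = stringutil.parselist(b'this,is "a small" ,test')
    assert parts == [b'this', b'is', b'a small', b'test']
    assert stringutil.parselist(None) == []
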
--- a/mercurial/utils/urlutil.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/utils/urlutil.py Mon Jun 07 17:10:35 2021 -0400
@@ -20,6 +20,10 @@
urllibcompat,
)
+from . import (
+ stringutil,
+)
+
if pycompat.TYPE_CHECKING:
from typing import (
@@ -445,13 +449,41 @@
return bytes(u)
+def list_paths(ui, target_path=None):
+ """list all the (name, paths) in the passed ui"""
+ result = []
+ if target_path is None:
+ for name, paths in sorted(pycompat.iteritems(ui.paths)):
+ for p in paths:
+ result.append((name, p))
+
+ else:
+ for path in ui.paths.get(target_path, []):
+ result.append((target_path, path))
+ return result
+
+
+def try_path(ui, url):
+ """try to build a path from a url
+
+ Return None if no path could be built.
+ """
+ try:
+ # we pass the ui instance as warnings might need to be issued
+ return path(ui, None, rawloc=url)
+ except ValueError:
+ return None
+
+
def get_push_paths(repo, ui, dests):
"""yields all the `path` selected as push destination by `dests`"""
if not dests:
if b'default-push' in ui.paths:
- yield ui.paths[b'default-push']
+ for p in ui.paths[b'default-push']:
+ yield p
elif b'default' in ui.paths:
- yield ui.paths[b'default']
+ for p in ui.paths[b'default']:
+ yield p
else:
raise error.ConfigError(
_(b'default repository not configured!'),
@@ -459,7 +491,16 @@
)
else:
for dest in dests:
- yield ui.getpath(dest)
+ if dest in ui.paths:
+ for p in ui.paths[dest]:
+ yield p
+ else:
+ path = try_path(ui, dest)
+ if path is None:
+ msg = _(b'repository %s does not exist')
+ msg %= dest
+ raise error.RepoError(msg)
+ yield path
def get_pull_paths(repo, ui, sources, default_branches=()):
@@ -468,15 +509,16 @@
sources = [b'default']
for source in sources:
if source in ui.paths:
- url = ui.paths[source].rawloc
+ for p in ui.paths[source]:
+ yield parseurl(p.rawloc, default_branches)
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
+ path = try_path(ui, source)
+ if path is not None:
+ url = path.rawloc
+ else:
url = source
- yield parseurl(url, default_branches)
+ yield parseurl(url, default_branches)
def get_unique_push_path(action, repo, ui, dest=None):
@@ -494,7 +536,14 @@
else:
dests = [dest]
dests = list(get_push_paths(repo, ui, dests))
- assert len(dests) == 1
+ if len(dests) != 1:
+ if dest is None:
+ msg = _("default path points to %d urls while %s only supports one")
+ msg %= (len(dests), action)
+ else:
+ msg = _("path points to %d urls while %s only supports one: %s")
+ msg %= (len(dests), action, dest)
+ raise error.Abort(msg)
return dests[0]
@@ -508,45 +557,66 @@
The `action` parameter will be used for the error message.
"""
+ urls = []
if source is None:
if b'default' in ui.paths:
- url = ui.paths[b'default'].rawloc
+ urls.extend(p.rawloc for p in ui.paths[b'default'])
else:
# XXX this is the historical default behavior, but that is not
# great, consider breaking BC on this.
- url = b'default'
+ urls.append(b'default')
else:
if source in ui.paths:
- url = ui.paths[source].rawloc
+ urls.extend(p.rawloc for p in ui.paths[source])
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
- url = source
- return parseurl(url, default_branches)
+ path = try_path(ui, source)
+ if path is not None:
+ urls.append(path.rawloc)
+ else:
+ urls.append(source)
+ if len(urls) != 1:
+ if source is None:
+ msg = _("default path points to %d urls while %s only supports one")
+ msg %= (len(urls), action)
+ else:
+ msg = _("path points to %d urls while %s only supports one: %s")
+ msg %= (len(urls), action, source)
+ raise error.Abort(msg)
+ return parseurl(urls[0], default_branches)
def get_clone_path(ui, source, default_branches=()):
"""return the `(origsource, path, branch)` selected as clone source"""
+ urls = []
if source is None:
if b'default' in ui.paths:
- url = ui.paths[b'default'].rawloc
+ urls.extend(p.rawloc for p in ui.paths[b'default'])
else:
# XXX this is the historical default behavior, but that is not
# great, consider breaking BC on this.
- url = b'default'
+ urls.append(b'default')
else:
if source in ui.paths:
- url = ui.paths[source].rawloc
+ urls.extend(p.rawloc for p in ui.paths[source])
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
- url = source
+ path = try_path(ui, source)
+ if path is not None:
+ urls.append(path.rawloc)
+ else:
+ urls.append(source)
+ if len(urls) != 1:
+ if source is None:
+ msg = _(
+ "default path points to %d urls while only one is supported"
+ )
+ msg %= len(urls)
+ else:
+ msg = _("path points to %d urls while only one is supported: %s")
+ msg %= (len(urls), source)
+ raise error.Abort(msg)
+ url = urls[0]
clone_path, branch = parseurl(url, default_branches)
return url, clone_path, branch
@@ -571,15 +641,38 @@
def __init__(self, ui):
dict.__init__(self)
- for name, loc in ui.configitems(b'paths', ignoresub=True):
+ home_path = os.path.expanduser(b'~')
+
+ for name, value in ui.configitems(b'paths', ignoresub=True):
# No location is the same as not existing.
- if not loc:
+ if not value:
continue
- loc, sub_opts = ui.configsuboptions(b'paths', name)
- self[name] = path(ui, name, rawloc=loc, suboptions=sub_opts)
+ _value, sub_opts = ui.configsuboptions(b'paths', name)
+ s = ui.configsource(b'paths', name)
+ root_key = (name, value, s)
+ root = ui._path_to_root.get(root_key, home_path)
+
+ multi_url = sub_opts.get(b'multi-urls')
+ if multi_url is not None and stringutil.parsebool(multi_url):
+ base_locs = stringutil.parselist(value)
+ else:
+ base_locs = [value]
- for name, p in sorted(self.items()):
- p.chain_path(ui, self)
+ paths = []
+ for loc in base_locs:
+ loc = os.path.expandvars(loc)
+ loc = os.path.expanduser(loc)
+ if not hasscheme(loc) and not os.path.isabs(loc):
+ loc = os.path.normpath(os.path.join(root, loc))
+ p = path(ui, name, rawloc=loc, suboptions=sub_opts)
+ paths.append(p)
+ self[name] = paths
+
+ for name, old_paths in sorted(self.items()):
+ new_paths = []
+ for p in old_paths:
+ new_paths.extend(_chain_path(p, ui, self))
+ self[name] = new_paths
def getpath(self, ui, name, default=None):
"""Return a ``path`` from a string, falling back to default.
@@ -590,6 +683,8 @@
Returns None if ``name`` is not a registered path, a URI, or a local
path to a repo.
"""
+ msg = b'getpath is deprecated, use `get_*` functions from urlutil'
+ ui.deprecwarn(msg, b'6.0')
# Only fall back to default if no path was requested.
if name is None:
if not default:
@@ -598,7 +693,7 @@
default = (default,)
for k in default:
try:
- return self[k]
+ return self[k][0]
except KeyError:
continue
return None
@@ -607,16 +702,14 @@
# This may need to raise in the future.
if not name:
return None
-
- try:
- return self[name]
- except KeyError:
+ if name in self:
+ return self[name][0]
+ else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- return path(ui, None, rawloc=name)
- except ValueError:
+ path = try_path(ui, name)
+ if path is None:
raise error.RepoError(_(b'repository %s does not exist') % name)
+ return path.rawloc
_pathsuboptions = {}
@@ -649,7 +742,9 @@
u = url(value)
# Actually require a URL.
if not u.scheme:
- ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
+ msg = _(b'(paths.%s:pushurl not a URL; ignoring: "%s")\n')
+ msg %= (path.name, value)
+ ui.warn(msg)
return None
# Don't support the #foo syntax in the push URL to declare branch to
@@ -672,10 +767,54 @@
return value
+@pathsuboption(b'multi-urls', b'multi_urls')
+def multiurls_pathoption(ui, path, value):
+ res = stringutil.parsebool(value)
+ if res is None:
+ ui.warn(
+ _(b'(paths.%s:multi-urls not a boolean; ignoring)\n') % path.name
+ )
+ res = False
+ return res
+
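To show the intent of the new sub-option, here is a hedged sketch (the hgrc content and host names are hypothetical): when `multi-urls` is true, the value is split with `stringutil.parselist()` and each component becomes its own `path` object, so every entry in `ui.paths` is a list.

```python
# Hypothetical hgrc:
#
#   [paths]
#   default = https://example.com/repo ssh://backup.example.com/repo
#   default:multi-urls = yes
#
# With the changes above, ui.paths[b'default'] holds *two* path objects
# and the get_* helpers iterate over both:
from mercurial.utils import urlutil

for name, p in urlutil.list_paths(ui, b'default'):  # `ui` from a command
    ui.write(b'%s = %s\n' % (name, p.rawloc))
```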
+
+def _chain_path(base_path, ui, paths):
+ """return the result of "path://" logic applied on a given path"""
+ new_paths = []
+ if base_path.url.scheme != b'path':
+ new_paths.append(base_path)
+ else:
+ assert base_path.url.path is None
+ sub_paths = paths.get(base_path.url.host)
+ if sub_paths is None:
+ m = _(b'cannot use `%s`, "%s" is not a known path')
+ m %= (base_path.rawloc, base_path.url.host)
+ raise error.Abort(m)
+ for subpath in sub_paths:
+ path = base_path.copy()
+ if subpath.raw_url.scheme == b'path':
+ m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
+ m %= (path.rawloc, path.url.host)
+ raise error.Abort(m)
+ path.url = subpath.url
+ path.rawloc = subpath.rawloc
+ path.loc = subpath.loc
+ if path.branch is None:
+ path.branch = subpath.branch
+ else:
+ base = path.rawloc.rsplit(b'#', 1)[0]
+ path.rawloc = b'%s#%s' % (base, path.branch)
+ suboptions = subpath._all_sub_opts.copy()
+ suboptions.update(path._own_sub_opts)
+ path._apply_suboptions(ui, suboptions)
+ new_paths.append(path)
+ return new_paths
+
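A brief comment-only sketch of the `path://` indirection that `_chain_path` resolves (the path names below are made up):

```python
# Hypothetical hgrc:
#
#   [paths]
#   upstream = https://example.com/repo
#   stable = path://upstream#stable
#
# `stable` copies `upstream`'s URL and sub-options, then overlays its
# own sub-options and the `#stable` branch.  With multi-urls, `upstream`
# may expand to several URLs, hence the list returned by _chain_path().
```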
+
class path(object):
"""Represents an individual path and its configuration."""
- def __init__(self, ui, name, rawloc=None, suboptions=None):
+ def __init__(self, ui=None, name=None, rawloc=None, suboptions=None):
"""Construct a path from its config options.
``ui`` is the ``ui`` instance the path is coming from.
@@ -687,6 +826,13 @@
filesystem path with a .hg directory or b) a URL. If not,
``ValueError`` is raised.
"""
+ if ui is None:
+ # used in copy
+ assert name is None
+ assert rawloc is None
+ assert suboptions is None
+ return
+
if not rawloc:
raise ValueError(b'rawloc must be defined')
@@ -717,30 +863,15 @@
self._apply_suboptions(ui, sub_opts)
- def chain_path(self, ui, paths):
- if self.url.scheme == b'path':
- assert self.url.path is None
- try:
- subpath = paths[self.url.host]
- except KeyError:
- m = _(b'cannot use `%s`, "%s" is not a known path')
- m %= (self.rawloc, self.url.host)
- raise error.Abort(m)
- if subpath.raw_url.scheme == b'path':
- m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
- m %= (self.rawloc, self.url.host)
- raise error.Abort(m)
- self.url = subpath.url
- self.rawloc = subpath.rawloc
- self.loc = subpath.loc
- if self.branch is None:
- self.branch = subpath.branch
- else:
- base = self.rawloc.rsplit(b'#', 1)[0]
- self.rawloc = b'%s#%s' % (base, self.branch)
- suboptions = subpath._all_sub_opts.copy()
- suboptions.update(self._own_sub_opts)
- self._apply_suboptions(ui, suboptions)
+ def copy(self):
+ """make a copy of this path object"""
+ new = self.__class__()
+ for k, v in self.__dict__.items():
+ new_copy = getattr(v, 'copy', None)
+ if new_copy is not None:
+ v = new_copy()
+ new.__dict__[k] = v
+ return new
def _validate_path(self):
# When given a raw location but not a symbolic name, validate the
--- a/mercurial/verify.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/verify.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,13 +10,8 @@
import os
from .i18n import _
-from .node import (
- nullid,
- short,
-)
-from .utils import (
- stringutil,
-)
+from .node import short
+from .utils import stringutil
from . import (
error,
@@ -43,6 +38,23 @@
return f
+HINT_FNCACHE = _(
+ b'hint: run "hg debugrebuildfncache" to recover from corrupt fncache\n'
+)
+
+WARN_PARENT_DIR_UNKNOWN_REV = _(
+ b"parent-directory manifest refers to unknown revision %s"
+)
+
+WARN_UNKNOWN_COPY_SOURCE = _(
+ b"warning: copy source of '%s' not in parents of %s"
+)
+
+WARN_NULLID_COPY_SOURCE = _(
+ b"warning: %s@%s: copy source revision is nullid %s:%s\n"
+)
+
+
class verifier(object):
def __init__(self, repo, level=None):
self.repo = repo.unfiltered()
@@ -56,7 +68,7 @@
self.warnings = 0
self.havecl = len(repo.changelog) > 0
self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
- self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
+ self.revlogv1 = repo.changelog._format_version != revlog.REVLOGV0
self.lrugetctx = util.lrucachefunc(repo.unfiltered().__getitem__)
self.refersmf = False
self.fncachewarned = False
@@ -107,7 +119,7 @@
if d[1]:
self._err(None, _(b"index contains %d extra bytes") % d[1], name)
- if obj.version != revlog.REVLOGV0:
+ if obj._format_version != revlog.REVLOGV0:
if not self.revlogv1:
self._warn(_(b"warning: `%s' uses revlog format 1") % name)
elif self.revlogv1:
@@ -119,7 +131,7 @@
arguments are:
- obj: the source revlog
- i: the revision number
- - node: the revision node id
+ - node: the revision node id
- seen: nodes previously seen for this revlog
- linkrevs: [changelog-revisions] introducing "node"
- f: string label ("changelog", "manifest", or filename)
@@ -144,33 +156,25 @@
if f and len(linkrevs) > 1:
try:
# attempt to filter down to real linkrevs
- linkrevs = [
- l
- for l in linkrevs
- if self.lrugetctx(l)[f].filenode() == node
- ]
+ linkrevs = []
+ for lr in linkrevs:
+ if self.lrugetctx(lr)[f].filenode() == node:
+ linkrevs.append(lr)
except Exception:
pass
- self._warn(
- _(b" (expected %s)")
- % b" ".join(map(pycompat.bytestr, linkrevs))
- )
+ msg = _(b" (expected %s)")
+ msg %= b" ".join(map(pycompat.bytestr, linkrevs))
+ self._warn(msg)
lr = None # can't be trusted
try:
p1, p2 = obj.parents(node)
- if p1 not in seen and p1 != nullid:
- self._err(
- lr,
- _(b"unknown parent 1 %s of %s") % (short(p1), short(node)),
- f,
- )
- if p2 not in seen and p2 != nullid:
- self._err(
- lr,
- _(b"unknown parent 2 %s of %s") % (short(p2), short(node)),
- f,
- )
+ if p1 not in seen and p1 != self.repo.nullid:
+ msg = _(b"unknown parent 1 %s of %s") % (short(p1), short(node))
+ self._err(lr, msg, f)
+ if p2 not in seen and p2 != self.repo.nullid:
+ msg = _(b"unknown parent 2 %s of %s") % (short(p2), short(node))
+ self._err(lr, msg, f)
except Exception as inst:
self._exc(lr, _(b"checking parents of %s") % short(node), inst, f)
@@ -215,19 +219,13 @@
if self.warnings:
ui.warn(_(b"%d warnings encountered!\n") % self.warnings)
if self.fncachewarned:
- ui.warn(
- _(
- b'hint: run "hg debugrebuildfncache" to recover from '
- b'corrupt fncache\n'
- )
- )
+ ui.warn(HINT_FNCACHE)
if self.errors:
ui.warn(_(b"%d integrity errors encountered!\n") % self.errors)
if self.badrevs:
- ui.warn(
- _(b"(first damaged changeset appears to be %d)\n")
- % min(self.badrevs)
- )
+ msg = _(b"(first damaged changeset appears to be %d)\n")
+ msg %= min(self.badrevs)
+ ui.warn(msg)
return 1
return 0
@@ -267,7 +265,7 @@
try:
changes = cl.read(n)
- if changes[0] != nullid:
+ if changes[0] != self.repo.nullid:
mflinkrevs.setdefault(changes[0], []).append(i)
self.refersmf = True
for f in changes[3]:
@@ -331,7 +329,7 @@
if self.refersmf:
# Do not check manifest if there are only changelog entries with
# null manifests.
- self._checkrevlog(mf, label, 0)
+ self._checkrevlog(mf._revlog, label, 0)
progress = ui.makeprogress(
_(b'checking'), unit=_(b'manifests'), total=len(mf)
)
@@ -343,11 +341,8 @@
if n in mflinkrevs:
del mflinkrevs[n]
elif dir:
- self._err(
- lr,
- _(b"%s not in parent-directory manifest") % short(n),
- label,
- )
+ msg = _(b"%s not in parent-directory manifest") % short(n)
+ self._err(lr, msg, label)
else:
self._err(lr, _(b"%s not in changesets") % short(n), label)
@@ -362,9 +357,8 @@
if fl == b't':
if not match.visitdir(fullpath):
continue
- subdirnodes.setdefault(fullpath + b'/', {}).setdefault(
- fn, []
- ).append(lr)
+ sdn = subdirnodes.setdefault(fullpath + b'/', {})
+ sdn.setdefault(fn, []).append(lr)
else:
if not match(fullpath):
continue
@@ -378,12 +372,8 @@
# code (eg: hash verification, filename are ordered, etc.)
mfdelta = mfl.get(dir, n).read()
except Exception as inst:
- self._exc(
- lr,
- _(b"reading full manifest %s") % short(n),
- inst,
- label,
- )
+ msg = _(b"reading full manifest %s") % short(n)
+ self._exc(lr, msg, inst, label)
if not dir:
progress.complete()
@@ -394,22 +384,11 @@
changesetpairs = [(c, m) for m in mflinkrevs for c in mflinkrevs[m]]
for c, m in sorted(changesetpairs):
if dir:
- self._err(
- c,
- _(
- b"parent-directory manifest refers to unknown"
- b" revision %s"
- )
- % short(m),
- label,
- )
+ self._err(c, WARN_PARENT_DIR_UNKNOWN_REV % short(m), label)
else:
- self._err(
- c,
- _(b"changeset refers to unknown revision %s")
- % short(m),
- label,
- )
+ msg = _(b"changeset refers to unknown revision %s")
+ msg %= short(m)
+ self._err(c, msg, label)
if not dir and subdirnodes:
self.ui.status(_(b"checking directory manifests\n"))
@@ -488,7 +467,7 @@
state = {
# TODO this assumes revlog storage for changelog.
- b'expectedversion': self.repo.changelog.version & 0xFFFF,
+ b'expectedversion': self.repo.changelog._format_version,
b'skipflags': self.skipflags,
# experimental config: censor.policy
b'erroroncensored': ui.config(b'censor', b'policy') == b'abort',
@@ -523,9 +502,8 @@
storefiles.remove(ff)
except KeyError:
if self.warnorphanstorefiles:
- self._warn(
- _(b" warning: revlog '%s' not in fncache!") % ff
- )
+ msg = _(b" warning: revlog '%s' not in fncache!")
+ self._warn(msg % ff)
self.fncachewarned = True
if not len(fl) and (self.havecl or self.havemf):
@@ -544,11 +522,8 @@
if problem.warning:
self._warn(problem.warning)
elif problem.error:
- self._err(
- linkrev if linkrev is not None else lr,
- problem.error,
- f,
- )
+ linkrev_msg = linkrev if linkrev is not None else lr
+ self._err(linkrev_msg, problem.error, f)
else:
raise error.ProgrammingError(
b'problem instance does not set warning or error '
@@ -580,32 +555,15 @@
if lr is not None and ui.verbose:
ctx = lrugetctx(lr)
if not any(rp[0] in pctx for pctx in ctx.parents()):
- self._warn(
- _(
- b"warning: copy source of '%s' not"
- b" in parents of %s"
- )
- % (f, ctx)
- )
+ self._warn(WARN_UNKNOWN_COPY_SOURCE % (f, ctx))
fl2 = repo.file(rp[0])
if not len(fl2):
- self._err(
- lr,
- _(
- b"empty or missing copy source revlog "
- b"%s:%s"
- )
- % (rp[0], short(rp[1])),
- f,
- )
- elif rp[1] == nullid:
- ui.note(
- _(
- b"warning: %s@%s: copy source"
- b" revision is nullid %s:%s\n"
- )
- % (f, lr, rp[0], short(rp[1]))
- )
+ m = _(b"empty or missing copy source revlog %s:%s")
+ self._err(lr, m % (rp[0], short(rp[1])), f)
+ elif rp[1] == self.repo.nullid:
+ msg = WARN_NULLID_COPY_SOURCE
+ msg %= (f, lr, rp[0], short(rp[1]))
+ ui.note(msg)
else:
fl2.rev(rp[1])
except Exception as inst:
@@ -617,12 +575,8 @@
if f in filenodes:
fns = [(v, k) for k, v in pycompat.iteritems(filenodes[f])]
for lr, node in sorted(fns):
- self._err(
- lr,
- _(b"manifest refers to unknown revision %s")
- % short(node),
- f,
- )
+ msg = _(b"manifest refers to unknown revision %s")
+ self._err(lr, msg % short(node), f)
progress.complete()
if self.warnorphanstorefiles:
--- a/mercurial/vfs.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/vfs.py Mon Jun 07 17:10:35 2021 -0400
@@ -307,7 +307,7 @@
# multiple instances puts us at risk of running out of file descriptors
# only allow to use backgroundfilecloser when in main thread.
if not isinstance(
- threading.currentThread(),
+ threading.current_thread(),
threading._MainThread, # pytype: disable=module-attr
):
yield
@@ -483,7 +483,7 @@
fp = checkambigatclosing(fp)
if backgroundclose and isinstance(
- threading.currentThread(),
+ threading.current_thread(),
threading._MainThread, # pytype: disable=module-attr
):
if (
--- a/mercurial/wireprotov1server.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/wireprotov1server.py Mon Jun 07 17:10:35 2021 -0400
@@ -11,10 +11,7 @@
import os
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from .pycompat import getattr
from . import (
@@ -470,7 +467,7 @@
clheads = set(repo.changelog.heads())
heads = set(opts.get(b'heads', set()))
common = set(opts.get(b'common', set()))
- common.discard(nullid)
+ common.discard(repo.nullid)
if (
repo.ui.configbool(b'server', b'pullbundle')
and b'partial-pull' in proto.getprotocaps()
--- a/mercurial/wireprotov2server.py Sun Jun 06 01:24:30 2021 +0200
+++ b/mercurial/wireprotov2server.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,10 +10,7 @@
import contextlib
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from . import (
discovery,
encoding,
@@ -950,7 +947,7 @@
if spec[b'roots']:
common = [n for n in spec[b'roots'] if clhasnode(n)]
else:
- common = [nullid]
+ common = [repo.nullid]
for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
if n not in seen:
--- a/relnotes/next Sun Jun 06 01:24:30 2021 +0200
+++ b/relnotes/next Mon Jun 07 17:10:35 2021 -0400
@@ -1,5 +1,8 @@
== New Features ==
-
+
+ * `hg config` now has a `--source` option to show where each
+ configuration value comes from.
+
== Default Format Change ==
--- a/rust/Cargo.lock Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/Cargo.lock Mon Jun 07 17:10:35 2021 -0400
@@ -57,6 +57,15 @@
]
[[package]]
+name = "block-buffer"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
name = "byteorder"
version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -64,9 +73,9 @@
[[package]]
name = "bytes-cast"
-version = "0.1.0"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3196ba300c7bc9282a4331e878496cb3e9603a898a8f1446601317163e16ca52"
+checksum = "0d434f9a4ecbe987e7ccfda7274b6f82ea52c9b63742565a65cb5e8ba0f2c452"
dependencies = [
"bytes-cast-derive",
]
@@ -138,6 +147,15 @@
checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
[[package]]
+name = "cpufeatures"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8"
+dependencies = [
+ "libc",
+]
+
+[[package]]
name = "cpython"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -254,6 +272,15 @@
checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
[[package]]
+name = "digest"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -308,16 +335,14 @@
]
[[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
+name = "generic-array"
+version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-
-[[package]]
-name = "gcc"
-version = "0.3.55"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
+dependencies = [
+ "typenum",
+ "version_check",
+]
[[package]]
name = "getrandom"
@@ -358,18 +383,19 @@
"format-bytes",
"home",
"im-rc",
+ "itertools",
"lazy_static",
"log",
"memmap",
"micro-timer",
"pretty_assertions",
- "rand 0.7.3",
+ "rand",
"rand_distr",
"rand_pcg",
"rayon",
"regex",
- "rust-crypto",
"same-file",
+ "sha-1",
"tempfile",
"twox-hash",
"zstd",
@@ -412,7 +438,7 @@
checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
dependencies = [
"bitmaps",
- "rand_core 0.5.1",
+ "rand_core",
"rand_xoshiro",
"sized-chunks",
"typenum",
@@ -562,6 +588,12 @@
]
[[package]]
+name = "opaque-debug"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
+
+[[package]]
name = "output_vt100"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -665,29 +697,6 @@
[[package]]
name = "rand"
-version = "0.3.23"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
-dependencies = [
- "libc",
- "rand 0.4.6",
-]
-
-[[package]]
-name = "rand"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-dependencies = [
- "fuchsia-cprng",
- "libc",
- "rand_core 0.3.1",
- "rdrand",
- "winapi",
-]
-
-[[package]]
-name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
@@ -695,7 +704,7 @@
"getrandom",
"libc",
"rand_chacha",
- "rand_core 0.5.1",
+ "rand_core",
"rand_hc",
]
@@ -706,26 +715,11 @@
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
name = "rand_core"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-dependencies = [
- "rand_core 0.4.2",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-
-[[package]]
-name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
@@ -739,7 +733,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
dependencies = [
- "rand 0.7.3",
+ "rand",
]
[[package]]
@@ -748,7 +742,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
@@ -757,7 +751,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
dependencies = [
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
@@ -766,7 +760,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
dependencies = [
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
@@ -795,15 +789,6 @@
]
[[package]]
-name = "rdrand"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-dependencies = [
- "rand_core 0.3.1",
-]
-
-[[package]]
name = "redox_syscall"
version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -854,25 +839,6 @@
]
[[package]]
-name = "rust-crypto"
-version = "0.2.36"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
-dependencies = [
- "gcc",
- "libc",
- "rand 0.3.23",
- "rustc-serialize",
- "time",
-]
-
-[[package]]
-name = "rustc-serialize"
-version = "0.3.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
-
-[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -888,6 +854,19 @@
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
+name = "sha-1"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16"
+dependencies = [
+ "block-buffer",
+ "cfg-if 1.0.0",
+ "cpufeatures",
+ "digest",
+ "opaque-debug",
+]
+
+[[package]]
name = "sized-chunks"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -928,7 +907,7 @@
dependencies = [
"cfg-if 0.1.10",
"libc",
- "rand 0.7.3",
+ "rand",
"redox_syscall",
"remove_dir_all",
"winapi",
@@ -979,7 +958,7 @@
checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
dependencies = [
"cfg-if 0.1.10",
- "rand 0.7.3",
+ "rand",
"static_assertions",
]
--- a/rust/hg-core/Cargo.toml Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/Cargo.toml Mon Jun 07 17:10:35 2021 -0400
@@ -9,25 +9,27 @@
name = "hg"
[dependencies]
-bytes-cast = "0.1"
+bytes-cast = "0.2"
byteorder = "1.3.4"
derive_more = "0.99"
home = "0.5"
im-rc = "15.0.*"
+itertools = "0.9"
lazy_static = "1.4.0"
rand = "0.7.3"
rand_pcg = "0.2.1"
rand_distr = "0.2.2"
rayon = "1.3.0"
regex = "1.3.9"
+sha-1 = "0.9.6"
twox-hash = "1.5.0"
same-file = "1.0.6"
+tempfile = "3.1.0"
crossbeam-channel = "0.4"
micro-timer = "0.3.0"
log = "0.4.8"
memmap = "0.7.0"
zstd = "0.5.3"
-rust-crypto = "0.2.36"
format-bytes = "0.2.2"
# We don't use the `miniz-oxide` backend to not change rhg benchmarks and until
@@ -40,4 +42,3 @@
[dev-dependencies]
clap = "*"
pretty_assertions = "0.6.1"
-tempfile = "3.1.0"
--- a/rust/hg-core/src/config/config.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/config/config.rs Mon Jun 07 17:10:35 2021 -0400
@@ -361,10 +361,11 @@
///
/// This is appropriate for new configuration keys. The value syntax is
/// **not** the same as most existing list-valued config, which has Python
- /// parsing implemented in `parselist()` in `mercurial/config.py`.
- /// Faithfully porting that parsing algorithm to Rust (including behavior
- /// that are arguably bugs) turned out to be non-trivial and hasn’t been
- /// completed as of this writing.
+ /// parsing implemented in `parselist()` in
+ /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing
+ /// algorithm to Rust (including behaviors that are arguably bugs)
+ /// turned out to be non-trivial and hasn’t been completed as of this
+ /// writing.
///
/// Instead, the "simple" syntax is: split on comma, then trim leading and
/// trailing whitespace of each component. Quotes or backslashes are not
--- a/rust/hg-core/src/copy_tracing/tests_support.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/copy_tracing/tests_support.rs Mon Jun 07 17:10:35 2021 -0400
@@ -123,7 +123,10 @@
),
)
})
- .collect::<OrdMap<_, _>>()
+ .collect::<OrdMap<
+ String,
+ (Revision, Option<String>, OrdSet<Revision>)
+ >>()
}
};
}
--- a/rust/hg-core/src/dirstate.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/dirstate.rs Mon Jun 07 17:10:35 2021 -0400
@@ -5,11 +5,13 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::errors::HgError;
+use crate::revlog::node::NULL_NODE;
use crate::revlog::Node;
-use crate::{utils::hg_path::HgPathBuf, FastHashMap};
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::FastHashMap;
use bytes_cast::{unaligned, BytesCast};
-use std::collections::hash_map;
use std::convert::TryFrom;
pub mod dirs_multiset;
@@ -24,6 +26,13 @@
pub p2: Node,
}
+impl DirstateParents {
+ pub const NULL: Self = Self {
+ p1: NULL_NODE,
+ p2: NULL_NODE,
+ };
+}
+
/// The C implementation uses all signed types. This will be an issue
/// either when 4GB+ source files are commonplace or in 2038, whichever
/// comes first.
@@ -35,6 +44,29 @@
pub size: i32,
}
+impl DirstateEntry {
+ pub fn is_non_normal(&self) -> bool {
+ self.state != EntryState::Normal || self.mtime == MTIME_UNSET
+ }
+
+ pub fn is_from_other_parent(&self) -> bool {
+ self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
+ }
+
+ // TODO: other platforms
+ #[cfg(unix)]
+ pub fn mode_changed(
+ &self,
+ filesystem_metadata: &std::fs::Metadata,
+ ) -> bool {
+ use std::os::unix::fs::MetadataExt;
+ const EXEC_BIT_MASK: u32 = 0o100;
+ let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
+ let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
+ dirstate_exec_bit != fs_exec_bit
+ }
+}
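A rough Python equivalent of the exec-bit check above, as a sketch (not an hg API; it assumes only the owner-execute bit 0o100 matters, matching the Rust constant):

```python
import os

EXEC_BIT_MASK = 0o100

def mode_changed(dirstate_mode, filesystem_path):
    # Compare only the owner-execute bit between the mode recorded in
    # the dirstate and the mode currently on disk.
    fs_mode = os.stat(filesystem_path).st_mode
    return (dirstate_mode & EXEC_BIT_MASK) != (fs_mode & EXEC_BIT_MASK)
```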
+
#[derive(BytesCast)]
#[repr(C)]
struct RawEntry {
@@ -45,16 +77,27 @@
length: unaligned::I32Be,
}
+const MTIME_UNSET: i32 = -1;
+
/// A `DirstateEntry` with a size of `-2` means that it was merged from the
/// other parent. This allows revert to pick the right status back during a
/// merge.
pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
-pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
+pub type StateMapIter<'a> = Box<
+ dyn Iterator<
+ Item = Result<(&'a HgPath, DirstateEntry), DirstateV2ParseError>,
+ > + Send
+ + 'a,
+>;
pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
-pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
+pub type CopyMapIter<'a> = Box<
+ dyn Iterator<Item = Result<(&'a HgPath, &'a HgPath), DirstateV2ParseError>>
+ + Send
+ + 'a,
+>;
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EntryState {
@@ -65,6 +108,16 @@
Unknown,
}
+impl EntryState {
+ pub fn is_tracked(self) -> bool {
+ use EntryState::*;
+ match self {
+ Normal | Added | Merged => true,
+ Removed | Unknown => false,
+ }
+ }
+}
+
impl TryFrom<u8> for EntryState {
type Error = HgError;
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs Mon Jun 07 17:10:35 2021 -0400
@@ -8,13 +8,14 @@
//! A multiset of directory names.
//!
//! Used to count the references to directories in a manifest or dirstate.
+use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::{
dirstate::EntryState,
utils::{
files,
hg_path::{HgPath, HgPathBuf, HgPathError},
},
- DirstateEntry, DirstateMapError, FastHashMap, StateMap,
+ DirstateEntry, DirstateError, DirstateMapError, FastHashMap,
};
use std::collections::{hash_map, hash_map::Entry, HashMap, HashSet};
@@ -30,17 +31,25 @@
/// Initializes the multiset from a dirstate.
///
/// If `skip_state` is provided, skips dirstate entries with equal state.
- pub fn from_dirstate(
- dirstate: &StateMap,
+ pub fn from_dirstate<I, P>(
+ dirstate: I,
skip_state: Option<EntryState>,
- ) -> Result<Self, DirstateMapError> {
+ ) -> Result<Self, DirstateError>
+ where
+ I: IntoIterator<
+ Item = Result<(P, DirstateEntry), DirstateV2ParseError>,
+ >,
+ P: AsRef<HgPath>,
+ {
let mut multiset = DirsMultiset {
inner: FastHashMap::default(),
};
- for (filename, DirstateEntry { state, .. }) in dirstate.iter() {
+ for item in dirstate {
+ let (filename, entry) = item?;
+ let filename = filename.as_ref();
// This `if` is optimized out of the loop
if let Some(skip) = skip_state {
- if skip != *state {
+ if skip != entry.state {
multiset.add_path(filename)?;
}
} else {
@@ -207,6 +216,7 @@
#[cfg(test)]
mod tests {
use super::*;
+ use crate::StateMap;
#[test]
fn test_delete_path_path_not_found() {
@@ -331,8 +341,11 @@
};
assert_eq!(expected, new);
- let new =
- DirsMultiset::from_dirstate(&StateMap::default(), None).unwrap();
+ let new = DirsMultiset::from_dirstate(
+ StateMap::default().into_iter().map(Ok),
+ None,
+ )
+ .unwrap();
let expected = DirsMultiset {
inner: FastHashMap::default(),
};
@@ -356,26 +369,23 @@
};
assert_eq!(expected, new);
- let input_map = ["b/x", "a/c", "a/d/x"]
- .iter()
- .map(|f| {
- (
- HgPathBuf::from_bytes(f.as_bytes()),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0,
- },
- )
- })
- .collect();
+ let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| {
+ Ok((
+ HgPathBuf::from_bytes(f.as_bytes()),
+ DirstateEntry {
+ state: EntryState::Normal,
+ mode: 0,
+ mtime: 0,
+ size: 0,
+ },
+ ))
+ });
let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)]
.iter()
.map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
.collect();
- let new = DirsMultiset::from_dirstate(&input_map, None).unwrap();
+ let new = DirsMultiset::from_dirstate(input_map, None).unwrap();
let expected = DirsMultiset {
inner: expected_inner,
};
@@ -392,7 +402,7 @@
]
.iter()
.map(|(f, state)| {
- (
+ Ok((
HgPathBuf::from_bytes(f.as_bytes()),
DirstateEntry {
state: *state,
@@ -400,9 +410,8 @@
mtime: 0,
size: 0,
},
- )
- })
- .collect();
+ ))
+ });
// "a" incremented with "a/c" and "a/d/"
let expected_inner = [("", 1), ("a", 2)]
@@ -411,7 +420,7 @@
.collect();
let new =
- DirsMultiset::from_dirstate(&input_map, Some(EntryState::Normal))
+ DirsMultiset::from_dirstate(input_map, Some(EntryState::Normal))
.unwrap();
let expected = DirsMultiset {
inner: expected_inner,
--- a/rust/hg-core/src/dirstate/dirstate_map.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs Mon Jun 07 17:10:35 2021 -0400
@@ -5,40 +5,27 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::errors::HgError;
-use crate::revlog::node::NULL_NODE;
+use crate::dirstate::parsers::Timestamp;
use crate::{
- dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
+ dirstate::EntryState,
pack_dirstate, parse_dirstate,
- utils::{
- files::normalize_case,
- hg_path::{HgPath, HgPathBuf},
- },
- CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
- DirstateParents, FastHashMap, StateMap,
+ utils::hg_path::{HgPath, HgPathBuf},
+ CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
+ StateMap,
};
use micro_timer::timed;
use std::collections::HashSet;
-use std::convert::TryInto;
use std::iter::FromIterator;
use std::ops::Deref;
-use std::time::Duration;
-
-pub type FileFoldMap = FastHashMap<HgPathBuf, HgPathBuf>;
-
-const MTIME_UNSET: i32 = -1;
#[derive(Default)]
pub struct DirstateMap {
state_map: StateMap,
pub copy_map: CopyMap,
- file_fold_map: Option<FileFoldMap>,
pub dirs: Option<DirsMultiset>,
pub all_dirs: Option<DirsMultiset>,
non_normal_set: Option<HashSet<HgPathBuf>>,
other_parent_set: Option<HashSet<HgPathBuf>>,
- parents: Option<DirstateParents>,
- dirty_parents: bool,
}
/// Should only really be used in python interface code, for clarity
@@ -69,13 +56,8 @@
pub fn clear(&mut self) {
self.state_map = StateMap::default();
self.copy_map.clear();
- self.file_fold_map = None;
self.non_normal_set = None;
self.other_parent_set = None;
- self.set_parents(&DirstateParents {
- p1: NULL_NODE,
- p2: NULL_NODE,
- })
}
/// Add a tracked file to the dirstate
@@ -84,7 +66,7 @@
filename: &HgPath,
old_state: EntryState,
entry: DirstateEntry,
- ) -> Result<(), DirstateMapError> {
+ ) -> Result<(), DirstateError> {
if old_state == EntryState::Unknown || old_state == EntryState::Removed
{
if let Some(ref mut dirs) = self.dirs {
@@ -98,13 +80,13 @@
}
self.state_map.insert(filename.to_owned(), entry.to_owned());
- if entry.state != EntryState::Normal || entry.mtime == MTIME_UNSET {
+ if entry.is_non_normal() {
self.get_non_normal_other_parent_entries()
.0
.insert(filename.to_owned());
}
- if entry.size == SIZE_FROM_OTHER_PARENT {
+ if entry.is_from_other_parent() {
self.get_non_normal_other_parent_entries()
.1
.insert(filename.to_owned());
@@ -122,7 +104,7 @@
filename: &HgPath,
old_state: EntryState,
size: i32,
- ) -> Result<(), DirstateMapError> {
+ ) -> Result<(), DirstateError> {
if old_state != EntryState::Unknown && old_state != EntryState::Removed
{
if let Some(ref mut dirs) = self.dirs {
@@ -135,9 +117,6 @@
}
}
- if let Some(ref mut file_fold_map) = self.file_fold_map {
- file_fold_map.remove(&normalize_case(filename));
- }
self.state_map.insert(
filename.to_owned(),
DirstateEntry {
@@ -159,7 +138,7 @@
&mut self,
filename: &HgPath,
old_state: EntryState,
- ) -> Result<bool, DirstateMapError> {
+ ) -> Result<bool, DirstateError> {
let exists = self.state_map.remove(filename).is_some();
if exists {
@@ -172,9 +151,6 @@
all_dirs.delete_path(filename)?;
}
}
- if let Some(ref mut file_fold_map) = self.file_fold_map {
- file_fold_map.remove(&normalize_case(filename));
- }
self.get_non_normal_other_parent_entries()
.0
.remove(filename);
@@ -188,32 +164,22 @@
now: i32,
) {
for filename in filenames {
- let mut changed = false;
if let Some(entry) = self.state_map.get_mut(&filename) {
- if entry.state == EntryState::Normal && entry.mtime == now {
- changed = true;
- *entry = DirstateEntry {
- mtime: MTIME_UNSET,
- ..*entry
- };
+ if entry.clear_ambiguous_mtime(now) {
+ self.get_non_normal_other_parent_entries()
+ .0
+ .insert(filename.to_owned());
}
}
- if changed {
- self.get_non_normal_other_parent_entries()
- .0
- .insert(filename.to_owned());
- }
}
}
- pub fn non_normal_entries_remove(
- &mut self,
- key: impl AsRef<HgPath>,
- ) -> bool {
+ pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
self.get_non_normal_other_parent_entries()
.0
- .remove(key.as_ref())
+ .remove(key.as_ref());
}
+
pub fn non_normal_entries_union(
&mut self,
other: HashSet<HgPathBuf>,
@@ -264,18 +230,11 @@
let mut non_normal = HashSet::new();
let mut other_parent = HashSet::new();
- for (
- filename,
- DirstateEntry {
- state, size, mtime, ..
- },
- ) in self.state_map.iter()
- {
- if *state != EntryState::Normal || *mtime == MTIME_UNSET {
+ for (filename, entry) in self.state_map.iter() {
+ if entry.is_non_normal() {
non_normal.insert(filename.to_owned());
}
- if *state == EntryState::Normal && *size == SIZE_FROM_OTHER_PARENT
- {
+ if entry.is_from_other_parent() {
other_parent.insert(filename.to_owned());
}
}
@@ -287,18 +246,20 @@
/// emulate a Python lazy property, but it is ugly and unidiomatic.
/// TODO One day, rewriting this struct using the typestate might be a
/// good idea.
- pub fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+ pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
if self.all_dirs.is_none() {
- self.all_dirs =
- Some(DirsMultiset::from_dirstate(&self.state_map, None)?);
+ self.all_dirs = Some(DirsMultiset::from_dirstate(
+ self.state_map.iter().map(|(k, v)| Ok((k, *v))),
+ None,
+ )?);
}
Ok(())
}
- pub fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+ pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
if self.dirs.is_none() {
self.dirs = Some(DirsMultiset::from_dirstate(
- &self.state_map,
+ self.state_map.iter().map(|(k, v)| Ok((k, *v))),
Some(EntryState::Removed),
)?);
}
@@ -308,7 +269,7 @@
pub fn has_tracked_dir(
&mut self,
directory: &HgPath,
- ) -> Result<bool, DirstateMapError> {
+ ) -> Result<bool, DirstateError> {
self.set_dirs()?;
Ok(self.dirs.as_ref().unwrap().contains(directory))
}
@@ -316,51 +277,16 @@
pub fn has_dir(
&mut self,
directory: &HgPath,
- ) -> Result<bool, DirstateMapError> {
+ ) -> Result<bool, DirstateError> {
self.set_all_dirs()?;
Ok(self.all_dirs.as_ref().unwrap().contains(directory))
}
- pub fn parents(
+ #[timed]
+ pub fn read(
&mut self,
file_contents: &[u8],
- ) -> Result<&DirstateParents, DirstateError> {
- if let Some(ref parents) = self.parents {
- return Ok(parents);
- }
- let parents;
- if file_contents.len() == PARENT_SIZE * 2 {
- parents = DirstateParents {
- p1: file_contents[..PARENT_SIZE].try_into().unwrap(),
- p2: file_contents[PARENT_SIZE..PARENT_SIZE * 2]
- .try_into()
- .unwrap(),
- };
- } else if file_contents.is_empty() {
- parents = DirstateParents {
- p1: NULL_NODE,
- p2: NULL_NODE,
- };
- } else {
- return Err(
- HgError::corrupted("Dirstate appears to be damaged").into()
- );
- }
-
- self.parents = Some(parents);
- Ok(self.parents.as_ref().unwrap())
- }
-
- pub fn set_parents(&mut self, parents: &DirstateParents) {
- self.parents = Some(parents.clone());
- self.dirty_parents = true;
- }
-
- #[timed]
- pub fn read<'a>(
- &mut self,
- file_contents: &'a [u8],
- ) -> Result<Option<&'a DirstateParents>, DirstateError> {
+ ) -> Result<Option<DirstateParents>, DirstateError> {
if file_contents.is_empty() {
return Ok(None);
}
@@ -376,42 +302,20 @@
.into_iter()
.map(|(path, copy)| (path.to_owned(), copy.to_owned())),
);
-
- if !self.dirty_parents {
- self.set_parents(&parents);
- }
-
- Ok(Some(parents))
+ Ok(Some(parents.clone()))
}
pub fn pack(
&mut self,
parents: DirstateParents,
- now: Duration,
+ now: Timestamp,
) -> Result<Vec<u8>, DirstateError> {
let packed =
pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
- self.dirty_parents = false;
-
self.set_non_normal_other_parent_entries(true);
Ok(packed)
}
- pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
- if let Some(ref file_fold_map) = self.file_fold_map {
- return file_fold_map;
- }
- let mut new_file_fold_map = FileFoldMap::default();
-
- for (filename, DirstateEntry { state, .. }) in self.state_map.iter() {
- if *state != EntryState::Removed {
- new_file_fold_map
- .insert(normalize_case(&filename), filename.to_owned());
- }
- }
- self.file_fold_map = Some(new_file_fold_map);
- self.file_fold_map.as_ref().unwrap()
- }
}
#[cfg(test)]
--- a/rust/hg-core/src/dirstate/parsers.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/dirstate/parsers.rs Mon Jun 07 17:10:35 2021 -0400
@@ -13,7 +13,6 @@
use bytes_cast::BytesCast;
use micro_timer::timed;
use std::convert::{TryFrom, TryInto};
-use std::time::Duration;
/// Parents are stored in the dirstate as byte hashes.
pub const PARENT_SIZE: usize = 20;
@@ -35,10 +34,28 @@
}
#[timed]
-pub fn parse_dirstate(mut contents: &[u8]) -> Result<ParseResult, HgError> {
+pub fn parse_dirstate(contents: &[u8]) -> Result<ParseResult, HgError> {
let mut copies = Vec::new();
let mut entries = Vec::new();
+ let parents =
+ parse_dirstate_entries(contents, |path, entry, copy_source| {
+ if let Some(source) = copy_source {
+ copies.push((path, source));
+ }
+ entries.push((path, *entry));
+ Ok(())
+ })?;
+ Ok((parents, entries, copies))
+}
+pub fn parse_dirstate_entries<'a>(
+ mut contents: &'a [u8],
+ mut each_entry: impl FnMut(
+ &'a HgPath,
+ &DirstateEntry,
+ Option<&'a HgPath>,
+ ) -> Result<(), HgError>,
+) -> Result<&'a DirstateParents, HgError> {
let (parents, rest) = DirstateParents::from_bytes(contents)
.map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
contents = rest;
@@ -62,34 +79,98 @@
let path = HgPath::new(
iter.next().expect("splitn always yields at least one item"),
);
- if let Some(copy_source) = iter.next() {
- copies.push((path, HgPath::new(copy_source)));
- }
+ let copy_source = iter.next().map(HgPath::new);
+ each_entry(path, &entry, copy_source)?;
- entries.push((path, entry));
contents = rest;
}
- Ok((parents, entries, copies))
+ Ok(parents)
+}
+
+fn packed_filename_and_copy_source_size(
+ filename: &HgPath,
+ copy_source: Option<&HgPath>,
+) -> usize {
+ filename.len()
+ + if let Some(source) = copy_source {
+ b"\0".len() + source.len()
+ } else {
+ 0
+ }
+}
+
+pub fn packed_entry_size(
+ filename: &HgPath,
+ copy_source: Option<&HgPath>,
+) -> usize {
+ MIN_ENTRY_SIZE
+ + packed_filename_and_copy_source_size(filename, copy_source)
}
-/// `now` is the duration in seconds since the Unix epoch
+pub fn pack_entry(
+ filename: &HgPath,
+ entry: &DirstateEntry,
+ copy_source: Option<&HgPath>,
+ packed: &mut Vec<u8>,
+) {
+ let length = packed_filename_and_copy_source_size(filename, copy_source);
+
+ // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
+ packed.write_u8(entry.state.into()).unwrap();
+ packed.write_i32::<BigEndian>(entry.mode).unwrap();
+ packed.write_i32::<BigEndian>(entry.size).unwrap();
+ packed.write_i32::<BigEndian>(entry.mtime).unwrap();
+ packed.write_i32::<BigEndian>(length as i32).unwrap();
+ packed.extend(filename.as_bytes());
+ if let Some(source) = copy_source {
+ packed.push(b'\0');
+ packed.extend(source.as_bytes());
+ }
+}
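For reference, `pack_entry` writes the classic dirstate-v1 record; a minimal Python sketch of the same encoding (assuming big-endian fields and a one-byte state such as b'n', as in the Rust code):

```python
import struct

def pack_entry(state, mode, size, mtime, filename, copy_source=None):
    # state (1 byte), then mode/size/mtime/length as 4-byte big-endian
    # ints, followed by "filename" or "filename\0copysource".
    name = filename if copy_source is None else filename + b'\0' + copy_source
    return struct.pack('>cllll', state, mode, size, mtime, len(name)) + name
```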
+
+/// Seconds since the Unix epoch
+pub struct Timestamp(pub i64);
+
+impl DirstateEntry {
+ pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
+ self.state == EntryState::Normal && self.mtime == now
+ }
+
+ pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
+ let ambiguous = self.mtime_is_ambiguous(now);
+ if ambiguous {
+ // The file was last modified "simultaneously" with the current
+ // write to dirstate (i.e. within the same second for file-
+ // systems with a granularity of 1 sec). This commonly happens
+ // for at least a couple of files on 'update'.
+ // The user could change the file without changing its size
+ // within the same second. Invalidate the file's mtime in
+ // dirstate, forcing future 'status' calls to compare the
+ // contents of the file if the size is the same. This prevents
+ // mistakenly treating such files as clean.
+ self.clear_mtime()
+ }
+ ambiguous
+ }
+
+ pub fn clear_mtime(&mut self) {
+ self.mtime = -1;
+ }
+}
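The ambiguity rule factored out above boils down to this small sketch (plain Python, not the hg API):

```python
def clear_ambiguous_mtime(entry, now):
    # A normal file whose recorded mtime equals the dirstate-write
    # timestamp may still change within the same second, so clear the
    # mtime (-1) to force a later `status` to compare file contents.
    ambiguous = entry['state'] == 'normal' and entry['mtime'] == now
    if ambiguous:
        entry['mtime'] = -1
    return ambiguous
```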
+
pub fn pack_dirstate(
state_map: &mut StateMap,
copy_map: &CopyMap,
parents: DirstateParents,
- now: Duration,
+ now: Timestamp,
) -> Result<Vec<u8>, HgError> {
// TODO move away from i32 before 2038.
- let now: i32 = now.as_secs().try_into().expect("time overflow");
+ let now: i32 = now.0.try_into().expect("time overflow");
let expected_size: usize = state_map
.iter()
.map(|(filename, _)| {
- let mut length = MIN_ENTRY_SIZE + filename.len();
- if let Some(copy) = copy_map.get(filename) {
- length += copy.len() + 1;
- }
- length
+ packed_entry_size(filename, copy_map.get(filename).map(|p| &**p))
})
.sum();
let expected_size = expected_size + PARENT_SIZE * 2;
@@ -100,39 +181,13 @@
packed.extend(parents.p2.as_bytes());
for (filename, entry) in state_map.iter_mut() {
- let new_filename = filename.to_owned();
- let mut new_mtime: i32 = entry.mtime;
- if entry.state == EntryState::Normal && entry.mtime == now {
- // The file was last modified "simultaneously" with the current
- // write to dirstate (i.e. within the same second for file-
- // systems with a granularity of 1 sec). This commonly happens
- // for at least a couple of files on 'update'.
- // The user could change the file without changing its size
- // within the same second. Invalidate the file's mtime in
- // dirstate, forcing future 'status' calls to compare the
- // contents of the file if the size is the same. This prevents
- // mistakenly treating such files as clean.
- new_mtime = -1;
- *entry = DirstateEntry {
- mtime: new_mtime,
- ..*entry
- };
- }
- let mut new_filename = new_filename.into_vec();
- if let Some(copy) = copy_map.get(filename) {
- new_filename.push(b'\0');
- new_filename.extend(copy.bytes());
- }
-
- // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
- packed.write_u8(entry.state.into()).unwrap();
- packed.write_i32::<BigEndian>(entry.mode).unwrap();
- packed.write_i32::<BigEndian>(entry.size).unwrap();
- packed.write_i32::<BigEndian>(new_mtime).unwrap();
- packed
- .write_i32::<BigEndian>(new_filename.len() as i32)
- .unwrap();
- packed.extend(new_filename)
+ entry.clear_ambiguous_mtime(now);
+ pack_entry(
+ filename,
+ entry,
+ copy_map.get(filename).map(|p| &**p),
+ &mut packed,
+ )
}
if packed.len() != expected_size {
@@ -160,7 +215,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let expected = b"1234567891011121314100000000000000000000".to_vec();
assert_eq!(
@@ -191,7 +246,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let expected = [
49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
@@ -231,7 +286,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let expected = [
49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
@@ -271,7 +326,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let result =
pack_dirstate(&mut state_map, ©map, parents.clone(), now)
.unwrap();
@@ -349,7 +404,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let result =
pack_dirstate(&mut state_map, ©map, parents.clone(), now)
.unwrap();
@@ -395,7 +450,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let result =
pack_dirstate(&mut state_map, ©map, parents.clone(), now)
.unwrap();
--- a/rust/hg-core/src/dirstate/status.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/dirstate/status.rs Mon Jun 07 17:10:35 2021 -0400
@@ -9,6 +9,7 @@
//! It is currently missing a lot of functionality compared to the Python one
//! and will only be triggered in narrow cases.
+use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::utils::path_auditor::PathAuditor;
use crate::{
dirstate::SIZE_FROM_OTHER_PARENT,
@@ -95,9 +96,10 @@
type IoResult<T> = std::io::Result<T>;
-/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait, 'static>`, so add
+/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add
/// an explicit lifetime here to not fight `'static` bounds "out of nowhere".
-type IgnoreFnType<'a> = Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;
+pub type IgnoreFnType<'a> =
+ Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;
/// We have a good mix of owned (from directory traversal) and borrowed (from
/// the dirstate/explicit) paths, this comes up a lot.
@@ -254,18 +256,47 @@
pub collect_traversed_dirs: bool,
}
-#[derive(Debug)]
+#[derive(Debug, Default)]
pub struct DirstateStatus<'a> {
+ /// Tracked files whose contents have changed since the parent revision
pub modified: Vec<HgPathCow<'a>>,
+
+ /// Newly-tracked files that were not present in the parent
pub added: Vec<HgPathCow<'a>>,
+
+ /// Previously-tracked files that have been (re)moved with an hg command
pub removed: Vec<HgPathCow<'a>>,
+
+ /// (Still) tracked files that are missing, (re)moved with a non-hg
+ /// command
pub deleted: Vec<HgPathCow<'a>>,
+
+ /// Tracked files that are up to date with the parent.
+ /// Only populated if `StatusOptions::list_clean` is true.
pub clean: Vec<HgPathCow<'a>>,
+
+ /// Files in the working directory that are ignored with `.hgignore`.
+ /// Only populated if `StatusOptions::list_ignored` is true.
pub ignored: Vec<HgPathCow<'a>>,
+
+ /// Files in the working directory that are neither tracked nor ignored.
+ /// Only populated if `StatusOptions::list_unknown` is true.
pub unknown: Vec<HgPathCow<'a>>,
+
+ /// Files that were explicitly matched but cannot be found/accessed
pub bad: Vec<(HgPathCow<'a>, BadMatch)>,
+
+ /// Either clean or modified, but we can’t tell from filesystem metadata
+ /// alone. The file contents need to be read and compared with that in
+ /// the parent.
+ pub unsure: Vec<HgPathCow<'a>>,
+
/// Only filled if `collect_traversed_dirs` is `true`
- pub traversed: Vec<HgPathBuf>,
+ pub traversed: Vec<HgPathCow<'a>>,
+
+ /// Whether `status()` made changes to the `DirstateMap` that should be
+ /// written back to disk
+ pub dirty: bool,
}
#[derive(Debug, derive_more::From)]
@@ -276,6 +307,8 @@
Path(HgPathError),
/// An invalid "ignore" pattern was found
Pattern(PatternError),
+ /// Corrupted dirstate
+ DirstateV2ParseError(DirstateV2ParseError),
}
pub type StatusResult<T> = Result<T, StatusError>;
@@ -286,13 +319,16 @@
StatusError::IO(error) => error.fmt(f),
StatusError::Path(error) => error.fmt(f),
StatusError::Pattern(error) => error.fmt(f),
+ StatusError::DirstateV2ParseError(_) => {
+ f.write_str("dirstate-v2 parse error")
+ }
}
}
}
/// Gives information about which files are changed in the working directory
/// and how, compared to the revision we're based on
-pub struct Status<'a, M: Matcher + Sync> {
+pub struct Status<'a, M: ?Sized + Matcher + Sync> {
dmap: &'a DirstateMap,
pub(crate) matcher: &'a M,
root_dir: PathBuf,
@@ -302,7 +338,7 @@
impl<'a, M> Status<'a, M>
where
- M: Matcher + Sync,
+ M: ?Sized + Matcher + Sync,
{
pub fn new(
dmap: &'a DirstateMap,
@@ -848,9 +884,9 @@
#[timed]
pub fn build_response<'a>(
results: impl IntoIterator<Item = DispatchedPath<'a>>,
- traversed: Vec<HgPathBuf>,
-) -> (Vec<HgPathCow<'a>>, DirstateStatus<'a>) {
- let mut lookup = vec![];
+ traversed: Vec<HgPathCow<'a>>,
+) -> DirstateStatus<'a> {
+ let mut unsure = vec![];
let mut modified = vec![];
let mut added = vec![];
let mut removed = vec![];
@@ -863,7 +899,7 @@
for (filename, dispatch) in results.into_iter() {
match dispatch {
Dispatch::Unknown => unknown.push(filename),
- Dispatch::Unsure => lookup.push(filename),
+ Dispatch::Unsure => unsure.push(filename),
Dispatch::Modified => modified.push(filename),
Dispatch::Added => added.push(filename),
Dispatch::Removed => removed.push(filename),
@@ -876,20 +912,19 @@
}
}
- (
- lookup,
- DirstateStatus {
- modified,
- added,
- removed,
- deleted,
- clean,
- ignored,
- unknown,
- bad,
- traversed,
- },
- )
+ DirstateStatus {
+ modified,
+ added,
+ removed,
+ deleted,
+ clean,
+ ignored,
+ unknown,
+ bad,
+ unsure,
+ traversed,
+ dirty: false,
+ }
}
/// Get the status of files in the working directory.
@@ -900,14 +935,11 @@
#[timed]
pub fn status<'a>(
dmap: &'a DirstateMap,
- matcher: &'a (impl Matcher + Sync),
+ matcher: &'a (dyn Matcher + Sync),
root_dir: PathBuf,
ignore_files: Vec<PathBuf>,
options: StatusOptions,
-) -> StatusResult<(
- (Vec<HgPathCow<'a>>, DirstateStatus<'a>),
- Vec<PatternFileWarning>,
-)> {
+) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
let (status, warnings) =
Status::new(dmap, matcher, root_dir, ignore_files, options)?;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,5 @@
+pub mod dirstate_map;
+pub mod dispatch;
+pub mod on_disk;
+pub mod path_with_basename;
+pub mod status;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,1113 @@
+use bytes_cast::BytesCast;
+use micro_timer::timed;
+use std::borrow::Cow;
+use std::convert::TryInto;
+use std::path::PathBuf;
+
+use super::on_disk;
+use super::on_disk::DirstateV2ParseError;
+use super::path_with_basename::WithBasename;
+use crate::dirstate::parsers::pack_entry;
+use crate::dirstate::parsers::packed_entry_size;
+use crate::dirstate::parsers::parse_dirstate_entries;
+use crate::dirstate::parsers::Timestamp;
+use crate::matchers::Matcher;
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::CopyMapIter;
+use crate::DirstateEntry;
+use crate::DirstateError;
+use crate::DirstateParents;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::FastHashMap;
+use crate::PatternFileWarning;
+use crate::StateMapIter;
+use crate::StatusError;
+use crate::StatusOptions;
+
+pub struct DirstateMap<'on_disk> {
+ /// Contents of the `.hg/dirstate` file
+ pub(super) on_disk: &'on_disk [u8],
+
+ pub(super) root: ChildNodes<'on_disk>,
+
+ /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
+ pub(super) nodes_with_entry_count: u32,
+
+ /// Number of nodes anywhere in the tree that have
+ /// `.copy_source.is_some()`.
+ pub(super) nodes_with_copy_source_count: u32,
+}
+
+/// Using a plain `HgPathBuf` of the full path from the repository root as a
+/// map key would also work: all paths in a given map have the same parent
+/// path, so comparing full paths gives the same result as comparing base
+/// names. However `HashMap` would waste time always re-hashing the same
+/// string prefix.
+pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
+
+/// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
+/// for on-disk nodes that don’t actually have a `Cow` to borrow.
+pub(super) enum BorrowedPath<'tree, 'on_disk> {
+ InMemory(&'tree HgPathBuf),
+ OnDisk(&'on_disk HgPath),
+}
+
+pub(super) enum ChildNodes<'on_disk> {
+ InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
+ OnDisk(&'on_disk [on_disk::Node]),
+}
+
+pub(super) enum ChildNodesRef<'tree, 'on_disk> {
+ InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
+ OnDisk(&'on_disk [on_disk::Node]),
+}
+
+pub(super) enum NodeRef<'tree, 'on_disk> {
+ InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
+ OnDisk(&'on_disk on_disk::Node),
+}
+
+impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
+ pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
+ match *self {
+ BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
+ BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
+ }
+ }
+}
+
+impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
+ type Target = HgPath;
+
+ fn deref(&self) -> &HgPath {
+ match *self {
+ BorrowedPath::InMemory(in_memory) => in_memory,
+ BorrowedPath::OnDisk(on_disk) => on_disk,
+ }
+ }
+}
+
+impl Default for ChildNodes<'_> {
+ fn default() -> Self {
+ ChildNodes::InMemory(Default::default())
+ }
+}
+
+impl<'on_disk> ChildNodes<'on_disk> {
+ pub(super) fn as_ref<'tree>(
+ &'tree self,
+ ) -> ChildNodesRef<'tree, 'on_disk> {
+ match self {
+ ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
+ ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
+ }
+ }
+
+ pub(super) fn is_empty(&self) -> bool {
+ match self {
+ ChildNodes::InMemory(nodes) => nodes.is_empty(),
+ ChildNodes::OnDisk(nodes) => nodes.is_empty(),
+ }
+ }
+
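+ /// Returns the mutable in-memory representation of these child nodes,
+ /// first converting (and parsing) the read-only on-disk representation
+ /// if that is what `self` currently holds.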
+ pub(super) fn make_mut(
+ &mut self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<
+ &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
+ DirstateV2ParseError,
+ > {
+ match self {
+ ChildNodes::InMemory(nodes) => Ok(nodes),
+ ChildNodes::OnDisk(nodes) => {
+ let nodes = nodes
+ .iter()
+ .map(|node| {
+ Ok((
+ node.path(on_disk)?,
+ node.to_in_memory_node(on_disk)?,
+ ))
+ })
+ .collect::<Result<_, _>>()?;
+ *self = ChildNodes::InMemory(nodes);
+ match self {
+ ChildNodes::InMemory(nodes) => Ok(nodes),
+ ChildNodes::OnDisk(_) => unreachable!(),
+ }
+ }
+ }
+ }
+}
+
+impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
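+ /// Looks up a child node by its base name: a hash-map lookup for the
+ /// in-memory representation, a binary search for the sorted on-disk
+ /// representation.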
+ pub(super) fn get(
+ &self,
+ base_name: &HgPath,
+ on_disk: &'on_disk [u8],
+ ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
+ match self {
+ ChildNodesRef::InMemory(nodes) => Ok(nodes
+ .get_key_value(base_name)
+ .map(|(k, v)| NodeRef::InMemory(k, v))),
+ ChildNodesRef::OnDisk(nodes) => {
+ let mut parse_result = Ok(());
+ let search_result = nodes.binary_search_by(|node| {
+ match node.base_name(on_disk) {
+ Ok(node_base_name) => node_base_name.cmp(base_name),
+ Err(e) => {
+ parse_result = Err(e);
+ // Dummy comparison result, `search_result` won’t
+ // be used since `parse_result` is an error
+ std::cmp::Ordering::Equal
+ }
+ }
+ });
+ parse_result.map(|()| {
+ search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
+ })
+ }
+ }
+ }
+
+ /// Iterate in undefined order
+ pub(super) fn iter(
+ &self,
+ ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
+ match self {
+ ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
+ nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
+ ),
+ ChildNodesRef::OnDisk(nodes) => {
+ itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
+ }
+ }
+ }
+
+ /// Iterate in parallel in undefined order
+ pub(super) fn par_iter(
+ &self,
+ ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
+ {
+ use rayon::prelude::*;
+ match self {
+ ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
+ nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
+ ),
+ ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
+ nodes.par_iter().map(NodeRef::OnDisk),
+ ),
+ }
+ }
+
+ pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
+ match self {
+ ChildNodesRef::InMemory(nodes) => {
+ let mut vec: Vec<_> = nodes
+ .iter()
+ .map(|(k, v)| NodeRef::InMemory(k, v))
+ .collect();
+ fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
+ match node {
+ NodeRef::InMemory(path, _node) => path.base_name(),
+ NodeRef::OnDisk(_) => unreachable!(),
+ }
+ }
+ // `sort_unstable_by_key` doesn’t allow keys borrowing from the
+ // value: https://github.com/rust-lang/rust/issues/34162
+ vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
+ vec
+ }
+ ChildNodesRef::OnDisk(nodes) => {
+ // Nodes on disk are already sorted
+ nodes.iter().map(NodeRef::OnDisk).collect()
+ }
+ }
+ }
+}
+
+impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
+ pub(super) fn full_path(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<&'tree HgPath, DirstateV2ParseError> {
+ match self {
+ NodeRef::InMemory(path, _node) => Ok(path.full_path()),
+ NodeRef::OnDisk(node) => node.full_path(on_disk),
+ }
+ }
+
+ /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
+ /// HgPath>` detached from `'tree`
+ pub(super) fn full_path_borrowed(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
+ match self {
+ NodeRef::InMemory(path, _node) => match path.full_path() {
+ Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
+ Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
+ },
+ NodeRef::OnDisk(node) => {
+ Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
+ }
+ }
+ }
+
+ pub(super) fn base_name(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<&'tree HgPath, DirstateV2ParseError> {
+ match self {
+ NodeRef::InMemory(path, _node) => Ok(path.base_name()),
+ NodeRef::OnDisk(node) => node.base_name(on_disk),
+ }
+ }
+
+ pub(super) fn children(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
+ match self {
+ NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
+ NodeRef::OnDisk(node) => {
+ Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
+ }
+ }
+ }
+
+ pub(super) fn has_copy_source(&self) -> bool {
+ match self {
+ NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
+ NodeRef::OnDisk(node) => node.has_copy_source(),
+ }
+ }
+
+ pub(super) fn copy_source(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
+ match self {
+ NodeRef::InMemory(_path, node) => {
+ Ok(node.copy_source.as_ref().map(|s| &**s))
+ }
+ NodeRef::OnDisk(node) => node.copy_source(on_disk),
+ }
+ }
+
+ pub(super) fn entry(
+ &self,
+ ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
+ match self {
+ NodeRef::InMemory(_path, node) => {
+ Ok(node.data.as_entry().copied())
+ }
+ NodeRef::OnDisk(node) => node.entry(),
+ }
+ }
+
+ pub(super) fn state(
+ &self,
+ ) -> Result<Option<EntryState>, DirstateV2ParseError> {
+ match self {
+ NodeRef::InMemory(_path, node) => {
+ Ok(node.data.as_entry().map(|entry| entry.state))
+ }
+ NodeRef::OnDisk(node) => node.state(),
+ }
+ }
+
+ pub(super) fn cached_directory_mtime(
+ &self,
+ ) -> Option<&'tree on_disk::Timestamp> {
+ match self {
+ NodeRef::InMemory(_path, node) => match &node.data {
+ NodeData::CachedDirectory { mtime } => Some(mtime),
+ _ => None,
+ },
+ NodeRef::OnDisk(node) => node.cached_directory_mtime(),
+ }
+ }
+
+ pub(super) fn tracked_descendants_count(&self) -> u32 {
+ match self {
+ NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
+ NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
+ }
+ }
+}
+
+/// Represents a file or a directory
+#[derive(Default)]
+pub(super) struct Node<'on_disk> {
+ pub(super) data: NodeData,
+
+ pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
+
+ pub(super) children: ChildNodes<'on_disk>,
+
+ /// How many (non-inclusive) descendants of this node are tracked files
+ pub(super) tracked_descendants_count: u32,
+}
+
+pub(super) enum NodeData {
+ Entry(DirstateEntry),
+ CachedDirectory { mtime: on_disk::Timestamp },
+ None,
+}
+
+impl Default for NodeData {
+ fn default() -> Self {
+ NodeData::None
+ }
+}
+
+impl NodeData {
+ fn has_entry(&self) -> bool {
+ match self {
+ NodeData::Entry(_) => true,
+ _ => false,
+ }
+ }
+
+ fn as_entry(&self) -> Option<&DirstateEntry> {
+ match self {
+ NodeData::Entry(entry) => Some(entry),
+ _ => None,
+ }
+ }
+}
+
+impl<'on_disk> DirstateMap<'on_disk> {
+ pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
+ Self {
+ on_disk,
+ root: ChildNodes::default(),
+ nodes_with_entry_count: 0,
+ nodes_with_copy_source_count: 0,
+ }
+ }
+
+ #[timed]
+ pub fn new_v2(
+ on_disk: &'on_disk [u8],
+ ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
+ Ok(on_disk::read(on_disk)?)
+ }
+
+ #[timed]
+ pub fn new_v1(
+ on_disk: &'on_disk [u8],
+ ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
+ let mut map = Self::empty(on_disk);
+ if map.on_disk.is_empty() {
+ return Ok((map, None));
+ }
+
+ let parents = parse_dirstate_entries(
+ map.on_disk,
+ |path, entry, copy_source| {
+ let tracked = entry.state.is_tracked();
+ let node = Self::get_or_insert_node(
+ map.on_disk,
+ &mut map.root,
+ path,
+ WithBasename::to_cow_borrowed,
+ |ancestor| {
+ if tracked {
+ ancestor.tracked_descendants_count += 1
+ }
+ },
+ )?;
+ assert!(
+ !node.data.has_entry(),
+ "duplicate dirstate entry in read"
+ );
+ assert!(
+ node.copy_source.is_none(),
+ "duplicate dirstate entry in read"
+ );
+ node.data = NodeData::Entry(*entry);
+ node.copy_source = copy_source.map(Cow::Borrowed);
+ map.nodes_with_entry_count += 1;
+ if copy_source.is_some() {
+ map.nodes_with_copy_source_count += 1
+ }
+ Ok(())
+ },
+ )?;
+ let parents = Some(parents.clone());
+
+ Ok((map, parents))
+ }
+
+ fn get_node<'tree>(
+ &'tree self,
+ path: &HgPath,
+ ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
+ let mut children = self.root.as_ref();
+ let mut components = path.components();
+ let mut component =
+ components.next().expect("expected at least one component");
+ loop {
+ if let Some(child) = children.get(component, self.on_disk)? {
+ if let Some(next_component) = components.next() {
+ component = next_component;
+ children = child.children(self.on_disk)?;
+ } else {
+ return Ok(Some(child));
+ }
+ } else {
+ return Ok(None);
+ }
+ }
+ }
+
+ /// Returns a mutable reference to the node at `path` if it exists
+ ///
+ /// This takes `root` instead of `&mut self` so that callers can mutate
+ /// other fields while the returned borrow is still valid
+ fn get_node_mut<'tree>(
+ on_disk: &'on_disk [u8],
+ root: &'tree mut ChildNodes<'on_disk>,
+ path: &HgPath,
+ ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
+ let mut children = root;
+ let mut components = path.components();
+ let mut component =
+ components.next().expect("expected at least one component");
+ loop {
+ if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
+ {
+ if let Some(next_component) = components.next() {
+ component = next_component;
+ children = &mut child.children;
+ } else {
+ return Ok(Some(child));
+ }
+ } else {
+ return Ok(None);
+ }
+ }
+ }
+
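+ /// Returns the node at `path`, creating it and any missing ancestor
+ /// nodes along the way. `each_ancestor` is called for every ancestor
+ /// node traversed (but not for the returned node itself), which lets
+ /// callers maintain counters such as `tracked_descendants_count`.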
+ pub(super) fn get_or_insert_node<'tree, 'path>(
+ on_disk: &'on_disk [u8],
+ root: &'tree mut ChildNodes<'on_disk>,
+ path: &'path HgPath,
+ to_cow: impl Fn(
+ WithBasename<&'path HgPath>,
+ ) -> WithBasename<Cow<'on_disk, HgPath>>,
+ mut each_ancestor: impl FnMut(&mut Node),
+ ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
+ let mut child_nodes = root;
+ let mut inclusive_ancestor_paths =
+ WithBasename::inclusive_ancestors_of(path);
+ let mut ancestor_path = inclusive_ancestor_paths
+ .next()
+ .expect("expected at least one inclusive ancestor");
+ loop {
+ // TODO: can we avoid allocating an owned key in cases where the
+ // map already contains that key, without introducing double
+ // lookup?
+ let child_node = child_nodes
+ .make_mut(on_disk)?
+ .entry(to_cow(ancestor_path))
+ .or_default();
+ if let Some(next) = inclusive_ancestor_paths.next() {
+ each_ancestor(child_node);
+ ancestor_path = next;
+ child_nodes = &mut child_node.children;
+ } else {
+ return Ok(child_node);
+ }
+ }
+ }
+
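+ /// Inserts or replaces the entry at `path`, adjusting the
+ /// `tracked_descendants_count` of every ancestor by the change in
+ /// tracked-ness between `old_state` and `new_entry`.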
+ fn add_or_remove_file(
+ &mut self,
+ path: &HgPath,
+ old_state: EntryState,
+ new_entry: DirstateEntry,
+ ) -> Result<(), DirstateV2ParseError> {
+ let tracked_count_increment =
+ match (old_state.is_tracked(), new_entry.state.is_tracked()) {
+ (false, true) => 1,
+ (true, false) => -1,
+ _ => 0,
+ };
+
+ let node = Self::get_or_insert_node(
+ self.on_disk,
+ &mut self.root,
+ path,
+ WithBasename::to_cow_owned,
+ |ancestor| {
+ // We can’t use `+= increment` because the counter is unsigned,
+ // and we want debug builds to detect accidental underflow
+ // through zero
+ match tracked_count_increment {
+ 1 => ancestor.tracked_descendants_count += 1,
+ -1 => ancestor.tracked_descendants_count -= 1,
+ _ => {}
+ }
+ },
+ )?;
+ if !node.data.has_entry() {
+ self.nodes_with_entry_count += 1
+ }
+ node.data = NodeData::Entry(new_entry);
+ Ok(())
+ }
+
+ fn iter_nodes<'tree>(
+ &'tree self,
+ ) -> impl Iterator<
+ Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
+ > + 'tree {
+ // Depth first tree traversal.
+ //
+ // If we could afford internal iteration and recursion,
+ // this would look like:
+ //
+ // ```
+ // fn traverse_children(
+ // children: &ChildNodes,
+ // each: &mut impl FnMut(&Node),
+ // ) {
+ // for child in children.values() {
+ // traverse_children(&child.children, each);
+ // each(child);
+ // }
+ // }
+ // ```
+ //
+ // However we want an external iterator and therefore can’t use the
+ // call stack. Use an explicit stack instead:
+ let mut stack = Vec::new();
+ let mut iter = self.root.as_ref().iter();
+ std::iter::from_fn(move || {
+ while let Some(child_node) = iter.next() {
+ let children = match child_node.children(self.on_disk) {
+ Ok(children) => children,
+ Err(error) => return Some(Err(error)),
+ };
+ // Pseudo-recursion
+ let new_iter = children.iter();
+ let old_iter = std::mem::replace(&mut iter, new_iter);
+ stack.push((child_node, old_iter));
+ }
+ // Found the end of a `children.iter()` iterator.
+ if let Some((child_node, next_iter)) = stack.pop() {
+ // "Return" from pseudo-recursion by restoring state from the
+ // explicit stack
+ iter = next_iter;
+
+ Some(Ok(child_node))
+ } else {
+ // Reached the bottom of the stack, we’re done
+ None
+ }
+ })
+ }
+
+ fn clear_known_ambiguous_mtimes(
+ &mut self,
+ paths: &[impl AsRef<HgPath>],
+ ) -> Result<(), DirstateV2ParseError> {
+ for path in paths {
+ if let Some(node) = Self::get_node_mut(
+ self.on_disk,
+ &mut self.root,
+ path.as_ref(),
+ )? {
+ if let NodeData::Entry(entry) = &mut node.data {
+ entry.clear_mtime();
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Returns a fallible iterator of full paths of nodes that have an
+ /// `entry` for which the given `predicate` returns true.
+ ///
+ /// Fallibility means that each iterator item is a `Result`, which may
+ /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
+ /// should only happen if Mercurial is buggy or a repository is corrupted.
+ fn filter_full_paths<'tree>(
+ &'tree self,
+ predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
+ ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
+ {
+ filter_map_results(self.iter_nodes(), move |node| {
+ if let Some(entry) = node.entry()? {
+ if predicate(&entry) {
+ return Ok(Some(node.full_path(self.on_disk)?));
+ }
+ }
+ Ok(None)
+ })
+ }
+}
+
+/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
+///
+/// The callback is only called for incoming `Ok` values. Errors are passed
+/// through as-is. In order to let it use the `?` operator the callback is
+/// expected to return a `Result` of `Option`, instead of an `Option` of
+/// `Result`.
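+///
+/// An illustrative sketch of the behavior:
+///
+/// ```ignore
+/// let results = vec![Ok(1), Err("corrupt"), Ok(2)].into_iter();
+/// let out: Vec<_> = filter_map_results(results, |n| {
+///     Ok(if n % 2 == 0 { Some(n * 10) } else { None })
+/// })
+/// .collect();
+/// // `Ok(1)` is filtered out, the error passes through unchanged,
+/// // and `Ok(2)` is mapped by the callback.
+/// assert_eq!(out, vec![Err("corrupt"), Ok(20)]);
+/// ```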
+fn filter_map_results<'a, I, F, A, B, E>(
+ iter: I,
+ f: F,
+) -> impl Iterator<Item = Result<B, E>> + 'a
+where
+ I: Iterator<Item = Result<A, E>> + 'a,
+ F: Fn(A) -> Result<Option<B>, E> + 'a,
+{
+ iter.filter_map(move |result| match result {
+ Ok(node) => f(node).transpose(),
+ Err(e) => Some(Err(e)),
+ })
+}
+
+impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
+ fn clear(&mut self) {
+ self.root = Default::default();
+ self.nodes_with_entry_count = 0;
+ self.nodes_with_copy_source_count = 0;
+ }
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateError> {
+ Ok(self.add_or_remove_file(filename, old_state, entry)?)
+ }
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateError> {
+ let entry = DirstateEntry {
+ state: EntryState::Removed,
+ mode: 0,
+ size,
+ mtime: 0,
+ };
+ Ok(self.add_or_remove_file(filename, old_state, entry)?)
+ }
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateError> {
+ struct Dropped {
+ was_tracked: bool,
+ had_entry: bool,
+ had_copy_source: bool,
+ }
+
+ /// If this returns `Ok(Some((dropped, removed)))`, then
+ ///
+ /// * `dropped` is about the leaf node that was at `filename`
+ /// * `removed` is whether this particular level of recursion just
+ /// removed a node in `nodes`.
+ fn recur<'on_disk>(
+ on_disk: &'on_disk [u8],
+ nodes: &mut ChildNodes<'on_disk>,
+ path: &HgPath,
+ ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
+ let (first_path_component, rest_of_path) =
+ path.split_first_component();
+ let node = if let Some(node) =
+ nodes.make_mut(on_disk)?.get_mut(first_path_component)
+ {
+ node
+ } else {
+ return Ok(None);
+ };
+ let dropped;
+ if let Some(rest) = rest_of_path {
+ if let Some((d, removed)) =
+ recur(on_disk, &mut node.children, rest)?
+ {
+ dropped = d;
+ if dropped.was_tracked {
+ node.tracked_descendants_count -= 1;
+ }
+
+ // Directory caches must be invalidated when removing a
+ // child node
+ if removed {
+ if let NodeData::CachedDirectory { .. } = &node.data {
+ node.data = NodeData::None
+ }
+ }
+ } else {
+ return Ok(None);
+ }
+ } else {
+ let had_entry = node.data.has_entry();
+ // Compute `was_tracked` before clearing `node.data`, otherwise
+ // the entry would already be gone and this would always be
+ // `false`.
+ let was_tracked = node
+ .data
+ .as_entry()
+ .map_or(false, |entry| entry.state.is_tracked());
+ if had_entry {
+ node.data = NodeData::None
+ }
+ dropped = Dropped {
+ was_tracked,
+ had_entry,
+ had_copy_source: node.copy_source.take().is_some(),
+ };
+ }
+ // After recursion, for both leaf (rest_of_path is None) nodes and
+ // parent nodes, remove a node if it just became empty.
+ let remove = !node.data.has_entry()
+ && node.copy_source.is_none()
+ && node.children.is_empty();
+ if remove {
+ nodes.make_mut(on_disk)?.remove(first_path_component);
+ }
+ Ok(Some((dropped, remove)))
+ }
+
+ if let Some((dropped, _removed)) =
+ recur(self.on_disk, &mut self.root, filename)?
+ {
+ if dropped.had_entry {
+ self.nodes_with_entry_count -= 1
+ }
+ if dropped.had_copy_source {
+ self.nodes_with_copy_source_count -= 1
+ }
+ Ok(dropped.had_entry)
+ } else {
+ debug_assert!(!old_state.is_tracked());
+ Ok(false)
+ }
+ }
+
+ fn clear_ambiguous_times(
+ &mut self,
+ filenames: Vec<HgPathBuf>,
+ now: i32,
+ ) -> Result<(), DirstateV2ParseError> {
+ for filename in filenames {
+ if let Some(node) =
+ Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
+ {
+ if let NodeData::Entry(entry) = &mut node.data {
+ entry.clear_ambiguous_mtime(now);
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn non_normal_entries_contains(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ Ok(if let Some(node) = self.get_node(key)? {
+ node.entry()?.map_or(false, |entry| entry.is_non_normal())
+ } else {
+ false
+ })
+ }
+
+ fn non_normal_entries_remove(&mut self, _key: &HgPath) {
+ // Do nothing, this `DirstateMap` does not have a separate "non normal
+ // entries" set that need to be kept up to date
+ }
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
+ {
+ Box::new(self.filter_full_paths(|entry| {
+ entry.is_non_normal() || entry.is_from_other_parent()
+ }))
+ }
+
+ fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
+ // Do nothing, this `DirstateMap` does not have separate "non normal
+ // entries" and "from other parent" sets that need to be recomputed
+ }
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ self.iter_non_normal_paths_panic()
+ }
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
+ }
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
+ }
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateError> {
+ if let Some(node) = self.get_node(directory)? {
+ // A node without a `DirstateEntry` was created to hold child
+ // nodes, and is therefore a directory.
+ let state = node.state()?;
+ Ok(state.is_none() && node.tracked_descendants_count() > 0)
+ } else {
+ Ok(false)
+ }
+ }
+
+ fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
+ if let Some(node) = self.get_node(directory)? {
+ // A node without a `DirstateEntry` was created to hold child
+ // nodes, and is therefore a directory.
+ Ok(node.state()?.is_none())
+ } else {
+ Ok(false)
+ }
+ }
+
+ #[timed]
+ fn pack_v1(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ let now: i32 = now.0.try_into().expect("time overflow");
+ let mut ambiguous_mtimes = Vec::new();
+ // Optimization (to be measured?): pre-compute size to avoid `Vec`
+ // reallocations
+ let mut size = parents.as_bytes().len();
+ for node in self.iter_nodes() {
+ let node = node?;
+ if let Some(entry) = node.entry()? {
+ size += packed_entry_size(
+ node.full_path(self.on_disk)?,
+ node.copy_source(self.on_disk)?,
+ );
+ if entry.mtime_is_ambiguous(now) {
+ ambiguous_mtimes.push(
+ node.full_path_borrowed(self.on_disk)?
+ .detach_from_tree(),
+ )
+ }
+ }
+ }
+ self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
+
+ let mut packed = Vec::with_capacity(size);
+ packed.extend(parents.as_bytes());
+
+ for node in self.iter_nodes() {
+ let node = node?;
+ if let Some(entry) = node.entry()? {
+ pack_entry(
+ node.full_path(self.on_disk)?,
+ &entry,
+ node.copy_source(self.on_disk)?,
+ &mut packed,
+ );
+ }
+ }
+ Ok(packed)
+ }
+
+ #[timed]
+ fn pack_v2(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ // TODO: how do we want to handle this in 2038?
+ let now: i32 = now.0.try_into().expect("time overflow");
+ let mut paths = Vec::new();
+ for node in self.iter_nodes() {
+ let node = node?;
+ if let Some(entry) = node.entry()? {
+ if entry.mtime_is_ambiguous(now) {
+ paths.push(
+ node.full_path_borrowed(self.on_disk)?
+ .detach_from_tree(),
+ )
+ }
+ }
+ }
+ // Borrow of `self` ends here since we collect cloned paths
+
+ self.clear_known_ambiguous_mtimes(&paths)?;
+
+ on_disk::write(self, parents)
+ }
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
+ // Do nothing, this `DirstateMap` does not have a separate `all_dirs`
+ // that needs to be recomputed
+ Ok(())
+ }
+
+ fn set_dirs(&mut self) -> Result<(), DirstateError> {
+ // Do nothing, this `DirstateMap` does not have a separate `dirs` that
+ // needs to be recomputed
+ Ok(())
+ }
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+ {
+ super::status::status(self, matcher, root_dir, ignore_files, options)
+ }
+
+ fn copy_map_len(&self) -> usize {
+ self.nodes_with_copy_source_count as usize
+ }
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_> {
+ Box::new(filter_map_results(self.iter_nodes(), move |node| {
+ Ok(if let Some(source) = node.copy_source(self.on_disk)? {
+ Some((node.full_path(self.on_disk)?, source))
+ } else {
+ None
+ })
+ }))
+ }
+
+ fn copy_map_contains_key(
+ &self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ Ok(if let Some(node) = self.get_node(key)? {
+ node.has_copy_source()
+ } else {
+ false
+ })
+ }
+
+ fn copy_map_get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
+ if let Some(node) = self.get_node(key)? {
+ if let Some(source) = node.copy_source(self.on_disk)? {
+ return Ok(Some(source));
+ }
+ }
+ Ok(None)
+ }
+
+ fn copy_map_remove(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
+ let count = &mut self.nodes_with_copy_source_count;
+ Ok(
+ Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
+ |node| {
+ if node.copy_source.is_some() {
+ *count -= 1
+ }
+ node.copy_source.take().map(Cow::into_owned)
+ },
+ ),
+ )
+ }
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
+ let node = Self::get_or_insert_node(
+ self.on_disk,
+ &mut self.root,
+ &key,
+ WithBasename::to_cow_owned,
+ |_ancestor| {},
+ )?;
+ if node.copy_source.is_none() {
+ self.nodes_with_copy_source_count += 1
+ }
+ Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
+ }
+
+ fn len(&self) -> usize {
+ self.nodes_with_entry_count as usize
+ }
+
+ fn contains_key(
+ &self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ Ok(self.get(key)?.is_some())
+ }
+
+ fn get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
+ Ok(if let Some(node) = self.get_node(key)? {
+ node.entry()?
+ } else {
+ None
+ })
+ }
+
+ fn iter(&self) -> StateMapIter<'_> {
+ Box::new(filter_map_results(self.iter_nodes(), move |node| {
+ Ok(if let Some(entry) = node.entry()? {
+ Some((node.full_path(self.on_disk)?, entry))
+ } else {
+ None
+ })
+ }))
+ }
+
+ fn iter_directories(
+ &self,
+ ) -> Box<
+ dyn Iterator<
+ Item = Result<
+ (&HgPath, Option<Timestamp>),
+ DirstateV2ParseError,
+ >,
+ > + Send
+ + '_,
+ > {
+ Box::new(filter_map_results(self.iter_nodes(), move |node| {
+ Ok(if node.state()?.is_none() {
+ Some((
+ node.full_path(self.on_disk)?,
+ node.cached_directory_mtime()
+ .map(|mtime| Timestamp(mtime.seconds())),
+ ))
+ } else {
+ None
+ })
+ }))
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/dispatch.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,379 @@
+use std::path::PathBuf;
+
+use crate::dirstate::parsers::Timestamp;
+use crate::dirstate_tree::on_disk::DirstateV2ParseError;
+use crate::matchers::Matcher;
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::CopyMapIter;
+use crate::DirstateEntry;
+use crate::DirstateError;
+use crate::DirstateMap;
+use crate::DirstateParents;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::PatternFileWarning;
+use crate::StateMapIter;
+use crate::StatusError;
+use crate::StatusOptions;
+
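+/// The operations a dirstate map implementation must support. Both the
+/// flat `DirstateMap` (dirstate-v1 only) and the tree-shaped
+/// `dirstate_tree::dirstate_map::DirstateMap` implement it, so callers
+/// can dispatch to either through a `dyn DirstateMapMethods` trait
+/// object.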
+pub trait DirstateMapMethods {
+ fn clear(&mut self);
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateError>;
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateError>;
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateError>;
+
+ fn clear_ambiguous_times(
+ &mut self,
+ filenames: Vec<HgPathBuf>,
+ now: i32,
+ ) -> Result<(), DirstateV2ParseError>;
+
+ fn non_normal_entries_contains(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError>;
+
+ fn non_normal_entries_remove(&mut self, key: &HgPath);
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
+
+ fn set_non_normal_other_parent_entries(&mut self, force: bool);
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ >;
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ >;
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ >;
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateError>;
+
+ fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
+
+ fn pack_v1(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError>;
+
+ fn pack_v2(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError>;
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateError>;
+
+ fn set_dirs(&mut self) -> Result<(), DirstateError>;
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
+
+ fn copy_map_len(&self) -> usize;
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_>;
+
+ fn copy_map_contains_key(
+ &self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError>;
+
+ fn copy_map_get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
+
+ fn copy_map_remove(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
+
+ fn len(&self) -> usize;
+
+ fn contains_key(&self, key: &HgPath)
+ -> Result<bool, DirstateV2ParseError>;
+
+ fn get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
+
+ fn iter(&self) -> StateMapIter<'_>;
+
+ fn iter_directories(
+ &self,
+ ) -> Box<
+ dyn Iterator<
+ Item = Result<
+ (&HgPath, Option<Timestamp>),
+ DirstateV2ParseError,
+ >,
+ > + Send
+ + '_,
+ >;
+}
+
+impl DirstateMapMethods for DirstateMap {
+ fn clear(&mut self) {
+ self.clear()
+ }
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateError> {
+ self.add_file(filename, old_state, entry)
+ }
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateError> {
+ self.remove_file(filename, old_state, size)
+ }
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateError> {
+ self.drop_file(filename, old_state)
+ }
+
+ fn clear_ambiguous_times(
+ &mut self,
+ filenames: Vec<HgPathBuf>,
+ now: i32,
+ ) -> Result<(), DirstateV2ParseError> {
+ Ok(self.clear_ambiguous_times(filenames, now))
+ }
+
+ fn non_normal_entries_contains(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ let (non_normal, _other_parent) =
+ self.get_non_normal_other_parent_entries();
+ Ok(non_normal.contains(key))
+ }
+
+ fn non_normal_entries_remove(&mut self, key: &HgPath) {
+ self.non_normal_entries_remove(key)
+ }
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
+ {
+ let (non_normal, other_parent) =
+ self.get_non_normal_other_parent_entries();
+ Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
+ }
+
+ fn set_non_normal_other_parent_entries(&mut self, force: bool) {
+ self.set_non_normal_other_parent_entries(force)
+ }
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ let (non_normal, _other_parent) =
+ self.get_non_normal_other_parent_entries();
+ Box::new(non_normal.iter().map(|p| Ok(&**p)))
+ }
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ let (non_normal, _other_parent) =
+ self.get_non_normal_other_parent_entries_panic();
+ Box::new(non_normal.iter().map(|p| Ok(&**p)))
+ }
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ let (_non_normal, other_parent) =
+ self.get_non_normal_other_parent_entries();
+ Box::new(other_parent.iter().map(|p| Ok(&**p)))
+ }
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateError> {
+ self.has_tracked_dir(directory)
+ }
+
+ fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
+ self.has_dir(directory)
+ }
+
+ fn pack_v1(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ self.pack(parents, now)
+ }
+
+ fn pack_v2(
+ &mut self,
+ _parents: DirstateParents,
+ _now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ panic!(
+ "should have used dirstate_tree::DirstateMap to use the v2 format"
+ )
+ }
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
+ self.set_all_dirs()
+ }
+
+ fn set_dirs(&mut self) -> Result<(), DirstateError> {
+ self.set_dirs()
+ }
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+ {
+ crate::status(self, matcher, root_dir, ignore_files, options)
+ }
+
+ fn copy_map_len(&self) -> usize {
+ self.copy_map.len()
+ }
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_> {
+ Box::new(
+ self.copy_map
+ .iter()
+ .map(|(key, value)| Ok((&**key, &**value))),
+ )
+ }
+
+ fn copy_map_contains_key(
+ &self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ Ok(self.copy_map.contains_key(key))
+ }
+
+ fn copy_map_get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
+ Ok(self.copy_map.get(key).map(|p| &**p))
+ }
+
+ fn copy_map_remove(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
+ Ok(self.copy_map.remove(key))
+ }
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
+ Ok(self.copy_map.insert(key, value))
+ }
+
+ fn len(&self) -> usize {
+ (&**self).len()
+ }
+
+ fn contains_key(
+ &self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ Ok((&**self).contains_key(key))
+ }
+
+ fn get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
+ Ok((&**self).get(key).cloned())
+ }
+
+ fn iter(&self) -> StateMapIter<'_> {
+ Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
+ }
+
+ fn iter_directories(
+ &self,
+ ) -> Box<
+ dyn Iterator<
+ Item = Result<
+ (&HgPath, Option<Timestamp>),
+ DirstateV2ParseError,
+ >,
+ > + Send
+ + '_,
+ > {
+ Box::new(std::iter::empty())
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/on_disk.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,574 @@
+//! The "version 2" disk representation of the dirstate
+//!
+//! # File format
+//!
+//! The file starts with a fixed-sized header, whose layout is defined by the
+//! `Header` struct. Its `root` field contains the slice (offset and length) to
+//! the nodes representing the files and directories at the root of the
+//! repository. Each node is also fixed-size, defined by the `Node` struct.
+//! Nodes in turn contain slices to variable-size paths, and to their own child
+//! nodes (if any) for nested files and directories.
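+//!
+//! As a rough sketch (simplified; the `Header` and `Node` structs below
+//! are authoritative), the layout is:
+//!
+//! ```text
+//! [Header: marker | parents | root slice | counters]
+//! [variable-size data: paths and sorted arrays of child nodes,
+//!  each located through an (offset, length) slice]
+//! ```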
+
+use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
+use crate::dirstate_tree::path_with_basename::WithBasename;
+use crate::errors::HgError;
+use crate::utils::hg_path::HgPath;
+use crate::DirstateEntry;
+use crate::DirstateError;
+use crate::DirstateParents;
+use crate::EntryState;
+use bytes_cast::unaligned::{I32Be, I64Be, U32Be, U64Be};
+use bytes_cast::BytesCast;
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+
+/// Added at the start of `.hg/dirstate` when the "v2" format is used.
+/// This is a redundant sanity check more than an actual "magic number" since
+/// `.hg/requires` already governs which format should be used.
+pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
+
+#[derive(BytesCast)]
+#[repr(C)]
+struct Header {
+ marker: [u8; V2_FORMAT_MARKER.len()],
+
+ /// `dirstatemap.parents()` in `mercurial/dirstate.py` relies on this
+ /// `parents` field being at this offset, immediately after `marker`.
+ parents: DirstateParents,
+
+ root: ChildNodes,
+ nodes_with_entry_count: Size,
+ nodes_with_copy_source_count: Size,
+}
+
+#[derive(BytesCast)]
+#[repr(C)]
+pub(super) struct Node {
+ full_path: PathSlice,
+
+ /// In bytes from `self.full_path.start`
+ base_name_start: Size,
+
+ copy_source: OptPathSlice,
+ children: ChildNodes,
+ pub(super) tracked_descendants_count: Size,
+
+ /// Depending on the value of `state`:
+ ///
+ /// * A null byte: `data` is not used.
+ ///
+ /// * An `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
+ /// represent a dirstate entry like in the v1 format.
+ ///
+ /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
+ /// as the `Timestamp` for the mtime of a cached directory.
+ ///
+ /// The presence of this state means that at some point, this path in
+ /// the working directory was observed:
+ ///
+ /// - To be a directory
+ /// - With the modification time as given by `Timestamp`
+ /// - That timestamp was already strictly in the past when observed,
+ /// meaning that later changes cannot happen in the same clock tick
+ /// and must cause a different modification time (unless the system
+ /// clock jumps back and we get unlucky, which is not impossible but
+ /// deemed unlikely enough).
+ /// - The directory did not contain any child entry that did not have a
+ /// corresponding dirstate node.
+ ///
+ /// This means that if `std::fs::symlink_metadata` later reports the
+ /// same modification time, we don’t need to call `std::fs::read_dir`
+ /// again for this directory and can iterate child dirstate nodes
+ /// instead.
+ state: u8,
+ data: Entry,
+}
+
+#[derive(BytesCast, Copy, Clone)]
+#[repr(C)]
+struct Entry {
+ mode: I32Be,
+ mtime: I32Be,
+ size: I32Be,
+}
+
+/// Duration since the Unix epoch
+#[derive(BytesCast, Copy, Clone, PartialEq)]
+#[repr(C)]
+pub(super) struct Timestamp {
+ seconds: I64Be,
+
+ /// In `0 .. 1_000_000_000`.
+ ///
+ /// This timestamp is later or earlier than `(seconds, 0)` by this many
+ /// nanoseconds, if `seconds` is non-negative or negative, respectively.
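+ ///
+ /// For example, `{ seconds: 1, nanoseconds: 500_000_000 }` represents
+ /// 1.5 seconds after the Unix epoch, and `{ seconds: -1, nanoseconds:
+ /// 500_000_000 }` represents 1.5 seconds before it.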
+ nanoseconds: U32Be,
+}
+
+/// Counted in bytes from the start of the file
+///
+/// NOTE: If we decide to never support `.hg/dirstate` files larger than 4 GiB
+/// we could save space by using `U32Be` instead.
+type Offset = U64Be;
+
+/// Counted in number of items
+///
+/// NOTE: this does not support directories with more than 4 billion direct
+/// children, or filenames longer than 4 GiB.
+type Size = U32Be;
+
+/// Location of consecutive, fixed-size items.
+///
+/// An item can be a single byte for paths, or a struct with
+/// `derive(BytesCast)`.
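+///
+/// For example, a 5-byte path stored at bytes `100..105` of the file is
+/// located by `Slice { start: 100, len: 5 }`.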
+#[derive(BytesCast, Copy, Clone)]
+#[repr(C)]
+struct Slice {
+ start: Offset,
+ len: Size,
+}
+
+/// A contiguous sequence of `len` times `Node`, representing the child nodes
+/// of either some other node or of the repository root.
+///
+/// Always sorted by ascending `full_path`, to allow binary search.
+/// Since nodes with the same parent nodes also have the same parent path,
+/// only the `base_name`s need to be compared during binary search.
+type ChildNodes = Slice;
+
+/// A `HgPath` of `len` bytes
+type PathSlice = Slice;
+
+/// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
+type OptPathSlice = Slice;
+
+/// Make sure that size-affecting changes are made knowingly
+fn _static_assert_size_of() {
+ let _ = std::mem::transmute::<Header, [u8; 72]>;
+ let _ = std::mem::transmute::<Node, [u8; 57]>;
+}
+
+/// Unexpected file format found in `.hg/dirstate` with the "v2" format.
+///
+/// This should only happen if Mercurial is buggy or a repository is corrupted.
+#[derive(Debug)]
+pub struct DirstateV2ParseError;
+
+impl From<DirstateV2ParseError> for HgError {
+ fn from(_: DirstateV2ParseError) -> Self {
+ HgError::corrupted("dirstate-v2 parse error")
+ }
+}
+
+impl From<DirstateV2ParseError> for crate::DirstateError {
+ fn from(error: DirstateV2ParseError) -> Self {
+ HgError::from(error).into()
+ }
+}
+
+fn read_header(on_disk: &[u8]) -> Result<&Header, DirstateV2ParseError> {
+ let (header, _) =
+ Header::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
+ if header.marker == *V2_FORMAT_MARKER {
+ Ok(header)
+ } else {
+ Err(DirstateV2ParseError)
+ }
+}
+
+pub(super) fn read<'on_disk>(
+ on_disk: &'on_disk [u8],
+) -> Result<
+ (DirstateMap<'on_disk>, Option<DirstateParents>),
+ DirstateV2ParseError,
+> {
+ if on_disk.is_empty() {
+ return Ok((DirstateMap::empty(on_disk), None));
+ }
+ let header = read_header(on_disk)?;
+ let dirstate_map = DirstateMap {
+ on_disk,
+ root: dirstate_map::ChildNodes::OnDisk(read_slice::<Node>(
+ on_disk,
+ header.root,
+ )?),
+ nodes_with_entry_count: header.nodes_with_entry_count.get(),
+ nodes_with_copy_source_count: header
+ .nodes_with_copy_source_count
+ .get(),
+ };
+ let parents = Some(header.parents.clone());
+ Ok((dirstate_map, parents))
+}
+
+impl Node {
+ pub(super) fn full_path<'on_disk>(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
+ read_hg_path(on_disk, self.full_path)
+ }
+
+ pub(super) fn base_name_start<'on_disk>(
+ &self,
+ ) -> Result<usize, DirstateV2ParseError> {
+ let start = self.base_name_start.get();
+ if start < self.full_path.len.get() {
+ let start = usize::try_from(start)
+ // u32 -> usize, could only panic on a 16-bit CPU
+ .expect("dirstate-v2 base_name_start out of bounds");
+ Ok(start)
+ } else {
+ Err(DirstateV2ParseError)
+ }
+ }
+
+ pub(super) fn base_name<'on_disk>(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
+ let full_path = self.full_path(on_disk)?;
+ let base_name_start = self.base_name_start()?;
+ Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
+ }
+
+ pub(super) fn path<'on_disk>(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
+ Ok(WithBasename::from_raw_parts(
+ Cow::Borrowed(self.full_path(on_disk)?),
+ self.base_name_start()?,
+ ))
+ }
+
+ pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
+ self.copy_source.start.get() != 0
+ }
+
+ pub(super) fn copy_source<'on_disk>(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
+ Ok(if self.has_copy_source() {
+ Some(read_hg_path(on_disk, self.copy_source)?)
+ } else {
+ None
+ })
+ }
+
+ pub(super) fn node_data(
+ &self,
+ ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
+ let entry = |state| {
+ dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
+ };
+
+ match self.state {
+ b'\0' => Ok(dirstate_map::NodeData::None),
+ b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
+ mtime: *self.data.as_timestamp(),
+ }),
+ b'n' => Ok(entry(EntryState::Normal)),
+ b'a' => Ok(entry(EntryState::Added)),
+ b'r' => Ok(entry(EntryState::Removed)),
+ b'm' => Ok(entry(EntryState::Merged)),
+ _ => Err(DirstateV2ParseError),
+ }
+ }
+
+ pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
+ if self.state == b'd' {
+ Some(self.data.as_timestamp())
+ } else {
+ None
+ }
+ }
+
+ pub(super) fn state(
+ &self,
+ ) -> Result<Option<EntryState>, DirstateV2ParseError> {
+ match self.state {
+ b'\0' | b'd' => Ok(None),
+ b'n' => Ok(Some(EntryState::Normal)),
+ b'a' => Ok(Some(EntryState::Added)),
+ b'r' => Ok(Some(EntryState::Removed)),
+ b'm' => Ok(Some(EntryState::Merged)),
+ _ => Err(DirstateV2ParseError),
+ }
+ }
+
+ fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
+ DirstateEntry {
+ state,
+ mode: self.data.mode.get(),
+ mtime: self.data.mtime.get(),
+ size: self.data.size.get(),
+ }
+ }
+
+ pub(super) fn entry(
+ &self,
+ ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
+ Ok(self
+ .state()?
+ .map(|state| self.entry_with_given_state(state)))
+ }
+
+ pub(super) fn children<'on_disk>(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
+ read_slice::<Node>(on_disk, self.children)
+ }
+
+ pub(super) fn to_in_memory_node<'on_disk>(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
+ Ok(dirstate_map::Node {
+ children: dirstate_map::ChildNodes::OnDisk(
+ self.children(on_disk)?,
+ ),
+ copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
+ data: self.node_data()?,
+ tracked_descendants_count: self.tracked_descendants_count.get(),
+ })
+ }
+}
+
+impl Entry {
+ fn from_timestamp(timestamp: Timestamp) -> Self {
+ // Safety: both types implement the `ByteCast` trait, so we could
+ // safely use `as_bytes` and `from_bytes` to do this conversion. Using
+ // `transmute` instead makes the compiler check that the two types
+ // have the same size, which eliminates the error case of
+ // `from_bytes`.
+ unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
+ }
+
+ fn as_timestamp(&self) -> &Timestamp {
+ // Safety: same as above in `from_timestamp`
+ unsafe { &*(self as *const Entry as *const Timestamp) }
+ }
+}
+
+impl Timestamp {
+ pub fn seconds(&self) -> i64 {
+ self.seconds.get()
+ }
+}
+
+impl From<SystemTime> for Timestamp {
+ fn from(system_time: SystemTime) -> Self {
+ let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
+ Ok(duration) => {
+ (duration.as_secs() as i64, duration.subsec_nanos())
+ }
+ Err(error) => {
+ let negative = error.duration();
+ (-(negative.as_secs() as i64), negative.subsec_nanos())
+ }
+ };
+ Timestamp {
+ seconds: secs.into(),
+ nanoseconds: nanos.into(),
+ }
+ }
+}
+
+impl From<&'_ Timestamp> for SystemTime {
+ fn from(timestamp: &'_ Timestamp) -> Self {
+ let secs = timestamp.seconds.get();
+ let nanos = timestamp.nanoseconds.get();
+ if secs >= 0 {
+ UNIX_EPOCH + Duration::new(secs as u64, nanos)
+ } else {
+ UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
+ }
+ }
+}
+
+fn read_hg_path(
+ on_disk: &[u8],
+ slice: Slice,
+) -> Result<&HgPath, DirstateV2ParseError> {
+ let bytes = read_slice::<u8>(on_disk, slice)?;
+ Ok(HgPath::new(bytes))
+}
+
+fn read_slice<T>(
+ on_disk: &[u8],
+ slice: Slice,
+) -> Result<&[T], DirstateV2ParseError>
+where
+ T: BytesCast,
+{
+ // Either conversion saturating to `usize::MAX` would result in an
+ // "out of bounds" error, since a single `&[u8]` cannot occupy the
+ // entire address space.
+ let start = usize::try_from(slice.start.get()).unwrap_or(std::usize::MAX);
+ let len = usize::try_from(slice.len.get()).unwrap_or(std::usize::MAX);
+ on_disk
+ .get(start..)
+ .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
+ .map(|(slice, _rest)| slice)
+ .ok_or_else(|| DirstateV2ParseError)
+}
+
+pub(crate) fn parse_dirstate_parents(
+ on_disk: &[u8],
+) -> Result<&DirstateParents, HgError> {
+ Ok(&read_header(on_disk)?.parents)
+}
+
+pub(crate) fn for_each_tracked_path<'on_disk>(
+ on_disk: &'on_disk [u8],
+ mut f: impl FnMut(&'on_disk HgPath),
+) -> Result<(), DirstateV2ParseError> {
+ let header = read_header(on_disk)?;
+ fn recur<'on_disk>(
+ on_disk: &'on_disk [u8],
+ nodes: Slice,
+ f: &mut impl FnMut(&'on_disk HgPath),
+ ) -> Result<(), DirstateV2ParseError> {
+ for node in read_slice::<Node>(on_disk, nodes)? {
+ if let Some(state) = node.state()? {
+ if state.is_tracked() {
+ f(node.full_path(on_disk)?)
+ }
+ }
+ recur(on_disk, node.children, f)?
+ }
+ Ok(())
+ }
+ recur(on_disk, header.root, &mut f)
+}
+
+pub(super) fn write(
+ dirstate_map: &mut DirstateMap,
+ parents: DirstateParents,
+) -> Result<Vec<u8>, DirstateError> {
+ let header_len = std::mem::size_of::<Header>();
+
+ // This ignores the space for paths, and for nodes without an entry.
+ // TODO: better estimate? Skip the `Vec` and write to a file directly?
+ let size_guess = header_len
+ + std::mem::size_of::<Node>()
+ * dirstate_map.nodes_with_entry_count as usize;
+ let mut out = Vec::with_capacity(size_guess);
+
+ // Keep space for the header. We’ll fill it out at the end when we know the
+ // actual offset for the root nodes.
+ out.resize(header_len, 0_u8);
+
+ let root =
+ write_nodes(dirstate_map, dirstate_map.root.as_ref(), &mut out)?;
+
+ let header = Header {
+ marker: *V2_FORMAT_MARKER,
+ parents,
+ root,
+ nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
+ nodes_with_copy_source_count: dirstate_map
+ .nodes_with_copy_source_count
+ .into(),
+ };
+ out[..header_len].copy_from_slice(header.as_bytes());
+ Ok(out)
+}
+
+fn write_nodes(
+ dirstate_map: &DirstateMap,
+ nodes: dirstate_map::ChildNodesRef,
+ out: &mut Vec<u8>,
+) -> Result<ChildNodes, DirstateError> {
+ // `dirstate_map::ChildNodes` is a `HashMap` with undefined iteration
+ // order. Sort to enable binary search in the written file.
+ let nodes = nodes.sorted();
+
+ // First accumulate serialized nodes in a `Vec`
+ let mut on_disk_nodes = Vec::with_capacity(nodes.len());
+ for node in nodes {
+ let children = write_nodes(
+ dirstate_map,
+ node.children(dirstate_map.on_disk)?,
+ out,
+ )?;
+ let full_path = node.full_path(dirstate_map.on_disk)?;
+ let full_path = write_slice::<u8>(full_path.as_bytes(), out);
+ let copy_source =
+ if let Some(source) = node.copy_source(dirstate_map.on_disk)? {
+ write_slice::<u8>(source.as_bytes(), out)
+ } else {
+ Slice {
+ start: 0.into(),
+ len: 0.into(),
+ }
+ };
+ on_disk_nodes.push(match node {
+ NodeRef::InMemory(path, node) => {
+ let (state, data) = match &node.data {
+ dirstate_map::NodeData::Entry(entry) => (
+ entry.state.into(),
+ Entry {
+ mode: entry.mode.into(),
+ mtime: entry.mtime.into(),
+ size: entry.size.into(),
+ },
+ ),
+ dirstate_map::NodeData::CachedDirectory { mtime } => {
+ (b'd', Entry::from_timestamp(*mtime))
+ }
+ dirstate_map::NodeData::None => (
+ b'\0',
+ Entry {
+ mode: 0.into(),
+ mtime: 0.into(),
+ size: 0.into(),
+ },
+ ),
+ };
+ Node {
+ children,
+ copy_source,
+ full_path,
+ base_name_start: u32::try_from(path.base_name_start())
+ // Could only panic for paths over 4 GiB
+ .expect("dirstate-v2 offset overflow")
+ .into(),
+ tracked_descendants_count: node
+ .tracked_descendants_count
+ .into(),
+ state,
+ data,
+ }
+ }
+ NodeRef::OnDisk(node) => Node {
+ children,
+ copy_source,
+ full_path,
+ ..*node
+ },
+ })
+ }
+ // … so we can write them contiguously
+ Ok(write_slice::<Node>(&on_disk_nodes, out))
+}
+
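+/// Appends the bytes of `slice` at the end of `out` and returns the
+/// `(offset, length)` record locating them in the final file.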
+fn write_slice<T>(slice: &[T], out: &mut Vec<u8>) -> Slice
+where
+ T: BytesCast,
+{
+ let start = u64::try_from(out.len())
+ // Could only panic on a 128-bit CPU with a dirstate over 16 EiB
+ .expect("dirstate-v2 offset overflow")
+ .into();
+ let len = u32::try_from(slice.len())
+ // Could only panic for paths over 4 GiB or nodes with over 4 billion
+ // child nodes
+ .expect("dirstate-v2 offset overflow")
+ .into();
+ out.extend(slice.as_bytes());
+ Slice { start, len }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/path_with_basename.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,187 @@
+use crate::utils::hg_path::HgPath;
+use std::borrow::{Borrow, Cow};
+
+/// Wraps `HgPath` or `HgPathBuf` to make it behave "as" its last path
+/// component, a.k.a. its base name (as in Python’s `os.path.basename`), but
+/// also allow recovering the full path.
+///
+/// "Behaving as" means that equality and comparison consider only the base
+/// name, and `std::borrow::Borrow` is implemented to return only the base
+/// name. This allows using the base name as a map key while still being able
+/// to recover the full path, in a single memory allocation.
+#[derive(Debug)]
+pub struct WithBasename<T> {
+ full_path: T,
+
+ /// The position after the last slash separator in `full_path`, or `0`
+ /// if there is no slash.
+ base_name_start: usize,
+}
+
+impl<T> WithBasename<T> {
+ pub fn full_path(&self) -> &T {
+ &self.full_path
+ }
+}
+
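+/// Returns the position just after the last slash in `full_path`, or `0`
+/// if there is no slash: for example, `5` for `"a/bb/c"` and `0` for
+/// `"a"`.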
+fn find_base_name_start(full_path: &HgPath) -> usize {
+ if let Some(last_slash_position) =
+ full_path.as_bytes().iter().rposition(|&byte| byte == b'/')
+ {
+ last_slash_position + 1
+ } else {
+ 0
+ }
+}
+
+impl<T: AsRef<HgPath>> WithBasename<T> {
+ pub fn new(full_path: T) -> Self {
+ Self {
+ base_name_start: find_base_name_start(full_path.as_ref()),
+ full_path,
+ }
+ }
+
+ pub fn from_raw_parts(full_path: T, base_name_start: usize) -> Self {
+ debug_assert_eq!(
+ base_name_start,
+ find_base_name_start(full_path.as_ref())
+ );
+ Self {
+ base_name_start,
+ full_path,
+ }
+ }
+
+ pub fn base_name(&self) -> &HgPath {
+ HgPath::new(
+ &self.full_path.as_ref().as_bytes()[self.base_name_start..],
+ )
+ }
+
+ pub fn base_name_start(&self) -> usize {
+ self.base_name_start
+ }
+}
+
+impl<T: AsRef<HgPath>> Borrow<HgPath> for WithBasename<T> {
+ fn borrow(&self) -> &HgPath {
+ self.base_name()
+ }
+}
+
+impl<T: AsRef<HgPath>> std::hash::Hash for WithBasename<T> {
+ fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) {
+ self.base_name().hash(hasher)
+ }
+}
+
+impl<T: AsRef<HgPath> + PartialEq> PartialEq for WithBasename<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.base_name() == other.base_name()
+ }
+}
+
+impl<T: AsRef<HgPath> + Eq> Eq for WithBasename<T> {}
+
+impl<T: AsRef<HgPath> + PartialOrd> PartialOrd for WithBasename<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ self.base_name().partial_cmp(other.base_name())
+ }
+}
+
+impl<T: AsRef<HgPath> + Ord> Ord for WithBasename<T> {
+ fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ self.base_name().cmp(other.base_name())
+ }
+}
+
+impl<'a> WithBasename<&'a HgPath> {
+ pub fn to_cow_borrowed(self) -> WithBasename<Cow<'a, HgPath>> {
+ WithBasename {
+ full_path: Cow::Borrowed(self.full_path),
+ base_name_start: self.base_name_start,
+ }
+ }
+
+ pub fn to_cow_owned<'b>(self) -> WithBasename<Cow<'b, HgPath>> {
+ WithBasename {
+ full_path: Cow::Owned(self.full_path.to_owned()),
+ base_name_start: self.base_name_start,
+ }
+ }
+}
+
+impl<'a> WithBasename<&'a HgPath> {
+ /// Returns an iterator of `WithBasename<&HgPath>` for the ancestor
+ /// directory paths of the given `path`, as well as `path` itself.
+ ///
+ /// For example, the full paths of inclusive ancestors of "a/b/c" are "a",
+ /// "a/b", and "a/b/c" in that order.
+ pub fn inclusive_ancestors_of(
+ path: &'a HgPath,
+ ) -> impl Iterator<Item = WithBasename<&'a HgPath>> {
+ let mut slash_positions =
+ path.as_bytes().iter().enumerate().filter_map(|(i, &byte)| {
+ if byte == b'/' {
+ Some(i)
+ } else {
+ None
+ }
+ });
+ let mut opt_next_component_start = Some(0);
+ std::iter::from_fn(move || {
+ opt_next_component_start.take().map(|next_component_start| {
+ if let Some(slash_pos) = slash_positions.next() {
+ opt_next_component_start = Some(slash_pos + 1);
+ Self {
+ full_path: HgPath::new(&path.as_bytes()[..slash_pos]),
+ base_name_start: next_component_start,
+ }
+ } else {
+ // Not setting `opt_next_component_start` here: there will
+ // be no iteration after this one because `.take()` set it
+ // to `None`.
+ Self {
+ full_path: path,
+ base_name_start: next_component_start,
+ }
+ }
+ })
+ })
+ }
+}
+
+#[test]
+fn test() {
+ let a = WithBasename::new(HgPath::new("a").to_owned());
+ assert_eq!(&**a.full_path(), HgPath::new(b"a"));
+ assert_eq!(a.base_name(), HgPath::new(b"a"));
+
+ let cba = WithBasename::new(HgPath::new("c/b/a").to_owned());
+ assert_eq!(&**cba.full_path(), HgPath::new(b"c/b/a"));
+ assert_eq!(cba.base_name(), HgPath::new(b"a"));
+
+ assert_eq!(a, cba);
+ let borrowed: &HgPath = cba.borrow();
+ assert_eq!(borrowed, HgPath::new("a"));
+}
+
+#[test]
+fn test_inclusive_ancestors() {
+ let mut iter = WithBasename::inclusive_ancestors_of(HgPath::new("a/bb/c"));
+
+ let next = iter.next().unwrap();
+ assert_eq!(*next.full_path(), HgPath::new("a"));
+ assert_eq!(next.base_name(), HgPath::new("a"));
+
+ let next = iter.next().unwrap();
+ assert_eq!(*next.full_path(), HgPath::new("a/bb"));
+ assert_eq!(next.base_name(), HgPath::new("bb"));
+
+ let next = iter.next().unwrap();
+ assert_eq!(*next.full_path(), HgPath::new("a/bb/c"));
+ assert_eq!(next.base_name(), HgPath::new("c"));
+
+ assert!(iter.next().is_none());
+}
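
The point of the `Borrow<HgPath>` impl above is that a map keyed by `WithBasename` can be queried with a bare base name while each stored key still carries its full path. A self-contained sketch of the same trick, with `String`/`str` standing in for `HgPathBuf`/`HgPath`:

    use std::borrow::Borrow;
    use std::collections::HashMap;

    // Stand-in for WithBasename<String>: keyed on the part after the last '/'.
    struct WithBasename {
        full_path: String,
        base_name_start: usize,
    }

    impl WithBasename {
        fn new(full_path: String) -> Self {
            let base_name_start = full_path.rfind('/').map_or(0, |pos| pos + 1);
            Self { full_path, base_name_start }
        }
        fn base_name(&self) -> &str {
            &self.full_path[self.base_name_start..]
        }
    }

    // Hash, Eq and Borrow must all agree: they only ever see the base name.
    impl Borrow<str> for WithBasename {
        fn borrow(&self) -> &str {
            self.base_name()
        }
    }
    impl PartialEq for WithBasename {
        fn eq(&self, other: &Self) -> bool {
            self.base_name() == other.base_name()
        }
    }
    impl Eq for WithBasename {}
    impl std::hash::Hash for WithBasename {
        fn hash<H: std::hash::Hasher>(&self, h: &mut H) {
            self.base_name().hash(h)
        }
    }

    fn main() {
        let mut children: HashMap<WithBasename, u32> = HashMap::new();
        children.insert(WithBasename::new("dir/subdir/file.rs".into()), 1);
        // Look up by base name alone, without building a full path...
        assert_eq!(children.get("file.rs"), Some(&1));
        // ...while the stored key still remembers where it came from.
        let (key, _) = children.get_key_value("file.rs").unwrap();
        assert_eq!(key.full_path, "dir/subdir/file.rs");
    }
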
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/status.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,685 @@
+use crate::dirstate::status::IgnoreFnType;
+use crate::dirstate_tree::dirstate_map::BorrowedPath;
+use crate::dirstate_tree::dirstate_map::ChildNodesRef;
+use crate::dirstate_tree::dirstate_map::DirstateMap;
+use crate::dirstate_tree::dirstate_map::NodeData;
+use crate::dirstate_tree::dirstate_map::NodeRef;
+use crate::dirstate_tree::on_disk::DirstateV2ParseError;
+use crate::dirstate_tree::on_disk::Timestamp;
+use crate::dirstate_tree::path_with_basename::WithBasename;
+use crate::matchers::get_ignore_function;
+use crate::matchers::Matcher;
+use crate::utils::files::get_bytes_from_os_string;
+use crate::utils::files::get_path_from_bytes;
+use crate::utils::hg_path::HgPath;
+use crate::BadMatch;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::HgPathBuf;
+use crate::PatternFileWarning;
+use crate::StatusError;
+use crate::StatusOptions;
+use micro_timer::timed;
+use rayon::prelude::*;
+use std::borrow::Cow;
+use std::io;
+use std::path::Path;
+use std::path::PathBuf;
+use std::sync::Mutex;
+use std::time::SystemTime;
+
+/// Returns the status of the working directory compared to its parent
+/// changeset.
+///
+/// This algorithm is based on traversing the filesystem tree (`fs` in function
+/// and variable names) and dirstate tree at the same time. The core of this
+/// traversal is the recursive `traverse_fs_directory_and_dirstate` function
+/// and its use of `itertools::merge_join_by`. When reaching a path that only
+/// exists in one of the two trees, depending on information requested by
+/// `options` we may need to traverse the remaining subtree.
+#[timed]
+pub fn status<'tree, 'on_disk: 'tree>(
+ dmap: &'tree mut DirstateMap<'on_disk>,
+ matcher: &(dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+) -> Result<(DirstateStatus<'on_disk>, Vec<PatternFileWarning>), StatusError> {
+ let (ignore_fn, warnings): (IgnoreFnType, _) =
+ if options.list_ignored || options.list_unknown {
+ get_ignore_function(ignore_files, &root_dir)?
+ } else {
+ (Box::new(|&_| true), vec![])
+ };
+
+ let common = StatusCommon {
+ dmap,
+ options,
+ matcher,
+ ignore_fn,
+ outcome: Default::default(),
+ cached_directory_mtimes_to_add: Default::default(),
+ filesystem_time_at_status_start: filesystem_now(&root_dir).ok(),
+ };
+ let is_at_repo_root = true;
+ let hg_path = &BorrowedPath::OnDisk(HgPath::new(""));
+ let has_ignored_ancestor = false;
+ let root_cached_mtime = None;
+ let root_dir_metadata = None;
+ // If the path we have for the repository root is a symlink, do follow it.
+ // (As opposed to symlinks within the working directory which are not
+ // followed, using `std::fs::symlink_metadata`.)
+ common.traverse_fs_directory_and_dirstate(
+ has_ignored_ancestor,
+ dmap.root.as_ref(),
+ hg_path,
+ &root_dir,
+ root_dir_metadata,
+ root_cached_mtime,
+ is_at_repo_root,
+ )?;
+ let mut outcome = common.outcome.into_inner().unwrap();
+ let to_add = common.cached_directory_mtimes_to_add.into_inner().unwrap();
+ outcome.dirty = !to_add.is_empty();
+ for (path, mtime) in &to_add {
+ let node = DirstateMap::get_or_insert_node(
+ dmap.on_disk,
+ &mut dmap.root,
+ path,
+ WithBasename::to_cow_owned,
+ |_| {},
+ )?;
+ match &node.data {
+ NodeData::Entry(_) => {} // Don’t overwrite an entry
+ NodeData::CachedDirectory { .. } | NodeData::None => {
+ node.data = NodeData::CachedDirectory { mtime: *mtime }
+ }
+ }
+ }
+ Ok((outcome, warnings))
+}
+
+/// Bag of random things needed by various parts of the algorithm. Reduces the
+/// number of parameters passed to functions.
+struct StatusCommon<'a, 'tree, 'on_disk: 'tree> {
+ dmap: &'tree DirstateMap<'on_disk>,
+ options: StatusOptions,
+ matcher: &'a (dyn Matcher + Sync),
+ ignore_fn: IgnoreFnType<'a>,
+ outcome: Mutex<DirstateStatus<'on_disk>>,
+ cached_directory_mtimes_to_add:
+ Mutex<Vec<(Cow<'on_disk, HgPath>, Timestamp)>>,
+
+ /// The current time at the start of the `status()` algorithm, as measured
+ /// and possibly truncated by the filesystem.
+ filesystem_time_at_status_start: Option<SystemTime>,
+}
+
+impl<'a, 'tree, 'on_disk> StatusCommon<'a, 'tree, 'on_disk> {
+ fn read_dir(
+ &self,
+ hg_path: &HgPath,
+ fs_path: &Path,
+ is_at_repo_root: bool,
+ ) -> Result<Vec<DirEntry>, ()> {
+ DirEntry::read_dir(fs_path, is_at_repo_root)
+ .map_err(|error| self.io_error(error, hg_path))
+ }
+
+ fn io_error(&self, error: std::io::Error, hg_path: &HgPath) {
+ let errno = error.raw_os_error().expect("expected real OS error");
+ self.outcome
+ .lock()
+ .unwrap()
+ .bad
+ .push((hg_path.to_owned().into(), BadMatch::OsError(errno)))
+ }
+
+ /// If this returns true, we can get accurate results by only using
+ /// `symlink_metadata` for child nodes that exist in the dirstate,
+ /// without needing to call `read_dir`.
+ fn can_skip_fs_readdir(
+ &self,
+ directory_metadata: Option<&std::fs::Metadata>,
+ cached_directory_mtime: Option<&Timestamp>,
+ ) -> bool {
+ if !self.options.list_unknown && !self.options.list_ignored {
+ // All states that we care about listing have corresponding
+ // dirstate entries.
+ // This happens for example with `hg status -mard`.
+ return true;
+ }
+ if let Some(cached_mtime) = cached_directory_mtime {
+ // The dirstate contains a cached mtime for this directory, set by
+ // a previous run of the `status` algorithm which found this
+ // directory eligible for `read_dir` caching.
+ if let Some(meta) = directory_metadata {
+ if let Ok(current_mtime) = meta.modified() {
+ if current_mtime == cached_mtime.into() {
+ // The mtime of that directory has not changed since
+ // then, which means that the results of `read_dir`
+ // should also be unchanged.
+ return true;
+ }
+ }
+ }
+ }
+ false
+ }
+
+ /// Returns whether the filesystem directory was found to have any entry
+ /// that does not have a corresponding dirstate tree node.
+ fn traverse_fs_directory_and_dirstate(
+ &self,
+ has_ignored_ancestor: bool,
+ dirstate_nodes: ChildNodesRef<'tree, 'on_disk>,
+ directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
+ directory_fs_path: &Path,
+ directory_metadata: Option<&std::fs::Metadata>,
+ cached_directory_mtime: Option<&Timestamp>,
+ is_at_repo_root: bool,
+ ) -> Result<bool, DirstateV2ParseError> {
+ if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
+ {
+ dirstate_nodes
+ .par_iter()
+ .map(|dirstate_node| {
+ let fs_path = directory_fs_path.join(get_path_from_bytes(
+ dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
+ ));
+ match std::fs::symlink_metadata(&fs_path) {
+ Ok(fs_metadata) => self.traverse_fs_and_dirstate(
+ &fs_path,
+ &fs_metadata,
+ dirstate_node,
+ has_ignored_ancestor,
+ ),
+ Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
+ self.traverse_dirstate_only(dirstate_node)
+ }
+ Err(error) => {
+ let hg_path =
+ dirstate_node.full_path(self.dmap.on_disk)?;
+ Ok(self.io_error(error, hg_path))
+ }
+ }
+ })
+ .collect::<Result<_, _>>()?;
+
+ // Conservatively don’t let the caller assume that there aren’t
+ // any, since we don’t know.
+ let directory_has_any_fs_only_entry = true;
+
+ return Ok(directory_has_any_fs_only_entry);
+ }
+
+ let mut fs_entries = if let Ok(entries) = self.read_dir(
+ directory_hg_path,
+ directory_fs_path,
+ is_at_repo_root,
+ ) {
+ entries
+ } else {
+ // Treat an unreadable directory (typically because of insufficient
+ // permissions) like an empty directory. `self.read_dir` has
+ // already called `self.io_error` so a warning will be emitted.
+ Vec::new()
+ };
+
+ // `merge_join_by` requires both its input iterators to be sorted:
+
+ let dirstate_nodes = dirstate_nodes.sorted();
+ // `sort_unstable_by_key` doesn’t allow keys borrowing from the value:
+ // https://github.com/rust-lang/rust/issues/34162
+ fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
+
+ // Propagate here any error that would happen inside the comparison
+ // callback below
+ for dirstate_node in &dirstate_nodes {
+ dirstate_node.base_name(self.dmap.on_disk)?;
+ }
+ itertools::merge_join_by(
+ dirstate_nodes,
+ &fs_entries,
+ |dirstate_node, fs_entry| {
+ // This `unwrap` never panics because we already propagated
+ // those errors above
+ dirstate_node
+ .base_name(self.dmap.on_disk)
+ .unwrap()
+ .cmp(&fs_entry.base_name)
+ },
+ )
+ .par_bridge()
+ .map(|pair| {
+ use itertools::EitherOrBoth::*;
+ let is_fs_only = pair.is_right();
+ match pair {
+ Both(dirstate_node, fs_entry) => self
+ .traverse_fs_and_dirstate(
+ &fs_entry.full_path,
+ &fs_entry.metadata,
+ dirstate_node,
+ has_ignored_ancestor,
+ )?,
+ Left(dirstate_node) => {
+ self.traverse_dirstate_only(dirstate_node)?
+ }
+ Right(fs_entry) => self.traverse_fs_only(
+ has_ignored_ancestor,
+ directory_hg_path,
+ fs_entry,
+ ),
+ }
+ Ok(is_fs_only)
+ })
+ .try_reduce(|| false, |a, b| Ok(a || b))
+ }
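
The `merge_join_by` call above is the heart of the traversal: it walks two sorted sequences in lockstep and classifies each name as present in both trees, dirstate-only, or filesystem-only. A minimal sketch with plain strings, assuming the `itertools` crate as used above:

    use itertools::EitherOrBoth::{Both, Left, Right};

    fn main() {
        // Both inputs must already be sorted by the comparison key.
        let dirstate = ["a", "b", "d"]; // names known to the dirstate
        let fs = ["b", "c", "d"]; // names found on the filesystem

        let pairs =
            itertools::merge_join_by(dirstate.iter(), fs.iter(), |d, f| d.cmp(f));
        for pair in pairs {
            match pair {
                // In both trees: compare metadata to classify the file.
                Both(name, _) => println!("{}: dirstate and filesystem", name),
                // Dirstate only: the file was removed or deleted.
                Left(name) => println!("{}: dirstate only", name),
                // Filesystem only: the file is unknown or ignored.
                Right(name) => println!("{}: filesystem only", name),
            }
        }
    }
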
+
+ fn traverse_fs_and_dirstate(
+ &self,
+ fs_path: &Path,
+ fs_metadata: &std::fs::Metadata,
+ dirstate_node: NodeRef<'tree, 'on_disk>,
+ has_ignored_ancestor: bool,
+ ) -> Result<(), DirstateV2ParseError> {
+ let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
+ let file_type = fs_metadata.file_type();
+ let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+ if !file_or_symlink {
+ // If we previously had a file here, it was removed (with
+ // `hg rm` or similar) or deleted before it could be
+ // replaced by a directory or something else.
+ self.mark_removed_or_deleted_if_file(
+ &hg_path,
+ dirstate_node.state()?,
+ );
+ }
+ if file_type.is_dir() {
+ if self.options.collect_traversed_dirs {
+ self.outcome
+ .lock()
+ .unwrap()
+ .traversed
+ .push(hg_path.detach_from_tree())
+ }
+ let is_ignored = has_ignored_ancestor || (self.ignore_fn)(hg_path);
+ let is_at_repo_root = false;
+ let directory_has_any_fs_only_entry = self
+ .traverse_fs_directory_and_dirstate(
+ is_ignored,
+ dirstate_node.children(self.dmap.on_disk)?,
+ hg_path,
+ fs_path,
+ Some(fs_metadata),
+ dirstate_node.cached_directory_mtime(),
+ is_at_repo_root,
+ )?;
+ self.maybe_save_directory_mtime(
+ directory_has_any_fs_only_entry,
+ fs_metadata,
+ dirstate_node,
+ )?
+ } else {
+ if file_or_symlink && self.matcher.matches(hg_path) {
+ if let Some(state) = dirstate_node.state()? {
+ match state {
+ EntryState::Added => self
+ .outcome
+ .lock()
+ .unwrap()
+ .added
+ .push(hg_path.detach_from_tree()),
+ EntryState::Removed => self
+ .outcome
+ .lock()
+ .unwrap()
+ .removed
+ .push(hg_path.detach_from_tree()),
+ EntryState::Merged => self
+ .outcome
+ .lock()
+ .unwrap()
+ .modified
+ .push(hg_path.detach_from_tree()),
+ EntryState::Normal => self
+ .handle_normal_file(&dirstate_node, fs_metadata)?,
+ // This variant is not used in DirstateMap nodes
+ EntryState::Unknown => unreachable!(),
+ }
+ } else {
+ // `node.entry.is_none()` indicates a "directory"
+ // node, but the filesystem has a file
+ self.mark_unknown_or_ignored(has_ignored_ancestor, hg_path)
+ }
+ }
+
+ for child_node in dirstate_node.children(self.dmap.on_disk)?.iter()
+ {
+ self.traverse_dirstate_only(child_node)?
+ }
+ }
+ Ok(())
+ }
+
+ fn maybe_save_directory_mtime(
+ &self,
+ directory_has_any_fs_only_entry: bool,
+ directory_metadata: &std::fs::Metadata,
+ dirstate_node: NodeRef<'tree, 'on_disk>,
+ ) -> Result<(), DirstateV2ParseError> {
+ if !directory_has_any_fs_only_entry {
+ // All filesystem directory entries from `read_dir` have a
+ // corresponding node in the dirstate, so we can reconstitute the
+ // names of those entries without calling `read_dir` again.
+ if let (Some(status_start), Ok(directory_mtime)) = (
+ &self.filesystem_time_at_status_start,
+ directory_metadata.modified(),
+ ) {
+ // Although the Rust standard library’s `SystemTime` type
+ // has nanosecond precision, the times reported for a
+ // directory’s (or file’s) modified time may have lower
+ // resolution based on the filesystem (for example ext3
+ // only stores integer seconds), kernel (see
+ // https://stackoverflow.com/a/14393315/1162888), etc.
+ if &directory_mtime >= status_start {
+ // The directory was modified too recently, don’t cache its
+ // `read_dir` results.
+ //
+ // A timeline like this is possible:
+ //
+ // 1. A change to this directory (a direct child was
+ // added or removed) causes its mtime to be set
+ // (possibly truncated) to `directory_mtime`
+ // 2. This `status` algorithm calls `read_dir`
+ // 3. Another change is made to the same directory, such
+ // that calling `read_dir` again would give different
+ // results, but soon enough after 1. that the mtime
+ // stays the same
+ //
+ // On a system where the time resolution is poor, this
+ // scenario is not unlikely if all three steps are caused
+ // by the same script.
+ } else {
+ // We’ve observed (through `status_start`) that time has
+ // “progressed” since `directory_mtime`, so any further
+ // change to this directory is extremely likely to cause a
+ // different mtime.
+ //
+ // Having the same mtime again is not entirely impossible
+ // since the system clock is not monotonic. It could jump
+ // backward to some point before `directory_mtime`, then a
+ // directory change could potentially happen during exactly
+ // the wrong tick.
+ //
+ // We deem this scenario (unlike the previous one) to be
+ // unlikely enough in practice.
+ let timestamp = directory_mtime.into();
+ let cached = dirstate_node.cached_directory_mtime();
+ if cached != Some(&timestamp) {
+ let hg_path = dirstate_node
+ .full_path_borrowed(self.dmap.on_disk)?
+ .detach_from_tree();
+ self.cached_directory_mtimes_to_add
+ .lock()
+ .unwrap()
+ .push((hg_path, timestamp))
+ }
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// A file with `EntryState::Normal` in the dirstate was found in the
+ /// filesystem
+ fn handle_normal_file(
+ &self,
+ dirstate_node: &NodeRef<'tree, 'on_disk>,
+ fs_metadata: &std::fs::Metadata,
+ ) -> Result<(), DirstateV2ParseError> {
+ // Keep the low 31 bits
+ fn truncate_u64(value: u64) -> i32 {
+ (value & 0x7FFF_FFFF) as i32
+ }
+ fn truncate_i64(value: i64) -> i32 {
+ (value & 0x7FFF_FFFF) as i32
+ }
+
+ let entry = dirstate_node
+ .entry()?
+ .expect("handle_normal_file called with entry-less node");
+ let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
+ let mode_changed =
+ || self.options.check_exec && entry.mode_changed(fs_metadata);
+ let size_changed = entry.size != truncate_u64(fs_metadata.len());
+ if entry.size >= 0
+ && size_changed
+ && fs_metadata.file_type().is_symlink()
+ {
+ // issue6456: Size returned may be longer due to encryption
+ // on ext4 fscrypt. TODO: maybe only do it on ext4?
+ self.outcome
+ .lock()
+ .unwrap()
+ .unsure
+ .push(hg_path.detach_from_tree())
+ } else if dirstate_node.has_copy_source()
+ || entry.is_from_other_parent()
+ || (entry.size >= 0 && (size_changed || mode_changed()))
+ {
+ self.outcome
+ .lock()
+ .unwrap()
+ .modified
+ .push(hg_path.detach_from_tree())
+ } else {
+ let mtime = mtime_seconds(fs_metadata);
+ if truncate_i64(mtime) != entry.mtime
+ || mtime == self.options.last_normal_time
+ {
+ self.outcome
+ .lock()
+ .unwrap()
+ .unsure
+ .push(hg_path.detach_from_tree())
+ } else if self.options.list_clean {
+ self.outcome
+ .lock()
+ .unwrap()
+ .clean
+ .push(hg_path.detach_from_tree())
+ }
+ }
+ Ok(())
+ }
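
The `truncate_u64`/`truncate_i64` helpers mirror how dirstate v1 stores size and mtime in 32-bit fields, where negative values are reserved as markers (hence the `entry.size >= 0` guards above). A small sketch of the masking arithmetic:

    // Keep the low 31 bits, as the helpers above do. The result is always
    // non-negative, so it cannot collide with the negative marker values
    // that dirstate v1 reserves in these fields.
    fn truncate_u64(value: u64) -> i32 {
        (value & 0x7FFF_FFFF) as i32
    }

    fn main() {
        // Small values survive unchanged...
        assert_eq!(truncate_u64(1234), 1234);
        // ...file sizes past 2 GiB wrap around within 31 bits...
        assert_eq!(truncate_u64(0x8000_0001), 1);
        // ...and even u64::MAX truncates to a non-negative value.
        assert_eq!(truncate_u64(u64::MAX), 0x7FFF_FFFF);
    }
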
+
+ /// A node in the dirstate tree has no corresponding filesystem entry
+ fn traverse_dirstate_only(
+ &self,
+ dirstate_node: NodeRef<'tree, 'on_disk>,
+ ) -> Result<(), DirstateV2ParseError> {
+ self.mark_removed_or_deleted_if_file(
+ &dirstate_node.full_path_borrowed(self.dmap.on_disk)?,
+ dirstate_node.state()?,
+ );
+ dirstate_node
+ .children(self.dmap.on_disk)?
+ .par_iter()
+ .map(|child_node| self.traverse_dirstate_only(child_node))
+ .collect()
+ }
+
+ /// A node in the dirstate tree has no corresponding *file* on the
+ /// filesystem
+ ///
+ /// Does nothing on a "directory" node
+ fn mark_removed_or_deleted_if_file(
+ &self,
+ hg_path: &BorrowedPath<'tree, 'on_disk>,
+ dirstate_node_state: Option<EntryState>,
+ ) {
+ if let Some(state) = dirstate_node_state {
+ if self.matcher.matches(hg_path) {
+ if let EntryState::Removed = state {
+ self.outcome
+ .lock()
+ .unwrap()
+ .removed
+ .push(hg_path.detach_from_tree())
+ } else {
+ self.outcome
+ .lock()
+ .unwrap()
+ .deleted
+ .push(hg_path.detach_from_tree())
+ }
+ }
+ }
+ }
+
+ /// Something in the filesystem has no corresponding dirstate node
+ fn traverse_fs_only(
+ &self,
+ has_ignored_ancestor: bool,
+ directory_hg_path: &HgPath,
+ fs_entry: &DirEntry,
+ ) {
+ let hg_path = directory_hg_path.join(&fs_entry.base_name);
+ let file_type = fs_entry.metadata.file_type();
+ let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+ if file_type.is_dir() {
+ let is_ignored =
+ has_ignored_ancestor || (self.ignore_fn)(&hg_path);
+ let traverse_children = if is_ignored {
+ // Descendants of an ignored directory are all ignored
+ self.options.list_ignored
+ } else {
+ // Descendants of an unknown directory may be either unknown or
+ // ignored
+ self.options.list_unknown || self.options.list_ignored
+ };
+ if traverse_children {
+ let is_at_repo_root = false;
+ if let Ok(children_fs_entries) = self.read_dir(
+ &hg_path,
+ &fs_entry.full_path,
+ is_at_repo_root,
+ ) {
+ children_fs_entries.par_iter().for_each(|child_fs_entry| {
+ self.traverse_fs_only(
+ is_ignored,
+ &hg_path,
+ child_fs_entry,
+ )
+ })
+ }
+ }
+ if self.options.collect_traversed_dirs {
+ self.outcome.lock().unwrap().traversed.push(hg_path.into())
+ }
+ } else if file_or_symlink && self.matcher.matches(&hg_path) {
+ self.mark_unknown_or_ignored(
+ has_ignored_ancestor,
+ &BorrowedPath::InMemory(&hg_path),
+ )
+ }
+ }
+
+ fn mark_unknown_or_ignored(
+ &self,
+ has_ignored_ancestor: bool,
+ hg_path: &BorrowedPath<'_, 'on_disk>,
+ ) {
+ let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
+ if is_ignored {
+ if self.options.list_ignored {
+ self.outcome
+ .lock()
+ .unwrap()
+ .ignored
+ .push(hg_path.detach_from_tree())
+ }
+ } else {
+ if self.options.list_unknown {
+ self.outcome
+ .lock()
+ .unwrap()
+ .unknown
+ .push(hg_path.detach_from_tree())
+ }
+ }
+ }
+}
+
+#[cfg(unix)] // TODO
+fn mtime_seconds(metadata: &std::fs::Metadata) -> i64 {
+ // Going through `Metadata::modified()` would be portable, but would go
+ // to the trouble of constructing a `SystemTime` value with sub-second
+ // precision just for us to throw that away here.
+ use std::os::unix::fs::MetadataExt;
+ metadata.mtime()
+}
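
For reference, a hedged sketch of what the portable fallback alluded to in that comment could look like, going through `Metadata::modified()` and truncating to whole seconds. This is an assumption about a possible non-unix branch, not code from the patch:

    use std::fs;
    use std::time::UNIX_EPOCH;

    // Hypothetical portable variant: derive whole seconds from
    // Metadata::modified(), paying for a full SystemTime only to
    // truncate it, which is exactly what the unix path avoids.
    fn mtime_seconds_portable(metadata: &fs::Metadata) -> i64 {
        match metadata.modified() {
            Ok(mtime) => match mtime.duration_since(UNIX_EPOCH) {
                Ok(d) => d.as_secs() as i64,
                // A modification time before the epoch comes back as an error.
                Err(e) => -(e.duration().as_secs() as i64),
            },
            Err(_) => 0, // platform with no mtime support at all
        }
    }

    fn main() -> std::io::Result<()> {
        let metadata = fs::metadata(".")?;
        println!("mtime: {}s since the epoch", mtime_seconds_portable(&metadata));
        Ok(())
    }
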
+
+struct DirEntry {
+ base_name: HgPathBuf,
+ full_path: PathBuf,
+ metadata: std::fs::Metadata,
+}
+
+impl DirEntry {
+ /// Returns **unsorted** entries in the given directory, with name and
+ /// metadata.
+ ///
+ /// If a `.hg` sub-directory is encountered:
+ ///
+ /// * At the repository root, ignore that sub-directory
+ /// * Elsewhere, we’re listing the content of a sub-repo. Return an empty
+ /// list instead.
+ fn read_dir(path: &Path, is_at_repo_root: bool) -> io::Result<Vec<Self>> {
+ let mut results = Vec::new();
+ for entry in path.read_dir()? {
+ let entry = entry?;
+ let metadata = entry.metadata()?;
+ let name = get_bytes_from_os_string(entry.file_name());
+ // FIXME don't do this when cached
+ if name == b".hg" {
+ if is_at_repo_root {
+ // Skip the repo’s own .hg (might be a symlink)
+ continue;
+ } else if metadata.is_dir() {
+ // A .hg sub-directory at another location means a subrepo,
+ // skip it entirely.
+ return Ok(Vec::new());
+ }
+ }
+ results.push(DirEntry {
+ base_name: name.into(),
+ full_path: entry.path(),
+ metadata,
+ })
+ }
+ Ok(results)
+ }
+}
+
+/// Return the `mtime` of a temporary file newly-created in the `.hg` directory
+/// of the given repository.
+///
+/// This is similar to `SystemTime::now()`, with the result truncated to the
+/// same time resolution as other files’ modification times. Using `.hg`
+/// instead of the system’s default temporary directory (such as `/tmp`) makes
+/// it more likely the temporary file is in the same disk partition as contents
+/// of the working directory, which can matter since different filesystems may
+/// store timestamps with different resolutions.
+///
+/// This may fail, typically if we lack write permissions. In that case we
+/// should continue the `status()` algorithm anyway and consider the current
+/// date/time to be unknown.
+fn filesystem_now(repo_root: &Path) -> Result<SystemTime, io::Error> {
+ tempfile::tempfile_in(repo_root.join(".hg"))?
+ .metadata()?
+ .modified()
+}
--- a/rust/hg-core/src/filepatterns.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/filepatterns.rs Mon Jun 07 17:10:35 2021 -0400
@@ -41,7 +41,7 @@
/// Appended to the regexp of globs
const GLOB_SUFFIX: &[u8; 7] = b"(?:/|$)";
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PatternSyntax {
/// A regular expression
Regexp,
@@ -65,6 +65,14 @@
Include,
/// A file of patterns to match against files under the same directory
SubInclude,
+ /// SubInclude with the result of parsing the included file
+ ///
+ /// Note: there is no ExpandedInclude because that expansion can be done
+ /// in place by replacing the Include pattern by the included patterns.
+ /// SubInclude requires more handling.
+ ///
+ /// Note: `Box` is used to minimize size impact on other enum variants
+ ExpandedSubInclude(Box<SubInclude>),
}
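
The note about `Box` keeping the enum small can be checked directly with `std::mem::size_of`. A sketch with illustrative stand-in types, not the real `PatternSyntax`/`SubInclude`:

    use std::mem::size_of;

    // Stand-in for the payload carried by ExpandedSubInclude.
    struct BigPayload {
        _a: [u64; 8],
    }

    enum Inline {
        Empty,
        Expanded(BigPayload), // every variant pays for the largest one
    }

    enum Boxed {
        Empty,
        Expanded(Box<BigPayload>), // payload behind one pointer
    }

    fn main() {
        // The inline enum is at least as big as its payload; the boxed one
        // is pointer-sized (the Box's non-null niche stores the tag).
        println!("inline: {} bytes", size_of::<Inline>());
        println!("boxed:  {} bytes", size_of::<Boxed>());
        assert!(size_of::<Boxed>() < size_of::<Inline>());
    }
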
/// Transforms a glob pattern into a regex
@@ -218,7 +226,9 @@
PatternSyntax::Glob | PatternSyntax::RootGlob => {
[glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
}
- PatternSyntax::Include | PatternSyntax::SubInclude => unreachable!(),
+ PatternSyntax::Include
+ | PatternSyntax::SubInclude
+ | PatternSyntax::ExpandedSubInclude(_) => unreachable!(),
}
}
@@ -318,9 +328,9 @@
NoSuchFile(PathBuf),
}
-pub fn parse_pattern_file_contents<P: AsRef<Path>>(
+pub fn parse_pattern_file_contents(
lines: &[u8],
- file_path: P,
+ file_path: &Path,
warn: bool,
) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
let comment_regex = Regex::new(r"((?:^|[^\\])(?:\\\\)*)#.*").unwrap();
@@ -357,7 +367,7 @@
current_syntax = rel_syntax;
} else if warn {
warnings.push(PatternFileWarning::InvalidSyntax(
- file_path.as_ref().to_owned(),
+ file_path.to_owned(),
syntax.to_owned(),
));
}
@@ -384,32 +394,30 @@
PatternError::UnsupportedSyntax(syntax) => {
PatternError::UnsupportedSyntaxInFile(
syntax,
- file_path.as_ref().to_string_lossy().into(),
+ file_path.to_string_lossy().into(),
line_number,
)
}
_ => e,
})?,
&line,
- &file_path,
+ file_path,
));
}
Ok((inputs, warnings))
}
-pub fn read_pattern_file<P: AsRef<Path>>(
- file_path: P,
+pub fn read_pattern_file(
+ file_path: &Path,
warn: bool,
) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
- let mut f = match File::open(file_path.as_ref()) {
+ let mut f = match File::open(file_path) {
Ok(f) => Ok(f),
Err(e) => match e.kind() {
std::io::ErrorKind::NotFound => {
return Ok((
vec![],
- vec![PatternFileWarning::NoSuchFile(
- file_path.as_ref().to_owned(),
- )],
+ vec![PatternFileWarning::NoSuchFile(file_path.to_owned())],
))
}
_ => Err(e),
@@ -431,15 +439,11 @@
}
impl IgnorePattern {
- pub fn new(
- syntax: PatternSyntax,
- pattern: &[u8],
- source: impl AsRef<Path>,
- ) -> Self {
+ pub fn new(syntax: PatternSyntax, pattern: &[u8], source: &Path) -> Self {
Self {
syntax,
pattern: pattern.to_owned(),
- source: source.as_ref().to_owned(),
+ source: source.to_owned(),
}
}
}
@@ -447,32 +451,47 @@
pub type PatternResult<T> = Result<T, PatternError>;
/// Wrapper for `read_pattern_file` that also recursively expands `include:`
-/// patterns.
+/// and `subinclude:` patterns.
///
-/// `subinclude:` is not treated as a special pattern here: unraveling them
-/// needs to occur in the "ignore" phase.
+/// The former are expanded in place, while `PatternSyntax::ExpandedSubInclude`
+/// is used for the latter to form a tree of patterns.
pub fn get_patterns_from_file(
- pattern_file: impl AsRef<Path>,
- root_dir: impl AsRef<Path>,
+ pattern_file: &Path,
+ root_dir: &Path,
) -> PatternResult<(Vec<IgnorePattern>, Vec<PatternFileWarning>)> {
- let (patterns, mut warnings) = read_pattern_file(&pattern_file, true)?;
+ let (patterns, mut warnings) = read_pattern_file(pattern_file, true)?;
let patterns = patterns
.into_iter()
.flat_map(|entry| -> PatternResult<_> {
- let IgnorePattern {
- syntax, pattern, ..
- } = &entry;
- Ok(match syntax {
+ Ok(match &entry.syntax {
PatternSyntax::Include => {
let inner_include =
- root_dir.as_ref().join(get_path_from_bytes(&pattern));
- let (inner_pats, inner_warnings) = get_patterns_from_file(
- &inner_include,
- root_dir.as_ref(),
- )?;
+ root_dir.join(get_path_from_bytes(&entry.pattern));
+ let (inner_pats, inner_warnings) =
+ get_patterns_from_file(&inner_include, root_dir)?;
warnings.extend(inner_warnings);
inner_pats
}
+ PatternSyntax::SubInclude => {
+ let mut sub_include = SubInclude::new(
+ &root_dir,
+ &entry.pattern,
+ &entry.source,
+ )?;
+ let (inner_patterns, inner_warnings) =
+ get_patterns_from_file(
+ &sub_include.path,
+ &sub_include.root,
+ )?;
+ sub_include.included_patterns = inner_patterns;
+ warnings.extend(inner_warnings);
+ vec![IgnorePattern {
+ syntax: PatternSyntax::ExpandedSubInclude(Box::new(
+ sub_include,
+ )),
+ ..entry
+ }]
+ }
_ => vec![entry],
})
})
@@ -483,6 +502,7 @@
}
/// Holds all the information needed to handle a `subinclude:` pattern.
+#[derive(Debug, PartialEq, Eq, Clone)]
pub struct SubInclude {
/// Will be used for repository (hg) paths that start with this prefix.
/// It is relative to the current working directory, so comparing against
@@ -492,13 +512,15 @@
pub path: PathBuf,
/// Folder in the filesystem where this applies
pub root: PathBuf,
+
+ pub included_patterns: Vec<IgnorePattern>,
}
impl SubInclude {
pub fn new(
- root_dir: impl AsRef<Path>,
+ root_dir: &Path,
pattern: &[u8],
- source: impl AsRef<Path>,
+ source: &Path,
) -> Result<SubInclude, HgPathError> {
let normalized_source =
normalize_path_bytes(&get_bytes_from_path(source));
@@ -510,7 +532,7 @@
let path = source_root.join(get_path_from_bytes(pattern));
let new_root = path.parent().unwrap_or_else(|| path.deref());
- let prefix = canonical_path(&root_dir, &root_dir, new_root)?;
+ let prefix = canonical_path(root_dir, root_dir, new_root)?;
Ok(Self {
prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
@@ -521,6 +543,7 @@
})?,
path: path.to_owned(),
root: new_root.to_owned(),
+ included_patterns: Vec::new(),
})
}
}
@@ -528,22 +551,17 @@
/// Separate and pre-process subincludes from other patterns for the "ignore"
/// phase.
pub fn filter_subincludes(
- ignore_patterns: &[IgnorePattern],
- root_dir: impl AsRef<Path>,
-) -> Result<(Vec<SubInclude>, Vec<&IgnorePattern>), HgPathError> {
+ ignore_patterns: Vec<IgnorePattern>,
+) -> Result<(Vec<Box<SubInclude>>, Vec<IgnorePattern>), HgPathError> {
let mut subincludes = vec![];
let mut others = vec![];
- for ignore_pattern in ignore_patterns.iter() {
- let IgnorePattern {
- syntax,
- pattern,
- source,
- } = ignore_pattern;
- if *syntax == PatternSyntax::SubInclude {
- subincludes.push(SubInclude::new(&root_dir, pattern, &source)?);
+ for pattern in ignore_patterns {
+ if let PatternSyntax::ExpandedSubInclude(sub_include) = pattern.syntax
+ {
+ subincludes.push(sub_include);
} else {
- others.push(ignore_pattern)
+ others.push(pattern)
}
}
Ok((subincludes, others))
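
`filter_subincludes` now consumes its input, so each boxed `SubInclude` is moved out of its variant rather than borrowed and re-parsed. A self-contained sketch of that partition-by-variant idiom, with stand-in types:

    // Stand-ins for SubInclude / PatternSyntax; only the shape matters.
    struct Sub {
        name: String,
    }

    enum Syntax {
        Glob(String),
        ExpandedSubInclude(Box<Sub>),
    }

    // Consume the vector and move each boxed payload out of its variant,
    // without cloning, as the new filter_subincludes does.
    fn partition(patterns: Vec<Syntax>) -> (Vec<Box<Sub>>, Vec<Syntax>) {
        let mut subincludes = Vec::new();
        let mut others = Vec::new();
        for pattern in patterns {
            if let Syntax::ExpandedSubInclude(sub) = pattern {
                subincludes.push(sub);
            } else {
                others.push(pattern);
            }
        }
        (subincludes, others)
    }

    fn main() {
        let input = vec![
            Syntax::Glob("*.rs".into()),
            Syntax::ExpandedSubInclude(Box::new(Sub { name: "sub/.hgignore".into() })),
        ];
        let (subs, others) = partition(input);
        assert_eq!(subs.len(), 1);
        assert_eq!(subs[0].name, "sub/.hgignore");
        assert_eq!(others.len(), 1);
    }
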
--- a/rust/hg-core/src/lib.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/lib.rs Mon Jun 07 17:10:35 2021 -0400
@@ -8,7 +8,8 @@
pub mod dagops;
pub mod errors;
pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
-mod dirstate;
+pub mod dirstate;
+pub mod dirstate_tree;
pub mod discovery;
pub mod requirements;
pub mod testing; // unconditionally built, for use from integration tests
@@ -82,6 +83,15 @@
Common(errors::HgError),
}
+impl fmt::Display for DirstateError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ DirstateError::Map(error) => error.fmt(f),
+ DirstateError::Common(error) => error.fmt(f),
+ }
+ }
+}
+
#[derive(Debug, derive_more::From)]
pub enum PatternError {
#[from]
--- a/rust/hg-core/src/matchers.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/matchers.rs Mon Jun 07 17:10:35 2021 -0400
@@ -11,7 +11,7 @@
dirstate::dirs_multiset::DirsChildrenMultiset,
filepatterns::{
build_single_regex, filter_subincludes, get_patterns_from_file,
- PatternFileWarning, PatternResult, SubInclude,
+ PatternFileWarning, PatternResult,
},
utils::{
files::find_dirs,
@@ -237,7 +237,7 @@
/// ///
/// let ignore_patterns =
/// vec![IgnorePattern::new(PatternSyntax::RootGlob, b"this*", Path::new(""))];
-/// let (matcher, _) = IncludeMatcher::new(ignore_patterns, "").unwrap();
+/// let matcher = IncludeMatcher::new(ignore_patterns).unwrap();
/// ///
/// assert_eq!(matcher.matches(HgPath::new(b"testing")), false);
/// assert_eq!(matcher.matches(HgPath::new(b"this should work")), true);
@@ -341,8 +341,8 @@
/// Returns the regex pattern and a function that matches an `HgPath` against
/// said regex formed by the given ignore patterns.
-fn build_regex_match<'a>(
- ignore_patterns: &'a [&'a IgnorePattern],
+fn build_regex_match(
+ ignore_patterns: &[IgnorePattern],
) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + Sync>)> {
let mut regexps = vec![];
let mut exact_set = HashSet::new();
@@ -478,32 +478,25 @@
/// Returns a function that checks whether a given file (in the general sense)
/// should be matched.
fn build_match<'a, 'b>(
- ignore_patterns: &'a [IgnorePattern],
- root_dir: impl AsRef<Path>,
-) -> PatternResult<(
- Vec<u8>,
- Box<dyn Fn(&HgPath) -> bool + 'b + Sync>,
- Vec<PatternFileWarning>,
-)> {
+ ignore_patterns: Vec<IgnorePattern>,
+) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + 'b + Sync>)> {
let mut match_funcs: Vec<Box<dyn Fn(&HgPath) -> bool + Sync>> = vec![];
// For debugging and printing
let mut patterns = vec![];
- let mut all_warnings = vec![];
- let (subincludes, ignore_patterns) =
- filter_subincludes(ignore_patterns, root_dir)?;
+ let (subincludes, ignore_patterns) = filter_subincludes(ignore_patterns)?;
if !subincludes.is_empty() {
// Build prefix-based matcher functions for subincludes
let mut submatchers = FastHashMap::default();
let mut prefixes = vec![];
- for SubInclude { prefix, root, path } in subincludes.into_iter() {
- let (match_fn, warnings) =
- get_ignore_function(vec![path.to_path_buf()], root)?;
- all_warnings.extend(warnings);
- prefixes.push(prefix.to_owned());
- submatchers.insert(prefix.to_owned(), match_fn);
+ for sub_include in subincludes {
+ let matcher = IncludeMatcher::new(sub_include.included_patterns)?;
+ let match_fn =
+ Box::new(move |path: &HgPath| matcher.matches(path));
+ prefixes.push(sub_include.prefix.clone());
+ submatchers.insert(sub_include.prefix.clone(), match_fn);
}
let match_subinclude = move |filename: &HgPath| {
@@ -556,14 +549,13 @@
}
Ok(if match_funcs.len() == 1 {
- (patterns, match_funcs.remove(0), all_warnings)
+ (patterns, match_funcs.remove(0))
} else {
(
patterns,
Box::new(move |f: &HgPath| -> bool {
match_funcs.iter().any(|match_func| match_func(f))
}),
- all_warnings,
)
})
}
@@ -573,7 +565,7 @@
/// ignored.
pub fn get_ignore_function<'a>(
all_pattern_files: Vec<PathBuf>,
- root_dir: impl AsRef<Path>,
+ root_dir: &Path,
) -> PatternResult<(
Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>,
Vec<PatternFileWarning>,
@@ -581,15 +573,14 @@
let mut all_patterns = vec![];
let mut all_warnings = vec![];
- for pattern_file in all_pattern_files.into_iter() {
+ for pattern_file in &all_pattern_files {
let (patterns, warnings) =
- get_patterns_from_file(pattern_file, &root_dir)?;
+ get_patterns_from_file(pattern_file, root_dir)?;
all_patterns.extend(patterns.to_owned());
all_warnings.extend(warnings);
}
- let (matcher, warnings) = IncludeMatcher::new(all_patterns, root_dir)?;
- all_warnings.extend(warnings);
+ let matcher = IncludeMatcher::new(all_patterns)?;
Ok((
Box::new(move |path: &HgPath| matcher.matches(path)),
all_warnings,
@@ -597,34 +588,26 @@
}
impl<'a> IncludeMatcher<'a> {
- pub fn new(
- ignore_patterns: Vec<IgnorePattern>,
- root_dir: impl AsRef<Path>,
- ) -> PatternResult<(Self, Vec<PatternFileWarning>)> {
- let (patterns, match_fn, warnings) =
- build_match(&ignore_patterns, root_dir)?;
+ pub fn new(ignore_patterns: Vec<IgnorePattern>) -> PatternResult<Self> {
let RootsDirsAndParents {
roots,
dirs,
parents,
} = roots_dirs_and_parents(&ignore_patterns)?;
-
let prefix = ignore_patterns.iter().any(|k| match k.syntax {
PatternSyntax::Path | PatternSyntax::RelPath => true,
_ => false,
});
+ let (patterns, match_fn) = build_match(ignore_patterns)?;
- Ok((
- Self {
- patterns,
- match_fn,
- prefix,
- roots,
- dirs,
- parents,
- },
- warnings,
- ))
+ Ok(Self {
+ patterns,
+ match_fn,
+ prefix,
+ roots,
+ dirs,
+ parents,
+ })
}
fn get_all_parents_children(&self) -> DirsChildrenMultiset {
@@ -810,14 +793,11 @@
#[test]
fn test_includematcher() {
// VisitchildrensetPrefix
- let (matcher, _) = IncludeMatcher::new(
- vec![IgnorePattern::new(
- PatternSyntax::RelPath,
- b"dir/subdir",
- Path::new(""),
- )],
- "",
- )
+ let matcher = IncludeMatcher::new(vec![IgnorePattern::new(
+ PatternSyntax::RelPath,
+ b"dir/subdir",
+ Path::new(""),
+ )])
.unwrap();
let mut set = HashSet::new();
@@ -848,14 +828,11 @@
);
// VisitchildrensetRootfilesin
- let (matcher, _) = IncludeMatcher::new(
- vec![IgnorePattern::new(
- PatternSyntax::RootFiles,
- b"dir/subdir",
- Path::new(""),
- )],
- "",
- )
+ let matcher = IncludeMatcher::new(vec![IgnorePattern::new(
+ PatternSyntax::RootFiles,
+ b"dir/subdir",
+ Path::new(""),
+ )])
.unwrap();
let mut set = HashSet::new();
@@ -886,14 +863,11 @@
);
// VisitchildrensetGlob
- let (matcher, _) = IncludeMatcher::new(
- vec![IgnorePattern::new(
- PatternSyntax::Glob,
- b"dir/z*",
- Path::new(""),
- )],
- "",
- )
+ let matcher = IncludeMatcher::new(vec![IgnorePattern::new(
+ PatternSyntax::Glob,
+ b"dir/z*",
+ Path::new(""),
+ )])
.unwrap();
let mut set = HashSet::new();
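
When several match functions remain, `build_match` combines them into one closure that succeeds if any component matches. A minimal standalone sketch of that composition, with `&str` standing in for `HgPath` and the same `Sync` bound as the boxed functions above:

    type MatchFn = Box<dyn Fn(&str) -> bool + Sync>;

    // Combine many match functions into one, the way build_match does
    // when more than one remains.
    fn combine(mut match_funcs: Vec<MatchFn>) -> MatchFn {
        if match_funcs.len() == 1 {
            match_funcs.remove(0)
        } else {
            Box::new(move |path| match_funcs.iter().any(|f| f(path)))
        }
    }

    fn main() {
        let by_prefix: MatchFn = Box::new(|p| p.starts_with("dir/"));
        let exact: MatchFn = Box::new(|p| p == "README");
        let matcher = combine(vec![by_prefix, exact]);
        assert!(matcher("dir/file.rs"));
        assert!(matcher("README"));
        assert!(!matcher("other"));
    }
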
--- a/rust/hg-core/src/operations/dirstate_status.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/operations/dirstate_status.rs Mon Jun 07 17:10:35 2021 -0400
@@ -5,17 +5,12 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::dirstate::status::{build_response, Dispatch, HgPathCow, Status};
+use crate::dirstate::status::{build_response, Dispatch, Status};
use crate::matchers::Matcher;
use crate::{DirstateStatus, StatusError};
-/// A tuple of the paths that need to be checked in the filelog because it's
-/// ambiguous whether they've changed, and the rest of the already dispatched
-/// files.
-pub type LookupAndStatus<'a> = (Vec<HgPathCow<'a>>, DirstateStatus<'a>);
-
-impl<'a, M: Matcher + Sync> Status<'a, M> {
- pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
+impl<'a, M: ?Sized + Matcher + Sync> Status<'a, M> {
+ pub(crate) fn run(&self) -> Result<DirstateStatus<'a>, StatusError> {
let (traversed_sender, traversed_receiver) =
crossbeam_channel::unbounded();
@@ -66,7 +61,10 @@
}
drop(traversed_sender);
- let traversed = traversed_receiver.into_iter().collect();
+ let traversed = traversed_receiver
+ .into_iter()
+ .map(std::borrow::Cow::Owned)
+ .collect();
Ok(build_response(results, traversed))
}
--- a/rust/hg-core/src/operations/list_tracked_files.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/operations/list_tracked_files.rs Mon Jun 07 17:10:35 2021 -0400
@@ -5,7 +5,8 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::dirstate::parsers::parse_dirstate;
+use crate::dirstate::parsers::parse_dirstate_entries;
+use crate::dirstate_tree::on_disk::for_each_tracked_path;
use crate::errors::HgError;
use crate::repo::Repo;
use crate::revlog::changelog::Changelog;
@@ -13,7 +14,7 @@
use crate::revlog::node::Node;
use crate::revlog::revlog::RevlogError;
use crate::utils::hg_path::HgPath;
-use crate::EntryState;
+use crate::DirstateError;
use rayon::prelude::*;
/// List files under Mercurial control in the working directory
@@ -21,23 +22,34 @@
pub struct Dirstate {
/// The `dirstate` content.
content: Vec<u8>,
+ dirstate_v2: bool,
}
impl Dirstate {
pub fn new(repo: &Repo) -> Result<Self, HgError> {
- let content = repo.hg_vfs().read("dirstate")?;
- Ok(Self { content })
+ Ok(Self {
+ content: repo.hg_vfs().read("dirstate")?,
+ dirstate_v2: repo.has_dirstate_v2(),
+ })
}
- pub fn tracked_files(&self) -> Result<Vec<&HgPath>, HgError> {
- let (_, entries, _) = parse_dirstate(&self.content)?;
- let mut files: Vec<&HgPath> = entries
- .into_iter()
- .filter_map(|(path, entry)| match entry.state {
- EntryState::Removed => None,
- _ => Some(path),
- })
- .collect();
+ pub fn tracked_files(&self) -> Result<Vec<&HgPath>, DirstateError> {
+ let mut files = Vec::new();
+ if !self.content.is_empty() {
+ if self.dirstate_v2 {
+ for_each_tracked_path(&self.content, |path| files.push(path))?
+ } else {
+ let _parents = parse_dirstate_entries(
+ &self.content,
+ |path, entry, _copy_source| {
+ if entry.state.is_tracked() {
+ files.push(path)
+ }
+ Ok(())
+ },
+ )?;
+ }
+ }
files.par_sort_unstable();
Ok(files)
}
--- a/rust/hg-core/src/repo.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/repo.rs Mon Jun 07 17:10:35 2021 -0400
@@ -218,12 +218,23 @@
}
}
+ pub fn has_dirstate_v2(&self) -> bool {
+ self.requirements
+ .contains(requirements::DIRSTATE_V2_REQUIREMENT)
+ }
+
pub fn dirstate_parents(
&self,
) -> Result<crate::dirstate::DirstateParents, HgError> {
let dirstate = self.hg_vfs().mmap_open("dirstate")?;
- let parents =
- crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?;
+ if dirstate.is_empty() {
+ return Ok(crate::dirstate::DirstateParents::NULL);
+ }
+ let parents = if self.has_dirstate_v2() {
+ crate::dirstate_tree::on_disk::parse_dirstate_parents(&dirstate)?
+ } else {
+ crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
+ };
Ok(parents.clone())
}
}
--- a/rust/hg-core/src/requirements.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/requirements.rs Mon Jun 07 17:10:35 2021 -0400
@@ -82,6 +82,7 @@
SPARSEREVLOG_REQUIREMENT,
RELATIVE_SHARED_REQUIREMENT,
REVLOG_COMPRESSION_ZSTD,
+ DIRSTATE_V2_REQUIREMENT,
// As of this writing everything rhg does is read-only.
// When it starts writing to the repository, it’ll need to either keep the
// persistent nodemap up to date or remove this entry:
@@ -90,6 +91,8 @@
// Copied from mercurial/requirements.py:
+pub(crate) const DIRSTATE_V2_REQUIREMENT: &str = "exp-dirstate-v2";
+
/// When narrowing is finalized and no longer subject to format changes,
/// we should move this to just "narrow" or similar.
#[allow(unused)]
@@ -124,11 +127,6 @@
#[allow(unused)]
pub(crate) const SPARSEREVLOG_REQUIREMENT: &str = "sparserevlog";
-/// A repository with the sidedataflag requirement will allow to store extra
-/// information for revision without altering their original hashes.
-#[allow(unused)]
-pub(crate) const SIDEDATA_REQUIREMENT: &str = "exp-sidedata-flag";
-
/// A repository with the copies-sidedata-changeset requirement will store
/// copies related information in changeset's sidedata.
#[allow(unused)]
--- a/rust/hg-core/src/revlog/path_encode.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/revlog/path_encode.rs Mon Jun 07 17:10:35 2021 -0400
@@ -1,5 +1,4 @@
-use crypto::digest::Digest;
-use crypto::sha1::Sha1;
+use sha1::{Digest, Sha1};
#[derive(PartialEq, Debug)]
#[allow(non_camel_case_types)]
@@ -621,13 +620,7 @@
panic!("path_encode::hash_encore: string too long: {}", baselen)
};
let dirlen = encode_dir(Some(&mut dired[..]), src);
- let sha = {
- let mut hasher = Sha1::new();
- hasher.input(&dired[..dirlen]);
- let mut hash = vec![0; 20];
- hasher.result(&mut hash);
- hash
- };
+ let sha = Sha1::digest(&dired[..dirlen]);
let lowerlen = lower_encode(Some(&mut lowered[..]), &dired[..dirlen][5..]);
let auxlen = aux_encode(Some(&mut auxed[..]), &lowered[..lowerlen]);
hash_mangle(&auxed[..auxlen], &sha)
--- a/rust/hg-core/src/revlog/revlog.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/revlog/revlog.rs Mon Jun 07 17:10:35 2021 -0400
@@ -4,10 +4,9 @@
use std::path::Path;
use byteorder::{BigEndian, ByteOrder};
-use crypto::digest::Digest;
-use crypto::sha1::Sha1;
use flate2::read::ZlibDecoder;
use micro_timer::timed;
+use sha1::{Digest, Sha1};
use zstd;
use super::index::Index;
@@ -221,7 +220,7 @@
None => &NULL_NODE,
};
- hash(data, h1.as_bytes(), h2.as_bytes()).as_slice() == expected
+ &hash(data, h1.as_bytes(), h2.as_bytes()) == expected
}
/// Build the full data of a revision out of its snapshot
@@ -361,20 +360,22 @@
}
/// Calculate the hash of a revision given its data and its parents.
-fn hash(data: &[u8], p1_hash: &[u8], p2_hash: &[u8]) -> Vec<u8> {
+fn hash(
+ data: &[u8],
+ p1_hash: &[u8],
+ p2_hash: &[u8],
+) -> [u8; NODE_BYTES_LENGTH] {
let mut hasher = Sha1::new();
let (a, b) = (p1_hash, p2_hash);
if a > b {
- hasher.input(b);
- hasher.input(a);
+ hasher.update(b);
+ hasher.update(a);
} else {
- hasher.input(a);
- hasher.input(b);
+ hasher.update(a);
+ hasher.update(b);
}
- hasher.input(data);
- let mut hash = vec![0; NODE_BYTES_LENGTH];
- hasher.result(&mut hash);
- hash
+ hasher.update(data);
+ *hasher.finalize().as_ref()
}
#[cfg(test)]
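
The switch from `rust-crypto` to the RustCrypto `sha1` crate replaces `input`/`result` with `update`/`finalize`, and `finalize()` returns a fixed-size `GenericArray` that `*...as_ref()` copies out as a plain `[u8; 20]`, avoiding the old `Vec` allocation. A hedged sketch of both call styles, assuming the same `sha1` crate as the imports above:

    use sha1::{Digest, Sha1};

    fn main() {
        // Incremental style, as in hash() above.
        let mut hasher = Sha1::new();
        hasher.update(b"p1");
        hasher.update(b"p2");
        hasher.update(b"revision data");
        // finalize() yields a GenericArray<u8, U20>; as_ref() views it as a
        // &[u8; 20] and `*` copies that fixed-size array out by value.
        let incremental: [u8; 20] = *hasher.finalize().as_ref();

        // One-shot style, as in path_encode.rs above.
        let oneshot: [u8; 20] = *Sha1::digest(b"p1p2revision data").as_ref();

        // Feeding the same bytes in pieces or at once gives the same digest.
        assert_eq!(incremental, oneshot);
    }
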
--- a/rust/hg-core/src/utils/files.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/utils/files.rs Mon Jun 07 17:10:35 2021 -0400
@@ -17,7 +17,7 @@
use lazy_static::lazy_static;
use same_file::is_same_file;
use std::borrow::{Cow, ToOwned};
-use std::ffi::OsStr;
+use std::ffi::{OsStr, OsString};
use std::fs::Metadata;
use std::iter::FusedIterator;
use std::ops::Deref;
@@ -53,6 +53,12 @@
str.as_ref().as_bytes().to_vec()
}
+#[cfg(unix)]
+pub fn get_bytes_from_os_string(str: OsString) -> Vec<u8> {
+ use std::os::unix::ffi::OsStringExt;
+ str.into_vec()
+}
+
/// An iterator over repository path yielding itself and its ancestors.
#[derive(Copy, Clone, Debug)]
pub struct Ancestors<'a> {
--- a/rust/hg-core/src/utils/hg_path.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/src/utils/hg_path.rs Mon Jun 07 17:10:35 2021 -0400
@@ -5,7 +5,9 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+use crate::utils::SliceExt;
use std::borrow::Borrow;
+use std::borrow::Cow;
use std::convert::TryFrom;
use std::ffi::{OsStr, OsString};
use std::fmt;
@@ -226,6 +228,20 @@
inner.extend(other.as_ref().bytes());
HgPathBuf::from_bytes(&inner)
}
+
+ pub fn components(&self) -> impl Iterator<Item = &HgPath> {
+ self.inner.split(|&byte| byte == b'/').map(HgPath::new)
+ }
+
+ /// Returns the first (that is "root-most") slash-separated component of
+ /// the path, and the rest after the first slash if there is one.
+ pub fn split_first_component(&self) -> (&HgPath, Option<&HgPath>) {
+ match self.inner.split_2(b'/') {
+ Some((a, b)) => (HgPath::new(a), Some(HgPath::new(b))),
+ None => (self, None),
+ }
+ }
+
pub fn parent(&self) -> &Self {
let inner = self.as_bytes();
HgPath::new(match inner.iter().rposition(|b| *b == b'/') {
@@ -530,6 +546,24 @@
}
}
+impl From<HgPathBuf> for Cow<'_, HgPath> {
+ fn from(path: HgPathBuf) -> Self {
+ Cow::Owned(path)
+ }
+}
+
+impl<'a> From<&'a HgPath> for Cow<'a, HgPath> {
+ fn from(path: &'a HgPath) -> Self {
+ Cow::Borrowed(path)
+ }
+}
+
+impl<'a> From<&'a HgPathBuf> for Cow<'a, HgPath> {
+ fn from(path: &'a HgPathBuf) -> Self {
+ Cow::Borrowed(&**path)
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
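
`split_first_component` is what lets callers descend a path one tree level at a time. A sketch of that walk with `&str` standing in for `HgPath`; the splitting rule matches the implementation above:

    // Stand-in for HgPath::split_first_component: split at the first '/'.
    fn split_first_component(path: &str) -> (&str, Option<&str>) {
        match path.find('/') {
            Some(pos) => (&path[..pos], Some(&path[pos + 1..])),
            None => (path, None),
        }
    }

    fn main() {
        // Walking "a/bb/c" visits one tree level per component: a, bb, c.
        let mut rest = Some("a/bb/c");
        while let Some(path) = rest {
            let (component, tail) = split_first_component(path);
            println!("descend into {:?}", component);
            rest = tail;
        }
    }
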
--- a/rust/hg-core/tests/test_missing_ancestors.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-core/tests/test_missing_ancestors.rs Mon Jun 07 17:10:35 2021 -0400
@@ -156,7 +156,7 @@
if left == right {
return;
}
- panic!(format!(
+ panic!(
"Equality assertion failed (left != right)
left={:?}
right={:?}
@@ -171,7 +171,7 @@
self.bases,
self.history,
self.random_seed,
- ));
+ );
}
}
@@ -231,7 +231,7 @@
.map(|n| n.trim().parse().expect(err_msg))
.collect();
if params.len() != 3 {
- panic!(err_msg);
+ panic!("{}", err_msg);
}
(params[0], params[1], params[2])
}
--- a/rust/hg-cpython/src/cindex.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/cindex.rs Mon Jun 07 17:10:35 2021 -0400
@@ -11,8 +11,8 @@
//! but this will take some time to get there.
use cpython::{
- exc::ImportError, ObjectProtocol, PyClone, PyErr, PyObject, PyResult,
- PyTuple, Python, PythonObject,
+ exc::ImportError, exc::TypeError, ObjectProtocol, PyClone, PyErr,
+ PyObject, PyResult, PyTuple, Python, PythonObject,
};
use hg::revlog::{Node, RevlogIndex};
use hg::{Graph, GraphError, Revision, WORKING_DIRECTORY_REVISION};
@@ -90,6 +90,13 @@
),
));
}
+ let compat: u64 = index.getattr(py, "rust_ext_compat")?.extract(py)?;
+ if compat == 0 {
+ return Err(PyErr::new::<TypeError, _>(
+ py,
+ "index object not compatible with Rust",
+ ));
+ }
Ok(Index { index, capi })
}
--- a/rust/hg-cpython/src/dirstate.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/dirstate.rs Mon Jun 07 17:10:35 2021 -0400
@@ -12,7 +12,9 @@
mod copymap;
mod dirs_multiset;
mod dirstate_map;
+mod dispatch;
mod non_normal_entries;
+mod owning;
mod status;
use crate::{
dirstate::{
@@ -24,6 +26,7 @@
exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
PySequence, Python,
};
+use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
use hg::{utils::hg_path::HgPathBuf, DirstateEntry, EntryState, StateMap};
use libc::{c_char, c_int};
use std::convert::TryFrom;
@@ -115,6 +118,7 @@
)?;
m.add_class::<Dirs>(py)?;
m.add_class::<DirstateMap>(py)?;
+ m.add(py, "V2_FORMAT_MARKER", PyBytes::new(py, V2_FORMAT_MARKER))?;
m.add(
py,
"status",
--- a/rust/hg-cpython/src/dirstate/copymap.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/copymap.rs Mon Jun 07 17:10:35 2021 -0400
@@ -13,8 +13,11 @@
};
use std::cell::RefCell;
+use crate::dirstate::dirstate_map::v2_error;
use crate::dirstate::dirstate_map::DirstateMap;
-use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
+use hg::dirstate_tree::on_disk::DirstateV2ParseError;
+use hg::utils::hg_path::HgPath;
+use hg::CopyMapIter;
py_class!(pub class CopyMap |py| {
data dirstate_map: DirstateMap;
@@ -87,15 +90,16 @@
}
fn translate_key(
py: Python,
- res: (&HgPathBuf, &HgPathBuf),
+ res: Result<(&HgPath, &HgPath), DirstateV2ParseError>,
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.0.as_bytes())))
+ let (k, _v) = res.map_err(|e| v2_error(py, e))?;
+ Ok(Some(PyBytes::new(py, k.as_bytes())))
}
fn translate_key_value(
py: Python,
- res: (&HgPathBuf, &HgPathBuf),
+ res: Result<(&HgPath, &HgPath), DirstateV2ParseError>,
) -> PyResult<Option<(PyBytes, PyBytes)>> {
- let (k, v) = res;
+ let (k, v) = res.map_err(|e| v2_error(py, e))?;
Ok(Some((
PyBytes::new(py, k.as_bytes()),
PyBytes::new(py, v.as_bytes()),
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs Mon Jun 07 17:10:35 2021 -0400
@@ -20,7 +20,8 @@
use hg::{
errors::HgError,
utils::hg_path::{HgPath, HgPathBuf},
- DirsMultiset, DirsMultisetIter, DirstateMapError, EntryState,
+ DirsMultiset, DirsMultisetIter, DirstateError, DirstateMapError,
+ EntryState,
};
py_class!(pub class Dirs |py| {
@@ -45,8 +46,9 @@
}
let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
let dirstate = extract_dirstate(py, &map)?;
- DirsMultiset::from_dirstate(&dirstate, skip_state)
- .map_err(|e: DirstateMapError| {
+ let dirstate = dirstate.iter().map(|(k, v)| Ok((k, *v)));
+ DirsMultiset::from_dirstate(dirstate, skip_state)
+ .map_err(|e: DirstateError| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
})?
} else {
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Mon Jun 07 17:10:35 2021 -0400
@@ -8,14 +8,13 @@
//! Bindings for the `hg::dirstate::dirstate_map` file provided by the
//! `hg-core` package.
-use std::cell::{Ref, RefCell};
+use std::cell::{RefCell, RefMut};
use std::convert::TryInto;
-use std::time::Duration;
use cpython::{
exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
- PyObject, PyResult, PySet, PyString, PyTuple, Python, PythonObject,
- ToPyObject, UnsafePyLeaked,
+ PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
+ UnsafePyLeaked,
};
use crate::{
@@ -23,15 +22,20 @@
dirstate::non_normal_entries::{
NonNormalEntries, NonNormalEntriesIterator,
},
+ dirstate::owning::OwningDirstateMap,
dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
parsers::dirstate_parents_to_pytuple,
};
use hg::{
+ dirstate::parsers::Timestamp,
+ dirstate_tree::dispatch::DirstateMapMethods,
+ dirstate_tree::on_disk::DirstateV2ParseError,
errors::HgError,
revlog::Node,
+ utils::files::normalize_case,
utils::hg_path::{HgPath, HgPathBuf},
- DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap,
- DirstateMapError, DirstateParents, EntryState, StateMapIter,
+ DirsMultiset, DirstateEntry, DirstateError,
+ DirstateMap as RustDirstateMap, DirstateParents, EntryState, StateMapIter,
};
// TODO
@@ -47,11 +51,32 @@
// All attributes also have to have a separate refcount data attribute for
// leaks, with all methods that go along for reference sharing.
py_class!(pub class DirstateMap |py| {
- @shared data inner: RustDirstateMap;
+ @shared data inner: Box<dyn DirstateMapMethods + Send>;
- def __new__(_cls, _root: PyObject) -> PyResult<Self> {
- let inner = RustDirstateMap::default();
- Self::create_instance(py, inner)
+ /// Returns a `(dirstate_map, parents)` tuple
+ @staticmethod
+ def new(
+ use_dirstate_tree: bool,
+ use_dirstate_v2: bool,
+ on_disk: PyBytes,
+ ) -> PyResult<PyObject> {
+ let dirstate_error = |e: DirstateError| {
+ PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
+ };
+ let (inner, parents) = if use_dirstate_tree || use_dirstate_v2 {
+ let (map, parents) =
+ OwningDirstateMap::new(py, on_disk, use_dirstate_v2)
+ .map_err(dirstate_error)?;
+ (Box::new(map) as _, parents)
+ } else {
+ let bytes = on_disk.data(py);
+ let mut map = RustDirstateMap::default();
+ let parents = map.read(bytes).map_err(dirstate_error)?;
+ (Box::new(map) as _, parents)
+ };
+ let map = Self::create_instance(py, inner)?;
+ let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
+ Ok((map, parents).to_py_object(py).into_object())
}
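
The constructor above erases the chosen map implementation behind `Box<dyn DirstateMapMethods + Send>`, so the rest of the py_class body is written once against the trait. A minimal sketch of that dispatch pattern, with illustrative stand-in types:

    // Stand-in for DirstateMapMethods: whatever the class body needs.
    trait MapMethods: Send {
        fn len(&self) -> usize;
    }

    struct FlatMap(Vec<u8>);
    struct TreeMap(Vec<u8>);

    impl MapMethods for FlatMap {
        fn len(&self) -> usize {
            self.0.len()
        }
    }
    impl MapMethods for TreeMap {
        fn len(&self) -> usize {
            self.0.len()
        }
    }

    // Pick the implementation at runtime; callers only see the trait,
    // just as `Box::new(map) as _` does in the constructor above.
    fn new_map(use_tree: bool, on_disk: Vec<u8>) -> Box<dyn MapMethods> {
        if use_tree {
            Box::new(TreeMap(on_disk))
        } else {
            Box::new(FlatMap(on_disk))
        }
    }

    fn main() {
        let map = new_map(true, vec![0; 3]);
        assert_eq!(map.len(), 3);
    }
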
def clear(&self) -> PyResult<PyObject> {
@@ -65,9 +90,14 @@
default: Option<PyObject> = None
) -> PyResult<Option<PyObject>> {
let key = key.extract::<PyBytes>(py)?;
- match self.inner(py).borrow().get(HgPath::new(key.data(py))) {
+ match self
+ .inner(py)
+ .borrow()
+ .get(HgPath::new(key.data(py)))
+ .map_err(|e| v2_error(py, e))?
+ {
Some(entry) => {
- Ok(Some(make_dirstate_tuple(py, entry)?))
+ Ok(Some(make_dirstate_tuple(py, &entry)?))
},
None => Ok(default)
}
@@ -99,7 +129,7 @@
size: size.extract(py)?,
mtime: mtime.extract(py)?,
},
- ).and(Ok(py.None())).or_else(|e: DirstateMapError| {
+ ).and(Ok(py.None())).or_else(|e: DirstateError| {
Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
})
}
@@ -165,18 +195,18 @@
))
})
.collect();
- self.inner(py).borrow_mut()
- .clear_ambiguous_times(files?, now.extract(py)?);
+ self.inner(py)
+ .borrow_mut()
+ .clear_ambiguous_times(files?, now.extract(py)?)
+ .map_err(|e| v2_error(py, e))?;
Ok(py.None())
}
def other_parent_entries(&self) -> PyResult<PyObject> {
let mut inner_shared = self.inner(py).borrow_mut();
- let (_, other_parent) =
- inner_shared.get_non_normal_other_parent_entries();
-
let set = PySet::empty(py)?;
- for path in other_parent.iter() {
+ for path in inner_shared.iter_other_parent_paths() {
+ let path = path.map_err(|e| v2_error(py, e))?;
set.add(py, PyBytes::new(py, path.as_bytes()))?;
}
Ok(set.into_object())
@@ -188,26 +218,20 @@
def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
let key = key.extract::<PyBytes>(py)?;
- Ok(self
- .inner(py)
+ self.inner(py)
.borrow_mut()
- .get_non_normal_other_parent_entries().0
- .contains(HgPath::new(key.data(py))))
+ .non_normal_entries_contains(HgPath::new(key.data(py)))
+ .map_err(|e| v2_error(py, e))
}
def non_normal_entries_display(&self) -> PyResult<PyString> {
- Ok(
- PyString::new(
- py,
- &format!(
- "NonNormalEntries: {:?}",
- self
- .inner(py)
- .borrow_mut()
- .get_non_normal_other_parent_entries().0
- .iter().map(|o| o))
- )
- )
+ let mut inner = self.inner(py).borrow_mut();
+ let paths = inner
+ .iter_non_normal_paths()
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(|e| v2_error(py, e))?;
+ let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
+ Ok(PyString::new(py, &formatted))
}
def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
@@ -219,22 +243,12 @@
Ok(py.None())
}
- def non_normal_entries_union(&self, other: PyObject) -> PyResult<PyList> {
- let other: PyResult<_> = other.iter(py)?
- .map(|f| {
- Ok(HgPathBuf::from_bytes(
- f?.extract::<PyBytes>(py)?.data(py),
- ))
- })
- .collect();
-
- let res = self
- .inner(py)
- .borrow_mut()
- .non_normal_entries_union(other?);
+ def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
+ let mut inner = self.inner(py).borrow_mut();
let ret = PyList::new(py, &[]);
- for filename in res.iter() {
+ for filename in inner.non_normal_or_other_parent_paths() {
+ let filename = filename.map_err(|e| v2_error(py, e))?;
let as_pystring = PyBytes::new(py, filename.as_bytes());
ret.append(py, as_pystring.into_object());
}
@@ -252,7 +266,7 @@
NonNormalEntriesIterator::from_inner(py, unsafe {
leaked_ref.map(py, |o| {
- o.get_non_normal_other_parent_entries_panic().0.iter()
+ o.iter_non_normal_paths_panic()
})
})
}
@@ -277,55 +291,26 @@
.to_py_object(py))
}
- def parents(&self, st: PyObject) -> PyResult<PyTuple> {
- self.inner(py).borrow_mut()
- .parents(st.extract::<PyBytes>(py)?.data(py))
- .map(|parents| dirstate_parents_to_pytuple(py, parents))
- .or_else(|_| {
- Err(PyErr::new::<exc::OSError, _>(
- py,
- "Dirstate error".to_string(),
- ))
- })
- }
-
- def setparents(&self, p1: PyObject, p2: PyObject) -> PyResult<PyObject> {
- let p1 = extract_node_id(py, &p1)?;
- let p2 = extract_node_id(py, &p2)?;
-
- self.inner(py).borrow_mut()
- .set_parents(&DirstateParents { p1, p2 });
- Ok(py.None())
- }
-
- def read(&self, st: PyObject) -> PyResult<Option<PyObject>> {
- match self.inner(py).borrow_mut()
- .read(st.extract::<PyBytes>(py)?.data(py))
- {
- Ok(Some(parents)) => Ok(Some(
- dirstate_parents_to_pytuple(py, parents)
- .into_object()
- )),
- Ok(None) => Ok(Some(py.None())),
- Err(_) => Err(PyErr::new::<exc::OSError, _>(
- py,
- "Dirstate error".to_string(),
- )),
- }
- }
def write(
&self,
+ use_dirstate_v2: bool,
p1: PyObject,
p2: PyObject,
now: PyObject
) -> PyResult<PyBytes> {
- let now = Duration::new(now.extract(py)?, 0);
+ let now = Timestamp(now.extract(py)?);
let parents = DirstateParents {
p1: extract_node_id(py, &p1)?,
p2: extract_node_id(py, &p2)?,
};
- match self.inner(py).borrow_mut().pack(parents, now) {
+ let mut inner = self.inner(py).borrow_mut();
+ let result = if use_dirstate_v2 {
+ inner.pack_v2(parents, now)
+ } else {
+ inner.pack_v1(parents, now)
+ };
+ match result {
Ok(packed) => Ok(PyBytes::new(py, &packed)),
Err(_) => Err(PyErr::new::<exc::OSError, _>(
py,
@@ -336,14 +321,17 @@
def filefoldmapasdict(&self) -> PyResult<PyDict> {
let dict = PyDict::new(py);
- for (key, value) in
- self.inner(py).borrow_mut().build_file_fold_map().iter()
- {
- dict.set_item(
- py,
- PyBytes::new(py, key.as_bytes()).into_object(),
- PyBytes::new(py, value.as_bytes()).into_object(),
- )?;
+ for item in self.inner(py).borrow_mut().iter() {
+ let (path, entry) = item.map_err(|e| v2_error(py, e))?;
+ if entry.state != EntryState::Removed {
+ let key = normalize_case(path);
+ let value = path;
+ dict.set_item(
+ py,
+ PyBytes::new(py, key.as_bytes()).into_object(),
+ PyBytes::new(py, value.as_bytes()).into_object(),
+ )?;
+ }
}
Ok(dict)
}
@@ -354,15 +342,23 @@
def __contains__(&self, key: PyObject) -> PyResult<bool> {
let key = key.extract::<PyBytes>(py)?;
- Ok(self.inner(py).borrow().contains_key(HgPath::new(key.data(py))))
+ self.inner(py)
+ .borrow()
+ .contains_key(HgPath::new(key.data(py)))
+ .map_err(|e| v2_error(py, e))
}
def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
let key = key.extract::<PyBytes>(py)?;
let key = HgPath::new(key.data(py));
- match self.inner(py).borrow().get(key) {
+ match self
+ .inner(py)
+ .borrow()
+ .get(key)
+ .map_err(|e| v2_error(py, e))?
+ {
Some(entry) => {
- Ok(make_dirstate_tuple(py, entry)?)
+ Ok(make_dirstate_tuple(py, &entry)?)
},
None => Err(PyErr::new::<exc::KeyError, _>(
py,
@@ -404,7 +400,7 @@
Dirs::from_inner(
py,
DirsMultiset::from_dirstate(
- &self.inner(py).borrow(),
+ self.inner(py).borrow().iter(),
Some(EntryState::Removed),
)
.map_err(|e| {
@@ -421,7 +417,7 @@
Dirs::from_inner(
py,
DirsMultiset::from_dirstate(
- &self.inner(py).borrow(),
+ self.inner(py).borrow().iter(),
None,
).map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -432,7 +428,8 @@
// TODO all copymap* methods, see docstring above
def copymapcopy(&self) -> PyResult<PyDict> {
let dict = PyDict::new(py);
- for (key, value) in self.inner(py).borrow().copy_map.iter() {
+ for item in self.inner(py).borrow().copy_map_iter() {
+ let (key, value) = item.map_err(|e| v2_error(py, e))?;
dict.set_item(
py,
PyBytes::new(py, key.as_bytes()),
@@ -444,7 +441,12 @@
def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
let key = key.extract::<PyBytes>(py)?;
- match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
+ match self
+ .inner(py)
+ .borrow()
+ .copy_map_get(HgPath::new(key.data(py)))
+ .map_err(|e| v2_error(py, e))?
+ {
Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
None => Err(PyErr::new::<exc::KeyError, _>(
py,
@@ -457,15 +459,14 @@
}
def copymaplen(&self) -> PyResult<usize> {
- Ok(self.inner(py).borrow().copy_map.len())
+ Ok(self.inner(py).borrow().copy_map_len())
}
def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
let key = key.extract::<PyBytes>(py)?;
- Ok(self
- .inner(py)
+ self.inner(py)
.borrow()
- .copy_map
- .contains_key(HgPath::new(key.data(py))))
+ .copy_map_contains_key(HgPath::new(key.data(py)))
+ .map_err(|e| v2_error(py, e))
}
def copymapget(
&self,
@@ -476,8 +477,8 @@
match self
.inner(py)
.borrow()
- .copy_map
- .get(HgPath::new(key.data(py)))
+ .copy_map_get(HgPath::new(key.data(py)))
+ .map_err(|e| v2_error(py, e))?
{
Some(copy) => Ok(Some(
PyBytes::new(py, copy.as_bytes()).into_object(),
@@ -492,10 +493,13 @@
) -> PyResult<PyObject> {
let key = key.extract::<PyBytes>(py)?;
let value = value.extract::<PyBytes>(py)?;
- self.inner(py).borrow_mut().copy_map.insert(
- HgPathBuf::from_bytes(key.data(py)),
- HgPathBuf::from_bytes(value.data(py)),
- );
+ self.inner(py)
+ .borrow_mut()
+ .copy_map_insert(
+ HgPathBuf::from_bytes(key.data(py)),
+ HgPathBuf::from_bytes(value.data(py)),
+ )
+ .map_err(|e| v2_error(py, e))?;
Ok(py.None())
}
def copymappop(
@@ -507,8 +511,8 @@
match self
.inner(py)
.borrow_mut()
- .copy_map
- .remove(HgPath::new(key.data(py)))
+ .copy_map_remove(HgPath::new(key.data(py)))
+ .map_err(|e| v2_error(py, e))?
{
Some(_) => Ok(None),
None => Ok(default),
@@ -519,7 +523,7 @@
let leaked_ref = self.inner(py).leak_immutable();
CopyMapKeysIterator::from_inner(
py,
- unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+ unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
)
}
@@ -527,30 +531,43 @@
let leaked_ref = self.inner(py).leak_immutable();
CopyMapItemsIterator::from_inner(
py,
- unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+ unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
)
}
+ def directories(&self) -> PyResult<PyList> {
+ let dirs = PyList::new(py, &[]);
+ for item in self.inner(py).borrow().iter_directories() {
+ let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
+ let path = PyBytes::new(py, path.as_bytes());
+ let mtime = mtime.map(|t| t.0).unwrap_or(-1);
+ let tuple = (path, (b'd', 0, 0, mtime));
+ dirs.append(py, tuple.to_py_object(py).into_object())
+ }
+ Ok(dirs)
+ }
+
});
impl DirstateMap {
- pub fn get_inner<'a>(
+ pub fn get_inner_mut<'a>(
&'a self,
py: Python<'a>,
- ) -> Ref<'a, RustDirstateMap> {
- self.inner(py).borrow()
+ ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
+ self.inner(py).borrow_mut()
}
fn translate_key(
py: Python,
- res: (&HgPathBuf, &DirstateEntry),
+ res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.0.as_bytes())))
+ let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
+ Ok(Some(PyBytes::new(py, f.as_bytes())))
}
fn translate_key_value(
py: Python,
- res: (&HgPathBuf, &DirstateEntry),
+ res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
) -> PyResult<Option<(PyBytes, PyObject)>> {
- let (f, entry) = res;
+ let (f, entry) = res.map_err(|e| v2_error(py, e))?;
Ok(Some((
PyBytes::new(py, f.as_bytes()),
make_dirstate_tuple(py, &entry)?,
@@ -579,3 +596,7 @@
Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
}
}
+
+pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
+ PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
+}
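
The `v2_error` helper above anchors a pattern repeated throughout these
bindings: every fallible call into the shared map returns
`Result<_, DirstateV2ParseError>`, and the binding layer converts the error
into a Python `ValueError` at the boundary with `.map_err(|e| v2_error(py, e))`.
A minimal, self-contained sketch of the pattern (requires the `cpython` crate;
`ParseError` and `fallible_lookup` are hypothetical stand-ins for
`DirstateV2ParseError` and a call on the inner map):

    use cpython::{exc, PyErr, PyResult, Python};

    struct ParseError;

    // Same shape as the v2_error helper added above.
    fn v2_error(py: Python<'_>, _: ParseError) -> PyErr {
        PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
    }

    fn contains(py: Python<'_>) -> PyResult<bool> {
        // Stand-in for e.g. `self.inner(py).borrow().contains_key(...)`.
        let fallible_lookup: Result<bool, ParseError> = Ok(true);
        // The pervasive pattern: map the domain error into a PyErr, then
        // either return the Result directly or chain with `?`.
        fallible_lookup.map_err(|e| v2_error(py, e))
    }
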
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/dispatch.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,223 @@
+use crate::dirstate::owning::OwningDirstateMap;
+use hg::dirstate::parsers::Timestamp;
+use hg::dirstate_tree::dispatch::DirstateMapMethods;
+use hg::dirstate_tree::on_disk::DirstateV2ParseError;
+use hg::matchers::Matcher;
+use hg::utils::hg_path::{HgPath, HgPathBuf};
+use hg::CopyMapIter;
+use hg::DirstateEntry;
+use hg::DirstateError;
+use hg::DirstateParents;
+use hg::DirstateStatus;
+use hg::EntryState;
+use hg::PatternFileWarning;
+use hg::StateMapIter;
+use hg::StatusError;
+use hg::StatusOptions;
+use std::path::PathBuf;
+
+impl DirstateMapMethods for OwningDirstateMap {
+ fn clear(&mut self) {
+ self.get_mut().clear()
+ }
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateError> {
+ self.get_mut().add_file(filename, old_state, entry)
+ }
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateError> {
+ self.get_mut().remove_file(filename, old_state, size)
+ }
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateError> {
+ self.get_mut().drop_file(filename, old_state)
+ }
+
+ fn clear_ambiguous_times(
+ &mut self,
+ filenames: Vec<HgPathBuf>,
+ now: i32,
+ ) -> Result<(), DirstateV2ParseError> {
+ self.get_mut().clear_ambiguous_times(filenames, now)
+ }
+
+ fn non_normal_entries_contains(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ self.get_mut().non_normal_entries_contains(key)
+ }
+
+ fn non_normal_entries_remove(&mut self, key: &HgPath) {
+ self.get_mut().non_normal_entries_remove(key)
+ }
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
+ {
+ self.get_mut().non_normal_or_other_parent_paths()
+ }
+
+ fn set_non_normal_other_parent_entries(&mut self, force: bool) {
+ self.get_mut().set_non_normal_other_parent_entries(force)
+ }
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ self.get_mut().iter_non_normal_paths()
+ }
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ self.get().iter_non_normal_paths_panic()
+ }
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<
+ dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
+ > {
+ self.get_mut().iter_other_parent_paths()
+ }
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateError> {
+ self.get_mut().has_tracked_dir(directory)
+ }
+
+ fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
+ self.get_mut().has_dir(directory)
+ }
+
+ fn pack_v1(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ self.get_mut().pack_v1(parents, now)
+ }
+
+ fn pack_v2(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ self.get_mut().pack_v2(parents, now)
+ }
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
+ self.get_mut().set_all_dirs()
+ }
+
+ fn set_dirs(&mut self) -> Result<(), DirstateError> {
+ self.get_mut().set_dirs()
+ }
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+ {
+ self.get_mut()
+ .status(matcher, root_dir, ignore_files, options)
+ }
+
+ fn copy_map_len(&self) -> usize {
+ self.get().copy_map_len()
+ }
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_> {
+ self.get().copy_map_iter()
+ }
+
+ fn copy_map_contains_key(
+ &self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ self.get().copy_map_contains_key(key)
+ }
+
+ fn copy_map_get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
+ self.get().copy_map_get(key)
+ }
+
+ fn copy_map_remove(
+ &mut self,
+ key: &HgPath,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
+ self.get_mut().copy_map_remove(key)
+ }
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
+ self.get_mut().copy_map_insert(key, value)
+ }
+
+ fn len(&self) -> usize {
+ self.get().len()
+ }
+
+ fn contains_key(
+ &self,
+ key: &HgPath,
+ ) -> Result<bool, DirstateV2ParseError> {
+ self.get().contains_key(key)
+ }
+
+ fn get(
+ &self,
+ key: &HgPath,
+ ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
+ self.get().get(key)
+ }
+
+ fn iter(&self) -> StateMapIter<'_> {
+ self.get().iter()
+ }
+
+ fn iter_directories(
+ &self,
+ ) -> Box<
+ dyn Iterator<
+ Item = Result<
+ (&HgPath, Option<Timestamp>),
+ DirstateV2ParseError,
+ >,
+ > + Send
+ + '_,
+ > {
+ self.get().iter_directories()
+ }
+}
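
Everything in dispatch.rs is mechanical forwarding: `OwningDirstateMap`
implements `DirstateMapMethods` by delegating each call to the map it owns,
so callers can hold a `Box<dyn DirstateMapMethods + Send>` without knowing
which backing implementation is behind it. A self-contained sketch of the
same delegation pattern, with hypothetical stand-in names:

    // Hypothetical stand-ins for DirstateMapMethods, DirstateMap, and
    // OwningDirstateMap.
    trait MapMethods {
        fn len(&self) -> usize;
        fn clear(&mut self);
    }

    struct InnerMap(Vec<u8>);

    impl MapMethods for InnerMap {
        fn len(&self) -> usize { self.0.len() }
        fn clear(&mut self) { self.0.clear() }
    }

    struct Owning(InnerMap);

    // Pure delegation, one forwarding method per trait method.
    impl MapMethods for Owning {
        fn len(&self) -> usize { self.0.len() }
        fn clear(&mut self) { self.0.clear() }
    }

    // Callers only ever see the trait object.
    fn is_empty(map: &dyn MapMethods) -> bool {
        map.len() == 0
    }
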
--- a/rust/hg-cpython/src/dirstate/non_normal_entries.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/non_normal_entries.rs Mon Jun 07 17:10:35 2021 -0400
@@ -7,14 +7,15 @@
use cpython::{
exc::NotImplementedError, CompareOp, ObjectProtocol, PyBytes, PyClone,
- PyErr, PyList, PyObject, PyResult, PyString, Python, PythonObject,
- ToPyObject, UnsafePyLeaked,
+ PyErr, PyObject, PyResult, PyString, Python, PythonObject, ToPyObject,
+ UnsafePyLeaked,
};
+use crate::dirstate::dirstate_map::v2_error;
use crate::dirstate::DirstateMap;
-use hg::utils::hg_path::HgPathBuf;
+use hg::dirstate_tree::on_disk::DirstateV2ParseError;
+use hg::utils::hg_path::HgPath;
use std::cell::RefCell;
-use std::collections::hash_set;
py_class!(pub class NonNormalEntries |py| {
data dmap: DirstateMap;
@@ -25,9 +26,6 @@
def remove(&self, key: PyObject) -> PyResult<PyObject> {
self.dmap(py).non_normal_entries_remove(py, key)
}
- def union(&self, other: PyObject) -> PyResult<PyList> {
- self.dmap(py).non_normal_entries_union(py, other)
- }
def __richcmp__(&self, other: PyObject, op: CompareOp) -> PyResult<bool> {
match op {
CompareOp::Eq => self.is_equal_to(py, other),
@@ -60,13 +58,16 @@
fn translate_key(
py: Python,
- key: &HgPathBuf,
+ key: Result<&HgPath, DirstateV2ParseError>,
) -> PyResult<Option<PyBytes>> {
+ let key = key.map_err(|e| v2_error(py, e))?;
Ok(Some(PyBytes::new(py, key.as_bytes())))
}
}
-type NonNormalEntriesIter<'a> = hash_set::Iter<'a, HgPathBuf>;
+type NonNormalEntriesIter<'a> = Box<
+ dyn Iterator<Item = Result<&'a HgPath, DirstateV2ParseError>> + Send + 'a,
+>;
py_shared_iterator!(
NonNormalEntriesIterator,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/owning.rs Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,102 @@
+use cpython::PyBytes;
+use cpython::Python;
+use hg::dirstate_tree::dirstate_map::DirstateMap;
+use hg::DirstateError;
+use hg::DirstateParents;
+
+/// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
+/// borrows.
+///
+/// This is similar to [`OwningRef`], which is more limited because it
+/// represents exactly one `&T` reference next to the value it borrows, as
+/// opposed to a struct that may contain an arbitrary number of references in
+/// arbitrarily-nested data structures.
+///
+/// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
+pub(super) struct OwningDirstateMap {
+ /// Owned handle to a bytes buffer with a stable address.
+ ///
+ /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
+ on_disk: PyBytes,
+
+ /// Pointer for `Box<DirstateMap<'on_disk>>`, type-erased because the
+ /// language cannot represent a lifetime referencing a sibling field.
+ /// This is not quite a self-referential struct (moving this struct is not
+ /// a problem as it doesn’t change the address of the bytes buffer owned
+ /// by `PyBytes`) but touches similar borrow-checker limitations.
+ ptr: *mut (),
+}
+
+impl OwningDirstateMap {
+ pub fn new(
+ py: Python,
+ on_disk: PyBytes,
+ use_dirstate_v2: bool,
+ ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
+ let bytes: &'_ [u8] = on_disk.data(py);
+ let (map, parents) = if use_dirstate_v2 {
+ DirstateMap::new_v2(bytes)?
+ } else {
+ DirstateMap::new_v1(bytes)?
+ };
+
+ // Like in `bytes` above, this `'_` lifetime parameter borrows from
+ // the bytes buffer owned by `on_disk`.
+ let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
+
+ // Erase the pointed type entirely in order to erase the lifetime.
+ let ptr: *mut () = ptr.cast();
+
+ Ok((Self { on_disk, ptr }, parents))
+ }
+
+ pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
+ // SAFETY: We cast the type-erased pointer back to the same type it had
+ // in `new`, except with a different lifetime parameter. This time we
+ // connect the lifetime to that of `self`. This cast is valid because
+ // `self` owns the same `PyBytes` whose buffer `DirstateMap`
+ // references. That buffer has a stable memory address because the byte
+ // string value of a `PyBytes` is immutable.
+ let ptr: *mut DirstateMap<'a> = self.ptr.cast();
+ // SAFETY: we dereference that pointer, connecting the lifetime of the
+ // new `&mut` to that of `self`. This is valid because the
+ // raw pointer is to a boxed value, and `self` owns that box.
+ unsafe { &mut *ptr }
+ }
+
+ pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
+ // SAFETY: same reasoning as in `get_mut` above.
+ let ptr: *mut DirstateMap<'a> = self.ptr.cast();
+ unsafe { &*ptr }
+ }
+}
+
+impl Drop for OwningDirstateMap {
+ fn drop(&mut self) {
+ // Silence a "field is never read" warning, and demonstrate that this
+ // value is still alive.
+ let _ = &self.on_disk;
+ // SAFETY: this cast is the same as in `get_mut`, and is valid for the
+ // same reason. `self.on_disk` still exists at this point, drop glue
+ // will drop it implicitly after this `drop` method returns.
+ let ptr: *mut DirstateMap<'_> = self.ptr.cast();
+ // SAFETY: `Box::from_raw` takes ownership of the box away from `self`.
+ // This is fine because drop glue does nothing for `*mut ()` and we’re
+ // in `drop`, so `get` and `get_mut` cannot be called again.
+ unsafe { drop(Box::from_raw(ptr)) }
+ }
+}
+
+fn _static_assert_is_send<T: Send>() {}
+
+fn _static_assert_fields_are_send() {
+ _static_assert_is_send::<PyBytes>();
+ _static_assert_is_send::<Box<DirstateMap<'_>>>();
+}
+
+// SAFETY: we don’t get this impl implicitly because `*mut (): !Send`, since
+// thread-safety of raw pointers is unknown in the general case. However this
+// particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
+// own. Since that `Box` and `PyBytes` are both `Send` as shown above, it
+// is sound to mark this struct as `Send` too.
+unsafe impl Send for OwningDirstateMap {}
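
The pattern in owning.rs generalizes: own a buffer whose heap allocation has
a stable address, box a value that borrows from it, erase the borrow's
lifetime through a raw pointer, and reconnect the lifetime to `&self` on
access. A minimal sketch under those assumptions, with `Box<[u8]>` standing
in for `PyBytes` and a boxed `&[u8]` standing in for
`Box<DirstateMap<'on_disk>>`:

    struct OwningSlice {
        buffer: Box<[u8]>, // stable address: the heap allocation never moves
        ptr: *mut (),      // really a `*mut &[u8]` with its lifetime erased
    }

    impl OwningSlice {
        fn new(buffer: Box<[u8]>) -> Self {
            let borrowed: &[u8] = &buffer[..];
            // Erase the lifetime by boxing the borrow and casting away the
            // pointee type, as `OwningDirstateMap::new` does.
            let ptr = Box::into_raw(Box::new(borrowed)).cast::<()>();
            Self { buffer, ptr }
        }

        fn get(&self) -> &[u8] {
            // SAFETY: `self` still owns `buffer` and the allocation never
            // moved; the return type reconnects the erased lifetime to
            // that of `&self`.
            unsafe { *self.ptr.cast::<&[u8]>() }
        }
    }

    impl Drop for OwningSlice {
        fn drop(&mut self) {
            // SAFETY: reverses the `Box::into_raw` in `new`, exactly once.
            unsafe { drop(Box::from_raw(self.ptr.cast::<&[u8]>())) }
        }
    }

    fn main() {
        let owned = OwningSlice::new(vec![1u8, 2, 3].into_boxed_slice());
        assert_eq!(owned.get(), &[1, 2, 3]);
    }
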
--- a/rust/hg-cpython/src/dirstate/status.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/status.rs Mon Jun 07 17:10:35 2021 -0400
@@ -17,7 +17,7 @@
};
use hg::{
matchers::{AlwaysMatcher, FileMatcher, IncludeMatcher},
- parse_pattern_syntax, status,
+ parse_pattern_syntax,
utils::{
files::{get_bytes_from_path, get_path_from_bytes},
hg_path::{HgPath, HgPathBuf},
@@ -25,7 +25,7 @@
BadMatch, DirstateStatus, IgnorePattern, PatternFileWarning, StatusError,
StatusOptions,
};
-use std::borrow::{Borrow, Cow};
+use std::borrow::Borrow;
/// This will be useless once trait impls for collection are added to `PyBytes`
/// upstream.
@@ -112,7 +112,7 @@
let root_dir = get_path_from_bytes(bytes.data(py));
let dmap: DirstateMap = dmap.to_py_object(py);
- let dmap = dmap.get_inner(py);
+ let mut dmap = dmap.get_inner_mut(py);
let ignore_files: PyResult<Vec<_>> = ignore_files
.iter(py)
@@ -126,22 +126,22 @@
match matcher.get_type(py).name(py).borrow() {
"alwaysmatcher" => {
let matcher = AlwaysMatcher;
- let ((lookup, status_res), warnings) = status(
- &dmap,
- &matcher,
- root_dir.to_path_buf(),
- ignore_files,
- StatusOptions {
- check_exec,
- last_normal_time,
- list_clean,
- list_ignored,
- list_unknown,
- collect_traversed_dirs,
- },
- )
- .map_err(|e| handle_fallback(py, e))?;
- build_response(py, lookup, status_res, warnings)
+ let (status_res, warnings) = dmap
+ .status(
+ &matcher,
+ root_dir.to_path_buf(),
+ ignore_files,
+ StatusOptions {
+ check_exec,
+ last_normal_time,
+ list_clean,
+ list_ignored,
+ list_unknown,
+ collect_traversed_dirs,
+ },
+ )
+ .map_err(|e| handle_fallback(py, e))?;
+ build_response(py, status_res, warnings)
}
"exactmatcher" => {
let files = matcher.call_method(
@@ -163,22 +163,22 @@
let files = files?;
let matcher = FileMatcher::new(files.as_ref())
.map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
- let ((lookup, status_res), warnings) = status(
- &dmap,
- &matcher,
- root_dir.to_path_buf(),
- ignore_files,
- StatusOptions {
- check_exec,
- last_normal_time,
- list_clean,
- list_ignored,
- list_unknown,
- collect_traversed_dirs,
- },
- )
- .map_err(|e| handle_fallback(py, e))?;
- build_response(py, lookup, status_res, warnings)
+ let (status_res, warnings) = dmap
+ .status(
+ &matcher,
+ root_dir.to_path_buf(),
+ ignore_files,
+ StatusOptions {
+ check_exec,
+ last_normal_time,
+ list_clean,
+ list_ignored,
+ list_unknown,
+ collect_traversed_dirs,
+ },
+ )
+ .map_err(|e| handle_fallback(py, e))?;
+ build_response(py, status_res, warnings)
}
"includematcher" => {
// Get the patterns from Python even though most of them are
@@ -211,32 +211,27 @@
.collect();
let ignore_patterns = ignore_patterns?;
- let mut all_warnings = vec![];
- let (matcher, warnings) =
- IncludeMatcher::new(ignore_patterns, &root_dir)
- .map_err(|e| handle_fallback(py, e.into()))?;
- all_warnings.extend(warnings);
+ let matcher = IncludeMatcher::new(ignore_patterns)
+ .map_err(|e| handle_fallback(py, e.into()))?;
- let ((lookup, status_res), warnings) = status(
- &dmap,
- &matcher,
- root_dir.to_path_buf(),
- ignore_files,
- StatusOptions {
- check_exec,
- last_normal_time,
- list_clean,
- list_ignored,
- list_unknown,
- collect_traversed_dirs,
- },
- )
- .map_err(|e| handle_fallback(py, e))?;
+ let (status_res, warnings) = dmap
+ .status(
+ &matcher,
+ root_dir.to_path_buf(),
+ ignore_files,
+ StatusOptions {
+ check_exec,
+ last_normal_time,
+ list_clean,
+ list_ignored,
+ list_unknown,
+ collect_traversed_dirs,
+ },
+ )
+ .map_err(|e| handle_fallback(py, e))?;
- all_warnings.extend(warnings);
-
- build_response(py, lookup, status_res, all_warnings)
+ build_response(py, status_res, warnings)
}
e => Err(PyErr::new::<ValueError, _>(
py,
@@ -247,7 +242,6 @@
fn build_response(
py: Python,
- lookup: Vec<Cow<HgPath>>,
status_res: DirstateStatus,
warnings: Vec<PatternFileWarning>,
) -> PyResult<PyTuple> {
@@ -258,9 +252,10 @@
let clean = collect_pybytes_list(py, status_res.clean.as_ref());
let ignored = collect_pybytes_list(py, status_res.ignored.as_ref());
let unknown = collect_pybytes_list(py, status_res.unknown.as_ref());
- let lookup = collect_pybytes_list(py, lookup.as_ref());
+ let unsure = collect_pybytes_list(py, status_res.unsure.as_ref());
let bad = collect_bad_matches(py, status_res.bad.as_ref())?;
let traversed = collect_pybytes_list(py, status_res.traversed.as_ref());
+ let dirty = status_res.dirty.to_py_object(py);
let py_warnings = PyList::new(py, &[]);
for warning in warnings.iter() {
// We use duck-typing on the Python side for dispatch, good enough for
@@ -287,7 +282,7 @@
Ok(PyTuple::new(
py,
&[
- lookup.into_object(),
+ unsure.into_object(),
modified.into_object(),
added.into_object(),
removed.into_object(),
@@ -298,6 +293,7 @@
py_warnings.into_object(),
bad.into_object(),
traversed.into_object(),
+ dirty.into_object(),
][..],
))
}
--- a/rust/hg-cpython/src/parsers.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/parsers.rs Mon Jun 07 17:10:35 2021 -0400
@@ -14,13 +14,13 @@
PythonObject, ToPyObject,
};
use hg::{
- pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry,
- DirstateParents, FastHashMap, PARENT_SIZE,
+ dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate,
+ utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap,
+ PARENT_SIZE,
};
use std::convert::TryInto;
use crate::dirstate::{extract_dirstate, make_dirstate_tuple};
-use std::time::Duration;
fn parse_dirstate_wrapper(
py: Python,
@@ -98,7 +98,7 @@
p1: p1.try_into().unwrap(),
p2: p2.try_into().unwrap(),
},
- Duration::from_secs(now.as_object().extract::<u64>(py)?),
+ Timestamp(now.as_object().extract::<i64>(py)?),
) {
Ok(packed) => {
for (filename, entry) in dirstate_map.iter() {
--- a/rust/hg-cpython/src/revlog.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hg-cpython/src/revlog.rs Mon Jun 07 17:10:35 2021 -0400
@@ -172,6 +172,16 @@
self.call_cindex(py, "clearcaches", args, kw)
}
+ /// return the raw binary string representing a revision
+ def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
+ self.call_cindex(py, "entry_binary", args, kw)
+ }
+
+ /// return a binary packed version of the header
+ def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
+ self.call_cindex(py, "pack_header", args, kw)
+ }
+
/// get an index entry
def get(&self, *args, **kw) -> PyResult<PyObject> {
self.call_cindex(py, "get", args, kw)
@@ -290,6 +300,11 @@
self.cindex(py).borrow().inner().getattr(py, "entry_size")?.extract::<PyInt>(py)
}
+ @property
+ def rust_ext_compat(&self) -> PyResult<PyInt> {
+ self.cindex(py).borrow().inner().getattr(py, "rust_ext_compat")?.extract::<PyInt>(py)
+ }
+
});
impl MixedIndex {
@@ -454,7 +469,10 @@
.and_then(|m| m.get(py, "RevlogError"))
{
Err(e) => e,
- Ok(cls) => PyErr::from_instance(py, cls),
+ Ok(cls) => PyErr::from_instance(
+ py,
+ cls.call(py, (py.None(),), None).ok().into_py_object(py),
+ ),
}
}
--- a/rust/hgcli/pyoxidizer.bzl Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/hgcli/pyoxidizer.bzl Mon Jun 07 17:10:35 2021 -0400
@@ -1,5 +1,37 @@
+# The following variables can be passed in as parameters:
+#
+# VERSION
+# Version string of program being produced.
+#
+# MSI_NAME
+# Root name of MSI installer.
+#
+# EXTRA_MSI_FEATURES
+# Semicolon-delimited string of extra features to advertise in the built MSI.
+#
+# SIGNING_PFX_PATH
+# Path to code signing certificate to use.
+#
+# SIGNING_PFX_PASSWORD
+# Password to code signing PFX file defined by SIGNING_PFX_PATH.
+#
+# SIGNING_SUBJECT_NAME
+# Substring of the code signing certificate subject name, used to find the
+# code signing certificate in the Windows certificate store.
+#
+# TIME_STAMP_SERVER_URL
+# URL of the time-stamp token authority (RFC 3161) server used to stamp
+# code signatures.
+
ROOT = CWD + "/../.."
+VERSION = VARS.get("VERSION", "5.8")
+MSI_NAME = VARS.get("MSI_NAME", "mercurial")
+EXTRA_MSI_FEATURES = VARS.get("EXTRA_MSI_FEATURES")
+SIGNING_PFX_PATH = VARS.get("SIGNING_PFX_PATH")
+SIGNING_PFX_PASSWORD = VARS.get("SIGNING_PFX_PASSWORD", "")
+SIGNING_SUBJECT_NAME = VARS.get("SIGNING_SUBJECT_NAME")
+TIME_STAMP_SERVER_URL = VARS.get("TIME_STAMP_SERVER_URL", "http://timestamp.digicert.com")
+
IS_WINDOWS = "windows" in BUILD_TARGET_TRIPLE
# Code to run in Python interpreter.
@@ -8,10 +40,7 @@
set_build_path(ROOT + "/build/pyoxidizer")
def make_distribution():
- return default_python_distribution()
-
-def make_distribution_windows():
- return default_python_distribution(flavor = "standalone_dynamic")
+ return default_python_distribution(python_version = "3.9")
def resource_callback(policy, resource):
if not IS_WINDOWS:
@@ -50,7 +79,7 @@
packaging_policy.register_resource_callback(resource_callback)
config = dist.make_python_interpreter_config()
- config.raw_allocator = "system"
+ config.allocator_backend = "default"
config.run_command = RUN_CODE
# We want to let the user load extensions from the file system
@@ -83,34 +112,162 @@
return m
-def make_embedded_resources(exe):
- return exe.to_embedded_resources()
+
+# This adjusts the InstallManifest produced from exe generation to provide
+# additional files found in a Windows install layout.
+def make_windows_install_layout(manifest):
+ # Copy various files to new install locations. This can go away once
+ # we're using the importlib resource reader.
+ RECURSIVE_COPIES = {
+ "lib/mercurial/locale/": "locale/",
+ "lib/mercurial/templates/": "templates/",
+ }
+ for (search, replace) in RECURSIVE_COPIES.items():
+ for path in manifest.paths():
+ if path.startswith(search):
+ new_path = path.replace(search, replace)
+ print("copy %s to %s" % (path, new_path))
+ file = manifest.get_file(path)
+ manifest.add_file(file, path = new_path)
+
+ # Similar to above, but with filename pattern matching.
+ # lib/mercurial/helptext/**/*.txt -> helptext/
+ # lib/mercurial/defaultrc/*.rc -> defaultrc/
+ for path in manifest.paths():
+ if path.startswith("lib/mercurial/helptext/") and path.endswith(".txt"):
+ new_path = path[len("lib/mercurial/"):]
+ elif path.startswith("lib/mercurial/defaultrc/") and path.endswith(".rc"):
+ new_path = path[len("lib/mercurial/"):]
+ else:
+ continue
+
+ print("copying %s to %s" % (path, new_path))
+ manifest.add_file(manifest.get_file(path), path = new_path)
-register_target("distribution_posix", make_distribution)
-register_target("distribution_windows", make_distribution_windows)
+ # We also install a handful of additional files.
+ EXTRA_CONTRIB_FILES = [
+ "bash_completion",
+ "hgweb.fcgi",
+ "hgweb.wsgi",
+ "logo-droplets.svg",
+ "mercurial.el",
+ "mq.el",
+ "tcsh_completion",
+ "tcsh_completion_build.sh",
+ "xml.rnc",
+ "zsh_completion",
+ ]
+
+ for f in EXTRA_CONTRIB_FILES:
+ manifest.add_file(FileContent(path = ROOT + "/contrib/" + f), directory = "contrib")
-register_target("exe_posix", make_exe, depends = ["distribution_posix"])
-register_target("exe_windows", make_exe, depends = ["distribution_windows"])
+ # Individual files with full source to destination path mapping.
+ EXTRA_FILES = {
+ "contrib/hgk": "contrib/hgk.tcl",
+ "contrib/win32/postinstall.txt": "ReleaseNotes.txt",
+ "contrib/win32/ReadMe.html": "ReadMe.html",
+ "doc/style.css": "doc/style.css",
+ "COPYING": "Copying.txt",
+ }
+
+ for source, dest in EXTRA_FILES.items():
+ print("adding extra file %s" % dest)
+ manifest.add_file(FileContent(path = ROOT + "/" + source), path = dest)
+
+ # And finally some wildcard matches.
+ manifest.add_manifest(glob(
+ include = [ROOT + "/contrib/vim/*"],
+ strip_prefix = ROOT + "/"
+ ))
+ manifest.add_manifest(glob(
+ include = [ROOT + "/doc/*.html"],
+ strip_prefix = ROOT + "/"
+ ))
+
+ # But we don't ship hg-ssh on Windows, so exclude its documentation.
+ manifest.remove("doc/hg-ssh.8.html")
+
+ return manifest
+
-register_target(
- "app_posix",
- make_manifest,
- depends = ["distribution_posix", "exe_posix"],
- default = "windows" not in BUILD_TARGET_TRIPLE,
-)
-register_target(
- "app_windows",
- make_manifest,
- depends = ["distribution_windows", "exe_windows"],
- default = "windows" in BUILD_TARGET_TRIPLE,
-)
+def make_msi(manifest):
+ manifest = make_windows_install_layout(manifest)
+
+ if "x86_64" in BUILD_TARGET_TRIPLE:
+ platform = "x64"
+ else:
+ platform = "x86"
+
+ manifest.add_file(
+ FileContent(path = ROOT + "/contrib/packaging/wix/COPYING.rtf"),
+ path = "COPYING.rtf",
+ )
+ manifest.remove("Copying.txt")
+ manifest.add_file(
+ FileContent(path = ROOT + "/contrib/win32/mercurial.ini"),
+ path = "defaultrc/mercurial.rc",
+ )
+ manifest.add_file(
+ FileContent(filename = "editor.rc", content = "[ui]\neditor = notepad\n"),
+ path = "defaultrc/editor.rc",
+ )
+
+ wix = WiXInstaller("hg", "%s-%s.msi" % (MSI_NAME, VERSION))
+
+ # Materialize files in the manifest to the install layout.
+ wix.add_install_files(manifest)
+
+ # From mercurial.wxs.
+ wix.install_files_root_directory_id = "INSTALLDIR"
+
+ # Pull in our custom .wxs files.
+ defines = {
+ "PyOxidizer": "1",
+ "Platform": platform,
+ "Version": VERSION,
+ "Comments": "Installs Mercurial version %s" % VERSION,
+ "PythonVersion": "3",
+ "MercurialHasLib": "1",
+ }
+
+ if EXTRA_MSI_FEATURES:
+ defines["MercurialExtraFeatures"] = EXTRA_MSI_FEATURES
+
+ wix.add_wxs_file(
+ ROOT + "/contrib/packaging/wix/mercurial.wxs",
+ preprocessor_parameters=defines,
+ )
+
+ # Our .wxs files reference other files. Pull those into the build environment.
+ for f in ("defines.wxi", "guids.wxi", "COPYING.rtf"):
+ wix.add_build_file(f, ROOT + "/contrib/packaging/wix/" + f)
+
+ wix.add_build_file("mercurial.ico", ROOT + "/contrib/win32/mercurial.ico")
+
+ return wix
+
+
+def register_code_signers():
+ if not IS_WINDOWS:
+ return
+
+ if SIGNING_PFX_PATH:
+ signer = code_signer_from_pfx_file(SIGNING_PFX_PATH, SIGNING_PFX_PASSWORD)
+ elif SIGNING_SUBJECT_NAME:
+ signer = code_signer_from_windows_store_subject(SIGNING_SUBJECT_NAME)
+ else:
+ signer = None
+
+ if signer:
+ signer.set_time_stamp_server(TIME_STAMP_SERVER_URL)
+ signer.activate()
+
+
+register_code_signers()
+
+register_target("distribution", make_distribution)
+register_target("exe", make_exe, depends = ["distribution"])
+register_target("app", make_manifest, depends = ["distribution", "exe"], default = True)
+register_target("msi", make_msi, depends = ["app"])
resolve_targets()
-
-# END OF COMMON USER-ADJUSTED SETTINGS.
-#
-# Everything below this is typically managed by PyOxidizer and doesn't need
-# to be updated by people.
-
-PYOXIDIZER_VERSION = "0.9.0"
-PYOXIDIZER_COMMIT = "1fbc264cc004226cd76ee452e0a386ffca6ccfb1"
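
With the `VARS.get()` defaults above, a plain `pyoxidizer build` resolves the
default `app` target, and `pyoxidizer build msi` yields an unsigned
`mercurial-5.8.msi`. Release builds override the parameters on the command
line, e.g. `pyoxidizer build msi --var VERSION 5.8.1 --var SIGNING_PFX_PATH
cert.pfx` (this assumes PyOxidizer's `--var NAME VALUE` mechanism for
populating `VARS`; `--var-env` reads a value from an environment variable
instead, which is the safer channel for `SIGNING_PFX_PASSWORD`).
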
--- a/rust/rhg/src/commands/status.rs Sun Jun 06 01:24:30 2021 +0200
+++ b/rust/rhg/src/commands/status.rs Mon Jun 07 17:10:35 2021 -0400
@@ -9,13 +9,15 @@
use crate::ui::Ui;
use clap::{Arg, SubCommand};
use hg;
+use hg::dirstate_tree::dirstate_map::DirstateMap;
+use hg::errors::HgResultExt;
use hg::errors::IoResultExt;
use hg::matchers::AlwaysMatcher;
use hg::operations::cat;
use hg::repo::Repo;
use hg::revlog::node::Node;
use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
-use hg::{DirstateMap, StatusError};
+use hg::StatusError;
use hg::{HgPathCow, StatusOptions};
use log::{info, warn};
use std::convert::TryInto;
@@ -163,9 +165,17 @@
};
let repo = invocation.repo?;
- let mut dmap = DirstateMap::new();
- let dirstate_data = repo.hg_vfs().mmap_open("dirstate")?;
- let parents = dmap.read(&dirstate_data)?;
+ let dirstate_data =
+ repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?;
+ let dirstate_data = match &dirstate_data {
+ Some(mmap) => &**mmap,
+ None => b"",
+ };
+ let (mut dmap, parents) = if repo.has_dirstate_v2() {
+ DirstateMap::new_v2(dirstate_data)?
+ } else {
+ DirstateMap::new_v1(dirstate_data)?
+ };
let options = StatusOptions {
// TODO should be provided by the dirstate parsing and
// hence be stored on dmap. Using a value that assumes we aren't
@@ -181,8 +191,8 @@
collect_traversed_dirs: false,
};
let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
- let ((lookup, ds_status), pattern_warnings) = hg::status(
- &dmap,
+ let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status(
+ &mut dmap,
&AlwaysMatcher,
repo.working_directory_path().to_owned(),
vec![ignore_file],
@@ -195,59 +205,55 @@
if !ds_status.bad.is_empty() {
warn!("Bad matches {:?}", &(ds_status.bad))
}
- if !lookup.is_empty() {
+ if !ds_status.unsure.is_empty() {
info!(
"Files to be rechecked by retrieval from filelog: {:?}",
- &lookup
+ &ds_status.unsure
);
}
- // TODO check ordering to match `hg status` output.
- // (this is as in `hg help status`)
- if display_states.modified {
- display_status_paths(ui, &(ds_status.modified), b"M")?;
- }
- if !lookup.is_empty() {
+ if !ds_status.unsure.is_empty()
+ && (display_states.modified || display_states.clean)
+ {
let p1: Node = parents
.expect(
"Dirstate with no parents should not list any file to
- be rechecked for modifications",
+ be rechecked for modifications",
)
.p1
.into();
let p1_hex = format!("{:x}", p1);
- let mut rechecked_modified: Vec<HgPathCow> = Vec::new();
- let mut rechecked_clean: Vec<HgPathCow> = Vec::new();
- for to_check in lookup {
+ for to_check in ds_status.unsure {
if cat_file_is_modified(repo, &to_check, &p1_hex)? {
- rechecked_modified.push(to_check);
+ if display_states.modified {
+ ds_status.modified.push(to_check);
+ }
} else {
- rechecked_clean.push(to_check);
+ if display_states.clean {
+ ds_status.clean.push(to_check);
+ }
}
}
- if display_states.modified {
- display_status_paths(ui, &rechecked_modified, b"M")?;
- }
- if display_states.clean {
- display_status_paths(ui, &rechecked_clean, b"C")?;
- }
+ }
+ if display_states.modified {
+ display_status_paths(ui, &mut ds_status.modified, b"M")?;
}
if display_states.added {
- display_status_paths(ui, &(ds_status.added), b"A")?;
- }
- if display_states.clean {
- display_status_paths(ui, &(ds_status.clean), b"C")?;
+ display_status_paths(ui, &mut ds_status.added, b"A")?;
}
if display_states.removed {
- display_status_paths(ui, &(ds_status.removed), b"R")?;
+ display_status_paths(ui, &mut ds_status.removed, b"R")?;
}
if display_states.deleted {
- display_status_paths(ui, &(ds_status.deleted), b"!")?;
+ display_status_paths(ui, &mut ds_status.deleted, b"!")?;
}
if display_states.unknown {
- display_status_paths(ui, &(ds_status.unknown), b"?")?;
+ display_status_paths(ui, &mut ds_status.unknown, b"?")?;
}
if display_states.ignored {
- display_status_paths(ui, &(ds_status.ignored), b"I")?;
+ display_status_paths(ui, &mut ds_status.ignored, b"I")?;
+ }
+ if display_states.clean {
+ display_status_paths(ui, &mut ds_status.clean, b"C")?;
}
Ok(())
}
@@ -256,9 +262,10 @@
// hardcode HgPathBuf, but probably not really useful at this point
fn display_status_paths(
ui: &Ui,
- paths: &[HgPathCow],
+ paths: &mut [HgPathCow],
status_prefix: &[u8],
) -> Result<(), CommandError> {
+ paths.sort_unstable();
for path in paths {
// Same TODO as in commands::root
let bytes: &[u8] = path.as_bytes();
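
A note on the control flow above: the old separate `lookup` vector is now
`ds_status.unsure`, and each unsure file is re-bucketed into `modified` or
`clean` by comparing working-copy content against p1 (`cat_file_is_modified`)
before anything is displayed. A minimal sketch of that re-bucketing, with
hypothetical stand-in types:

    // Hypothetical stand-ins for DirstateStatus and the content check.
    struct Status {
        unsure: Vec<String>,
        modified: Vec<String>,
        clean: Vec<String>,
    }

    fn recheck_unsure(
        st: &mut Status,
        is_modified: impl Fn(&str) -> bool,
        show_modified: bool,
        show_clean: bool,
    ) {
        for path in std::mem::take(&mut st.unsure) {
            if is_modified(&path) {
                if show_modified {
                    st.modified.push(path);
                }
            } else if show_clean {
                st.clean.push(path);
            }
        }
    }
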
--- a/tests/drawdag.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/drawdag.py Mon Jun 07 17:10:35 2021 -0400
@@ -86,7 +86,6 @@
import itertools
import re
-from mercurial.node import nullid
from mercurial.i18n import _
from mercurial import (
context,
@@ -299,7 +298,7 @@
self._added = added
self._parents = parentctxs
while len(self._parents) < 2:
- self._parents.append(repo[nullid])
+ self._parents.append(repo[repo.nullid])
def filectx(self, key):
return simplefilectx(key, self._added[key])
@@ -388,7 +387,7 @@
content = content.replace(br'\n', b'\n').replace(br'\1', b'\1')
files[name][path] = content
- committed = {None: nullid} # {name: node}
+ committed = {None: repo.nullid} # {name: node}
# for leaf nodes, try to find existing nodes in repo
for name, parents in edges.items():
--- a/tests/flagprocessorext.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/flagprocessorext.py Mon Jun 07 17:10:35 2021 -0400
@@ -13,6 +13,7 @@
util,
)
from mercurial.revlogutils import flagutil
+from mercurial.interfaces import repository
# Test only: These flags are defined here only in the context of testing the
# behavior of the flag processor. The canonical way to add flags is to get in
@@ -131,6 +132,7 @@
# Teach revlog about our test flags
flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
flagutil.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
+ repository.REVISION_FLAGS_KNOWN |= util.bitsfrom(flags)
revlog.REVIDX_FLAGS_ORDER.extend(flags)
# Teach exchange to use changegroup 3
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/helper-killhook.py Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,5 @@
+import os
+
+
+def killme(ui, repo, hooktype, **kwargs):
+ os._exit(80)
--- a/tests/hghave.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/hghave.py Mon Jun 07 17:10:35 2021 -0400
@@ -29,7 +29,8 @@
stdout = getattr(sys.stdout, 'buffer', sys.stdout)
stderr = getattr(sys.stderr, 'buffer', sys.stderr)
-if sys.version_info[0] >= 3:
+is_not_python2 = sys.version_info[0] >= 3
+if is_not_python2:
def _sys2bytes(p):
if p is None:
@@ -104,8 +105,8 @@
check, desc = checks[feature]
try:
available = check()
- except Exception:
- result['error'].append('hghave check failed: %s' % feature)
+ except Exception as e:
+ result['error'].append('hghave check %s failed: %r' % (feature, e))
continue
if not negate and not available:
@@ -167,33 +168,25 @@
return matchoutput('baz --version 2>&1', br'baz Bazaar version')
-@check("bzr", "Canonical's Bazaar client")
+@check("bzr", "Breezy library and executable version >= 3.1")
def has_bzr():
+ if not is_not_python2:
+ return False
try:
- import bzrlib
- import bzrlib.bzrdir
- import bzrlib.errors
- import bzrlib.revision
- import bzrlib.revisionspec
+ # Test the Breezy python lib
+ import breezy
+ import breezy.bzr.bzrdir
+ import breezy.errors
+ import breezy.revision
+ import breezy.revisionspec
- bzrlib.revisionspec.RevisionSpec
- return bzrlib.__doc__ is not None
+ breezy.revisionspec.RevisionSpec
+ if breezy.__doc__ is None or breezy.version_info[:2] < (3, 1):
+ return False
except (AttributeError, ImportError):
return False
-
-
-@checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,))
-def has_bzr_range(v):
- major, minor = v.split('rc')[0].split('.')[0:2]
- try:
- import bzrlib
-
- return bzrlib.__doc__ is not None and bzrlib.version_info[:2] >= (
- int(major),
- int(minor),
- )
- except ImportError:
- return False
+ # Test the executable
+ return matchoutput('brz --version 2>&1', br'Breezy \(brz\) ')
@check("chg", "running with chg")
@@ -1045,6 +1038,14 @@
return 'fncache' in getrepofeatures()
+@check('dirstate-v2', 'using the v2 format of .hg/dirstate')
+def has_dirstate_v2():
+ # Keep this logic in sync with `newreporequirements()` in `mercurial/localrepo.py`
+ return has_rust() and matchoutput(
+ 'hg config format.exp-dirstate-v2', b'(?i)1|yes|true|on|always'
+ )
+
+
@check('sqlite', 'sqlite3 module and matching cli is available')
def has_sqlite():
try:
@@ -1121,3 +1122,8 @@
return True
except ImportError:
return False
+
+
+@check("bash", "bash shell")
+def has_bash():
+ return matchoutput("bash -c 'echo hi'", b'^hi$')
--- a/tests/run-tests.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/run-tests.py Mon Jun 07 17:10:35 2021 -0400
@@ -87,21 +87,31 @@
processlock = threading.Lock()
pygmentspresent = False
-# ANSI color is unsupported prior to Windows 10
-if os.name != 'nt':
- try: # is pygments installed
- import pygments
- import pygments.lexers as lexers
- import pygments.lexer as lexer
- import pygments.formatters as formatters
- import pygments.token as token
- import pygments.style as style
-
- pygmentspresent = True
- difflexer = lexers.DiffLexer()
- terminal256formatter = formatters.Terminal256Formatter()
- except ImportError:
- pass
+try: # is pygments installed
+ import pygments
+ import pygments.lexers as lexers
+ import pygments.lexer as lexer
+ import pygments.formatters as formatters
+ import pygments.token as token
+ import pygments.style as style
+
+ if os.name == 'nt':
+ hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ sys.path.append(hgpath)
+ try:
+ from mercurial import win32 # pytype: disable=import-error
+
+ # Don't check the result code because it fails on heptapod, but
+ # something is able to convert to color anyway.
+ win32.enablevtmode()
+ finally:
+ sys.path = sys.path[:-1]
+
+ pygmentspresent = True
+ difflexer = lexers.DiffLexer()
+ terminal256formatter = formatters.Terminal256Formatter()
+except ImportError:
+ pass
if pygmentspresent:
@@ -1376,6 +1386,8 @@
env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
env['HGEMITWARNINGS'] = '1'
env['TESTTMP'] = _bytes2sys(self._testtmp)
+ uid_file = os.path.join(_bytes2sys(self._testtmp), 'UID')
+ env['HGTEST_UUIDFILE'] = uid_file
env['TESTNAME'] = self.name
env['HOME'] = _bytes2sys(self._testtmp)
if os.name == 'nt':
@@ -2201,7 +2213,13 @@
self.faildata = {}
if options.color == 'auto':
- self.color = pygmentspresent and self.stream.isatty()
+ isatty = self.stream.isatty()
+ # For some reason, redirecting stdout on Windows disables the ANSI
+ # color processing of stderr, which is what is used to print the
+ # output. Therefore, both must be tty on Windows to enable color.
+ if os.name == 'nt':
+ isatty = isatty and sys.stdout.isatty()
+ self.color = pygmentspresent and isatty
elif options.color == 'never':
self.color = False
else: # 'always', for testing purposes
@@ -3544,7 +3562,7 @@
if os.getenv('MSYSTEM'):
with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
f.write(b'#!/bin/sh\n')
- f.write(b'py -3 "$@"\n')
+ f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
exedir, exename = os.path.split(sysexecutable)
vlog(
--- a/tests/simplestorerepo.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/simplestorerepo.py Mon Jun 07 17:10:35 2021 -0400
@@ -18,7 +18,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
nullrev,
)
from mercurial.thirdparty import attr
@@ -136,18 +135,18 @@
self._indexbynode[entry[b'node']] = entry
self._indexbyrev[i] = entry
- self._indexbynode[nullid] = {
- b'node': nullid,
- b'p1': nullid,
- b'p2': nullid,
+ self._indexbynode[self._repo.nullid] = {
+ b'node': self._repo.nullid,
+ b'p1': self._repo.nullid,
+ b'p2': self._repo.nullid,
b'linkrev': nullrev,
b'flags': 0,
}
self._indexbyrev[nullrev] = {
- b'node': nullid,
- b'p1': nullid,
- b'p2': nullid,
+ b'node': self._repo.nullid,
+ b'p1': self._repo.nullid,
+ b'p2': self._repo.nullid,
b'linkrev': nullrev,
b'flags': 0,
}
@@ -160,7 +159,7 @@
(0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
)
- self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
+ self._index.append((0, 0, 0, -1, -1, -1, -1, self._repo.nullid))
def __len__(self):
return len(self._indexdata)
@@ -288,7 +287,7 @@
node = nodeorrev
validatenode(node)
- if node == nullid:
+ if node == self._repo.nullid:
return b''
rev = self.rev(node)
@@ -325,7 +324,7 @@
def renamed(self, node):
validatenode(node)
- if self.parents(node)[0] != nullid:
+ if self.parents(node)[0] != self._repo.nullid:
return False
fulltext = self.revision(node)
@@ -451,7 +450,7 @@
sidedata_helpers=None,
):
# TODO this will probably break on some ordering options.
- nodes = [n for n in nodes if n != nullid]
+ nodes = [n for n in nodes if n != self._repo.nullid]
if not nodes:
return
for delta in storageutil.emitrevisions(
@@ -559,7 +558,7 @@
continue
# Need to resolve the fulltext from the delta base.
- if deltabase == nullid:
+ if deltabase == self._repo.nullid:
text = mdiff.patch(b'', delta)
else:
text = mdiff.patch(self.revision(deltabase), delta)
@@ -588,11 +587,11 @@
# This is copied from revlog.py.
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [self._repo.nullid]
return [self.node(r) for r in self._headrevs()]
if start is None:
- start = nullid
+ start = self._repo.nullid
if stop is None:
stop = []
stoprevs = {self.rev(n) for n in stop}
--- a/tests/test-amend.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-amend.t Mon Jun 07 17:10:35 2021 -0400
@@ -196,7 +196,8 @@
$ hg update -q B
$ echo 2 >> B
$ hg amend
- abort: cannot amend changeset with children
+ abort: cannot amend changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
#if obsstore-on
@@ -231,6 +232,24 @@
$ hg debugobsolete -r .
112478962961147124edd43549aedd1a335e44bf be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 16084da537dd8f84cfdb3055c633772269d62e1b 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'note': 'adding bar', 'operation': 'amend', 'user': 'test'}
+
+Cannot cause divergence by default
+
+ $ hg co --hidden 1
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg amend -m divergent
+ abort: cannot amend 112478962961, as that creates content-divergence with 16084da537dd
+ (add --verbose for details or see 'hg help evolution.instability')
+ [10]
+ $ hg amend -m divergent --verbose
+ abort: cannot amend 112478962961, as that creates content-divergence with 16084da537dd
+ changeset 112478962961 already has a successor in changeset 16084da537dd
+ rewriting changeset 112478962961 would create "content-divergence"
+ set experimental.evolution.allowdivergence=True to skip this check
+ (see 'hg help evolution.instability' for details on content-divergence)
+ [10]
+ $ hg amend -m divergent --config experimental.evolution.allowdivergence=true
+ 2 new content-divergent changesets
#endif
Cannot amend public changeset
@@ -238,7 +257,7 @@
$ hg phase -r A --public
$ hg update -C -q A
$ hg amend -m AMEND
- abort: cannot amend public changesets
+ abort: cannot amend public changesets: 426bada5c675
(see 'hg help phases' for details)
[10]
--- a/tests/test-annotate.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-annotate.t Mon Jun 07 17:10:35 2021 -0400
@@ -479,19 +479,19 @@
$ cat > ../legacyrepo.py <<EOF
> from __future__ import absolute_import
- > from mercurial import commit, error, extensions, node
+ > from mercurial import commit, error, extensions
> def _filecommit(orig, repo, fctx, manifest1, manifest2,
> linkrev, tr, includecopymeta, ms):
> fname = fctx.path()
> text = fctx.data()
> flog = repo.file(fname)
- > fparent1 = manifest1.get(fname, node.nullid)
- > fparent2 = manifest2.get(fname, node.nullid)
+ > fparent1 = manifest1.get(fname, repo.nullid)
+ > fparent2 = manifest2.get(fname, repo.nullid)
> meta = {}
> copy = fctx.copysource()
> if copy and copy != fname:
> raise error.Abort('copying is not supported')
- > if fparent2 != node.nullid:
+ > if fparent2 != repo.nullid:
> return flog.add(text, meta, tr, linkrev,
> fparent1, fparent2), 'modified'
> raise error.Abort('only merging is supported')
--- a/tests/test-basic.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-basic.t Mon Jun 07 17:10:35 2021 -0400
@@ -5,6 +5,7 @@
devel.all-warnings=true
devel.default-date=0 0
extensions.fsmonitor= (fsmonitor !)
+ format.exp-dirstate-v2=1 (dirstate-v2 !)
largefiles.usercache=$TESTTMP/.cache/largefiles
lfs.usercache=$TESTTMP/.cache/lfs
ui.slash=True
--- a/tests/test-blackbox.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-blackbox.t Mon Jun 07 17:10:35 2021 -0400
@@ -221,7 +221,7 @@
1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob)
1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> exthook-update: echo hooked finished in * seconds (glob)
1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> update exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
+ 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --no-profile --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> blackbox -l 5
log rotation
--- a/tests/test-branch-change.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-branch-change.t Mon Jun 07 17:10:35 2021 -0400
@@ -57,7 +57,8 @@
Change in middle of the stack (linear commits)
$ hg branch -r 1::3 foo
- abort: cannot change branch of changeset with children
+ abort: cannot change branch of changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
Change with dirty working directory
@@ -128,7 +129,8 @@
Changing on a branch head which is not topological head
$ hg branch -r 2 stable
- abort: cannot change branch of changeset with children
+ abort: cannot change branch of changeset, as that will orphan 2 descendants
+ (see 'hg help evolution.instability')
[10]
Enabling the allowunstable config and trying to change branch on a branch head
@@ -148,7 +150,8 @@
[255]
$ hg branch -r 4 --hidden foobar
- abort: cannot change branch of a obsolete changeset
+ abort: cannot change branch of 3938acfb5c0f, as that creates content-divergence with 7c1991464886
+ (add --verbose for details or see 'hg help evolution.instability')
[10]
Make sure bookmark movement is correct
@@ -366,7 +369,7 @@
$ hg phase -r . -p
$ hg branch -r . def
- abort: cannot change branch of public changesets
+ abort: cannot change branch of public changesets: d1c2addda4a2
(see 'hg help phases' for details)
[10]
--- a/tests/test-bundle-r.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-bundle-r.t Mon Jun 07 17:10:35 2021 -0400
@@ -224,7 +224,7 @@
adding changesets
transaction abort!
rollback completed
- abort: 00changelog.i@93ee6ab32777cd430e07da694794fb6a4f917712: unknown parent
+ abort: 00changelog@93ee6ab32777cd430e07da694794fb6a4f917712: unknown parent
[50]
revision 2
--- a/tests/test-bundle.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-bundle.t Mon Jun 07 17:10:35 2021 -0400
@@ -751,7 +751,7 @@
partial history bundle, fails w/ unknown parent
$ hg -R bundle.hg verify
- abort: 00changelog.i@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
+ abort: 00changelog@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
[50]
full history bundle, refuses to verify non-local repo
--- a/tests/test-censor.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-censor.t Mon Jun 07 17:10:35 2021 -0400
@@ -52,44 +52,37 @@
Verify target contents before censorship at each revision
- $ hg cat -r $H1 target
+ $ hg cat -r $H1 target | head -n 10
Tainted file is now sanitized
- $ hg cat -r $H2 target
+ $ hg cat -r $H2 target | head -n 10
Tainted file now super sanitized
- $ hg cat -r $C2 target
+ $ hg cat -r $C2 target | head -n 10
Tainted file
Passwords: hunter2
hunter3
- $ hg cat -r $C1 target
+ $ hg cat -r $C1 target | head -n 10
Tainted file
Passwords: hunter2
- $ hg cat -r 0 target
+ $ hg cat -r 0 target | head -n 10
Initially untainted file
-Try to censor revision with too large of a tombstone message
-
- $ hg censor -r $C1 -t 'blah blah blah blah blah blah blah blah bla' target
- abort: censor tombstone must be no longer than censored data
- [255]
-
Censor revision with 2 offenses
(this also tests file pattern matching: path relative to cwd case)
$ mkdir -p foo/bar/baz
$ hg --cwd foo/bar/baz censor -r $C2 -t "remove password" ../../../target
- $ hg cat -r $H1 target
+ $ hg cat -r $H1 target | head -n 10
Tainted file is now sanitized
- $ hg cat -r $H2 target
+ $ hg cat -r $H2 target | head -n 10
Tainted file now super sanitized
- $ hg cat -r $C2 target
+ $ hg cat -r $C2 target | head -n 10
abort: censored node: 1e0247a9a4b7
(set censor.policy to ignore errors)
- [255]
- $ hg cat -r $C1 target
+ $ hg cat -r $C1 target | head -n 10
Tainted file
Passwords: hunter2
- $ hg cat -r 0 target
+ $ hg cat -r 0 target | head -n 10
Initially untainted file
Censor revision with 1 offense
@@ -97,31 +90,27 @@
(this also tests file pattern matching: with 'path:' scheme)
$ hg --cwd foo/bar/baz censor -r $C1 path:target
- $ hg cat -r $H1 target
+ $ hg cat -r $H1 target | head -n 10
Tainted file is now sanitized
- $ hg cat -r $H2 target
+ $ hg cat -r $H2 target | head -n 10
Tainted file now super sanitized
- $ hg cat -r $C2 target
+ $ hg cat -r $C2 target | head -n 10
abort: censored node: 1e0247a9a4b7
(set censor.policy to ignore errors)
- [255]
- $ hg cat -r $C1 target
+ $ hg cat -r $C1 target | head -n 10
abort: censored node: 613bc869fceb
(set censor.policy to ignore errors)
- [255]
- $ hg cat -r 0 target
+ $ hg cat -r 0 target | head -n 10
Initially untainted file
Can only checkout target at uncensored revisions, -X is workaround for --all
- $ hg revert -r $C2 target
+ $ hg revert -r $C2 target | head -n 10
abort: censored node: 1e0247a9a4b7
(set censor.policy to ignore errors)
- [255]
- $ hg revert -r $C1 target
+ $ hg revert -r $C1 target | head -n 10
abort: censored node: 613bc869fceb
(set censor.policy to ignore errors)
- [255]
$ hg revert -r $C1 --all
reverting bystander
reverting target
@@ -129,38 +118,38 @@
(set censor.policy to ignore errors)
[255]
$ hg revert -r $C1 --all -X target
- $ cat target
+ $ cat target | head -n 10
Tainted file now super sanitized
$ hg revert -r 0 --all
reverting target
- $ cat target
+ $ cat target | head -n 10
Initially untainted file
$ hg revert -r $H2 --all
reverting bystander
reverting target
- $ cat target
+ $ cat target | head -n 10
Tainted file now super sanitized
Uncensored file can be viewed at any revision
- $ hg cat -r $H1 bystander
+ $ hg cat -r $H1 bystander | head -n 10
Normal file v2
- $ hg cat -r $C2 bystander
+ $ hg cat -r $C2 bystander | head -n 10
Normal file v2
- $ hg cat -r $C1 bystander
+ $ hg cat -r $C1 bystander | head -n 10
Normal file here
- $ hg cat -r 0 bystander
+ $ hg cat -r 0 bystander | head -n 10
Normal file here
Can update to children of censored revision
$ hg update -r $H1
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Tainted file is now sanitized
$ hg update -r $H2
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Tainted file now super sanitized
Set censor policy to abort in trusted $HGRC so hg verify fails
@@ -221,17 +210,17 @@
$ hg update -r $C2
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
$ hg update -r $C1
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
$ hg update -r 0
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Initially untainted file
$ hg update -r $H2
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Tainted file now super sanitized
Can merge in revision with censored data. Test requires one branch of history
@@ -288,20 +277,19 @@
$ hg ci -m 'delete target so it may be censored'
$ H2=`hg id --debug -i`
$ hg censor -r $C4 target
- $ hg cat -r $C4 target
- $ hg cat -r "$H2^^" target
+ $ hg cat -r $C4 target | head -n 10
+ $ hg cat -r "$H2^^" target | head -n 10
Tainted file now super sanitized
$ echo 'fresh start' > target
$ hg add target
$ hg ci -m reincarnated target
$ H2=`hg id --debug -i`
- $ hg cat -r $H2 target
+ $ hg cat -r $H2 target | head -n 10
fresh start
- $ hg cat -r "$H2^" target
+ $ hg cat -r "$H2^" target | head -n 10
target: no such file in rev 452ec1762369
- [1]
- $ hg cat -r $C4 target
- $ hg cat -r "$H2^^^" target
+ $ hg cat -r $C4 target | head -n 10
+ $ hg cat -r "$H2^^^" target | head -n 10
Tainted file now super sanitized
Can censor after revlog has expanded to no longer permit inline storage
@@ -317,8 +305,8 @@
$ hg ci -m 'cleaned 100k passwords'
$ H2=`hg id --debug -i`
$ hg censor -r $C5 target
- $ hg cat -r $C5 target
- $ hg cat -r $H2 target
+ $ hg cat -r $C5 target | head -n 10
+ $ hg cat -r $H2 target | head -n 10
fresh start
Repo with censored nodes can be cloned and cloned nodes are censored
@@ -328,13 +316,13 @@
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd rclone
- $ hg cat -r $H1 target
+ $ hg cat -r $H1 target | head -n 10
advanced head H1
- $ hg cat -r $H2~5 target
+ $ hg cat -r $H2~5 target | head -n 10
Tainted file now super sanitized
- $ hg cat -r $C2 target
- $ hg cat -r $C1 target
- $ hg cat -r 0 target
+ $ hg cat -r $C2 target | head -n 10
+ $ hg cat -r $C1 target | head -n 10
+ $ hg cat -r 0 target | head -n 10
Initially untainted file
$ hg verify
checking changesets
@@ -346,7 +334,7 @@
Repo cloned before tainted content introduced can pull censored nodes
$ cd ../rpull
- $ hg cat -r tip target
+ $ hg cat -r tip target | head -n 10
Initially untainted file
$ hg verify
checking changesets
@@ -365,15 +353,15 @@
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg update 4
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Tainted file now super sanitized
- $ hg cat -r $H1 target
+ $ hg cat -r $H1 target | head -n 10
advanced head H1
- $ hg cat -r $H2~5 target
+ $ hg cat -r $H2~5 target | head -n 10
Tainted file now super sanitized
- $ hg cat -r $C2 target
- $ hg cat -r $C1 target
- $ hg cat -r 0 target
+ $ hg cat -r $C2 target | head -n 10
+ $ hg cat -r $C1 target | head -n 10
+ $ hg cat -r 0 target | head -n 10
Initially untainted file
$ hg verify
checking changesets
@@ -393,11 +381,11 @@
$ hg ci -m 're-sanitized' target
$ H2=`hg id --debug -i`
$ CLEANREV=$H2
- $ hg cat -r $REV target
+ $ hg cat -r $REV target | head -n 10
Passwords: hunter2hunter2
$ hg censor -r $REV target
- $ hg cat -r $REV target
- $ hg cat -r $CLEANREV target
+ $ hg cat -r $REV target | head -n 10
+ $ hg cat -r $CLEANREV target | head -n 10
Re-sanitized; nothing to see here
$ hg push -f -r $H2
pushing to $TESTTMP/r
@@ -408,12 +396,12 @@
added 2 changesets with 2 changes to 1 files (+1 heads)
$ cd ../r
- $ hg cat -r $REV target
- $ hg cat -r $CLEANREV target
+ $ hg cat -r $REV target | head -n 10
+ $ hg cat -r $CLEANREV target | head -n 10
Re-sanitized; nothing to see here
$ hg update $CLEANREV
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Re-sanitized; nothing to see here
Censored nodes can be bundled up and unbundled in another repo
@@ -428,12 +416,12 @@
added 2 changesets with 2 changes to 2 files (+1 heads)
new changesets 075be80ac777:dcbaf17bf3a1 (2 drafts)
(run 'hg heads .' to see heads, 'hg merge' to merge)
- $ hg cat -r $REV target
- $ hg cat -r $CLEANREV target
+ $ hg cat -r $REV target | head -n 10
+ $ hg cat -r $CLEANREV target | head -n 10
Re-sanitized; nothing to see here
$ hg update $CLEANREV
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Re-sanitized; nothing to see here
$ hg verify
checking changesets
@@ -492,7 +480,7 @@
(run 'hg heads .' to see heads, 'hg merge' to merge)
$ hg update $H2
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat target
+ $ cat target | head -n 10
Re-sanitized; nothing to see here
$ hg verify
checking changesets
@@ -516,4 +504,4 @@
added 1 changesets with 2 changes to 2 files
new changesets e97f55b2665a (1 drafts)
(run 'hg update' to get a working copy)
- $ hg cat -r 0 target
+ $ hg cat -r 0 target | head -n 10
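The censor hunks above pipe `hg cat` and `cat` through `head -n 10` so a
censored revision's padded replacement data cannot flood the expected
output. For poking at censored nodes outside the harness, a minimal Python
sketch (assuming a repository in "." whose file `target` has a censored
revision, under the default censor.policy=abort):

  from mercurial import error, hg, ui as uimod

  u = uimod.ui.load()
  repo = hg.repository(u, b'.')
  fl = repo.file(b'target')
  for rev in fl:
      try:
          fl.read(fl.node(rev))
      except error.CensoredNodeError as exc:
          # this is the "abort: censored node: ..." case seen above
          print('revision %d is censored: %s' % (rev, exc))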
--- a/tests/test-check-interfaces.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-check-interfaces.py Mon Jun 07 17:10:35 2021 -0400
@@ -282,6 +282,7 @@
revision=b'',
sidedata=b'',
delta=None,
+ protocol_flags=b'',
)
checkzobject(rd)
--- a/tests/test-chg.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-chg.t Mon Jun 07 17:10:35 2021 -0400
@@ -458,6 +458,7 @@
LC_CTYPE=
$ (unset LC_ALL; unset LANG; LC_CTYPE=unsupported_value chg \
> --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+ *cannot change locale* (glob) (?)
LC_CTYPE=unsupported_value
$ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
> --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
@@ -467,3 +468,72 @@
LC_ALL=
LC_CTYPE=
LANG=
+
+Profiling isn't permanently enabled or carried over between chg invocations that
+share the same server
+ $ cp $HGRCPATH.orig $HGRCPATH
+ $ hg init $TESTTMP/profiling
+ $ cd $TESTTMP/profiling
+ $ filteredchg() {
+ > CHGDEBUG=1 chg "$@" 2>&1 | egrep 'Sample count|start cmdserver' || true
+ > }
+ $ newchg() {
+ > chg --kill-chg-daemon
+ > filteredchg "$@" | egrep -v 'start cmdserver' || true
+ > }
+(--profile isn't permanently on just because it was specified when chg was
+started)
+ $ newchg log -r . --profile
+ Sample count: * (glob)
+ $ filteredchg log -r .
+(enabling profiling via config works, even on the first chg command that starts
+a cmdserver)
+ $ cat >> $HGRCPATH <<EOF
+ > [profiling]
+ > type=stat
+ > enabled=1
+ > EOF
+ $ newchg log -r .
+ Sample count: * (glob)
+ $ filteredchg log -r .
+ Sample count: * (glob)
+(test that we aren't accumulating more and more samples each run)
+ $ cat > $TESTTMP/debugsleep.py <<EOF
+ > import time
+ > from mercurial import registrar
+ > cmdtable = {}
+ > command = registrar.command(cmdtable)
+ > @command(b'debugsleep', [], b'', norepo=True)
+ > def debugsleep(ui):
+ > start = time.time()
+ > x = 0
+ > while time.time() < start + 0.5:
+ > time.sleep(.1)
+ > x += 1
+ > ui.status(b'%d debugsleep iterations in %.03fs\n' % (x, time.time() - start))
+ > EOF
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > debugsleep = $TESTTMP/debugsleep.py
+ > EOF
+ $ newchg debugsleep > run_1
+ $ filteredchg debugsleep > run_2
+ $ filteredchg debugsleep > run_3
+ $ filteredchg debugsleep > run_4
+FIXME: Run 4 should not be >3x Run 1's number of samples.
+ $ "$PYTHON" <<EOF
+ > r1 = int(open("run_1", "r").read().split()[-1])
+ > r4 = int(open("run_4", "r").read().split()[-1])
+ > print("Run 1: %d samples\nRun 4: %d samples\nRun 4 > 3 * Run 1: %s" %
+ > (r1, r4, r4 > (r1 * 3)))
+ > EOF
+ Run 1: * samples (glob)
+ Run 4: * samples (glob)
+ Run 4 > 3 * Run 1: False
+(Disabling with --no-profile on the commandline still works, but isn't permanent)
+ $ newchg log -r . --no-profile
+ $ filteredchg log -r .
+ Sample count: * (glob)
+ $ filteredchg log -r . --no-profile
+ $ filteredchg log -r .
+ Sample count: * (glob)
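The profiling test drives chg through the `filteredchg`/`newchg` helpers
and greps the debug output for the profiler summary. A rough standalone
equivalent of that grep as a Python helper (the helper name is mine; the
`Sample count:` format comes from the test above):

  import os
  import re
  import subprocess

  def sample_count(*args):
      # run chg with CHGDEBUG=1 so the stat profiler summary is
      # emitted, then extract N from the "Sample count: N" line
      env = dict(os.environ, CHGDEBUG='1')
      proc = subprocess.run(
          ['chg'] + list(args), env=env, capture_output=True, text=True
      )
      m = re.search(r'Sample count: (\d+)', proc.stdout + proc.stderr)
      return int(m.group(1)) if m else 0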
--- a/tests/test-clone-uncompressed.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-clone-uncompressed.t Mon Jun 07 17:10:35 2021 -0400
@@ -215,49 +215,86 @@
content-type: application/mercurial-0.2
+#if no-zstd no-rust
$ f --size --hex --bytes 256 body
- body: size=112262 (no-zstd !)
- body: size=109410 (zstd no-rust !)
- body: size=109431 (rust !)
+ body: size=112262
0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
- 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
- 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
- 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
- 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
- 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
- 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
- 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
+ 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98|
+ 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030|
+ 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
+ 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
+ 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
+ 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
+ 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
+ 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
+ 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
+ 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
+ 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
+ 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u|
+ 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....|
+ 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
+#endif
+#if zstd no-rust
+ $ f --size --hex --bytes 256 body
+ body: size=109410
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95|
+ 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
- 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
- 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
- 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
- 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
- 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
- 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
- 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
- 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
- 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
- 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
- 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
- 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
- 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
- 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
- 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
- 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
- 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
- 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
- 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
- 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
- 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
- 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
- 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
- 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
- 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
- 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
- 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
+ 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress|
+ 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo|
+ 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl|
+ 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.|
+ 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......|
+ 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................|
+ 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#|
+ 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...|
+ 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda|
+#endif
+#if zstd rust no-dirstate-v2
+ $ f --size --hex --bytes 256 body
+ body: size=109431
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95|
+ 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
+ 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
+ 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
+ 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
+ 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node|
+ 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com|
+ 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C|
+ 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars|
+ 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.|
+ 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..|
+ 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................|
+ 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)|
+ 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.|
+#endif
+#if zstd dirstate-v2
+ $ f --size --hex --bytes 256 body
+ body: size=109449
+ 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
+ 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
+ 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
+ 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
+ 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
+ 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
+ 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
+ 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
+ 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
+ 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
+ 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
+ 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
+ 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
+ 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
+ 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
+#endif
--uncompressed is an alias to --stream
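The `f --size --hex --bytes 256 body` calls come from the test suite's `f`
utility. An approximate Python rendering of the same dump layout, handy
for inspecting a stream-clone bundle body by hand (a sketch, not the real
helper):

  import os

  def dump(path, nbytes=256):
      # size line plus hex/ASCII rows in roughly the layout above
      print('%s: size=%d' % (path, os.path.getsize(path)))
      with open(path, 'rb') as fh:
          data = fh.read(nbytes)
      for off in range(0, len(data), 16):
          chunk = data[off:off + 16]
          hexed = ' '.join('%02x' % b for b in chunk)
          text = ''.join(chr(b) if 32 <= b < 127 else '.' for b in chunk)
          print('%04x: %-47s |%s|' % (off, hexed, text))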
--- a/tests/test-commandserver.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-commandserver.t Mon Jun 07 17:10:35 2021 -0400
@@ -207,6 +207,7 @@
devel.all-warnings=true
devel.default-date=0 0
extensions.fsmonitor= (fsmonitor !)
+ format.exp-dirstate-v2=1 (dirstate-v2 !)
largefiles.usercache=$TESTTMP/.cache/largefiles
lfs.usercache=$TESTTMP/.cache/lfs
ui.slash=True
--- a/tests/test-commit-amend.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-commit-amend.t Mon Jun 07 17:10:35 2021 -0400
@@ -10,7 +10,7 @@
$ hg phase -r . -p
$ hg ci --amend
- abort: cannot amend public changesets
+ abort: cannot amend public changesets: ad120869acf0
(see 'hg help phases' for details)
[10]
$ hg phase -r . -f -d
@@ -406,7 +406,7 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci --amend
- abort: cannot amend while merging
+ abort: cannot amend changesets while merging
[20]
$ hg ci -m 'merge'
@@ -957,6 +957,7 @@
$ cat >> .hg/hgrc <<EOF
> [committemplate]
> changeset.commit.amend = {desc}\n
+ > HG: {revset('parents()') % 'parent: {desc|firstline}\n'}
> HG: M: {file_mods}
> HG: A: {file_adds}
> HG: R: {file_dels}
@@ -971,6 +972,8 @@
$ HGEDITOR=cat hg commit --amend -e -m "expecting diff of foo"
expecting diff of foo
+ HG: parent: editor should be suppressed
+
HG: M:
HG: A: foo
HG: R:
@@ -985,6 +988,8 @@
$ HGEDITOR=cat hg commit --amend -e -m "expecting diff of foo and y"
expecting diff of foo and y
+ HG: parent: expecting diff of foo
+
HG: M:
HG: A: foo y
HG: R:
@@ -1003,6 +1008,8 @@
$ HGEDITOR=cat hg commit --amend -e -m "expecting diff of a, foo and y"
expecting diff of a, foo and y
+ HG: parent: expecting diff of foo and y
+
HG: M:
HG: A: foo y
HG: R: a
@@ -1027,6 +1034,8 @@
$ HGEDITOR=cat hg commit --amend -e -m "expecting diff of a, foo, x and y"
expecting diff of a, foo, x and y
+ HG: parent: expecting diff of a, foo and y
+
HG: M:
HG: A: foo y
HG: R: a x
@@ -1058,6 +1067,8 @@
$ HGEDITOR=cat hg commit --amend -e -m "cc should be excluded" -X cc
cc should be excluded
+ HG: parent: expecting diff of a, foo, x and y
+
HG: M:
HG: A: foo y
HG: R: a x
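The committemplate hunks add an HG: line built from
{revset('parents()') % 'parent: {desc|firstline}\n'}. To preview what that
fragment expands to without going through an amend, something like the
following should work (the quoting is the fiddly part; treat it as a
sketch):

  import subprocess

  # with -r . the revset('parents()') traversal sees the same parents
  # the amend editor text would show
  subprocess.run([
      'hg', 'debugtemplate', '-r', '.',
      "{revset('parents()') % 'parent: {desc|firstline}\\n'}",
  ])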
--- a/tests/test-commit.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-commit.t Mon Jun 07 17:10:35 2021 -0400
@@ -646,14 +646,14 @@
verify pathauditor blocks evil filepaths
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, hg, node, ui as uimod
+ > from mercurial import context, hg, ui as uimod
> notrc = u".h\u200cg".encode('utf-8') + b'/hgrc'
> u = uimod.ui.load()
> r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
> b'[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r.changelog.tip(), node.nullid],
+ > c = context.memctx(r, [r.changelog.tip(), r.nullid],
> b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
@@ -672,14 +672,14 @@
repository tip rolled back to revision 2 (undo commit)
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, hg, node, ui as uimod
+ > from mercurial import context, hg, ui as uimod
> notrc = b"HG~1/hgrc"
> u = uimod.ui.load()
> r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
> b'[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+ > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
> b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
@@ -692,14 +692,14 @@
repository tip rolled back to revision 2 (undo commit)
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, hg, node, ui as uimod
+ > from mercurial import context, hg, ui as uimod
> notrc = b"HG8B6C~2/hgrc"
> u = uimod.ui.load()
> r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
> b'[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+ > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
> b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
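The evil-commit hunks swap `node.nullid` for `r.nullid`: the null node is
becoming a per-repository attribute so its value can follow the
repository's hash function instead of being a module-level sha1 constant.
A tiny illustration (assuming a current sha1-based repository in "."):

  from mercurial import hg, ui as uimod

  u = uimod.ui.load()
  repo = hg.repository(u, b'.')
  # repo.nullid replaces mercurial.node.nullid in new code; for
  # today's sha1 repositories both are twenty zero bytes
  assert repo.nullid == b'\0' * 20
  print(repo.nullid.hex())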
--- a/tests/test-completion.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-completion.t Mon Jun 07 17:10:35 2021 -0400
@@ -262,7 +262,7 @@
cat: output, rev, decode, include, exclude, template
clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
- config: untrusted, edit, local, shared, non-shared, global, template
+ config: untrusted, edit, local, source, shared, non-shared, global, template
continue: dry-run
copy: forget, after, at-rev, force, include, exclude, dry-run
debugancestor:
@@ -282,7 +282,7 @@
debugdata: changelog, manifest, dir
debugdate: extended
debugdeltachain: changelog, manifest, dir, template
- debugdirstate: nodates, dates, datesort
+ debugdirstate: nodates, dates, datesort, dirs
debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
debugdownload: output
debugextensions: template
--- a/tests/test-config.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-config.t Mon Jun 07 17:10:35 2021 -0400
@@ -159,7 +159,7 @@
true
- $ hg config --config format.dotencode= format -Tjson
+ $ hg config --config format.dotencode= format.dotencode -Tjson
[
{
"defaultvalue": true,
@@ -168,11 +168,11 @@
"value": ""
}
]
- $ hg config --config format.dotencode= format -T'json(defaultvalue)'
+ $ hg config --config format.dotencode= format.dotencode -T'json(defaultvalue)'
[
{"defaultvalue": true}
]
- $ hg config --config format.dotencode= format -T'{defaultvalue}\n'
+ $ hg config --config format.dotencode= format.dotencode -T'{defaultvalue}\n'
True
bytes
@@ -277,8 +277,7 @@
> emptysource = `pwd`/emptysource.py
> EOF
- $ hg config --debug empty.source
- read config from: * (glob)
+ $ hg config --source empty.source
none: value
$ hg config empty.source -Tjson
[
@@ -338,8 +337,14 @@
> EOF
$ hg showconfig paths
+ paths.foo=~/foo
paths.foo:suboption=~/foo
- paths.foo=$TESTTMP/foo
+
+Note: the path expansion no longer happens at the config level, but the path is
+still expanded:
+
+ $ hg path | grep foo
+ foo = $TESTTMP/foo
edit failure
@@ -349,16 +354,16 @@
config affected by environment variables
- $ EDITOR=e1 VISUAL=e2 hg config --debug | grep 'ui\.editor'
+ $ EDITOR=e1 VISUAL=e2 hg config --source | grep 'ui\.editor'
$VISUAL: ui.editor=e2
- $ VISUAL=e2 hg config --debug --config ui.editor=e3 | grep 'ui\.editor'
+ $ VISUAL=e2 hg config --source --config ui.editor=e3 | grep 'ui\.editor'
--config: ui.editor=e3
- $ PAGER=p1 hg config --debug | grep 'pager\.pager'
+ $ PAGER=p1 hg config --source | grep 'pager\.pager'
$PAGER: pager.pager=p1
- $ PAGER=p1 hg config --debug --config pager.pager=p2 | grep 'pager\.pager'
+ $ PAGER=p1 hg config --source --config pager.pager=p2 | grep 'pager\.pager'
--config: pager.pager=p2
verify that aliases are evaluated as well
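The test-config.t changes migrate from `hg config --debug` to the new
`--source` flag for reporting where a value was defined. Internally this
corresponds to `ui.configsource()`; a minimal sketch, assuming the API
keeps this shape:

  from mercurial import ui as uimod

  u = uimod.ui.load()
  # configsource() names the origin of a value ("--config", "$VISUAL",
  # a file path, ...), which is what `hg config --source` prints
  print(u.config(b'ui', b'editor'))
  print(u.configsource(b'ui', b'editor'))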
--- a/tests/test-contrib-perf.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-contrib-perf.t Mon Jun 07 17:10:35 2021 -0400
@@ -411,10 +411,10 @@
> from mercurial import (
import newer module separately in try clause for early Mercurial
contrib/perf.py:\d+: (re)
- > origindexpath = orig.opener.join(orig.indexfile)
+ > origindexpath = orig.opener.join(indexfile)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
- > origdatapath = orig.opener.join(orig.datafile)
+ > origdatapath = orig.opener.join(datafile)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
> vfs = vfsmod.vfs(tmpdir)
--- a/tests/test-convert-bzr-114.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-bzr-114.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,4 +1,4 @@
-#require bzr bzr114
+#require bzr
$ . "$TESTDIR/bzr-definitions"
@@ -9,18 +9,18 @@
$ mkdir test-replace-file-with-dir
$ cd test-replace-file-with-dir
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ echo d > d
- $ bzr add -q d
- $ bzr commit -q -m 'add d file'
+ $ brz add -q d
+ $ brz commit -q -m 'add d file'
$ rm d
$ mkdir d
- $ bzr add -q d
- $ bzr commit -q -m 'replace with d dir'
+ $ brz add -q d
+ $ brz commit -q -m 'replace with d dir'
$ echo a > d/a
- $ bzr add -q d/a
- $ bzr commit -q -m 'add d/a'
+ $ brz add -q d/a
+ $ brz commit -q -m 'add d/a'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
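This and the following convert tests rewrite `bzr` invocations to `brz`,
the executable shipped by Breezy (Bazaar's Python 3 successor). A shim
along these lines could keep local runs working with either tool (purely
illustrative; the suite itself now assumes `brz`):

  import shutil

  # prefer Breezy's brz, fall back to legacy bzr if that is all we have
  BZR = shutil.which('brz') or shutil.which('bzr')
  if BZR is None:
      raise SystemExit('neither brz nor bzr found on PATH')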
--- a/tests/test-convert-bzr-directories.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-bzr-directories.t Mon Jun 07 17:10:35 2021 -0400
@@ -9,17 +9,17 @@
$ mkdir test-empty
$ cd test-empty
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ echo content > a
- $ bzr add -q a
- $ bzr commit -q -m 'Initial add'
+ $ brz add -q a
+ $ brz commit -q -m 'Initial add'
$ mkdir empty
- $ bzr add -q empty
- $ bzr commit -q -m 'Empty directory added'
+ $ brz add -q empty
+ $ brz commit -q -m 'Empty directory added'
$ echo content > empty/something
- $ bzr add -q empty/something
- $ bzr commit -q -m 'Added file into directory'
+ $ brz add -q empty/something
+ $ brz commit -q -m 'Added file into directory'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
@@ -42,15 +42,15 @@
$ mkdir test-dir-rename
$ cd test-dir-rename
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ mkdir tpyo
$ echo content > tpyo/something
- $ bzr add -q tpyo
- $ bzr commit -q -m 'Added directory'
- $ bzr mv tpyo typo
+ $ brz add -q tpyo
+ $ brz commit -q -m 'Added directory'
+ $ brz mv tpyo typo
tpyo => typo
- $ bzr commit -q -m 'Oops, typo'
+ $ brz commit -q -m 'Oops, typo'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
@@ -71,16 +71,16 @@
$ mkdir test-nested-dir-rename
$ cd test-nested-dir-rename
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ mkdir -p firstlevel/secondlevel/thirdlevel
$ echo content > firstlevel/secondlevel/file
$ echo this_needs_to_be_there_too > firstlevel/secondlevel/thirdlevel/stuff
- $ bzr add -q firstlevel
- $ bzr commit -q -m 'Added nested directories'
- $ bzr mv firstlevel/secondlevel secondlevel
+ $ brz add -q firstlevel
+ $ brz commit -q -m 'Added nested directories'
+ $ brz mv firstlevel/secondlevel secondlevel
firstlevel/secondlevel => secondlevel
- $ bzr commit -q -m 'Moved secondlevel one level up'
+ $ brz commit -q -m 'Moved secondlevel one level up'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
@@ -99,14 +99,14 @@
$ mkdir test-dir-remove
$ cd test-dir-remove
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ mkdir src
$ echo content > src/sourcecode
- $ bzr add -q src
- $ bzr commit -q -m 'Added directory'
- $ bzr rm -q src
- $ bzr commit -q -m 'Removed directory'
+ $ brz add -q src
+ $ brz commit -q -m 'Added directory'
+ $ brz rm -q src
+ $ brz commit -q -m 'Removed directory'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
@@ -126,19 +126,19 @@
$ mkdir test-dir-replace
$ cd test-dir-replace
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ mkdir first second
$ echo content > first/file
$ echo morecontent > first/dummy
$ echo othercontent > second/something
- $ bzr add -q first second
- $ bzr commit -q -m 'Initial layout'
- $ bzr mv first/file second/file
+ $ brz add -q first second
+ $ brz commit -q -m 'Initial layout'
+ $ brz mv first/file second/file
first/file => second/file
- $ bzr mv first third
+ $ brz mv first third
first => third
- $ bzr commit -q -m 'Some conflicting moves'
+ $ brz commit -q -m 'Some conflicting moves'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
@@ -158,27 +158,27 @@
$ mkdir test-divergent-renames
$ cd test-divergent-renames
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ mkdir -p a/c
$ echo a > a/fa
$ echo c > a/c/fc
- $ bzr add -q a
- $ bzr commit -q -m 'Initial layout'
- $ bzr mv a b
+ $ brz add -q a
+ $ brz commit -q -m 'Initial layout'
+ $ brz mv a b
a => b
$ mkdir a
- $ bzr add a
+ $ brz add a
add(ed|ing) a (re)
- $ bzr mv b/c a/c
+ $ brz mv b/c a/c
b/c => a/c
- $ bzr status
+ $ brz status
added:
a/
renamed:
a/? => b/? (re)
a/c/? => a/c/? (re)
- $ bzr commit -q -m 'Divergent renames'
+ $ brz commit -q -m 'Divergent renames'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
--- a/tests/test-convert-bzr-ghosts.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-bzr-ghosts.t Mon Jun 07 17:10:35 2021 -0400
@@ -3,11 +3,12 @@
$ . "$TESTDIR/bzr-definitions"
$ cat > ghostcreator.py <<EOF
> import sys
- > from bzrlib import workingtree
+ > from breezy import workingtree
+ > import breezy.bzr.bzrdir
> wt = workingtree.WorkingTree.open('.')
>
> message, ghostrev = sys.argv[1:]
- > wt.set_parent_ids(wt.get_parent_ids() + [ghostrev])
+ > wt.set_parent_ids(wt.get_parent_ids() + [ghostrev.encode()])
> wt.commit(message)
> EOF
@@ -15,11 +16,11 @@
$ mkdir test-ghost-revisions
$ cd test-ghost-revisions
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ echo content > somefile
- $ bzr add -q somefile
- $ bzr commit -q -m 'Initial layout setup'
+ $ brz add -q somefile
+ $ brz commit -q -m 'Initial layout setup'
$ echo morecontent >> somefile
$ "$PYTHON" ../../ghostcreator.py 'Commit with ghost revision' ghostrev
$ cd ..
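Besides the bzrlib -> breezy module rename, the helper now encodes the
ghost revision id because Breezy's Python 3 API expects revision ids as
bytes, and `import breezy.bzr.bzrdir` is needed for its side effect of
registering the bzr on-disk format. Condensed (assumes a Breezy working
tree in the current directory):

  from breezy import workingtree
  import breezy.bzr.bzrdir  # side effect: registers the bzr format

  wt = workingtree.WorkingTree.open('.')
  # revision ids are bytes in Breezy, hence .encode() in the helper
  wt.set_parent_ids(wt.get_parent_ids() + [b'ghostrev'])
  wt.commit('Commit with ghost revision')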
--- a/tests/test-convert-bzr-merges.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-bzr-merges.t Mon Jun 07 17:10:35 2021 -0400
@@ -10,37 +10,37 @@
$ mkdir test-multimerge
$ cd test-multimerge
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ echo content > file
$ echo text > rename_me
- $ bzr add -q file rename_me
- $ bzr commit -q -m 'Initial add' '--commit-time=2009-10-10 08:00:00 +0100'
+ $ brz add -q file rename_me
+ $ brz commit -q -m 'Initial add' '--commit-time=2009-10-10 08:00:00 +0100'
$ cd ..
- $ bzr branch -q source source-branch1
+ $ brz branch -q source source-branch1
$ cd source-branch1
$ echo morecontent >> file
$ echo evenmorecontent > file-branch1
- $ bzr add -q file-branch1
- $ bzr commit -q -m 'Added branch1 file' '--commit-time=2009-10-10 08:00:01 +0100'
+ $ brz add -q file-branch1
+ $ brz commit -q -m 'Added branch1 file' '--commit-time=2009-10-10 08:00:01 +0100'
$ cd ../source
$ sleep 1
$ echo content > file-parent
- $ bzr add -q file-parent
- $ bzr commit -q -m 'Added parent file' '--commit-time=2009-10-10 08:00:02 +0100'
+ $ brz add -q file-parent
+ $ brz commit -q -m 'Added parent file' '--commit-time=2009-10-10 08:00:02 +0100'
$ cd ..
- $ bzr branch -q source source-branch2
+ $ brz branch -q source source-branch2
$ cd source-branch2
$ echo somecontent > file-branch2
- $ bzr add -q file-branch2
- $ bzr mv -q rename_me renamed
+ $ brz add -q file-branch2
+ $ brz mv -q rename_me renamed
$ echo change > renamed
- $ bzr commit -q -m 'Added brach2 file' '--commit-time=2009-10-10 08:00:03 +0100'
+ $ brz commit -q -m 'Added brach2 file' '--commit-time=2009-10-10 08:00:03 +0100'
$ sleep 1
$ cd ../source
- $ bzr merge -q ../source-branch1
- $ bzr merge -q --force ../source-branch2
- $ bzr commit -q -m 'Merged branches' '--commit-time=2009-10-10 08:00:04 +0100'
+ $ brz merge -q ../source-branch1
+ $ brz merge -q --force ../source-branch2
+ $ brz commit -q -m 'Merged branches' '--commit-time=2009-10-10 08:00:04 +0100'
$ cd ..
BUG: file-branch2 should not be added in rev 4, and the rename_me -> renamed
--- a/tests/test-convert-bzr-treeroot.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-bzr-treeroot.t Mon Jun 07 17:10:35 2021 -0400
@@ -3,11 +3,12 @@
$ . "$TESTDIR/bzr-definitions"
$ cat > treeset.py <<EOF
> import sys
- > from bzrlib import workingtree
+ > from breezy import workingtree
+ > import breezy.bzr.bzrdir
> wt = workingtree.WorkingTree.open('.')
>
> message, rootid = sys.argv[1:]
- > wt.set_root_id('tree_root-%s' % rootid)
+ > wt.set_root_id(b'tree_root-%s' % rootid.encode())
> wt.commit(message)
> EOF
@@ -15,11 +16,11 @@
$ mkdir test-change-treeroot-id
$ cd test-change-treeroot-id
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ echo content > file
- $ bzr add -q file
- $ bzr commit -q -m 'Initial add'
+ $ brz add -q file
+ $ brz commit -q -m 'Initial add'
$ "$PYTHON" ../../treeset.py 'Changed root' new
$ cd ..
$ hg convert source source-hg
--- a/tests/test-convert-bzr.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-bzr.t Mon Jun 07 17:10:35 2021 -0400
@@ -6,7 +6,7 @@
$ mkdir test-createandrename
$ cd test-createandrename
- $ bzr init -q source
+ $ brz init -q source
test empty repo conversion (issue3233)
@@ -22,18 +22,18 @@
$ echo a > a
$ echo c > c
$ echo e > e
- $ bzr add -q a c e
- $ bzr commit -q -m 'Initial add: a, c, e'
- $ bzr mv a b
+ $ brz add -q a c e
+ $ brz commit -q -m 'Initial add: a, c, e'
+ $ brz mv a b
a => b
- $ bzr mv c d
+ $ brz mv c d
c => d
- $ bzr mv e f
+ $ brz mv e f
e => f
$ echo a2 >> a
$ mkdir e
- $ bzr add -q a e
- $ bzr commit -q -m 'rename a into b, create a, rename c into d'
+ $ brz add -q a e
+ $ brz commit -q -m 'rename a into b, create a, rename c into d'
$ cd ..
$ hg convert source source-hg
scanning source...
@@ -86,7 +86,7 @@
convert from lightweight checkout
- $ bzr checkout --lightweight source source-light
+ $ brz checkout --lightweight source source-light
$ hg convert -s bzr source-light source-light-hg
initializing destination source-light-hg repository
warning: lightweight checkouts may cause conversion failures, try with a regular branch instead.
@@ -99,7 +99,7 @@
compare timestamps
$ cd source
- $ bzr log | \
+ $ brz log | \
> sed '/timestamp/!d;s/.\{15\}\([0-9: -]\{16\}\):.. \(.[0-9]\{4\}\)/\1 \2/' \
> > ../bzr-timestamps
$ cd ..
@@ -113,20 +113,21 @@
$ cd test-merge
$ cat > helper.py <<EOF
> import sys
- > from bzrlib import workingtree
+ > from breezy import workingtree
+ > import breezy.bzr.bzrdir
> wt = workingtree.WorkingTree.open('.')
>
> message, stamp = sys.argv[1:]
> wt.commit(message, timestamp=int(stamp))
> EOF
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ echo content > a
$ echo content2 > b
- $ bzr add -q a b
- $ bzr commit -q -m 'Initial add'
+ $ brz add -q a b
+ $ brz commit -q -m 'Initial add'
$ cd ..
- $ bzr branch -q source source-improve
+ $ brz branch -q source source-improve
$ cd source
$ echo more >> a
$ "$PYTHON" ../helper.py 'Editing a' 100
@@ -134,8 +135,8 @@
$ echo content3 >> b
$ "$PYTHON" ../helper.py 'Editing b' 200
$ cd ../source
- $ bzr merge -q ../source-improve
- $ bzr commit -q -m 'Merged improve branch'
+ $ brz merge -q ../source-improve
+ $ brz commit -q -m 'Merged improve branch'
$ cd ..
$ hg convert --datesort source source-hg
initializing destination source-hg repository
@@ -163,7 +164,7 @@
$ mkdir test-symlinks
$ cd test-symlinks
- $ bzr init -q source
+ $ brz init -q source
$ cd source
$ touch program
$ chmod +x program
@@ -171,15 +172,15 @@
$ mkdir d
$ echo a > d/a
$ ln -s a syma
- $ bzr add -q altname program syma d/a
- $ bzr commit -q -m 'Initial setup'
+ $ brz add -q altname program syma d/a
+ $ brz commit -q -m 'Initial setup'
$ touch newprog
$ chmod +x newprog
$ rm altname
$ ln -s newprog altname
$ chmod -x program
- $ bzr add -q newprog
- $ bzr commit -q -m 'Symlink changed, x bits changed'
+ $ brz add -q newprog
+ $ brz commit -q -m 'Symlink changed, x bits changed'
$ cd ..
$ hg convert source source-hg
initializing destination source-hg repository
@@ -215,30 +216,28 @@
Multiple branches
- $ bzr init-repo -q --no-trees repo
- $ bzr init -q repo/trunk
- $ bzr co repo/trunk repo-trunk
+ $ brz init-repo -q --no-trees repo
+ $ brz init -q repo/trunk
+ $ brz co repo/trunk repo-trunk
$ cd repo-trunk
$ echo a > a
- $ bzr add -q a
- $ bzr ci -qm adda
- $ bzr tag trunk-tag
+ $ brz add -q a
+ $ brz ci -qm adda
+ $ brz tag trunk-tag
Created tag trunk-tag.
- $ bzr switch -b branch
+ $ brz switch -b branch
Tree is up to date at revision 1.
Switched to branch*repo/branch/ (glob)
- $ sleep 1
$ echo b > b
- $ bzr add -q b
- $ bzr ci -qm addb
- $ bzr tag branch-tag
+ $ brz add -q b
+ $ brz ci -qm addb
+ $ brz tag branch-tag
Created tag branch-tag.
- $ bzr switch --force ../repo/trunk
+ $ brz switch --force ../repo/trunk
Updated to revision 1.
Switched to branch*/repo/trunk/ (glob)
- $ sleep 1
$ echo a >> a
- $ bzr ci -qm changea
+ $ brz ci -qm changea
$ cd ..
$ hg convert --datesort repo repo-bzr
initializing destination repo-bzr repository
@@ -269,13 +268,13 @@
Nested repositories (issue3254)
- $ bzr init-repo -q --no-trees repo/inner
- $ bzr init -q repo/inner/trunk
- $ bzr co repo/inner/trunk inner-trunk
+ $ brz init-repo -q --no-trees repo/inner
+ $ brz init -q repo/inner/trunk
+ $ brz co repo/inner/trunk inner-trunk
$ cd inner-trunk
$ echo b > b
- $ bzr add -q b
- $ bzr ci -qm addb
+ $ brz add -q b
+ $ brz ci -qm addb
$ cd ..
$ hg convert --datesort repo noinner-bzr
initializing destination noinner-bzr repository
--- a/tests/test-convert-filemap.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-filemap.t Mon Jun 07 17:10:35 2021 -0400
@@ -292,12 +292,12 @@
$ rm -rf source/.hg/store/data/dir/file4
#endif
$ hg -q convert --filemap renames.fmap --datesort source dummydest
- abort: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+ abort: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
abort: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
[50]
$ hg -q convert --filemap renames.fmap --datesort --config convert.hg.ignoreerrors=1 source renames.repo
- ignoring: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
- ignoring: data/dir/file4.i@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
+ ignoring: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+ ignoring: data/dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
ignoring: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
ignoring: data/dir/file4/index@6edd55f559cd: no node (reposimplestore !)
$ hg up -q -R renames.repo
--- a/tests/test-convert-hg-source.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-convert-hg-source.t Mon Jun 07 17:10:35 2021 -0400
@@ -182,7 +182,7 @@
sorting...
converting...
4 init
- ignoring: data/b.i@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
+ ignoring: data/b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
ignoring: data/b/index@1e88685f5dde: no node (reposimplestore !)
3 changeall
2 changebagain
--- a/tests/test-copies-chain-merge.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-copies-chain-merge.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,4 +1,4 @@
-#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel
+#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel pull push pull-upgrade push-upgrade
=====================================================
Test Copy tracing for chain of copies involving merge
@@ -51,11 +51,41 @@
#if sidedata
$ cat >> $HGRCPATH << EOF
> [format]
- > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > EOF
+#endif
+
+#if pull
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-copies-side-data-changeset = yes
+ > EOF
+#endif
+
+#if push
+ $ cat >> $HGRCPATH << EOF
+ > [format]
> exp-use-copies-side-data-changeset = yes
> EOF
#endif
+#if pull-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-copies-side-data-changeset = no
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+#endif
+
+#if push-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-copies-side-data-changeset = no
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+#endif
$ cat > same-content.txt << EOF
> Here is some content that will be the same accros multiple file.
@@ -1617,12 +1647,12 @@
#if upgraded
$ cat >> $HGRCPATH << EOF
> [format]
- > exp-use-side-data = yes
> exp-use-copies-side-data-changeset = yes
> EOF
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1630,7 +1660,8 @@
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
copies-sdc: no yes no
- revlog-v2: no yes no
+ revlog-v2: no no no
+ changelog-v2: no yes no
plain-cl-delta: yes yes yes
compression: * (glob)
compression-level: default default default
@@ -1639,8 +1670,7 @@
requirements
preserved: * (glob)
- removed: revlogv1
- added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
+ added: exp-changelog-v2, exp-copies-sidedata-changeset
processed revlogs:
- all-filelogs
@@ -1652,7 +1682,6 @@
#if upgraded-parallel
$ cat >> $HGRCPATH << EOF
> [format]
- > exp-use-side-data = yes
> exp-use-copies-side-data-changeset = yes
> [experimental]
> worker.repository-upgrade=yes
@@ -1663,6 +1692,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1670,7 +1700,8 @@
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
copies-sdc: no yes no
- revlog-v2: no yes no
+ revlog-v2: no no no
+ changelog-v2: no yes no
plain-cl-delta: yes yes yes
compression: * (glob)
compression-level: default default default
@@ -1679,8 +1710,7 @@
requirements
preserved: * (glob)
- removed: revlogv1
- added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
+ added: exp-changelog-v2, exp-copies-sidedata-changeset
processed revlogs:
- all-filelogs
@@ -1689,6 +1719,79 @@
#endif
+#if pull
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-chain
+ $ hg pull ../repo-source
+ pulling from ../repo-source
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ new changesets a3a31bbefea6:908ce9259ffa
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+#endif
+
+#if pull-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-copies-side-data-changeset = yes
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-chain
+ $ hg pull ../repo-source
+ pulling from ../repo-source
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ new changesets a3a31bbefea6:908ce9259ffa
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+#endif
+
+#if push
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-source
+ $ hg push ../repo-chain
+ pushing to ../repo-chain
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ $ cd ../repo-chain
+#endif
+
+#if push-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-copies-side-data-changeset = yes
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-source
+ $ hg push ../repo-chain
+ pushing to ../repo-chain
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ $ cd ../repo-chain
+#endif
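The new pull/push testcases exchange the copies sidedata between
repositories, and the *-upgrade variants additionally enable
experimental.changegroup4 so the sidedata can be recomputed during the
exchange itself. Driving the same pull through the internal API might look
like this (a sketch; exchange.pull is internal and its signature may
drift):

  from mercurial import exchange, hg, ui as uimod

  u = uimod.ui.load()
  dest = hg.repository(u, b'repo-chain')
  src = hg.peer(u, {}, b'repo-source')
  # equivalent of `hg pull ../repo-source` run inside repo-chain
  exchange.pull(dest, src)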
#if no-compatibility no-filelog no-changeset
@@ -3405,12 +3508,7 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBF-change-m-0")'
M b
A d
- h (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
- h (changeset !)
- h (compatibility !)
+ h
A t
p
R a
@@ -3564,24 +3662,15 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE,Km")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AEm")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
The result from mEAm is the same for the subsequent merge:
@@ -3589,23 +3678,17 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA,Jm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EAm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
Subcase: chaining conflicting rename resolution
```````````````````````````````````````````````
@@ -3620,24 +3703,17 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQm")' v
A v
r (filelog !)
- p (sidedata !)
- p (upgraded !)
- p (upgraded-parallel !)
+ p (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQ,Tm")' v
A v
r (filelog !)
- p (sidedata !)
- p (upgraded !)
- p (upgraded-parallel !)
+ p (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mT,PQm")' v
A v
r (filelog !)
- p (sidedata !)
- p (upgraded !)
- p (upgraded-parallel !)
-
+ p (no-changeset no-compatibility no-filelog !)
The result from mQPm is the same for the subsequent merge:
@@ -3652,9 +3728,7 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mS,QPm")' v
A v
r (filelog !)
- r (sidedata !)
- r (upgraded !)
- r (upgraded-parallel !)
+ r (no-changeset no-compatibility no-filelog !)
Subcase: chaining salvage information during a merge
@@ -3733,30 +3807,22 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm")' d
A d
a (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
+ h (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm")' d
A d
a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility no-filelog !)
Chained output
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mO,FGm")' d
A d
a (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
+ h (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFG,Om")' d
A d
a (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
+ h (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGF,Nm")' d
@@ -3779,17 +3845,11 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change-m")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change,Km")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AE-change-m")' f
A f
@@ -3801,20 +3861,14 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change-m")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change,Jm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EA-change-m")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
--- a/tests/test-copies-in-changeset.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-copies-in-changeset.t Mon Jun 07 17:10:35 2021 -0400
@@ -35,6 +35,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -42,7 +43,8 @@
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
- revlog-v2: yes yes no
+ revlog-v2: no no no
+ changelog-v2: yes yes no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -51,6 +53,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -59,6 +62,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -419,11 +423,12 @@
Test upgrading/downgrading to sidedata storage
==============================================
-downgrading (keeping some sidedata)
+downgrading
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -431,7 +436,8 @@
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
- revlog-v2: yes yes no
+ revlog-v2: no no no
+ changelog-v2: yes yes no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -445,13 +451,15 @@
$ hg debugsidedata -m -- 0
$ cat << EOF > .hg/hgrc
> [format]
- > exp-use-side-data = yes
> exp-use-copies-side-data-changeset = no
+ > [experimental]
+ > revlogv2 = enable-unstable-format-and-corrupt-my-data
> EOF
$ hg debugupgraderepo --run --quiet --no-backup > /dev/null
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -460,16 +468,13 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: yes yes no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugsidedata -c -- 0
- 1 sidedata entries
- entry-0014 size 14
$ hg debugsidedata -c -- 1
- 1 sidedata entries
- entry-0014 size 14
$ hg debugsidedata -m -- 0
upgrading
@@ -482,6 +487,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -489,7 +495,8 @@
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
- revlog-v2: yes yes no
+ revlog-v2: no no no
+ changelog-v2: yes yes no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
--- a/tests/test-copy.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-copy.t Mon Jun 07 17:10:35 2021 -0400
@@ -115,6 +115,7 @@
$ hg mv foo bar
foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ hg st -A
? foo
@@ -124,14 +125,17 @@
$ hg mv ../foo ../bar
../foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ hg mv ../foo ../bar --config ui.relative-paths=yes
../foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ hg mv ../foo ../bar --config ui.relative-paths=no
foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ cd ..
$ rmdir dir
--- a/tests/test-dirstate-race.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-dirstate-race.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,3 +1,17 @@
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
$ hg init repo
$ cd repo
$ echo a > a
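The same #testcases preamble is prepended to the three dirstate tests: the
default flavour, the Rust in-memory tree dirstate, and the experimental
dirstate-v2 format. Each case simply appends a config snippet to the
shared $HGRCPATH; expressed as a small Python helper (names are mine, for
illustration):

  import os

  SNIPPETS = {
      'dirstate-v1': '',
      'dirstate-v1-tree': '[experimental]\ndirstate-tree.in-memory=1\n',
      'dirstate-v2': '[format]\nexp-dirstate-v2=1\n',
  }

  def enable(flavour):
      # mirror the #if blocks above: widen the suite-wide config
      with open(os.environ['HGRCPATH'], 'a') as fh:
          fh.write(SNIPPETS[flavour])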
--- a/tests/test-dirstate-race2.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-dirstate-race2.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,3 +1,17 @@
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
Checking the size/permissions/file-type of files stored in the
dirstate after an update where the files are changed concurrently
outside of hg's control.
--- a/tests/test-dirstate.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-dirstate.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,3 +1,17 @@
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
------ Test dirstate._dirs refcounting
$ hg init t
--- a/tests/test-doctest.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-doctest.py Mon Jun 07 17:10:35 2021 -0400
@@ -131,7 +131,6 @@
('mercurial.changelog', '{}'),
('mercurial.cmdutil', '{}'),
('mercurial.color', '{}'),
- ('mercurial.config', '{}'),
('mercurial.dagparser', "{'optionflags': 4}"),
('mercurial.encoding', '{}'),
('mercurial.fancyopts', '{}'),
--- a/tests/test-exchange-multi-source.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-exchange-multi-source.t Mon Jun 07 17:10:35 2021 -0400
@@ -611,3 +611,177 @@
|
% A 0
+
+Testing multi-path definition
+----------------------------
+
+ $ hg clone main-repo repo-paths --rev 0
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets 4a2df7238c3b
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cp -R ./branch-E ./branch-E-paths
+ $ cp -R ./branch-G ./branch-G-paths
+ $ cp -R ./branch-H ./branch-H-paths
+ $ cat << EOF >> repo-paths/.hg/hgrc
+ > [paths]
+ > E=../branch-E-paths
+ > G=../branch-G-paths
+ > H=../branch-H-paths
+ > EHG=path://E,path://H,path://G
+ > EHG:multi-urls=yes
+ > GEH=path://G,path://E,path://H
+ > GEH:multi-urls=yes
+ > EOF
+
+Do various operations and verify that order matters
+
+ $ hg -R repo-paths push EHG --force
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ [1]
+ $ hg -R repo-paths push GEH --force
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ [1]
+ $ hg -R repo-paths push EHG GEH --force
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ [1]
+ $ hg -R repo-paths pull EHG
+ pulling from $TESTTMP/branch-E-paths
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files
+ new changesets 27547f69f254:a603bfb5a83e
+ (run 'hg update' to get a working copy)
+ pulling from $TESTTMP/branch-H-paths
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets 40faebb2ec45
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+ pulling from $TESTTMP/branch-G-paths
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ new changesets 2f3a4c5c1417:c521a06b234b
+ (run 'hg heads .' to see heads, 'hg merge' to merge)
+ $ hg -R repo-paths pull GEH
+ pulling from $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pulling from $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pulling from $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ $ hg -R repo-paths pull EHG GEH
+ pulling from $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pulling from $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ pulling from $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pulling from $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pulling from $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pulling from $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ $ hg -R repo-paths push EHG --force
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 3 files (+2 heads)
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files (+2 heads)
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 4 files (+2 heads)
+ $ hg -R repo-paths push GEH --force
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ [1]
+ $ hg -R repo-paths push EHG GEH --force
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-G-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-E-paths
+ searching for changes
+ no changes found
+ pushing to $TESTTMP/branch-H-paths
+ searching for changes
+ no changes found
+ [1]
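
The pushes and pulls above confirm that an alias flagged with 'multi-urls'
expands to its destinations in declaration order: EHG always visits E, H, G and
GEH always visits G, E, H. A minimal sketch of that expansion, assuming only
that the value is a comma-separated list (the helper name is hypothetical, not
Mercurial's API):

    # Sketch: expand a multi-urls [paths] value in declaration order.
    def expand_multi_urls(value):
        # 'path://E,path://H,path://G' -> ['path://E', 'path://H', 'path://G']
        return [part.strip() for part in value.split(',') if part.strip()]

    assert expand_multi_urls('path://E,path://H,path://G') == [
        'path://E', 'path://H', 'path://G'
    ]
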
--- a/tests/test-fastannotate-hg.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-fastannotate-hg.t Mon Jun 07 17:10:35 2021 -0400
@@ -482,19 +482,19 @@
$ cat > ../legacyrepo.py <<EOF
> from __future__ import absolute_import
- > from mercurial import commit, error, extensions, node
+ > from mercurial import commit, error, extensions
> def _filecommit(orig, repo, fctx, manifest1, manifest2,
> linkrev, tr, includecopymeta, ms):
> fname = fctx.path()
> text = fctx.data()
> flog = repo.file(fname)
- > fparent1 = manifest1.get(fname, node.nullid)
- > fparent2 = manifest2.get(fname, node.nullid)
+ > fparent1 = manifest1.get(fname, repo.nullid)
+ > fparent2 = manifest2.get(fname, repo.nullid)
> meta = {}
> copy = fctx.copysource()
> if copy and copy != fname:
> raise error.Abort('copying is not supported')
- > if fparent2 != node.nullid:
+ > if fparent2 != repo.nullid:
> return flog.add(text, meta, tr, linkrev,
> fparent1, fparent2), 'modified'
> raise error.Abort('only merging is supported')
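
The substitutions above (and in the filelog test that follows) replace the
module-level node.nullid with the repository-scoped repo.nullid, so test code
keeps working when a repository's hash width is not hard-coded to SHA-1. A
minimal sketch, assuming a SHA-1 repository where the two spellings agree:

    # Sketch: for SHA-1 repositories the per-repo null node is the classic
    # 20-null-byte constant (assumption based on the substitutions above).
    from mercurial.node import sha1nodeconstants

    assert sha1nodeconstants.nullid == b'\0' * 20
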
--- a/tests/test-filelog.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-filelog.py Mon Jun 07 17:10:35 2021 -0400
@@ -4,10 +4,7 @@
"""
from __future__ import absolute_import, print_function
-from mercurial.node import (
- hex,
- nullid,
-)
+from mercurial.node import hex
from mercurial import (
hg,
ui as uimod,
@@ -22,7 +19,7 @@
def addrev(text, renamed=False):
if renamed:
# data doesn't matter. Just make sure filelog.renamed() returns True
- meta = {b'copyrev': hex(nullid), b'copy': b'bar'}
+ meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
else:
meta = {}
@@ -30,7 +27,7 @@
try:
lock = repo.lock()
t = repo.transaction(b'commit')
- node = fl.add(text, meta, t, 0, nullid, nullid)
+ node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
return node
finally:
if t:
--- a/tests/test-fix.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-fix.t Mon Jun 07 17:10:35 2021 -0400
@@ -266,11 +266,11 @@
$ hg commit -Aqm "hello"
$ hg phase -r 0 --public
$ hg fix -r 0
- abort: cannot fix public changesets
+ abort: cannot fix public changesets: 6470986d2e7b
(see 'hg help phases' for details)
[10]
$ hg fix -r 0 --working-dir
- abort: cannot fix public changesets
+ abort: cannot fix public changesets: 6470986d2e7b
(see 'hg help phases' for details)
[10]
$ hg cat -r tip hello.whole
@@ -1174,7 +1174,8 @@
$ printf "two\n" > foo.whole
$ hg commit -m "second"
$ hg --config experimental.evolution.allowunstable=False fix -r '.^'
- abort: cannot fix changeset with children
+ abort: cannot fix changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg fix -r '.^'
1 new orphan changesets
--- a/tests/test-globalopts.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-globalopts.t Mon Jun 07 17:10:35 2021 -0400
@@ -74,6 +74,10 @@
8580ff50825a tip
$ echo '[paths]' >> $HGRCPATH
$ echo 'relativetohome = a' >> $HGRCPATH
+ $ hg path | grep relativetohome
+ relativetohome = $TESTTMP/a
+ $ HOME=`pwd`/../ hg path | grep relativetohome
+ relativetohome = $TESTTMP/a
$ HOME=`pwd`/../ hg -R relativetohome identify
8580ff50825a tip
$ cd ..
@@ -419,6 +423,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -552,6 +557,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
--- a/tests/test-help-hide.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-help-hide.t Mon Jun 07 17:10:35 2021 -0400
@@ -117,6 +117,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -254,6 +255,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
--- a/tests/test-help.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-help.t Mon Jun 07 17:10:35 2021 -0400
@@ -169,6 +169,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -298,6 +299,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -1134,12 +1136,13 @@
the changelog data, root/flat manifest data, treemanifest data, and
filelogs.
- There are 3 versions of changegroups: "1", "2", and "3". From a high-
+ There are 4 versions of changegroups: "1", "2", "3" and "4". From a high-
level, versions "1" and "2" are almost exactly the same, with the only
difference being an additional item in the *delta header*. Version "3"
adds support for storage flags in the *delta header* and optionally
exchanging treemanifests (enabled by setting an option on the
- "changegroup" part in the bundle2).
+ "changegroup" part in the bundle2). Version "4" adds support for
+ exchanging sidedata (additional revision metadata not part of the digest).
Changegroups when not exchanging treemanifests consist of 3 logical
segments:
@@ -1206,8 +1209,8 @@
existing entry (either that the recipient already has, or previously
specified in the bundle/changegroup).
- The *delta header* is different between versions "1", "2", and "3" of the
- changegroup format.
+ The *delta header* is different between versions "1", "2", "3" and "4" of
+ the changegroup format.
Version 1 (headerlen=80):
@@ -1236,6 +1239,15 @@
| | | | | | |
+------------------------------------------------------------------------------+
+ Version 4 (headerlen=103):
+
+ +------------------------------------------------------------------------------+----------+
+ | | | | | | | |
+ | node | p1 node | p2 node | base node | link node | flags | pflags |
+ | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | (1 byte) |
+ | | | | | | | |
+ +------------------------------------------------------------------------------+----------+
+
The *delta data* consists of "chunklen - 4 - headerlen" bytes, which
contain a series of *delta*s, densely packed (no separators). These deltas
describe a diff from an existing entry (either that the recipient already
@@ -1276,11 +1288,24 @@
delimited metadata defining an object stored elsewhere. Used by the LFS
extension.
+ 4096
+ Contains copy information. This revision changes files in a way that
+ could affect copy tracing. This does *not* affect changegroup handling,
+ but is relevant for other parts of Mercurial.
+
For historical reasons, the integer values are identical to revlog version
1 per-revision storage flags and correspond to bits being set in this
2-byte field. Bits were allocated starting from the most-significant bit,
hence the reverse ordering and allocation of these flags.
+ The *pflags* (protocol flags) field holds bitwise flags affecting the
+ protocol itself. They are first in the header since they may affect the
+ handling of the rest of the fields in a future version. They are defined
+      as follows:
+
+ 1 indicates whether to read a chunk of sidedata (of variable length) right
+ after the revision flags.
+
Changeset Segment
=================
@@ -1301,14 +1326,14 @@
Treemanifests Segment
---------------------
- The *treemanifests segment* only exists in changegroup version "3", and
- only if the 'treemanifest' param is part of the bundle2 changegroup part
- (it is not possible to use changegroup version 3 outside of bundle2).
- Aside from the filenames in the *treemanifests segment* containing a
- trailing "/" character, it behaves identically to the *filelogs segment*
- (see below). The final sub-segment is followed by an *empty chunk*
- (logically, a sub-segment with filename size 0). This denotes the boundary
- to the *filelogs segment*.
+ The *treemanifests segment* only exists in changegroup version "3" and
+ "4", and only if the 'treemanifest' param is part of the bundle2
+ changegroup part (it is not possible to use changegroup version 3 or 4
+ outside of bundle2). Aside from the filenames in the *treemanifests
+ segment* containing a trailing "/" character, it behaves identically to
+ the *filelogs segment* (see below). The final sub-segment is followed by
+ an *empty chunk* (logically, a sub-segment with filename size 0). This
+ denotes the boundary to the *filelogs segment*.
Filelogs Segment
================
@@ -1847,6 +1872,12 @@
The following sub-options can be defined:
+ "multi-urls"
+        A boolean option. When enabled, the value of the '[paths]' entry will
+        be parsed as a list and the alias will resolve to multiple
+        destinations. If some of the list entries use the 'path://' syntax,
+        their sub-options will be inherited individually.
+
"pushurl"
The URL to use for push operations. If not defined, the location
defined by the path's main entry is used.
@@ -2274,6 +2305,13 @@
Environment Variables
</td></tr>
<tr><td>
+ <a href="/help/evolution">
+ evolution
+ </a>
+ </td><td>
+ Safely rewriting history (EXPERIMENTAL)
+ </td></tr>
+ <tr><td>
<a href="/help/extensions">
extensions
</a>
@@ -3639,12 +3677,13 @@
filelogs.
</p>
<p>
- There are 3 versions of changegroups: "1", "2", and "3". From a
+ There are 4 versions of changegroups: "1", "2", "3" and "4". From a
high-level, versions "1" and "2" are almost exactly the same, with the
only difference being an additional item in the *delta header*. Version
"3" adds support for storage flags in the *delta header* and optionally
exchanging treemanifests (enabled by setting an option on the
- "changegroup" part in the bundle2).
+ "changegroup" part in the bundle2). Version "4" adds support for exchanging
+ sidedata (additional revision metadata not part of the digest).
</p>
<p>
Changegroups when not exchanging treemanifests consist of 3 logical
@@ -3724,8 +3763,8 @@
bundle/changegroup).
</p>
<p>
- The *delta header* is different between versions "1", "2", and
- "3" of the changegroup format.
+ The *delta header* is different between versions "1", "2", "3" and "4"
+ of the changegroup format.
</p>
<p>
Version 1 (headerlen=80):
@@ -3761,6 +3800,17 @@
+------------------------------------------------------------------------------+
</pre>
<p>
+ Version 4 (headerlen=103):
+ </p>
+ <pre>
+ +------------------------------------------------------------------------------+----------+
+ | | | | | | | |
+ | node | p1 node | p2 node | base node | link node | flags | pflags |
+ | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | (1 byte) |
+ | | | | | | | |
+ +------------------------------------------------------------------------------+----------+
+ </pre>
+ <p>
The *delta data* consists of "chunklen - 4 - headerlen" bytes, which contain a
series of *delta*s, densely packed (no separators). These deltas describe a diff
from an existing entry (either that the recipient already has, or previously
@@ -3799,6 +3849,8 @@
<dd>Ellipsis revision. Revision hash does not match data (likely due to rewritten parents).
<dt>8192
<dd>Externally stored. The revision fulltext contains "key:value" "\n" delimited metadata defining an object stored elsewhere. Used by the LFS extension.
+ <dt>4096
+ <dd>Contains copy information. This revision changes files in a way that could affect copy tracing. This does *not* affect changegroup handling, but is relevant for other parts of Mercurial.
</dl>
<p>
For historical reasons, the integer values are identical to revlog version 1
@@ -3806,6 +3858,15 @@
field. Bits were allocated starting from the most-significant bit, hence the
reverse ordering and allocation of these flags.
</p>
+ <p>
+ The *pflags* (protocol flags) field holds bitwise flags affecting the protocol
+ itself. They are first in the header since they may affect the handling of the
+  rest of the fields in a future version. They are defined as follows:
+ </p>
+ <dl>
+ <dt>1 indicates whether to read a chunk of sidedata (of variable length) right
+ <dd>after the revision flags.
+ </dl>
<h2>Changeset Segment</h2>
<p>
The *changeset segment* consists of a single *delta group* holding
@@ -3823,9 +3884,9 @@
</p>
<h3>Treemanifests Segment</h3>
<p>
- The *treemanifests segment* only exists in changegroup version "3", and
- only if the 'treemanifest' param is part of the bundle2 changegroup part
- (it is not possible to use changegroup version 3 outside of bundle2).
+ The *treemanifests segment* only exists in changegroup version "3" and "4",
+ and only if the 'treemanifest' param is part of the bundle2 changegroup part
+ (it is not possible to use changegroup version 3 or 4 outside of bundle2).
Aside from the filenames in the *treemanifests segment* containing a
trailing "/" character, it behaves identically to the *filelogs segment*
(see below). The final sub-segment is followed by an *empty chunk* (logically,
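
The headerlen values quoted in this help text (80, 100, 102 and 103) follow
directly from the field widths in the tables, assuming 20-byte SHA-1 nodes.
A sketch that checks only the arithmetic; the version 4 field order below
mirrors the summary table, while the prose notes that *pflags* leads on the
wire, so treat the layout as illustrative rather than authoritative:

    import struct

    # Sketch: delta header sizes per changegroup version (SHA-1 nodes).
    V1 = struct.Struct('>20s20s20s20s')       # node, p1, p2, link
    V2 = struct.Struct('>20s20s20s20s20s')    # + base node
    V3 = struct.Struct('>20s20s20s20s20sH')   # + 2-byte flags
    V4 = struct.Struct('>20s20s20s20s20sHB')  # + 1-byte pflags

    assert (V1.size, V2.size, V3.size, V4.size) == (80, 100, 102, 103)
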
--- a/tests/test-hgignore.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-hgignore.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,3 +1,17 @@
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
$ hg init ignorerepo
$ cd ignorerepo
--- a/tests/test-hgrc.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-hgrc.t Mon Jun 07 17:10:35 2021 -0400
@@ -253,10 +253,9 @@
> [paths]
> foo = bar
> EOF
- $ hg showconfig --debug paths
+ $ hg showconfig --source paths
plain: True
- read config from: $TESTTMP/hgrc
- $TESTTMP/hgrc:17: paths.foo=$TESTTMP/bar
+ $TESTTMP/hgrc:17: paths.foo=bar
Test we can skip the user configuration
--- a/tests/test-hgweb-json.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-hgweb-json.t Mon Jun 07 17:10:35 2021 -0400
@@ -2272,6 +2272,10 @@
"topic": "environment"
},
{
+ "summary": "Safely rewriting history (EXPERIMENTAL)",
+ "topic": "evolution"
+ },
+ {
"summary": "Using Additional Features",
"topic": "extensions"
},
--- a/tests/test-histedit-obsolete.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-histedit-obsolete.t Mon Jun 07 17:10:35 2021 -0400
@@ -307,7 +307,7 @@
o 0:cb9a9f314b8b (public) a
$ hg histedit -r '.~2'
- abort: cannot edit public changesets
+ abort: cannot edit public changesets: cb9a9f314b8b, 40db8afa467b
(see 'hg help phases' for details)
[10]
--- a/tests/test-init.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-init.t Mon Jun 07 17:10:35 2021 -0400
@@ -19,6 +19,7 @@
store created
00changelog.i created
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -60,6 +61,7 @@
$ hg --config format.usestore=false init old
$ checknewrepo old
+ exp-dirstate-v2 (dirstate-v2 !)
generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
@@ -73,6 +75,7 @@
$ checknewrepo old2
store created
00changelog.i created
+ exp-dirstate-v2 (dirstate-v2 !)
generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
@@ -87,6 +90,7 @@
$ checknewrepo old3
store created
00changelog.i created
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -103,6 +107,7 @@
store created
00changelog.i created
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
@@ -221,6 +226,7 @@
store created
00changelog.i created
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -243,6 +249,7 @@
store created
00changelog.i created
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -261,6 +268,7 @@
store created
00changelog.i created
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
--- a/tests/test-lfconvert.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-lfconvert.t Mon Jun 07 17:10:35 2021 -0400
@@ -96,6 +96,7 @@
"lfconvert" adds 'largefiles' to .hg/requires.
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
largefiles
--- a/tests/test-lfs-bundle.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-lfs-bundle.t Mon Jun 07 17:10:35 2021 -0400
@@ -101,7 +101,7 @@
#if windows
$ unset LOCALAPPDATA
$ unset APPDATA
- $ HGRCPATH= hg config lfs --debug
+ $ HGRCPATH= hg config lfs --source
abort: unknown lfs usercache location
(define LOCALAPPDATA or APPDATA in the environment, or set lfs.usercache)
[255]
@@ -109,7 +109,7 @@
#if osx
$ unset HOME
- $ HGRCPATH= hg config lfs --debug
+ $ HGRCPATH= hg config lfs --source
abort: unknown lfs usercache location
(define HOME in the environment, or set lfs.usercache)
[255]
@@ -118,7 +118,7 @@
#if no-windows no-osx
$ unset XDG_CACHE_HOME
$ unset HOME
- $ HGRCPATH= hg config lfs --debug
+ $ HGRCPATH= hg config lfs --source
abort: unknown lfs usercache location
(define XDG_CACHE_HOME or HOME in the environment, or set lfs.usercache)
[255]
--- a/tests/test-lfs-largefiles.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-lfs-largefiles.t Mon Jun 07 17:10:35 2021 -0400
@@ -290,6 +290,7 @@
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
lfs
--- a/tests/test-lfs-serve.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-lfs-serve.t Mon Jun 07 17:10:35 2021 -0400
@@ -355,11 +355,11 @@
# LFS required- both lfs and non-lfs revlogs have 0x2000 flag
*** runcommand debugprocessors lfs.bin -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand debugprocessors nonlfs2.txt -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand config extensions --cwd ../server
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -368,7 +368,7 @@
# LFS not enabled- revlogs don't have 0x2000 flag
*** runcommand debugprocessors nonlfs3.txt
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
*** runcommand config extensions
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -411,11 +411,11 @@
# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
*** runcommand debugprocessors lfs.bin -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand debugprocessors nonlfs2.txt -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand config extensions --cwd ../server
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -424,7 +424,7 @@
# LFS enabled without requirement- revlogs have 0x2000 flag
*** runcommand debugprocessors nonlfs3.txt
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand config extensions
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -433,7 +433,7 @@
# LFS disabled locally- revlogs don't have 0x2000 flag
*** runcommand debugprocessors nonlfs.txt -R ../nonlfs
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
*** runcommand config extensions --cwd ../nonlfs
extensions.debugprocessors=$TESTTMP/debugprocessors.py
extensions.lfs=!
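
The 0x800 -> 0x1000 renumbering in this test matches the flag table added to
the changegroup help text earlier in this patch: 4096 (0x1000) is the new
"contains copy information" storage flag, while 8192 (0x2000) remains the
externally-stored (LFS) flag. A trivial check of that correspondence:

    # Sketch: the renumbered processor values match the documented flags.
    assert 0x1000 == 4096  # contains copy information
    assert 0x2000 == 8192  # externally stored (LFS)
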
--- a/tests/test-lfs.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-lfs.t Mon Jun 07 17:10:35 2021 -0400
@@ -785,8 +785,8 @@
checking manifests
crosschecking files in changesets and manifests
checking files
- l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
- large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+ l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+ large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
checked 5 changesets with 10 changes to 4 files
2 integrity errors encountered!
(first damaged changeset appears to be 0)
@@ -895,9 +895,9 @@
checking manifests
crosschecking files in changesets and manifests
checking files
- l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
+ l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
- large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+ large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
checked 5 changesets with 10 changes to 4 files
@@ -939,8 +939,8 @@
checking manifests
crosschecking files in changesets and manifests
checking files
- l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
- large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+ l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+ large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
checked 5 changesets with 10 changes to 4 files
2 integrity errors encountered!
(first damaged changeset appears to be 0)
@@ -965,9 +965,9 @@
checking manifests
crosschecking files in changesets and manifests
checking files
- l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
+ l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
- large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+ large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
checked 5 changesets with 10 changes to 4 files
@@ -985,7 +985,7 @@
Accessing a corrupt file will complain
$ hg --cwd fromcorrupt2 cat -r 0 large
- abort: integrity check failed on data/large.i:0
+ abort: integrity check failed on data/large:0
[50]
lfs -> normal -> lfs round trip conversions are possible. The 'none()'
--- a/tests/test-manifest.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-manifest.py Mon Jun 07 17:10:35 2021 -0400
@@ -81,12 +81,12 @@
raise NotImplementedError('parsemanifest not implemented by test case')
def testEmptyManifest(self):
- m = self.parsemanifest(EMTPY_MANIFEST)
+ m = self.parsemanifest(20, EMTPY_MANIFEST)
self.assertEqual(0, len(m))
self.assertEqual([], list(m))
def testManifest(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
@@ -95,20 +95,16 @@
with self.assertRaises(KeyError):
m[b'wat']
- def testManifestLongHashes(self):
- m = self.parsemanifest(b'a\0' + b'f' * 64 + b'\n')
- self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
-
def testSetItem(self):
want = BIN_HASH_1
- m = self.parsemanifest(EMTPY_MANIFEST)
+ m = self.parsemanifest(20, EMTPY_MANIFEST)
m[b'a'] = want
self.assertIn(b'a', m)
self.assertEqual(want, m[b'a'])
self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
m[b'a'] = want
self.assertEqual(want, m[b'a'])
self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
@@ -116,14 +112,14 @@
def testSetFlag(self):
want = b'x'
- m = self.parsemanifest(EMTPY_MANIFEST)
+ m = self.parsemanifest(20, EMTPY_MANIFEST)
# first add a file; a file-less flag makes no sense
m[b'a'] = BIN_HASH_1
m.setflag(b'a', want)
self.assertEqual(want, m.flags(b'a'))
self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
# first add a file; a file-less flag makes no sense
m[b'a'] = BIN_HASH_1
m.setflag(b'a', want)
@@ -133,7 +129,7 @@
)
def testCopy(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
m[b'a'] = BIN_HASH_1
m2 = m.copy()
del m
@@ -142,7 +138,7 @@
def testCompaction(self):
unhex = binascii.unhexlify
h1, h2 = unhex(HASH_1), unhex(HASH_2)
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
m[b'alpha'] = h1
m[b'beta'] = h2
del m[b'foo']
@@ -164,7 +160,7 @@
m[b'foo']
def testMatchException(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
def filt(path):
@@ -177,7 +173,7 @@
m._matches(match)
def testRemoveItem(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
del m[b'foo']
with self.assertRaises(KeyError):
m[b'foo']
@@ -193,9 +189,9 @@
addl = b'z-only-in-left\0' + HASH_1 + b'\n'
addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
left = self.parsemanifest(
- A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
+ 20, A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
)
- right = self.parsemanifest(A_SHORT_MANIFEST + addr)
+ right = self.parsemanifest(20, A_SHORT_MANIFEST + addr)
want = {
b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
@@ -208,14 +204,18 @@
b'foo': (MISSING, (BIN_HASH_3, b'x')),
b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
}
- self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
+ self.assertEqual(
+ want, self.parsemanifest(20, EMTPY_MANIFEST).diff(left)
+ )
want = {
b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
b'foo': ((BIN_HASH_3, b'x'), MISSING),
b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
}
- self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
+ self.assertEqual(
+ want, left.diff(self.parsemanifest(20, EMTPY_MANIFEST))
+ )
copy = right.copy()
del copy[b'z-only-in-right']
del right[b'foo']
@@ -225,7 +225,7 @@
}
self.assertEqual(want, right.diff(copy))
- short = self.parsemanifest(A_SHORT_MANIFEST)
+ short = self.parsemanifest(20, A_SHORT_MANIFEST)
pruned = short.copy()
del pruned[b'foo']
want = {
@@ -247,27 +247,27 @@
l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
)
try:
- self.parsemanifest(backwards)
+ self.parsemanifest(20, backwards)
self.fail('Should have raised ValueError')
except ValueError as v:
self.assertIn('Manifest lines not in sorted order.', str(v))
def testNoTerminalNewline(self):
try:
- self.parsemanifest(A_SHORT_MANIFEST + b'wat')
+ self.parsemanifest(20, A_SHORT_MANIFEST + b'wat')
self.fail('Should have raised ValueError')
except ValueError as v:
self.assertIn('Manifest did not end in a newline.', str(v))
def testNoNewLineAtAll(self):
try:
- self.parsemanifest(b'wat')
+ self.parsemanifest(20, b'wat')
self.fail('Should have raised ValueError')
except ValueError as v:
self.assertIn('Manifest did not end in a newline.', str(v))
def testHugeManifest(self):
- m = self.parsemanifest(A_HUGE_MANIFEST)
+ m = self.parsemanifest(20, A_HUGE_MANIFEST)
self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
self.assertEqual(len(m), len(list(m)))
@@ -275,7 +275,7 @@
"""Tests matches() for a few specific files to make sure that both
the set of files as well as their flags and nodeids are correct in
the resulting manifest."""
- m = self.parsemanifest(A_HUGE_MANIFEST)
+ m = self.parsemanifest(20, A_HUGE_MANIFEST)
match = matchmod.exact([b'file1', b'file200', b'file300'])
m2 = m._matches(match)
@@ -291,7 +291,7 @@
"""Tests matches() for a small set of specific files, including one
        nonexistent file to make sure it only matches against existing files.
"""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.exact(
[b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
@@ -305,7 +305,7 @@
def testMatchesNonexistentDirectory(self):
"""Tests matches() for a relpath match on a directory that doesn't
actually exist."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(
util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
@@ -316,7 +316,7 @@
def testMatchesExactLarge(self):
"""Tests matches() for files matching a large list of exact files."""
- m = self.parsemanifest(A_HUGE_MANIFEST)
+ m = self.parsemanifest(20, A_HUGE_MANIFEST)
flist = m.keys()[80:300]
match = matchmod.exact(flist)
@@ -326,7 +326,7 @@
def testMatchesFull(self):
'''Tests matches() for what should be a full match.'''
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
m2 = m._matches(match)
@@ -336,7 +336,7 @@
def testMatchesDirectory(self):
"""Tests matches() on a relpath match on a directory, which should
match against all files within said directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(
util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
@@ -362,7 +362,7 @@
"""Tests matches() on an exact match on a directory, which should
result in an empty manifest because you can't perform an exact match
against a directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.exact([b'a/b'])
m2 = m._matches(match)
@@ -372,7 +372,7 @@
def testMatchesCwd(self):
"""Tests matches() on a relpath match with the current directory ('.')
when not in the root directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(
util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
@@ -397,7 +397,7 @@
def testMatchesWithPattern(self):
"""Tests matches() for files matching a pattern that reside
deeper than the specified directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
m2 = m._matches(match)
@@ -408,8 +408,12 @@
class testmanifestdict(unittest.TestCase, basemanifesttests):
- def parsemanifest(self, text):
- return manifestmod.manifestdict(text)
+ def parsemanifest(self, nodelen, text):
+ return manifestmod.manifestdict(nodelen, text)
+
+ def testManifestLongHashes(self):
+ m = self.parsemanifest(32, b'a\0' + b'f' * 64 + b'\n')
+ self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
def testObviouslyBogusManifest(self):
# This is a 163k manifest that came from oss-fuzz. It was a
@@ -433,15 +437,15 @@
b'\xac\xbe'
)
with self.assertRaises(ValueError):
- self.parsemanifest(data)
+ self.parsemanifest(20, data)
class testtreemanifest(unittest.TestCase, basemanifesttests):
- def parsemanifest(self, text):
+ def parsemanifest(self, nodelen, text):
return manifestmod.treemanifest(sha1nodeconstants, b'', text)
def testWalkSubtrees(self):
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
dirs = [s._dir for s in m.walksubtrees()]
self.assertEqual(
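
Every parsemanifest() call in this test now passes the node length up front,
so manifests carrying 32-byte hashes must be requested explicitly, as the
relocated testManifestLongHashes shows. A minimal sketch of the updated
constructor signature, using only calls visible in this diff:

    # Sketch: manifestdict(nodelen, text) per the changes above; 40 hex
    # characters for 20-byte SHA-1 nodes, 64 for 32-byte nodes.
    import binascii
    from mercurial import manifest as manifestmod

    m20 = manifestmod.manifestdict(20, b'a\0' + b'f' * 40 + b'\n')
    m32 = manifestmod.manifestdict(32, b'a\0' + b'f' * 64 + b'\n')
    assert m20[b'a'] == binascii.unhexlify(b'f' * 40)
    assert m32[b'a'] == binascii.unhexlify(b'f' * 64)
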
--- a/tests/test-merge-subrepos.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-merge-subrepos.t Mon Jun 07 17:10:35 2021 -0400
@@ -61,7 +61,7 @@
> --config blackbox.track='command commandfinish'
9bfe45a197d7+ tip
$ cat .hg/blackbox.log
- * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --cmdserver chgunix * (glob) (chg !)
+ * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
* @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* (glob)
* @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* exited 0 * (glob)
--- a/tests/test-narrow-clone-no-ellipsis.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-clone-no-ellipsis.t Mon Jun 07 17:10:35 2021 -0400
@@ -24,6 +24,7 @@
$ cd narrow
$ cat .hg/requires | grep -v generaldelta
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
narrowhg-experimental
persistent-nodemap (rust !)
--- a/tests/test-narrow-clone-non-narrow-server.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-clone-non-narrow-server.t Mon Jun 07 17:10:35 2021 -0400
@@ -57,6 +57,7 @@
comparing with http://localhost:$HGPORT1/
searching for changes
looking for local changes to affected paths
+ deleting unwanted files from working copy
$ hg tracked --addinclude f1 http://localhost:$HGPORT1/
nothing to widen or narrow
--- a/tests/test-narrow-clone-stream.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-clone-stream.t Mon Jun 07 17:10:35 2021 -0400
@@ -64,6 +64,7 @@
$ cat .hg/requires
dotencode (tree !)
dotencode (flat-fncache !)
+ exp-dirstate-v2 (dirstate-v2 !)
fncache (tree !)
fncache (flat-fncache !)
generaldelta
--- a/tests/test-narrow-clone.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-clone.t Mon Jun 07 17:10:35 2021 -0400
@@ -40,6 +40,7 @@
$ cd narrow
$ cat .hg/requires | grep -v generaldelta
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
narrowhg-experimental
persistent-nodemap (rust !)
--- a/tests/test-narrow-exchange.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-exchange.t Mon Jun 07 17:10:35 2021 -0400
@@ -105,7 +105,7 @@
remote: adding file changes
remote: transaction abort!
remote: rollback completed
- remote: abort: data/inside2/f.i@4a1aa07735e673e20c00fae80f40dc301ee30616: unknown parent (reporevlogstore !)
+ remote: abort: data/inside2/f@4a1aa07735e673e20c00fae80f40dc301ee30616: unknown parent (reporevlogstore !)
remote: abort: data/inside2/f/index@4a1aa07735e6: no node (reposimplestore !)
abort: stream ended unexpectedly (got 0 bytes, expected 4)
[255]
@@ -218,8 +218,8 @@
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
- remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+ remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
remote: transaction abort! (lfs-on !)
remote: rollback completed (lfs-on !)
- remote: abort: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+ remote: abort: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
--- a/tests/test-narrow-patterns.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-patterns.t Mon Jun 07 17:10:35 2021 -0400
@@ -193,6 +193,7 @@
deleting data/dir1/dirA/bar.i (reporevlogstore !)
deleting data/dir1/dirA/bar/0eca1d0cbdaea4651d1d04d71976a6d2d9bfaae5 (reposimplestore !)
deleting data/dir1/dirA/bar/index (reposimplestore !)
+ deleting unwanted files from working copy
saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
adding changesets
adding manifests
@@ -249,6 +250,7 @@
deleting data/dir1/dirA/foo.i (reporevlogstore !)
deleting data/dir1/dirA/foo/162caeb3d55dceb1fee793aa631ac8c73fcb8b5e (reposimplestore !)
deleting data/dir1/dirA/foo/index (reposimplestore !)
+ deleting unwanted files from working copy
saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
adding changesets
adding manifests
--- a/tests/test-narrow-pull.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-pull.t Mon Jun 07 17:10:35 2021 -0400
@@ -147,7 +147,7 @@
$ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
$ cd narrow2
$ hg pull -q -r 1
- remote: abort: unexpected error: unable to resolve parent while packing '00manifest.i' 1 for changeset 0
+ remote: abort: unexpected error: unable to resolve parent while packing '00manifest' 1 for changeset 0
transaction abort!
rollback completed
abort: pull failed on remote
--- a/tests/test-narrow-share.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-share.t Mon Jun 07 17:10:35 2021 -0400
@@ -94,6 +94,7 @@
deleting meta/d1/00manifest.i (tree !)
deleting meta/d3/00manifest.i (tree !)
deleting meta/d5/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ hg -R main tracked
I path:d7
$ hg -R main files
--- a/tests/test-narrow-sparse.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-sparse.t Mon Jun 07 17:10:35 2021 -0400
@@ -58,6 +58,7 @@
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
narrowhg-experimental
@@ -69,3 +70,28 @@
treemanifest (tree !)
$ hg debugrebuilddirstate
+
+We only make the following assertions for the flat test case, since in the
+treemanifest test case debugsparse fails with "path ends in directory
+separator: outside/", which seems to be a bug unrelated to the regression
+this is testing for.
+
+#if flat
+widening with both sparse and narrow is possible
+
+ $ cat >> .hg/hgrc <<EOF
+ > [extensions]
+ > sparse =
+ > narrow =
+ > EOF
+
+ $ hg debugsparse -X outside/f -X widest/f
+ $ hg tracked -q --addinclude outside/f
+ $ find . -name .hg -prune -o -type f -print | sort
+ ./inside/f
+
+ $ hg debugsparse -d outside/f
+ $ find . -name .hg -prune -o -type f -print | sort
+ ./inside/f
+ ./outside/f
+#endif
--- a/tests/test-narrow-trackedcmd.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow-trackedcmd.t Mon Jun 07 17:10:35 2021 -0400
@@ -150,6 +150,7 @@
looking for local changes to affected paths
deleting data/inside/f.i
deleting meta/inside/00manifest.i (tree !)
+ deleting unwanted files from working copy
saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
adding changesets
adding manifests
@@ -191,6 +192,7 @@
looking for local changes to affected paths
deleting data/widest/f.i
deleting meta/widest/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ hg tracked
I path:outisde
I path:wider
--- a/tests/test-narrow.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-narrow.t Mon Jun 07 17:10:35 2021 -0400
@@ -132,12 +132,14 @@
looking for local changes to affected paths
The following changeset(s) or their ancestors have local changes not on the remote:
* (glob)
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d0/f.i (reporevlogstore !)
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg log -T "{rev}: {desc} {outsidenarrow}\n"
7: local change to d3
@@ -164,12 +166,14 @@
comparing with ssh://user@dummy/master
searching for changes
looking for local changes to affected paths
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d0/f.i (reporevlogstore !)
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
Updates off of stripped commit if necessary
$ hg co -r 'desc("local change to d3")' -q
@@ -183,12 +187,14 @@
* (glob)
* (glob)
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d3/f.i (reporevlogstore !)
deleting meta/d3/00manifest.i (tree !)
deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
deleting data/d3/f/99fa7136105a15e2045ce3d9152e4837c5349e4d (reposimplestore !)
deleting data/d3/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg log -T '{desc}\n' -r .
add d10/f
Updates to nullid if necessary
@@ -206,12 +212,14 @@
The following changeset(s) or their ancestors have local changes not on the remote:
* (glob)
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d3/f.i (reporevlogstore !)
deleting meta/d3/00manifest.i (tree !)
deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
deleting data/d3/f/5ce0767945cbdbca3b924bb9fbf5143f72ab40ac (reposimplestore !)
deleting data/d3/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg id
000000000000
$ cd ..
@@ -272,6 +280,7 @@
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
$ hg files
[1]
@@ -332,6 +341,7 @@
deleting meta/d6/00manifest.i (tree !)
deleting data/d6/f/7339d30678f451ac8c3f38753beeb4cf2e1655c7 (reposimplestore !)
deleting data/d6/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d0
I path:d3
@@ -355,6 +365,7 @@
deleting data/d3/f.i (reporevlogstore !)
deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
deleting data/d3/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d0
I path:d3
@@ -378,6 +389,7 @@
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d3
I path:d9
@@ -478,11 +490,13 @@
path:d2
remove these unused includes (yn)? y
looking for local changes to affected paths
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-auto-remove/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d0/f.i
deleting data/d2/f.i
deleting meta/d0/00manifest.i (tree !)
deleting meta/d2/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d1
$ hg files
@@ -504,10 +518,12 @@
path:d2
remove these unused includes (yn)? y
looking for local changes to affected paths
+ deleting unwanted changesets
deleting data/d0/f.i
deleting data/d2/f.i
deleting meta/d0/00manifest.i (tree !)
deleting meta/d2/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ ls .hg/strip-backup/
@@ -521,4 +537,5 @@
looking for local changes to affected paths
deleting data/d0/f.i
deleting meta/d0/00manifest.i (tree !)
+ deleting unwanted files from working copy
not deleting possibly dirty file d0/f
--- a/tests/test-obshistory.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-obshistory.t Mon Jun 07 17:10:35 2021 -0400
@@ -13,6 +13,7 @@
> [experimental]
> evolution.createmarkers = yes
> evolution.effect-flags = yes
+ > evolution.allowdivergence=true
> EOF
Test output on amended commit
--- a/tests/test-obsmarker-template.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-obsmarker-template.t Mon Jun 07 17:10:35 2021 -0400
@@ -11,6 +11,7 @@
> publish=False
> [experimental]
> evolution=true
+ > evolution.allowdivergence=true
> [templates]
> obsfatesuccessors = "{if(successors, " as ")}{join(successors, ", ")}"
> obsfateverb = "{obsfateverb(successors, markers)}"
--- a/tests/test-parseindex2.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-parseindex2.py Mon Jun 07 17:10:35 2021 -0400
@@ -14,13 +14,16 @@
from mercurial.node import (
bin,
hex,
- nullid,
nullrev,
+ sha1nodeconstants,
)
from mercurial import (
policy,
pycompat,
)
+from mercurial.revlogutils import (
+ constants,
+)
parsers = policy.importmod('parsers')
@@ -40,7 +43,7 @@
s = 64
cache = None
index = []
- nodemap = {nullid: nullrev}
+ nodemap = {sha1nodeconstants.nullid: nullrev}
n = off = 0
l = len(data) - s
@@ -49,6 +52,12 @@
cache = (0, data)
while off <= l:
e = struct.unpack(indexformatng, data[off : off + s])
+ e = e + (
+ 0,
+ 0,
+ constants.COMP_MODE_INLINE,
+ constants.COMP_MODE_INLINE,
+ )
nodemap[e[7]] = n
append(e)
n += 1
@@ -58,6 +67,12 @@
else:
while off <= l:
e = struct.unpack(indexformatng, data[off : off + s])
+ e = e + (
+ 0,
+ 0,
+ constants.COMP_MODE_INLINE,
+ constants.COMP_MODE_INLINE,
+ )
nodemap[e[7]] = n
append(e)
n += 1
@@ -227,7 +242,7 @@
ix = parsers.parse_index2(data_inlined, True)[0]
for i, r in enumerate(ix):
- if r[7] == nullid:
+ if r[7] == sha1nodeconstants.nullid:
i = -1
try:
self.assertEqual(
@@ -240,7 +255,20 @@
break
def testminusone(self):
- want = (0, 0, 0, -1, -1, -1, -1, nullid)
+ want = (
+ 0,
+ 0,
+ 0,
+ -1,
+ -1,
+ -1,
+ -1,
+ sha1nodeconstants.nullid,
+ 0,
+ 0,
+ constants.COMP_MODE_INLINE,
+ constants.COMP_MODE_INLINE,
+ )
index, junk = parsers.parse_index2(data_inlined, True)
got = index[-1]
self.assertEqual(want, got) # inline data
@@ -262,7 +290,21 @@
# node won't matter for this test, let's just make sure
# they don't collide. Other data don't matter either.
node = hexrev(p1) + hexrev(p2) + b'.' * 12
- index.append((0, 0, 12, 1, 34, p1, p2, node))
+ e = (
+ 0,
+ 0,
+ 12,
+ 1,
+ 34,
+ p1,
+ p2,
+ node,
+ 0,
+ 0,
+ constants.COMP_MODE_INLINE,
+ constants.COMP_MODE_INLINE,
+ )
+ index.append(e)
appendrev(4)
appendrev(5)
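
The index entries in this test grow from 8 fields to 12. Reading from the
testminusone tuple above, the four appended values are two zero placeholders
for sidedata (offset and size) followed by two compression-mode markers, one
for revision data and one for sidedata. The field names past the classic first
eight are an informed reading of this diff, not a documented layout:

    # Sketch: the 12-field index entry as appended in this test.
    from mercurial.revlogutils import constants

    entry = (
        0, 0, 12, 1, 34,  # offset/flags, comp. length, raw length, base, link
        -1, -1, b'\0' * 20,  # p1 rev, p2 rev, node (placeholder values)
        0, 0,  # sidedata offset and size (assumed meaning)
        constants.COMP_MODE_INLINE,  # revision data compression mode
        constants.COMP_MODE_INLINE,  # sidedata compression mode
    )
    assert len(entry) == 12
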
--- a/tests/test-paths.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-paths.t Mon Jun 07 17:10:35 2021 -0400
@@ -98,6 +98,9 @@
expand: $TESTTMP/a/$SOMETHING/bar
$ hg log -rnull -T '{get(peerurls, "dupe")}\n'
$TESTTMP/b#tip
+ $ hg log -rnull -T '{peerurls % "{urls|json}\n"}'
+ [{"pushurl": "https://example.com/dupe", "url": "$TESTTMP/b#tip"}]
+ [{"url": "$TESTTMP/a/$SOMETHING/bar"}]
(sub options can be populated by map/dot operation)
@@ -172,7 +175,7 @@
> EOF
$ hg paths
- (paths.default:pushurl not a URL; ignoring)
+ (paths.default:pushurl not a URL; ignoring: "/not/a/url")
default = /path/to/nothing
#fragment is not allowed in :pushurl
@@ -385,3 +388,128 @@
abort: cannot use `path://unknown`, "unknown" is not a known path
[255]
+Test path pointing to multiple urls
+===================================
+
+Simple cases
+------------
+- one layer
+- one list
+- no special option
+
+ $ cat << EOF > .hg/hgrc
+ > [paths]
+ > one-path=foo
+ > multiple-path=foo,bar,baz,https://example.org/
+ > multiple-path:multi-urls=yes
+ > EOF
+ $ hg path
+ gpath1 = http://hg.example.com/
+ multiple-path = $TESTTMP/chained_path/foo
+ multiple-path:multi-urls = yes
+ multiple-path = $TESTTMP/chained_path/bar
+ multiple-path:multi-urls = yes
+ multiple-path = $TESTTMP/chained_path/baz
+ multiple-path:multi-urls = yes
+ multiple-path = https://example.org/
+ multiple-path:multi-urls = yes
+ one-path = $TESTTMP/chained_path/foo
+
+Reference to a list
+-------------------
+
+ $ cat << EOF >> .hg/hgrc
+ > ref-to-multi=path://multiple-path
+ > EOF
+ $ hg path | grep ref-to-multi
+ ref-to-multi = $TESTTMP/chained_path/foo
+ ref-to-multi:multi-urls = yes
+ ref-to-multi = $TESTTMP/chained_path/bar
+ ref-to-multi:multi-urls = yes
+ ref-to-multi = $TESTTMP/chained_path/baz
+ ref-to-multi:multi-urls = yes
+ ref-to-multi = https://example.org/
+ ref-to-multi:multi-urls = yes
+
+List with a reference
+---------------------
+
+ $ cat << EOF >> .hg/hgrc
+ > multi-with-ref=path://one-path, ssh://babar@savannah/celeste-ville
+ > multi-with-ref:multi-urls=yes
+ > EOF
+ $ hg path | grep multi-with-ref
+ multi-with-ref = $TESTTMP/chained_path/foo
+ multi-with-ref:multi-urls = yes
+ multi-with-ref = ssh://babar@savannah/celeste-ville
+ multi-with-ref:multi-urls = yes
+
+List with a reference to a list
+-------------------------------
+
+ $ cat << EOF >> .hg/hgrc
+ > multi-to-multi-ref = path://multiple-path, ssh://celeste@savannah/celeste-ville
+ > multi-to-multi-ref:multi-urls = yes
+ > EOF
+ $ hg path | grep multi-to-multi-ref
+ multi-to-multi-ref = $TESTTMP/chained_path/foo
+ multi-to-multi-ref:multi-urls = yes
+ multi-to-multi-ref = $TESTTMP/chained_path/bar
+ multi-to-multi-ref:multi-urls = yes
+ multi-to-multi-ref = $TESTTMP/chained_path/baz
+ multi-to-multi-ref:multi-urls = yes
+ multi-to-multi-ref = https://example.org/
+ multi-to-multi-ref:multi-urls = yes
+ multi-to-multi-ref = ssh://celeste@savannah/celeste-ville
+ multi-to-multi-ref:multi-urls = yes
+
+individual suboptions are inherited
+-----------------------------------
+
+ $ cat << EOF >> .hg/hgrc
+ > with-pushurl = foo
+ > with-pushurl:pushurl = http://foo.bar/
+ > with-pushrev = bar
+ > with-pushrev:pushrev = draft()
+ > with-both = toto
+ > with-both:pushurl = http://ta.ta
+ > with-both:pushrev = secret()
+ > ref-all-no-opts = path://with-pushurl, path://with-pushrev, path://with-both
+ > ref-all-no-opts:multi-urls = yes
+ > with-overwrite = path://with-pushurl, path://with-pushrev, path://with-both
+ > with-overwrite:multi-urls = yes
+ > with-overwrite:pushrev = public()
+ > EOF
+ $ hg path | grep with-pushurl
+ with-pushurl = $TESTTMP/chained_path/foo
+ with-pushurl:pushurl = http://foo.bar/
+ $ hg path | grep with-pushrev
+ with-pushrev = $TESTTMP/chained_path/bar
+ with-pushrev:pushrev = draft()
+ $ hg path | grep with-both
+ with-both = $TESTTMP/chained_path/toto
+ with-both:pushrev = secret()
+ with-both:pushurl = http://ta.ta/
+ $ hg path | grep ref-all-no-opts
+ ref-all-no-opts = $TESTTMP/chained_path/foo
+ ref-all-no-opts:multi-urls = yes
+ ref-all-no-opts:pushurl = http://foo.bar/
+ ref-all-no-opts = $TESTTMP/chained_path/bar
+ ref-all-no-opts:multi-urls = yes
+ ref-all-no-opts:pushrev = draft()
+ ref-all-no-opts = $TESTTMP/chained_path/toto
+ ref-all-no-opts:multi-urls = yes
+ ref-all-no-opts:pushrev = secret()
+ ref-all-no-opts:pushurl = http://ta.ta/
+ $ hg path | grep with-overwrite
+ with-overwrite = $TESTTMP/chained_path/foo
+ with-overwrite:multi-urls = yes
+ with-overwrite:pushrev = public()
+ with-overwrite:pushurl = http://foo.bar/
+ with-overwrite = $TESTTMP/chained_path/bar
+ with-overwrite:multi-urls = yes
+ with-overwrite:pushrev = public()
+ with-overwrite = $TESTTMP/chained_path/toto
+ with-overwrite:multi-urls = yes
+ with-overwrite:pushrev = public()
+ with-overwrite:pushurl = http://ta.ta/
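
The ref-all-no-opts and with-overwrite outputs pin down the inheritance rule:
each expanded entry keeps the sub-options of the path it references, and a
sub-option declared on the alias itself wins over an inherited one (pushrev =
public() shadows the inherited draft() and secret(), while the inherited
pushurl values survive). A small sketch of that merge rule, with hypothetical
dictionaries standing in for parsed path entries:

    # Sketch: per-entry sub-option inheritance, local declarations winning.
    # The dict shapes are hypothetical; Mercurial's real objects differ.
    def merge_suboptions(inherited, local):
        merged = dict(inherited)  # start from the referenced path's options
        merged.update(local)      # alias-level sub-options take precedence
        return merged

    inherited = {'pushrev': 'draft()', 'pushurl': 'http://foo.bar/'}
    local = {'multi-urls': 'yes', 'pushrev': 'public()'}
    assert merge_suboptions(inherited, local) == {
        'pushrev': 'public()',
        'pushurl': 'http://foo.bar/',
        'multi-urls': 'yes',
    }
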
--- a/tests/test-permissions.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-permissions.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,5 +1,19 @@
#require unix-permissions no-root reporevlogstore
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
$ hg init t
$ cd t
--- a/tests/test-persistent-nodemap.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-persistent-nodemap.t Mon Jun 07 17:10:35 2021 -0400
@@ -57,6 +57,7 @@
$ hg debugformat
format-variant repo
fncache: yes
+ dirstate-v2: no
dotencode: yes
generaldelta: yes
share-safe: no
@@ -64,6 +65,7 @@
persistent-nodemap: yes
copies-sdc: no
revlog-v2: no
+ changelog-v2: no
plain-cl-delta: yes
compression: zlib (no-zstd !)
compression: zstd (zstd !)
@@ -71,14 +73,14 @@
$ hg debugbuilddag .+5000 --new-file
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5000
tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
data-length: 121088
data-unused: 0
data-unused: 0.000%
$ f --size .hg/store/00changelog.n
- .hg/store/00changelog.n: size=70
+ .hg/store/00changelog.n: size=62
Simple lookup works
@@ -90,10 +92,10 @@
#if rust
$ f --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
+ .hg/store/00changelog-????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
$ f --sha256 .hg/store/00manifest-*.nd
- .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
+ .hg/store/00manifest-????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
$ hg debugnodemap --dump-new | f --sha256 --size
size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
$ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
@@ -119,7 +121,7 @@
#else
$ f --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
+ .hg/store/00changelog-????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
$ hg debugnodemap --dump-new | f --sha256 --size
size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
$ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
@@ -194,7 +196,7 @@
#if no-pure no-rust
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5001
tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
data-length: 121088
@@ -202,7 +204,7 @@
data-unused: 0.000%
#else
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5001
tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
data-length: 121344
@@ -211,23 +213,23 @@
#endif
$ f --size .hg/store/00changelog.n
- .hg/store/00changelog.n: size=70
+ .hg/store/00changelog.n: size=62
 (The pure code uses the debug code that performs an incremental update, the C code re-encodes from scratch)
#if pure
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
+ .hg/store/00changelog-????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
#endif
#if rust
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
+ .hg/store/00changelog-????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
#endif
#if no-pure no-rust
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
+ .hg/store/00changelog-????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
#endif
$ hg debugnodemap --check
@@ -251,36 +253,36 @@
#if pure
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
data-length: 121600
data-unused: 512
data-unused: 0.421%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
+ .hg/store/00changelog-????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
#endif
#if rust
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
data-length: 121600
data-unused: 512
data-unused: 0.421%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
+ .hg/store/00changelog-????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
#endif
#if no-pure no-rust
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
data-length: 121088
data-unused: 0
data-unused: 0.000%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
+ .hg/store/00changelog-????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
#endif
Test force warming the cache
@@ -290,7 +292,7 @@
$ hg debugupdatecache
#if pure
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
data-length: 121088
@@ -298,7 +300,7 @@
data-unused: 0.000%
#else
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
data-length: 121088
@@ -312,7 +314,7 @@
First copy old data on the side.
$ mkdir ../tmp-copies
- $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
+ $ cp .hg/store/00changelog-????????.nd .hg/store/00changelog.n ../tmp-copies
Nodemap lagging behind
----------------------
@@ -328,7 +330,7 @@
If the nodemap is lagging behind, it can catch up fine
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5003
tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
data-length: 121344 (pure !)
@@ -342,7 +344,7 @@
data-unused: 0.000% (no-rust no-pure !)
$ cp -f ../tmp-copies/* .hg/store/
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
data-length: 121088
@@ -373,7 +375,7 @@
the nodemap should detect that the changelog has been tampered with and recover.
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
data-length: 121536 (pure !)
@@ -388,7 +390,7 @@
$ cp -f ../tmp-copies/* .hg/store/
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5002
tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
data-length: 121088
@@ -438,7 +440,7 @@
$ hg add a
$ hg ci -m a
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5003
tip-node: a52c5079765b5865d97b993b303a18740113bbb2
data-length: 121088
@@ -446,7 +448,7 @@
data-unused: 0.000%
$ echo babar2 > babar
$ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5004
tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
data-length: 121280 (pure !)
@@ -459,7 +461,7 @@
data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5004
tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
data-length: 121280 (pure !)
@@ -484,7 +486,7 @@
$ sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-pending && \
> hg debugnodemap --metadata && \
> sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-close sync-repo-read
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5004
tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
data-length: 121280 (pure !)
@@ -497,7 +499,7 @@
data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5005
tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
data-length: 121536 (pure !)
@@ -516,16 +518,16 @@
$ echo plakfe > a
$ f --size --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
- .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
- .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
+ .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
+ .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
+ .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
$ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
transaction abort!
rollback completed
abort: This is a late abort
[255]
$ hg debugnodemap --metadata
- uid: ???????????????? (glob)
+ uid: ???????? (glob)
tip-rev: 5005
tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
data-length: 121536 (pure !)
@@ -538,9 +540,9 @@
data-unused: 0.369% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ f --size --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
- .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
- .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
+ .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
+ .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
+ .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
Check that removing content does not confuse the nodemap
--------------------------------------------------------
@@ -576,6 +578,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -583,6 +586,7 @@
persistent-nodemap: yes no no
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -591,8 +595,9 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd no-dirstate-v2 !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd dirstate-v2 !)
removed: persistent-nodemap
processed revlogs:
@@ -623,6 +628,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -630,6 +636,7 @@
persistent-nodemap: no yes no
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -638,8 +645,9 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd no-dirstate-v2 !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd dirstate-v2 !)
added: persistent-nodemap
persistent-nodemap
@@ -678,8 +686,9 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd no-dirstate-v2 !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd dirstate-v2 !)
optimisations: re-delta-all
@@ -820,9 +829,9 @@
No race condition
$ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
- adding [s] 00manifest.n (70 bytes)
+ adding [s] 00manifest.n (62 bytes)
adding [s] 00manifest-*.nd (118 KB) (glob)
- adding [s] 00changelog.n (70 bytes)
+ adding [s] 00changelog.n (62 bytes)
adding [s] 00changelog-*.nd (118 KB) (glob)
adding [s] 00manifest.d (452 KB) (no-zstd !)
adding [s] 00manifest.d (491 KB) (zstd !)
@@ -868,7 +877,7 @@
test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
test-repo/.hg/store/00changelog.i: size=320384
- test-repo/.hg/store/00changelog.n: size=70
+ test-repo/.hg/store/00changelog.n: size=62
$ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
uid: * (glob)
tip-rev: 5005
@@ -890,9 +899,9 @@
$ touch $HG_TEST_STREAM_WALKED_FILE_2
$ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
$ cat clone-output
- adding [s] 00manifest.n (70 bytes)
+ adding [s] 00manifest.n (62 bytes)
adding [s] 00manifest-*.nd (118 KB) (glob)
- adding [s] 00changelog.n (70 bytes)
+ adding [s] 00changelog.n (62 bytes)
adding [s] 00changelog-*.nd (118 KB) (glob)
adding [s] 00manifest.d (452 KB) (no-zstd !)
adding [s] 00manifest.d (491 KB) (zstd !)
@@ -908,7 +917,7 @@
stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd !)
stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd !)
stream-clone-race-1/.hg/store/00changelog.i: size=320384
- stream-clone-race-1/.hg/store/00changelog.n: size=70
+ stream-clone-race-1/.hg/store/00changelog.n: size=62
$ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
uid: * (glob)
@@ -963,7 +972,7 @@
test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
test-repo/.hg/store/00changelog.i: size=320448
- test-repo/.hg/store/00changelog.n: size=70
+ test-repo/.hg/store/00changelog.n: size=62
$ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
uid: * (glob)
tip-rev: 5006
@@ -989,9 +998,9 @@
$ touch $HG_TEST_STREAM_WALKED_FILE_2
$ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
$ cat clone-output-2
- adding [s] 00manifest.n (70 bytes)
+ adding [s] 00manifest.n (62 bytes)
adding [s] 00manifest-*.nd (118 KB) (glob)
- adding [s] 00changelog.n (70 bytes)
+ adding [s] 00changelog.n (62 bytes)
adding [s] 00changelog-*.nd (118 KB) (glob)
adding [s] 00manifest.d (492 KB) (zstd !)
adding [s] 00manifest.d (452 KB) (no-zstd !)
@@ -1009,7 +1018,7 @@
stream-clone-race-2/.hg/store/00changelog.d: size=376950 (zstd !)
stream-clone-race-2/.hg/store/00changelog.d: size=368949 (no-zstd !)
stream-clone-race-2/.hg/store/00changelog.i: size=320448
- stream-clone-race-2/.hg/store/00changelog.n: size=70
+ stream-clone-race-2/.hg/store/00changelog.n: size=62
$ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
uid: * (glob)
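
An editorial aside on the output churn above: the persistent-nodemap docket
uid was shortened from 16 to 8 hex characters, which is why every
`00changelog-????????????????.nd` glob becomes `00changelog-????????.nd` and
the 00changelog.n docket shrinks by 8 bytes, from 70 to 62. A minimal Python
sketch of deriving such an id (the helper name and derivation are assumptions,
not Mercurial's actual docket code):

    import os

    ID_SIZE = 8  # hex characters; previously 16

    def make_uid(id_size=ID_SIZE):
        """Return a short random id for a nodemap data file name (sketch)."""
        return os.urandom(id_size // 2).hex().encode('ascii')

    datafile = b'00changelog-%s.nd' % make_uid()  # matches the ????????.nd glob
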
--- a/tests/test-phabricator.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-phabricator.t Mon Jun 07 17:10:35 2021 -0400
@@ -509,9 +509,8 @@
A bad .arcconfig doesn't error out
$ echo 'garbage' > .arcconfig
- $ hg config phabricator --debug
+ $ hg config phabricator --source
invalid JSON in $TESTTMP/repo/.arcconfig
- read config from: */.hgrc (glob)
*/.hgrc:*: phabricator.debug=True (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.url=https://phab.mercurial-scm.org/ (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=HG (glob)
@@ -524,8 +523,7 @@
> EOF
$ cp $TESTDIR/../.arcconfig .
$ mv .hg/hgrc .hg/hgrc.bak
- $ hg config phabricator --debug
- read config from: */.hgrc (glob)
+ $ hg config phabricator --source
*/.hgrc:*: phabricator.debug=True (glob)
$TESTTMP/repo/.arcconfig: phabricator.callsign=HG
$TESTTMP/repo/.arcconfig: phabricator.url=https://phab.mercurial-scm.org/
@@ -536,8 +534,7 @@
> url = local
> callsign = local
> EOF
- $ hg config phabricator --debug
- read config from: */.hgrc (glob)
+ $ hg config phabricator --source
*/.hgrc:*: phabricator.debug=True (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.url=local (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=local (glob)
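
The hunks above replace `hg config --debug` with the dedicated `--source`
flag, which prints each value prefixed by the file and line it came from. A
hedged sketch of that kind of value-with-source bookkeeping (illustrative
only, not Mercurial's ui/config implementation):

    # Track (value, source) pairs per config key, as `hg config --source` shows.
    config = {}

    def set_config(section, name, value, source):
        config[(section, name)] = (value, source)

    set_config('phabricator', 'url', 'https://phab.mercurial-scm.org/',
               '$TESTTMP/repo/.hg/hgrc:2')  # hypothetical source location

    for (section, name), (value, source) in sorted(config.items()):
        print('%s: %s.%s=%s' % (source, section, name, value))
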
--- a/tests/test-phases.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-phases.t Mon Jun 07 17:10:35 2021 -0400
@@ -884,6 +884,7 @@
$ cd no-internal-phase
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -912,6 +913,7 @@
$ cd internal-phase
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
internal-phase
--- a/tests/test-pull-bundle.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-pull-bundle.t Mon Jun 07 17:10:35 2021 -0400
@@ -185,7 +185,7 @@
adding changesets
adding manifests
adding file changes
- abort: 00changelog.i@66f7d451a68b85ed82ff5fcc254daf50c74144bd: no node
+ abort: 00changelog@66f7d451a68b85ed82ff5fcc254daf50c74144bd: no node
[50]
$ cd ..
$ killdaemons.py
--- a/tests/test-purge.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-purge.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,3 +1,17 @@
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
init
$ hg init t
--- a/tests/test-racy-mutations.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-racy-mutations.t Mon Jun 07 17:10:35 2021 -0400
@@ -91,7 +91,7 @@
$ hg debugrevlogindex -c
rev linkrev nodeid p1 p2
0 0 222799e2f90b 000000000000 000000000000
- 1 1 6f124f6007a0 222799e2f90b 000000000000
+ 1 1 6f124f6007a0 222799e2f90b 000000000000 (missing-correct-output !)
And, because of transactions, there's none in the manifestlog either.
$ hg debugrevlogindex -m
rev linkrev nodeid p1 p2
--- a/tests/test-rebase-collapse.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-rebase-collapse.t Mon Jun 07 17:10:35 2021 -0400
@@ -549,8 +549,8 @@
o 0: f447d5abf5ea 'add'
$ hg rebase --collapse -r 1 -d 0
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
Test collapsing in place
--- a/tests/test-rebase-scenario-global.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-rebase-scenario-global.t Mon Jun 07 17:10:35 2021 -0400
@@ -328,11 +328,11 @@
nothing to rebase
[1]
$ hg rebase -d 5 -b 6
- abort: cannot rebase public changesets
+ abort: cannot rebase public changesets: e1c4361dd923
(see 'hg help phases' for details)
[10]
$ hg rebase -d 5 -r '1 + (6::)'
- abort: cannot rebase public changesets
+ abort: cannot rebase public changesets: e1c4361dd923
(see 'hg help phases' for details)
[10]
@@ -452,8 +452,8 @@
$ hg clone -q -u . ah ah1
$ cd ah1
$ hg rebase -r '2::8' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 2 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '2::8' -d 1 -k
rebasing 2:c9e50f6cdc55 "C"
@@ -498,8 +498,8 @@
$ hg clone -q -u . ah ah2
$ cd ah2
$ hg rebase -r '3::8' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 2 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '3::8' -d 1 --keep
rebasing 3:ffd453c31098 "D"
@@ -541,8 +541,8 @@
$ hg clone -q -u . ah ah3
$ cd ah3
$ hg rebase -r '3::7' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 3 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '3::7' -d 1 --keep
rebasing 3:ffd453c31098 "D"
@@ -581,8 +581,8 @@
$ hg clone -q -u . ah ah4
$ cd ah4
$ hg rebase -r '3::(7+5)' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '3::(7+5)' -d 1 --keep
rebasing 3:ffd453c31098 "D"
--- a/tests/test-remotefilelog-clone-tree.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-remotefilelog-clone-tree.t Mon Jun 07 17:10:35 2021 -0400
@@ -27,6 +27,7 @@
$ cd shallow
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
fncache
generaldelta
@@ -70,6 +71,7 @@
$ cd shallow2
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
fncache
generaldelta
@@ -113,6 +115,7 @@
$ ls shallow3/.hg/store/data
$ cat shallow3/.hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
fncache
generaldelta
--- a/tests/test-remotefilelog-clone.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-remotefilelog-clone.t Mon Jun 07 17:10:35 2021 -0400
@@ -24,6 +24,7 @@
$ cd shallow
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
fncache
generaldelta
@@ -60,6 +61,7 @@
$ cd shallow2
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
fncache
generaldelta
@@ -111,6 +113,7 @@
$ ls shallow3/.hg/store/data
$ cat shallow3/.hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
fncache
generaldelta
--- a/tests/test-remotefilelog-datapack.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-remotefilelog-datapack.py Mon Jun 07 17:10:35 2021 -0400
@@ -16,7 +16,7 @@
# Load the local remotefilelog, not the system one
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
from mercurial import policy
if not policy._packageprefs.get(policy.policy, (False, False))[1]:
@@ -63,7 +63,14 @@
def createPack(self, revisions=None, packdir=None):
if revisions is None:
- revisions = [(b"filename", self.getFakeHash(), nullid, b"content")]
+ revisions = [
+ (
+ b"filename",
+ self.getFakeHash(),
+ sha1nodeconstants.nullid,
+ b"content",
+ )
+ ]
if packdir is None:
packdir = self.makeTempDir()
@@ -86,7 +93,7 @@
filename = b"foo"
node = self.getHash(content)
- revisions = [(filename, node, nullid, content)]
+ revisions = [(filename, node, sha1nodeconstants.nullid, content)]
pack = self.createPack(revisions)
if self.paramsavailable:
self.assertEqual(
@@ -126,7 +133,7 @@
"""Test putting multiple delta blobs into a pack and read the chain."""
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
content = b"abcdef%d" % i
node = self.getHash(content)
@@ -157,7 +164,7 @@
for j in range(random.randint(1, 100)):
content = b"content-%d" % j
node = self.getHash(content)
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
if len(filerevs) > 0:
lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
filerevs.append(node)
@@ -185,7 +192,9 @@
b'Z': b'random_string',
b'_': b'\0' * i,
}
- revisions.append((filename, node, nullid, content, meta))
+ revisions.append(
+ (filename, node, sha1nodeconstants.nullid, content, meta)
+ )
pack = self.createPack(revisions)
for name, node, x, content, origmeta in revisions:
parsedmeta = pack.getmeta(name, node)
@@ -198,7 +207,7 @@
"""Test the getmissing() api."""
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
content = b"abcdef%d" % i
node = self.getHash(content)
@@ -225,7 +234,7 @@
pack = self.createPack()
try:
- pack.add(b'filename', nullid, b'contents')
+ pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
self.assertTrue(False, "datapack.add should throw")
except RuntimeError:
pass
@@ -264,7 +273,9 @@
content = filename
node = self.getHash(content)
blobs[(filename, node)] = content
- revisions.append((filename, node, nullid, content))
+ revisions.append(
+ (filename, node, sha1nodeconstants.nullid, content)
+ )
pack = self.createPack(revisions)
if self.paramsavailable:
@@ -288,7 +299,12 @@
for i in range(numpacks):
chain = []
- revision = (b'%d' % i, self.getFakeHash(), nullid, b"content")
+ revision = (
+ b'%d' % i,
+ self.getFakeHash(),
+ sha1nodeconstants.nullid,
+ b"content",
+ )
for _ in range(revisionsperpack):
chain.append(revision)
@@ -346,7 +362,9 @@
filename = b"filename-%d" % i
content = b"content-%d" % i
node = self.getHash(content)
- revisions.append((filename, node, nullid, content))
+ revisions.append(
+ (filename, node, sha1nodeconstants.nullid, content)
+ )
path = self.createPack(revisions).path
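
The mechanical change in these Python tests is the move from the module-level
`nullid` constant to `sha1nodeconstants.nullid`, tying the constant to a
specific hash algorithm; the motivation (leaving room for non-SHA-1 hashes) is
inferred from the migration itself. A sketch of the two spellings:

    from mercurial.node import sha1nodeconstants

    # old spelling: from mercurial.node import nullid
    nullid = sha1nodeconstants.nullid

    # For SHA-1 revlogs the value is unchanged: twenty zero bytes.
    assert nullid == b'\0' * 20
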
--- a/tests/test-remotefilelog-histpack.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-remotefilelog-histpack.py Mon Jun 07 17:10:35 2021 -0400
@@ -13,7 +13,7 @@
import silenttestrunner
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
from mercurial import (
pycompat,
ui as uimod,
@@ -59,8 +59,8 @@
(
b"filename",
self.getFakeHash(),
- nullid,
- nullid,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
self.getFakeHash(),
None,
)
@@ -119,10 +119,19 @@
"""
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
node = self.getFakeHash()
- revisions.append((filename, node, lastnode, nullid, nullid, None))
+ revisions.append(
+ (
+ filename,
+ node,
+ lastnode,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ None,
+ )
+ )
lastnode = node
# revisions must be added in topological order, newest first
@@ -148,17 +157,17 @@
for i in range(100):
filename = b"filename-%d" % i
entries = []
- p2 = nullid
- linknode = nullid
+ p2 = sha1nodeconstants.nullid
+ linknode = sha1nodeconstants.nullid
for j in range(random.randint(1, 100)):
node = self.getFakeHash()
- p1 = nullid
+ p1 = sha1nodeconstants.nullid
if len(entries) > 0:
p1 = entries[random.randint(0, len(entries) - 1)]
entries.append(node)
revisions.append((filename, node, p1, p2, linknode, None))
allentries[(filename, node)] = (p1, p2, linknode)
- if p1 == nullid:
+ if p1 == sha1nodeconstants.nullid:
ancestorcounts[(filename, node)] = 1
else:
newcount = ancestorcounts[(filename, p1)] + 1
@@ -182,10 +191,19 @@
def testGetNodeInfo(self):
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
node = self.getFakeHash()
- revisions.append((filename, node, lastnode, nullid, nullid, None))
+ revisions.append(
+ (
+ filename,
+ node,
+ lastnode,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ None,
+ )
+ )
lastnode = node
pack = self.createPack(revisions)
@@ -233,7 +251,14 @@
pack = self.createPack()
try:
- pack.add(b'filename', nullid, nullid, nullid, nullid, None)
+ pack.add(
+ b'filename',
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ None,
+ )
self.assertTrue(False, "historypack.add should throw")
except RuntimeError:
pass
--- a/tests/test-remotefilelog-log.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-remotefilelog-log.t Mon Jun 07 17:10:35 2021 -0400
@@ -27,6 +27,7 @@
$ cd shallow
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
fncache
generaldelta
--- a/tests/test-remotefilelog-prefetch.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-remotefilelog-prefetch.t Mon Jun 07 17:10:35 2021 -0400
@@ -237,6 +237,7 @@
$ hg mv z2 z3
z2: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ find $CACHEDIR -type f | sort
.. The following output line about file fetches is globbed because it is
--- a/tests/test-repo-compengines.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-repo-compengines.t Mon Jun 07 17:10:35 2021 -0400
@@ -11,6 +11,7 @@
$ cd default
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -60,6 +61,7 @@
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -79,6 +81,7 @@
$ cd zstd
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -183,6 +186,7 @@
$ cat none-compression/.hg/requires
dotencode
exp-compression-none
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
--- a/tests/test-requires.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-requires.t Mon Jun 07 17:10:35 2021 -0400
@@ -5,7 +5,7 @@
$ hg commit -m test
$ rm .hg/requires
$ hg tip
- abort: unknown version (65535) in revlog 00changelog.i
+ abort: unknown version (65535) in revlog 00changelog
[50]
$ echo indoor-pool > .hg/requires
$ hg tip
@@ -50,6 +50,7 @@
> EOF
$ hg -R supported debugrequirements
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
featuresetup-test
fncache
generaldelta
--- a/tests/test-revlog-raw.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-revlog-raw.py Mon Jun 07 17:10:35 2021 -0400
@@ -6,7 +6,6 @@
import hashlib
import sys
-from mercurial.node import nullid
from mercurial import (
encoding,
revlog,
@@ -15,10 +14,37 @@
)
from mercurial.revlogutils import (
+ constants,
deltas,
flagutil,
)
+
+class _NoTransaction(object):
+ """transaction like object to update the nodemap outside a transaction"""
+
+ def __init__(self):
+ self._postclose = {}
+
+ def addpostclose(self, callback_id, callback_func):
+ self._postclose[callback_id] = callback_func
+
+ def registertmp(self, *args, **kwargs):
+ pass
+
+ def addbackup(self, *args, **kwargs):
+ pass
+
+ def add(self, *args, **kwargs):
+ pass
+
+ def addabort(self, *args, **kwargs):
+ pass
+
+ def _report(self, *args):
+ pass
+
+
# TESTTMP is optional. This makes it convenient to run without run-tests.py
tvfs = vfs.vfs(encoding.environ.get(b'TESTTMP', b'/tmp'))
@@ -79,10 +105,11 @@
return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
-def newrevlog(name=b'_testrevlog.i', recreate=False):
+def newrevlog(name=b'_testrevlog', recreate=False):
if recreate:
- tvfs.tryunlink(name)
- rlog = revlog.revlog(tvfs, name)
+ tvfs.tryunlink(name + b'.i')
+ target = (constants.KIND_OTHER, b'test')
+ rlog = revlog.revlog(tvfs, target=target, radix=name)
return rlog
@@ -93,7 +120,7 @@
"""
nextrev = len(rlog)
p1 = rlog.node(nextrev - 1)
- p2 = nullid
+ p2 = rlog.nullid
if isext:
flags = revlog.REVIDX_EXTSTORED
else:
@@ -110,7 +137,7 @@
rlog._storedeltachains = True
-def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
+def addgroupcopy(rlog, tr, destname=b'_destrevlog', optimaldelta=True):
"""Copy revlog to destname using revlog.addgroup. Return the copied revlog.
This emulates push or pull. They use changegroup. Changegroup requires
@@ -127,7 +154,7 @@
class dummychangegroup(object):
@staticmethod
def deltachunk(pnode):
- pnode = pnode or nullid
+ pnode = pnode or rlog.nullid
parentrev = rlog.rev(pnode)
r = parentrev + 1
if r >= len(rlog):
@@ -142,7 +169,7 @@
return {
b'node': rlog.node(r),
b'p1': pnode,
- b'p2': nullid,
+ b'p2': rlog.nullid,
b'cs': rlog.node(rlog.linkrev(r)),
b'flags': rlog.flags(r),
b'deltabase': rlog.node(deltaparent),
@@ -175,7 +202,7 @@
return dlog
-def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
+def lowlevelcopy(rlog, tr, destname=b'_destrevlog'):
"""Like addgroupcopy, but use the low level revlog._addrevision directly.
It exercises some code paths that are hard to reach easily otherwise.
@@ -183,7 +210,7 @@
dlog = newrevlog(destname, recreate=True)
for r in rlog:
p1 = rlog.node(r - 1)
- p2 = nullid
+ p2 = rlog.nullid
if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
text = rlog.rawdata(r)
cachedelta = None
@@ -200,19 +227,17 @@
text = None
cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
flags = rlog.flags(r)
- ifh = dfh = None
- try:
- ifh = dlog.opener(dlog.indexfile, b'a+')
- if not dlog._inline:
- dfh = dlog.opener(dlog.datafile, b'a+')
+ with dlog._writing(_NoTransaction()):
dlog._addrevision(
- rlog.node(r), text, tr, r, p1, p2, flags, cachedelta, ifh, dfh
+ rlog.node(r),
+ text,
+ tr,
+ r,
+ p1,
+ p2,
+ flags,
+ cachedelta,
)
- finally:
- if dfh is not None:
- dfh.close()
- if ifh is not None:
- ifh.close()
return dlog
@@ -425,7 +450,7 @@
def makesnapshot(tr):
- rl = newrevlog(name=b'_snaprevlog3.i', recreate=True)
+ rl = newrevlog(name=b'_snaprevlog3', recreate=True)
for i in data:
appendrev(rl, i, tr)
return rl
@@ -481,7 +506,7 @@
checkrevlog(rl2, expected)
print('addgroupcopy test passed')
# Copy via revlog.clone
- rl3 = newrevlog(name=b'_destrevlog3.i', recreate=True)
+ rl3 = newrevlog(name=b'_destrevlog3', recreate=True)
rl.clone(tr, rl3)
checkrevlog(rl3, expected)
print('clone test passed')
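
The `newrevlog` changes above switch from naming the index file directly to
passing a radix (the base name without the `.i`/`.d` suffix) plus a
`(kind, detail)` target tuple. A condensed sketch of the new constructor
call, lifted from the test itself:

    from mercurial import encoding, revlog, vfs
    from mercurial.revlogutils import constants

    tvfs = vfs.vfs(encoding.environ.get(b'TESTTMP', b'/tmp'))

    # old API: rlog = revlog.revlog(tvfs, b'_testrevlog.i')
    rlog = revlog.revlog(
        tvfs,
        target=(constants.KIND_OTHER, b'test'),  # what kind of data is stored
        radix=b'_testrevlog',  # .i/.d file names are derived from this
    )
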
--- a/tests/test-revlog-v2.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-revlog-v2.t Mon Jun 07 17:10:35 2021 -0400
@@ -22,8 +22,10 @@
$ cd empty-repo
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-revlogv2.2
fncache
+ generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
sparserevlog
@@ -37,7 +39,7 @@
... fh.write(b'\xff\x00\xde\xad') and None
$ hg log
- abort: unknown flags (0xff00) in version 57005 revlog 00changelog.i
+ abort: unknown flags (0xff00) in version 57005 revlog 00changelog
[50]
$ cd ..
@@ -56,12 +58,39 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: initial
+
Header written as expected
$ f --hexdump --bytes 4 .hg/store/00changelog.i
.hg/store/00changelog.i:
- 0000: 00 01 de ad |....|
+ 0000: 00 00 de ad |....|
$ f --hexdump --bytes 4 .hg/store/data/foo.i
.hg/store/data/foo.i:
- 0000: 00 01 de ad |....|
+ 0000: 00 00 de ad |....|
+
+Bundles use a compatible changegroup format
+------------------------------------------
+
+ $ hg bundle --all ../basic.hg
+ 1 changesets found
+ $ hg debugbundle --spec ../basic.hg
+ bzip2-v2
+
+The expected files are generated
+--------------------------------
+
+We should have:
+- a docket
+- a index file with a unique name
+- a data file
+
+ $ ls .hg/store/00changelog* .hg/store/00manifest*
+ .hg/store/00changelog-1335303a.sda
+ .hg/store/00changelog-6b8ab34b.idx
+ .hg/store/00changelog-b875dfc5.dat
+ .hg/store/00changelog.i
+ .hg/store/00manifest-05a21d65.idx
+ .hg/store/00manifest-43c37dde.dat
+ .hg/store/00manifest-e2c9362a.sda
+ .hg/store/00manifest.i
--- a/tests/test-revlog.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-revlog.t Mon Jun 07 17:10:35 2021 -0400
@@ -7,7 +7,7 @@
... fh.write(b'\x00\x01\x00\x00') and None
$ hg log
- abort: unknown flags (0x01) in version 0 revlog 00changelog.i
+ abort: unknown flags (0x01) in version 0 revlog 00changelog
[50]
Unknown flags on revlog version 1 are rejected
@@ -16,7 +16,7 @@
... fh.write(b'\x00\x04\x00\x01') and None
$ hg log
- abort: unknown flags (0x04) in version 1 revlog 00changelog.i
+ abort: unknown flags (0x04) in version 1 revlog 00changelog
[50]
Unknown version is rejected
@@ -25,7 +25,7 @@
... fh.write(b'\x00\x00\xbe\xef') and None
$ hg log
- abort: unknown version (48879) in revlog 00changelog.i
+ abort: unknown version (48879) in revlog 00changelog
[50]
$ cd ..
@@ -45,9 +45,10 @@
0 2 99e0332bd498 000000000000 000000000000
1 3 6674f57a23d8 99e0332bd498 000000000000
+ >>> from mercurial.revlogutils.constants import KIND_OTHER
>>> from mercurial import revlog, vfs
>>> tvfs = vfs.vfs(b'.')
>>> tvfs.options = {b'revlogv1': True}
- >>> rl = revlog.revlog(tvfs, b'a.i')
+ >>> rl = revlog.revlog(tvfs, target=(KIND_OTHER, b'test'), radix=b'a')
>>> rl.revision(1)
mpatchError(*'patch cannot be decoded'*) (glob)
--- a/tests/test-setdiscovery.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-setdiscovery.t Mon Jun 07 17:10:35 2021 -0400
@@ -1536,7 +1536,7 @@
searching for changes
101 102 103 104 105 106 107 108 109 110 (no-eol)
$ hg -R r1 --config extensions.blackbox= blackbox --config blackbox.track=
- * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --cmdserver chgunix * (glob) (chg !)
+ * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* (glob)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> found 101 common and 1 unknown server heads, 1 roundtrips in *.????s (glob)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* exited 0 after *.?? seconds (glob)
--- a/tests/test-share-safe.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-share-safe.t Mon Jun 07 17:10:35 2021 -0400
@@ -19,6 +19,7 @@
$ hg init source
$ cd source
$ cat .hg/requires
+ exp-dirstate-v2 (dirstate-v2 !)
share-safe
$ cat .hg/store/requires
dotencode
@@ -29,6 +30,7 @@
store
$ hg debugrequirements
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -52,11 +54,13 @@
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd shared1
$ cat .hg/requires
+ exp-dirstate-v2 (dirstate-v2 !)
share-safe
shared
$ hg debugrequirements -R ../source
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -66,6 +70,7 @@
$ hg debugrequirements
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -219,7 +224,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (dirstate-v2 !)
added: revlog-compression-zstd
processed revlogs:
@@ -245,8 +251,10 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
added: persistent-nodemap
processed revlogs:
@@ -319,6 +327,7 @@
$ cd non-share-safe
$ hg debugrequirements
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -337,6 +346,7 @@
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg debugrequirements -R nss-share
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -349,7 +359,8 @@
$ hg debugupgraderepo -q
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
added: share-safe
processed revlogs:
@@ -361,7 +372,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
added: share-safe
share-safe
@@ -382,6 +394,7 @@
$ hg debugrequirements
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -390,6 +403,7 @@
store
$ cat .hg/requires
+ exp-dirstate-v2 (dirstate-v2 !)
share-safe
$ cat .hg/store/requires
@@ -439,7 +453,8 @@
$ hg debugupgraderepo -q
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
removed: share-safe
processed revlogs:
@@ -451,7 +466,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
removed: share-safe
processed revlogs:
@@ -469,6 +485,7 @@
$ hg debugrequirements
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -477,6 +494,7 @@
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
@@ -534,7 +552,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
+ preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
added: share-safe
processed revlogs:
@@ -545,6 +564,7 @@
repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
$ hg debugrequirements
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
revlogv1
--- a/tests/test-sidedata-exchange.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-sidedata-exchange.t Mon Jun 07 17:10:35 2021 -0400
@@ -8,12 +8,12 @@
Pusher and pushed have sidedata enabled
---------------------------------------
- $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> sidedata-source/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
> EOF
- $ hg init sidedata-target --config format.exp-use-side-data=yes
+ $ hg init sidedata-target --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> sidedata-target/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
@@ -71,12 +71,12 @@
---------------------------------------
$ rm -rf sidedata-source sidedata-target
- $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> sidedata-source/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
> EOF
- $ hg init sidedata-target --config format.exp-use-side-data=yes
+ $ hg init sidedata-target --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> sidedata-target/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
@@ -138,12 +138,12 @@
--------------------------------------------
$ rm -rf sidedata-source sidedata-target
- $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> sidedata-source/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
> EOF
- $ hg init sidedata-target --config format.exp-use-side-data=no
+ $ hg init sidedata-target --config experimental.revlogv2=no
$ cd sidedata-source
$ echo a > a
$ echo b > b
@@ -186,12 +186,12 @@
--------------------------------------------
$ rm -rf sidedata-source sidedata-target
- $ hg init sidedata-source --config format.exp-use-side-data=yes
+ $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> sidedata-source/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
> EOF
- $ hg init sidedata-target --config format.exp-use-side-data=no
+ $ hg init sidedata-target --config experimental.revlogv2=no
$ cd sidedata-source
$ echo a > a
$ echo b > b
@@ -239,8 +239,8 @@
(Push) Target has strict superset of the source
-----------------------------------------------
- $ hg init source-repo --config format.exp-use-side-data=yes
- $ hg init target-repo --config format.exp-use-side-data=yes
+ $ hg init source-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
+ $ hg init target-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> target-repo/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata.py
@@ -311,12 +311,12 @@
target.
$ rm -rf source-repo target-repo
- $ hg init source-repo --config format.exp-use-side-data=yes
+ $ hg init source-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> source-repo/.hg/hgrc
> [extensions]
> testsidedata3=$TESTDIR/testlib/ext-sidedata-3.py
> EOF
- $ hg init target-repo --config format.exp-use-side-data=yes
+ $ hg init target-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> target-repo/.hg/hgrc
> [extensions]
> testsidedata4=$TESTDIR/testlib/ext-sidedata-4.py
@@ -412,8 +412,8 @@
-----------------------------------------------
$ rm -rf source-repo target-repo
- $ hg init source-repo --config format.exp-use-side-data=yes
- $ hg init target-repo --config format.exp-use-side-data=yes
+ $ hg init source-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
+ $ hg init target-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cat << EOF >> target-repo/.hg/hgrc
> [extensions]
> testsidedata=$TESTDIR/testlib/ext-sidedata.py
--- a/tests/test-sidedata.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-sidedata.t Mon Jun 07 17:10:35 2021 -0400
@@ -10,7 +10,7 @@
> testsidedata=$TESTDIR/testlib/ext-sidedata.py
> EOF
- $ hg init test-sidedata --config format.exp-use-side-data=yes
+ $ hg init test-sidedata --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ cd test-sidedata
$ echo aaa > a
$ hg add a
@@ -48,10 +48,11 @@
Check that we can upgrade to sidedata
-------------------------------------
- $ hg init up-no-side-data --config format.exp-use-side-data=no
+ $ hg init up-no-side-data --config experimental.revlogv2=no
$ hg debugformat -v -R up-no-side-data
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -60,13 +61,15 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ hg debugformat -v -R up-no-side-data --config format.exp-use-side-data=yes
+ $ hg debugformat -v -R up-no-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -75,19 +78,21 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no yes no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ hg debugupgraderepo -R up-no-side-data --config format.exp-use-side-data=yes > /dev/null
+ $ hg debugupgraderepo -R up-no-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data > /dev/null
Check that we can downgrade from sidedata
-----------------------------------------
- $ hg init up-side-data --config format.exp-use-side-data=yes
+ $ hg init up-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
$ hg debugformat -v -R up-side-data
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -96,13 +101,15 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: yes no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ hg debugformat -v -R up-side-data --config format.exp-use-side-data=no
+ $ hg debugformat -v -R up-side-data --config experimental.revlogv2=no
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -111,8 +118,9 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: yes no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ hg debugupgraderepo -R up-side-data --config format.exp-use-side-data=no > /dev/null
+ $ hg debugupgraderepo -R up-side-data --config experimental.revlogv2=no > /dev/null
--- a/tests/test-single-head.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-single-head.t Mon Jun 07 17:10:35 2021 -0400
@@ -65,6 +65,9 @@
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit c_dD0
created new head
+ $ hg log -r 'heads(::branch("default"))' -T '{node|short}\n'
+ 286d02a6e2a2
+ 9bf953aa81f6
$ hg push -f
pushing to $TESTTMP/single-head-server
searching for changes
--- a/tests/test-sparse-requirement.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-sparse-requirement.t Mon Jun 07 17:10:35 2021 -0400
@@ -18,6 +18,7 @@
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -37,6 +38,7 @@
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-sparse
fncache
generaldelta
@@ -59,6 +61,7 @@
$ cat .hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
--- a/tests/test-split.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-split.t Mon Jun 07 17:10:35 2021 -0400
@@ -77,7 +77,7 @@
$ hg phase --public -r 'all()'
$ hg split .
- abort: cannot split public changesets
+ abort: cannot split public changesets: 1df0d5c5a3ab
(see 'hg help phases' for details)
[10]
@@ -466,7 +466,8 @@
$ cd $TESTTMP/d
#if obsstore-off
$ runsplit -r 1 --no-rebase
- abort: cannot split changeset with children
+ abort: cannot split changeset, as that will orphan 3 descendants
+ (see 'hg help evolution.instability')
[10]
#else
$ runsplit -r 1 --no-rebase >/dev/null
@@ -517,7 +518,8 @@
$ eval `hg tags -T '{tag}={node}\n'`
$ rm .hg/localtags
$ hg split $B --config experimental.evolution=createmarkers
- abort: cannot split changeset with children
+ abort: cannot split changeset, as that will orphan 4 descendants
+ (see 'hg help evolution.instability')
[10]
$ cat > $TESTTMP/messages <<EOF
> Split B
--- a/tests/test-sqlitestore.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-sqlitestore.t Mon Jun 07 17:10:35 2021 -0400
@@ -15,6 +15,7 @@
$ hg init empty-no-sqlite
$ cat empty-no-sqlite/.hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
fncache
generaldelta
persistent-nodemap (rust !)
@@ -28,6 +29,7 @@
$ hg --config storage.new-repo-backend=sqlite init empty-sqlite
$ cat empty-sqlite/.hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-sqlite-001
exp-sqlite-comp-001=zstd (zstd !)
exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$ (no-zstd !)
@@ -49,6 +51,7 @@
$ hg --config storage.sqlite.compression=zlib init empty-zlib
$ cat empty-zlib/.hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-sqlite-001
exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$
fncache
@@ -64,6 +67,7 @@
$ hg --config storage.sqlite.compression=none init empty-none
$ cat empty-none/.hg/requires
dotencode
+ exp-dirstate-v2 (dirstate-v2 !)
exp-sqlite-001
exp-sqlite-comp-001=none
fncache
--- a/tests/test-status.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-status.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,3 +1,23 @@
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if no-rust
+ $ hg init repo0 --config format.exp-dirstate-v2=1
+ abort: dirstate v2 format requested by config but not supported (requires Rust extensions)
+ [255]
+#endif
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
$ hg init repo1
$ cd repo1
$ mkdir a b a/1 b/1 b/2
@@ -681,6 +701,32 @@
$ ln -s ../repo0/.hg
$ hg status
+If the size hasn’t changed but mtime has, status needs to read the contents
+of the file to check whether it has changed
+
+ $ echo 1 > a
+ $ echo 1 > b
+ $ touch -t 200102030000 a b
+ $ hg commit -Aqm '#0'
+ $ echo 2 > a
+ $ touch -t 200102040000 a b
+ $ hg status
+ M a
+
+Asking specifically for the status of a deleted/removed file
+
+ $ rm a
+ $ rm b
+ $ hg status a
+ ! a
+ $ hg rm a
+ $ hg rm b
+ $ hg status a
+ R a
+ $ hg commit -qm '#1'
+ $ hg status a
+ a: $ENOENT$
+
Check using include flag with pattern when status does not need to traverse
the working directory (issue6483)
@@ -692,6 +738,167 @@
$ hg st -aI "*.py"
A a.py
+Also check exclude pattern
+
+ $ hg st -aX "*.rs"
+ A a.py
+
+issue6335
+When a directory containing a tracked file gets symlinked, as of 5.8
+`hg st` only gives the correct answer about clean (or deleted) files
+if also listing unknowns.
+The tree-based dirstate and status algorithms fix this:
+
+#if symlink no-dirstate-v1
+
+ $ cd ..
+ $ hg init issue6335
+ $ cd issue6335
+ $ mkdir foo
+ $ touch foo/a
+ $ hg ci -Ama
+ adding foo/a
+ $ mv foo bar
+ $ ln -s bar foo
+ $ hg status
+ ! foo/a
+ ? bar/a
+ ? foo
+
+ $ hg status -c # incorrect output with `dirstate-v1`
+ $ hg status -cu
+ ? bar/a
+ ? foo
+ $ hg status -d # incorrect output with `dirstate-v1`
+ ! foo/a
+ $ hg status -du
+ ! foo/a
+ ? bar/a
+ ? foo
+
+#endif
+
+
+Create a repo with files in each possible status
+
+ $ cd ..
+ $ hg init repo7
+ $ cd repo7
+ $ mkdir subdir
+ $ touch clean modified deleted removed
+ $ touch subdir/clean subdir/modified subdir/deleted subdir/removed
+ $ echo ignored > .hgignore
+ $ hg ci -Aqm '#0'
+ $ echo 1 > modified
+ $ echo 1 > subdir/modified
+ $ rm deleted
+ $ rm subdir/deleted
+ $ hg rm removed
+ $ hg rm subdir/removed
+ $ touch unknown ignored
+ $ touch subdir/unknown subdir/ignored
+
+Check the output
+
+ $ hg status
+ M modified
+ M subdir/modified
+ R removed
+ R subdir/removed
+ ! deleted
+ ! subdir/deleted
+ ? subdir/unknown
+ ? unknown
+
+ $ hg status -mard
+ M modified
+ M subdir/modified
+ R removed
+ R subdir/removed
+ ! deleted
+ ! subdir/deleted
+
+ $ hg status -A
+ M modified
+ M subdir/modified
+ R removed
+ R subdir/removed
+ ! deleted
+ ! subdir/deleted
+ ? subdir/unknown
+ ? unknown
+ I ignored
+ I subdir/ignored
+ C .hgignore
+ C clean
+ C subdir/clean
+
+Note: `hg status some-name` creates a patternmatcher, which is not yet
+supported by the Rust implementation of status; includematcher is supported,
+so --include is used below for that reason
+
+#if unix-permissions
+
+Not having permission to read a directory that contains tracked files makes
+status emit a warning, then behave as if the directory were empty or removed
+entirely:
+
+ $ chmod 0 subdir
+ $ hg status --include subdir
+ subdir: Permission denied
+ R subdir/removed
+ ! subdir/clean
+ ! subdir/deleted
+ ! subdir/modified
+ $ chmod 755 subdir
+
+#endif
+
+Remove a directory that contains tracked files
+
+ $ rm -r subdir
+ $ hg status --include subdir
+ R subdir/removed
+ ! subdir/clean
+ ! subdir/deleted
+ ! subdir/modified
+
+… and replace it with a file
+
+ $ touch subdir
+ $ hg status --include subdir
+ R subdir/removed
+ ! subdir/clean
+ ! subdir/deleted
+ ! subdir/modified
+ ? subdir
+
+Replace a deleted or removed file with a directory
+
+ $ mkdir deleted removed
+ $ touch deleted/1 removed/1
+ $ hg status --include deleted --include removed
+ R removed
+ ! deleted
+ ? deleted/1
+ ? removed/1
+ $ hg add removed/1
+ $ hg status --include deleted --include removed
+ A removed/1
+ R removed
+ ! deleted
+ ? deleted/1
+
+Deeply nested files in an ignored directory are still listed on request
+
+ $ echo ignored-dir >> .hgignore
+ $ mkdir ignored-dir
+ $ mkdir ignored-dir/subdir
+ $ touch ignored-dir/subdir/1
+ $ hg status --ignored
+ I ignored
+ I ignored-dir/subdir/1
+
Check using include flag while listing ignored composes correctly (issue6514)
$ cd ..
@@ -708,3 +915,60 @@
I A.hs
I B.hs
I ignored-folder/ctest.hs
+
+#if dirstate-v2
+
+Check read_dir caching
+
+ $ cd ..
+ $ hg init repo8
+ $ cd repo8
+ $ mkdir subdir
+ $ touch subdir/a subdir/b
+ $ hg ci -Aqm '#0'
+
+The cached mtime is initially unset
+
+ $ hg debugdirstate --dirs --no-dates | grep '^d'
+ d 0 0 unset subdir
+
+It is still not set when there are unknown files
+
+ $ touch subdir/unknown
+ $ hg status
+ ? subdir/unknown
+ $ hg debugdirstate --dirs --no-dates | grep '^d'
+ d 0 0 unset subdir
+
+Now the directory is eligible for caching, so its mtime is saved in the dirstate
+
+ $ rm subdir/unknown
+ $ hg status
+ $ hg debugdirstate --dirs --no-dates | grep '^d'
+ d 0 0 set subdir
+
+This time the command should be ever so slightly faster since it does not need `read_dir("subdir")`
+
+ $ hg status
+
+Creating a new file changes the directory’s mtime, invalidating the cache
+
+ $ touch subdir/unknown
+ $ hg status
+ ? subdir/unknown
+
+ $ rm subdir/unknown
+ $ hg status
+
+Removing a node from the dirstate resets the cache for its parent directory
+
+ $ hg forget subdir/a
+ $ hg debugdirstate --dirs --no-dates | grep '^d'
+ d 0 0 set subdir
+ $ hg ci -qm '#1'
+ $ hg debugdirstate --dirs --no-dates | grep '^d'
+ d 0 0 unset subdir
+ $ hg status
+ ? subdir/a
+
+#endif
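
For readers following the caching behaviour tested above, an illustrative
Python sketch of the idea (not Mercurial's actual Rust implementation): a
directory's mtime is recorded only once listing it yields no unknown files,
and a matching mtime on a later run lets status skip the `read_dir` call.

    import os

    _dir_mtimes = {}  # directory path -> mtime cached while the listing is clean

    def maybe_read_dir(path, is_unknown):
        """Return directory entries, or None when the cached mtime still matches."""
        mtime = os.stat(path).st_mtime_ns
        if _dir_mtimes.get(path) == mtime:
            return None  # cache hit: nothing new since the last clean listing
        entries = os.listdir(path)
        if not any(is_unknown(e) for e in entries):
            _dir_mtimes[path] = mtime  # eligible: no unknown files present
        else:
            _dir_mtimes.pop(path, None)  # stay unset while unknown files exist
        return entries
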
--- a/tests/test-stream-bundle-v2.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-stream-bundle-v2.t Mon Jun 07 17:10:35 2021 -0400
@@ -48,11 +48,13 @@
Stream params: {}
stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (no-zstd !)
stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (zstd no-rust !)
- stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (rust !)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (rust no-dirstate-v2 !)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cexp-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (dirstate-v2 !)
$ hg debugbundle --spec bundle.hg
none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore (no-zstd !)
none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (zstd no-rust !)
- none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (rust !)
+ none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (rust no-dirstate-v2 !)
+ none-v2;stream=v2;requirements%3Ddotencode%2Cexp-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (dirstate-v2 !)
Test that we can apply the bundle as a stream clone bundle
--- a/tests/test-symlinks.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-symlinks.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,5 +1,19 @@
#require symlink
+#testcases dirstate-v1 dirstate-v1-tree dirstate-v2
+
+#if dirstate-v1-tree
+#require rust
+ $ echo '[experimental]' >> $HGRCPATH
+ $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
+#if dirstate-v2
+#require rust
+ $ echo '[format]' >> $HGRCPATH
+ $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
+#endif
+
== tests added in 0.7 ==
$ hg init test-symlinks-0.7; cd test-symlinks-0.7;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-transaction-rollback-on-revlog-split.t Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,184 @@
+Test correctness of revlog inline -> non-inline transition
+----------------------------------------------------------
+
+Helper extension to intercept renames.
+
+ $ cat > $TESTTMP/intercept_rename.py << EOF
+ > import os
+ > import sys
+ > from mercurial import extensions, util
+ >
+ > def extsetup(ui):
+ > def close(orig, *args, **kwargs):
+ > path = args[0]._atomictempfile__name
+ > if path.endswith(b'/.hg/store/data/file.i'):
+ > os._exit(80)
+ > return orig(*args, **kwargs)
+ > extensions.wrapfunction(util.atomictempfile, 'close', close)
+ > EOF
+
+
+Test that offset computation correctly factors in the index entries themselves.
+Also test that the new data file has the correct size if the transaction is
+aborted after the index has been replaced.
+
+The test repo has one small, one moderate, and one big change. The clone has
+the small and moderate changes and will transition to non-inline storage when
+adding the big change.
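+
+For reference, "factor in the index entries" matters because an inline revlog
+interleaves each revision's data with its 64-byte index entry, so the physical
+position of a revision's data is its logical data offset plus one index entry
+per revision up to and including it. An illustrative sketch, assuming
+revlogv1's entry size (not Mercurial's exact code):
+
+    INDEX_ENTRY_SIZE = 64  # revlogv1 index entry size
+
+    def physical_offset(logical_offset, rev):
+        # inline layout: entry 0, data 0, entry 1, data 1, ...
+        return logical_offset + (rev + 1) * INDEX_ENTRY_SIZE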
+
+ $ hg init troffset-computation --config format.revlog-compression=none
+ $ cd troffset-computation
+ $ printf '%20d' '1' > file
+ $ hg commit -Aqm_
+ $ printf '%1024d' '1' > file
+ $ hg commit -Aqm_
+ $ dd if=/dev/zero of=file bs=1k count=128 > /dev/null 2>&1
+ $ hg commit -Aqm_
+ $ cd ..
+
+ $ hg clone -r 1 troffset-computation troffset-computation-copy --config format.revlog-compression=none -q
+ $ cd troffset-computation-copy
+
+Reference size:
+
+ $ f -s .hg/store/data/file*
+ .hg/store/data/file.i: size=1174
+
+ $ cat > .hg/hgrc <<EOF
+ > [hooks]
+ > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
+ > EOF
+#if chg
+ $ hg pull ../troffset-computation
+ pulling from ../troffset-computation
+ [255]
+#else
+ $ hg pull ../troffset-computation
+ pulling from ../troffset-computation
+ [80]
+#endif
+ $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file | tail -1
+ data/file.i 128
+
+The first file.i entry should match the size above.
+The first file.d entry is the temporary record written during the split; the
+second entry was written after the split completed. The sum of the second
+file.d entry and the second file.i entry should match the first file.i entry.
+
+ $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
+ data/file.i 1174
+ data/file.d 0
+ data/file.d 1046
+ data/file.i 128
+ $ hg recover
+ rolling back interrupted transaction
+ (verify step skipped, run `hg verify` to check your repository content)
+ $ f -s .hg/store/data/file*
+ .hg/store/data/file.d: size=1046
+ .hg/store/data/file.i: size=128
+ $ hg tip
+ changeset: 1:3ce491143aec
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: _
+
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ warning: revlog 'data/file.d' not in fncache!
+ checked 2 changesets with 2 changes to 1 files
+ 1 warnings encountered!
+ hint: run "hg debugrebuildfncache" to recover from corrupt fncache
+ $ cd ..
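+
+As an aside, the journal shown above is a list of newline-separated records,
+each a NUL-separated (store file, size) pair; recovery truncates every file
+back to its recorded size. A loose sketch of that replay, with the later
+record for a file winning, which matches the recovered sizes above
+(illustrative only — Mercurial's transaction code does considerably more):
+
+    import os
+
+    def replay_journal(journal_path, store_dir):
+        sizes = {}
+        with open(journal_path, 'rb') as fh:
+            for record in fh.read().splitlines():
+                if record:
+                    path, size = record.rsplit(b'\x00', 1)
+                    sizes[path] = int(size.decode('ascii'))
+        for path, size in sizes.items():
+            target = os.path.join(store_dir, os.fsdecode(path))
+            with open(target, 'r+b') as fh:
+                fh.truncate(size)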
+
+
+Now retry the procedure but intercept the rename of the index and check that
+the journal does not contain the new index size. This demonstrates the edge case
+where the data file is left as garbage.
+
+ $ hg clone -r 1 troffset-computation troffset-computation-copy2 --config format.revlog-compression=none -q
+ $ cd troffset-computation-copy2
+ $ cat > .hg/hgrc <<EOF
+ > [extensions]
+ > intercept_rename = $TESTTMP/intercept_rename.py
+ > [hooks]
+ > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
+ > EOF
+#if chg
+ $ hg pull ../troffset-computation
+ pulling from ../troffset-computation
+ [255]
+#else
+ $ hg pull ../troffset-computation
+ pulling from ../troffset-computation
+ [80]
+#endif
+ $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
+ data/file.i 1174
+ data/file.d 0
+ data/file.d 1046
+
+ $ hg recover
+ rolling back interrupted transaction
+ (verify step skipped, run `hg verify` to check your repository content)
+ $ f -s .hg/store/data/file*
+ .hg/store/data/file.d: size=1046
+ .hg/store/data/file.i: size=1174
+ $ hg tip
+ changeset: 1:3ce491143aec
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: _
+
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 2 changesets with 2 changes to 1 files
+ $ cd ..
+
+
+Repeat the original test but let hg rollback the transaction.
+
+ $ hg clone -r 1 troffset-computation troffset-computation-copy-rb --config format.revlog-compression=none -q
+ $ cd troffset-computation-copy-rb
+ $ cat > .hg/hgrc <<EOF
+ > [hooks]
+ > pretxnchangegroup = false
+ > EOF
+ $ hg pull ../troffset-computation
+ pulling from ../troffset-computation
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ transaction abort!
+ rollback completed
+ abort: pretxnchangegroup hook exited with status 1
+ [40]
+ $ f -s .hg/store/data/file*
+ .hg/store/data/file.d: size=1046
+ .hg/store/data/file.i: size=128
+ $ hg tip
+ changeset: 1:3ce491143aec
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: _
+
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ warning: revlog 'data/file.d' not in fncache!
+ checked 2 changesets with 2 changes to 1 files
+ 1 warnings encountered!
+ hint: run "hg debugrebuildfncache" to recover from corrupt fncache
+ $ cd ..
+
--- a/tests/test-transaction-rollback-on-sigpipe.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-transaction-rollback-on-sigpipe.t Mon Jun 07 17:10:35 2021 -0400
@@ -1,13 +1,10 @@
-Test that, when an hg push is interrupted and the remote side recieves SIGPIPE,
+#require bash
+Test that, when an hg push is interrupted and the remote side receives SIGPIPE,
the remote hg is able to successfully roll back the transaction.
$ hg init -q remote
$ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -q ssh://user@dummy/`pwd`/remote local
- $ check_for_abandoned_transaction() {
- > [ -f $TESTTMP/remote/.hg/store/journal ] && echo "Abandoned transaction!"
- > }
-
$ pidfile=`pwd`/pidfile
$ >$pidfile
@@ -58,5 +55,7 @@
$ hg push -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --remotecmd $remotecmd 2>&1 | grep -v $killable_pipe
abort: stream ended unexpectedly (got 0 bytes, expected 4)
- $ check_for_abandoned_transaction
+The remote should be left in a good state
+ $ hg --cwd ../remote recover
+ no interrupted transaction available
[1]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-transaction-safety.t Mon Jun 07 17:10:35 2021 -0400
@@ -0,0 +1,269 @@
+Test transaction safety
+=======================
+
+#testcases revlogv1 revlogv2 changelogv2
+
+#if revlogv1
+
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > revlogv2=no
+ > EOF
+
+#endif
+
+#if revlogv2
+
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > revlogv2=enable-unstable-format-and-corrupt-my-data
+ > EOF
+
+#endif
+
+#if changelogv2
+
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > exp-use-changelog-v2=enable-unstable-format-and-corrupt-my-data
+ > EOF
+
+#endif
+
+This tests the basic case, making sure an external process does not see
+transaction content until it is committed.
+
+# TODO: also add an external reader accessing revlog files while they are written
+# (instead of during transaction finalisation)
+
+# TODO: also add stream clone and hardlink clone happening during these transactions.
+
+setup
+-----
+
+synchronisation and output scripts:
+
+ $ mkdir sync
+ $ mkdir output
+ $ mkdir script
+ $ HG_TEST_FILE_EXT_WAITING=$TESTTMP/sync/ext_waiting
+ $ export HG_TEST_FILE_EXT_WAITING
+ $ HG_TEST_FILE_EXT_UNLOCK=$TESTTMP/sync/ext_unlock
+ $ export HG_TEST_FILE_EXT_UNLOCK
+ $ HG_TEST_FILE_EXT_DONE=$TESTTMP/sync/ext_done
+ $ export HG_TEST_FILE_EXT_DONE
+ $ cat << EOF > script/external.sh
+ > #!/bin/sh
+ > "$RUNTESTDIR/testlib/wait-on-file" 5 "$HG_TEST_FILE_EXT_UNLOCK" "$HG_TEST_FILE_EXT_WAITING"
+ > hg log --rev 'tip' -T 'external: {rev} {desc}\n' > "$TESTTMP/output/external.out"
+ > touch "$HG_TEST_FILE_EXT_DONE"
+ > EOF
+ $ cat << EOF > script/internal.sh
+ > #!/bin/sh
+ > hg log --rev 'tip' -T 'internal: {rev} {desc}\n' > "$TESTTMP/output/internal.out"
+ > "$RUNTESTDIR/testlib/wait-on-file" 5 "$HG_TEST_FILE_EXT_DONE" "$HG_TEST_FILE_EXT_UNLOCK"
+ > EOF
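+
+The "$RUNTESTDIR/testlib/wait-on-file" helper acts as a barrier between the
+two scripts; roughly, it takes a timeout, a file to wait for, and optionally a
+file to create first as a "ready" signal. A Python approximation of that
+behaviour (the real helper is a shell script; this is only a sketch):
+
+    import os
+    import time
+
+    def wait_on_file(timeout, wait_path, create_path=None):
+        if create_path is not None:
+            open(create_path, 'a').close()  # signal that we are waiting
+        deadline = time.monotonic() + timeout
+        while time.monotonic() < deadline:
+            if os.path.exists(wait_path):
+                return True
+            time.sleep(0.01)
+        return False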
+
+
+Automated commands:
+
+ $ make_one_commit() {
+ > rm -f $TESTTMP/sync/*
+ > rm -f $TESTTMP/output/*
+ > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
+ > echo x >> a
+ > sh $TESTTMP/script/external.sh & hg commit -m "$1"
+ > cat $TESTTMP/output/external.out
+ > cat $TESTTMP/output/internal.out
+ > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
+ > }
+
+
+ $ make_one_pull() {
+ > rm -f $TESTTMP/sync/*
+ > rm -f $TESTTMP/output/*
+ > hg log --rev 'tip' -T 'pre-commit: {rev} {desc}\n'
+ > echo x >> a
+ > sh $TESTTMP/script/external.sh & hg pull ../other-repo/ --rev "$1" --force --quiet
+ > cat $TESTTMP/output/external.out
+ > cat $TESTTMP/output/internal.out
+ > hg log --rev 'tip' -T 'post-tr: {rev} {desc}\n'
+ > }
+
+prepare a large source to pull from:
+
+The source is large enough to ensure the revlog is no longer inline after the
+pull
+
+ $ hg init other-repo
+ $ hg -R other-repo debugbuilddag .+500
+
+
+prepare an empty repository in which to run the test:
+
+ $ hg init repo
+ $ cd repo
+ $ touch a
+ $ hg add a
+
+prepare a small extension to control the inline threshold
+
+ $ mkdir $TESTTMP/ext
+ $ cat << EOF > $TESTTMP/ext/small_inline.py
+ > from mercurial import revlog
+ > revlog._maxinline = 64 * 100
+ > EOF
+
+
+
+
+ $ cat << EOF >> $HGRCPATH
+ > [extensions]
+ > small_inline=$TESTTMP/ext/small_inline.py
+ > [hooks]
+ > pretxnclose = sh $TESTTMP/script/internal.sh
+ > EOF
+
+check this is true for the initial commit (inline → inline)
+-----------------------------------------------------------
+
+the repository should still be inline (for the relevant format)
+
+ $ make_one_commit first
+ pre-commit: -1
+ external: -1
+ internal: 0 first
+ post-tr: 0 first
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ flags : inline
+
+#endif
+
+check this is true for an extra commit (inline → inline)
+--------------------------------------------------------
+
+the repository should still be inline (for the relevant format)
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ flags : inline
+
+#endif
+
+ $ make_one_commit second
+ pre-commit: 0 first
+ external: 0 first
+ internal: 1 second
+ post-tr: 1 second
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ flags : inline
+
+#endif
+
+check this is true for a small pull (inline → inline)
+-----------------------------------------------------
+
+the repository should still be inline (for the relevant format)
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ flags : inline
+
+#endif
+
+ $ make_one_pull 3
+ pre-commit: 1 second
+ warning: repository is unrelated
+ external: 1 second
+ internal: 5 r3
+ post-tr: 5 r3
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ flags : inline
+
+#endif
+
+Make a large pull (inline → no-inline)
+---------------------------------------
+
+the repository should no longer be inline (for the relevant format)
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ flags : inline
+
+#endif
+
+ $ make_one_pull 400
+ pre-commit: 5 r3
+ external: 5 r3
+ internal: 402 r400
+ post-tr: 402 r400
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ [1]
+
+#endif
+
+check this is true for an extra commit (no-inline → no-inline)
+--------------------------------------------------------------
+
+the repository should no longer be inline (for the relevant format)
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ [1]
+
+#endif
+
+ $ make_one_commit third
+ pre-commit: 402 r400
+ external: 402 r400
+ internal: 403 third
+ post-tr: 403 third
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ [1]
+
+#endif
+
+
+Make a pull (no-inline → no-inline)
+-----------------------------------
+
+the repository should no longer be inline (for the relevant format)
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ [1]
+
+#endif
+
+ $ make_one_pull tip
+ pre-commit: 403 third
+ external: 403 third
+ internal: 503 r500
+ post-tr: 503 r500
+
+#if revlogv1
+
+ $ hg debugrevlog -c | grep inline
+ [1]
+
+#endif
--- a/tests/test-unamend.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-unamend.t Mon Jun 07 17:10:35 2021 -0400
@@ -6,6 +6,7 @@
> glog = log -G -T '{rev}:{node|short} {desc}'
> [experimental]
> evolution = createmarkers, allowunstable
+ > evolution.allowdivergence = true
> [extensions]
> rebase =
> amend =
@@ -283,7 +284,8 @@
$ hg --config experimental.evolution=createmarkers unamend
- abort: cannot unamend changeset with children
+ abort: cannot unamend changeset, as that will orphan 3 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg unamend
@@ -296,7 +298,7 @@
$ hg phase -r . -p
1 new phase-divergent changesets
$ hg unamend
- abort: cannot unamend public changesets
+ abort: cannot unamend public changesets: 03ddd6fc5af1
(see 'hg help phases' for details)
[10]
--- a/tests/test-uncommit.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-uncommit.t Mon Jun 07 17:10:35 2021 -0400
@@ -51,7 +51,7 @@
Uncommit with no commits should fail
$ hg uncommit
- abort: cannot uncommit null changeset
+ abort: cannot uncommit the null revision
(no changeset checked out)
[10]
@@ -410,7 +410,7 @@
[20]
$ hg uncommit --config experimental.uncommitondirtywdir=True
- abort: cannot uncommit while merging
+ abort: cannot uncommit changesets while merging
[20]
$ hg status
--- a/tests/test-upgrade-repo.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-upgrade-repo.t Mon Jun 07 17:10:35 2021 -0400
@@ -57,6 +57,7 @@
$ hg debugformat
format-variant repo
fncache: yes
+ dirstate-v2: no
dotencode: yes
generaldelta: yes
share-safe: no
@@ -65,12 +66,14 @@
persistent-nodemap: yes (rust !)
copies-sdc: no
revlog-v2: no
+ changelog-v2: no
plain-cl-delta: yes
compression: zlib
compression-level: default
$ hg debugformat --verbose
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -79,6 +82,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zlib zlib zstd (zstd !)
@@ -86,6 +90,7 @@
$ hg debugformat --verbose --config format.usefncache=no
format-variant repo config default
fncache: yes no yes
+ dirstate-v2: no no no
dotencode: yes no yes
generaldelta: yes yes yes
share-safe: no no no
@@ -94,6 +99,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zlib zlib zstd (zstd !)
@@ -101,6 +107,7 @@
$ hg debugformat --verbose --config format.usefncache=no --color=debug
format-variant repo config default
[formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
@@ -109,6 +116,7 @@
[formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
[formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
[formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
@@ -122,6 +130,12 @@
"repo": true
},
{
+ "config": false,
+ "default": false,
+ "name": "dirstate-v2",
+ "repo": false
+ },
+ {
"config": true,
"default": true,
"name": "dotencode",
@@ -166,6 +180,12 @@
"repo": false
},
{
+ "config": false,
+ "default": false,
+ "name": "changelog-v2",
+ "repo": false
+ },
+ {
"config": true,
"default": true,
"name": "plain-cl-delta",
@@ -317,6 +337,7 @@
$ hg debugformat
format-variant repo
fncache: no
+ dirstate-v2: no
dotencode: no
generaldelta: no
share-safe: no
@@ -324,12 +345,14 @@
persistent-nodemap: no
copies-sdc: no
revlog-v2: no
+ changelog-v2: no
plain-cl-delta: yes
compression: zlib
compression-level: default
$ hg debugformat --verbose
format-variant repo config default
fncache: no yes yes
+ dirstate-v2: no no no
dotencode: no yes yes
generaldelta: no yes yes
share-safe: no no no
@@ -338,6 +361,7 @@
persistent-nodemap: no yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zlib zlib zstd (zstd !)
@@ -345,6 +369,7 @@
$ hg debugformat --verbose --config format.usegeneraldelta=no
format-variant repo config default
fncache: no yes yes
+ dirstate-v2: no no no
dotencode: no yes yes
generaldelta: no no yes
share-safe: no no no
@@ -353,6 +378,7 @@
persistent-nodemap: no yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zlib zlib zstd (zstd !)
@@ -360,6 +386,7 @@
$ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
format-variant repo config default
[formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
@@ -368,6 +395,7 @@
[formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
[formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
[formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
@@ -1341,6 +1369,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1349,6 +1378,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zlib zstd (zstd !)
@@ -1381,6 +1411,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1389,6 +1420,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zlib zlib zstd (zstd !)
@@ -1424,6 +1456,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1432,6 +1465,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -1448,12 +1482,13 @@
#endif
-Check upgrading to a side-data revlog
--------------------------------------
+Check upgrading to a revlog format supporting sidedata
+------------------------------------------------------
upgrade
- $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
+ $ hg debugsidedata -c 0
+ $ hg --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
upgrade will perform the following actions:
requirements
@@ -1461,8 +1496,8 @@
preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
removed: revlogv1
- added: exp-revlogv2.2, exp-sidedata-flag (zstd !)
- added: exp-revlogv2.2, exp-sidedata-flag, sparserevlog (no-zstd !)
+ added: exp-revlogv2.2 (zstd !)
+ added: exp-revlogv2.2, sparserevlog (no-zstd !)
processed revlogs:
- all-filelogs
@@ -1472,6 +1507,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1480,6 +1516,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: yes no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -1487,7 +1524,6 @@
$ cat .hg/requires
dotencode
exp-revlogv2.2
- exp-sidedata-flag
fncache
generaldelta
persistent-nodemap (rust !)
@@ -1501,14 +1537,14 @@
downgrade
- $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup --quiet
+ $ hg debugupgraderepo --config experimental.revlogv2=no --run --no-backup --quiet
upgrade will perform the following actions:
requirements
preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
- removed: exp-revlogv2.2, exp-sidedata-flag
+ removed: exp-revlogv2.2
added: revlogv1
processed revlogs:
@@ -1519,6 +1555,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1527,6 +1564,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -1545,8 +1583,8 @@
upgrade from hgrc
$ cat >> .hg/hgrc << EOF
- > [format]
- > exp-use-side-data=yes
+ > [experimental]
+ > revlogv2=enable-unstable-format-and-corrupt-my-data
> EOF
$ hg debugupgraderepo --run --no-backup --quiet
upgrade will perform the following actions:
@@ -1556,7 +1594,7 @@
preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
removed: revlogv1
- added: exp-revlogv2.2, exp-sidedata-flag
+ added: exp-revlogv2.2
processed revlogs:
- all-filelogs
@@ -1566,6 +1604,7 @@
$ hg debugformat -v
format-variant repo config default
fncache: yes yes yes
+ dirstate-v2: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
share-safe: no no no
@@ -1574,6 +1613,7 @@
persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: yes yes no
+ changelog-v2: no no no
plain-cl-delta: yes yes yes
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
@@ -1581,7 +1621,6 @@
$ cat .hg/requires
dotencode
exp-revlogv2.2
- exp-sidedata-flag
fncache
generaldelta
persistent-nodemap (rust !)
@@ -1594,3 +1633,105 @@
$ hg debugupgraderepo --run
nothing to do
+
+#if rust
+
+Upgrade to dirstate-v2
+
+ $ hg debugformat -v --config format.exp-dirstate-v2=1
+ format-variant repo config default
+ fncache: yes yes yes
+ dirstate-v2: no yes no
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ share-safe: no no no
+ sparserevlog: yes yes yes
+ persistent-nodemap: yes yes no
+ copies-sdc: no no no
+ revlog-v2: yes yes no
+ changelog-v2: no no no
+ plain-cl-delta: yes yes yes
+ compression: zstd zstd zstd
+ compression-level: default default default
+ $ hg debugupgraderepo --config format.exp-dirstate-v2=1 --run
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
+ added: exp-dirstate-v2
+
+ dirstate-v2
+ "hg status" will be faster
+
+ processed revlogs:
+ - all-filelogs
+ - changelog
+ - manifest
+
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ upgrading to dirstate-v2 from v1
+ replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
+ removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
+ $ ls .hg/upgradebackup.*/dirstate
+ .hg/upgradebackup.*/dirstate (glob)
+ $ hg debugformat -v
+ format-variant repo config default
+ fncache: yes yes yes
+ dirstate-v2: yes no no
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ share-safe: no no no
+ sparserevlog: yes yes yes
+ persistent-nodemap: yes yes no
+ copies-sdc: no no no
+ revlog-v2: yes yes no
+ changelog-v2: no no no
+ plain-cl-delta: yes yes yes
+ compression: zstd zstd zstd
+ compression-level: default default default
+ $ hg status
+ $ dd status=none bs=12 count=1 if=.hg/dirstate
+ dirstate-v2
+
+Downgrade from dirstate-v2
+
+ $ hg debugupgraderepo --run
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, exp-revlogv2.2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store
+ removed: exp-dirstate-v2
+
+ processed revlogs:
+ - all-filelogs
+ - changelog
+ - manifest
+
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ downgrading from dirstate-v2 to v1
+ replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
+ removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
+ $ hg debugformat -v
+ format-variant repo config default
+ fncache: yes yes yes
+ dirstate-v2: no no no
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ share-safe: no no no
+ sparserevlog: yes yes yes
+ persistent-nodemap: yes yes no
+ copies-sdc: no no no
+ revlog-v2: yes yes no
+ changelog-v2: no no no
+ plain-cl-delta: yes yes yes
+ compression: zstd zstd zstd
+ compression-level: default default default
+ $ hg status
+
+#endif
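+
+The 12-byte "dd" read above works because the dirstate-v2 file is assumed to
+begin with an ASCII marker; a hypothetical check along the same lines (the
+marker value is inferred from the output above, not from the format spec):
+
+    def is_dirstate_v2(path='.hg/dirstate'):
+        with open(path, 'rb') as fh:
+            return fh.read(12) == b'dirstate-v2\n'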
--- a/tests/test-verify.t Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/test-verify.t Mon Jun 07 17:10:35 2021 -0400
@@ -297,7 +297,7 @@
checking manifests
crosschecking files in changesets and manifests
checking files
- a@1: broken revlog! (index data/a.i is corrupted)
+ a@1: broken revlog! (index data/a is corrupted)
warning: orphan data file 'data/a.i'
checked 2 changesets with 0 changes to 1 files
1 warnings encountered!
@@ -351,7 +351,7 @@
checking manifests
crosschecking files in changesets and manifests
checking files
- base64@0: unpacking 794cee7777cb: integrity check failed on data/base64.i:0
+ base64@0: unpacking 794cee7777cb: integrity check failed on data/base64:0
checked 1 changesets with 1 changes to 1 files
1 integrity errors encountered!
(first damaged changeset appears to be 0)
--- a/tests/testlib/ext-sidedata-2.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/testlib/ext-sidedata-2.py Mon Jun 07 17:10:35 2021 -0400
@@ -14,6 +14,9 @@
import struct
from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -21,7 +24,7 @@
if text is None:
text = revlog.revision(rev)
sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -30,21 +33,23 @@
text = revlog.revision(rev)
sha256 = hashlib.sha256(text).digest()
sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
- return sidedata
+ return sidedata, NO_FLAGS
def reposetup(ui, repo):
# Sidedata keys happen to be the same as the categories, easier for testing.
- for kind in (b'changelog', b'manifest', b'filelog'):
+ for kind in constants.ALL_KINDS:
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST1,
(sidedatamod.SD_TEST1,),
compute_sidedata_1,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST2,
(sidedatamod.SD_TEST2,),
compute_sidedata_2,
+ 0,
)
--- a/tests/testlib/ext-sidedata-3.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/testlib/ext-sidedata-3.py Mon Jun 07 17:10:35 2021 -0400
@@ -20,6 +20,9 @@
)
from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -27,7 +30,7 @@
if text is None:
text = revlog.revision(rev)
sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -36,7 +39,7 @@
text = revlog.revision(rev)
sha256 = hashlib.sha256(text).digest()
sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_3(repo, revlog, rev, sidedata, text=None):
@@ -45,7 +48,7 @@
text = revlog.revision(rev)
sha384 = hashlib.sha384(text).digest()
sidedata[sidedatamod.SD_TEST3] = struct.pack('>48s', sha384)
- return sidedata
+ return sidedata, NO_FLAGS
def wrapaddrevision(
@@ -54,8 +57,8 @@
if kwargs.get('sidedata') is None:
kwargs['sidedata'] = {}
sd = kwargs['sidedata']
- sd = compute_sidedata_1(None, self, None, sd, text=text)
- kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)
+ sd, flags = compute_sidedata_1(None, self, None, sd, text=text)
+ kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)[0]
return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
@@ -65,24 +68,27 @@
def reposetup(ui, repo):
# Sidedata keys happen to be the same as the categories, easier for testing.
- for kind in (b'changelog', b'manifest', b'filelog'):
+ for kind in constants.ALL_KINDS:
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST1,
(sidedatamod.SD_TEST1,),
compute_sidedata_1,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST2,
(sidedatamod.SD_TEST2,),
compute_sidedata_2,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST3,
(sidedatamod.SD_TEST3,),
compute_sidedata_3,
+ 0,
)
repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
repo.register_wanted_sidedata(sidedatamod.SD_TEST2)
--- a/tests/testlib/ext-sidedata-5.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/testlib/ext-sidedata-5.py Mon Jun 07 17:10:35 2021 -0400
@@ -21,6 +21,9 @@
from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -28,7 +31,7 @@
if text is None:
text = revlog.revision(rev)
sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -37,23 +40,25 @@
text = revlog.revision(rev)
sha256 = hashlib.sha256(text).digest()
sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
- return sidedata
+ return sidedata, NO_FLAGS
def reposetup(ui, repo):
# Sidedata keys happen to be the same as the categories, easier for testing.
- for kind in (b'changelog', b'manifest', b'filelog'):
+ for kind in constants.ALL_KINDS:
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST1,
(sidedatamod.SD_TEST1,),
compute_sidedata_1,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST2,
(sidedatamod.SD_TEST2,),
compute_sidedata_2,
+ 0,
)
# We don't register sidedata computers because we don't care within these
--- a/tests/testlib/ext-sidedata.py Sun Jun 06 01:24:30 2021 +0200
+++ b/tests/testlib/ext-sidedata.py Mon Jun 07 17:10:35 2021 -0400
@@ -10,10 +10,7 @@
import hashlib
import struct
-from mercurial.node import (
- nullid,
- nullrev,
-)
+from mercurial.node import nullrev
from mercurial import (
extensions,
requirements,
@@ -22,6 +19,7 @@
from mercurial.upgrade_utils import engine as upgrade_engine
+from mercurial.revlogutils import constants
from mercurial.revlogutils import sidedata
@@ -41,12 +39,13 @@
def wrap_revisiondata(orig, self, nodeorrev, *args, **kwargs):
- text, sd = orig(self, nodeorrev, *args, **kwargs)
+ text = orig(self, nodeorrev, *args, **kwargs)
+ sd = self.sidedata(nodeorrev)
if getattr(self, 'sidedatanocheck', False):
- return text, sd
- if self.version & 0xFFFF != 2:
- return text, sd
- if nodeorrev != nullrev and nodeorrev != nullid:
+ return text
+    if not self.hassidedata:
+ return text
+ if nodeorrev != nullrev and nodeorrev != self.nullid:
cat1 = sd.get(sidedata.SD_TEST1)
if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
raise RuntimeError('text size mismatch')
@@ -54,16 +53,18 @@
got = hashlib.sha256(text).digest()
if expected is not None and got != expected:
raise RuntimeError('sha256 mismatch')
- return text, sd
+ return text
-def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
- sidedatacompanion = orig(srcrepo, dstrepo)
+def wrapget_sidedata_helpers(orig, srcrepo, dstrepo):
+ repo, computers, removers = orig(srcrepo, dstrepo)
+ assert not computers and not removers # deal with composition later
addedreqs = dstrepo.requirements - srcrepo.requirements
- if requirements.SIDEDATA_REQUIREMENT in addedreqs:
- assert sidedatacompanion is None # deal with composition later
+
+ if requirements.REVLOGV2_REQUIREMENT in addedreqs:
- def sidedatacompanion(revlog, rev):
+ def computer(repo, revlog, rev, old_sidedata):
+ assert not old_sidedata # not supported yet
update = {}
revlog.sidedatanocheck = True
try:
@@ -76,16 +77,25 @@
# and sha2 hashes
sha256 = hashlib.sha256(text).digest()
update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
- return False, (), update, 0, 0
+ return update, (0, 0)
- return sidedatacompanion
+ srcrepo.register_sidedata_computer(
+ constants.KIND_CHANGELOG,
+ b"whatever",
+ (sidedata.SD_TEST1, sidedata.SD_TEST2),
+ computer,
+ 0,
+ )
+ dstrepo.register_wanted_sidedata(b"whatever")
+
+ return sidedata.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
def extsetup(ui):
extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
extensions.wrapfunction(revlog.revlog, '_revisiondata', wrap_revisiondata)
extensions.wrapfunction(
- upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
+ upgrade_engine, 'get_sidedata_helpers', wrapget_sidedata_helpers
)
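+
+# Note: get_sidedata_helpers() returns a (repo, computers, removers) triple,
+# which is why the wrapper above unpacks three values before registering its
+# own computer and rebuilding the helpers from dstrepo._wanted_sidedata.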