changeset 45746:0e06a7ab9e0d (branch: stable, tag: 5.6rc0)

merge with default for 5.6rc0
author Pulkit Goyal <7895pulkit@gmail.com>
date Tue, 20 Oct 2020 22:04:04 +0530
parents 4a146cff76fa, 94f681b84c70
children 57515895bfff
diffstat 266 files changed, 16275 insertions(+), 4975 deletions(-)
--- a/.editorconfig	Thu Oct 08 13:45:56 2020 -0700
+++ b/.editorconfig	Tue Oct 20 22:04:04 2020 +0530
@@ -6,13 +6,16 @@
 indent_size = 4
 indent_style = space
 trim_trailing_whitespace = true
+end_of_line = lf
 
 [*.{c,h}]
 indent_size = 8
 indent_style = tab
 trim_trailing_whitespace = true
+end_of_line = lf
 
 [*.t]
 indent_size = 2
 indent_style = space
 trim_trailing_whitespace = false
+end_of_line = lf
--- a/Makefile	Thu Oct 08 13:45:56 2020 -0700
+++ b/Makefile	Tue Oct 20 22:04:04 2020 +0530
@@ -133,7 +133,7 @@
 rust-tests:
 	cd $(HGROOT)/rust/hg-cpython \
 		&& $(CARGO) test --quiet --all \
-			--no-default-features --features "$(py_feature)"
+			--no-default-features --features "$(py_feature) $(HG_RUST_FEATURES)"
 
 check-code:
 	hg manifest | xargs python contrib/check-code.py
@@ -234,7 +234,6 @@
 	make -C contrib/chg \
 	  HGPATH=/usr/local/bin/hg \
 	  PYTHON=/usr/bin/python2.7 \
-	  HGEXTDIR=/Library/Python/2.7/site-packages/hgext \
 	  DESTDIR=../../build/mercurial \
 	  PREFIX=/usr/local \
 	  clean install
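
The $(HG_RUST_FEATURES) passthrough above is what lets the new CI jobs in contrib/heptapod-ci.yml (further down in this changeset) opt into extra cargo features. A rough local equivalent, assuming a source checkout and a Rust toolchain on PATH, would be:

  $ make rust-tests HG_RUST_FEATURES="dirstate-tree"

make forwards the command-line variable into the cargo invocation, so the tests build with the extra feature enabled alongside the usual $(py_feature) Python-binding feature.
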
--- a/contrib/automation/hgautomation/cli.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/automation/hgautomation/cli.py	Tue Oct 20 22:04:04 2020 +0530
@@ -158,7 +158,7 @@
 
         windows.synchronize_hg(SOURCE_ROOT, revision, instance)
 
-        for py_version in ("2.7", "3.7", "3.8"):
+        for py_version in ("2.7", "3.7", "3.8", "3.9"):
             for arch in ("x86", "x64"):
                 windows.purge_hg(winrm_client)
                 windows.build_wheel(
@@ -364,7 +364,7 @@
     sp.add_argument(
         '--python-version',
         help='Python version to build for',
-        choices={'2.7', '3.7', '3.8'},
+        choices={'2.7', '3.7', '3.8', '3.9'},
         nargs='*',
         default=['3.8'],
     )
@@ -476,7 +476,7 @@
     sp.add_argument(
         '--python-version',
         help='Python version to use',
-        choices={'2.7', '3.5', '3.6', '3.7', '3.8'},
+        choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9'},
         default='2.7',
     )
     sp.add_argument(
--- a/contrib/automation/hgautomation/linux.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/automation/hgautomation/linux.py	Tue Oct 20 22:04:04 2020 +0530
@@ -26,11 +26,11 @@
 
 INSTALL_PYTHONS = r'''
 PYENV2_VERSIONS="2.7.17 pypy2.7-7.2.0"
-PYENV3_VERSIONS="3.5.9 3.6.10 3.7.7 3.8.2 pypy3.5-7.0.0 pypy3.6-7.3.0"
+PYENV3_VERSIONS="3.5.10 3.6.12 3.7.9 3.8.6 3.9.0 pypy3.5-7.0.0 pypy3.6-7.3.0"
 
 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
 pushd /hgdev/pyenv
-git checkout 3005c4664372ae13fbe376be699313eb428c8bdd
+git checkout 8ac91b4fd678a8c04356f5ec85cfcd565c265e9a
 popd
 
 export PYENV_ROOT="/hgdev/pyenv"
@@ -72,10 +72,10 @@
 
 chmod +x rustup-init
 sudo -H -u hg -g hg ./rustup-init -y
-sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.42.0
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.46.0
 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
 
-sudo -H -u hg -g hg /home/hg/.cargo/bin/cargo install --version 0.7.0 pyoxidizer
+sudo -H -u hg -g hg /home/hg/.cargo/bin/cargo install --git https://github.com/indygreg/PyOxidizer.git --rev 4697fb25918dfad6dc73288daeea501063963a08 pyoxidizer
 '''
 
 
--- a/contrib/automation/linux-requirements-py2.txt	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/automation/linux-requirements-py2.txt	Tue Oct 20 22:04:04 2020 +0530
@@ -8,29 +8,29 @@
     --hash=sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756 \
     --hash=sha256:d25869fc7f44f1d9fb7d24fd7ea0639656f5355fc3089cd1f3d18c6ec6b124c7 \
     # via pylint
-backports.functools-lru-cache==1.5 \
-    --hash=sha256:9d98697f088eb1b0fa451391f91afb5e3ebde16bbdb272819fd091151fda4f1a \
-    --hash=sha256:f0b0e4eba956de51238e17573b7087e852dfe9854afd2e9c873f73fc0ca0a6dd \
+backports.functools-lru-cache==1.6.1 \
+    --hash=sha256:0bada4c2f8a43d533e4ecb7a12214d9420e66eb206d54bf2d682581ca4b80848 \
+    --hash=sha256:8fde5f188da2d593bd5bc0be98d9abc46c95bb8a9dde93429570192ee6cc2d4a \
     # via astroid, isort, pylint
 bzr==2.7.0 ; python_version <= "2.7" and platform_python_implementation == "CPython" \
-    --hash=sha256:c9f6bbe0a50201dadc5fddadd94ba50174193c6cf6e39e16f6dd0ad98a1df338
-configparser==3.7.4 \
-    --hash=sha256:8be81d89d6e7b4c0d4e44bcc525845f6da25821de80cb5e06e7e0238a2899e32 \
-    --hash=sha256:da60d0014fd8c55eb48c1c5354352e363e2d30bbf7057e5e171a468390184c75 \
+    --hash=sha256:c9f6bbe0a50201dadc5fddadd94ba50174193c6cf6e39e16f6dd0ad98a1df338 \
+    # via -r contrib/automation/linux-requirements.txt.in
+configparser==4.0.2 \
+    --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
+    --hash=sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df \
     # via pylint
-contextlib2==0.5.5 \
-    --hash=sha256:509f9419ee91cdd00ba34443217d5ca51f5a364a404e1dce9e8979cea969ca48 \
-    --hash=sha256:f5260a6e679d2ff42ec91ec5252f4eeffdcf21053db9113bd0a8e4d953769c00 \
+contextlib2==0.6.0.post1 \
+    --hash=sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e \
+    --hash=sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b \
     # via vcrpy
-docutils==0.15.2 \
-    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
-    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
-    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99
-enum34==1.1.6 \
-    --hash=sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850 \
-    --hash=sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a \
-    --hash=sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79 \
-    --hash=sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1 \
+docutils==0.16 \
+    --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
+    --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+    # via -r contrib/automation/linux-requirements.txt.in
+enum34==1.1.10 \
+    --hash=sha256:a98a201d6de3f2ab3db284e70a33b0f896fbf35f8086594e8c9e74b909058d53 \
+    --hash=sha256:c3858660960c984d6ab0ebad691265180da2b43f07e061c0f8dca9ef3cffd328 \
+    --hash=sha256:cce6a7477ed816bd2542d03d53db9f0db935dd013b70f336a95c73979289f248 \
     # via astroid
 funcsigs==1.0.2 \
     --hash=sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca \
@@ -40,32 +40,37 @@
     --hash=sha256:49b3f5b064b6e3afc3316421a3f25f66c137ae88f068abbf72830170033c5e16 \
     --hash=sha256:7e033af76a5e35f58e56da7a91e687706faf4e7bdfb2cbc3f2cca6b9bcda9794 \
     # via isort
-fuzzywuzzy==0.17.0 \
-    --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
-    --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
+fuzzywuzzy==0.18.0 \
+    --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
+    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993 \
+    # via -r contrib/automation/linux-requirements.txt.in
 isort==4.3.21 \
     --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
     --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
     # via pylint
-lazy-object-proxy==1.4.1 \
-    --hash=sha256:159a745e61422217881c4de71f9eafd9d703b93af95618635849fe469a283661 \
-    --hash=sha256:23f63c0821cc96a23332e45dfaa83266feff8adc72b9bcaef86c202af765244f \
-    --hash=sha256:3b11be575475db2e8a6e11215f5aa95b9ec14de658628776e10d96fa0b4dac13 \
-    --hash=sha256:3f447aff8bc61ca8b42b73304f6a44fa0d915487de144652816f950a3f1ab821 \
-    --hash=sha256:4ba73f6089cd9b9478bc0a4fa807b47dbdb8fad1d8f31a0f0a5dbf26a4527a71 \
-    --hash=sha256:4f53eadd9932055eac465bd3ca1bd610e4d7141e1278012bd1f28646aebc1d0e \
-    --hash=sha256:64483bd7154580158ea90de5b8e5e6fc29a16a9b4db24f10193f0c1ae3f9d1ea \
-    --hash=sha256:6f72d42b0d04bfee2397aa1862262654b56922c20a9bb66bb76b6f0e5e4f9229 \
-    --hash=sha256:7c7f1ec07b227bdc561299fa2328e85000f90179a2f44ea30579d38e037cb3d4 \
-    --hash=sha256:7c8b1ba1e15c10b13cad4171cfa77f5bb5ec2580abc5a353907780805ebe158e \
-    --hash=sha256:8559b94b823f85342e10d3d9ca4ba5478168e1ac5658a8a2f18c991ba9c52c20 \
-    --hash=sha256:a262c7dfb046f00e12a2bdd1bafaed2408114a89ac414b0af8755c696eb3fc16 \
-    --hash=sha256:acce4e3267610c4fdb6632b3886fe3f2f7dd641158a843cf6b6a68e4ce81477b \
-    --hash=sha256:be089bb6b83fac7f29d357b2dc4cf2b8eb8d98fe9d9ff89f9ea6012970a853c7 \
-    --hash=sha256:bfab710d859c779f273cc48fb86af38d6e9210f38287df0069a63e40b45a2f5c \
-    --hash=sha256:c10d29019927301d524a22ced72706380de7cfc50f767217485a912b4c8bd82a \
-    --hash=sha256:dd6e2b598849b3d7aee2295ac765a578879830fb8966f70be8cd472e6069932e \
-    --hash=sha256:e408f1eacc0a68fed0c08da45f31d0ebb38079f043328dce69ff133b95c29dc1 \
+lazy-object-proxy==1.5.1 \
+    --hash=sha256:00b78a97a79d0dfefa584d44dd1aba9668d3de7ec82335ba0ff51d53ef107143 \
+    --hash=sha256:042b54fd71c2092e6d10e5e66fa60f65c5954f8145e809f5d9f394c9b13d32ee \
+    --hash=sha256:11f87dc06eb5f376cc6d5f0c19a1b4dca202035622777c4ce8e5b72c87b035d6 \
+    --hash=sha256:19ae6f6511a02008ef3554e158c41bb2a8e5c8455935b98d6da076d9f152fd7c \
+    --hash=sha256:22c1935c6f8e3d6ea2e169eb03928adbdb8a2251d2890f8689368d65e70aa176 \
+    --hash=sha256:30ef2068f4f94660144515380ef04b93d15add2214eab8be4cd46ebc900d681c \
+    --hash=sha256:33da47ba3a581860ddd3d38c950a5fe950ca389f7123edd0d6ab0bc473499fe7 \
+    --hash=sha256:3e8698dc384857413580012f4ca322d89e63ef20fc3d4635a5b606d6d4b61f6a \
+    --hash=sha256:4fdd7113fc5143c72dacf415079eec42fcbe69cc9d3d291b4ca742e3a9455807 \
+    --hash=sha256:63b6d9a5077d54db271fcc6772440f7380ec3fa559d0e2497dbfae2f47c2c814 \
+    --hash=sha256:8133b63b05f12751cddd8e3e7f02ba39dc7cfa7d2ba99d80d7436f0ba26d6b75 \
+    --hash=sha256:89b8e5780e49753e2b4cd5aab45d3df092ddcbba3de2c4d4492a029588fe1758 \
+    --hash=sha256:8d82e27cbbea6edb8821751806f39f5dcfd7b46a5e23d27b98d6d8c8ec751df8 \
+    --hash=sha256:92cedd6e26712505adb1c17fab64651a498cc0102a80ba562ff4a2451088f57a \
+    --hash=sha256:9723364577b79ad9958a68851fe2acb94da6fd25170c595516a8289e6a129043 \
+    --hash=sha256:c484020ad26973a14a7cb1e1d2e0bfe97cf6803273ae9bd154e0213cc74bad49 \
+    --hash=sha256:c697bd1b333b3e6abdff04ef9f5fb4b1936633d9cc4e28d90606705c9083254c \
+    --hash=sha256:d0f7e14ff3424639d33e6bc449e77e4b345e52c21bbd6f6004a1d219196e2664 \
+    --hash=sha256:db2df3eff7ed3e6813638686f1bb5934d1a0662d9d3b4196b5164a86be3a1e8f \
+    --hash=sha256:edbcb4c5efabd93ede05b272296a5a78a67e9b6e82ba7f51a07b8103db06ce01 \
+    --hash=sha256:ef355fb3802e0fc5a71dadb65a3c317bfc9bdf567d357f8e0b1900b432ffe486 \
+    --hash=sha256:fe2f61fed5817bf8db01d9a72309ed5990c478a077e9585b58740c26774bce39 \
     # via astroid
 mccabe==0.6.1 \
     --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
@@ -75,47 +80,50 @@
     --hash=sha256:83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3 \
     --hash=sha256:d157e52d4e5b938c550f39eb2fd15610db062441a9c2747d3dbfa9298211d0f8 \
     # via vcrpy
-pyflakes==2.1.1 \
-    --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
-    --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
-pygments==2.4.2 \
-    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
-    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
+pyflakes==2.2.0 \
+    --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \
+    --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8 \
+    # via -r contrib/automation/linux-requirements.txt.in
+pygments==2.5.2 \
+    --hash=sha256:2a3fe295e54a20164a9df49c75fa58526d3be48e14aceba6d6b1e8ac0bfd6f1b \
+    --hash=sha256:98c8aa5a9f778fcd1026a17361ddaf7330d1b7c62ae97c3bb0ae73e0b9b6b0fe \
+    # via -r contrib/automation/linux-requirements.txt.in
 pylint==1.9.5 \
     --hash=sha256:367e3d49813d349a905390ac27989eff82ab84958731c5ef0bef867452cfdc42 \
-    --hash=sha256:97a42df23d436c70132971d1dcb9efad2fe5c0c6add55b90161e773caf729300
+    --hash=sha256:97a42df23d436c70132971d1dcb9efad2fe5c0c6add55b90161e773caf729300 \
+    # via -r contrib/automation/linux-requirements.txt.in
 python-levenshtein==0.12.0 \
-    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
-pyyaml==5.1.2 \
-    --hash=sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9 \
-    --hash=sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4 \
-    --hash=sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8 \
-    --hash=sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696 \
-    --hash=sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34 \
-    --hash=sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9 \
-    --hash=sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73 \
-    --hash=sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299 \
-    --hash=sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b \
-    --hash=sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae \
-    --hash=sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681 \
-    --hash=sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41 \
-    --hash=sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8 \
+    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 \
+    # via -r contrib/automation/linux-requirements.txt.in
+pyyaml==5.3.1 \
+    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
+    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
+    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
+    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
+    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
+    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
+    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
+    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
+    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
+    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
+    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a \
     # via vcrpy
 singledispatch==3.4.0.3 \
     --hash=sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c \
     --hash=sha256:833b46966687b3de7f438c761ac475213e53b306740f1abfaa86e1d1aae56aa8 \
     # via astroid, pylint
-six==1.12.0 \
-    --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
-    --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
+six==1.15.0 \
+    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
+    --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced \
     # via astroid, mock, pylint, singledispatch, vcrpy
-vcrpy==2.0.1 \
-    --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
-    --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
-wrapt==1.11.2 \
-    --hash=sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1 \
+vcrpy==3.0.0 \
+    --hash=sha256:21168d5ae14263a833d4b71acfd8278d8841114f24be1b4ab4a5719d0c7f07bc \
+    --hash=sha256:a2e6b653a627f9f3d6ded4d68587e470b91e4c1444e7dae939510dfeacb65276 \
+    # via -r contrib/automation/linux-requirements.txt.in
+wrapt==1.12.1 \
+    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
     # via astroid, vcrpy
 
 # WARNING: The following packages were not pinned, but pip requires them to be
 # pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
-# setuptools==41.0.1        # via python-levenshtein
+# setuptools
--- a/contrib/automation/linux-requirements-py3.txt	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/automation/linux-requirements-py3.txt	Tue Oct 20 22:04:04 2020 +0530
@@ -4,181 +4,212 @@
 #
 #    pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.txt contrib/automation/linux-requirements.txt.in
 #
-appdirs==1.4.3 \
-    --hash=sha256:9e5896d1372858f8dd3344faf4e5014d21849c756c8d5701f78f8a103b372d92 \
-    --hash=sha256:d8b24664561d0d34ddfaec54636d502d7cea6e29c3eaf68f3df6180863e2166e \
+appdirs==1.4.4 \
+    --hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \
+    --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 \
     # via black
-astroid==2.2.5 \
-    --hash=sha256:6560e1e1749f68c64a4b5dee4e091fce798d2f0d84ebe638cf0e0585a343acf4 \
-    --hash=sha256:b65db1bbaac9f9f4d190199bb8680af6f6f84fd3769a5ea883df8a91fe68b4c4 \
+astroid==2.4.2 \
+    --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
+    --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386 \
     # via pylint
-attrs==19.3.0 \
-    --hash=sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c \
-    --hash=sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72 \
+attrs==20.2.0 \
+    --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \
+    --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc \
     # via black
 black==19.10b0 ; python_version >= "3.6" and platform_python_implementation != "PyPy" \
     --hash=sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b \
-    --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539
-click==7.0 \
-    --hash=sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13 \
-    --hash=sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7 \
+    --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539 \
+    # via -r contrib/automation/linux-requirements.txt.in
+click==7.1.2 \
+    --hash=sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a \
+    --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc \
     # via black
-docutils==0.15.2 \
-    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
-    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
-    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99
-fuzzywuzzy==0.17.0 \
-    --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
-    --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
-idna==2.8 \
-    --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
-    --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
+docutils==0.16 \
+    --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
+    --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+    # via -r contrib/automation/linux-requirements.txt.in
+fuzzywuzzy==0.18.0 \
+    --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
+    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993 \
+    # via -r contrib/automation/linux-requirements.txt.in
+idna==2.10 \
+    --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
+    --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 \
     # via yarl
 isort==4.3.21 \
     --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
     --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
-    # via pylint
-lazy-object-proxy==1.4.1 \
-    --hash=sha256:159a745e61422217881c4de71f9eafd9d703b93af95618635849fe469a283661 \
-    --hash=sha256:23f63c0821cc96a23332e45dfaa83266feff8adc72b9bcaef86c202af765244f \
-    --hash=sha256:3b11be575475db2e8a6e11215f5aa95b9ec14de658628776e10d96fa0b4dac13 \
-    --hash=sha256:3f447aff8bc61ca8b42b73304f6a44fa0d915487de144652816f950a3f1ab821 \
-    --hash=sha256:4ba73f6089cd9b9478bc0a4fa807b47dbdb8fad1d8f31a0f0a5dbf26a4527a71 \
-    --hash=sha256:4f53eadd9932055eac465bd3ca1bd610e4d7141e1278012bd1f28646aebc1d0e \
-    --hash=sha256:64483bd7154580158ea90de5b8e5e6fc29a16a9b4db24f10193f0c1ae3f9d1ea \
-    --hash=sha256:6f72d42b0d04bfee2397aa1862262654b56922c20a9bb66bb76b6f0e5e4f9229 \
-    --hash=sha256:7c7f1ec07b227bdc561299fa2328e85000f90179a2f44ea30579d38e037cb3d4 \
-    --hash=sha256:7c8b1ba1e15c10b13cad4171cfa77f5bb5ec2580abc5a353907780805ebe158e \
-    --hash=sha256:8559b94b823f85342e10d3d9ca4ba5478168e1ac5658a8a2f18c991ba9c52c20 \
-    --hash=sha256:a262c7dfb046f00e12a2bdd1bafaed2408114a89ac414b0af8755c696eb3fc16 \
-    --hash=sha256:acce4e3267610c4fdb6632b3886fe3f2f7dd641158a843cf6b6a68e4ce81477b \
-    --hash=sha256:be089bb6b83fac7f29d357b2dc4cf2b8eb8d98fe9d9ff89f9ea6012970a853c7 \
-    --hash=sha256:bfab710d859c779f273cc48fb86af38d6e9210f38287df0069a63e40b45a2f5c \
-    --hash=sha256:c10d29019927301d524a22ced72706380de7cfc50f767217485a912b4c8bd82a \
-    --hash=sha256:dd6e2b598849b3d7aee2295ac765a578879830fb8966f70be8cd472e6069932e \
-    --hash=sha256:e408f1eacc0a68fed0c08da45f31d0ebb38079f043328dce69ff133b95c29dc1 \
+    # via -r contrib/automation/linux-requirements.txt.in, pylint
+lazy-object-proxy==1.4.3 \
+    --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
+    --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
+    --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
+    --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
+    --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
+    --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
+    --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
+    --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
+    --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
+    --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
+    --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
+    --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
+    --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
+    --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
+    --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
+    --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
+    --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
+    --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
+    --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
+    --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
+    --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0 \
     # via astroid
 mccabe==0.6.1 \
     --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
     --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
     # via pylint
-multidict==4.5.2 \
-    --hash=sha256:024b8129695a952ebd93373e45b5d341dbb87c17ce49637b34000093f243dd4f \
-    --hash=sha256:041e9442b11409be5e4fc8b6a97e4bcead758ab1e11768d1e69160bdde18acc3 \
-    --hash=sha256:045b4dd0e5f6121e6f314d81759abd2c257db4634260abcfe0d3f7083c4908ef \
-    --hash=sha256:047c0a04e382ef8bd74b0de01407e8d8632d7d1b4db6f2561106af812a68741b \
-    --hash=sha256:068167c2d7bbeebd359665ac4fff756be5ffac9cda02375b5c5a7c4777038e73 \
-    --hash=sha256:148ff60e0fffa2f5fad2eb25aae7bef23d8f3b8bdaf947a65cdbe84a978092bc \
-    --hash=sha256:1d1c77013a259971a72ddaa83b9f42c80a93ff12df6a4723be99d858fa30bee3 \
-    --hash=sha256:1d48bc124a6b7a55006d97917f695effa9725d05abe8ee78fd60d6588b8344cd \
-    --hash=sha256:31dfa2fc323097f8ad7acd41aa38d7c614dd1960ac6681745b6da124093dc351 \
-    --hash=sha256:34f82db7f80c49f38b032c5abb605c458bac997a6c3142e0d6c130be6fb2b941 \
-    --hash=sha256:3d5dd8e5998fb4ace04789d1d008e2bb532de501218519d70bb672c4c5a2fc5d \
-    --hash=sha256:4a6ae52bd3ee41ee0f3acf4c60ceb3f44e0e3bc52ab7da1c2b2aa6703363a3d1 \
-    --hash=sha256:4b02a3b2a2f01d0490dd39321c74273fed0568568ea0e7ea23e02bd1fb10a10b \
-    --hash=sha256:4b843f8e1dd6a3195679d9838eb4670222e8b8d01bc36c9894d6c3538316fa0a \
-    --hash=sha256:5de53a28f40ef3c4fd57aeab6b590c2c663de87a5af76136ced519923d3efbb3 \
-    --hash=sha256:61b2b33ede821b94fa99ce0b09c9ece049c7067a33b279f343adfe35108a4ea7 \
-    --hash=sha256:6a3a9b0f45fd75dc05d8e93dc21b18fc1670135ec9544d1ad4acbcf6b86781d0 \
-    --hash=sha256:76ad8e4c69dadbb31bad17c16baee61c0d1a4a73bed2590b741b2e1a46d3edd0 \
-    --hash=sha256:7ba19b777dc00194d1b473180d4ca89a054dd18de27d0ee2e42a103ec9b7d014 \
-    --hash=sha256:7c1b7eab7a49aa96f3db1f716f0113a8a2e93c7375dd3d5d21c4941f1405c9c5 \
-    --hash=sha256:7fc0eee3046041387cbace9314926aa48b681202f8897f8bff3809967a049036 \
-    --hash=sha256:8ccd1c5fff1aa1427100ce188557fc31f1e0a383ad8ec42c559aabd4ff08802d \
-    --hash=sha256:8e08dd76de80539d613654915a2f5196dbccc67448df291e69a88712ea21e24a \
-    --hash=sha256:c18498c50c59263841862ea0501da9f2b3659c00db54abfbf823a80787fde8ce \
-    --hash=sha256:c49db89d602c24928e68c0d510f4fcf8989d77defd01c973d6cbe27e684833b1 \
-    --hash=sha256:ce20044d0317649ddbb4e54dab3c1bcc7483c78c27d3f58ab3d0c7e6bc60d26a \
-    --hash=sha256:d1071414dd06ca2eafa90c85a079169bfeb0e5f57fd0b45d44c092546fcd6fd9 \
-    --hash=sha256:d3be11ac43ab1a3e979dac80843b42226d5d3cccd3986f2e03152720a4297cd7 \
-    --hash=sha256:db603a1c235d110c860d5f39988ebc8218ee028f07a7cbc056ba6424372ca31b \
+multidict==4.7.6 \
+    --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \
+    --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \
+    --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \
+    --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \
+    --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \
+    --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \
+    --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \
+    --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \
+    --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \
+    --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \
+    --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \
+    --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \
+    --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \
+    --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \
+    --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \
+    --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \
+    --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d \
     # via yarl
-pathspec==0.6.0 \
-    --hash=sha256:e285ccc8b0785beadd4c18e5708b12bb8fcf529a1e61215b3feff1d1e559ea5c \
+pathspec==0.8.0 \
+    --hash=sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0 \
+    --hash=sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061 \
     # via black
-pyflakes==2.1.1 \
-    --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
-    --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
-pygments==2.4.2 \
-    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
-    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
-pylint==2.3.1 \
-    --hash=sha256:5d77031694a5fb97ea95e828c8d10fc770a1df6eb3906067aaed42201a8a6a09 \
-    --hash=sha256:723e3db49555abaf9bf79dc474c6b9e2935ad82230b10c1138a71ea41ac0fff1
+pyflakes==2.2.0 \
+    --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \
+    --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8 \
+    # via -r contrib/automation/linux-requirements.txt.in
+pygments==2.7.1 \
+    --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
+    --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
+    # via -r contrib/automation/linux-requirements.txt.in
+pylint==2.6.0 \
+    --hash=sha256:bb4a908c9dadbc3aac18860550e870f58e1a02c9f2c204fdf5693d73be061210 \
+    --hash=sha256:bfe68f020f8a0fece830a22dd4d5dddb4ecc6137db04face4c3420a46a52239f \
+    # via -r contrib/automation/linux-requirements.txt.in
 python-levenshtein==0.12.0 \
-    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
-pyyaml==5.1.2 \
-    --hash=sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9 \
-    --hash=sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4 \
-    --hash=sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8 \
-    --hash=sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696 \
-    --hash=sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34 \
-    --hash=sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9 \
-    --hash=sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73 \
-    --hash=sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299 \
-    --hash=sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b \
-    --hash=sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae \
-    --hash=sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681 \
-    --hash=sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41 \
-    --hash=sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8 \
+    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 \
+    # via -r contrib/automation/linux-requirements.txt.in
+pyyaml==5.3.1 \
+    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
+    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
+    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
+    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
+    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
+    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
+    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
+    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
+    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
+    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
+    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a \
     # via vcrpy
-regex==2019.11.1 \
-    --hash=sha256:15454b37c5a278f46f7aa2d9339bda450c300617ca2fca6558d05d870245edc7 \
-    --hash=sha256:1ad40708c255943a227e778b022c6497c129ad614bb7a2a2f916e12e8a359ee7 \
-    --hash=sha256:5e00f65cc507d13ab4dfa92c1232d004fa202c1d43a32a13940ab8a5afe2fb96 \
-    --hash=sha256:604dc563a02a74d70ae1f55208ddc9bfb6d9f470f6d1a5054c4bd5ae58744ab1 \
-    --hash=sha256:720e34a539a76a1fedcebe4397290604cc2bdf6f81eca44adb9fb2ea071c0c69 \
-    --hash=sha256:7caf47e4a9ac6ef08cabd3442cc4ca3386db141fb3c8b2a7e202d0470028e910 \
-    --hash=sha256:c31eaf28c6fe75ea329add0022efeed249e37861c19681960f99bbc7db981fb2 \
-    --hash=sha256:c7393597191fc2043c744db021643549061e12abe0b3ff5c429d806de7b93b66 \
-    --hash=sha256:d2b302f8cdd82c8f48e9de749d1d17f85ce9a0f082880b9a4859f66b07037dc6 \
-    --hash=sha256:e3d8dd0ec0ea280cf89026b0898971f5750a7bd92cb62c51af5a52abd020054a \
-    --hash=sha256:ec032cbfed59bd5a4b8eab943c310acfaaa81394e14f44454ad5c9eba4f24a74 \
+regex==2020.9.27 \
+    --hash=sha256:088afc8c63e7bd187a3c70a94b9e50ab3f17e1d3f52a32750b5b77dbe99ef5ef \
+    --hash=sha256:1fe0a41437bbd06063aa184c34804efa886bcc128222e9916310c92cd54c3b4c \
+    --hash=sha256:3d20024a70b97b4f9546696cbf2fd30bae5f42229fbddf8661261b1eaff0deb7 \
+    --hash=sha256:41bb65f54bba392643557e617316d0d899ed5b4946dccee1cb6696152b29844b \
+    --hash=sha256:4318d56bccfe7d43e5addb272406ade7a2274da4b70eb15922a071c58ab0108c \
+    --hash=sha256:4707f3695b34335afdfb09be3802c87fa0bc27030471dbc082f815f23688bc63 \
+    --hash=sha256:49f23ebd5ac073765ecbcf046edc10d63dcab2f4ae2bce160982cb30df0c0302 \
+    --hash=sha256:5533a959a1748a5c042a6da71fe9267a908e21eded7a4f373efd23a2cbdb0ecc \
+    --hash=sha256:5d892a4f1c999834eaa3c32bc9e8b976c5825116cde553928c4c8e7e48ebda67 \
+    --hash=sha256:5f18875ac23d9aa2f060838e8b79093e8bb2313dbaaa9f54c6d8e52a5df097be \
+    --hash=sha256:60b0e9e6dc45683e569ec37c55ac20c582973841927a85f2d8a7d20ee80216ab \
+    --hash=sha256:816064fc915796ea1f26966163f6845de5af78923dfcecf6551e095f00983650 \
+    --hash=sha256:84cada8effefe9a9f53f9b0d2ba9b7b6f5edf8d2155f9fdbe34616e06ececf81 \
+    --hash=sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19 \
+    --hash=sha256:8d69cef61fa50c8133382e61fd97439de1ae623fe943578e477e76a9d9471637 \
+    --hash=sha256:9a02d0ae31d35e1ec12a4ea4d4cca990800f66a917d0fb997b20fbc13f5321fc \
+    --hash=sha256:9bc13e0d20b97ffb07821aa3e113f9998e84994fe4d159ffa3d3a9d1b805043b \
+    --hash=sha256:a6f32aea4260dfe0e55dc9733ea162ea38f0ea86aa7d0f77b15beac5bf7b369d \
+    --hash=sha256:ae91972f8ac958039920ef6e8769277c084971a142ce2b660691793ae44aae6b \
+    --hash=sha256:c570f6fa14b9c4c8a4924aaad354652366577b4f98213cf76305067144f7b100 \
+    --hash=sha256:c9443124c67b1515e4fe0bb0aa18df640965e1030f468a2a5dc2589b26d130ad \
+    --hash=sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3 \
+    --hash=sha256:eaf548d117b6737df379fdd53bdde4f08870e66d7ea653e230477f071f861121 \
+    --hash=sha256:ebbe29186a3d9b0c591e71b7393f1ae08c83cb2d8e517d2a822b8f7ec99dfd8b \
+    --hash=sha256:eda4771e0ace7f67f58bc5b560e27fb20f32a148cbc993b0c3835970935c2707 \
+    --hash=sha256:f1b3afc574a3db3b25c89161059d857bd4909a1269b0b3cb3c904677c8c4a3f7 \
+    --hash=sha256:f2388013e68e750eaa16ccbea62d4130180c26abb1d8e5d584b9baf69672b30f \
     # via black
-six==1.12.0 \
-    --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
-    --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
+six==1.15.0 \
+    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
+    --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced \
     # via astroid, vcrpy
-toml==0.10.0 \
-    --hash=sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c \
-    --hash=sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e \
-    # via black
-typed-ast==1.4.0 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
-    --hash=sha256:18511a0b3e7922276346bcb47e2ef9f38fb90fd31cb9223eed42c85d1312344e \
-    --hash=sha256:262c247a82d005e43b5b7f69aff746370538e176131c32dda9cb0f324d27141e \
-    --hash=sha256:2b907eb046d049bcd9892e3076c7a6456c93a25bebfe554e931620c90e6a25b0 \
-    --hash=sha256:354c16e5babd09f5cb0ee000d54cfa38401d8b8891eefa878ac772f827181a3c \
-    --hash=sha256:4e0b70c6fc4d010f8107726af5fd37921b666f5b31d9331f0bd24ad9a088e631 \
-    --hash=sha256:630968c5cdee51a11c05a30453f8cd65e0cc1d2ad0d9192819df9978984529f4 \
-    --hash=sha256:66480f95b8167c9c5c5c87f32cf437d585937970f3fc24386f313a4c97b44e34 \
-    --hash=sha256:71211d26ffd12d63a83e079ff258ac9d56a1376a25bc80b1cdcdf601b855b90b \
-    --hash=sha256:95bd11af7eafc16e829af2d3df510cecfd4387f6453355188342c3e79a2ec87a \
-    --hash=sha256:bc6c7d3fa1325a0c6613512a093bc2a2a15aeec350451cbdf9e1d4bffe3e3233 \
-    --hash=sha256:cc34a6f5b426748a507dd5d1de4c1978f2eb5626d51326e43280941206c209e1 \
-    --hash=sha256:d755f03c1e4a51e9b24d899561fec4ccaf51f210d52abdf8c07ee2849b212a36 \
-    --hash=sha256:d7c45933b1bdfaf9f36c579671fec15d25b06c8398f113dab64c18ed1adda01d \
-    --hash=sha256:d896919306dd0aa22d0132f62a1b78d11aaf4c9fc5b3410d3c666b818191630a \
-    --hash=sha256:ffde2fbfad571af120fcbfbbc61c72469e72f550d676c3342492a9dfdefb8f12
-vcrpy==2.0.1 \
-    --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
-    --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
-wrapt==1.11.2 \
-    --hash=sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1 \
+toml==0.10.1 \
+    --hash=sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f \
+    --hash=sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88 \
+    # via black, pylint
+typed-ast==1.4.1 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+    --hash=sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355 \
+    --hash=sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919 \
+    --hash=sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa \
+    --hash=sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652 \
+    --hash=sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75 \
+    --hash=sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01 \
+    --hash=sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d \
+    --hash=sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1 \
+    --hash=sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907 \
+    --hash=sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c \
+    --hash=sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3 \
+    --hash=sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b \
+    --hash=sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614 \
+    --hash=sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb \
+    --hash=sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b \
+    --hash=sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41 \
+    --hash=sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6 \
+    --hash=sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34 \
+    --hash=sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe \
+    --hash=sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4 \
+    --hash=sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7 \
+    # via -r contrib/automation/linux-requirements.txt.in, astroid, black
+typing-extensions==3.7.4.3 \
+    --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \
+    --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \
+    --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f \
+    # via yarl
+vcrpy==4.1.0 \
+    --hash=sha256:4138e79eb35981ad391406cbb7227bce7eba8bad788dcf1a89c2e4a8b740debe \
+    --hash=sha256:d833248442bbc560599add895c9ab0ef518676579e8dc72d8b0933bdb3880253 \
+    # via -r contrib/automation/linux-requirements.txt.in
+wrapt==1.12.1 \
+    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
     # via astroid, vcrpy
-yarl==1.3.0 \
-    --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
-    --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
-    --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
-    --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
-    --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
-    --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
-    --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
-    --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
-    --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
-    --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
-    --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1 \
+yarl==1.6.0 \
+    --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \
+    --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \
+    --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \
+    --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \
+    --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \
+    --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \
+    --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \
+    --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \
+    --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \
+    --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \
+    --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \
+    --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \
+    --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \
+    --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \
+    --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \
+    --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \
+    --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a \
     # via vcrpy
 
 # WARNING: The following packages were not pinned, but pip requires them to be
 # pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
-# setuptools==41.6.0        # via python-levenshtein
+# setuptools
--- a/contrib/automation/linux-requirements.txt.in	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/automation/linux-requirements.txt.in	Tue Oct 20 22:04:04 2020 +0530
@@ -1,9 +1,12 @@
 # black pulls in typed-ast, which doesn't install on PyPy.
-black ; python_version >= '3.6' and platform_python_implementation != 'PyPy'
+black==19.10b0 ; python_version >= '3.6' and platform_python_implementation != 'PyPy'
 # Bazaar doesn't work with Python 3 nor PyPy.
 bzr ; python_version <= '2.7' and platform_python_implementation == 'CPython'
 docutils
 fuzzywuzzy
+# isort 5.0 drops support for Python 3.5. We can remove this line when we
+# drop support for 3.5.
+isort < 5.0
 pyflakes
 pygments
 pylint
--- a/contrib/check-py3-compat.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/check-py3-compat.py	Tue Oct 20 22:04:04 2020 +0530
@@ -97,6 +97,15 @@
     if sys.version_info[0] == 2:
         fn = check_compat_py2
     else:
+        # check_compat_py3 will import every filename we specify as long as it
+        # starts with one of a few prefixes. It does this by converting
+        # specified filenames like 'mercurial/foo.py' to 'mercurial.foo' and
+        # importing that. When running standalone (not as part of a test), this
+        # means we actually import the installed versions, not the files we just
+        # specified. When running as test-check-py3-compat.t, we technically
+        # would import the correct paths, but it's cleaner to have both cases
+        # use the same import logic.
+        sys.path.insert(0, '.')
         fn = check_compat_py3
 
     for f in sys.argv[1:]:
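
With the sys.path tweak above, a standalone run now imports the working-copy sources rather than an installed Mercurial. A hedged usage sketch, assuming invocation from the repository root (the file names are illustrative):

  $ cd "$(hg root)"
  $ python3 contrib/check-py3-compat.py mercurial/util.py hgext/rebase.py

Each argument such as mercurial/util.py is rewritten to a module name (mercurial.util) and imported, so import-time incompatibilities surface immediately.
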
--- a/contrib/chg/Makefile	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/chg/Makefile	Tue Oct 20 22:04:04 2020 +0530
@@ -8,6 +8,9 @@
 ifdef HGPATH
 override CPPFLAGS += -DHGPATH=\"$(HGPATH)\"
 endif
+ifdef HGPATHREL
+override CPPFLAGS += -DHGPATHREL=\"$(HGPATHREL)\"
+endif
 
 DESTDIR =
 PREFIX = /usr/local
--- a/contrib/chg/README	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/chg/README	Tue Oct 20 22:04:04 2020 +0530
@@ -30,3 +30,11 @@
  * CHGSOCKNAME specifies the socket path of the background cmdserver.
  * CHGTIMEOUT specifies how many seconds chg will wait before giving up
    connecting to a cmdserver. If it is 0, chg will wait forever. Default: 60
+
+Build environment variables:
+
+ * HGPATH: the path to the hg executable to call when CHGHG and HG are not set,
+   instead of "hg"
+ * HGPATHREL=1: when CHGHG and HG are not set, the hg executable will be ./hg
+   relative to the chg executable. Only works on linux, falls back to "hg"
+   otherwise.
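
A short sketch of how these build variables are meant to be passed, assuming a build from the Mercurial source tree (paths are illustrative):

  # bake an absolute hg path into the chg binary
  $ make -C contrib/chg HGPATH=/usr/local/bin/hg

  # or resolve ./hg relative to the chg executable at run time
  $ make -C contrib/chg HGPATHREL=1

Both simply become -D preprocessor defines in CPPFLAGS, as the contrib/chg/Makefile hunk above shows.
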
--- a/contrib/chg/chg.c	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/chg/chg.c	Tue Oct 20 22:04:04 2020 +0530
@@ -184,13 +184,46 @@
 		abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r);
 }
 
+/* If the current program is, say, /a/b/c/chg, returns /a/b/c/hg. */
+static char *getrelhgcmd(void)
+{
+	ssize_t n;
+	char *res, *slash;
+	int maxsize = 4096;
+	res = malloc(maxsize);
+	if (res == NULL)
+		goto cleanup;
+	n = readlink("/proc/self/exe", res, maxsize);
+	if (n < 0 || n >= maxsize)
+		goto cleanup;
+	res[n] = '\0';
+	slash = strrchr(res, '/');
+	if (slash == NULL)
+		goto cleanup;
+	/* 4 is strlen("/hg") + nul byte */
+	if (slash + 4 >= res + maxsize)
+		goto cleanup;
+	memcpy(slash, "/hg", 4);
+	return res;
+cleanup:
+	free(res);
+	return NULL;
+}
+
 static const char *gethgcmd(void)
 {
 	static const char *hgcmd = NULL;
+#ifdef HGPATHREL
+	int tryrelhgcmd = 1;
+#else
+	int tryrelhgcmd = 0;
+#endif
 	if (!hgcmd) {
 		hgcmd = getenv("CHGHG");
 		if (!hgcmd || hgcmd[0] == '\0')
 			hgcmd = getenv("HG");
+		if (tryrelhgcmd && (!hgcmd || hgcmd[0] == '\0'))
+			hgcmd = getrelhgcmd();
 		if (!hgcmd || hgcmd[0] == '\0')
 #ifdef HGPATH
 			hgcmd = (HGPATH);
@@ -373,8 +406,15 @@
 }
 
 /*
- * Test whether the command is unsupported or not. This is not designed to
- * cover all cases. But it's fast, does not depend on the server.
+ * Test whether the command and the environment is unsupported or not.
+ *
+ * If any of the stdio file descriptors are not present (rare, but some tools
+ * might spawn new processes without stdio instead of redirecting them to the
+ * null device), then mark it as not supported because attachio won't work
+ * correctly.
+ *
+ * The command list is not designed to cover all cases. But it's fast, and does
+ * not depend on the server.
  */
 static int isunsupported(int argc, const char *argv[])
 {
@@ -384,6 +424,13 @@
 	};
 	unsigned int state = 0;
 	int i;
+	/* use fcntl to test missing stdio fds */
+	if (fcntl(STDIN_FILENO, F_GETFD) == -1 ||
+	    fcntl(STDOUT_FILENO, F_GETFD) == -1 ||
+	    fcntl(STDERR_FILENO, F_GETFD) == -1) {
+		debugmsg("stdio fds are missing");
+		return 1;
+	}
 	for (i = 0; i < argc; ++i) {
 		if (strcmp(argv[i], "--") == 0)
 			break;
--- a/contrib/examples/fix.hgrc	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/examples/fix.hgrc	Tue Oct 20 22:04:04 2020 +0530
@@ -3,7 +3,7 @@
 clang-format:pattern = set:(**.c or **.cc or **.h) and not "include:contrib/clang-format-ignorelist"
 
 rustfmt:command = rustfmt +nightly
-rustfmt:pattern = set:**.rs
+rustfmt:pattern = set:"**.rs" - "mercurial/thirdparty/**"
 
 black:command = black --config=black.toml -
 black:pattern = set:**.py - mercurial/thirdparty/**
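
This file is an example configuration for the fix extension, so one hedged way to exercise the tightened pattern is to load it (for instance via an %include in your hgrc) and run the fixers over the working directory:

  $ hg --config extensions.fix= fix --working-dir

With the new pattern, rustfmt is applied to **.rs files but skips anything under mercurial/thirdparty/, mirroring the exclusion black already used.
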
--- a/contrib/heptapod-ci.yml	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/heptapod-ci.yml	Tue Oct 20 22:04:04 2020 +0530
@@ -41,6 +41,12 @@
     variables:
         PYTHON: python3
 
+rust-cargo-test-py3-dirstate-tree:
+    <<: *rust_cargo_test
+    variables:
+        PYTHON: python3
+        HG_RUST_FEATURES: dirstate-tree
+
 test-py2:
     <<: *runtests
     variables:
@@ -82,6 +88,15 @@
         PYTHON: python3
         TEST_HGMODULEPOLICY: "rust+c"
 
+test-py3-rust-dirstate-tree:
+    <<: *runtests
+    variables:
+        HGWITHRUSTEXT: cpython
+        RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
+        PYTHON: python3
+        TEST_HGMODULEPOLICY: "rust+c"
+        HG_RUST_FEATURES: "dirstate-tree"
+
 test-py2-chg:
     <<: *runtests
     variables:
--- a/contrib/install-windows-dependencies.ps1	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/install-windows-dependencies.ps1	Tue Oct 20 22:04:04 2020 +0530
@@ -27,25 +27,20 @@
 $PYTHON27_X86_URL = "https://www.python.org/ftp/python/2.7.18/python-2.7.18.msi"
 $PYTHON27_X86_SHA256 = "d901802e90026e9bad76b8a81f8dd7e43c7d7e8269d9281c9e9df7a9c40480a9"
 
-$PYTHON35_x86_URL = "https://www.python.org/ftp/python/3.5.4/python-3.5.4.exe"
-$PYTHON35_x86_SHA256 = "F27C2D67FD9688E4970F3BFF799BB9D722A0D6C2C13B04848E1F7D620B524B0E"
-$PYTHON35_x64_URL = "https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe"
-$PYTHON35_x64_SHA256 = "9B7741CC32357573A77D2EE64987717E527628C38FD7EAF3E2AACA853D45A1EE"
-
-$PYTHON36_x86_URL = "https://www.python.org/ftp/python/3.6.8/python-3.6.8.exe"
-$PYTHON36_x86_SHA256 = "89871D432BC06E4630D7B64CB1A8451E53C80E68DE29029976B12AAD7DBFA5A0"
-$PYTHON36_x64_URL = "https://www.python.org/ftp/python/3.6.8/python-3.6.8-amd64.exe"
-$PYTHON36_x64_SHA256 = "96088A58B7C43BC83B84E6B67F15E8706C614023DD64F9A5A14E81FF824ADADC"
+$PYTHON37_x86_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9.exe"
+$PYTHON37_x86_SHA256 = "769bb7c74ad1df6d7d74071cc16a984ff6182e4016e11b8949b93db487977220"
+$PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9-amd64.exe"
+$PYTHON37_x64_SHA256 = "e69ed52afb5a722e5c56f6c21d594e85c17cb29f12f18bb69751cf1714e0f987"
 
-$PYTHON37_x86_URL = "https://www.python.org/ftp/python/3.7.7/python-3.7.7.exe"
-$PYTHON37_x86_SHA256 = "27fbffcd342d5055acc64050db4c35d0025661521e642b59c381dcba2e162c6a"
-$PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.7/python-3.7.7-amd64.exe"
-$PYTHON37_x64_SHA256 = "1a0368663ceff999d865de955992b6ea3cb0c8cb15a1a296a8eb7df19cc59e69"
+$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6.exe"
+$PYTHON38_x86_SHA256 = "287d5df01ff22ff09e6a487ae018603ee19eade71d462ec703850c96f1d5e8a0"
+$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6-amd64.exe"
+$PYTHON38_x64_SHA256 = "328a257f189cb500606bb26ab0fbdd298ed0e05d8c36540a322a1744f489a0a0"
 
-$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.2/python-3.8.2.exe"
-$PYTHON38_x86_SHA256 = "03ac5754a69c9c11c08d1f4d694c14625a4d27348ad4dd2d1253e2547819db2c"
-$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.2/python-3.8.2-amd64.exe"
-$PYTHON38_x64_SHA256 = "8e400e3f32cdcb746e62e0db4d3ae4cba1f927141ebc4d0d5a4006b0daee8921"
+$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0.exe"
+$PYTHON39_x86_SHA256 = "a4c65917f4225d1543959342f0615c813a4e9e7ff1137c4394ff6a5290ac1913"
+$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0-amd64.exe"
+$PYTHON39_x64_SHA256 = "fd2e2c6612d43bb6b213b72fc53f07d73d99059fa72c96e44bde12e7815073ae"
 
 # PIP 19.2.3.
 $PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py"
@@ -126,11 +121,11 @@
 
     Invoke-Process "${prefix}\assets\rustup-init.exe" "-y --default-host x86_64-pc-windows-msvc"
     Invoke-Process "${prefix}\cargo\bin\rustup.exe" "target add i686-pc-windows-msvc"
-    Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.42.0"
+    Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.46.0"
     Invoke-Process "${prefix}\cargo\bin\rustup.exe" "component add clippy"
 
     # Install PyOxidizer for packaging.
-    Invoke-Process "${prefix}\cargo\bin\cargo.exe" "install --version 0.7.0 pyoxidizer"
+    Invoke-Process "${prefix}\cargo\bin\cargo.exe" "install --git https://github.com/indygreg/PyOxidizer.git --rev 4697fb25918dfad6dc73288daeea501063963a08 pyoxidizer"
 }
 
 function Install-Dependencies($prefix) {
@@ -143,14 +138,12 @@
     Secure-Download $VC9_PYTHON_URL ${prefix}\assets\VCForPython27.msi $VC9_PYTHON_SHA256
     Secure-Download $PYTHON27_x86_URL ${prefix}\assets\python27-x86.msi $PYTHON27_x86_SHA256
     Secure-Download $PYTHON27_x64_URL ${prefix}\assets\python27-x64.msi $PYTHON27_x64_SHA256
-    Secure-Download $PYTHON35_x86_URL ${prefix}\assets\python35-x86.exe $PYTHON35_x86_SHA256
-    Secure-Download $PYTHON35_x64_URL ${prefix}\assets\python35-x64.exe $PYTHON35_x64_SHA256
-    Secure-Download $PYTHON36_x86_URL ${prefix}\assets\python36-x86.exe $PYTHON36_x86_SHA256
-    Secure-Download $PYTHON36_x64_URL ${prefix}\assets\python36-x64.exe $PYTHON36_x64_SHA256
     Secure-Download $PYTHON37_x86_URL ${prefix}\assets\python37-x86.exe $PYTHON37_x86_SHA256
     Secure-Download $PYTHON37_x64_URL ${prefix}\assets\python37-x64.exe $PYTHON37_x64_SHA256
     Secure-Download $PYTHON38_x86_URL ${prefix}\assets\python38-x86.exe $PYTHON38_x86_SHA256
     Secure-Download $PYTHON38_x64_URL ${prefix}\assets\python38-x64.exe $PYTHON38_x64_SHA256
+    Secure-Download $PYTHON39_x86_URL ${prefix}\assets\python39-x86.exe $PYTHON39_x86_SHA256
+    Secure-Download $PYTHON39_x64_URL ${prefix}\assets\python39-x64.exe $PYTHON39_x64_SHA256
     Secure-Download $PIP_URL ${pip} $PIP_SHA256
     Secure-Download $VIRTUALENV_URL ${prefix}\assets\virtualenv.tar.gz $VIRTUALENV_SHA256
     Secure-Download $VS_BUILD_TOOLS_URL ${prefix}\assets\vs_buildtools.exe $VS_BUILD_TOOLS_SHA256
@@ -169,14 +162,12 @@
     Invoke-Process ${prefix}\python27-x64\python.exe ${prefix}\assets\get-pip.py
     Invoke-Process ${prefix}\python27-x64\Scripts\pip.exe "install ${prefix}\assets\virtualenv.tar.gz"
 
-    Install-Python3 "Python 3.5 32-bit" ${prefix}\assets\python35-x86.exe ${prefix}\python35-x86 ${pip}
-    Install-Python3 "Python 3.5 64-bit" ${prefix}\assets\python35-x64.exe ${prefix}\python35-x64 ${pip}
-    Install-Python3 "Python 3.6 32-bit" ${prefix}\assets\python36-x86.exe ${prefix}\python36-x86 ${pip}
-    Install-Python3 "Python 3.6 64-bit" ${prefix}\assets\python36-x64.exe ${prefix}\python36-x64 ${pip}
     Install-Python3 "Python 3.7 32-bit" ${prefix}\assets\python37-x86.exe ${prefix}\python37-x86 ${pip}
     Install-Python3 "Python 3.7 64-bit" ${prefix}\assets\python37-x64.exe ${prefix}\python37-x64 ${pip}
     Install-Python3 "Python 3.8 32-bit" ${prefix}\assets\python38-x86.exe ${prefix}\python38-x86 ${pip}
     Install-Python3 "Python 3.8 64-bit" ${prefix}\assets\python38-x64.exe ${prefix}\python38-x64 ${pip}
+    Install-Python3 "Python 3.9 32-bit" ${prefix}\assets\python39-x86.exe ${prefix}\python39-x86 ${pip}
+    Install-Python3 "Python 3.9 64-bit" ${prefix}\assets\python39-x64.exe ${prefix}\python39-x64 ${pip}
 
     Write-Output "installing Visual Studio 2017 Build Tools and SDKs"
     Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
--- a/contrib/packaging/hgpackaging/pyoxidizer.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/packaging/hgpackaging/pyoxidizer.py	Tue Oct 20 22:04:04 2020 +0530
@@ -42,10 +42,10 @@
 ]
 
 STAGING_RULES_APP = [
-    ('mercurial/helptext/**/*.txt', 'helptext/'),
-    ('mercurial/defaultrc/*.rc', 'defaultrc/'),
-    ('mercurial/locale/**/*', 'locale/'),
-    ('mercurial/templates/**/*', 'templates/'),
+    ('lib/mercurial/helptext/**/*.txt', 'helptext/'),
+    ('lib/mercurial/defaultrc/*.rc', 'defaultrc/'),
+    ('lib/mercurial/locale/**/*', 'locale/'),
+    ('lib/mercurial/templates/**/*', 'templates/'),
 ]
 
 STAGING_EXCLUDES_WINDOWS = [
@@ -109,11 +109,9 @@
     # Now assemble all the files from PyOxidizer into the staging directory.
     shutil.copytree(build_dir, out_dir)
 
-    # Move some of those files around.
+    # Move some of those files around. We can get rid of this once Mercurial
+    # is taught to use the importlib APIs for reading resources.
     process_install_rules(STAGING_RULES_APP, build_dir, out_dir)
-    # Nuke the mercurial/* directory, as we copied resources
-    # to an appropriate location just above.
-    shutil.rmtree(out_dir / "mercurial")
 
     # We also need to run setup.py build_doc to produce html files,
     # as they aren't built as part of ``pip install``.
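
Editor's note: the new comment in the hunk above mentions teaching Mercurial to read resources through the importlib APIs. A minimal sketch of that approach, assuming the stdlib `importlib.resources` module; the helper name is hypothetical and this is not Mercurial code:

```python
# Minimal sketch: resolving a packaged resource via importlib instead of
# a path computed next to the executable.
import importlib.resources

def read_helptext(name):
    # Looks the resource up relative to the installed mercurial.helptext
    # package, which works for on-disk, zipped, or oxidized installs that
    # provide a compatible resource reader.
    return importlib.resources.read_text('mercurial.helptext', name)
```
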
--- a/contrib/packaging/hgpackaging/wix.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/packaging/hgpackaging/wix.py	Tue Oct 20 22:04:04 2020 +0530
@@ -165,7 +165,9 @@
         if dir_name == '.':
             parent_directory_id = 'INSTALLDIR'
         else:
-            parent_directory_id = 'hg.dir.%s' % dir_name.replace('/', '.')
+            parent_directory_id = 'hg.dir.%s' % dir_name.replace(
+                '/', '.'
+            ).replace('-', '_')
 
         fragment = doc.createElement('Fragment')
         directory_ref = doc.createElement('DirectoryRef')
@@ -178,7 +180,9 @@
                 and '/' not in possible_child
                 and possible_child != '.'
             ):
-                child_directory_id = 'hg.dir.%s' % possible_child
+                child_directory_id = ('hg.dir.%s' % possible_child).replace(
+                    '-', '_'
+                )
                 name = possible_child
             else:
                 if not possible_child.startswith('%s/' % dir_name):
@@ -189,7 +193,7 @@
 
                 child_directory_id = 'hg.dir.%s' % possible_child.replace(
                     '/', '.'
-                )
+                ).replace('-', '_')
 
             directory = doc.createElement('Directory')
             directory.setAttribute('Id', child_directory_id)
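
Editor's note: the three hunks above apply the same normalization, since WiX `Id` attributes may not contain `/` or `-`. A standalone sketch of the transformation (the sample paths are made up):

```python
# Sketch of the Id normalization performed in the hunks above; the inputs
# below are hypothetical directory names.
def directory_id(dir_name):
    # '/' separates path components (mapped to '.') and '-' is not legal
    # in a WiX identifier (mapped to '_').
    return 'hg.dir.%s' % dir_name.replace('/', '.').replace('-', '_')

assert directory_id('helptext/internals') == 'hg.dir.helptext.internals'
assert directory_id('some-dir/sub-dir') == 'hg.dir.some_dir.sub_dir'
```
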
--- a/contrib/packaging/requirements_win32.txt	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/packaging/requirements_win32.txt	Tue Oct 20 22:04:04 2020 +0530
@@ -4,42 +4,114 @@
 #
 #    pip-compile --generate-hashes --output-file=contrib/packaging/requirements_win32.txt contrib/packaging/requirements_win32.txt.in
 #
-certifi==2019.9.11 \
-    --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
-    --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
+certifi==2020.6.20 \
+    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
+    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
     # via dulwich
-configparser==4.0.2 \
-    --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
-    --hash=sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df \
-    # via entrypoints
-docutils==0.15.2 \
-    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
-    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
-    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99
-dulwich==0.19.13 \
-    --hash=sha256:0e442f6f96e6d97270a7cca4e75306b6b0228627bdf57dde3759e0e345a6b523 \
-    --hash=sha256:667f49536ccba09d3b90bac80d44048e45566f84b98a5e139cc8c70757a6ae60 \
-    --hash=sha256:82792a9d49b112fa2151fa0fb29b01667855a843ff99325b1c1578a4aec11b57 \
-    --hash=sha256:aa628449c5f594a9a282f4d9e5993fef65481ef5e3b9b6c52ff31200f8f5dc95 \
-    --hash=sha256:ab4668bc4e1996d12eb1910e123a09edcff8e166e7ec46db5aafb5c7e250b99f \
-    --hash=sha256:c35ed2cd5b263ce0d67758ffba590c0466ff13b048457ff060b7d2e6cb55a40e \
-    --hash=sha256:c8b48079a14850cbeb788b38e1061ae6db75061431c1c0f91382460be4c84bbe \
-    --hash=sha256:dfcd9943c69f963dd61a027f480d16f548ea5905c2485be8f4b8f130df2c32de \
-    --hash=sha256:e3693c3238c1a5fc1e4427281c4455d78549f4797f2a7107a5f4443b21efafb4
-entrypoints==0.3 \
-    --hash=sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19 \
-    --hash=sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451 \
-    # via keyring
-keyring==18.0.1 \
-    --hash=sha256:67d6cc0132bd77922725fae9f18366bb314fd8f95ff4d323a4df41890a96a838 \
-    --hash=sha256:7b29ebfcf8678c4da531b2478a912eea01e80007e5ddca9ee0c7038cb3489ec6
-pygments==2.4.2 \
-    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
-    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
+cffi==1.14.3 \
+    --hash=sha256:005f2bfe11b6745d726dbb07ace4d53f057de66e336ff92d61b8c7e9c8f4777d \
+    --hash=sha256:09e96138280241bd355cd585148dec04dbbedb4f46128f340d696eaafc82dd7b \
+    --hash=sha256:0b1ad452cc824665ddc682400b62c9e4f5b64736a2ba99110712fdee5f2505c4 \
+    --hash=sha256:0ef488305fdce2580c8b2708f22d7785ae222d9825d3094ab073e22e93dfe51f \
+    --hash=sha256:15f351bed09897fbda218e4db5a3d5c06328862f6198d4fb385f3e14e19decb3 \
+    --hash=sha256:22399ff4870fb4c7ef19fff6eeb20a8bbf15571913c181c78cb361024d574579 \
+    --hash=sha256:23e5d2040367322824605bc29ae8ee9175200b92cb5483ac7d466927a9b3d537 \
+    --hash=sha256:2791f68edc5749024b4722500e86303a10d342527e1e3bcac47f35fbd25b764e \
+    --hash=sha256:2f9674623ca39c9ebe38afa3da402e9326c245f0f5ceff0623dccdac15023e05 \
+    --hash=sha256:3363e77a6176afb8823b6e06db78c46dbc4c7813b00a41300a4873b6ba63b171 \
+    --hash=sha256:33c6cdc071ba5cd6d96769c8969a0531be2d08c2628a0143a10a7dcffa9719ca \
+    --hash=sha256:3b8eaf915ddc0709779889c472e553f0d3e8b7bdf62dab764c8921b09bf94522 \
+    --hash=sha256:3cb3e1b9ec43256c4e0f8d2837267a70b0e1ca8c4f456685508ae6106b1f504c \
+    --hash=sha256:3eeeb0405fd145e714f7633a5173318bd88d8bbfc3dd0a5751f8c4f70ae629bc \
+    --hash=sha256:44f60519595eaca110f248e5017363d751b12782a6f2bd6a7041cba275215f5d \
+    --hash=sha256:4d7c26bfc1ea9f92084a1d75e11999e97b62d63128bcc90c3624d07813c52808 \
+    --hash=sha256:529c4ed2e10437c205f38f3691a68be66c39197d01062618c55f74294a4a4828 \
+    --hash=sha256:6642f15ad963b5092d65aed022d033c77763515fdc07095208f15d3563003869 \
+    --hash=sha256:85ba797e1de5b48aa5a8427b6ba62cf69607c18c5d4eb747604b7302f1ec382d \
+    --hash=sha256:8f0f1e499e4000c4c347a124fa6a27d37608ced4fe9f7d45070563b7c4c370c9 \
+    --hash=sha256:a624fae282e81ad2e4871bdb767e2c914d0539708c0f078b5b355258293c98b0 \
+    --hash=sha256:b0358e6fefc74a16f745afa366acc89f979040e0cbc4eec55ab26ad1f6a9bfbc \
+    --hash=sha256:bbd2f4dfee1079f76943767fce837ade3087b578aeb9f69aec7857d5bf25db15 \
+    --hash=sha256:bf39a9e19ce7298f1bd6a9758fa99707e9e5b1ebe5e90f2c3913a47bc548747c \
+    --hash=sha256:c11579638288e53fc94ad60022ff1b67865363e730ee41ad5e6f0a17188b327a \
+    --hash=sha256:c150eaa3dadbb2b5339675b88d4573c1be3cb6f2c33a6c83387e10cc0bf05bd3 \
+    --hash=sha256:c53af463f4a40de78c58b8b2710ade243c81cbca641e34debf3396a9640d6ec1 \
+    --hash=sha256:cb763ceceae04803adcc4e2d80d611ef201c73da32d8f2722e9d0ab0c7f10768 \
+    --hash=sha256:cc75f58cdaf043fe6a7a6c04b3b5a0e694c6a9e24050967747251fb80d7bce0d \
+    --hash=sha256:d80998ed59176e8cba74028762fbd9b9153b9afc71ea118e63bbf5d4d0f9552b \
+    --hash=sha256:de31b5164d44ef4943db155b3e8e17929707cac1e5bd2f363e67a56e3af4af6e \
+    --hash=sha256:e66399cf0fc07de4dce4f588fc25bfe84a6d1285cc544e67987d22663393926d \
+    --hash=sha256:f0620511387790860b249b9241c2f13c3a80e21a73e0b861a2df24e9d6f56730 \
+    --hash=sha256:f4eae045e6ab2bb54ca279733fe4eb85f1effda392666308250714e01907f394 \
+    --hash=sha256:f92cdecb618e5fa4658aeb97d5eb3d2f47aa94ac6477c6daf0f306c5a3b9e6b1 \
+    --hash=sha256:f92f789e4f9241cd262ad7a555ca2c648a98178a953af117ef7fad46aa1d5591 \
+    # via cryptography
+cryptography==3.1.1 \
+    --hash=sha256:21b47c59fcb1c36f1113f3709d37935368e34815ea1d7073862e92f810dc7499 \
+    --hash=sha256:451cdf60be4dafb6a3b78802006a020e6cd709c22d240f94f7a0696240a17154 \
+    --hash=sha256:4549b137d8cbe3c2eadfa56c0c858b78acbeff956bd461e40000b2164d9167c6 \
+    --hash=sha256:48ee615a779ffa749d7d50c291761dc921d93d7cf203dca2db663b4f193f0e49 \
+    --hash=sha256:559d622aef2a2dff98a892eef321433ba5bc55b2485220a8ca289c1ecc2bd54f \
+    --hash=sha256:5d52c72449bb02dd45a773a203196e6d4fae34e158769c896012401f33064396 \
+    --hash=sha256:65beb15e7f9c16e15934569d29fb4def74ea1469d8781f6b3507ab896d6d8719 \
+    --hash=sha256:680da076cad81cdf5ffcac50c477b6790be81768d30f9da9e01960c4b18a66db \
+    --hash=sha256:762bc5a0df03c51ee3f09c621e1cee64e3a079a2b5020de82f1613873d79ee70 \
+    --hash=sha256:89aceb31cd5f9fc2449fe8cf3810797ca52b65f1489002d58fe190bfb265c536 \
+    --hash=sha256:983c0c3de4cb9fcba68fd3f45ed846eb86a2a8b8d8bc5bb18364c4d00b3c61fe \
+    --hash=sha256:99d4984aabd4c7182050bca76176ce2dbc9fa9748afe583a7865c12954d714ba \
+    --hash=sha256:9d9fc6a16357965d282dd4ab6531013935425d0dc4950df2e0cf2a1b1ac1017d \
+    --hash=sha256:a7597ffc67987b37b12e09c029bd1dc43965f75d328076ae85721b84046e9ca7 \
+    --hash=sha256:ab010e461bb6b444eaf7f8c813bb716be2d78ab786103f9608ffd37a4bd7d490 \
+    --hash=sha256:b12e715c10a13ca1bd27fbceed9adc8c5ff640f8e1f7ea76416352de703523c8 \
+    --hash=sha256:b2bded09c578d19e08bd2c5bb8fed7f103e089752c9cf7ca7ca7de522326e921 \
+    --hash=sha256:b372026ebf32fe2523159f27d9f0e9f485092e43b00a5adacf732192a70ba118 \
+    --hash=sha256:cb179acdd4ae1e4a5a160d80b87841b3d0e0be84af46c7bb2cd7ece57a39c4ba \
+    --hash=sha256:e97a3b627e3cb63c415a16245d6cef2139cca18bb1183d1b9375a1c14e83f3b3 \
+    --hash=sha256:f0e099fc4cc697450c3dd4031791559692dd941a95254cb9aeded66a7aa8b9bc \
+    --hash=sha256:f99317a0fa2e49917689b8cf977510addcfaaab769b3f899b9c481bbd76730c2 \
+    # via secretstorage
+docutils==0.16 \
+    --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
+    --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+    # via -r contrib/packaging/requirements_win32.txt.in
+dulwich==0.19.16 \
+    --hash=sha256:10699277c6268d0c16febe141a5b1c1a6e9744f3144c2d2de1706f4b1adafe63 \
+    --hash=sha256:267160904e9a1cb6c248c5efc53597a35d038ecc6f60bdc4546b3053bed11982 \
+    --hash=sha256:4e3aba5e4844e7c700721c1fc696987ea820ee3528a03604dc4e74eff4196826 \
+    --hash=sha256:60bb2c2c92f5025c1b53a556304008f0f624c98ae36f22d870e056b2d4236c11 \
+    --hash=sha256:dddae02d372fc3b5cfb0046d0f62246ef281fa0c088df7601ab5916607add94b \
+    --hash=sha256:f00d132082b8fcc2eb0d722abc773d4aeb5558c1475d7edd1f0f571146c29db9 \
+    --hash=sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850 \
+    # via -r contrib/packaging/requirements_win32.txt.in
+jeepney==0.4.3 \
+    --hash=sha256:3479b861cc2b6407de5188695fa1a8d57e5072d7059322469b62628869b8e36e \
+    --hash=sha256:d6c6b49683446d2407d2fe3acb7a368a77ff063f9182fe427da15d622adc24cf \
+    # via keyring, secretstorage
+keyring==21.4.0 \
+    --hash=sha256:4e34ea2fdec90c1c43d6610b5a5fafa1b9097db1802948e90caf5763974b8f8d \
+    --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 \
+    # via -r contrib/packaging/requirements_win32.txt.in
+pycparser==2.20 \
+    --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \
+    --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 \
+    # via cffi
+pygments==2.7.1 \
+    --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
+    --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
+    # via -r contrib/packaging/requirements_win32.txt.in
 pywin32-ctypes==0.2.0 \
     --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
-    --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98
-urllib3==1.25.6 \
-    --hash=sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398 \
-    --hash=sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86 \
+    --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
+    # via -r contrib/packaging/requirements_win32.txt.in
+secretstorage==3.1.2 \
+    --hash=sha256:15da8a989b65498e29be338b3b279965f1b8f09b9668bd8010da183024c8bff6 \
+    --hash=sha256:b5ec909dde94d4ae2fa26af7c089036997030f0cf0a5cb372b4cccabd81c143b \
+    # via keyring
+six==1.15.0 \
+    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
+    --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced \
+    # via cryptography
+urllib3==1.25.10 \
+    --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \
+    --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461 \
     # via dulwich
--- a/contrib/packaging/requirements_win32.txt.in	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/packaging/requirements_win32.txt.in	Tue Oct 20 22:04:04 2020 +0530
@@ -1,5 +1,6 @@
 docutils
-dulwich
+# Pinned to an old version because 0.20 drops Python 2 compatibility.
+dulwich < 0.20
 keyring
 pygments
 # Need to list explicitly so dependency gets pulled in when
--- a/contrib/perf.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/contrib/perf.py	Tue Oct 20 22:04:04 2020 +0530
@@ -231,7 +231,7 @@
     command = registrar.command(cmdtable)
 elif safehasattr(cmdutil, 'command'):
     command = cmdutil.command(cmdtable)
-    if b'norepo' not in getargspec(command).args:
+    if 'norepo' not in getargspec(command).args:
         # for "historical portability":
         # wrap original cmdutil.command, because "norepo" option has
         # been available since 3.1 (or 75a96326cecb)
@@ -805,7 +805,7 @@
         repo.ui.quiet = True
         matcher = scmutil.match(repo[None])
         opts[b'dry_run'] = True
-        if b'uipathfn' in getargspec(scmutil.addremove).args:
+        if 'uipathfn' in getargspec(scmutil.addremove).args:
             uipathfn = scmutil.getuipathfn(repo)
             timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
         else:
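
Editor's note: both perf.py hunks above drop the `b''` prefix because on Python 3 the argument names reported by `getargspec` are native `str`, so a bytes literal never matches. A self-contained illustration:

```python
# On Python 3, inspect reports argument names as str; membership tests
# against bytes literals silently fail.
import inspect

def command(name, norepo=False):
    pass

args = inspect.getfullargspec(command).args
assert 'norepo' in args       # str matches
assert b'norepo' not in args  # bytes never matches on Python 3
```
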
--- a/hgext/absorb.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/absorb.py	Tue Oct 20 22:04:04 2020 +0530
@@ -244,7 +244,7 @@
         return content, mode, copy
 
 
-def overlaycontext(memworkingcopy, ctx, parents=None, extra=None):
+def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None):
     """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
     memworkingcopy overrides file contents.
     """
@@ -253,8 +253,9 @@
         parents = ctx.repo().changelog.parents(ctx.node())
     if extra is None:
         extra = ctx.extra()
+    if desc is None:
+        desc = ctx.description()
     date = ctx.date()
-    desc = ctx.description()
     user = ctx.user()
     files = set(ctx.files()).union(memworkingcopy)
     store = overlaystore(ctx, memworkingcopy)
@@ -923,7 +924,18 @@
         extra = ctx.extra()
         if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
             extra[b'absorb_source'] = ctx.hex()
-        mctx = overlaycontext(memworkingcopy, ctx, parents, extra=extra)
+
+        desc = rewriteutil.update_hash_refs(
+            ctx.repo(),
+            ctx.description(),
+            {
+                oldnode: [newnode]
+                for oldnode, newnode in self.replacemap.items()
+            },
+        )
+        mctx = overlaycontext(
+            memworkingcopy, ctx, parents, extra=extra, desc=desc
+        )
         return mctx.commit()
 
     @util.propertycache
--- a/hgext/churn.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/churn.py	Tue Oct 20 22:04:04 2020 +0530
@@ -25,7 +25,6 @@
     registrar,
     scmutil,
 )
-from mercurial.utils import dateutil
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -36,9 +35,8 @@
 testedwith = b'ships-with-hg-core'
 
 
-def changedlines(ui, repo, ctx1, ctx2, fns):
+def changedlines(ui, repo, ctx1, ctx2, fmatch):
     added, removed = 0, 0
-    fmatch = scmutil.matchfiles(repo, fns)
     diff = b''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
     for l in diff.split(b'\n'):
         if l.startswith(b"+") and not l.startswith(b"+++ "):
@@ -73,17 +71,9 @@
         _(b'analyzing'), unit=_(b'revisions'), total=len(repo)
     )
     rate = {}
-    df = False
-    if opts.get(b'date'):
-        df = dateutil.matchdate(opts[b'date'])
 
-    m = scmutil.match(repo[None], pats, opts)
-
-    def prep(ctx, fns):
+    def prep(ctx, fmatch):
         rev = ctx.rev()
-        if df and not df(ctx.date()[0]):  # doesn't match date format
-            return
-
         key = getkey(ctx).strip()
         key = amap.get(key, key)  # alias remap
         if opts.get(b'changesets'):
@@ -95,12 +85,21 @@
                 return
 
             ctx1 = parents[0]
-            lines = changedlines(ui, repo, ctx1, ctx, fns)
+            lines = changedlines(ui, repo, ctx1, ctx, fmatch)
             rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
 
         progress.increment()
 
-    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
+    wopts = logcmdutil.walkopts(
+        pats=pats,
+        opts=opts,
+        revspec=opts[b'rev'],
+        date=opts[b'date'],
+        include_pats=opts[b'include'],
+        exclude_pats=opts[b'exclude'],
+    )
+    revs, makefilematcher = logcmdutil.makewalker(repo, wopts)
+    for ctx in scmutil.walkchangerevs(repo, revs, makefilematcher, prep):
         continue
 
     progress.complete()
@@ -183,6 +182,9 @@
       # display count of lines changed in every year
       hg churn -f "%Y" -s
 
+      # display count of lines changed in a time range
+      hg churn -d "2020-04 to 2020-09"
+
     It is possible to map alternate email addresses to a main address
     by providing a file using the following format::
 
--- a/hgext/convert/cvsps.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/convert/cvsps.py	Tue Oct 20 22:04:04 2020 +0530
@@ -559,7 +559,7 @@
                     pass  # try next encoding
                 except LookupError as inst:  # unknown encoding, maybe
                     raise error.Abort(
-                        inst,
+                        pycompat.bytestr(inst),
                         hint=_(
                             b'check convert.cvsps.logencoding configuration'
                         ),
--- a/hgext/convert/hg.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/convert/hg.py	Tue Oct 20 22:04:04 2020 +0530
@@ -217,7 +217,8 @@
         """
         anc = [p1ctx.ancestor(p2ctx)]
         # Calculate what files are coming from p2
-        actions, diverge, rename = mergemod.calculateupdates(
+        # TODO: mresult.commitinfo might be able to get that info
+        mresult = mergemod.calculateupdates(
             self.repo,
             p1ctx,
             p2ctx,
@@ -228,7 +229,7 @@
             followcopies=False,
         )
 
-        for file, (action, info, msg) in pycompat.iteritems(actions):
+        for file, (action, info, msg) in mresult.filemap():
             if source.targetfilebelongstosource(file):
                 # If the file belongs to the source repo, ignore the p2
                 # since it will be covered by the existing fileset.
--- a/hgext/extdiff.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/extdiff.py	Tue Oct 20 22:04:04 2020 +0530
@@ -255,7 +255,6 @@
     tmproot,
     dir1a,
     dir1b,
-    dir2root,
     dir2,
     rev1a,
     rev1b,
@@ -267,7 +266,7 @@
     waitprocs = []
     totalfiles = len(commonfiles)
     for idx, commonfile in enumerate(sorted(commonfiles)):
-        path1a = os.path.join(tmproot, dir1a, commonfile)
+        path1a = os.path.join(dir1a, commonfile)
         label1a = commonfile + rev1a
         if not os.path.isfile(path1a):
             path1a = pycompat.osdevnull
@@ -275,12 +274,12 @@
         path1b = b''
         label1b = b''
         if do3way:
-            path1b = os.path.join(tmproot, dir1b, commonfile)
+            path1b = os.path.join(dir1b, commonfile)
             label1b = commonfile + rev1b
             if not os.path.isfile(path1b):
                 path1b = pycompat.osdevnull
 
-        path2 = os.path.join(dir2root, dir2, commonfile)
+        path2 = os.path.join(dir2, commonfile)
         label2 = commonfile + rev2
 
         if confirm:
@@ -457,23 +456,23 @@
     label1b = rev1b
     label2 = rev2
 
-    # If only one change, diff the files instead of the directories
-    # Handle bogus modifies correctly by checking if the files exist
-    if len(common) == 1:
-        common_file = util.localpath(common.pop())
-        dir1a = os.path.join(tmproot, dir1a, common_file)
-        label1a = common_file + rev1a
-        if not os.path.isfile(dir1a):
-            dir1a = pycompat.osdevnull
-        if do3way:
-            dir1b = os.path.join(tmproot, dir1b, common_file)
-            label1b = common_file + rev1b
-            if not os.path.isfile(dir1b):
-                dir1b = pycompat.osdevnull
-        dir2 = os.path.join(dir2root, dir2, common_file)
-        label2 = common_file + rev2
+    if not opts.get(b'per_file'):
+        # If only one change, diff the files instead of the directories
+        # Handle bogus modifies correctly by checking if the files exist
+        if len(common) == 1:
+            common_file = util.localpath(common.pop())
+            dir1a = os.path.join(tmproot, dir1a, common_file)
+            label1a = common_file + rev1a
+            if not os.path.isfile(dir1a):
+                dir1a = pycompat.osdevnull
+            if do3way:
+                dir1b = os.path.join(tmproot, dir1b, common_file)
+                label1b = common_file + rev1b
+                if not os.path.isfile(dir1b):
+                    dir1b = pycompat.osdevnull
+            dir2 = os.path.join(dir2root, dir2, common_file)
+            label2 = common_file + rev2
 
-    if not opts.get(b'per_file'):
         # Run the external tool on the 2 temp directories or the patches
         cmdline = formatcmdline(
             cmdline,
@@ -499,10 +498,9 @@
             confirm=opts.get(b'confirm'),
             commonfiles=common,
             tmproot=tmproot,
-            dir1a=dir1a,
-            dir1b=dir1b,
-            dir2root=dir2root,
-            dir2=dir2,
+            dir1a=os.path.join(tmproot, dir1a),
+            dir1b=os.path.join(tmproot, dir1b) if do3way else None,
+            dir2=os.path.join(dir2root, dir2),
             rev1a=rev1a,
             rev1b=rev1b,
             rev2=rev2,
@@ -711,45 +709,67 @@
         )
 
 
+def _gettooldetails(ui, cmd, path):
+    """
+    returns the following things for a
+    ```
+    [extdiff]
+    <cmd> = <path>
+    ```
+    entry:
+
+    cmd: command/tool name
+    path: path to the tool
+    cmdline: the command which should be run
+    isgui: whether the tool uses a GUI or not
+
+    Reads all configs related to external tools, whether they come from the
+    extdiff section or the diff-tools/merge-tools sections, and whether they
+    are specified in the old format or the latest one.
+    """
+    path = util.expandpath(path)
+    if cmd.startswith(b'cmd.'):
+        cmd = cmd[4:]
+        if not path:
+            path = procutil.findexe(cmd)
+            if path is None:
+                path = filemerge.findexternaltool(ui, cmd) or cmd
+        diffopts = ui.config(b'extdiff', b'opts.' + cmd)
+        cmdline = procutil.shellquote(path)
+        if diffopts:
+            cmdline += b' ' + diffopts
+        isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
+    else:
+        if path:
+            # case "cmd = path opts"
+            cmdline = path
+            diffopts = len(pycompat.shlexsplit(cmdline)) > 1
+        else:
+            # case "cmd ="
+            path = procutil.findexe(cmd)
+            if path is None:
+                path = filemerge.findexternaltool(ui, cmd) or cmd
+            cmdline = procutil.shellquote(path)
+            diffopts = False
+        isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
+    # look for diff arguments in [diff-tools] then [merge-tools]
+    if not diffopts:
+        key = cmd + b'.diffargs'
+        for section in (b'diff-tools', b'merge-tools'):
+            args = ui.config(section, key)
+            if args:
+                cmdline += b' ' + args
+                if isgui is None:
+                    isgui = ui.configbool(section, cmd + b'.gui') or False
+                break
+    return cmd, path, cmdline, isgui
+
+
 def uisetup(ui):
     for cmd, path in ui.configitems(b'extdiff'):
-        path = util.expandpath(path)
-        if cmd.startswith(b'cmd.'):
-            cmd = cmd[4:]
-            if not path:
-                path = procutil.findexe(cmd)
-                if path is None:
-                    path = filemerge.findexternaltool(ui, cmd) or cmd
-            diffopts = ui.config(b'extdiff', b'opts.' + cmd)
-            cmdline = procutil.shellquote(path)
-            if diffopts:
-                cmdline += b' ' + diffopts
-            isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
-        elif cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
+        if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
             continue
-        else:
-            if path:
-                # case "cmd = path opts"
-                cmdline = path
-                diffopts = len(pycompat.shlexsplit(cmdline)) > 1
-            else:
-                # case "cmd ="
-                path = procutil.findexe(cmd)
-                if path is None:
-                    path = filemerge.findexternaltool(ui, cmd) or cmd
-                cmdline = procutil.shellquote(path)
-                diffopts = False
-            isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
-        # look for diff arguments in [diff-tools] then [merge-tools]
-        if not diffopts:
-            key = cmd + b'.diffargs'
-            for section in (b'diff-tools', b'merge-tools'):
-                args = ui.config(section, key)
-                if args:
-                    cmdline += b' ' + args
-                    if isgui is None:
-                        isgui = ui.configbool(section, cmd + b'.gui') or False
-                    break
+        cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path)
         command(
             cmd,
             extdiffopts[:],
--- a/hgext/fix.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/fix.py	Tue Oct 20 22:04:04 2020 +0530
@@ -241,15 +241,15 @@
     of files, unless the --whole flag is used. Some tools may always affect the
     whole file regardless of --whole.
 
-    If revisions are specified with --rev, those revisions will be checked, and
-    they may be replaced with new revisions that have fixed file content.  It is
-    desirable to specify all descendants of each specified revision, so that the
-    fixes propagate to the descendants. If all descendants are fixed at the same
-    time, no merging, rebasing, or evolution will be required.
+    If --working-dir is used, files with uncommitted changes in the working copy
+    will be fixed. Note that no backups are made.
 
-    If --working-dir is used, files with uncommitted changes in the working copy
-    will be fixed. If the checked-out revision is also fixed, the working
-    directory will update to the replacement revision.
+    If revisions are specified with --source, those revisions and their
+    descendants will be checked, and they may be replaced with new revisions
+    that have fixed file content. Because the descendants are included
+    automatically, no merging, rebasing, or evolution will be required. If an
+    ancestor of the working copy is included, then the working copy itself
+    will also be fixed, and the working copy will be updated to the fixed
+    parent.
 
     When determining what lines of each file to fix at each revision, the whole
     set of revisions being fixed is considered, so that fixes to earlier
@@ -815,8 +815,14 @@
         if copysource:
             wctx.markcopied(path, copysource)
 
+    desc = rewriteutil.update_hash_refs(
+        repo,
+        ctx.description(),
+        {oldnode: [newnode] for oldnode, newnode in replacements.items()},
+    )
+
     memctx = wctx.tomemctx(
-        text=ctx.description(),
+        text=desc,
         branch=ctx.branch(),
         extra=extra,
         date=ctx.date(),
--- a/hgext/fsmonitor/__init__.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/fsmonitor/__init__.py	Tue Oct 20 22:04:04 2020 +0530
@@ -73,6 +73,8 @@
 
     [fsmonitor]
     warn_update_file_count = (integer)
+    # or when mercurial is built with rust support
+    warn_update_file_count_rust = (integer)
 
 If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
 be printed during working directory updates if this many files will be
@@ -725,7 +727,7 @@
         # An assist for avoiding the dangling-symlink fsevents bug
         extensions.wrapfunction(os, b'symlink', wrapsymlink)
 
-    extensions.wrapfunction(merge, b'update', wrapupdate)
+    extensions.wrapfunction(merge, b'_update', wrapupdate)
 
 
 def wrapsymlink(orig, source, link_name):
--- a/hgext/git/__init__.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/git/__init__.py	Tue Oct 20 22:04:04 2020 +0530
@@ -297,6 +297,10 @@
 
             def commit(self, *args, **kwargs):
                 ret = orig.commit(self, *args, **kwargs)
+                if ret is None:
+                    # there was nothing to commit, so we should skip
+                    # the index fixup logic we'd otherwise do.
+                    return None
                 tid = self.store.git[gitutil.togitnode(ret)].tree.id
                 # DANGER! This will flush any writes staged to the
                 # index in Git, but we're sidestepping the index in a
--- a/hgext/git/dirstate.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/git/dirstate.py	Tue Oct 20 22:04:04 2020 +0530
@@ -129,6 +129,7 @@
             return False
 
     def status(self, match, subrepos, ignored, clean, unknown):
+        listclean = clean
         # TODO handling of clean files - can we get that from git.status()?
         modified, added, removed, deleted, unknown, ignored, clean = (
             [],
@@ -142,6 +143,8 @@
         gstatus = self.git.status()
         for path, status in gstatus.items():
             path = pycompat.fsencode(path)
+            if not match(path):
+                continue
             if status == pygit2.GIT_STATUS_IGNORED:
                 if path.endswith(b'/'):
                     continue
@@ -166,6 +169,22 @@
                     b'unhandled case: status for %r is %r' % (path, status)
                 )
 
+        if listclean:
+            observed = set(
+                modified + added + removed + deleted + unknown + ignored
+            )
+            index = self.git.index
+            index.read()
+            for entry in index:
+                path = pycompat.fsencode(entry.path)
+                if not match(path):
+                    continue
+                if path in observed:
+                    continue  # already in some other set
+                if path[-1] == b'/':
+                    continue  # directory
+                clean.append(path)
+
         # TODO are we really always sure of status here?
         return (
             False,
@@ -276,13 +295,24 @@
         pass
 
     def add(self, f):
-        self.git.index.add(pycompat.fsdecode(f))
+        index = self.git.index
+        index.read()
+        index.add(pycompat.fsdecode(f))
+        index.write()
 
     def drop(self, f):
-        self.git.index.remove(pycompat.fsdecode(f))
+        index = self.git.index
+        index.read()
+        fs = pycompat.fsdecode(f)
+        if fs in index:
+            index.remove(fs)
+            index.write()
 
     def remove(self, f):
-        self.git.index.remove(pycompat.fsdecode(f))
+        index = self.git.index
+        index.read()
+        index.remove(pycompat.fsdecode(f))
+        index.write()
 
     def copied(self, path):
         # TODO: track copies?
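
Editor's note: the add/drop/remove rewrites above all adopt the same pygit2 discipline: `read()` refreshes the in-memory index from disk before mutating it, and `write()` persists the result. A hedged standalone sketch (repository path and file name are hypothetical):

```python
# Hedged sketch of the index read/modify/write pattern used above.
import pygit2

repo = pygit2.Repository('/path/to/repo')  # hypothetical path
index = repo.index
index.read()              # refresh from disk, picking up outside changes
index.add('newfile.txt')  # file assumed to exist in the working tree
index.write()             # persist the staged change to the index file
```
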
--- a/hgext/git/gitlog.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/git/gitlog.py	Tue Oct 20 22:04:04 2020 +0530
@@ -96,6 +96,10 @@
 
 # TODO: an interface for the changelog type?
 class changelog(baselog):
+    # TODO: this appears to be an enumerated type, and should probably
+    # be part of the public changelog interface
+    _copiesstorage = b'extra'
+
     def __contains__(self, rev):
         try:
             self.node(rev)
@@ -385,8 +389,8 @@
         sig = pygit2.Signature(
             encoding.unifromlocal(stringutil.person(user)),
             encoding.unifromlocal(stringutil.email(user)),
-            timestamp,
-            -(tz // 60),
+            int(timestamp),
+            -int(tz // 60),
         )
         oid = self.gitrepo.create_commit(
             None, sig, sig, desc, gitutil.togitnode(manifest), parents
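
Editor's note: the `int()` coercions above are needed because Mercurial stores dates as `(unixtime, tzoffset)` with a possibly-float timestamp and an offset in seconds west of UTC, while `pygit2.Signature` expects an integer timestamp and an offset in minutes east of UTC. A small sketch with made-up values:

```python
# Sketch of the Signature construction above; name, email, and date are
# hypothetical.
import pygit2

timestamp, tz = 1603212844.0, -19800  # -19800s west of UTC == UTC+05:30
sig = pygit2.Signature(
    'A User', 'user@example.com', int(timestamp), -int(tz // 60)
)
assert sig.offset == 330  # minutes east of UTC
```
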
--- a/hgext/git/manifest.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/git/manifest.py	Tue Oct 20 22:04:04 2020 +0530
@@ -217,7 +217,9 @@
             return b''
 
     def copy(self):
-        pass
+        return gittreemanifest(
+            self._git_repo, self._tree, dict(self._pending_changes)
+        )
 
     def items(self):
         for f in self:
@@ -320,7 +322,8 @@
             for part in comps:
                 parent = trees[full]
                 try:
-                    new = self._repo[parent[pycompat.fsdecode(part)]]
+                    parent_tree_id = parent[pycompat.fsdecode(part)].id
+                    new = self._repo[parent_tree_id]
                 except KeyError:
                     # new directory
                     new = None
--- a/hgext/histedit.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/histedit.py	Tue Oct 20 22:04:04 2020 +0530
@@ -635,12 +635,11 @@
 
 def applychanges(ui, repo, ctx, opts):
     """Merge changeset from ctx (only) in the current working directory"""
-    wcpar = repo.dirstate.p1()
-    if ctx.p1().node() == wcpar:
+    if ctx.p1().node() == repo.dirstate.p1():
         # edits are "in place" we do not need to make any merge,
         # just applies changes on parent for editing
         ui.pushbuffer()
-        cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
+        cmdutil.revert(ui, repo, ctx, all=True)
         stats = mergemod.updateresult(0, 0, 0, 0)
         ui.popbuffer()
     else:
@@ -882,8 +881,7 @@
         return False
 
     def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
-        parent = ctx.p1().node()
-        hg.updaterepo(repo, parent, overwrite=False)
+        mergemod.update(ctx.p1())
         ### prepare new commit data
         commitopts = {}
         commitopts[b'user'] = ctx.user()
@@ -927,7 +925,7 @@
             )
         if n is None:
             return ctx, []
-        hg.updaterepo(repo, n, overwrite=False)
+        mergemod.update(repo[n])
         replacements = [
             (oldctx.node(), (newnode,)),
             (ctx.node(), (n,)),
@@ -1152,7 +1150,7 @@
             h,
         )
 
-    @property
+    @util.propertycache
     def desc(self):
         summary = (
             cmdutil.rendertemplate(
@@ -2051,7 +2049,7 @@
 
 def _finishhistedit(ui, repo, state, fm):
     """This action runs when histedit is finishing its session"""
-    hg.updaterepo(repo, state.parentctxnode, overwrite=False)
+    mergemod.update(repo[state.parentctxnode])
 
     mapping, tmpnodes, created, ntm = processreplacement(state)
     if mapping:
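
Editor's note: the `@property` to `@util.propertycache` switch above trades per-access template rendering for a single computation cached on the instance. A minimal sketch of the descriptor's behavior:

```python
# Minimal sketch of util.propertycache semantics: the first access runs
# the function and stores the result on the instance.
from mercurial import util

class example(object):
    @util.propertycache
    def desc(self):
        print('rendered once')
        return b'summary'

e = example()
e.desc  # prints 'rendered once' and caches the value
e.desc  # served from the instance dict; no recomputation
```
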
--- a/hgext/hooklib/changeset_obsoleted.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/hooklib/changeset_obsoleted.py	Tue Oct 20 22:04:04 2020 +0530
@@ -13,7 +13,7 @@
   messageidseed = myseed
 
   [hooks]
-  pretxnclose.changeset_obsoleted = \
+  txnclose.changeset_obsoleted = \
     python:hgext.hooklib.changeset_obsoleted.hook
 """
 
@@ -26,6 +26,7 @@
 from mercurial import (
     encoding,
     error,
+    formatter,
     logcmdutil,
     mail,
     obsutil,
@@ -62,7 +63,7 @@
         b'notify_obsoleted', b'messageidseed'
     ) or ui.config(b'notify', b'messageidseed')
     template = ui.config(b'notify_obsoleted', b'template')
-    spec = logcmdutil.templatespec(template, None)
+    spec = formatter.literal_templatespec(template)
     templater = logcmdutil.changesettemplater(ui, repo, spec)
     ui.pushbuffer()
     n = notify.notifier(ui, repo, b'incoming')
--- a/hgext/hooklib/changeset_published.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/hooklib/changeset_published.py	Tue Oct 20 22:04:04 2020 +0530
@@ -26,6 +26,7 @@
 from mercurial import (
     encoding,
     error,
+    formatter,
     logcmdutil,
     mail,
     pycompat,
@@ -61,7 +62,7 @@
         b'notify_published', b'messageidseed'
     ) or ui.config(b'notify', b'messageidseed')
     template = ui.config(b'notify_published', b'template')
-    spec = logcmdutil.templatespec(template, None)
+    spec = formatter.literal_templatespec(template)
     templater = logcmdutil.changesettemplater(ui, repo, spec)
     ui.pushbuffer()
     n = notify.notifier(ui, repo, b'incoming')
--- a/hgext/largefiles/__init__.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/largefiles/__init__.py	Tue Oct 20 22:04:04 2020 +0530
@@ -195,7 +195,9 @@
     for name, module in extensions.extensions():
         if name == b'rebase':
             # TODO: teach exthelper to handle this
-            extensions.wrapfunction(module, b'rebase', overrides.overriderebase)
+            extensions.wrapfunction(
+                module, b'rebase', overrides.overriderebasecmd
+            )
 
 
 revsetpredicate = eh.revsetpredicate
--- a/hgext/largefiles/lfcommands.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/largefiles/lfcommands.py	Tue Oct 20 22:04:04 2020 +0530
@@ -485,19 +485,14 @@
     return ([], [])
 
 
-def downloadlfiles(ui, repo, rev=None):
-    match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
-
-    def prepare(ctx, fns):
-        pass
-
+def downloadlfiles(ui, repo):
+    tonode = repo.changelog.node
     totalsuccess = 0
     totalmissing = 0
-    if rev != []:  # walkchangerevs on empty list would return all revs
-        for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': rev}, prepare):
-            success, missing = cachelfiles(ui, repo, ctx.node())
-            totalsuccess += len(success)
-            totalmissing += len(missing)
+    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
+        success, missing = cachelfiles(ui, repo, tonode(rev))
+        totalsuccess += len(success)
+        totalmissing += len(missing)
     ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
     if totalmissing > 0:
         ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
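
Editor's note: the rewrite above replaces a `walkchangerevs` pass with a revset: `file()` with a `path:` pattern selects exactly the revisions touching standins under `.hglf/`. A sketch of the idiom, assuming an existing `repo` object (`lfutil.shortname` is `b'.hglf'`):

```python
# Fragment assuming a localrepo instance `repo`: iterate every revision
# that touches a file under .hglf/ and resolve it to a node.
tonode = repo.changelog.node
for rev in repo.revs(b'file(%s)', b'path:.hglf'):
    node = tonode(rev)  # changeset whose largefiles should be cached
```
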
--- a/hgext/largefiles/overrides.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/largefiles/overrides.py	Tue Oct 20 22:04:04 2020 +0530
@@ -52,6 +52,8 @@
 
 lfstatus = lfutil.lfstatus
 
+MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
+
 # -- Utility functions: commonly/repeatedly needed functionality ---------------
 
 
@@ -543,16 +545,16 @@
     origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
 ):
     overwrite = force and not branchmerge
-    actions, diverge, renamedelete = origfn(
+    mresult = origfn(
         repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
     )
 
     if overwrite:
-        return actions, diverge, renamedelete
+        return mresult
 
     # Convert to dictionary with filename as key and action as value.
     lfiles = set()
-    for f in actions:
+    for f in mresult.files():
         splitstandin = lfutil.splitstandin(f)
         if splitstandin is not None and splitstandin in p1:
             lfiles.add(splitstandin)
@@ -561,8 +563,8 @@
 
     for lfile in sorted(lfiles):
         standin = lfutil.standin(lfile)
-        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
-        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
+        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
+        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
         if sm in (b'g', b'dc') and lm != b'r':
             if sm == b'dc':
                 f1, f2, fa, move, anc = sargs
@@ -578,14 +580,18 @@
                 % lfile
             )
             if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
-                actions[lfile] = (b'r', None, b'replaced by standin')
-                actions[standin] = (b'g', sargs, b'replaces standin')
+                mresult.addfile(lfile, b'r', None, b'replaced by standin')
+                mresult.addfile(standin, b'g', sargs, b'replaces standin')
             else:  # keep local normal file
-                actions[lfile] = (b'k', None, b'replaces standin')
+                mresult.addfile(lfile, b'k', None, b'replaces standin')
                 if branchmerge:
-                    actions[standin] = (b'k', None, b'replaced by non-standin')
+                    mresult.addfile(
+                        standin, b'k', None, b'replaced by non-standin',
+                    )
                 else:
-                    actions[standin] = (b'r', None, b'replaced by non-standin')
+                    mresult.addfile(
+                        standin, b'r', None, b'replaced by non-standin',
+                    )
         elif lm in (b'g', b'dc') and sm != b'r':
             if lm == b'dc':
                 f1, f2, fa, move, anc = largs
@@ -603,31 +609,36 @@
             if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                 if branchmerge:
                     # largefile can be restored from standin safely
-                    actions[lfile] = (b'k', None, b'replaced by standin')
-                    actions[standin] = (b'k', None, b'replaces standin')
+                    mresult.addfile(
+                        lfile, b'k', None, b'replaced by standin',
+                    )
+                    mresult.addfile(standin, b'k', None, b'replaces standin')
                 else:
                     # "lfile" should be marked as "removed" without
                     # removal of itself
-                    actions[lfile] = (
-                        b'lfmr',
+                    mresult.addfile(
+                        lfile,
+                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                         None,
                         b'forget non-standin largefile',
                     )
 
                     # linear-merge should treat this largefile as 're-added'
-                    actions[standin] = (b'a', None, b'keep standin')
+                    mresult.addfile(standin, b'a', None, b'keep standin')
             else:  # pick remote normal file
-                actions[lfile] = (b'g', largs, b'replaces standin')
-                actions[standin] = (b'r', None, b'replaced by non-standin')
+                mresult.addfile(lfile, b'g', largs, b'replaces standin')
+                mresult.addfile(
+                    standin, b'r', None, b'replaced by non-standin',
+                )
 
-    return actions, diverge, renamedelete
+    return mresult
 
 
 @eh.wrapfunction(mergestatemod, b'recordupdates')
 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
-    if b'lfmr' in actions:
+    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        for lfile, args, msg in actions[b'lfmr']:
+        for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
             # this should be executed before 'orig', to execute 'remove'
             # before all other actions
             repo.dirstate.remove(lfile)
@@ -723,7 +734,7 @@
         try:
             result = orig(ui, repo, pats, opts, rename)
         except error.Abort as e:
-            if pycompat.bytestr(e) != _(b'no files to copy'):
+            if e.message != _(b'no files to copy'):
                 raise e
             else:
                 nonormalfiles = True
@@ -840,7 +851,7 @@
                 lfdirstate.add(destlfile)
         lfdirstate.write()
     except error.Abort as e:
-        if pycompat.bytestr(e) != _(b'no files to copy'):
+        if e.message != _(b'no files to copy'):
             raise e
         else:
             nolfiles = True
@@ -863,7 +874,7 @@
 # the matcher to hit standins instead of largefiles. Based on the
 # resulting standins update the largefiles.
 @eh.wrapfunction(cmdutil, b'revert')
-def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
+def overriderevert(orig, ui, repo, ctx, *pats, **opts):
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
     # prevent others from changing them in their incorrect state.
@@ -926,7 +937,7 @@
             return m
 
         with extensions.wrappedfunction(scmutil, b'match', overridematch):
-            orig(ui, repo, ctx, parents, *pats, **opts)
+            orig(ui, repo, ctx, *pats, **opts)
 
         newstandins = lfutil.getstandinsstate(repo)
         filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
@@ -1083,7 +1094,7 @@
         # truncated at that point.  The user may expect a download count with
         # this option, so attempt whether or not this is a largefile repo.
         if opts.get(b'all_largefiles'):
-            success, missing = lfcommands.downloadlfiles(ui, repo, None)
+            success, missing = lfcommands.downloadlfiles(ui, repo)
 
             if missing != 0:
                 return None
@@ -1092,7 +1103,7 @@
 
 
 @eh.wrapcommand(b'rebase', extension=b'rebase')
-def overriderebase(orig, ui, repo, **opts):
+def overriderebasecmd(orig, ui, repo, **opts):
     if not util.safehasattr(repo, b'_largefilesenabled'):
         return orig(ui, repo, **opts)
 
@@ -1100,12 +1111,30 @@
     repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
     repo._lfstatuswriters.append(lambda *msg, **opts: None)
     try:
-        return orig(ui, repo, **opts)
+        with ui.configoverride(
+            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
+        ):
+            return orig(ui, repo, **opts)
     finally:
         repo._lfstatuswriters.pop()
         repo._lfcommithooks.pop()
 
 
+@eh.extsetup
+def overriderebase(ui):
+    try:
+        rebase = extensions.find(b'rebase')
+    except KeyError:
+        pass
+    else:
+
+        def _dorebase(orig, *args, **kwargs):
+            kwargs['inmemory'] = False
+            return orig(*args, **kwargs)
+
+        extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
+
+
 @eh.wrapcommand(b'archive')
 def overridearchivecmd(orig, ui, repo, dest, **opts):
     with lfstatus(repo.unfiltered()):
@@ -1688,7 +1717,7 @@
     return err
 
 
-@eh.wrapfunction(merge, b'update')
+@eh.wrapfunction(merge, b'_update')
 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
     matcher = kwargs.get('matcher', None)
     # note if this is a partial update
@@ -1747,10 +1776,13 @@
         lfdirstate.write()
 
         oldstandins = lfutil.getstandinsstate(repo)
-        # Make sure the merge runs on disk, not in-memory. largefiles is not a
-        # good candidate for in-memory merge (large files, custom dirstate,
-        # matcher usage).
-        kwargs['wc'] = repo[None]
+        wc = kwargs.get('wc')
+        if wc and wc.isinmemory():
+            # largefiles is not a good candidate for in-memory merge (large
+            # files, custom dirstate, matcher usage).
+            raise error.ProgrammingError(
+                b'largefiles is not compatible with in-memory merge'
+            )
         result = orig(repo, node, branchmerge, force, *args, **kwargs)
 
         newstandins = lfutil.getstandinsstate(repo)
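
Editor's note: several hunks above (and the convert/hg.py one earlier) migrate from the old `(actions, diverge, renamedelete)` tuple to the mergeresult object now returned by `calculateupdates()`. A condensed sketch of the methods used, assuming existing `repo` and context objects:

```python
# Condensed sketch of the mergeresult API adopted above; repo, p1, p2,
# pas, branchmerge, force, and acceptremote are assumed to exist.
mresult = mergemod.calculateupdates(
    repo, p1, p2, pas, branchmerge, force, acceptremote, followcopies=False
)
for f in mresult.files():  # every file with a pending merge action
    action, args, msg = mresult.getfile(f, (None, None, None))
for f, (action, args, msg) in mresult.filemap():  # (file, action) pairs
    pass
# record or override an action for a (hypothetical) file:
mresult.addfile(b'somefile', b'k', None, b'keep local version')
```
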
--- a/hgext/mq.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/mq.py	Tue Oct 20 22:04:04 2020 +0530
@@ -1717,11 +1717,7 @@
             except:  # re-raises
                 self.ui.warn(_(b'cleaning up working directory...\n'))
                 cmdutil.revert(
-                    self.ui,
-                    repo,
-                    repo[b'.'],
-                    repo.dirstate.parents(),
-                    no_backup=True,
+                    self.ui, repo, repo[b'.'], no_backup=True,
                 )
                 # only remove unknown files that we know we touched or
                 # created while patching
--- a/hgext/narrow/__init__.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/narrow/__init__.py	Tue Oct 20 22:04:04 2020 +0530
@@ -11,9 +11,9 @@
 from mercurial import (
     localrepo,
     registrar,
+    requirements,
 )
 
-from mercurial.interfaces import repository
 
 from . import (
     narrowbundle2,
@@ -52,7 +52,7 @@
 
 
 def featuresetup(ui, features):
-    features.add(repository.NARROW_REQUIREMENT)
+    features.add(requirements.NARROW_REQUIREMENT)
 
 
 def uisetup(ui):
@@ -69,7 +69,7 @@
         return
 
     repo.ui.setconfig(b'experimental', b'narrow', True, b'narrow-ext')
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowwirepeer.reposetup(repo)
 
--- a/hgext/narrow/narrowbundle2.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/narrow/narrowbundle2.py	Tue Oct 20 22:04:04 2020 +0530
@@ -20,11 +20,11 @@
     localrepo,
     narrowspec,
     repair,
+    requirements,
     scmutil,
     util,
     wireprototypes,
 )
-from mercurial.interfaces import repository
 from mercurial.utils import stringutil
 
 _NARROWACL_SECTION = b'narrowacl'
@@ -108,7 +108,7 @@
 
         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', version)
-        if b'treemanifest' in repo.requirements:
+        if scmutil.istreemanifest(repo):
             part.addparam(b'treemanifest', b'1')
 
 
@@ -163,7 +163,7 @@
 
         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', version)
-        if b'treemanifest' in repo.requirements:
+        if scmutil.istreemanifest(repo):
             part.addparam(b'treemanifest', b'1')
 
 
@@ -178,8 +178,8 @@
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
-    if not repository.NARROW_REQUIREMENT in op.repo.requirements:
-        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
+    if not requirements.NARROW_REQUIREMENT in op.repo.requirements:
+        op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
         scmutil.writereporequirements(op.repo)
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)
@@ -194,8 +194,8 @@
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
-    if repository.NARROW_REQUIREMENT not in op.repo.requirements:
-        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
+    if requirements.NARROW_REQUIREMENT not in op.repo.requirements:
+        op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
         scmutil.writereporequirements(op.repo)
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)
--- a/hgext/narrow/narrowcommands.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/narrow/narrowcommands.py	Tue Oct 20 22:04:04 2020 +0530
@@ -27,11 +27,11 @@
     registrar,
     repair,
     repoview,
+    requirements,
     sparse,
     util,
     wireprototypes,
 )
-from mercurial.interfaces import repository
 
 table = {}
 command = registrar.command(table)
@@ -133,7 +133,7 @@
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
@@ -150,7 +150,7 @@
 
 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
         includes = set(opts.get('include', []))
         excludes = set(opts.get('exclude', []))
@@ -166,7 +166,7 @@
 
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
     if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
@@ -482,7 +482,7 @@
     exclude switches, the changes are applied immediately.
     """
     opts = pycompat.byteskwargs(opts)
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         raise error.Abort(
             _(
                 b'the tracked command is only supported on '
--- a/hgext/narrow/narrowwirepeer.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/narrow/narrowwirepeer.py	Tue Oct 20 22:04:04 2020 +0530
@@ -13,7 +13,6 @@
     extensions,
     hg,
     narrowspec,
-    pycompat,
     wireprototypes,
     wireprotov1peer,
     wireprotov1server,
@@ -125,7 +124,7 @@
             )
     except error.Abort as exc:
         bundler = bundle2.bundle20(repo.ui)
-        manargs = [(b'message', pycompat.bytestr(exc))]
+        manargs = [(b'message', exc.message)]
         advargs = []
         if exc.hint is not None:
             advargs.append((b'hint', exc.hint))
--- a/hgext/patchbomb.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/patchbomb.py	Tue Oct 20 22:04:04 2020 +0530
@@ -207,7 +207,7 @@
     if not tmpl:
         return b' '.join(flags)
     out = util.stringio()
-    spec = formatter.templatespec(b'', templater.unquotestring(tmpl), None)
+    spec = formatter.literal_templatespec(templater.unquotestring(tmpl))
     with formatter.templateformatter(ui, out, b'patchbombflag', {}, spec) as fm:
         fm.startitem()
         fm.context(ctx=repo[rev])
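
Editor's note: this is the same migration as the two hooklib hunks earlier: a literal template string is now wrapped with `formatter.literal_templatespec()` instead of building the spec by hand. A short sketch, assuming existing `ui` and `repo` objects:

```python
# Sketch of the literal_templatespec usage adopted above; ui and repo are
# assumed to exist.
from mercurial import formatter, util

out = util.stringio()
spec = formatter.literal_templatespec(b'{node|short} {desc|firstline}\n')
with formatter.templateformatter(ui, out, b'example', {}, spec) as fm:
    fm.startitem()
    fm.context(ctx=repo[b'.'])
```
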
--- a/hgext/phabricator.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/phabricator.py	Tue Oct 20 22:04:04 2020 +0530
@@ -76,6 +76,7 @@
     patch,
     phases,
     pycompat,
+    rewriteutil,
     scmutil,
     smartset,
     tags,
@@ -166,7 +167,7 @@
 
 
 @eh.wrapfunction(localrepo, "loadhgrc")
-def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
+def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **opts):
     """Load ``.arcconfig`` content into a ui instance on repository open.
     """
     result = False
@@ -200,7 +201,9 @@
     if cfg:
         ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig"))
 
-    return orig(ui, wdirvfs, hgvfs, requirements) or result  # Load .hg/hgrc
+    return (
+        orig(ui, wdirvfs, hgvfs, requirements, *args, **opts) or result
+    )  # Load .hg/hgrc
 
 
 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
@@ -238,8 +241,9 @@
 
     def decorate(fn):
         def inner(*args, **kwargs):
-            if kwargs.get('test_vcr'):
-                cassette = pycompat.fsdecode(kwargs.pop('test_vcr'))
+            vcr = kwargs.pop('test_vcr')
+            if vcr:
+                cassette = pycompat.fsdecode(vcr)
                 import hgdemandimport
 
                 with hgdemandimport.deactivated():
@@ -1510,6 +1514,9 @@
                         mapping.get(old.p1().node(), (old.p1(),))[0],
                         mapping.get(old.p2().node(), (old.p2(),))[0],
                     ]
+                    newdesc = rewriteutil.update_hash_refs(
+                        repo, newdesc, mapping,
+                    )
                     new = context.metadataonlyctx(
                         repo,
                         old,
@@ -1587,7 +1594,9 @@
                     repo,
                     old,
                     parents=parents,
-                    text=old.description(),
+                    text=rewriteutil.update_hash_refs(
+                        repo, old.description(), mapping
+                    ),
                     user=old.user(),
                     date=old.date(),
                     extra=old.extra(),
--- a/hgext/rebase.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/rebase.py	Tue Oct 20 22:04:04 2020 +0530
@@ -34,7 +34,6 @@
     dirstateguard,
     error,
     extensions,
-    hg,
     merge as mergemod,
     mergestate as mergestatemod,
     mergeutil,
@@ -166,7 +165,7 @@
 class rebaseruntime(object):
     """This class is a container for rebase runtime state"""
 
-    def __init__(self, repo, ui, inmemory=False, opts=None):
+    def __init__(self, repo, ui, inmemory=False, dryrun=False, opts=None):
         if opts is None:
             opts = {}
 
@@ -212,6 +211,7 @@
         self.obsoletenotrebased = {}
         self.obsoletewithoutsuccessorindestination = set()
         self.inmemory = inmemory
+        self.dryrun = dryrun
         self.stateobj = statemod.cmdstate(repo, b'rebasestate')
 
     @property
@@ -448,7 +448,7 @@
             from mercurial.context import overlayworkingctx
 
             self.wctx = overlayworkingctx(self.repo)
-            self.repo.ui.debug(b"rebasing in-memory\n")
+            self.repo.ui.debug(b"rebasing in memory\n")
         else:
             self.wctx = self.repo[None]
             self.repo.ui.debug(b"rebasing on disk\n")
@@ -517,7 +517,7 @@
         p.complete()
         ui.note(_(b'rebase merging completed\n'))
 
-    def _concludenode(self, rev, p1, editor, commitmsg=None):
+    def _concludenode(self, rev, editor, commitmsg=None):
         '''Commit the working directory changes with its current parents.
 
         Reuse commit info from rev but also store useful information in extra.
@@ -548,7 +548,6 @@
                     user=ctx.user(),
                     date=date,
                 )
-                mergestatemod.mergestate.clean(repo)
             else:
                 newnode = commitnode(
                     repo,
@@ -563,7 +562,6 @@
 
     def _rebasenode(self, tr, rev, allowdivergence, progressfn):
         repo, ui, opts = self.repo, self.ui, self.opts
-        dest = self.destmap[rev]
         ctx = repo[rev]
         desc = _ctxdesc(ctx)
         if self.state[rev] == rev:
@@ -616,21 +614,43 @@
             else:
                 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
                 with ui.configoverride(overrides, b'rebase'):
-                    stats = rebasenode(
-                        repo,
-                        rev,
-                        p1,
-                        p2,
-                        base,
-                        self.collapsef,
-                        dest,
-                        wctx=self.wctx,
-                    )
-                    if stats.unresolvedcount > 0:
-                        if self.inmemory:
-                            raise error.InMemoryMergeConflictsError()
-                        else:
+                    try:
+                        rebasenode(
+                            repo,
+                            rev,
+                            p1,
+                            p2,
+                            base,
+                            self.collapsef,
+                            wctx=self.wctx,
+                        )
+                    except error.InMemoryMergeConflictsError:
+                        if self.dryrun:
                             raise error.ConflictResolutionRequired(b'rebase')
+                        if self.collapsef:
+                            # TODO: make the overlayworkingctx visible in
+                            # the working copy here instead of re-raising,
+                            # which retries the entire rebase operation.
+                            raise
+                        ui.status(
+                            _(
+                                b"hit merge conflicts; rebasing that "
+                                b"commit again in the working copy\n"
+                            )
+                        )
+                        cmdutil.bailifchanged(repo)
+                        self.inmemory = False
+                        self._assignworkingcopy()
+                        mergemod.update(repo[p1], wc=self.wctx)
+                        rebasenode(
+                            repo,
+                            rev,
+                            p1,
+                            p2,
+                            base,
+                            self.collapsef,
+                            wctx=self.wctx,
+                        )
             if not self.collapsef:
                 merging = p2 != nullrev
                 editform = cmdutil.mergeeditform(merging, b'rebase')
@@ -643,7 +663,7 @@
                 # parents, and we don't want to create a merge commit here (unless
                 # we're rebasing a merge commit).
                 self.wctx.setparents(repo[p1].node(), repo[p2].node())
-                newnode = self._concludenode(rev, p1, editor)
+                newnode = self._concludenode(rev, editor)
             else:
                 # Skip commit if we are collapsing
                 newnode = None
@@ -710,7 +730,7 @@
 
             self.wctx.setparents(repo[p1].node(), repo[self.external].node())
             newnode = self._concludenode(
-                revtoreuse, p1, editor, commitmsg=commitmsg
+                revtoreuse, editor, commitmsg=commitmsg
             )
 
             if newnode is not None:
@@ -729,7 +749,7 @@
             newwd = self.originalwd
         if newwd not in [c.rev() for c in repo[None].parents()]:
             ui.note(_(b"update back to initial working directory parent\n"))
-            hg.updaterepo(repo, newwd, overwrite=False)
+            mergemod.update(repo[newwd])
 
         collapsedas = None
         if self.collapsef and not self.keepf:
@@ -1072,7 +1092,7 @@
                 )
             # update to the current working revision
             # to clear interrupted merge
-            hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
+            mergemod.clean_update(repo[rbsrt.originalwd])
             rbsrt._finishrebase()
             return 0
     elif inmemory:
@@ -1089,9 +1109,6 @@
                     b' merge\n'
                 )
             )
-            # TODO: Make in-memory merge not use the on-disk merge state, so
-            # we don't have to clean it here
-            mergestatemod.mergestate.clean(repo)
             clearstatus(repo)
             clearcollapsemsg(repo)
             return _dorebase(ui, repo, action, opts, inmemory=False)
@@ -1100,7 +1117,7 @@
 
 
 def _dryrunrebase(ui, repo, action, opts):
-    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
+    rbsrt = rebaseruntime(repo, ui, inmemory=True, dryrun=True, opts=opts)
     confirm = opts.get(b'confirm')
     if confirm:
         ui.status(_(b'starting in-memory rebase\n'))
@@ -1114,15 +1131,9 @@
             overrides = {(b'rebase', b'singletransaction'): True}
             with ui.configoverride(overrides, b'rebase'):
                 _origrebase(
-                    ui,
-                    repo,
-                    action,
-                    opts,
-                    rbsrt,
-                    inmemory=True,
-                    leaveunfinished=True,
+                    ui, repo, action, opts, rbsrt,
                 )
-        except error.InMemoryMergeConflictsError:
+        except error.ConflictResolutionRequired:
             ui.status(_(b'hit a merge conflict\n'))
             return 1
         except error.Abort:
@@ -1162,13 +1173,11 @@
 
 
 def _dorebase(ui, repo, action, opts, inmemory=False):
-    rbsrt = rebaseruntime(repo, ui, inmemory, opts)
-    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
+    rbsrt = rebaseruntime(repo, ui, inmemory, opts=opts)
+    return _origrebase(ui, repo, action, opts, rbsrt)
 
 
-def _origrebase(
-    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
-):
+def _origrebase(ui, repo, action, opts, rbsrt):
     assert action != b'stop'
     with repo.wlock(), repo.lock():
         if opts.get(b'interactive'):
@@ -1213,7 +1222,7 @@
             destmap = _definedestmap(
                 ui,
                 repo,
-                inmemory,
+                rbsrt.inmemory,
                 opts.get(b'dest', None),
                 opts.get(b'source', []),
                 opts.get(b'base', []),
@@ -1238,11 +1247,11 @@
             # Same logic for the dirstate guard, except we don't create one when
             # rebasing in-memory (it's not needed).
             dsguard = None
-            if singletr and not inmemory:
+            if singletr and not rbsrt.inmemory:
                 dsguard = dirstateguard.dirstateguard(repo, b'rebase')
             with util.acceptintervention(dsguard):
                 rbsrt._performrebase(tr)
-                if not leaveunfinished:
+                if not rbsrt.dryrun:
                     rbsrt._finishrebase()
 
 
@@ -1477,7 +1486,7 @@
         return newnode
 
 
-def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
+def rebasenode(repo, rev, p1, p2, base, collapse, wctx):
     """Rebase a single revision rev on top of p1 using base as merge ancestor"""
     # Merge phase
     # Update to destination and merge it with local
@@ -1501,7 +1510,7 @@
 
     # See explanation in merge.graft()
     mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
-    stats = mergemod.update(
+    stats = mergemod._update(
         repo,
         rev,
         branchmerge=True,
@@ -1513,13 +1522,18 @@
     )
     wctx.setparents(p1ctx.node(), repo[p2].node())
     if collapse:
-        copies.graftcopies(wctx, ctx, repo[dest])
+        copies.graftcopies(wctx, ctx, p1ctx)
     else:
         # If we're not using --collapse, we need to
         # duplicate copies between the revision we're
         # rebasing and its first parent.
         copies.graftcopies(wctx, ctx, ctx.p1())
-    return stats
+
+    if stats.unresolvedcount > 0:
+        if wctx.isinmemory():
+            raise error.InMemoryMergeConflictsError()
+        else:
+            raise error.ConflictResolutionRequired(b'rebase')
 
 
 def adjustdest(repo, rev, destmap, state, skipped):
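
Note: rebasenode() now raises instead of returning merge stats
(InMemoryMergeConflictsError for an in-memory context,
ConflictResolutionRequired on disk), and the caller retries only the
conflicting commit in the working copy rather than restarting the whole
rebase. A control-flow sketch of that fallback, where inmemory_wctx and
dryrun are stand-ins for the runtime state above:

    from mercurial import cmdutil, error, merge as mergemod

    try:
        rebasenode(repo, rev, p1, p2, base, collapse, wctx=inmemory_wctx)
    except error.InMemoryMergeConflictsError:
        if dryrun:
            raise error.ConflictResolutionRequired(b'rebase')
        cmdutil.bailifchanged(repo)  # on-disk retry needs a clean wd
        mergemod.update(repo[p1], wc=repo[None])
        rebasenode(repo, rev, p1, p2, base, collapse, wctx=repo[None])
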
--- a/hgext/remotefilelog/__init__.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/remotefilelog/__init__.py	Tue Oct 20 22:04:04 2020 +0530
@@ -136,7 +136,6 @@
 from mercurial import (
     changegroup,
     changelog,
-    cmdutil,
     commands,
     configitems,
     context,
@@ -150,6 +149,7 @@
     localrepo,
     match as matchmod,
     merge,
+    mergestate as mergestatemod,
     node as nodemod,
     patch,
     pycompat,
@@ -340,7 +340,6 @@
     extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
     extensions.wrapfunction(revset, b'filelog', filelogrevset)
     revset.symbols[b'filelog'] = revset.filelog
-    extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)
 
 
 def cloneshallow(orig, ui, repo, *args, **opts):
@@ -361,7 +360,10 @@
                         self.unfiltered().__class__,
                     )
                 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
-                scmutil.writereporequirements(self)
+                with self.lock():
+                    # acquire store lock before writing requirements as some
+                    # requirements might be written to .hg/store/requires
+                    scmutil.writereporequirements(self)
 
                 # Since setupclient hadn't been called, exchange.pull was not
                 # wrapped. So we need to manually invoke our version of it.
@@ -479,36 +481,38 @@
 
 # prefetch files before update
 def applyupdates(
-    orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
+    orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts
 ):
     if isenabled(repo):
         manifest = mctx.manifest()
         files = []
-        for f, args, msg in actions[b'g']:
+        for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]):
             files.append((f, hex(manifest[f])))
         # batch fetch the needed files from the server
         repo.fileservice.prefetch(files)
-    return orig(
-        repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels
-    )
+    return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts)
 
 
 # Prefetch merge checkunknownfiles
-def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs):
+def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs):
     if isenabled(repo):
         files = []
         sparsematch = repo.maybesparsematch(mctx.rev())
-        for f, (m, actionargs, msg) in pycompat.iteritems(actions):
+        for f, (m, actionargs, msg) in mresult.filemap():
             if sparsematch and not sparsematch(f):
                 continue
-            if m in (b'c', b'dc', b'cm'):
+            if m in (
+                mergestatemod.ACTION_CREATED,
+                mergestatemod.ACTION_DELETED_CHANGED,
+                mergestatemod.ACTION_CREATED_MERGE,
+            ):
                 files.append((f, hex(mctx.filenode(f))))
-            elif m == b'dg':
+            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
                 f2 = actionargs[0]
                 files.append((f2, hex(mctx.filenode(f2))))
         # batch fetch the needed files from the server
         repo.fileservice.prefetch(files)
-    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
+    return orig(repo, wctx, mctx, force, mresult, *args, **kwargs)
 
 
 # Prefetch files before status attempts to look at their size and contents
@@ -776,40 +780,6 @@
     return getrenamed
 
 
-def walkfilerevs(orig, repo, match, follow, revs, fncache):
-    if not isenabled(repo):
-        return orig(repo, match, follow, revs, fncache)
-
-    # remotefilelog's can't be walked in rev order, so throw.
-    # The caller will see the exception and walk the commit tree instead.
-    if not follow:
-        raise cmdutil.FileWalkError(b"Cannot walk via filelog")
-
-    wanted = set()
-    minrev, maxrev = min(revs), max(revs)
-
-    pctx = repo[b'.']
-    for filename in match.files():
-        if filename not in pctx:
-            raise error.Abort(
-                _(b'cannot follow file not in parent revision: "%s"') % filename
-            )
-        fctx = pctx[filename]
-
-        linkrev = fctx.linkrev()
-        if linkrev >= minrev and linkrev <= maxrev:
-            fncache.setdefault(linkrev, []).append(filename)
-            wanted.add(linkrev)
-
-        for ancestor in fctx.ancestors():
-            linkrev = ancestor.linkrev()
-            if linkrev >= minrev and linkrev <= maxrev:
-                fncache.setdefault(linkrev, []).append(ancestor.path())
-                wanted.add(linkrev)
-
-    return wanted
-
-
 def filelogrevset(orig, repo, subset, x):
     """``filelog(pattern)``
     Changesets connected to the specified filelog.
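
Note: the wrapped merge hooks above now receive a mergeresult object rather
than the old actions dict, and test against named constants from
mercurial.mergestate instead of one-letter codes. Sketch of collecting
prefetch candidates from such an object (mresult and manifest assumed):

    from mercurial import mergestate as mergestatemod
    from mercurial.node import hex

    files = []
    for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]):
        files.append((f, hex(manifest[f])))
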
--- a/hgext/remotefilelog/remotefilelogserver.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/remotefilelog/remotefilelogserver.py	Tue Oct 20 22:04:04 2020 +0530
@@ -23,6 +23,7 @@
     extensions,
     match,
     pycompat,
+    scmutil,
     store,
     streamclone,
     util,
@@ -169,7 +170,7 @@
                         if kind == stat.S_IFDIR:
                             visit.append(fp)
 
-            if b'treemanifest' in repo.requirements:
+            if scmutil.istreemanifest(repo):
                 for (u, e, s) in repo.store.datafiles():
                     if u.startswith(b'meta/') and (
                         u.endswith(b'.i') or u.endswith(b'.d')
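
Note: scmutil.istreemanifest() is a new helper replacing the repeated
"b'treemanifest' in repo.requirements" test; the same substitution appears
in mercurial/bundle2.py, changegroup.py and cmdutil.py below. Sketch (repo
assumed):

    from mercurial import scmutil

    if scmutil.istreemanifest(repo):
        pass  # tree-manifest-only path, e.g. walking meta/ revlogs
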
--- a/hgext/sparse.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/sparse.py	Tue Oct 20 22:04:04 2020 +0530
@@ -80,9 +80,9 @@
     dirstate,
     error,
     extensions,
-    hg,
     logcmdutil,
     match as matchmod,
+    merge as mergemod,
     pycompat,
     registrar,
     sparse,
@@ -137,9 +137,9 @@
         )
     )
 
-    def _initialrevs(orig, repo, opts):
-        revs = orig(repo, opts)
-        if opts.get(b'sparse'):
+    def _initialrevs(orig, repo, wopts):
+        revs = orig(repo, wopts)
+        if wopts.opts.get(b'sparse'):
             sparsematch = sparse.matcher(repo)
 
             def ctxmatch(rev):
@@ -173,9 +173,9 @@
     # clone
     if not narrow_pat and (include or exclude or enableprofile):
 
-        def clonesparse(orig, self, node, overwrite, *args, **kwargs):
+        def clonesparse(orig, ctx, *args, **kwargs):
             sparse.updateconfig(
-                self.unfiltered(),
+                ctx.repo().unfiltered(),
                 pat,
                 {},
                 include=include,
@@ -183,9 +183,9 @@
                 enableprofile=enableprofile,
                 usereporootpaths=True,
             )
-            return orig(self, node, overwrite, *args, **kwargs)
+            return orig(ctx, *args, **kwargs)
 
-        extensions.wrapfunction(hg, b'updaterepo', clonesparse)
+        extensions.wrapfunction(mergemod, b'update', clonesparse)
     return orig(ui, repo, *args, **opts)
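
Note: with hg.updaterepo() on its way out, extensions wrap the new
context-based merge.update() entry point instead. A sketch of a wrapper for
that signature (the body is illustrative):

    from mercurial import extensions, merge as mergemod

    def wrapper(orig, ctx, *args, **kwargs):
        # adjust state via ctx.repo() before the update runs
        return orig(ctx, *args, **kwargs)

    extensions.wrapfunction(mergemod, b'update', wrapper)
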
 
 
--- a/hgext/sqlitestore.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/sqlitestore.py	Tue Oct 20 22:04:04 2020 +0530
@@ -67,6 +67,7 @@
     mdiff,
     pycompat,
     registrar,
+    requirements,
     util,
     verify,
 )
@@ -1151,7 +1152,7 @@
     supported.add(REQUIREMENT_ZLIB)
     supported.add(REQUIREMENT_NONE)
     supported.add(REQUIREMENT_SHALLOW_FILES)
-    supported.add(repository.NARROW_REQUIREMENT)
+    supported.add(requirements.NARROW_REQUIREMENT)
 
 
 def newreporequirements(orig, ui, createopts):
--- a/hgext/strip.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/strip.py	Tue Oct 20 22:04:04 2020 +0530
@@ -269,7 +269,7 @@
             repo.dirstate.write(repo.currenttransaction())
 
             # clear resolve state
-            mergestatemod.mergestate.clean(repo, repo[b'.'].node())
+            mergestatemod.mergestate.clean(repo)
 
             update = False
 
--- a/hgext/transplant.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/hgext/transplant.py	Tue Oct 20 22:04:04 2020 +0530
@@ -198,9 +198,7 @@
                     if pulls:
                         if source != repo:
                             exchange.pull(repo, source.peer(), heads=pulls)
-                        merge.update(
-                            repo, pulls[-1], branchmerge=False, force=False
-                        )
+                        merge.update(repo[pulls[-1]])
                         p1 = repo.dirstate.p1()
                         pulls = []
 
@@ -275,7 +273,7 @@
             tr.close()
             if pulls:
                 exchange.pull(repo, source.peer(), heads=pulls)
-                merge.update(repo, pulls[-1], branchmerge=False, force=False)
+                merge.update(repo[pulls[-1]])
         finally:
             self.saveseries(revmap, merges)
             self.transplants.write()
@@ -476,7 +474,7 @@
         """logic to stop an interrupted transplant"""
         if self.canresume():
             startctx = repo[b'.']
-            hg.updaterepo(repo, startctx.node(), overwrite=True)
+            merge.clean_update(startctx)
             ui.status(_(b"stopped the interrupted transplant\n"))
             ui.status(
                 _(b"working directory is now at %s\n") % startctx.hex()[:12]
--- a/mercurial/bundle2.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/bundle2.py	Tue Oct 20 22:04:04 2020 +0530
@@ -166,6 +166,7 @@
     phases,
     pushkey,
     pycompat,
+    requirements,
     scmutil,
     streamclone,
     tags,
@@ -1963,10 +1964,7 @@
     nbchangesets = None
     if b'nbchanges' in inpart.params:
         nbchangesets = int(inpart.params.get(b'nbchanges'))
-    if (
-        b'treemanifest' in inpart.params
-        and b'treemanifest' not in op.repo.requirements
-    ):
+    if b'treemanifest' in inpart.params and not scmutil.istreemanifest(op.repo):
         if len(op.repo.changelog) != 0:
             raise error.Abort(
                 _(
@@ -1974,7 +1972,7 @@
                     b"non-empty and does not use tree manifests"
                 )
             )
-        op.repo.requirements.add(b'treemanifest')
+        op.repo.requirements.add(requirements.TREEMANIFEST_REQUIREMENT)
         op.repo.svfs.options = localrepo.resolvestorevfsoptions(
             op.repo.ui, op.repo.requirements, op.repo.features
         )
@@ -2091,7 +2089,7 @@
     except error.Abort as e:
         raise error.Abort(
             _(b'bundle at %s is corrupted:\n%s')
-            % (util.hidepassword(raw_url), bytes(e))
+            % (util.hidepassword(raw_url), e.message)
         )
     assert not inpart.read()
 
@@ -2576,7 +2574,7 @@
 
         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', cgversion)
-        if b'treemanifest' in repo.requirements:
+        if scmutil.istreemanifest(repo):
             part.addparam(b'treemanifest', b'1')
         if b'exp-sidedata-flag' in repo.requirements:
             part.addparam(b'exp-sidedata', b'1')
--- a/mercurial/changegroup.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/changegroup.py	Tue Oct 20 22:04:04 2020 +0530
@@ -26,6 +26,8 @@
     mdiff,
     phases,
     pycompat,
+    requirements,
+    scmutil,
     util,
 )
 
@@ -331,6 +333,10 @@
             clend = len(cl)
             changesets = clend - clstart
             progress.complete()
+            del deltas
+            # TODO Python 2.7 removal
+            # del efilesset
+            efilesset = None
             self.callback = None
 
             # pull off the manifest group
@@ -948,9 +954,7 @@
         # Treemanifests don't work correctly with fastpathlinkrev
         # either, because we don't discover which directory nodes to
         # send along with files. This could probably be fixed.
-        fastpathlinkrev = fastpathlinkrev and (
-            b'treemanifest' not in repo.requirements
-        )
+        fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
 
         fnodes = {}  # needed file nodes
 
@@ -1467,7 +1471,7 @@
     if (
         repo.ui.configbool(b'experimental', b'changegroup3')
         or repo.ui.configbool(b'experimental', b'treemanifest')
-        or b'treemanifest' in repo.requirements
+        or scmutil.istreemanifest(repo)
     ):
         # we keep version 03 because we need to exchange treemanifest data
         #
@@ -1495,7 +1499,7 @@
 # Changegroup versions that can be created from the repo
 def supportedoutgoingversions(repo):
     versions = allsupportedversions(repo)
-    if b'treemanifest' in repo.requirements:
+    if scmutil.istreemanifest(repo):
         # Versions 01 and 02 support only flat manifests and it's just too
         # expensive to convert between the flat manifest and tree manifest on
         # the fly. Since tree manifests are hashed differently, all of history
@@ -1503,7 +1507,7 @@
         # support versions 01 and 02.
         versions.discard(b'01')
         versions.discard(b'02')
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # support that for stripping and unbundling to work.
         versions.discard(b'01')
--- a/mercurial/changelog.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/changelog.py	Tue Oct 20 22:04:04 2020 +0530
@@ -26,8 +26,7 @@
     dateutil,
     stringutil,
 )
-
-from .revlogutils import sidedata as sidedatamod
+from .revlogutils import flagutil
 
 _defaultextra = {b'branch': b'default'}
 
@@ -216,6 +215,7 @@
         '_text',
         '_sidedata',
         '_cpsd',
+        '_changes',
     )
 
     def __new__(cls, text, sidedata, cpsd):
@@ -252,6 +252,7 @@
         self._text = text
         self._sidedata = sidedata
         self._cpsd = cpsd
+        self._changes = None
 
         return self
 
@@ -301,7 +302,26 @@
         return decodeextra(raw)
 
     @property
+    def changes(self):
+        if self._changes is not None:
+            return self._changes
+        if self._cpsd:
+            changes = metadata.decode_files_sidedata(self._sidedata)
+        else:
+            changes = metadata.ChangingFiles(
+                touched=self.files or (),
+                added=self.filesadded or (),
+                removed=self.filesremoved or (),
+                p1_copies=self.p1copies or {},
+                p2_copies=self.p2copies or {},
+            )
+        self._changes = changes
+        return changes
+
+    @property
     def files(self):
+        if self._cpsd:
+            return sorted(self.changes.touched)
         off = self._offsets
         if off[2] == off[3]:
             return []
@@ -311,9 +331,7 @@
     @property
     def filesadded(self):
         if self._cpsd:
-            rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
-            if not rawindices:
-                return []
+            return self.changes.added
         else:
             rawindices = self.extra.get(b'filesadded')
         if rawindices is None:
@@ -323,9 +341,7 @@
     @property
     def filesremoved(self):
         if self._cpsd:
-            rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
-            if not rawindices:
-                return []
+            return self.changes.removed
         else:
             rawindices = self.extra.get(b'filesremoved')
         if rawindices is None:
@@ -335,9 +351,7 @@
     @property
     def p1copies(self):
         if self._cpsd:
-            rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
-            if not rawcopies:
-                return {}
+            return self.changes.copied_from_p1
         else:
             rawcopies = self.extra.get(b'p1copies')
         if rawcopies is None:
@@ -347,9 +361,7 @@
     @property
     def p2copies(self):
         if self._cpsd:
-            rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
-            if not rawcopies:
-                return {}
+            return self.changes.copied_from_p2
         else:
             rawcopies = self.extra.get(b'p2copies')
         if rawcopies is None:
@@ -403,9 +415,21 @@
         self._delayed = False
         self._delaybuf = None
         self._divert = False
-        self.filteredrevs = frozenset()
+        self._filteredrevs = frozenset()
+        self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')
 
+    @property
+    def filteredrevs(self):
+        return self._filteredrevs
+
+    @filteredrevs.setter
+    def filteredrevs(self, val):
+        # Ensure all updates go through this function
+        assert isinstance(val, frozenset)
+        self._filteredrevs = val
+        self._filteredrevs_hashcache = {}
+
     def delayupdate(self, tr):
         """delay visibility of index updates to other readers"""
 
@@ -524,10 +548,6 @@
         user,
         date=None,
         extra=None,
-        p1copies=None,
-        p2copies=None,
-        filesadded=None,
-        filesremoved=None,
     ):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
@@ -559,48 +579,13 @@
                 raise error.StorageError(
                     _(b'the name \'%s\' is reserved') % branch
                 )
-        sortedfiles = sorted(files)
+        sortedfiles = sorted(files.touched)
+        flags = 0
         sidedata = None
-        if extra is not None:
-            for name in (
-                b'p1copies',
-                b'p2copies',
-                b'filesadded',
-                b'filesremoved',
-            ):
-                extra.pop(name, None)
-        if p1copies is not None:
-            p1copies = metadata.encodecopies(sortedfiles, p1copies)
-        if p2copies is not None:
-            p2copies = metadata.encodecopies(sortedfiles, p2copies)
-        if filesadded is not None:
-            filesadded = metadata.encodefileindices(sortedfiles, filesadded)
-        if filesremoved is not None:
-            filesremoved = metadata.encodefileindices(sortedfiles, filesremoved)
-        if self._copiesstorage == b'extra':
-            extrasentries = p1copies, p2copies, filesadded, filesremoved
-            if extra is None and any(x is not None for x in extrasentries):
-                extra = {}
-            if p1copies is not None:
-                extra[b'p1copies'] = p1copies
-            if p2copies is not None:
-                extra[b'p2copies'] = p2copies
-            if filesadded is not None:
-                extra[b'filesadded'] = filesadded
-            if filesremoved is not None:
-                extra[b'filesremoved'] = filesremoved
-        elif self._copiesstorage == b'changeset-sidedata':
-            sidedata = {}
-            if p1copies:
-                sidedata[sidedatamod.SD_P1COPIES] = p1copies
-            if p2copies:
-                sidedata[sidedatamod.SD_P2COPIES] = p2copies
-            if filesadded:
-                sidedata[sidedatamod.SD_FILESADDED] = filesadded
-            if filesremoved:
-                sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
-            if not sidedata:
-                sidedata = None
+        if self._copiesstorage == b'changeset-sidedata':
+            if files.has_copies_info:
+                flags |= flagutil.REVIDX_HASCOPIESINFO
+            sidedata = metadata.encode_files_sidedata(files)
 
         if extra:
             extra = encodeextra(extra)
@@ -608,7 +593,7 @@
         l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
         text = b"\n".join(l)
         return self.addrevision(
-            text, transaction, len(self), p1, p2, sidedata=sidedata
+            text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
         )
 
     def branchinfo(self, rev):
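
Note: the changelog now funnels file-change metadata through a single
metadata.ChangingFiles value: add() takes one in place of the old
p1copies/p2copies/filesadded/filesremoved arguments, and changelogrevision
exposes a lazily decoded "changes" property. A sketch with illustrative
field values:

    from mercurial import metadata

    files = metadata.ChangingFiles(
        touched=(b'a.txt', b'b.txt'),
        added=(b'b.txt',),
        removed=(),
        p1_copies={b'b.txt': b'a.txt'},
        p2_copies={},
    )
    assert files.has_copies_info  # drives REVIDX_HASCOPIESINFO above
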
--- a/mercurial/chgserver.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/chgserver.py	Tue Oct 20 22:04:04 2020 +0530
@@ -502,7 +502,7 @@
             self.cresult.write(b'exit 255')
             return
         except error.Abort as inst:
-            self.ui.error(_(b"abort: %s\n") % inst)
+            self.ui.error(_(b"abort: %s\n") % inst.message)
             if inst.hint:
                 self.ui.error(_(b"(%s)\n") % inst.hint)
             self.ui.flush()
--- a/mercurial/cmdutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/cmdutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -16,7 +16,6 @@
 from .node import (
     hex,
     nullid,
-    nullrev,
     short,
 )
 from .pycompat import (
@@ -49,7 +48,6 @@
     revlog,
     rewriteutil,
     scmutil,
-    smartset,
     state as statemod,
     subrepoutil,
     templatekw,
@@ -560,7 +558,7 @@
             # backup continues
             for f in tobackup:
                 fd, tmpname = pycompat.mkstemp(
-                    prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
+                    prefix=os.path.basename(f) + b'.', dir=backupdir
                 )
                 os.close(fd)
                 ui.debug(b'backup %r as %r\n' % (f, tmpname))
@@ -1358,7 +1356,7 @@
         if cl:
             r = repo.unfiltered().changelog
         elif dir:
-            if b'treemanifest' not in repo.requirements:
+            if not scmutil.istreemanifest(repo):
                 raise error.Abort(
                     _(
                         b"--dir can only be used on repos with "
@@ -2229,356 +2227,17 @@
 
 def finddate(ui, repo, date):
     """Find the tipmost changeset that matches the given date spec"""
-
-    df = dateutil.matchdate(date)
-    m = scmutil.matchall(repo)
-    results = {}
-
-    def prep(ctx, fns):
-        d = ctx.date()
-        if df(d[0]):
-            results[ctx.rev()] = d
-
-    for ctx in walkchangerevs(repo, m, {b'rev': None}, prep):
-        rev = ctx.rev()
-        if rev in results:
-            ui.status(
-                _(b"found revision %d from %s\n")
-                % (rev, dateutil.datestr(results[rev]))
-            )
-            return b'%d' % rev
-
-    raise error.Abort(_(b"revision matching date not found"))
-
-
-def increasingwindows(windowsize=8, sizelimit=512):
-    while True:
-        yield windowsize
-        if windowsize < sizelimit:
-            windowsize *= 2
-
-
-def _walkrevs(repo, opts):
-    # Default --rev value depends on --follow but --follow behavior
-    # depends on revisions resolved from --rev...
-    follow = opts.get(b'follow') or opts.get(b'follow_first')
-    if opts.get(b'rev'):
-        revs = scmutil.revrange(repo, opts[b'rev'])
-    elif follow and repo.dirstate.p1() == nullid:
-        revs = smartset.baseset()
-    elif follow:
-        revs = repo.revs(b'reverse(:.)')
-    else:
-        revs = smartset.spanset(repo)
-        revs.reverse()
-    return revs
-
-
-class FileWalkError(Exception):
-    pass
-
-
-def walkfilerevs(repo, match, follow, revs, fncache):
-    '''Walks the file history for the matched files.
-
-    Returns the changeset revs that are involved in the file history.
-
-    Throws FileWalkError if the file history can't be walked using
-    filelogs alone.
-    '''
-    wanted = set()
-    copies = []
-    minrev, maxrev = min(revs), max(revs)
-
-    def filerevs(filelog, last):
-        """
-        Only files, no patterns.  Check the history of each file.
-
-        Examines filelog entries within minrev, maxrev linkrev range
-        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
-        tuples in backwards order
-        """
-        cl_count = len(repo)
-        revs = []
-        for j in pycompat.xrange(0, last + 1):
-            linkrev = filelog.linkrev(j)
-            if linkrev < minrev:
-                continue
-            # only yield rev for which we have the changelog, it can
-            # happen while doing "hg log" during a pull or commit
-            if linkrev >= cl_count:
-                break
-
-            parentlinkrevs = []
-            for p in filelog.parentrevs(j):
-                if p != nullrev:
-                    parentlinkrevs.append(filelog.linkrev(p))
-            n = filelog.node(j)
-            revs.append(
-                (linkrev, parentlinkrevs, follow and filelog.renamed(n))
-            )
-
-        return reversed(revs)
-
-    def iterfiles():
-        pctx = repo[b'.']
-        for filename in match.files():
-            if follow:
-                if filename not in pctx:
-                    raise error.Abort(
-                        _(
-                            b'cannot follow file not in parent '
-                            b'revision: "%s"'
-                        )
-                        % filename
-                    )
-                yield filename, pctx[filename].filenode()
-            else:
-                yield filename, None
-        for filename_node in copies:
-            yield filename_node
-
-    for file_, node in iterfiles():
-        filelog = repo.file(file_)
-        if not len(filelog):
-            if node is None:
-                # A zero count may be a directory or deleted file, so
-                # try to find matching entries on the slow path.
-                if follow:
-                    raise error.Abort(
-                        _(b'cannot follow nonexistent file: "%s"') % file_
-                    )
-                raise FileWalkError(b"Cannot walk via filelog")
-            else:
-                continue
-
-        if node is None:
-            last = len(filelog) - 1
-        else:
-            last = filelog.rev(node)
-
-        # keep track of all ancestors of the file
-        ancestors = {filelog.linkrev(last)}
-
-        # iterate from latest to oldest revision
-        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
-            if not follow:
-                if rev > maxrev:
-                    continue
-            else:
-                # Note that last might not be the first interesting
-                # rev to us:
-                # if the file has been changed after maxrev, we'll
-                # have linkrev(last) > maxrev, and we still need
-                # to explore the file graph
-                if rev not in ancestors:
-                    continue
-                # XXX insert 1327 fix here
-                if flparentlinkrevs:
-                    ancestors.update(flparentlinkrevs)
-
-            fncache.setdefault(rev, []).append(file_)
-            wanted.add(rev)
-            if copied:
-                copies.append(copied)
-
-    return wanted
-
-
-class _followfilter(object):
-    def __init__(self, repo, onlyfirst=False):
-        self.repo = repo
-        self.startrev = nullrev
-        self.roots = set()
-        self.onlyfirst = onlyfirst
-
-    def match(self, rev):
-        def realparents(rev):
-            if self.onlyfirst:
-                return self.repo.changelog.parentrevs(rev)[0:1]
-            else:
-                return filter(
-                    lambda x: x != nullrev, self.repo.changelog.parentrevs(rev)
-                )
-
-        if self.startrev == nullrev:
-            self.startrev = rev
-            return True
-
-        if rev > self.startrev:
-            # forward: all descendants
-            if not self.roots:
-                self.roots.add(self.startrev)
-            for parent in realparents(rev):
-                if parent in self.roots:
-                    self.roots.add(rev)
-                    return True
-        else:
-            # backwards: all parents
-            if not self.roots:
-                self.roots.update(realparents(self.startrev))
-            if rev in self.roots:
-                self.roots.remove(rev)
-                self.roots.update(realparents(rev))
-                return True
-
-        return False
-
-
-def walkchangerevs(repo, match, opts, prepare):
-    '''Iterate over files and the revs in which they changed.
-
-    Callers most commonly need to iterate backwards over the history
-    in which they are interested. Doing so has awful (quadratic-looking)
-    performance, so we use iterators in a "windowed" way.
-
-    We walk a window of revisions in the desired order.  Within the
-    window, we first walk forwards to gather data, then in the desired
-    order (usually backwards) to display it.
-
-    This function returns an iterator yielding contexts. Before
-    yielding each context, the iterator will first call the prepare
-    function on each context in the window in forward order.'''
-
-    allfiles = opts.get(b'all_files')
-    follow = opts.get(b'follow') or opts.get(b'follow_first')
-    revs = _walkrevs(repo, opts)
-    if not revs:
-        return []
-    wanted = set()
-    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
-    fncache = {}
-    change = repo.__getitem__
-
-    # First step is to fill wanted, the set of revisions that we want to yield.
-    # When it does not induce extra cost, we also fill fncache for revisions in
-    # wanted: a cache of filenames that were changed (ctx.files()) and that
-    # match the file filtering conditions.
-
-    if match.always() or allfiles:
-        # No files, no patterns.  Display all revs.
-        wanted = revs
-    elif not slowpath:
-        # We only have to read through the filelog to find wanted revisions
-
-        try:
-            wanted = walkfilerevs(repo, match, follow, revs, fncache)
-        except FileWalkError:
-            slowpath = True
-
-            # We decided to fall back to the slowpath because at least one
-            # of the paths was not a file. Check to see if at least one of them
-            # existed in history, otherwise simply return
-            for path in match.files():
-                if path == b'.' or path in repo.store:
-                    break
-            else:
-                return []
-
-    if slowpath:
-        # We have to read the changelog to match filenames against
-        # changed files
-
-        if follow:
-            raise error.Abort(
-                _(b'can only follow copies/renames for explicit filenames')
-            )
-
-        # The slow path checks files modified in every changeset.
-        # This is really slow on large repos, so compute the set lazily.
-        class lazywantedset(object):
-            def __init__(self):
-                self.set = set()
-                self.revs = set(revs)
-
-            # No need to worry about locality here because it will be accessed
-            # in the same order as the increasing window below.
-            def __contains__(self, value):
-                if value in self.set:
-                    return True
-                elif not value in self.revs:
-                    return False
-                else:
-                    self.revs.discard(value)
-                    ctx = change(value)
-                    if allfiles:
-                        matches = list(ctx.manifest().walk(match))
-                    else:
-                        matches = [f for f in ctx.files() if match(f)]
-                    if matches:
-                        fncache[value] = matches
-                        self.set.add(value)
-                        return True
-                    return False
-
-            def discard(self, value):
-                self.revs.discard(value)
-                self.set.discard(value)
-
-        wanted = lazywantedset()
-
-    # it might be worthwhile to do this in the iterator if the rev range
-    # is descending and the prune args are all within that range
-    for rev in opts.get(b'prune', ()):
-        rev = repo[rev].rev()
-        ff = _followfilter(repo)
-        stop = min(revs[0], revs[-1])
-        for x in pycompat.xrange(rev, stop - 1, -1):
-            if ff.match(x):
-                wanted = wanted - [x]
-
-    # Now that wanted is correctly initialized, we can iterate over the
-    # revision range, yielding only revisions in wanted.
-    def iterate():
-        if follow and match.always():
-            ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))
-
-            def want(rev):
-                return ff.match(rev) and rev in wanted
-
-        else:
-
-            def want(rev):
-                return rev in wanted
-
-        it = iter(revs)
-        stopiteration = False
-        for windowsize in increasingwindows():
-            nrevs = []
-            for i in pycompat.xrange(windowsize):
-                rev = next(it, None)
-                if rev is None:
-                    stopiteration = True
-                    break
-                elif want(rev):
-                    nrevs.append(rev)
-            for rev in sorted(nrevs):
-                fns = fncache.get(rev)
-                ctx = change(rev)
-                if not fns:
-
-                    def fns_generator():
-                        if allfiles:
-
-                            def bad(f, msg):
-                                pass
-
-                            for f in ctx.matches(matchmod.badmatch(match, bad)):
-                                yield f
-                        else:
-                            for f in ctx.files():
-                                if match(f):
-                                    yield f
-
-                    fns = fns_generator()
-                prepare(ctx, fns)
-            for rev in nrevs:
-                yield change(rev)
-
-            if stopiteration:
-                break
-
-    return iterate()
+    mrevs = repo.revs(b'date(%s)', date)
+    try:
+        rev = mrevs.max()
+    except ValueError:
+        raise error.Abort(_(b"revision matching date not found"))
+
+    ui.status(
+        _(b"found revision %d from %s\n")
+        % (rev, dateutil.datestr(repo[rev].date()))
+    )
+    return b'%d' % rev
 
 
 def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
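
Note: finddate() now delegates date filtering to the revset engine; max()
on an empty smartset raises ValueError, which maps to the same abort as the
old walk. A sketch of the underlying query (repo assumed, the date spec is
illustrative):

    revs = repo.revs(b'date(%s)', b'>2020-05-01')
    try:
        tipmost = revs.max()
    except ValueError:
        tipmost = None  # no changeset matched the date spec
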
@@ -3258,6 +2917,7 @@
         if opts.get(b'secret'):
             commitphase = phases.secret
         newid = repo.commitctx(new)
+        ms.reset()
 
         # Reroute the working copy parent to the new changeset
         repo.setparents(newid, nullid)
@@ -3375,7 +3035,7 @@
 
 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
     ui = repo.ui
-    spec = formatter.templatespec(ref, None, None)
+    spec = formatter.reference_templatespec(ref)
     t = logcmdutil.changesettemplater(ui, repo, spec)
     t.t.cache.update(
         (k, templater.unquotestring(v))
@@ -3492,9 +3152,9 @@
     return repo.status(match=scmutil.match(repo[None], pats, opts))
 
 
-def revert(ui, repo, ctx, parents, *pats, **opts):
+def revert(ui, repo, ctx, *pats, **opts):
     opts = pycompat.byteskwargs(opts)
-    parent, p2 = parents
+    parent, p2 = repo.dirstate.parents()
     node = ctx.node()
 
     mf = ctx.manifest()
@@ -3780,7 +3440,6 @@
             match = scmutil.match(repo[None], pats)
             _performrevert(
                 repo,
-                parents,
                 ctx,
                 names,
                 uipathfn,
@@ -3806,7 +3465,6 @@
 
 def _performrevert(
     repo,
-    parents,
     ctx,
     names,
     uipathfn,
@@ -3822,7 +3480,7 @@
 
     Make sure you have the working directory locked when calling this function.
     """
-    parent, p2 = parents
+    parent, p2 = repo.dirstate.parents()
     node = ctx.node()
     excluded_files = []
 
@@ -4152,7 +3810,6 @@
         startctx = repo[b'.']
     # whether to strip or not
     cleanup = False
-    from . import hg
 
     if newnodes:
         newnodes = [repo[r].rev() for r in newnodes]
@@ -4180,7 +3837,7 @@
 
         if cleanup:
             with repo.wlock(), repo.lock():
-                hg.updaterepo(repo, startctx.node(), overwrite=True)
+                mergemod.clean_update(startctx)
                 # stripping the new nodes created
                 strippoints = [
                     c.node() for c in repo.set(b"roots(%ld)", newnodes)
@@ -4190,7 +3847,7 @@
     if not cleanup:
         # we don't update to the startnode if we can't strip
         startctx = repo[b'.']
-        hg.updaterepo(repo, startctx.node(), overwrite=True)
+        mergemod.clean_update(startctx)
 
     ui.status(_(b"graft aborted\n"))
     ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
--- a/mercurial/commands.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/commands.py	Tue Oct 20 22:04:04 2020 +0530
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import difflib
 import errno
 import os
 import re
@@ -41,6 +40,7 @@
     filemerge,
     formatter,
     graphmod,
+    grep as grepmod,
     hbisect,
     help,
     hg,
@@ -55,6 +55,7 @@
     pycompat,
     rcutil,
     registrar,
+    requirements,
     revsetlang,
     rewriteutil,
     scmutil,
@@ -66,6 +67,7 @@
     ui as uimod,
     util,
     verify as verifymod,
+    vfs as vfsmod,
     wireprotoserver,
 )
 from .utils import (
@@ -767,11 +769,8 @@
 
 
 def _dobackout(ui, repo, node=None, rev=None, **opts):
+    cmdutil.check_incompatible_arguments(opts, 'no_commit', ['commit', 'merge'])
     opts = pycompat.byteskwargs(opts)
-    if opts.get(b'commit') and opts.get(b'no_commit'):
-        raise error.Abort(_(b"cannot use --commit with --no-commit"))
-    if opts.get(b'merge') and opts.get(b'no_commit'):
-        raise error.Abort(_(b"cannot use --merge with --no-commit"))
 
     if rev and node:
         raise error.Abort(_(b"please specify just one revision"))
@@ -788,7 +787,8 @@
 
     cmdutil.checkunfinished(repo)
     cmdutil.bailifchanged(repo)
-    node = scmutil.revsingle(repo, rev).node()
+    ctx = scmutil.revsingle(repo, rev)
+    node = ctx.node()
 
     op1, op2 = repo.dirstate.parents()
     if not repo.changelog.isancestor(node, op1):
@@ -819,14 +819,7 @@
         with dirstateguard.dirstateguard(repo, b'backout'):
             overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
             with ui.configoverride(overrides, b'backout'):
-                stats = mergemod.update(
-                    repo,
-                    parent,
-                    branchmerge=True,
-                    force=True,
-                    ancestor=node,
-                    mergeancestor=False,
-                )
+                stats = mergemod.back_out(ctx, parent=repo[parent])
             repo.setparents(op1, op2)
         hg._showstats(repo, stats)
         if stats.unresolvedcount:
@@ -837,7 +830,7 @@
     else:
         hg.clean(repo, node, show_stats=False)
         repo.dirstate.setbranch(branch)
-        cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())
+        cmdutil.revert(ui, repo, rctx)
 
     if opts.get(b'no_commit'):
         msg = _(b"changeset %s backed out, don't forget to commit.\n")
@@ -2141,6 +2134,12 @@
         (b'u', b'untrusted', None, _(b'show untrusted configuration options')),
         (b'e', b'edit', None, _(b'edit user config')),
         (b'l', b'local', None, _(b'edit repository config')),
+        (
+            b'',
+            b'shared',
+            None,
+            _(b'edit shared source repository config (EXPERIMENTAL)'),
+        ),
         (b'g', b'global', None, _(b'edit global config')),
     ]
     + formatteropts,
@@ -2179,22 +2178,37 @@
       :source:  String. Filename and line number where the item is defined.
       :value:   String. Config value.
 
+      The --shared flag can be used to edit the config file of the shared
+      source repository. It only works when the repository has been shared
+      using the experimental share safe feature.
+
     Returns 0 on success, 1 if NAME does not exist.
 
     """
 
     opts = pycompat.byteskwargs(opts)
-    editopts = (b'edit', b'local', b'global')
+    editopts = (b'edit', b'local', b'global', b'shared')
     if any(opts.get(o) for o in editopts):
-        if opts.get(b'local') and opts.get(b'global'):
-            raise error.Abort(_(b"can't use --local and --global together"))
-
+        cmdutil.check_at_most_one_arg(opts, *editopts[1:])
         if opts.get(b'local'):
             if not repo:
                 raise error.Abort(_(b"can't use --local outside a repository"))
             paths = [repo.vfs.join(b'hgrc')]
         elif opts.get(b'global'):
             paths = rcutil.systemrcpath()
+        elif opts.get(b'shared'):
+            if not repo.shared():
+                raise error.Abort(
+                    _(b"repository is not shared; can't use --shared")
+                )
+            if requirements.SHARESAFE_REQUIREMENT not in repo.requirements:
+                raise error.Abort(
+                    _(
+                        b"share safe feature not enabled; "
+                        b"unable to edit shared source repository config"
+                    )
+                )
+            paths = [vfsmod.vfs(repo.sharedpath).join(b'hgrc')]
         else:
             paths = rcutil.userrcpath()
 
@@ -2311,7 +2325,7 @@
 @command(
     b'copy|cp',
     [
-        (b'', b'forget', None, _(b'unmark a file as copied')),
+        (b'', b'forget', None, _(b'unmark a destination file as copied')),
         (b'A', b'after', None, _(b'record a copy that has already occurred')),
         (
             b'',
@@ -2343,9 +2357,9 @@
     exist in the working directory. If invoked with -A/--after, the
     operation is recorded, but no copying is performed.
 
-    To undo marking a file as copied, use --forget. With that option,
-    all given (positional) arguments are unmarked as copies. The destination
-    file(s) will be left in place (still tracked).
+    To undo marking a destination file as copied, use --forget. With that
+    option, all given (positional) arguments are unmarked as copies. The
+    destination file(s) will be left in place (still tracked).
 
     This command takes effect with the next commit by default.
 
@@ -3230,7 +3244,7 @@
     if not graftstate.exists():
         raise error.Abort(_(b"no interrupted graft found"))
     pctx = repo[b'.']
-    hg.updaterepo(repo, pctx.node(), overwrite=True)
+    mergemod.clean_update(pctx)
     graftstate.delete()
     ui.status(_(b"stopped the interrupted graft\n"))
     ui.status(_(b"working directory is now at %s\n") % pctx.hex()[:12])
@@ -3252,7 +3266,7 @@
     b'grep',
     [
         (b'0', b'print0', None, _(b'end fields with NUL')),
-        (b'', b'all', None, _(b'print all revisions that match (DEPRECATED) ')),
+        (b'', b'all', None, _(b'an alias to --diff (DEPRECATED)')),
         (
             b'',
             b'diff',
@@ -3351,13 +3365,17 @@
     Returns 0 if a match is found, 1 otherwise.
 
     """
+    cmdutil.check_incompatible_arguments(opts, 'all_files', ['all', 'diff'])
     opts = pycompat.byteskwargs(opts)
     diff = opts.get(b'all') or opts.get(b'diff')
-    if diff and opts.get(b'all_files'):
-        raise error.Abort(_(b'--diff and --all-files are mutually exclusive'))
+    follow = opts.get(b'follow')
     if opts.get(b'all_files') is None and not diff:
         opts[b'all_files'] = True
-    plaingrep = opts.get(b'all_files') and not opts.get(b'rev')
+    plaingrep = (
+        opts.get(b'all_files')
+        and not opts.get(b'rev')
+        and not opts.get(b'follow')
+    )
     all_files = opts.get(b'all_files')
     if plaingrep:
         opts[b'rev'] = [b'wdir()']
@@ -3376,76 +3394,11 @@
     if opts.get(b'print0'):
         sep = eol = b'\0'
 
-    getfile = util.lrucachefunc(repo.file)
-
-    def matchlines(body):
-        begin = 0
-        linenum = 0
-        while begin < len(body):
-            match = regexp.search(body, begin)
-            if not match:
-                break
-            mstart, mend = match.span()
-            linenum += body.count(b'\n', begin, mstart) + 1
-            lstart = body.rfind(b'\n', begin, mstart) + 1 or begin
-            begin = body.find(b'\n', mend) + 1 or len(body) + 1
-            lend = begin - 1
-            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
-
-    class linestate(object):
-        def __init__(self, line, linenum, colstart, colend):
-            self.line = line
-            self.linenum = linenum
-            self.colstart = colstart
-            self.colend = colend
-
-        def __hash__(self):
-            return hash((self.linenum, self.line))
-
-        def __eq__(self, other):
-            return self.line == other.line
-
-        def findpos(self):
-            """Iterate all (start, end) indices of matches"""
-            yield self.colstart, self.colend
-            p = self.colend
-            while p < len(self.line):
-                m = regexp.search(self.line, p)
-                if not m:
-                    break
-                if m.end() == p:
-                    p += 1
-                else:
-                    yield m.span()
-                    p = m.end()
-
-    matches = {}
-    copies = {}
-
-    def grepbody(fn, rev, body):
-        matches[rev].setdefault(fn, [])
-        m = matches[rev][fn]
-        if body is None:
-            return
-
-        for lnum, cstart, cend, line in matchlines(body):
-            s = linestate(line, lnum, cstart, cend)
-            m.append(s)
-
-    def difflinestates(a, b):
-        sm = difflib.SequenceMatcher(None, a, b)
-        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag == 'insert':
-                for i in pycompat.xrange(blo, bhi):
-                    yield (b'+', b[i])
-            elif tag == 'delete':
-                for i in pycompat.xrange(alo, ahi):
-                    yield (b'-', a[i])
-            elif tag == 'replace':
-                for i in pycompat.xrange(alo, ahi):
-                    yield (b'-', a[i])
-                for i in pycompat.xrange(blo, bhi):
-                    yield (b'+', b[i])
+    searcher = grepmod.grepsearcher(
+        ui, repo, regexp, all_files=all_files, diff=diff, follow=follow
+    )
+
+    getfile = searcher._getfile
 
     uipathfn = scmutil.getuipathfn(repo)
 
@@ -3471,7 +3424,7 @@
 
         fieldnamemap = {b'linenumber': b'lineno'}
         if diff:
-            iter = difflinestates(pstates, states)
+            iter = grepmod.difflinestates(pstates, states)
         else:
             iter = [(b'', l) for l in states]
         for change, l in iter:
@@ -3540,7 +3493,7 @@
 
     def displaymatches(fm, l):
         p = 0
-        for s, e in l.findpos():
+        for s, e in l.findpos(regexp):
             if p < s:
                 fm.startitem()
                 fm.write(b'text', b'%s', l.line[p:s])
@@ -3555,102 +3508,27 @@
             fm.data(matched=False)
         fm.end()
 
-    skip = set()
-    revfiles = {}
-    match = scmutil.match(repo[None], pats, opts)
     found = False
-    follow = opts.get(b'follow')
-
-    getrenamed = scmutil.getrenamedfn(repo)
-
-    def readfile(ctx, fn):
-        rev = ctx.rev()
-        if rev is None:
-            fctx = ctx[fn]
-            try:
-                return fctx.data()
-            except IOError as e:
-                if e.errno != errno.ENOENT:
-                    raise
-        else:
-            flog = getfile(fn)
-            fnode = ctx.filenode(fn)
-            try:
-                return flog.read(fnode)
-            except error.CensoredNodeError:
-                ui.warn(
-                    _(
-                        b'cannot search in censored file: %(filename)s:%(revnum)s\n'
-                    )
-                    % {b'filename': fn, b'revnum': pycompat.bytestr(rev),}
-                )
-
-    def prep(ctx, fns):
-        rev = ctx.rev()
-        pctx = ctx.p1()
-        matches.setdefault(rev, {})
-        if diff:
-            parent = pctx.rev()
-            matches.setdefault(parent, {})
-        files = revfiles.setdefault(rev, [])
-        if rev is None:
-            # in `hg grep pattern`, 2/3 of the time is spent is spent in
-            # pathauditor checks without this in mozilla-central
-            contextmanager = repo.wvfs.audit.cached
-        else:
-            contextmanager = util.nullcontextmanager
-        with contextmanager():
-            for fn in fns:
-                # fn might not exist in the revision (could be a file removed by
-                # the revision). We could check `fn not in ctx` even when rev is
-                # None, but it's less racy to protect againt that in readfile.
-                if rev is not None and fn not in ctx:
-                    continue
-
-                copy = None
-                if follow:
-                    copy = getrenamed(fn, rev)
-                    if copy:
-                        copies.setdefault(rev, {})[fn] = copy
-                        if fn in skip:
-                            skip.add(copy)
-                if fn in skip:
-                    continue
-                files.append(fn)
-
-                if fn not in matches[rev]:
-                    grepbody(fn, rev, readfile(ctx, fn))
-
-                if diff:
-                    pfn = copy or fn
-                    if pfn not in matches[parent] and pfn in pctx:
-                        grepbody(pfn, parent, readfile(pctx, pfn))
+
+    wopts = logcmdutil.walkopts(
+        pats=pats,
+        opts=opts,
+        revspec=opts[b'rev'],
+        include_pats=opts[b'include'],
+        exclude_pats=opts[b'exclude'],
+        follow=follow,
+        force_changelog_traversal=all_files,
+        filter_revisions_by_pats=not all_files,
+    )
+    revs, makefilematcher = logcmdutil.makewalker(repo, wopts)
 
     ui.pager(b'grep')
     fm = ui.formatter(b'grep', opts)
-    for ctx in cmdutil.walkchangerevs(repo, match, opts, prep):
-        rev = ctx.rev()
-        parent = ctx.p1().rev()
-        for fn in sorted(revfiles.get(rev, [])):
-            states = matches[rev][fn]
-            copy = copies.get(rev, {}).get(fn)
-            if fn in skip:
-                if copy:
-                    skip.add(copy)
-                continue
-            pstates = matches.get(parent, {}).get(copy or fn, [])
-            if pstates or states:
-                r = display(fm, fn, ctx, pstates, states)
-                found = found or r
-                if r and not diff and not all_files:
-                    skip.add(fn)
-                    if copy:
-                        skip.add(copy)
-        del revfiles[rev]
-        # We will keep the matches dict for the duration of the window
-        # clear the matches dict once the window is over
-        if not revfiles:
-            matches.clear()
+    for fn, ctx, pstates, states in searcher.searchfiles(revs, makefilematcher):
+        r = display(fm, fn, ctx, pstates, states)
+        found = found or r
+        if r and not diff and not all_files:
+            searcher.skipfile(fn, ctx.rev())
     fm.end()
 
     return not found
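
The refactored command delegates all bookkeeping to the new grep module. A condensed sketch of the driving loop, using only the calls visible in this diff (the `display` formatter and option parsing are elided; the `grep_revisions` helper name is illustrative, not the exact commands.py source):

    from mercurial import grep as grepmod, logcmdutil

    def grep_revisions(ui, repo, regexp, pats, opts, display):
        all_files = opts.get(b'all_files')
        diff = opts.get(b'diff')
        follow = opts.get(b'follow')
        searcher = grepmod.grepsearcher(
            ui, repo, regexp, all_files=all_files, diff=diff, follow=follow
        )
        wopts = logcmdutil.walkopts(
            pats=pats,
            opts=opts,
            revspec=opts[b'rev'],
            include_pats=opts[b'include'],
            exclude_pats=opts[b'exclude'],
            follow=follow,
            force_changelog_traversal=all_files,
            filter_revisions_by_pats=not all_files,
        )
        revs, makefilematcher = logcmdutil.makewalker(repo, wopts)
        found = False
        fm = ui.formatter(b'grep', opts)
        for fn, ctx, pstates, states in searcher.searchfiles(
            revs, makefilematcher
        ):
            r = display(fm, fn, ctx, pstates, states)
            found = found or r
            if r and not diff and not all_files:
                # a file that matched once is skipped in older revisions
                searcher.skipfile(fn, ctx.rev())
        fm.end()
        return not found
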
@@ -4162,6 +4040,10 @@
     Returns 0 on success, 1 on partial success (see --partial).
     """
 
+    cmdutil.check_incompatible_arguments(
+        opts, 'no_commit', ['bypass', 'secret']
+    )
+    cmdutil.check_incompatible_arguments(opts, 'exact', ['edit', 'prefix'])
     opts = pycompat.byteskwargs(opts)
     if not patch1:
         raise error.Abort(_(b'need at least one patch to import'))
@@ -4174,10 +4056,6 @@
 
     exact = opts.get(b'exact')
     update = not opts.get(b'bypass')
-    if not update and opts.get(b'no_commit'):
-        raise error.Abort(_(b'cannot use --no-commit with --bypass'))
-    if opts.get(b'secret') and opts.get(b'no_commit'):
-        raise error.Abort(_(b'cannot use --no-commit with --secret'))
     try:
         sim = float(opts.get(b'similarity') or 0)
     except ValueError:
@@ -4186,11 +4064,6 @@
         raise error.Abort(_(b'similarity must be between 0 and 100'))
     if sim and not update:
         raise error.Abort(_(b'cannot use --similarity with --bypass'))
-    if exact:
-        if opts.get(b'edit'):
-            raise error.Abort(_(b'cannot use --exact with --edit'))
-        if opts.get(b'prefix'):
-            raise error.Abort(_(b'cannot use --exact with --prefix'))
 
     base = opts[b"base"]
     msgs = []
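
Several hunks in this changeset replace hand-rolled error.Abort pairs with cmdutil.check_incompatible_arguments. Its behavior is roughly the following sketch (assuming bytes option names; the real cmdutil helper also accepts native strings and rewrites underscores to dashes in the message):

    from mercurial import error
    from mercurial.i18n import _

    def check_incompatible_arguments(opts, first, others):
        # abort if `first` is set together with any option in `others`
        if not opts.get(first):
            return
        for other in others:
            if opts.get(other):
                raise error.Abort(
                    _(b'cannot specify both --%s and --%s') % (first, other)
                )
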
@@ -4354,8 +4227,7 @@
         hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
         return 0
 
-    if opts.get(b'bundle') and opts.get(b'subrepos'):
-        raise error.Abort(_(b'cannot combine --bundle and --subrepos'))
+    cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle'])
 
     if opts.get(b'bookmarks'):
         source, branches = hg.parseurl(
@@ -4713,7 +4585,9 @@
         )
 
     repo = scmutil.unhidehashlikerevs(repo, opts.get(b'rev'), b'nowarn')
-    revs, differ = logcmdutil.getrevs(repo, pats, opts)
+    revs, differ = logcmdutil.getrevs(
+        repo, logcmdutil.parseopts(ui, pats, opts)
+    )
     if linerange:
         # TODO: should follow file history from logcmdutil._initialrevs(),
         # then filter the result by logcmdutil._makerevset() and --limit
@@ -5781,6 +5655,13 @@
     [
         (b'A', b'after', None, _(b'record a rename that has already occurred')),
         (
+            b'',
+            b'at-rev',
+            b'',
+            _(b'(un)mark renames in the given revision (EXPERIMENTAL)'),
+            _(b'REV'),
+        ),
+        (
             b'f',
             b'force',
             None,
@@ -5952,18 +5833,12 @@
                 b'resolve.resolved',
                 b'R',
             ),
-            mergestatemod.MERGE_RECORD_DRIVER_RESOLVED: (
-                b'resolve.driverresolved',
-                b'D',
-            ),
         }
 
         for f in ms:
             if not m(f):
                 continue
 
-            if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
-                continue
             label, key = mergestateinfo[ms[f]]
             fm.startitem()
             fm.context(ctx=wctx)
@@ -5982,21 +5857,9 @@
             )
 
         wctx = repo[None]
-
-        if (
-            ms.mergedriver
-            and ms.mdstate() == mergestatemod.MERGE_DRIVER_STATE_UNMARKED
-        ):
-            proceed = mergemod.driverpreprocess(repo, ms, wctx)
-            ms.commit()
-            # allow mark and unmark to go through
-            if not mark and not unmark and not proceed:
-                return 1
-
         m = scmutil.match(wctx, pats, opts)
         ret = 0
         didwork = False
-        runconclude = False
 
         tocomplete = []
         hasconflictmarkers = []
@@ -6011,29 +5874,6 @@
 
             didwork = True
 
-            if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
-                continue
-
-            # don't let driver-resolved files be marked, and run the conclude
-            # step if asked to resolve
-            if ms[f] == mergestatemod.MERGE_RECORD_DRIVER_RESOLVED:
-                exact = m.exact(f)
-                if mark:
-                    if exact:
-                        ui.warn(
-                            _(b'not marking %s as it is driver-resolved\n')
-                            % uipathfn(f)
-                        )
-                elif unmark:
-                    if exact:
-                        ui.warn(
-                            _(b'not unmarking %s as it is driver-resolved\n')
-                            % uipathfn(f)
-                        )
-                else:
-                    runconclude = True
-                continue
-
             # path conflicts must be resolved manually
             if ms[f] in (
                 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
@@ -6155,32 +5995,11 @@
             ui.warn(_(b"arguments do not match paths that need resolving\n"))
             if hint:
                 ui.warn(hint)
-        elif ms.mergedriver and ms.mdstate() != b's':
-            # run conclude step when either a driver-resolved file is requested
-            # or there are no driver-resolved files
-            # we can't use 'ret' to determine whether any files are unresolved
-            # because we might not have tried to resolve some
-            if (runconclude or not list(ms.driverresolved())) and not list(
-                ms.unresolved()
-            ):
-                proceed = mergemod.driverconclude(repo, ms, wctx)
-                ms.commit()
-                if not proceed:
-                    return 1
-
-    # Nudge users into finishing an unfinished operation
+
     unresolvedf = list(ms.unresolved())
-    driverresolvedf = list(ms.driverresolved())
-    if not unresolvedf and not driverresolvedf:
+    if not unresolvedf:
         ui.status(_(b'(no more unresolved files)\n'))
         cmdutil.checkafterresolved(repo)
-    elif not unresolvedf:
-        ui.status(
-            _(
-                b'(no more unresolved files -- '
-                b'run "hg resolve --all" to conclude)\n'
-            )
-        )
 
     return ret
 
@@ -6238,8 +6057,7 @@
 
     opts = pycompat.byteskwargs(opts)
     if opts.get(b"date"):
-        if opts.get(b"rev"):
-            raise error.Abort(_(b"you can't specify a revision and a date"))
+        cmdutil.check_incompatible_arguments(opts, b'date', [b'rev'])
         opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
 
     parent, p2 = repo.dirstate.parents()
@@ -6294,9 +6112,7 @@
             hint = _(b"use --all to revert all files")
         raise error.Abort(msg, hint=hint)
 
-    return cmdutil.revert(
-        ui, repo, ctx, (parent, p2), *pats, **pycompat.strkwargs(opts)
-    )
+    return cmdutil.revert(ui, repo, ctx, *pats, **pycompat.strkwargs(opts))
 
 
 @command(
@@ -6501,9 +6317,8 @@
     Returns 0 on success.
     """
 
+    cmdutil.check_incompatible_arguments(opts, 'stdio', ['cmdserver'])
     opts = pycompat.byteskwargs(opts)
-    if opts[b"stdio"] and opts[b"cmdserver"]:
-        raise error.Abort(_(b"cannot use --stdio with --cmdserver"))
     if opts[b"print_url"] and ui.verbose:
         raise error.Abort(_(b"cannot use --print-url with --verbose"))
 
@@ -7273,6 +7088,7 @@
 
     Returns 0 on success.
     """
+    cmdutil.check_incompatible_arguments(opts, 'remove', ['rev'])
     opts = pycompat.byteskwargs(opts)
     with repo.wlock(), repo.lock():
         rev_ = b"."
@@ -7285,8 +7101,6 @@
                 raise error.Abort(
                     _(b'tag names cannot consist entirely of whitespace')
                 )
-        if opts.get(b'rev') and opts.get(b'remove'):
-            raise error.Abort(_(b"--rev and --remove are incompatible"))
         if opts.get(b'rev'):
             rev_ = opts[b'rev']
         message = opts.get(b'message')
--- a/mercurial/commandserver.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/commandserver.py	Tue Oct 20 22:04:04 2020 +0530
@@ -500,7 +500,7 @@
         # handle exceptions that may be raised by command server. most of
         # known exceptions are caught by dispatch.
         except error.Abort as inst:
-            ui.error(_(b'abort: %s\n') % inst)
+            ui.error(_(b'abort: %s\n') % inst.message)
         except IOError as inst:
             if inst.errno != errno.EPIPE:
                 raise
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/commit.py	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,475 @@
+# commit.py - function to perform commit
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+
+from .i18n import _
+from .node import (
+    hex,
+    nullid,
+    nullrev,
+)
+
+from . import (
+    context,
+    mergestate,
+    metadata,
+    phases,
+    scmutil,
+    subrepoutil,
+)
+
+
+def _write_copy_meta(repo):
+    """return a (changelog, filelog) boolean tuple
+
+    changelog: copy related information should be stored in the changeset
+    filelog:   copy related information should be written in the file revision
+    """
+    if repo.filecopiesmode == b'changeset-sidedata':
+        writechangesetcopy = True
+        writefilecopymeta = True
+    else:
+        writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
+        writefilecopymeta = writecopiesto != b'changeset-only'
+        writechangesetcopy = writecopiesto in (
+            b'changeset-only',
+            b'compatibility',
+        )
+    return writechangesetcopy, writefilecopymeta
+
+
+def commitctx(repo, ctx, error=False, origctx=None):
+    """Add a new revision to the target repository.
+    Revision information is passed via the context argument.
+
+    ctx.files() should list all files involved in this commit, i.e.
+    modified/added/removed files. On merge, it may be wider than the
+    ctx.files() to be committed, since any file nodes derived directly
+    from p1 or p2 are excluded from the committed ctx.files().
+
+    origctx is for convert to work around the problem that bug
+    fixes to the files list in changesets change hashes. For
+    convert to be the identity, it can pass an origctx and this
+    function will use the same files list when it makes sense to
+    do so.
+    """
+    repo = repo.unfiltered()
+
+    p1, p2 = ctx.p1(), ctx.p2()
+    user = ctx.user()
+
+    with repo.lock(), repo.transaction(b"commit") as tr:
+        mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)
+
+        extra = ctx.extra().copy()
+
+        if extra is not None:
+            for name in (
+                b'p1copies',
+                b'p2copies',
+                b'filesadded',
+                b'filesremoved',
+            ):
+                extra.pop(name, None)
+        if repo.changelog._copiesstorage == b'extra':
+            extra = _extra_with_copies(repo, extra, files)
+
+        # update changelog
+        repo.ui.note(_(b"committing changelog\n"))
+        repo.changelog.delayupdate(tr)
+        n = repo.changelog.add(
+            mn,
+            files,
+            ctx.description(),
+            tr,
+            p1.node(),
+            p2.node(),
+            user,
+            ctx.date(),
+            extra,
+        )
+        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
+        repo.hook(
+            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
+        )
+        # set the new commit in its proper phase
+        targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
+        if targetphase:
+            # retracting the phase boundary does not alter parent changesets.
+            # if a parent has a higher phase, the resulting phase will
+            # be compliant anyway
+            #
+            # if minimal phase was 0 we don't need to retract anything
+            phases.registernew(repo, tr, targetphase, [n])
+        return n
+
+
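
With commit logic extracted into this module, the repository object simply delegates. A minimal sketch of the call site (the `commitmod` alias and wrapper name are assumptions for illustration):

    from mercurial import commit as commitmod

    def repo_commitctx(repo, ctx, error=False, origctx=None):
        # localrepo.commitctx() now forwards to the extracted module
        return commitmod.commitctx(repo, ctx, error=error, origctx=origctx)
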
+def _prepare_files(tr, ctx, error=False, origctx=None):
+    repo = ctx.repo()
+    p1 = ctx.p1()
+
+    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
+    files = metadata.ChangingFiles()
+    ms = mergestate.mergestate.read(repo)
+    salvaged = _get_salvaged(repo, ms, ctx)
+    for s in salvaged:
+        files.mark_salvaged(s)
+
+    if ctx.manifestnode():
+        # reuse an existing manifest revision
+        repo.ui.debug(b'reusing known manifest\n')
+        mn = ctx.manifestnode()
+        files.update_touched(ctx.files())
+        if writechangesetcopy:
+            files.update_added(ctx.filesadded())
+            files.update_removed(ctx.filesremoved())
+    elif not ctx.files():
+        repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
+        mn = p1.manifestnode()
+    else:
+        mn = _process_files(tr, ctx, ms, files, error=error)
+
+    if origctx and origctx.manifestnode() == mn:
+        origfiles = origctx.files()
+        assert files.touched.issubset(origfiles)
+        files.update_touched(origfiles)
+
+    if writechangesetcopy:
+        files.update_copies_from_p1(ctx.p1copies())
+        files.update_copies_from_p2(ctx.p2copies())
+
+    return mn, files
+
+
+def _get_salvaged(repo, ms, ctx):
+    """ returns a list of salvaged files
+
+    returns an empty list if the config option which processes salvaged
+    files is not enabled """
+    salvaged = []
+    copy_sd = repo.filecopiesmode == b'changeset-sidedata'
+    if copy_sd and len(ctx.parents()) > 1:
+        if ms.active():
+            for fname in sorted(ms.allextras().keys()):
+                might_removed = ms.extras(fname).get(b'merge-removal-candidate')
+                if might_removed == b'yes':
+                    if fname in ctx:
+                        salvaged.append(fname)
+    return salvaged
+
+
+def _process_files(tr, ctx, ms, files, error=False):
+    repo = ctx.repo()
+    p1 = ctx.p1()
+    p2 = ctx.p2()
+
+    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
+
+    m1ctx = p1.manifestctx()
+    m2ctx = p2.manifestctx()
+    mctx = m1ctx.copy()
+
+    m = mctx.read()
+    m1 = m1ctx.read()
+    m2 = m2ctx.read()
+
+    # check in files
+    added = []
+    removed = list(ctx.removed())
+    linkrev = len(repo)
+    repo.ui.note(_(b"committing files:\n"))
+    uipathfn = scmutil.getuipathfn(repo)
+    for f in sorted(ctx.modified() + ctx.added()):
+        repo.ui.note(uipathfn(f) + b"\n")
+        try:
+            fctx = ctx[f]
+            if fctx is None:
+                removed.append(f)
+            else:
+                added.append(f)
+                m[f], is_touched = _filecommit(
+                    repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms
+                )
+                if is_touched:
+                    if is_touched == 'added':
+                        files.mark_added(f)
+                    elif is_touched == 'merged':
+                        files.mark_merged(f)
+                    else:
+                        files.mark_touched(f)
+                m.setflag(f, fctx.flags())
+        except OSError:
+            repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
+            raise
+        except IOError as inst:
+            errcode = getattr(inst, 'errno', errno.ENOENT)
+            if error or errcode and errcode != errno.ENOENT:
+                repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
+            raise
+
+    # update manifest
+    removed = [f for f in removed if f in m1 or f in m2]
+    drop = sorted([f for f in removed if f in m])
+    for f in drop:
+        del m[f]
+    if p2.rev() == nullrev:
+        files.update_removed(removed)
+    else:
+        rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
+        for f in removed:
+            if not rf(f):
+                files.mark_removed(f)
+
+    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)
+
+    return mn
+
+
+def _filecommit(
+    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta, ms,
+):
+    """
+    commit an individual file as part of a larger transaction
+
+    input:
+
+        fctx:       a file context with the content we are trying to commit
+        manifest1:  manifest of changeset first parent
+        manifest2:  manifest of changeset second parent
+        linkrev:    revision number of the changeset being created
+        tr:         current transaction
+        includecopymeta: boolean, set to False to skip storing the copy data
+                    (only used by the Google specific feature of using
+                    changeset extra as copy source of truth).
+        ms:         mergestate object
+
+    output: (filenode, touched)
+
+        filenode: the filenode that should be used by this changeset
+        touched:  one of: None (meaning untouched), 'added', 'merged' or 'modified'
+    """
+
+    fname = fctx.path()
+    fparent1 = manifest1.get(fname, nullid)
+    fparent2 = manifest2.get(fname, nullid)
+    touched = None
+    if fparent1 == fparent2 == nullid:
+        touched = 'added'
+
+    if isinstance(fctx, context.filectx):
+        # This block fast-paths the comparisons which are usually done. It
+        # assumes that a bare filectx is used and no merge happened, hence no
+        # need to create a new file revision in this case.
+        node = fctx.filenode()
+        if node in [fparent1, fparent2]:
+            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
+            if (
+                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+            ) or (
+                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+            ):
+                touched = 'modified'
+            return node, touched
+
+    flog = repo.file(fname)
+    meta = {}
+    cfname = fctx.copysource()
+    fnode = None
+
+    if cfname and cfname != fname:
+        # Mark the new revision of this file as a copy of another
+        # file.  This copy data will effectively act as a parent
+        # of this new revision.  If this is a merge, the first
+        # parent will be the nullid (meaning "look up the copy data")
+        # and the second one will be the other parent.  For example:
+        #
+        # 0 --- 1 --- 3   rev1 changes file foo
+        #   \       /     rev2 renames foo to bar and changes it
+        #    \- 2 -/      rev3 should have bar with all changes and
+        #                      should record that bar descends from
+        #                      bar in rev2 and foo in rev1
+        #
+        # this allows this merge to succeed:
+        #
+        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
+        #   \       /     merging rev3 and rev4 should use bar@rev2
+        #    \- 2 --- 4        as the merge base
+        #
+
+        cnode = manifest1.get(cfname)
+        newfparent = fparent2
+
+        if manifest2:  # branch merge
+            if fparent2 == nullid or cnode is None:  # copied on remote side
+                if cfname in manifest2:
+                    cnode = manifest2[cfname]
+                    newfparent = fparent1
+
+        # Here, we used to search backwards through history to try to find
+        # where the file copy came from if the source of a copy was not in
+        # the parent directory. However, this doesn't actually make sense to
+        # do (what does a copy from something not in your working copy even
+        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
+        # the user that copy information was dropped, so if they didn't
+        # expect this outcome it can be fixed, but this is the correct
+        # behavior in this circumstance.
+
+        if cnode:
+            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
+            if includecopymeta:
+                meta[b"copy"] = cfname
+                meta[b"copyrev"] = hex(cnode)
+            fparent1, fparent2 = nullid, newfparent
+        else:
+            repo.ui.warn(
+                _(
+                    b"warning: can't find ancestor for '%s' "
+                    b"copied from '%s'!\n"
+                )
+                % (fname, cfname)
+            )
+
+    elif fparent1 == nullid:
+        fparent1, fparent2 = fparent2, nullid
+    elif fparent2 != nullid:
+        # is one parent an ancestor of the other?
+        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
+        if fparent1 in fparentancestors:
+            fparent1, fparent2 = fparent2, nullid
+        elif fparent2 in fparentancestors:
+            fparent2 = nullid
+        elif not fparentancestors:
+            # TODO: this whole if-else might be simplified much more
+            if (
+                ms.active()
+                and ms.extras(fname).get(b'filenode-source') == b'other'
+            ):
+                fparent1, fparent2 = fparent2, nullid
+
+    force_new_node = False
+    # The file might have been deleted by merge code and the user explicitly
+    # chose to revert the file and keep it. The other case is a change-delete
+    # or delete-change conflict where the user explicitly chose to keep the
+    # file. The goal is to create a new filenode for these explicit choices.
+    if (
+        repo.ui.configbool(b'experimental', b'merge-track-salvaged')
+        and ms.active()
+        and ms.extras(fname).get(b'merge-removal-candidate') == b'yes'
+    ):
+        force_new_node = True
+    # is the file changed?
+    text = fctx.data()
+    if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
+        if touched is None:  # do not overwrite added
+            if fparent2 == nullid:
+                touched = 'modified'
+            else:
+                touched = 'merged'
+        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
+    # are just the flags changed during merge?
+    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
+        touched = 'modified'
+        fnode = fparent1
+    else:
+        fnode = fparent1
+    return fnode, touched
+
+
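
The diagram in _filecommit above shows copy data acting as a file parent. As a hedged illustration (the wrapper and its name are hypothetical; the flog.add() call is the real filelog API): recording `copy`/`copyrev` metadata while nulling the first parent is what lets a later merge find the copy source as the base:

    from mercurial.node import hex, nullid

    def add_copy_revision(flog, tr, linkrev, text, cfname, cnode, newfparent):
        # the copy source acts as the effective first parent, so fparent1
        # becomes nullid ("look up the copy data")
        meta = {b'copy': cfname, b'copyrev': hex(cnode)}
        return flog.add(text, meta, tr, linkrev, nullid, newfparent)
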
+def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
+    """make a new manifest entry (or reuse a new one)
+
+    given an initialised manifest context and precomputed list of
+    - files: files affected by the commit
+    - added: new entries in the manifest
+    - drop:  entries present in parents but absent of this one
+
+    Create a new manifest revision, reuse existing ones if possible.
+
+    Return the nodeid of the manifest revision.
+    """
+    repo = ctx.repo()
+
+    md = None
+
+    # all this is cached, so it is fine to get them all from the ctx.
+    p1 = ctx.p1()
+    p2 = ctx.p2()
+    m1ctx = p1.manifestctx()
+
+    m1 = m1ctx.read()
+
+    if not files:
+        # if no "files" actually changed in terms of the changelog,
+        # try hard to detect unmodified manifest entry so that the
+        # exact same commit can be reproduced later on convert.
+        md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
+    if not files and md:
+        repo.ui.debug(
+            b'not reusing manifest (no file change in '
+            b'changelog, but manifest differs)\n'
+        )
+    if files or md:
+        repo.ui.note(_(b"committing manifest\n"))
+        # we're using narrowmatch here since it's already applied at
+        # other stages (such as dirstate.walk), so we're already
+        # ignoring things outside of narrowspec in most cases. The
+        # one case where we might have files outside the narrowspec
+        # at this point is merges, and we already error out in the
+        # case where the merge has files outside of the narrowspec,
+        # so this is safe.
+        mn = mctx.write(
+            tr,
+            linkrev,
+            p1.manifestnode(),
+            p2.manifestnode(),
+            added,
+            drop,
+            match=repo.narrowmatch(),
+        )
+    else:
+        repo.ui.debug(
+            b'reusing manifest from p1 (listed files actually unchanged)\n'
+        )
+        mn = p1.manifestnode()
+
+    return mn
+
+
+def _extra_with_copies(repo, extra, files):
+    """encode copy information into a `extra` dictionnary"""
+    p1copies = files.copied_from_p1
+    p2copies = files.copied_from_p2
+    filesadded = files.added
+    filesremoved = files.removed
+    files = sorted(files.touched)
+    if not _write_copy_meta(repo)[1]:
+        # If writing only to changeset extras, use None to indicate that
+        # no entry should be written. If writing to both, write an empty
+        # entry to prevent the reader from falling back to reading
+        # filelogs.
+        p1copies = p1copies or None
+        p2copies = p2copies or None
+        filesadded = filesadded or None
+        filesremoved = filesremoved or None
+
+    extrasentries = p1copies, p2copies, filesadded, filesremoved
+    if extra is None and any(x is not None for x in extrasentries):
+        extra = {}
+    if p1copies is not None:
+        p1copies = metadata.encodecopies(files, p1copies)
+        extra[b'p1copies'] = p1copies
+    if p2copies is not None:
+        p2copies = metadata.encodecopies(files, p2copies)
+        extra[b'p2copies'] = p2copies
+    if filesadded is not None:
+        filesadded = metadata.encodefileindices(files, filesadded)
+        extra[b'filesadded'] = filesadded
+    if filesremoved is not None:
+        filesremoved = metadata.encodefileindices(files, filesremoved)
+        extra[b'filesremoved'] = filesremoved
+    return extra
--- a/mercurial/config.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/config.py	Tue Oct 20 22:04:04 2020 +0530
@@ -21,10 +21,9 @@
 
 
 class config(object):
-    def __init__(self, data=None, includepaths=None):
+    def __init__(self, data=None):
         self._data = {}
         self._unset = []
-        self._includepaths = includepaths or []
         if data:
             for k in data._data:
                 self._data[k] = data[k].copy()
@@ -162,21 +161,15 @@
 
             if m and include:
                 expanded = util.expandpath(m.group(1))
-                includepaths = [os.path.dirname(src)] + self._includepaths
-
-                for base in includepaths:
-                    inc = os.path.normpath(os.path.join(base, expanded))
-
-                    try:
-                        include(inc, remap=remap, sections=sections)
-                        break
-                    except IOError as inst:
-                        if inst.errno != errno.ENOENT:
-                            raise error.ParseError(
-                                _(b"cannot include %s (%s)")
-                                % (inc, encoding.strtolocal(inst.strerror)),
-                                b"%s:%d" % (src, line),
-                            )
+                try:
+                    include(expanded, remap=remap, sections=sections)
+                except IOError as inst:
+                    if inst.errno != errno.ENOENT:
+                        raise error.ParseError(
+                            _(b"cannot include %s (%s)")
+                            % (expanded, encoding.strtolocal(inst.strerror)),
+                            b"%s:%d" % (src, line),
+                        )
                 continue
             if emptyre.match(l):
                 continue
@@ -216,8 +209,15 @@
             b'config files must be opened in binary mode, got fp=%r mode=%r'
             % (fp, fp.mode,)
         )
+
+        dir = os.path.dirname(path)
+
+        def include(rel, remap, sections):
+            abs = os.path.normpath(os.path.join(dir, rel))
+            self.read(abs, remap=remap, sections=sections)
+
         self.parse(
-            path, fp.read(), sections=sections, remap=remap, include=self.read
+            path, fp.read(), sections=sections, remap=remap, include=include
         )
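
With the includepaths machinery gone, a relative %include is always resolved against the directory of the file being read, via the closure above. A standalone sketch of that resolution (names mirror the diff; the factory wrapper itself is illustrative):

    import os

    def make_include(path, read):
        # build the `include` callback the way config.read() now does
        dirname = os.path.dirname(path)

        def include(rel, remap, sections):
            # a relative %include is taken relative to the including file
            abspath = os.path.normpath(os.path.join(dirname, rel))
            read(abspath, remap=remap, sections=sections)

        return include
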
 
 
--- a/mercurial/configitems.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/configitems.py	Tue Oct 20 22:04:04 2020 +0530
@@ -590,6 +590,11 @@
 coreconfigitem(
     b'experimental', b'maxdeltachainspan', default=-1,
 )
+# tracks files which were undeleted (merge might delete them but we explicitly
+# kept/undeleted them) and creates new filenodes for them
+coreconfigitem(
+    b'experimental', b'merge-track-salvaged', default=False,
+)
 coreconfigitem(
     b'experimental', b'mergetempdirprefix', default=None,
 )
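
The new knob is consumed by _filecommit() in commit.py above; reading it is a one-line configbool lookup (sketch, helper name hypothetical):

    def track_salvaged(repo):
        # off by default; opt in to new filenodes for explicitly kept files
        return repo.ui.configbool(b'experimental', b'merge-track-salvaged')
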
@@ -635,9 +640,6 @@
 coreconfigitem(
     b'experimental', b'httppostargs', default=False,
 )
-coreconfigitem(
-    b'experimental', b'mergedriver', default=None,
-)
 coreconfigitem(b'experimental', b'nointerrupt', default=False)
 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
 
@@ -784,6 +786,9 @@
     b'format', b'exp-use-side-data', default=False, experimental=True,
 )
 coreconfigitem(
+    b'format', b'exp-share-safe', default=False, experimental=True,
+)
+coreconfigitem(
     b'format', b'internal-phase', default=False, experimental=True,
 )
 coreconfigitem(
@@ -793,6 +798,9 @@
     b'fsmonitor', b'warn_update_file_count', default=50000,
 )
 coreconfigitem(
+    b'fsmonitor', b'warn_update_file_count_rust', default=400000,
+)
+coreconfigitem(
     b'help', br'hidden-command\..*', default=False, generic=True,
 )
 coreconfigitem(
--- a/mercurial/context.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/context.py	Tue Oct 20 22:04:04 2020 +0530
@@ -271,7 +271,7 @@
                 return self._manifest.find(path)
             except KeyError:
                 raise error.ManifestLookupError(
-                    self._node, path, _(b'not found in manifest')
+                    self._node or b'None', path, _(b'not found in manifest')
                 )
         if '_manifestdelta' in self.__dict__ or path in self.files():
             if path in self._manifestdelta:
@@ -284,7 +284,7 @@
             node, flag = mfl[self._changeset.manifest].find(path)
         except KeyError:
             raise error.ManifestLookupError(
-                self._node, path, _(b'not found in manifest')
+                self._node or b'None', path, _(b'not found in manifest')
             )
 
         return node, flag
@@ -2528,6 +2528,7 @@
         return path in self._cache
 
     def clean(self):
+        self._mergestate = None
         self._cache = {}
 
     def _compact(self):
@@ -2592,6 +2593,11 @@
             self._repo, path, parent=self, filelog=filelog
         )
 
+    def mergestate(self, clean=False):
+        if clean or self._mergestate is None:
+            self._mergestate = mergestatemod.memmergestate(self._repo)
+        return self._mergestate
+
 
 class overlayworkingfilectx(committablefilectx):
     """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
--- a/mercurial/copies.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/copies.py	Tue Oct 20 22:04:04 2020 +0530
@@ -13,8 +13,6 @@
 from .i18n import _
 
 
-from .revlogutils.flagutil import REVIDX_SIDEDATA
-
 from . import (
     match as matchmod,
     node,
@@ -26,6 +24,8 @@
 
 from .utils import stringutil
 
+from .revlogutils import flagutil
+
 
 def _filter(src, dst, t):
     """filters out invalid copies after chaining"""
@@ -172,102 +172,60 @@
     return cm
 
 
-def _revinfogetter(repo):
-    """return a function that return multiple data given a <rev>"i
+def _revinfo_getter(repo):
+    """returns a function that returns the following data given a <rev>"
 
     * p1: revision number of first parent
     * p2: revision number of second parent
-    * p1copies: mapping of copies from p1
-    * p2copies: mapping of copies from p2
-    * removed: a list of removed files
-    * ismerged: a callback to know if file was merged in that revision
+    * changes: a ChangingFiles object
     """
     cl = repo.changelog
     parents = cl.parentrevs
-
-    def get_ismerged(rev):
-        ctx = repo[rev]
+    flags = cl.flags
 
-        def ismerged(path):
-            if path not in ctx.files():
-                return False
-            fctx = ctx[path]
-            parents = fctx._filelog.parents(fctx._filenode)
-            nb_parents = 0
-            for n in parents:
-                if n != node.nullid:
-                    nb_parents += 1
-            return nb_parents >= 2
+    HASCOPIESINFO = flagutil.REVIDX_HASCOPIESINFO
 
-        return ismerged
-
-    if repo.filecopiesmode == b'changeset-sidedata':
-        changelogrevision = cl.changelogrevision
-        flags = cl.flags
+    changelogrevision = cl.changelogrevision
 
-        # A small cache to avoid doing the work twice for merges
-        #
-        # In the vast majority of cases, if we ask information for a revision
-        # about 1 parent, we'll later ask it for the other. So it make sense to
-        # keep the information around when reaching the first parent of a merge
-        # and dropping it after it was provided for the second parents.
-        #
-        # It exists cases were only one parent of the merge will be walked. It
-        # happens when the "destination" the copy tracing is descendant from a
-        # new root, not common with the "source". In that case, we will only walk
-        # through merge parents that are descendant of changesets common
-        # between "source" and "destination".
-        #
-        # With the current case implementation if such changesets have a copy
-        # information, we'll keep them in memory until the end of
-        # _changesetforwardcopies. We don't expect the case to be frequent
-        # enough to matters.
-        #
-        # In addition, it would be possible to reach pathological case, were
-        # many first parent are met before any second parent is reached. In
-        # that case the cache could grow. If this even become an issue one can
-        # safely introduce a maximum cache size. This would trade extra CPU/IO
-        # time to save memory.
-        merge_caches = {}
+    # A small cache to avoid doing the work twice for merges
+    #
+    # In the vast majority of cases, if we ask for information about a
+    # revision for one parent, we'll later ask about the other. So it makes
+    # sense to keep the information around when reaching the first parent
+    # of a merge and to drop it after it was provided for the second parent.
+    #
+    # There are cases where only one parent of the merge will be walked. It
+    # happens when the "destination" of the copy tracing is a descendant of
+    # a new root, not common with the "source". In that case, we will only
+    # walk through merge parents that are descendants of changesets common
+    # between "source" and "destination".
+    #
+    # With the current implementation, if such changesets carry copy
+    # information, we'll keep them in memory until the end of
+    # _changesetforwardcopies. We don't expect this case to be frequent
+    # enough to matter.
+    #
+    # In addition, it would be possible to reach a pathological case where
+    # many first parents are met before any second parent is reached. In
+    # that case the cache could grow. If this ever becomes an issue, one can
+    # safely introduce a maximum cache size. This would trade extra CPU/IO
+    # time to save memory.
+    merge_caches = {}
 
-        def revinfo(rev):
-            p1, p2 = parents(rev)
-            value = None
-            if flags(rev) & REVIDX_SIDEDATA:
-                e = merge_caches.pop(rev, None)
-                if e is not None:
-                    return e
-                c = changelogrevision(rev)
-                p1copies = c.p1copies
-                p2copies = c.p2copies
-                removed = c.filesremoved
-                if p1 != node.nullrev and p2 != node.nullrev:
-                    # XXX some case we over cache, IGNORE
-                    value = merge_caches[rev] = (
-                        p1,
-                        p2,
-                        p1copies,
-                        p2copies,
-                        removed,
-                        get_ismerged(rev),
-                    )
-            else:
-                p1copies = {}
-                p2copies = {}
-                removed = []
-
-            if value is None:
-                value = (p1, p2, p1copies, p2copies, removed, get_ismerged(rev))
-            return value
-
-    else:
-
-        def revinfo(rev):
-            p1, p2 = parents(rev)
-            ctx = repo[rev]
-            p1copies, p2copies = ctx._copies
-            removed = ctx.filesremoved()
-            return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
+    def revinfo(rev):
+        p1, p2 = parents(rev)
+        value = None
+        e = merge_caches.pop(rev, None)
+        if e is not None:
+            return e
+        changes = None
+        if flags(rev) & HASCOPIESINFO:
+            changes = changelogrevision(rev).changes
+        value = (p1, p2, changes)
+        if p1 != node.nullrev and p2 != node.nullrev:
+            # XXX in some cases we over-cache, IGNORE
+            merge_caches[rev] = value
+        return value
 
     return revinfo
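
Callers now receive a single `changes` object per revision, or None when the revision carries no copy information. A small consumption sketch (helper name hypothetical) matching how _combine_changeset_copies reads it below:

    def copies_from(revinfo, rev, parent_rev):
        # return the copies dict of `rev` relative to one of its parents
        p1, p2, changes = revinfo(rev)
        if changes is None:
            # REVIDX_HASCOPIESINFO was not set for this revision
            return {}
        if parent_rev == p1:
            return changes.copied_from_p1
        assert parent_rev == p2
        return changes.copied_from_p2
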
 
@@ -278,7 +236,6 @@
 
     repo = a.repo().unfiltered()
     children = {}
-    revinfo = _revinfogetter(repo)
 
     cl = repo.changelog
     isancestor = cl.isancestorrev  # XXX we should add caching to this.
@@ -309,12 +266,20 @@
     iterrevs.update(roots)
     iterrevs.remove(b.rev())
     revs = sorted(iterrevs)
-    return _combinechangesetcopies(
-        revs, children, b.rev(), revinfo, match, isancestor
-    )
+
+    if repo.filecopiesmode == b'changeset-sidedata':
+        revinfo = _revinfo_getter(repo)
+        return _combine_changeset_copies(
+            revs, children, b.rev(), revinfo, match, isancestor
+        )
+    else:
+        revinfo = _revinfo_getter_extra(repo)
+        return _combine_changeset_copies_extra(
+            revs, children, b.rev(), revinfo, match, isancestor
+        )
 
 
-def _combinechangesetcopies(
+def _combine_changeset_copies(
     revs, children, targetrev, revinfo, match, isancestor
 ):
     """combine the copies information for each item of iterrevs
@@ -335,6 +300,161 @@
             # this is a root
             copies = {}
         for i, c in enumerate(children[r]):
+            p1, p2, changes = revinfo(c)
+            childcopies = {}
+            if r == p1:
+                parent = 1
+                if changes is not None:
+                    childcopies = changes.copied_from_p1
+            else:
+                assert r == p2
+                parent = 2
+                if changes is not None:
+                    childcopies = changes.copied_from_p2
+            if not alwaysmatch:
+                childcopies = {
+                    dst: src for dst, src in childcopies.items() if match(dst)
+                }
+            newcopies = copies
+            if childcopies:
+                newcopies = copies.copy()
+                for dest, source in pycompat.iteritems(childcopies):
+                    prev = copies.get(source)
+                    if prev is not None and prev[1] is not None:
+                        source = prev[1]
+                    newcopies[dest] = (c, source)
+                assert newcopies is not copies
+            if changes is not None:
+                for f in changes.removed:
+                    if f in newcopies:
+                        if newcopies is copies:
+                            # copy on write to avoid affecting potential other
+                            # branches.  when there are no other branches, this
+                            # could be avoided.
+                            newcopies = copies.copy()
+                        newcopies[f] = (c, None)
+            othercopies = all_copies.get(c)
+            if othercopies is None:
+                all_copies[c] = newcopies
+            else:
+                # we are the second parent to work on c, we need to merge our
+                # work with the other.
+                #
+                # In case of conflict, parent 1 takes precedence over parent 2.
+                # This is an arbitrary choice made anew when implementing
+                # changeset based copies. It was made without regard to
+                # potential filelog related behavior.
+                if parent == 1:
+                    _merge_copies_dict(
+                        othercopies, newcopies, isancestor, changes
+                    )
+                else:
+                    _merge_copies_dict(
+                        newcopies, othercopies, isancestor, changes
+                    )
+                    all_copies[c] = newcopies
+
+    final_copies = {}
+    for dest, (tt, source) in all_copies[targetrev].items():
+        if source is not None:
+            final_copies[dest] = source
+    return final_copies
+
+
+def _merge_copies_dict(minor, major, isancestor, changes):
+    """merge two copies-mapping together, minor and major
+
+    In case of conflict, value from "major" will be picked.
+
+    - `isancestors(low_rev, high_rev)`: callable return True if `low_rev` is an
+                                        ancestors of `high_rev`,
+
+    - `ismerged(path)`: callable return True if `path` have been merged in the
+                        current revision,
+    """
+    for dest, value in major.items():
+        other = minor.get(dest)
+        if other is None:
+            minor[dest] = value
+        else:
+            new_tt = value[0]
+            other_tt = other[0]
+            if value[1] == other[1]:
+                continue
+            # content from "major" wins, unless it is older
+            # than the branch point or there is a merge
+            if new_tt == other_tt:
+                minor[dest] = value
+            elif (
+                changes is not None
+                and value[1] is None
+                and dest in changes.salvaged
+            ):
+                pass
+            elif (
+                changes is not None
+                and other[1] is None
+                and dest in changes.salvaged
+            ):
+                minor[dest] = value
+            elif changes is not None and dest in changes.merged:
+                minor[dest] = value
+            elif not isancestor(new_tt, other_tt):
+                minor[dest] = value
+
+
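
A toy, repository-free illustration of the precedence rule (poking at the private helper purely for demonstration; revision numbers stand in for real revs and the ancestry callable is a stub):

    from mercurial.copies import _merge_copies_dict

    def demo_precedence():
        minor = {b'dst': (2, b'src-old')}  # copy recorded on the minor side
        major = {b'dst': (5, b'src-new')}  # newer copy on the major side
        isancestor = lambda low, high: low < high  # stub ancestry check
        _merge_copies_dict(minor, major, isancestor, changes=None)
        # the non-ancestor major value wins
        assert minor[b'dst'] == (5, b'src-new')
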
+def _revinfo_getter_extra(repo):
+    """return a function that return multiple data given a <rev>"i
+
+    * p1: revision number of first parent
+    * p2: revision number of first parent
+    * p1copies: mapping of copies from p1
+    * p2copies: mapping of copies from p2
+    * removed: a list of removed files
+    * ismerged: a callback to know if file was merged in that revision
+    """
+    cl = repo.changelog
+    parents = cl.parentrevs
+
+    def get_ismerged(rev):
+        ctx = repo[rev]
+
+        def ismerged(path):
+            if path not in ctx.files():
+                return False
+            fctx = ctx[path]
+            parents = fctx._filelog.parents(fctx._filenode)
+            nb_parents = 0
+            for n in parents:
+                if n != node.nullid:
+                    nb_parents += 1
+            return nb_parents >= 2
+
+        return ismerged
+
+    def revinfo(rev):
+        p1, p2 = parents(rev)
+        ctx = repo[rev]
+        p1copies, p2copies = ctx._copies
+        removed = ctx.filesremoved()
+        return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
+
+    return revinfo
+
+
+def _combine_changeset_copies_extra(
+    revs, children, targetrev, revinfo, match, isancestor
+):
+    """version of `_combine_changeset_copies` that works with the Google
+    specific "extra" based storage for copy information"""
+    all_copies = {}
+    alwaysmatch = match.always()
+    for r in revs:
+        copies = all_copies.pop(r, None)
+        if copies is None:
+            # this is a root
+            copies = {}
+        for i, c in enumerate(children[r]):
             p1, p2, p1copies, p2copies, removed, ismerged = revinfo(c)
             if r == p1:
                 parent = 1
@@ -376,11 +496,11 @@
                 # changeset based copies. It was made without regard to
                 # potential filelog related behavior.
                 if parent == 1:
-                    _merge_copies_dict(
+                    _merge_copies_dict_extra(
                         othercopies, newcopies, isancestor, ismerged
                     )
                 else:
-                    _merge_copies_dict(
+                    _merge_copies_dict_extra(
                         newcopies, othercopies, isancestor, ismerged
                     )
                     all_copies[c] = newcopies
@@ -392,17 +512,9 @@
     return final_copies
 
 
-def _merge_copies_dict(minor, major, isancestor, ismerged):
-    """merge two copies-mapping together, minor and major
-
-    In case of conflict, value from "major" will be picked.
-
-    - `isancestors(low_rev, high_rev)`: callable return True if `low_rev` is an
-                                        ancestors of `high_rev`,
-
-    - `ismerged(path)`: callable return True if `path` have been merged in the
-                        current revision,
-    """
+def _merge_copies_dict_extra(minor, major, isancestor, ismerged):
+    """version of `_merge_copies_dict` that works with the Google
+    specific "extra" based storage for copy information"""
     for dest, value in major.items():
         other = minor.get(dest)
         if other is None:
--- a/mercurial/crecord.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/crecord.py	Tue Oct 20 22:04:04 2020 +0530
@@ -1808,7 +1808,7 @@
             try:
                 patch = self.ui.edit(patch.getvalue(), b"", action=b"diff")
             except error.Abort as exc:
-                self.errorstr = stringutil.forcebytestr(exc)
+                self.errorstr = exc.message
                 return None
             finally:
                 self.stdscr.clear()
--- a/mercurial/debugcommands.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/debugcommands.py	Tue Oct 20 22:04:04 2020 +0530
@@ -59,6 +59,7 @@
     lock as lockmod,
     logcmdutil,
     mergestate as mergestatemod,
+    metadata,
     obsolete,
     obsutil,
     pathutil,
@@ -99,6 +100,7 @@
 from .revlogutils import (
     deltas as deltautil,
     nodemap,
+    sidedata,
 )
 
 release = lockmod.release
@@ -478,6 +480,40 @@
                 ui.write(b'    %s\n' % v)
 
 
+@command(b'debugchangedfiles', [], b'REV')
+def debugchangedfiles(ui, repo, rev):
+    """list the stored files changes for a revision"""
+    ctx = scmutil.revsingle(repo, rev, None)
+    sd = repo.changelog.sidedata(ctx.rev())
+    files_block = sd.get(sidedata.SD_FILES)
+    if files_block is not None:
+        files = metadata.decode_files_sidedata(sd)
+        for f in sorted(files.touched):
+            if f in files.added:
+                action = b"added"
+            elif f in files.removed:
+                action = b"removed"
+            elif f in files.merged:
+                action = b"merged"
+            elif f in files.salvaged:
+                action = b"salvaged"
+            else:
+                action = b"touched"
+
+            copy_parent = b""
+            copy_source = b""
+            if f in files.copied_from_p1:
+                copy_parent = b"p1"
+                copy_source = files.copied_from_p1[f]
+            elif f in files.copied_from_p2:
+                copy_parent = b"p2"
+                copy_source = files.copied_from_p2[f]
+
+            data = (action, copy_parent, f, copy_source)
+            template = b"%-8s %2s: %s, %s;\n"
+            ui.write(template % data)
+
+
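
Given the template above, each touched file renders on one line. For a revision that adds `bar` as a copy of `foo` and touches `baz` (hypothetical file names), the output would look like:

    added    p1: bar, foo;
    touched    : baz, ;
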
 @command(b'debugcheckstate', [], b'')
 def debugcheckstate(ui, repo):
     """validate the correctness of the current dirstate"""
@@ -1668,11 +1704,11 @@
     fm.data(re2=bool(util._re2))
 
     # templates
-    p = templater.templatepaths()
-    fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
+    p = templater.templatedir()
+    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
     fm.condwrite(not p, b'', _(b" no template directories found\n"))
     if p:
-        m = templater.templatepath(b"map-cmdline.default")
+        (m, fp) = templater.try_open_template(b"map-cmdline.default")
         if m:
             # template found, check if it is working
             err = None
@@ -1734,7 +1770,7 @@
     try:
         username = ui.username()
     except error.Abort as e:
-        err = stringutil.forcebytestr(e)
+        err = e.message
         problems += 1
 
     fm.condwrite(
@@ -2016,6 +2052,7 @@
             b'")}'
             b'{extras % "  extra: {key} = {value}\n"}'
             b'"}'
+            b'{extras % "extra: {file} ({key} = {value})\n"}'
         )
 
     ms = mergestatemod.mergestate.read(repo)
@@ -2061,7 +2098,7 @@
                 fm_files.data(renamed_path=state[1])
                 fm_files.data(rename_side=state[2])
             fm_extras = fm_files.nested(b'extras')
-            for k, v in ms.extras(f).items():
+            for k, v in sorted(ms.extras(f).items()):
                 fm_extras.startitem()
                 fm_extras.data(key=k)
                 fm_extras.data(value=v)
@@ -2069,6 +2106,18 @@
 
     fm_files.end()
 
+    fm_extras = fm.nested(b'extras')
+    for f, d in sorted(pycompat.iteritems(ms.allextras())):
+        if f in ms:
+        # If the file is in the mergestate, we have already processed its extras
+            continue
+        for k, v in pycompat.iteritems(d):
+            fm_extras.startitem()
+            fm_extras.data(file=f)
+            fm_extras.data(key=k)
+            fm_extras.data(value=v)
+    fm_extras.end()
+
     fm.end()
 
 
--- a/mercurial/dirstate.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/dirstate.py	Tue Oct 20 22:04:04 2020 +0530
@@ -1425,6 +1425,7 @@
         self._opener = opener
         self._root = root
         self._filename = b'dirstate'
+        self._nodelen = 20
 
         self._parents = None
         self._dirtyparents = False
@@ -1609,7 +1610,7 @@
         if not self._parents:
             try:
                 fp = self._opendirstatefile()
-                st = fp.read(40)
+                st = fp.read(2 * self._nodelen)
                 fp.close()
             except IOError as err:
                 if err.errno != errno.ENOENT:
@@ -1618,8 +1619,11 @@
                 st = b''
 
             l = len(st)
-            if l == 40:
-                self._parents = (st[:20], st[20:40])
+            if l == self._nodelen * 2:
+                self._parents = (
+                    st[: self._nodelen],
+                    st[self._nodelen : 2 * self._nodelen],
+                )
             elif l == 0:
                 self._parents = (nullid, nullid)
             else:
@@ -1654,15 +1658,11 @@
 
         if util.safehasattr(parsers, b'dict_new_presized'):
             # Make an estimate of the number of files in the dirstate based on
-            # its size. From a linear regression on a set of real-world repos,
-            # all over 10,000 files, the size of a dirstate entry is 85
-            # bytes. The cost of resizing is significantly higher than the cost
-            # of filling in a larger presized dict, so subtract 20% from the
-            # size.
-            #
-            # This heuristic is imperfect in many ways, so in a future dirstate
-            # format update it makes sense to just record the number of entries
-            # on write.
+            # its size. This trades wasting some memory for avoiding costly
+            # resizes. Each entry has a prefix of 17 bytes followed by one
+            # or two path names. Studies on various large-scale real-world
+            # repositories found 54 bytes a reasonable upper limit for the
+            # average path name. Copy entries are ignored for this estimate.
             self._map = parsers.dict_new_presized(len(st) // 71)
 
         # Python's garbage collector triggers a GC each time a certain number
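
The arithmetic behind the new divisor: a 17-byte fixed prefix plus an assumed 54-byte average path gives roughly 71 bytes per entry. As a standalone sketch (helper name hypothetical):

    def estimate_dirstate_entries(data_size):
        # presize estimate: ~17 bytes of entry prefix + ~54 bytes of path,
        # copy sources ignored, per the comment above
        return data_size // (17 + 54)
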
--- a/mercurial/dispatch.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/dispatch.py	Tue Oct 20 22:04:04 2020 +0530
@@ -288,7 +288,7 @@
             if req.fmsg:
                 req.ui.fmsg = req.fmsg
         except error.Abort as inst:
-            ferr.write(_(b"abort: %s\n") % inst)
+            ferr.write(_(b"abort: %s\n") % inst.message)
             if inst.hint:
                 ferr.write(_(b"(%s)\n") % inst.hint)
             return -1
@@ -489,34 +489,34 @@
     except error.AmbiguousCommand as inst:
         ui.warn(
             _(b"hg: command '%s' is ambiguous:\n    %s\n")
-            % (inst.args[0], b" ".join(inst.args[1]))
+            % (inst.prefix, b" ".join(inst.matches))
         )
     except error.CommandError as inst:
-        if inst.args[0]:
+        if inst.command:
             ui.pager(b'help')
-            msgbytes = pycompat.bytestr(inst.args[1])
-            ui.warn(_(b"hg %s: %s\n") % (inst.args[0], msgbytes))
-            commands.help_(ui, inst.args[0], full=False, command=True)
+            msgbytes = pycompat.bytestr(inst.message)
+            ui.warn(_(b"hg %s: %s\n") % (inst.command, msgbytes))
+            commands.help_(ui, inst.command, full=False, command=True)
         else:
-            ui.warn(_(b"hg: %s\n") % inst.args[1])
+            ui.warn(_(b"hg: %s\n") % inst.message)
             ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
     except error.ParseError as inst:
         _formatparse(ui.warn, inst)
         return -1
     except error.UnknownCommand as inst:
-        nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.args[0]
+        nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.command
         try:
             # check if the command is in a disabled extension
             # (but don't check for extensions themselves)
             formatted = help.formattedhelp(
-                ui, commands, inst.args[0], unknowncmd=True
+                ui, commands, inst.command, unknowncmd=True
             )
             ui.warn(nocmdmsg)
             ui.write(formatted)
         except (error.UnknownCommand, error.Abort):
             suggested = False
-            if len(inst.args) == 2:
-                sim = _getsimilar(inst.args[1], inst.args[0])
+            if inst.all_commands:
+                sim = _getsimilar(inst.all_commands, inst.command)
                 if sim:
                     ui.warn(nocmdmsg)
                     _reportsimilar(ui.warn, sim)
--- a/mercurial/encoding.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/encoding.py	Tue Oct 20 22:04:04 2020 +0530
@@ -207,7 +207,9 @@
                 # can't round-trip
                 return u.encode(_sysstr(encoding), "replace")
     except LookupError as k:
-        raise error.Abort(k, hint=b"please check your locale settings")
+        raise error.Abort(
+            pycompat.bytestr(k), hint=b"please check your locale settings"
+        )
 
 
 def fromlocal(s):
--- a/mercurial/error.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/error.py	Tue Oct 20 22:04:04 2020 +0530
@@ -100,6 +100,33 @@
 class CommandError(Exception):
     """Exception raised on errors in parsing the command line."""
 
+    def __init__(self, command, message):
+        self.command = command
+        self.message = message
+        super(CommandError, self).__init__()
+
+    __bytes__ = _tobytes
+
+
+class UnknownCommand(Exception):
+    """Exception raised if command is not in the command table."""
+
+    def __init__(self, command, all_commands=None):
+        self.command = command
+        self.all_commands = all_commands
+        super(UnknownCommand, self).__init__()
+
+    __bytes__ = _tobytes
+
+
+class AmbiguousCommand(Exception):
+    """Exception raised if command shortcut matches more than one command."""
+
+    def __init__(self, prefix, matches):
+        self.prefix = prefix
+        self.matches = matches
+        super(AmbiguousCommand, self).__init__()
+
     __bytes__ = _tobytes
 
 
@@ -128,7 +155,15 @@
 class Abort(Hint, Exception):
     """Raised if a command needs to print an error and exit."""
 
-    __bytes__ = _tobytes
+    def __init__(self, message, hint=None):
+        self.message = message
+        self.hint = hint
+        # Pass the message into the Exception constructor to help extensions
+        # that look for exc.args[0].
+        Exception.__init__(self, message)
+
+    def __bytes__(self):
+        return self.message
 
     if pycompat.ispy3:
 
@@ -290,18 +325,6 @@
     __bytes__ = _tobytes
 
 
-class UnknownCommand(Exception):
-    """Exception raised if command is not in the command table."""
-
-    __bytes__ = _tobytes
-
-
-class AmbiguousCommand(Exception):
-    """Exception raised if command shortcut matches more than one command."""
-
-    __bytes__ = _tobytes
-
-
 # derived from KeyboardInterrupt to simplify some breakout code
 class SignalInterrupt(KeyboardInterrupt):
     """Exception raised on SIGTERM and SIGHUP."""
--- a/mercurial/exchange.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/exchange.py	Tue Oct 20 22:04:04 2020 +0530
@@ -32,6 +32,7 @@
     phases,
     pushkey,
     pycompat,
+    requirements,
     scmutil,
     sslutil,
     streamclone,
@@ -39,7 +40,6 @@
     util,
     wireprototypes,
 )
-from .interfaces import repository
 from .utils import (
     hashutil,
     stringutil,
@@ -1068,7 +1068,7 @@
     cgpart = bundler.newpart(b'changegroup', data=cgstream)
     if cgversions:
         cgpart.addparam(b'version', version)
-    if b'treemanifest' in pushop.repo.requirements:
+    if scmutil.istreemanifest(pushop.repo):
         cgpart.addparam(b'treemanifest', b'1')
     if b'exp-sidedata-flag' in pushop.repo.requirements:
         cgpart.addparam(b'exp-sidedata', b'1')
@@ -1691,7 +1691,7 @@
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
-        if repository.NARROW_REQUIREMENT in repo.requirements:
+        if requirements.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
@@ -1720,7 +1720,7 @@
         repo = reporef()
         cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
         if repo.ui.promptchoice(cm):
-            raise error.Abort("user aborted")
+            raise error.Abort(b"user aborted")
 
     tr.addvalidator(b'900-pull-prompt', prompt)
 
@@ -2557,7 +2557,7 @@
 
     part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
 
-    if b'treemanifest' in repo.requirements:
+    if scmutil.istreemanifest(repo):
         part.addparam(b'treemanifest', b'1')
 
     if b'exp-sidedata-flag' in repo.requirements:
--- a/mercurial/formatter.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/formatter.py	Tue Oct 20 22:04:04 2020 +0530
@@ -540,6 +540,25 @@
     tmpl = attr.ib()
     mapfile = attr.ib()
     refargs = attr.ib(default=None)
+    fp = attr.ib(default=None)
+
+
+def empty_templatespec():
+    return templatespec(None, None, None)
+
+
+def reference_templatespec(ref, refargs=None):
+    return templatespec(ref, None, None, refargs)
+
+
+def literal_templatespec(tmpl):
+    if pycompat.ispy3:
+        assert not isinstance(tmpl, str), b'tmpl must not be a str'
+    return templatespec(b'', tmpl, None)
+
+
+def mapfile_templatespec(topic, mapfile, fp=None):
+    return templatespec(topic, None, mapfile, fp=fp)
 
 
 def lookuptemplate(ui, topic, tmpl):
@@ -563,33 +582,33 @@
     """
 
     if not tmpl:
-        return templatespec(None, None, None)
+        return empty_templatespec()
 
     # looks like a literal template?
     if b'{' in tmpl:
-        return templatespec(b'', tmpl, None)
+        return literal_templatespec(tmpl)
 
     # a reference to built-in (formatter) template
     if tmpl in {b'cbor', b'json', b'pickle', b'debug'}:
-        return templatespec(tmpl, None, None)
+        return reference_templatespec(tmpl)
 
     # a function-style reference to built-in template
     func, fsep, ftail = tmpl.partition(b'(')
     if func in {b'cbor', b'json'} and fsep and ftail.endswith(b')'):
         templater.parseexpr(tmpl)  # make sure syntax errors are confined
-        return templatespec(func, None, None, refargs=ftail[:-1])
+        return reference_templatespec(func, refargs=ftail[:-1])
 
     # perhaps a stock style?
     if not os.path.split(tmpl)[0]:
-        mapname = templater.templatepath(
+        (mapname, fp) = templater.try_open_template(
             b'map-cmdline.' + tmpl
-        ) or templater.templatepath(tmpl)
-        if mapname and os.path.isfile(mapname):
-            return templatespec(topic, None, mapname)
+        ) or templater.try_open_template(tmpl)
+        if mapname:
+            return mapfile_templatespec(topic, mapname, fp)
 
     # perhaps it's a reference to [templates]
     if ui.config(b'templates', tmpl):
-        return templatespec(tmpl, None, None)
+        return reference_templatespec(tmpl)
 
     if tmpl == b'list':
         ui.write(_(b"available styles: %s\n") % templater.stylelist())
@@ -599,13 +618,13 @@
     if (b'/' in tmpl or b'\\' in tmpl) and os.path.isfile(tmpl):
         # is it a mapfile for a style?
         if os.path.basename(tmpl).startswith(b"map-"):
-            return templatespec(topic, None, os.path.realpath(tmpl))
+            return mapfile_templatespec(topic, os.path.realpath(tmpl))
         with util.posixfile(tmpl, b'rb') as f:
             tmpl = f.read()
-        return templatespec(b'', tmpl, None)
+        return literal_templatespec(tmpl)
 
     # constant string?
-    return templatespec(b'', tmpl, None)
+    return literal_templatespec(tmpl)
 
 
 def templatepartsmap(spec, t, partnames):
@@ -626,9 +645,12 @@
     a map file"""
     assert not (spec.tmpl and spec.mapfile)
     if spec.mapfile:
-        frommapfile = templater.templater.frommapfile
-        return frommapfile(
-            spec.mapfile, defaults=defaults, resources=resources, cache=cache
+        return templater.templater.frommapfile(
+            spec.mapfile,
+            spec.fp,
+            defaults=defaults,
+            resources=resources,
+            cache=cache,
         )
     return maketemplater(
         ui, spec.tmpl, defaults=defaults, resources=resources, cache=cache
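
The four factory functions above replace bare `templatespec(...)` calls so each call site names its intent. A simplified sketch of the pattern with a namedtuple stand-in (the real class is attrs-based):

```python
import collections

# simplified stand-in for the attrs-based templatespec in formatter.py
templatespec = collections.namedtuple(
    'templatespec', 'ref tmpl mapfile refargs fp'
)

def literal_templatespec(tmpl):
    # literal templates are identified by an empty ref
    assert not isinstance(tmpl, str), 'tmpl must be bytes'
    return templatespec(b'', tmpl, None, None, None)

def reference_templatespec(ref, refargs=None):
    return templatespec(ref, None, None, refargs, None)

spec = literal_templatespec(b'{rev}:{node|short}\n')
assert spec.ref == b'' and spec.mapfile is None
```
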
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/grep.py	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,224 @@
+# grep.py - logic for history walk and grep
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import difflib
+import errno
+
+from .i18n import _
+
+from . import (
+    error,
+    match as matchmod,
+    pycompat,
+    scmutil,
+    util,
+)
+
+
+def matchlines(body, regexp):
+    begin = 0
+    linenum = 0
+    while begin < len(body):
+        match = regexp.search(body, begin)
+        if not match:
+            break
+        mstart, mend = match.span()
+        linenum += body.count(b'\n', begin, mstart) + 1
+        lstart = body.rfind(b'\n', begin, mstart) + 1 or begin
+        begin = body.find(b'\n', mend) + 1 or len(body) + 1
+        lend = begin - 1
+        yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
+
+
+class linestate(object):
+    def __init__(self, line, linenum, colstart, colend):
+        self.line = line
+        self.linenum = linenum
+        self.colstart = colstart
+        self.colend = colend
+
+    def __hash__(self):
+        return hash(self.line)
+
+    def __eq__(self, other):
+        return self.line == other.line
+
+    def findpos(self, regexp):
+        """Iterate all (start, end) indices of matches"""
+        yield self.colstart, self.colend
+        p = self.colend
+        while p < len(self.line):
+            m = regexp.search(self.line, p)
+            if not m:
+                break
+            if m.end() == p:
+                p += 1
+            else:
+                yield m.span()
+                p = m.end()
+
+
+def difflinestates(a, b):
+    sm = difflib.SequenceMatcher(None, a, b)
+    for tag, alo, ahi, blo, bhi in sm.get_opcodes():
+        if tag == 'insert':
+            for i in pycompat.xrange(blo, bhi):
+                yield (b'+', b[i])
+        elif tag == 'delete':
+            for i in pycompat.xrange(alo, ahi):
+                yield (b'-', a[i])
+        elif tag == 'replace':
+            for i in pycompat.xrange(alo, ahi):
+                yield (b'-', a[i])
+            for i in pycompat.xrange(blo, bhi):
+                yield (b'+', b[i])
+
+
+class grepsearcher(object):
+    """Search files and revisions for lines matching the given pattern
+
+    Options:
+    - all_files to search unchanged files at that revision.
+    - diff to search files in the parent revision so diffs can be generated.
+    - follow to skip files across copies and renames.
+    """
+
+    def __init__(
+        self, ui, repo, regexp, all_files=False, diff=False, follow=False
+    ):
+        self._ui = ui
+        self._repo = repo
+        self._regexp = regexp
+        self._all_files = all_files
+        self._diff = diff
+        self._follow = follow
+
+        self._getfile = util.lrucachefunc(repo.file)
+        self._getrenamed = scmutil.getrenamedfn(repo)
+
+        self._matches = {}
+        self._copies = {}
+        self._skip = set()
+        self._revfiles = {}
+
+    def skipfile(self, fn, rev):
+        """Exclude the given file (and the copy at the specified revision)
+        from future search"""
+        copy = self._copies.get(rev, {}).get(fn)
+        self._skip.add(fn)
+        if copy:
+            self._skip.add(copy)
+
+    def searchfiles(self, revs, makefilematcher):
+        """Walk files and revisions to yield (fn, ctx, pstates, states)
+        matches
+
+        states is a list of linestate objects. pstates may be empty unless
+        diff is True.
+        """
+        for ctx in scmutil.walkchangerevs(
+            self._repo, revs, makefilematcher, self._prep
+        ):
+            rev = ctx.rev()
+            parent = ctx.p1().rev()
+            for fn in sorted(self._revfiles.get(rev, [])):
+                states = self._matches[rev][fn]
+                copy = self._copies.get(rev, {}).get(fn)
+                if fn in self._skip:
+                    if copy:
+                        self._skip.add(copy)
+                    continue
+                pstates = self._matches.get(parent, {}).get(copy or fn, [])
+                if pstates or states:
+                    yield fn, ctx, pstates, states
+            del self._revfiles[rev]
+            # We will keep the matches dict for the duration of the window;
+            # clear the matches dict once the window is over
+            if not self._revfiles:
+                self._matches.clear()
+
+    def _grepbody(self, fn, rev, body):
+        self._matches[rev].setdefault(fn, [])
+        m = self._matches[rev][fn]
+        if body is None:
+            return
+
+        for lnum, cstart, cend, line in matchlines(body, self._regexp):
+            s = linestate(line, lnum, cstart, cend)
+            m.append(s)
+
+    def _readfile(self, ctx, fn):
+        rev = ctx.rev()
+        if rev is None:
+            fctx = ctx[fn]
+            try:
+                return fctx.data()
+            except IOError as e:
+                if e.errno != errno.ENOENT:
+                    raise
+        else:
+            flog = self._getfile(fn)
+            fnode = ctx.filenode(fn)
+            try:
+                return flog.read(fnode)
+            except error.CensoredNodeError:
+                self._ui.warn(
+                    _(
+                        b'cannot search in censored file: '
+                        b'%(filename)s:%(revnum)s\n'
+                    )
+                    % {b'filename': fn, b'revnum': pycompat.bytestr(rev)}
+                )
+
+    def _prep(self, ctx, fmatch):
+        rev = ctx.rev()
+        pctx = ctx.p1()
+        self._matches.setdefault(rev, {})
+        if self._diff:
+            parent = pctx.rev()
+            self._matches.setdefault(parent, {})
+        files = self._revfiles.setdefault(rev, [])
+        if rev is None:
+            # in `hg grep pattern`, 2/3 of the time is spent in pathauditor
+            # checks without this in mozilla-central
+            contextmanager = self._repo.wvfs.audit.cached
+        else:
+            contextmanager = util.nullcontextmanager
+        with contextmanager():
+            # TODO: maybe better to warn about missing files?
+            if self._all_files:
+                fmatch = matchmod.badmatch(fmatch, lambda f, msg: None)
+                filenames = ctx.matches(fmatch)
+            else:
+                filenames = (f for f in ctx.files() if fmatch(f))
+            for fn in filenames:
+                # fn might not exist in the revision (could be a file removed by
+                # the revision). We could check `fn not in ctx` even when rev is
+                # None, but it's less racy to protect against that in readfile.
+                if rev is not None and fn not in ctx:
+                    continue
+
+                copy = None
+                if self._follow:
+                    copy = self._getrenamed(fn, rev)
+                    if copy:
+                        self._copies.setdefault(rev, {})[fn] = copy
+                        if fn in self._skip:
+                            self._skip.add(copy)
+                if fn in self._skip:
+                    continue
+                files.append(fn)
+
+                if fn not in self._matches[rev]:
+                    self._grepbody(fn, rev, self._readfile(ctx, fn))
+
+                if self._diff:
+                    pfn = copy or fn
+                    if pfn not in self._matches[parent] and pfn in pctx:
+                        self._grepbody(pfn, parent, self._readfile(pctx, pfn))
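
To make the new generator concrete, here is a small standalone run of `matchlines()`; the function body is copied from the hunk above and the sample data is made up:

```python
import re

def matchlines(body, regexp):
    # copy of the generator added in mercurial/grep.py above
    begin = 0
    linenum = 0
    while begin < len(body):
        match = regexp.search(body, begin)
        if not match:
            break
        mstart, mend = match.span()
        linenum += body.count(b'\n', begin, mstart) + 1
        lstart = body.rfind(b'\n', begin, mstart) + 1 or begin
        begin = body.find(b'\n', mend) + 1 or len(body) + 1
        lend = begin - 1
        yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

body = b'spam and eggs\nno match here\nmore spam\n'
for linenum, colstart, colend, line in matchlines(body, re.compile(b'spam')):
    print(linenum, colstart, colend, line)
# -> 1 0 4 b'spam and eggs'
# -> 3 5 9 b'more spam'
```
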
--- a/mercurial/help.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/help.py	Tue Oct 20 22:04:04 2020 +0530
@@ -638,6 +638,53 @@
     return re.sub(br'( *)%s' % re.escape(marker), sub, doc)
 
 
+def _getcategorizedhelpcmds(ui, cmdtable, name, select=None):
+    # Category -> list of commands
+    cats = {}
+    # Command -> short description
+    h = {}
+    # Command -> string showing synonyms
+    syns = {}
+    for c, e in pycompat.iteritems(cmdtable):
+        fs = cmdutil.parsealiases(c)
+        f = fs[0]
+        syns[f] = fs
+        func = e[0]
+        if select and not select(f):
+            continue
+        doc = pycompat.getdoc(func)
+        if filtercmd(ui, f, func, name, doc):
+            continue
+        doc = gettext(doc)
+        if not doc:
+            doc = _(b"(no help text available)")
+        h[f] = doc.splitlines()[0].rstrip()
+
+        cat = getattr(func, 'helpcategory', None) or (
+            registrar.command.CATEGORY_NONE
+        )
+        cats.setdefault(cat, []).append(f)
+    return cats, h, syns
+
+
+def _getcategorizedhelptopics(ui, topictable):
+    # Group commands by category.
+    topiccats = {}
+    syns = {}
+    for topic in topictable:
+        names, header, doc = topic[0:3]
+        if len(topic) > 3 and topic[3]:
+            category = topic[3]
+        else:
+            category = TOPIC_CATEGORY_NONE
+
+        topicname = names[0]
+        syns[topicname] = list(names)
+        if not filtertopic(ui, topicname):
+            topiccats.setdefault(category, []).append((topicname, header))
+    return topiccats, syns
+
+
 addtopichook(b'config', inserttweakrc)
 
 
@@ -666,7 +713,7 @@
         except error.AmbiguousCommand as inst:
             # py3 fix: except vars can't be used outside the scope of the
             # except block, nor can be used inside a lambda. python issue4617
-            prefix = inst.args[0]
+            prefix = inst.prefix
             select = lambda c: cmdutil.parsealiases(c)[0].startswith(prefix)
             rst = helplist(select)
             return rst
@@ -760,31 +807,9 @@
         return rst
 
     def helplist(select=None, **opts):
-        # Category -> list of commands
-        cats = {}
-        # Command -> short description
-        h = {}
-        # Command -> string showing synonyms
-        syns = {}
-        for c, e in pycompat.iteritems(commands.table):
-            fs = cmdutil.parsealiases(c)
-            f = fs[0]
-            syns[f] = b', '.join(fs)
-            func = e[0]
-            if select and not select(f):
-                continue
-            doc = pycompat.getdoc(func)
-            if filtercmd(ui, f, func, name, doc):
-                continue
-            doc = gettext(doc)
-            if not doc:
-                doc = _(b"(no help text available)")
-            h[f] = doc.splitlines()[0].rstrip()
-
-            cat = getattr(func, 'helpcategory', None) or (
-                registrar.command.CATEGORY_NONE
-            )
-            cats.setdefault(cat, []).append(f)
+        cats, h, syns = _getcategorizedhelpcmds(
+            ui, commands.table, name, select
+        )
 
         rst = []
         if not h:
@@ -805,7 +830,7 @@
             cmds = sorted(cmds)
             for c in cmds:
                 if ui.verbose:
-                    rst.append(b" :%s: %s\n" % (syns[c], h[c]))
+                    rst.append(b" :%s: %s\n" % (b', '.join(syns[c]), h[c]))
                 else:
                     rst.append(b' :%s: %s\n' % (c, h[c]))
 
@@ -844,20 +869,7 @@
                 rst.extend(exts)
 
             rst.append(_(b"\nadditional help topics:\n"))
-            # Group commands by category.
-            topiccats = {}
-            for topic in helptable:
-                names, header, doc = topic[0:3]
-                if len(topic) > 3 and topic[3]:
-                    category = topic[3]
-                else:
-                    category = TOPIC_CATEGORY_NONE
-
-                topicname = names[0]
-                if not filtertopic(ui, topicname):
-                    topiccats.setdefault(category, []).append(
-                        (topicname, header)
-                    )
+            topiccats, topicsyns = _getcategorizedhelptopics(ui, helptable)
 
             # Check that all categories have an order.
             missing_order = set(topiccats.keys()) - set(TOPIC_CATEGORY_ORDER)
--- a/mercurial/helptext/internals/mergestate.txt	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/helptext/internals/mergestate.txt	Tue Oct 20 22:04:04 2020 +0530
@@ -37,30 +37,18 @@
 | * O: the node of the "other" part of the merge (hexified version)
 | * F: a file to be merged entry
 | * C: a change/delete or delete/change conflict
-| * D: a file that the external merge driver will merge internally
-|      (experimental)
 | * P: a path conflict (file vs directory)
-| * m: the external merge driver defined for this merge plus its run state
-|      (experimental)
 | * f: a (filename, dictionary) tuple of optional values for a given file
 | * X: unsupported mandatory record type (used in tests)
 | * x: unsupported advisory record type (used in tests)
 | * l: the labels for the parts of the merge.
 
-Merge driver run states (experimental):
-
-| * u: driver-resolved files unmarked -- needs to be run next time we're
-|      about to resolve or commit
-| * m: driver-resolved files marked -- only needs to be run before commit
-| * s: success/skipped -- does not need to be run any more
-
 Merge record states (indexed by filename):
 
 | * u: unresolved conflict
 | * r: resolved conflict
 | * pu: unresolved path conflict (file conflicts with directory)
 | * pr: resolved path conflict
-| * d: driver-resolved conflict
 
 The resolve command transitions between 'u' and 'r' for conflicts and
 'pu' and 'pr' for path conflicts.
--- a/mercurial/helptext/internals/requirements.txt	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/helptext/internals/requirements.txt	Tue Oct 20 22:04:04 2020 +0530
@@ -155,3 +155,22 @@
 Note that as of 5.5, only installations compiled with the Rust extension will
 benefit from a speedup. The other installations will do the necessary work to
 keep the index up to date, but will suffer a slowdown.
+
+exp-sharesafe
+=============
+
+NOTE: This requirement is for internal development only. The semantics are not
+frozen yet and the feature is experimental. It is not advised to use it for
+any production repository yet.
+
+Indicates that the repository can be shared safely: the requirements and
+config of the source repository will be shared.
+Requirements are stored in ``.hg/store`` instead of directly in ``.hg/`` where
+they used to be stored. Some working-copy-related requirements are still
+stored in ``.hg/``.
+Shares read the ``.hg/hgrc`` of the source repository.
+
+Support for this requirement was added in Mercurial 5.6 (released
+November 2020). The requirement will only be present on repositories that have
+opted in to this format (by having ``format.exp-share-safe=true`` set when
+they were created).
--- a/mercurial/helptext/internals/revlogs.txt	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/helptext/internals/revlogs.txt	Tue Oct 20 22:04:04 2020 +0530
@@ -215,14 +215,16 @@
 Revision entries consist of an optional 1 byte header followed by an
 encoding of the revision data. The headers are as follows:
 
-\0 (0x00)
-   Revision data is the entirety of the entry, including this header.
-u (0x75)
-   Raw revision data follows.
-x (0x78)
-   zlib (RFC 1950) data.
+\0  (0x00)
+    Revision data is the entirety of the entry, including this header.
+(   (0x28)
+    zstd https://github.com/facebook/zstd
+u   (0x75)
+    Raw revision data follows.
+x   (0x78)
+    zlib (RFC 1950) data.
 
-   The 0x78 value is actually the first byte of the zlib header (CMF byte).
+    The 0x78 value is actually the first byte of the zlib header (CMF byte).
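
A hedged dispatch sketch for these headers. Assumptions: "zstd data" means a regular zstd frame following the header byte (handled here with the third-party `zstandard` package, and assuming the frame embeds its content size), while the `0x78` byte is left in place because it doubles as the first byte of the zlib stream itself:

```python
import zlib

def decompress_entry(data):
    # illustration only, not Mercurial's revlog code
    t = data[0:1]
    if t == b'\0':
        return data       # header byte is part of the revision data
    if t == b'u':
        return data[1:]   # raw revision data follows
    if t == b'x':
        return zlib.decompress(data)  # 0x78 is the zlib CMF byte
    if t == b'(':
        import zstandard  # third-party package; frame assumed to carry size
        return zstandard.ZstdDecompressor().decompress(data[1:])
    raise ValueError('unknown revision entry header: %r' % t)
```
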
 
 Hash Computation
 ================
@@ -237,3 +239,75 @@
 2. Hash the fulltext of the revision
 
 The 20 byte node ids of the parents are fed into the hasher in ascending order.
+
+Changed Files side-data
+=======================
+
+(This feature is in active development and its behavior is not frozen yet. It
+should not be used in any production repository)
+
+When the `exp-copies-sidedata-changeset` requirement is in use, information
+related to the changed files will be stored as "side-data" for every changeset
+in the changelog.
+
+This data contains the following information:
+
+* set of files actively added by the changeset
+* set of files actively removed by the changeset
+* set of files actively merged by the changeset
+* set of files actively touched by the changeset
+* mapping of copy-source, copy-destination from first parent (p1)
+* mapping of copy-source, copy-destination from second parent (p2)
+
+The block itself is big-endian data, formatted in three sections: header, index,
+and data. See below for details:
+
+Header:
+
+    4 bytes: unsigned integer
+
+        total number of entries in the index
+
+Index:
+
+  The index contains an entry for every involved filename. It is sorted by
+  filename. Each entry uses the following format:
+
+    1 byte:  bits field
+
+        This byte holds two different bit fields:
+
+        The 2 lower bits carry copy information:
+
+            `00`: file has no copy information,
+            `10`: file is copied from a p1 source,
+            `11`: file is copied from a p2 source.
+
+        The next 3 bits carry action information.
+
+            `000`: file was untouched, it exists in the index as a copy source,
+            `001`: file was actively added
+            `010`: file was actively merged
+            `011`: file was actively removed
+            `100`: reserved for future use
+            `101`: file was actively touched in any other way
+
+        (The last 2 bits are unused.)
+
+    4 bytes: unsigned integer
+
+        Address (in bytes) of the end of the associated filename in the data
+        block. (This is the address of the first byte not part of the filename)
+
+        The start of the filename can be retrieved by reading that field for the
+        previous index entry. The filename of the first entry starts at zero.
+
+    4 bytes: unsigned integer
+
+        Index (within this same index) of the copy source (when a copy is
+        happening). If no copy is happening, the value of this field is
+        irrelevant and could have any value. It is set to zero by convention.
+
+Data:
+
+  raw bytes block containing all filenames concatenated without any separator.
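
A decoding sketch for the layout just described, assuming the 9-byte index entries (one flag byte plus two big-endian u32s) are packed back to back and that "lower bits" means the least-significant ones; this is an illustration, not Mercurial's parser:

```python
import struct

def decode_changed_files(block):
    (nbentries,) = struct.unpack_from('>I', block, 0)  # header: entry count
    offset = 4
    raw = []
    for _ in range(nbentries):
        # 1 byte of flags, filename end offset, copy source index
        raw.append(struct.unpack_from('>BII', block, offset))
        offset += 9
    data = block[offset:]  # concatenated filenames, no separators
    start, files = 0, []
    for flags, name_end, copy_idx in raw:
        fname = data[start:name_end]  # previous end is this name's start
        start = name_end
        copy_bits = flags & 0b11        # 0: no copy, 2: from p1, 3: from p2
        action = (flags >> 2) & 0b111   # 1: added, 2: merged, 3: removed, ...
        files.append((fname, action, copy_bits, copy_idx))
    return files
```
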
--- a/mercurial/hg.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/hg.py	Tue Oct 20 22:04:04 2020 +0530
@@ -38,6 +38,7 @@
     node,
     phases,
     pycompat,
+    requirements,
     scmutil,
     sshpeer,
     statichttprepo,
@@ -49,7 +50,6 @@
     vfs as vfsmod,
 )
 from .utils import hashutil
-from .interfaces import repository as repositorymod
 
 release = lock.release
 
@@ -332,6 +332,28 @@
     return r
 
 
+def _prependsourcehgrc(repo):
+    """ copies the source repo config and prepend it in current repo .hg/hgrc
+    on unshare. This is only done if the share was perfomed using share safe
+    method where we share config of source in shares"""
+    srcvfs = vfsmod.vfs(repo.sharedpath)
+    dstvfs = vfsmod.vfs(repo.path)
+
+    if not srcvfs.exists(b'hgrc'):
+        return
+
+    currentconfig = b''
+    if dstvfs.exists(b'hgrc'):
+        currentconfig = dstvfs.read(b'hgrc')
+
+    with dstvfs(b'hgrc', b'wb') as fp:
+        sourceconfig = srcvfs.read(b'hgrc')
+        fp.write(b"# Config copied from shared source\n")
+        fp.write(sourceconfig)
+        fp.write(b'\n')
+        fp.write(currentconfig)
+
+
 def unshare(ui, repo):
     """convert a shared repository to a normal one
 
@@ -350,12 +372,17 @@
         # fail
         destlock = copystore(ui, repo, repo.path)
         with destlock or util.nullcontextmanager():
+            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
+                # we were sharing .hg/hgrc of the share source with the current
+                # repo. We need to copy that while unsharing otherwise it can
+                # disable hooks and other checks
+                _prependsourcehgrc(repo)
 
             sharefile = repo.vfs.join(b'sharedpath')
             util.rename(sharefile, sharefile + b'.old')
 
-            repo.requirements.discard(b'shared')
-            repo.requirements.discard(b'relshared')
+            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
+            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
             scmutil.writereporequirements(repo)
 
     # Removing share changes some fundamental properties of the repo instance.
@@ -388,7 +415,7 @@
     if default:
         template = b'[paths]\ndefault = %s\n'
         destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
-    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
+    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
         with destrepo.wlock():
             narrowspec.copytoworkingcopy(destrepo)
 
@@ -1022,7 +1049,11 @@
     When overwrite is set, changes are clobbered, merged else
 
     returns stats (see pydoc mercurial.merge.applyupdates)"""
-    return mergemod.update(
+    repo.ui.deprecwarn(
+        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
+        b'5.7',
+    )
+    return mergemod._update(
         repo,
         node,
         branchmerge=False,
@@ -1034,7 +1065,7 @@
 
 def update(repo, node, quietempty=False, updatecheck=None):
     """update the working directory to node"""
-    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
+    stats = mergemod.update(repo[node], updatecheck=updatecheck)
     _showstats(repo, stats, quietempty)
     if stats.unresolvedcount:
         repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
@@ -1047,7 +1078,7 @@
 
 def clean(repo, node, show_stats=True, quietempty=False):
     """forcibly switch the working directory to node, clobbering changes"""
-    stats = updaterepo(repo, node, True)
+    stats = mergemod.clean_update(repo[node])
     assert stats.unresolvedcount == 0
     if show_stats:
         _showstats(repo, stats, quietempty)
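
For callers migrating away from the now-deprecated `hg.updaterepo()`, the hunks above show the replacement calls taking a changectx; a condensed sketch (argument handling simplified, not a drop-in helper):

```python
from mercurial import merge as mergemod

def checkout(repo, node, clean=False, updatecheck=None):
    # sketch of the replacement calls shown above
    ctx = repo[node]
    if clean:
        return mergemod.clean_update(ctx)   # clobber local changes
    return mergemod.update(ctx, updatecheck=updatecheck)
```
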
--- a/mercurial/hgweb/common.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/hgweb/common.py	Tue Oct 20 22:04:04 2020 +0530
@@ -21,6 +21,7 @@
 from .. import (
     encoding,
     pycompat,
+    templater,
     util,
 )
 
@@ -178,7 +179,7 @@
     return True
 
 
-def staticfile(directory, fname, res):
+def staticfile(templatepath, directory, fname, res):
     """return a file inside directory with guessed Content-Type header
 
     fname always uses '/' as directory separator and isn't allowed to
@@ -190,24 +191,20 @@
     if not ispathsafe(fname):
         return
 
+    if not directory:
+        tp = templatepath or templater.templatedir()
+        if tp is not None:
+            directory = os.path.join(tp, b'static')
+
     fpath = os.path.join(*fname.split(b'/'))
-    if isinstance(directory, bytes):
-        directory = [directory]
-    for d in directory:
-        path = os.path.join(d, fpath)
-        if os.path.exists(path):
-            break
+    ct = pycompat.sysbytes(
+        mimetypes.guess_type(pycompat.fsdecode(fpath))[0] or r"text/plain"
+    )
+    path = os.path.join(directory, fpath)
     try:
         os.stat(path)
-        ct = pycompat.sysbytes(
-            mimetypes.guess_type(pycompat.fsdecode(path))[0] or r"text/plain"
-        )
         with open(path, b'rb') as fh:
             data = fh.read()
-
-        res.headers[b'Content-Type'] = ct
-        res.setbodybytes(data)
-        return res
     except TypeError:
         raise ErrorResponse(HTTP_SERVER_ERROR, b'illegal filename')
     except OSError as err:
@@ -218,6 +215,10 @@
                 HTTP_SERVER_ERROR, encoding.strtolocal(err.strerror)
             )
 
+    res.headers[b'Content-Type'] = ct
+    res.setbodybytes(data)
+    return res
+
 
 def paritygen(stripecount, offset=0):
     """count parity of horizontal stripes for easier reading"""
--- a/mercurial/hgweb/hgweb_mod.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/hgweb/hgweb_mod.py	Tue Oct 20 22:04:04 2020 +0530
@@ -53,7 +53,36 @@
         configfn(b'web', b'style'),
         b'paper',
     )
-    return styles, templater.stylemap(styles, templatepath)
+    return styles, _stylemap(styles, templatepath)
+
+
+def _stylemap(styles, path=None):
+    """Return path to mapfile for a given style.
+
+    Searches mapfile in the following locations:
+    1. templatepath/style/map
+    2. templatepath/map-style
+    3. templatepath/map
+    """
+
+    for style in styles:
+        # only plain name is allowed to honor template paths
+        if (
+            not style
+            or style in (pycompat.oscurdir, pycompat.ospardir)
+            or pycompat.ossep in style
+            or pycompat.osaltsep
+            and pycompat.osaltsep in style
+        ):
+            continue
+        locations = (os.path.join(style, b'map'), b'map-' + style, b'map')
+
+        for location in locations:
+            mapfile, fp = templater.try_open_template(location, path)
+            if mapfile:
+                return style, mapfile, fp
+
+    raise RuntimeError(b"No hgweb templates found in %r" % path)
 
 
 def makebreadcrumb(url, prefix=b''):
@@ -117,23 +146,21 @@
         self.csp, self.nonce = cspvalues(self.repo.ui)
 
     # Trust the settings from the .hg/hgrc files by default.
-    def config(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.config(section, name, default, untrusted=untrusted)
+    def config(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.config(*args, **kwargs)
 
-    def configbool(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configbool(
-            section, name, default, untrusted=untrusted
-        )
+    def configbool(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.configbool(*args, **kwargs)
 
-    def configint(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configint(
-            section, name, default, untrusted=untrusted
-        )
+    def configint(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.configint(*args, **kwargs)
 
-    def configlist(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configlist(
-            section, name, default, untrusted=untrusted
-        )
+    def configlist(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.configlist(*args, **kwargs)
 
     def archivelist(self, nodeid):
         return webutil.archivelist(self.repo.ui, nodeid)
@@ -153,7 +180,9 @@
         # figure out which style to use
 
         vars = {}
-        styles, (style, mapfile) = getstyle(req, self.config, self.templatepath)
+        styles, (style, mapfile, fp) = getstyle(
+            req, self.config, self.templatepath
+        )
         if style == styles[0]:
             vars[b'style'] = style
 
@@ -196,10 +225,9 @@
             yield self.config(b'web', b'motd')
 
         tres = formatter.templateresources(self.repo.ui, self.repo)
-        tmpl = templater.templater.frommapfile(
-            mapfile, filters=filters, defaults=defaults, resources=tres
+        return templater.templater.frommapfile(
+            mapfile, fp=fp, filters=filters, defaults=defaults, resources=tres
         )
-        return tmpl
 
     def sendtemplate(self, name, **kwargs):
         """Helper function to send a response generated from a template."""
@@ -465,7 +493,7 @@
         except error.Abort as e:
             res.status = b'403 Forbidden'
             res.headers[b'Content-Type'] = ctype
-            return rctx.sendtemplate(b'error', error=pycompat.bytestr(e))
+            return rctx.sendtemplate(b'error', error=e.message)
         except ErrorResponse as e:
             for k, v in e.headers:
                 res.headers[k] = v
--- a/mercurial/hgweb/hgwebdir_mod.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/hgweb/hgwebdir_mod.py	Tue Oct 20 22:04:04 2020 +0530
@@ -413,13 +413,7 @@
                 else:
                     fname = req.qsparams[b'static']
                 static = self.ui.config(b"web", b"static", untrusted=False)
-                if not static:
-                    tp = self.templatepath or templater.templatepaths()
-                    if isinstance(tp, bytes):
-                        tp = [tp]
-                    static = [os.path.join(p, b'static') for p in tp]
-
-                staticfile(static, fname, res)
+                staticfile(self.templatepath, static, fname, res)
                 return res.sendresponse()
 
             # top-level index
@@ -538,11 +532,12 @@
         return res.sendresponse()
 
     def templater(self, req, nonce):
-        def config(section, name, default=uimod._unset, untrusted=True):
-            return self.ui.config(section, name, default, untrusted)
+        def config(*args, **kwargs):
+            kwargs.setdefault('untrusted', True)
+            return self.ui.config(*args, **kwargs)
 
         vars = {}
-        styles, (style, mapfile) = hgweb_mod.getstyle(
+        styles, (style, mapfile, fp) = hgweb_mod.getstyle(
             req, config, self.templatepath
         )
         if style == styles[0]:
@@ -577,5 +572,6 @@
             else:
                 yield config(b'web', b'motd')
 
-        tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
-        return tmpl
+        return templater.templater.frommapfile(
+            mapfile, fp=fp, defaults=defaults
+        )
--- a/mercurial/hgweb/webcommands.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/hgweb/webcommands.py	Tue Oct 20 22:04:04 2020 +0530
@@ -36,7 +36,6 @@
     revsetlang,
     scmutil,
     smartset,
-    templater,
     templateutil,
 )
 
@@ -1318,13 +1317,7 @@
     # a repo owner may set web.static in .hg/hgrc to get any file
     # readable by the user running the CGI script
     static = web.config(b"web", b"static", untrusted=False)
-    if not static:
-        tp = web.templatepath or templater.templatepaths()
-        if isinstance(tp, bytes):
-            tp = [tp]
-        static = [os.path.join(p, b'static') for p in tp]
-
-    staticfile(static, fname, web.res)
+    staticfile(web.templatepath, static, fname, web.res)
     return web.res.sendresponse()
 
 
--- a/mercurial/hook.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/hook.py	Tue Oct 20 22:04:04 2020 +0530
@@ -8,6 +8,7 @@
 from __future__ import absolute_import
 
 import contextlib
+import errno
 import os
 import sys
 
@@ -289,10 +290,18 @@
         # The stderr is fully buffered on Windows when connected to a pipe.
         # A forcible flush is required to make small stderr data in the
         # remote side available to the client immediately.
-        procutil.stderr.flush()
+        try:
+            procutil.stderr.flush()
+        except IOError as err:
+            if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
+                raise error.StdioError(err)
 
         if _redirect and oldstdout >= 0:
-            procutil.stdout.flush()  # write hook output to stderr fd
+            try:
+                procutil.stdout.flush()  # write hook output to stderr fd
+            except IOError as err:
+                if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
+                    raise error.StdioError(err)
             os.dup2(oldstdout, stdoutno)
             os.close(oldstdout)
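
The same guard appears twice above; a generalized sketch of the pattern, with a stand-in for `error.StdioError`:

```python
import errno

class StdioError(Exception):
    """Stand-in for mercurial.error.StdioError; illustration only."""

def safe_flush(stream):
    try:
        stream.flush()
    except IOError as err:
        if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
            raise StdioError(err)  # unexpected stdio failure: escalate
        # a vanished pipe (EPIPE/EIO/EBADF) is tolerated silently
```
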
 
--- a/mercurial/interfaces/repository.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/interfaces/repository.py	Tue Oct 20 22:04:04 2020 +0530
@@ -11,10 +11,6 @@
 from .. import error
 from . import util as interfaceutil
 
-# When narrowing is finalized and no longer subject to format changes,
-# we should move this to just "narrow" or similar.
-NARROW_REQUIREMENT = b'narrowhg-experimental'
-
 # Local repository feature string.
 
 # Revlogs are being used for file storage.
@@ -32,12 +28,14 @@
 REVISION_FLAG_ELLIPSIS = 1 << 14
 REVISION_FLAG_EXTSTORED = 1 << 13
 REVISION_FLAG_SIDEDATA = 1 << 12
+REVISION_FLAG_HASCOPIESINFO = 1 << 11
 
 REVISION_FLAGS_KNOWN = (
     REVISION_FLAG_CENSORED
     | REVISION_FLAG_ELLIPSIS
     | REVISION_FLAG_EXTSTORED
     | REVISION_FLAG_SIDEDATA
+    | REVISION_FLAG_HASCOPIESINFO
 )
 
 CG_DELTAMODE_STD = b'default'
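
The new `REVISION_FLAG_HASCOPIESINFO` bit slots in below the existing flags. A small sketch of how the combined mask can reject unknown bits (the lower four constants are copied from the hunk; `REVISION_FLAG_CENSORED` is assumed here to be `1 << 15` from the unchanged context):

```python
REVISION_FLAG_CENSORED = 1 << 15  # assumed value, not shown in this hunk
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_SIDEDATA = 1 << 12
REVISION_FLAG_HASCOPIESINFO = 1 << 11

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_SIDEDATA
    | REVISION_FLAG_HASCOPIESINFO
)

def check_flags(flags):
    # illustrative helper: reject any bit outside the known set
    unknown = flags & ~REVISION_FLAGS_KNOWN
    if unknown:
        raise ValueError('unknown revision flags: %#x' % unknown)
    return bool(flags & REVISION_FLAG_HASCOPIESINFO)
```
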
--- a/mercurial/localrepo.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/localrepo.py	Tue Oct 20 22:04:04 2020 +0530
@@ -8,6 +8,7 @@
 from __future__ import absolute_import
 
 import errno
+import functools
 import os
 import random
 import sys
@@ -32,6 +33,7 @@
     bundle2,
     changegroup,
     color,
+    commit,
     context,
     dirstate,
     dirstateguard,
@@ -46,7 +48,6 @@
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
-    metadata,
     namespaces,
     narrowspec,
     obsolete,
@@ -56,6 +57,7 @@
     pycompat,
     rcutil,
     repoview,
+    requirements as requirementsmod,
     revset,
     revsetlang,
     scmutil,
@@ -192,6 +194,7 @@
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
 
+    @functools.wraps(orig)
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
 
@@ -425,30 +428,6 @@
     # End of baselegacywirecommands interface.
 
 
-# Increment the sub-version when the revlog v2 format changes to lock out old
-# clients.
-REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
-
-# A repository with the sparserevlog feature will have delta chains that
-# can spread over a larger span. Sparse reading cuts these large spans into
-# pieces, so that each piece isn't too big.
-# Without the sparserevlog capability, reading from the repository could use
-# huge amounts of memory, because the whole span would be read at once,
-# including all the intermediate revisions that aren't pertinent for the chain.
-# This is why once a repository has enabled sparse-read, it becomes required.
-SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
-
-# A repository with the sidedataflag requirement will allow to store extra
-# information for revision without altering their original hashes.
-SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
-
-# A repository with the the copies-sidedata-changeset requirement will store
-# copies related information in changeset's sidedata.
-COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
-
-# The repository use persistent nodemap for the changelog and the manifest.
-NODEMAP_REQUIREMENT = b'persistent-nodemap'
-
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
@@ -459,6 +438,50 @@
 featuresetupfuncs = set()
 
 
+def _getsharedvfs(hgvfs, requirements):
+    """ returns the vfs object pointing to root of shared source
+    repo for a shared repository
+
+    hgvfs is vfs pointing at .hg/ of current repo (shared one)
+    requirements is a set of requirements of current repo (shared one)
+    """
+    # The ``shared`` or ``relshared`` requirements indicate the
+    # store lives in the path contained in the ``.hg/sharedpath`` file.
+    # This is an absolute path for ``shared`` and relative to
+    # ``.hg/`` for ``relshared``.
+    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
+    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
+        sharedpath = hgvfs.join(sharedpath)
+
+    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
+
+    if not sharedvfs.exists():
+        raise error.RepoError(
+            _(b'.hg/sharedpath points to nonexistent directory %s')
+            % sharedvfs.base
+        )
+    return sharedvfs
+
+
+def _readrequires(vfs, allowmissing):
+    """ reads the require file present at root of this vfs
+    and return a set of requirements
+
+    If allowmissing is True, we suppress ENOENT if raised"""
+    # requires file contains a newline-delimited list of
+    # features/capabilities the opener (us) must have in order to use
+    # the repository. This file was introduced in Mercurial 0.9.2,
+    # which means very old repositories may not have one. We assume
+    # a missing file translates to no requirements.
+    try:
+        requirements = set(vfs.read(b'requires').splitlines())
+    except IOError as e:
+        if not (allowmissing and e.errno == errno.ENOENT):
+            raise
+        requirements = set()
+    return requirements
+
+
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.
 
@@ -500,6 +523,10 @@
     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
+    # Whether this repository is a shared one or not
+    shared = False
+    # If this repository is shared, the vfs pointing to the shared repo
+    sharedvfs = None
 
     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
@@ -517,22 +544,32 @@
 
         raise error.RepoError(_(b'repository %s not found') % path)
 
-    # .hg/requires file contains a newline-delimited list of
-    # features/capabilities the opener (us) must have in order to use
-    # the repository. This file was introduced in Mercurial 0.9.2,
-    # which means very old repositories may not have one. We assume
-    # a missing file translates to no requirements.
-    try:
-        requirements = set(hgvfs.read(b'requires').splitlines())
-    except IOError as e:
-        if e.errno != errno.ENOENT:
-            raise
-        requirements = set()
+    requirements = _readrequires(hgvfs, True)
+    shared = (
+        requirementsmod.SHARED_REQUIREMENT in requirements
+        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
+    )
+    if shared:
+        sharedvfs = _getsharedvfs(hgvfs, requirements)
+
+    # if .hg/requires contains the sharesafe requirement, it means
+    # there exists a `.hg/store/requires` too and we should read it
+    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
+    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
+    # if the store is not present; see checkrequirementscompat() for that
+    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
+        if shared:
+            # This is a shared repo
+            storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
+        else:
+            storevfs = vfsmod.vfs(hgvfs.join(b'store'))
+
+        requirements |= _readrequires(storevfs, False)
 
     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
-    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
+    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
         afterhgrcload(ui, wdirvfs, hgvfs, requirements)
         extensions.loadall(ui)
         extensions.populateui(ui)
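
Putting the two helpers together, the opening sequence above amounts to: read `.hg/requires`, and if the share-safe requirement is present, union in the store's `requires` file. A simplified standalone sketch using plain file I/O instead of vfs objects:

```python
import os

SHARESAFE_REQUIREMENT = 'exp-sharesafe'

def read_requires(path, allowmissing):
    # simplified: the real code only tolerates ENOENT, not all IOErrors
    try:
        with open(os.path.join(path, 'requires')) as fh:
            return set(fh.read().splitlines())
    except IOError:
        if not allowmissing:
            raise
        return set()

def gather_requirements(hgdir, storedir):
    reqs = read_requires(hgdir, allowmissing=True)
    if SHARESAFE_REQUIREMENT in reqs:
        # the store requires file must exist if sharesafe is set
        reqs |= read_requires(storedir, allowmissing=False)
    return reqs
```
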
@@ -567,27 +604,13 @@
     features = set()
 
     # The "store" part of the repository holds versioned data. How it is
-    # accessed is determined by various requirements. The ``shared`` or
-    # ``relshared`` requirements indicate the store lives in the path contained
-    # in the ``.hg/sharedpath`` file. This is an absolute path for
-    # ``shared`` and relative to ``.hg/`` for ``relshared``.
-    if b'shared' in requirements or b'relshared' in requirements:
-        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
-        if b'relshared' in requirements:
-            sharedpath = hgvfs.join(sharedpath)
-
-        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
-
-        if not sharedvfs.exists():
-            raise error.RepoError(
-                _(b'.hg/sharedpath points to nonexistent directory %s')
-                % sharedvfs.base
-            )
-
-        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
-
+    # accessed is determined by various requirements. If the `shared` or
+    # `relshared` requirement is present, the current repository is a share
+    # and the store exists in the path mentioned in `.hg/sharedpath`
+    if shared:
         storebasepath = sharedvfs.base
         cachepath = sharedvfs.join(b'cache')
+        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
     else:
         storebasepath = hgvfs.base
         cachepath = hgvfs.join(b'cache')
@@ -674,7 +697,7 @@
     )
 
 
-def loadhgrc(ui, wdirvfs, hgvfs, requirements):
+def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
     """Load hgrc files/content into a ui instance.
 
     This is called during repository opening to load any additional
@@ -685,9 +708,20 @@
     Extensions should monkeypatch this function to modify how per-repo
     configs are loaded. For example, an extension may wish to pull in
     configs from alternate files or sources.
+
+    sharedvfs is a vfs object pointing to the source repo if the current one
+    is a shared one
     """
     if not rcutil.use_repo_hgrc():
         return False
+
+    # first load config from the shared source if we have to
+    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
+        try:
+            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
+        except IOError:
+            pass
+
     try:
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
         return True
@@ -790,7 +824,10 @@
 
     ``error.RepoError`` should be raised on failure.
     """
-    if b'exp-sparse' in requirements and not sparse.enabled:
+    if (
+        requirementsmod.SPARSE_REQUIREMENT in requirements
+        and not sparse.enabled
+    ):
         raise error.RepoError(
             _(
                 b'repository is using sparse feature but '
@@ -820,7 +857,7 @@
     """
     options = {}
 
-    if b'treemanifest' in requirements:
+    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
         options[b'treemanifest'] = True
 
     # experimental config: format.manifestcachesize
@@ -833,12 +870,15 @@
     # This revlog format is super old and we don't bother trying to parse
     # opener options for it because those options wouldn't do anything
     # meaningful on such old repos.
-    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
+    if (
+        b'revlogv1' in requirements
+        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
+    ):
         options.update(resolverevlogstorevfsoptions(ui, requirements, features))
     else:  # explicitly mark repo as using revlogv0
         options[b'revlogv0'] = True
 
-    if COPIESSDC_REQUIREMENT in requirements:
+    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
         options[b'copies-storage'] = b'changeset-sidedata'
     else:
         writecopiesto = ui.config(b'experimental', b'copies.write-to')
@@ -857,7 +897,7 @@
 
     if b'revlogv1' in requirements:
         options[b'revlogv1'] = True
-    if REVLOGV2_REQUIREMENT in requirements:
+    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
         options[b'revlogv2'] = True
 
     if b'generaldelta' in requirements:
@@ -901,12 +941,12 @@
     options[b'sparse-read-density-threshold'] = srdensitythres
     options[b'sparse-read-min-gap-size'] = srmingapsize
 
-    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
+    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
     options[b'sparse-revlog'] = sparserevlog
     if sparserevlog:
         options[b'generaldelta'] = True
 
-    sidedata = SIDEDATA_REQUIREMENT in requirements
+    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
     options[b'side-data'] = sidedata
 
     maxchainlen = None
@@ -937,12 +977,12 @@
             msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
             raise error.Abort(msg % options[b'zstd.level'])
 
-    if repository.NARROW_REQUIREMENT in requirements:
+    if requirementsmod.NARROW_REQUIREMENT in requirements:
         options[b'enableellipsis'] = True
 
     if ui.configbool(b'experimental', b'rust.index'):
         options[b'rust.index'] = True
-    if NODEMAP_REQUIREMENT in requirements:
+    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
         options[b'persistent-nodemap'] = True
     if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
         options[b'persistent-nodemap.mmap'] = True
@@ -986,7 +1026,7 @@
     features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
     features.add(repository.REPO_FEATURE_STREAM_CLONE)
 
-    if repository.NARROW_REQUIREMENT in requirements:
+    if requirementsmod.NARROW_REQUIREMENT in requirements:
         return revlognarrowfilestorage
     else:
         return revlogfilestorage
@@ -1027,22 +1067,23 @@
     supportedformats = {
         b'revlogv1',
         b'generaldelta',
-        b'treemanifest',
-        COPIESSDC_REQUIREMENT,
-        REVLOGV2_REQUIREMENT,
-        SIDEDATA_REQUIREMENT,
-        SPARSEREVLOG_REQUIREMENT,
-        NODEMAP_REQUIREMENT,
+        requirementsmod.TREEMANIFEST_REQUIREMENT,
+        requirementsmod.COPIESSDC_REQUIREMENT,
+        requirementsmod.REVLOGV2_REQUIREMENT,
+        requirementsmod.SIDEDATA_REQUIREMENT,
+        requirementsmod.SPARSEREVLOG_REQUIREMENT,
+        requirementsmod.NODEMAP_REQUIREMENT,
         bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
+        requirementsmod.SHARESAFE_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         b'store',
         b'fncache',
-        b'shared',
-        b'relshared',
+        requirementsmod.SHARED_REQUIREMENT,
+        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
         b'dotencode',
-        b'exp-sparse',
-        b'internal-phase',
+        requirementsmod.SPARSE_REQUIREMENT,
+        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
     }
 
     # list of prefix for file which can be written without 'wlock'
@@ -1211,7 +1252,7 @@
         self._extrafilterid = repoview.extrafilter(ui)
 
         self.filecopiesmode = None
-        if COPIESSDC_REQUIREMENT in self.requirements:
+        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
             self.filecopiesmode = b'changeset-sidedata'
 
     def _getvfsward(self, origfunc):
@@ -1236,7 +1277,12 @@
                 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
             # path prefixes covered by 'lock'
-            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
+            vfs_path_prefixes = (
+                b'journal.',
+                b'undo.',
+                b'strip-backup/',
+                b'cache/',
+            )
             if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                 if repo._currentlock(repo._lockref) is None:
                     repo.ui.develwarn(
@@ -1503,14 +1549,14 @@
 
     @storecache(narrowspec.FILENAME)
     def _storenarrowmatch(self):
-        if repository.NARROW_REQUIREMENT not in self.requirements:
+        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
             return matchmod.always()
         include, exclude = self.narrowpats
         return narrowspec.match(self.root, include=include, exclude=exclude)
 
     @storecache(narrowspec.FILENAME)
     def _narrowmatch(self):
-        if repository.NARROW_REQUIREMENT not in self.requirements:
+        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
             return matchmod.always()
         narrowspec.checkworkingcopynarrowspec(self)
         include, exclude = self.narrowpats
@@ -1551,7 +1597,7 @@
     def _quick_access_changeid_wc(self):
         # also fast path access to the working copy parents
         # however, only do it for filters that ensure wc is visible.
-        quick = {}
+        quick = self._quick_access_changeid_null.copy()
         cl = self.unfiltered().changelog
         for node in self.dirstate.parents():
             if node == nullid:
@@ -1590,11 +1636,9 @@
         This contains a list of symbols we can recognise right away without
         further processing.
         """
-        mapping = self._quick_access_changeid_null
         if self.filtername in repoview.filter_has_wc:
-            mapping = mapping.copy()
-            mapping.update(self._quick_access_changeid_wc)
-        return mapping
+            return self._quick_access_changeid_wc
+        return self._quick_access_changeid_null
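
The simplification above works because the working-copy mapping is now seeded with the null entries when it is built (note the `.copy()` added in the previous hunk), so this property can return either dict as-is instead of merging them on every call. A rough standalone sketch of the copy-then-extend pattern, with hypothetical entry values:

    base = {b'null': null_entry}      # shared mapping, never mutated
    quick = base.copy()               # seeded once at build time
    quick[b'.'] = wc_parent_entry     # hypothetical working-copy entry
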
 
     def __getitem__(self, changeid):
         # dealing with special cases
@@ -2472,7 +2516,7 @@
                 ui.status(
                     _(b'working directory now based on revision %d\n') % parents
                 )
-            mergestatemod.mergestate.clean(self, self[b'.'].node())
+            mergestatemod.mergestate.clean(self)
 
         # TODO: if we know which new heads may result from this rollback, pass
         # them to destroy(), which will prevent the branchhead cache from being
@@ -2634,22 +2678,8 @@
             ce.refresh()
 
     def _lock(
-        self,
-        vfs,
-        lockname,
-        wait,
-        releasefn,
-        acquirefn,
-        desc,
-        inheritchecker=None,
-        parentenvvar=None,
+        self, vfs, lockname, wait, releasefn, acquirefn, desc,
     ):
-        parentlock = None
-        # the contents of parentenvvar are used by the underlying lock to
-        # determine whether it can be inherited
-        if parentenvvar is not None:
-            parentlock = encoding.environ.get(parentenvvar)
-
         timeout = 0
         warntimeout = 0
         if wait:
@@ -2667,8 +2697,6 @@
             releasefn=releasefn,
             acquirefn=acquirefn,
             desc=desc,
-            inheritchecker=inheritchecker,
-            parentlock=parentlock,
             signalsafe=signalsafe,
         )
         return l
@@ -2709,12 +2737,6 @@
         self._lockref = weakref.ref(l)
         return l
 
-    def _wlockchecktransaction(self):
-        if self.currenttransaction() is not None:
-            raise error.LockInheritanceContractViolation(
-                b'wlock cannot be inherited in the middle of a transaction'
-            )
-
     def wlock(self, wait=True):
         '''Lock the non-store parts of the repository (everything under
         .hg except .hg/store) and return a weak reference to the lock.
@@ -2752,8 +2774,6 @@
             unlock,
             self.invalidatedirstate,
             _(b'working directory of %s') % self.origroot,
-            inheritchecker=self._wlockchecktransaction,
-            parentenvvar=b'HG_WLOCK_LOCKER',
         )
         self._wlockref = weakref.ref(l)
         return l
@@ -2771,140 +2791,6 @@
         """Returns the wlock if it's held, or None if it's not."""
         return self._currentlock(self._wlockref)
 
-    def _filecommit(
-        self,
-        fctx,
-        manifest1,
-        manifest2,
-        linkrev,
-        tr,
-        changelist,
-        includecopymeta,
-    ):
-        """
-        commit an individual file as part of a larger transaction
-
-        input:
-
-            fctx:       a file context with the content we are trying to commit
-            manifest1:  manifest of changeset first parent
-            manifest2:  manifest of changeset second parent
-            linkrev:    revision number of the changeset being created
-            tr:         current transaction
-            changelist: list of files being changed (modified in place)
-            individual: boolean, set to False to skip storing the copy data
-                        (only used by the Google specific feature of using
-                        changeset extra as copy source of truth).
-
-        output:
-
-            The resulting filenode
-        """
-
-        fname = fctx.path()
-        fparent1 = manifest1.get(fname, nullid)
-        fparent2 = manifest2.get(fname, nullid)
-        if isinstance(fctx, context.filectx):
-            node = fctx.filenode()
-            if node in [fparent1, fparent2]:
-                self.ui.debug(b'reusing %s filelog entry\n' % fname)
-                if (
-                    fparent1 != nullid
-                    and manifest1.flags(fname) != fctx.flags()
-                ) or (
-                    fparent2 != nullid
-                    and manifest2.flags(fname) != fctx.flags()
-                ):
-                    changelist.append(fname)
-                return node
-
-        flog = self.file(fname)
-        meta = {}
-        cfname = fctx.copysource()
-        if cfname and cfname != fname:
-            # Mark the new revision of this file as a copy of another
-            # file.  This copy data will effectively act as a parent
-            # of this new revision.  If this is a merge, the first
-            # parent will be the nullid (meaning "look up the copy data")
-            # and the second one will be the other parent.  For example:
-            #
-            # 0 --- 1 --- 3   rev1 changes file foo
-            #   \       /     rev2 renames foo to bar and changes it
-            #    \- 2 -/      rev3 should have bar with all changes and
-            #                      should record that bar descends from
-            #                      bar in rev2 and foo in rev1
-            #
-            # this allows this merge to succeed:
-            #
-            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
-            #   \       /     merging rev3 and rev4 should use bar@rev2
-            #    \- 2 --- 4        as the merge base
-            #
-
-            cnode = manifest1.get(cfname)
-            newfparent = fparent2
-
-            if manifest2:  # branch merge
-                if fparent2 == nullid or cnode is None:  # copied on remote side
-                    if cfname in manifest2:
-                        cnode = manifest2[cfname]
-                        newfparent = fparent1
-
-            # Here, we used to search backwards through history to try to find
-            # where the file copy came from if the source of a copy was not in
-            # the parent directory. However, this doesn't actually make sense to
-            # do (what does a copy from something not in your working copy even
-            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
-            # the user that copy information was dropped, so if they didn't
-            # expect this outcome it can be fixed, but this is the correct
-            # behavior in this circumstance.
-
-            if cnode:
-                self.ui.debug(
-                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
-                )
-                if includecopymeta:
-                    meta[b"copy"] = cfname
-                    meta[b"copyrev"] = hex(cnode)
-                fparent1, fparent2 = nullid, newfparent
-            else:
-                self.ui.warn(
-                    _(
-                        b"warning: can't find ancestor for '%s' "
-                        b"copied from '%s'!\n"
-                    )
-                    % (fname, cfname)
-                )
-
-        elif fparent1 == nullid:
-            fparent1, fparent2 = fparent2, nullid
-        elif fparent2 != nullid:
-            # is one parent an ancestor of the other?
-            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
-            if fparent1 in fparentancestors:
-                fparent1, fparent2 = fparent2, nullid
-            elif fparent2 in fparentancestors:
-                fparent2 = nullid
-            elif not fparentancestors:
-                # TODO: this whole if-else might be simplified much more
-                ms = mergestatemod.mergestate.read(self)
-                if (
-                    fname in ms
-                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
-                ):
-                    fparent1, fparent2 = fparent2, nullid
-
-        # is the file changed?
-        text = fctx.data()
-        if fparent2 != nullid or meta or flog.cmp(fparent1, text):
-            changelist.append(fname)
-            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
-        # are just the flags changed during merge?
-        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
-            changelist.append(fname)
-
-        return fparent1
-
     def checkcommitpatterns(self, wctx, match, status, fail):
         """check for commit arguments that aren't committable"""
         if match.isexact() or match.prefix():
@@ -3062,203 +2948,7 @@
 
     @unfilteredmethod
     def commitctx(self, ctx, error=False, origctx=None):
-        """Add a new revision to current repository.
-        Revision information is passed via the context argument.
-
-        ctx.files() should list all files involved in this commit, i.e.
-        modified/added/removed files. On merge, it may be wider than the
-        ctx.files() to be committed, since any file nodes derived directly
-        from p1 or p2 are excluded from the committed ctx.files().
-
-        origctx is for convert to work around the problem that bug
-        fixes to the files list in changesets change hashes. For
-        convert to be the identity, it can pass an origctx and this
-        function will use the same files list when it makes sense to
-        do so.
-        """
-
-        p1, p2 = ctx.p1(), ctx.p2()
-        user = ctx.user()
-
-        if self.filecopiesmode == b'changeset-sidedata':
-            writechangesetcopy = True
-            writefilecopymeta = True
-            writecopiesto = None
-        else:
-            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
-            writefilecopymeta = writecopiesto != b'changeset-only'
-            writechangesetcopy = writecopiesto in (
-                b'changeset-only',
-                b'compatibility',
-            )
-        p1copies, p2copies = None, None
-        if writechangesetcopy:
-            p1copies = ctx.p1copies()
-            p2copies = ctx.p2copies()
-        filesadded, filesremoved = None, None
-        with self.lock(), self.transaction(b"commit") as tr:
-            trp = weakref.proxy(tr)
-
-            if ctx.manifestnode():
-                # reuse an existing manifest revision
-                self.ui.debug(b'reusing known manifest\n')
-                mn = ctx.manifestnode()
-                files = ctx.files()
-                if writechangesetcopy:
-                    filesadded = ctx.filesadded()
-                    filesremoved = ctx.filesremoved()
-            elif ctx.files():
-                m1ctx = p1.manifestctx()
-                m2ctx = p2.manifestctx()
-                mctx = m1ctx.copy()
-
-                m = mctx.read()
-                m1 = m1ctx.read()
-                m2 = m2ctx.read()
-
-                # check in files
-                added = []
-                changed = []
-                removed = list(ctx.removed())
-                linkrev = len(self)
-                self.ui.note(_(b"committing files:\n"))
-                uipathfn = scmutil.getuipathfn(self)
-                for f in sorted(ctx.modified() + ctx.added()):
-                    self.ui.note(uipathfn(f) + b"\n")
-                    try:
-                        fctx = ctx[f]
-                        if fctx is None:
-                            removed.append(f)
-                        else:
-                            added.append(f)
-                            m[f] = self._filecommit(
-                                fctx,
-                                m1,
-                                m2,
-                                linkrev,
-                                trp,
-                                changed,
-                                writefilecopymeta,
-                            )
-                            m.setflag(f, fctx.flags())
-                    except OSError:
-                        self.ui.warn(
-                            _(b"trouble committing %s!\n") % uipathfn(f)
-                        )
-                        raise
-                    except IOError as inst:
-                        errcode = getattr(inst, 'errno', errno.ENOENT)
-                        if error or errcode and errcode != errno.ENOENT:
-                            self.ui.warn(
-                                _(b"trouble committing %s!\n") % uipathfn(f)
-                            )
-                        raise
-
-                # update manifest
-                removed = [f for f in removed if f in m1 or f in m2]
-                drop = sorted([f for f in removed if f in m])
-                for f in drop:
-                    del m[f]
-                if p2.rev() != nullrev:
-                    rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
-                    removed = [f for f in removed if not rf(f)]
-
-                files = changed + removed
-                md = None
-                if not files:
-                    # if no "files" actually changed in terms of the changelog,
-                    # try hard to detect unmodified manifest entry so that the
-                    # exact same commit can be reproduced later on convert.
-                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
-                if not files and md:
-                    self.ui.debug(
-                        b'not reusing manifest (no file change in '
-                        b'changelog, but manifest differs)\n'
-                    )
-                if files or md:
-                    self.ui.note(_(b"committing manifest\n"))
-                    # we're using narrowmatch here since it's already applied at
-                    # other stages (such as dirstate.walk), so we're already
-                    # ignoring things outside of narrowspec in most cases. The
-                    # one case where we might have files outside the narrowspec
-                    # at this point is merges, and we already error out in the
-                    # case where the merge has files outside of the narrowspec,
-                    # so this is safe.
-                    mn = mctx.write(
-                        trp,
-                        linkrev,
-                        p1.manifestnode(),
-                        p2.manifestnode(),
-                        added,
-                        drop,
-                        match=self.narrowmatch(),
-                    )
-
-                    if writechangesetcopy:
-                        filesadded = [
-                            f for f in changed if not (f in m1 or f in m2)
-                        ]
-                        filesremoved = removed
-                else:
-                    self.ui.debug(
-                        b'reusing manifest from p1 (listed files '
-                        b'actually unchanged)\n'
-                    )
-                    mn = p1.manifestnode()
-            else:
-                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
-                mn = p1.manifestnode()
-                files = []
-
-            if writecopiesto == b'changeset-only':
-                # If writing only to changeset extras, use None to indicate that
-                # no entry should be written. If writing to both, write an empty
-                # entry to prevent the reader from falling back to reading
-                # filelogs.
-                p1copies = p1copies or None
-                p2copies = p2copies or None
-                filesadded = filesadded or None
-                filesremoved = filesremoved or None
-
-            if origctx and origctx.manifestnode() == mn:
-                files = origctx.files()
-
-            # update changelog
-            self.ui.note(_(b"committing changelog\n"))
-            self.changelog.delayupdate(tr)
-            n = self.changelog.add(
-                mn,
-                files,
-                ctx.description(),
-                trp,
-                p1.node(),
-                p2.node(),
-                user,
-                ctx.date(),
-                ctx.extra().copy(),
-                p1copies,
-                p2copies,
-                filesadded,
-                filesremoved,
-            )
-            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
-            self.hook(
-                b'pretxncommit',
-                throw=True,
-                node=hex(n),
-                parent1=xp1,
-                parent2=xp2,
-            )
-            # set the new commit is proper phase
-            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
-            if targetphase:
-                # retract boundary do not alter parent changeset.
-                # if a parent have higher the resulting phase will
-                # be compliant anyway
-                #
-                # if minimal phase was 0 we don't need to retract anything
-                phases.registernew(self, tr, targetphase, [n])
-            return n
+        return commit.commitctx(self, ctx, error=error, origctx=origctx)
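
The large body removed above now lives in the commit module, and localrepo keeps only this one-line delegation. Callers are unaffected (sketch):

    node = repo.commitctx(ctx)  # internally mercurial.commit.commitctx(...)
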
 
     @unfilteredmethod
     def destroying(self):
@@ -3553,9 +3243,9 @@
     if b'sharedrepo' in createopts:
         requirements = set(createopts[b'sharedrepo'].requirements)
         if createopts.get(b'sharedrelative'):
-            requirements.add(b'relshared')
+            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
         else:
-            requirements.add(b'shared')
+            requirements.add(requirementsmod.SHARED_REQUIREMENT)
 
         return requirements
 
@@ -3608,30 +3298,30 @@
     if scmutil.gdinitconfig(ui):
         requirements.add(b'generaldelta')
         if ui.configbool(b'format', b'sparse-revlog'):
-            requirements.add(SPARSEREVLOG_REQUIREMENT)
+            requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
 
     # experimental config: format.exp-use-side-data
     if ui.configbool(b'format', b'exp-use-side-data'):
-        requirements.add(SIDEDATA_REQUIREMENT)
+        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
     # experimental config: format.exp-use-copies-side-data-changeset
     if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
-        requirements.add(SIDEDATA_REQUIREMENT)
-        requirements.add(COPIESSDC_REQUIREMENT)
+        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
+        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
     if ui.configbool(b'experimental', b'treemanifest'):
-        requirements.add(b'treemanifest')
+        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
 
     revlogv2 = ui.config(b'experimental', b'revlogv2')
     if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
         requirements.remove(b'revlogv1')
         # generaldelta is implied by revlogv2.
         requirements.discard(b'generaldelta')
-        requirements.add(REVLOGV2_REQUIREMENT)
+        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
     # experimental config: format.internal-phase
     if ui.configbool(b'format', b'internal-phase'):
-        requirements.add(b'internal-phase')
+        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
 
     if createopts.get(b'narrowfiles'):
-        requirements.add(repository.NARROW_REQUIREMENT)
+        requirements.add(requirementsmod.NARROW_REQUIREMENT)
 
     if createopts.get(b'lfs'):
         requirements.add(b'lfs')
@@ -3640,11 +3330,59 @@
         requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
 
     if ui.configbool(b'format', b'use-persistent-nodemap'):
-        requirements.add(NODEMAP_REQUIREMENT)
+        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
+
+    # if share-safe is enabled, let's create the new repository with the new
+    # requirement
+    if ui.configbool(b'format', b'exp-share-safe'):
+        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
 
     return requirements
 
 
+def checkrequirementscompat(ui, requirements):
+    """ Checks compatibility of repository requirements enabled and disabled.
+
+    Returns a set of requirements which needs to be dropped because dependend
+    requirements are not enabled. Also warns users about it """
+
+    dropped = set()
+
+    if b'store' not in requirements:
+        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
+            ui.warn(
+                _(
+                    b'ignoring enabled \'format.bookmarks-in-store\' config '
+                    b'because it is incompatible with disabled '
+                    b'\'format.usestore\' config\n'
+                )
+            )
+            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
+
+        if (
+            requirementsmod.SHARED_REQUIREMENT in requirements
+            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
+        ):
+            raise error.Abort(
+                _(
+                    b"cannot create shared repository as source was created"
+                    b" with 'format.usestore' config disabled"
+                )
+            )
+
+        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
+            ui.warn(
+                _(
+                    b"ignoring enabled 'format.exp-share-safe' config because "
+                    b"it is incompatible with disabled 'format.usestore'"
+                    b" config\n"
+                )
+            )
+            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
+
+    return dropped
+
+
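
Usage mirrors the call added to createrepository() later in this patch: compute the requirements, then subtract whatever the compatibility check says must be dropped:

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)
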
 def filterknowncreateopts(ui, createopts):
     """Filters a dict of repo creation options against options that are known.
 
@@ -3719,6 +3457,7 @@
         )
 
     requirements = newreporequirements(ui, createopts=createopts)
+    requirements -= checkrequirementscompat(ui, requirements)
 
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
 
@@ -3765,7 +3504,17 @@
             b'layout',
         )
 
-    scmutil.writerequires(hgvfs, requirements)
+    # Filter the requirements into working copy and store ones
+    wcreq, storereq = scmutil.filterrequirements(requirements)
+    # write working copy ones
+    scmutil.writerequires(hgvfs, wcreq)
+    # If there are store requirements and the current repository
+    # is not a shared one, write the store requirements.
+    # For a new shared repository, we don't need to write the store
+    # requirements as they are already present in the source's store requires.
+    if storereq and b'sharedrepo' not in createopts:
+        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
+        scmutil.writerequires(storevfs, storereq)
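
With share-safe, requirements are thus split across two files. A sketch of the resulting layout, using the helpers referenced above (target paths are implied by hgvfs and the store vfs created from it):

    wcreq, storereq = scmutil.filterrequirements(requirements)
    scmutil.writerequires(hgvfs, wcreq)        # -> .hg/requires
    scmutil.writerequires(storevfs, storereq)  # -> .hg/store/requires
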
 
     # Write out file telling readers where to find the shared store.
     if b'sharedrepo' in createopts:
--- a/mercurial/lock.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/lock.py	Tue Oct 20 22:04:04 2020 +0530
@@ -202,8 +202,6 @@
         releasefn=None,
         acquirefn=None,
         desc=None,
-        inheritchecker=None,
-        parentlock=None,
         signalsafe=True,
         dolock=True,
     ):
@@ -214,10 +212,6 @@
         self.releasefn = releasefn
         self.acquirefn = acquirefn
         self.desc = desc
-        self._inheritchecker = inheritchecker
-        self.parentlock = parentlock
-        self._parentheld = False
-        self._inherited = False
         if signalsafe:
             self._maybedelayedinterrupt = _delayedinterrupt
         else:
@@ -290,14 +284,6 @@
                     if locker is None:
                         continue
 
-                    # special case where a parent process holds the lock -- this
-                    # is different from the pid being different because we do
-                    # want the unlock and postrelease functions to be called,
-                    # but the lockfile to not be removed.
-                    if locker == self.parentlock:
-                        self._parentheld = True
-                        self.held = 1
-                        return
                     locker = self._testlock(locker)
                     if locker is not None:
                         raise error.LockHeld(
@@ -377,38 +363,6 @@
         locker = self._readlock()
         return self._testlock(locker)
 
-    @contextlib.contextmanager
-    def inherit(self):
-        """context for the lock to be inherited by a Mercurial subprocess.
-
-        Yields a string that will be recognized by the lock in the subprocess.
-        Communicating this string to the subprocess needs to be done separately
-        -- typically by an environment variable.
-        """
-        if not self.held:
-            raise error.LockInheritanceContractViolation(
-                b'inherit can only be called while lock is held'
-            )
-        if self._inherited:
-            raise error.LockInheritanceContractViolation(
-                b'inherit cannot be called while lock is already inherited'
-            )
-        if self._inheritchecker is not None:
-            self._inheritchecker()
-        if self.releasefn:
-            self.releasefn()
-        if self._parentheld:
-            lockname = self.parentlock
-        else:
-            lockname = b'%s:%d' % (lock._host, self.pid)
-        self._inherited = True
-        try:
-            yield lockname
-        finally:
-            if self.acquirefn:
-                self.acquirefn()
-            self._inherited = False
-
     def release(self, success=True):
         """release the lock and execute callback function if any
 
@@ -425,18 +379,16 @@
                 if self.releasefn:
                     self.releasefn()
             finally:
-                if not self._parentheld:
-                    try:
-                        self.vfs.unlink(self.f)
-                    except OSError:
-                        pass
+                try:
+                    self.vfs.unlink(self.f)
+                except OSError:
+                    pass
             # The postrelease functions typically assume the lock is not held
             # at all.
-            if not self._parentheld:
-                for callback in self.postrelease:
-                    callback(success)
-                # Prevent double usage and help clear cycles.
-                self.postrelease = None
+            for callback in self.postrelease:
+                callback(success)
+            # Prevent double usage and help clear cycles.
+            self.postrelease = None
 
 
 def release(*locks):
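
With the parent-held special case gone, release() always unlinks the lockfile and runs the postrelease callbacks. A minimal sketch of the callback contract (each callback receives the success flag):

    l = repo.lock()
    try:
        l.postrelease.append(lambda success: ui.note(b'lock released\n'))
    finally:
        l.release()
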
--- a/mercurial/logcmdutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/logcmdutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -18,6 +18,8 @@
     wdirrev,
 )
 
+from .thirdparty import attr
+
 from . import (
     dagop,
     error,
@@ -45,11 +47,14 @@
 if pycompat.TYPE_CHECKING:
     from typing import (
         Any,
+        Callable,
+        Dict,
+        List,
         Optional,
         Tuple,
     )
 
-    for t in (Any, Optional, Tuple):
+    for t in (Any, Callable, Dict, List, Optional, Tuple):
         assert t
 
 
@@ -603,12 +608,11 @@
 
 
 def templatespec(tmpl, mapfile):
-    if pycompat.ispy3:
-        assert not isinstance(tmpl, str), b'tmpl must not be a str'
+    assert not (tmpl and mapfile)
     if mapfile:
-        return formatter.templatespec(b'changeset', tmpl, mapfile)
+        return formatter.mapfile_templatespec(b'changeset', mapfile)
     else:
-        return formatter.templatespec(b'', tmpl, None)
+        return formatter.literal_templatespec(tmpl)
 
 
 def _lookuptemplate(ui, tmpl, style):
@@ -621,19 +625,20 @@
     if not tmpl and not style:  # template are stronger than style
         tmpl = ui.config(b'ui', b'logtemplate')
         if tmpl:
-            return templatespec(templater.unquotestring(tmpl), None)
+            return formatter.literal_templatespec(templater.unquotestring(tmpl))
         else:
             style = util.expandpath(ui.config(b'ui', b'style'))
 
     if not tmpl and style:
         mapfile = style
+        fp = None
         if not os.path.split(mapfile)[0]:
-            mapname = templater.templatepath(
+            (mapname, fp) = templater.try_open_template(
                 b'map-cmdline.' + mapfile
-            ) or templater.templatepath(mapfile)
+            ) or templater.try_open_template(mapfile)
             if mapname:
                 mapfile = mapname
-        return templatespec(None, mapfile)
+        return formatter.mapfile_templatespec(b'changeset', mapfile, fp)
 
     return formatter.lookuptemplate(ui, b'changeset', tmpl)
 
@@ -641,7 +646,7 @@
 def maketemplater(ui, repo, tmpl, buffered=False):
     """Create a changesettemplater from a literal template 'tmpl'
     byte-string."""
-    spec = templatespec(tmpl, None)
+    spec = formatter.literal_templatespec(tmpl)
     return changesettemplater(ui, repo, spec, buffered=buffered)
 
 
@@ -672,7 +677,95 @@
     return changesettemplater(ui, repo, spec, *postargs)
 
 
-def _makematcher(repo, revs, pats, opts):
+@attr.s
+class walkopts(object):
+    """Options to configure a set of revisions and file matcher factory
+    to scan revision/file history
+    """
+
+    # raw command-line parameters, which a matcher will be built from
+    pats = attr.ib()  # type: List[bytes]
+    opts = attr.ib()  # type: Dict[bytes, Any]
+
+    # a list of revset expressions to be traversed; if follow, it specifies
+    # the start revisions
+    revspec = attr.ib()  # type: List[bytes]
+
+    # miscellaneous queries to filter revisions (see "hg help log" for details)
+    branches = attr.ib(default=attr.Factory(list))  # type: List[bytes]
+    date = attr.ib(default=None)  # type: Optional[bytes]
+    keywords = attr.ib(default=attr.Factory(list))  # type: List[bytes]
+    no_merges = attr.ib(default=False)  # type: bool
+    only_merges = attr.ib(default=False)  # type: bool
+    prune_ancestors = attr.ib(default=attr.Factory(list))  # type: List[bytes]
+    users = attr.ib(default=attr.Factory(list))  # type: List[bytes]
+
+    # miscellaneous matcher arguments
+    include_pats = attr.ib(default=attr.Factory(list))  # type: List[bytes]
+    exclude_pats = attr.ib(default=attr.Factory(list))  # type: List[bytes]
+
+    # 0: no follow, 1: follow first, 2: follow both parents
+    follow = attr.ib(default=0)  # type: int
+
+    # do not attempt filelog-based traversal, which may be fast but cannot
+    # include revisions where files were removed
+    force_changelog_traversal = attr.ib(default=False)  # type: bool
+
+    # filter revisions by file patterns, which should be disabled only if
+    # you want to include revisions where files were unmodified
+    filter_revisions_by_pats = attr.ib(default=True)  # type: bool
+
+    # sort revisions prior to traversal: 'desc', 'topo', or None
+    sort_revisions = attr.ib(default=None)  # type: Optional[bytes]
+
+    # limit number of changes displayed; None means unlimited
+    limit = attr.ib(default=None)  # type: Optional[int]
+
+
+def parseopts(ui, pats, opts):
+    # type: (Any, List[bytes], Dict[bytes, Any]) -> walkopts
+    """Parse log command options into walkopts
+
+    The returned walkopts will be passed in to getrevs() or makewalker().
+    """
+    if opts.get(b'follow_first'):
+        follow = 1
+    elif opts.get(b'follow'):
+        follow = 2
+    else:
+        follow = 0
+
+    if opts.get(b'graph'):
+        if ui.configbool(b'experimental', b'log.topo'):
+            sort_revisions = b'topo'
+        else:
+            sort_revisions = b'desc'
+    else:
+        sort_revisions = None
+
+    return walkopts(
+        pats=pats,
+        opts=opts,
+        revspec=opts.get(b'rev', []),
+        # branch and only_branch are really aliases and must be handled at
+        # the same time
+        branches=opts.get(b'branch', []) + opts.get(b'only_branch', []),
+        date=opts.get(b'date'),
+        keywords=opts.get(b'keyword', []),
+        no_merges=bool(opts.get(b'no_merges')),
+        only_merges=bool(opts.get(b'only_merges')),
+        prune_ancestors=opts.get(b'prune', []),
+        users=opts.get(b'user', []),
+        include_pats=opts.get(b'include', []),
+        exclude_pats=opts.get(b'exclude', []),
+        follow=follow,
+        force_changelog_traversal=bool(opts.get(b'removed')),
+        sort_revisions=sort_revisions,
+        limit=getlimit(opts),
+    )
+
+
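
Per the docstring above, the parsed walkopts feed straight into getrevs() or makewalker(); a sketch of the end-to-end pipeline:

    wopts = logcmdutil.parseopts(ui, pats, opts)
    revs, differ = logcmdutil.getrevs(repo, wopts)
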
+def _makematcher(repo, revs, wopts):
     """Build matcher and expanded patterns from log options
 
     If --follow, revs are the revisions to follow from.
@@ -683,47 +776,67 @@
     - slowpath: True if patterns aren't as simple as scanning filelogs
     """
     # pats/include/exclude are passed to match.match() directly in
-    # _matchfiles() revset but walkchangerevs() builds its matcher with
-    # scmutil.match(). The difference is input pats are globbed on
+    # _matchfiles() revset, but a log-like command should build its matcher
+    # with scmutil.match(). The difference is input pats are globbed on
     # platforms without shell expansion (windows).
     wctx = repo[None]
-    match, pats = scmutil.matchandpats(wctx, pats, opts)
-    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
+    match, pats = scmutil.matchandpats(wctx, wopts.pats, wopts.opts)
+    slowpath = match.anypats() or (
+        not match.always() and wopts.force_changelog_traversal
+    )
     if not slowpath:
-        follow = opts.get(b'follow') or opts.get(b'follow_first')
-        startctxs = []
-        if follow and opts.get(b'rev'):
+        if wopts.follow and wopts.revspec:
+            # A path may not exist in some (but not all) of the specified
+            # start revisions; consider such a path valid anyway. The
+            # matcher will warn about missing files.
             startctxs = [repo[r] for r in revs]
-        for f in match.files():
-            if follow and startctxs:
-                # No idea if the path was a directory at that revision, so
-                # take the slow path.
-                if any(f not in c for c in startctxs):
-                    slowpath = True
-                    continue
-            elif follow and f not in wctx:
-                # If the file exists, it may be a directory, so let it
-                # take the slow path.
-                if os.path.exists(repo.wjoin(f)):
-                    slowpath = True
-                    continue
-                else:
+            for f in match.files():
+                found = False
+                for c in startctxs:
+                    if f in c:
+                        found = True
+                    elif c.hasdir(f):
+                        # If a directory exists in any of the start revisions,
+                        # take the slow path.
+                        found = slowpath = True
+                if not found:
                     raise error.Abort(
                         _(
-                            b'cannot follow file not in parent '
-                            b'revision: "%s"'
+                            b'cannot follow file not in any of the specified '
+                            b'revisions: "%s"'
                         )
                         % f
                     )
-            filelog = repo.file(f)
-            if not filelog:
-                # A zero count may be a directory or deleted file, so
-                # try to find matching entries on the slow path.
-                if follow:
+        elif wopts.follow:
+            for f in match.files():
+                if f not in wctx:
+                    # If the file exists, it may be a directory, so let it
+                    # take the slow path.
+                    if os.path.exists(repo.wjoin(f)):
+                        slowpath = True
+                        continue
+                    else:
+                        raise error.Abort(
+                            _(
+                                b'cannot follow file not in parent '
+                                b'revision: "%s"'
+                            )
+                            % f
+                        )
+                filelog = repo.file(f)
+                if not filelog:
+                    # A file exists in wdir but not in history, which means
+                    # the file isn't committed yet.
                     raise error.Abort(
                         _(b'cannot follow nonexistent file: "%s"') % f
                     )
-                slowpath = True
+        else:
+            for f in match.files():
+                filelog = repo.file(f)
+                if not filelog:
+                    # A zero count may be a directory or deleted file, so
+                    # try to find matching entries on the slow path.
+                    slowpath = True
 
         # We decided to fall back to the slowpath because at least one
         # of the paths was not a file. Check to see if at least one of them
@@ -781,20 +894,19 @@
 }
 
 
-def _makerevset(repo, match, pats, slowpath, opts):
+def _makerevset(repo, wopts, slowpath):
     """Return a revset string built from log options and file patterns"""
-    opts = dict(opts)
-    # follow or not follow?
-    follow = opts.get(b'follow') or opts.get(b'follow_first')
+    opts = {
+        b'branch': [repo.lookupbranch(b) for b in wopts.branches],
+        b'date': wopts.date,
+        b'keyword': wopts.keywords,
+        b'no_merges': wopts.no_merges,
+        b'only_merges': wopts.only_merges,
+        b'prune': wopts.prune_ancestors,
+        b'user': wopts.users,
+    }
 
-    # branch and only_branch are really aliases and must be handled at
-    # the same time
-    opts[b'branch'] = opts.get(b'branch', []) + opts.get(b'only_branch', [])
-    opts[b'branch'] = [repo.lookupbranch(b) for b in opts[b'branch']]
-
-    if slowpath:
-        # See walkchangerevs() slow path.
-        #
+    if wopts.filter_revisions_by_pats and slowpath:
         # pats/include/exclude cannot be represented as separate
         # revset expressions as their filtering logic applies at file
         # level. For instance "-I a -X b" matches a revision touching
@@ -802,22 +914,20 @@
         # not. Besides, filesets are evaluated against the working
         # directory.
         matchargs = [b'r:', b'd:relpath']
-        for p in pats:
+        for p in wopts.pats:
             matchargs.append(b'p:' + p)
-        for p in opts.get(b'include', []):
+        for p in wopts.include_pats:
             matchargs.append(b'i:' + p)
-        for p in opts.get(b'exclude', []):
+        for p in wopts.exclude_pats:
             matchargs.append(b'x:' + p)
         opts[b'_matchfiles'] = matchargs
-    elif not follow:
-        opts[b'_patslog'] = list(pats)
+    elif wopts.filter_revisions_by_pats and not wopts.follow:
+        opts[b'_patslog'] = list(wopts.pats)
 
     expr = []
     for op, val in sorted(pycompat.iteritems(opts)):
         if not val:
             continue
-        if op not in _opt2logrevset:
-            continue
         revop, listop = _opt2logrevset[op]
         if revop and b'%' not in revop:
             expr.append(revop)
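
On the slow path, the file patterns now collapse into a single _matchfiles() revset predicate rather than separate expressions. For example, `hg log -I a -X b pat` would roughly produce (sketch):

    matchargs = [b'r:', b'd:relpath', b'p:pat', b'i:a', b'x:b']
    # which renders as the revset predicate:
    #   _matchfiles("r:", "d:relpath", "p:pat", "i:a", "x:b")
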
@@ -835,14 +945,13 @@
     return expr
 
 
-def _initialrevs(repo, opts):
+def _initialrevs(repo, wopts):
     """Return the initial set of revisions to be filtered or followed"""
-    follow = opts.get(b'follow') or opts.get(b'follow_first')
-    if opts.get(b'rev'):
-        revs = scmutil.revrange(repo, opts[b'rev'])
-    elif follow and repo.dirstate.p1() == nullid:
+    if wopts.revspec:
+        revs = scmutil.revrange(repo, wopts.revspec)
+    elif wopts.follow and repo.dirstate.p1() == nullid:
         revs = smartset.baseset()
-    elif follow:
+    elif wopts.follow:
         revs = repo.revs(b'.')
     else:
         revs = smartset.spanset(repo)
@@ -850,50 +959,66 @@
     return revs
 
 
-def getrevs(repo, pats, opts):
-    # type: (Any, Any, Any) -> Tuple[smartset.abstractsmartset, Optional[changesetdiffer]]
-    """Return (revs, differ) where revs is a smartset
+def makewalker(repo, wopts):
+    # type: (Any, walkopts) -> Tuple[smartset.abstractsmartset, Optional[Callable[[Any], matchmod.basematcher]]]
+    """Build (revs, makefilematcher) to scan revision/file history
 
-    differ is a changesetdiffer with pre-configured file matcher.
+    - revs is the smartset to be traversed.
+    - makefilematcher is a function to map ctx to a matcher for that revision
     """
-    follow = opts.get(b'follow') or opts.get(b'follow_first')
-    followfirst = opts.get(b'follow_first')
-    limit = getlimit(opts)
-    revs = _initialrevs(repo, opts)
+    revs = _initialrevs(repo, wopts)
     if not revs:
         return smartset.baseset(), None
-    match, pats, slowpath = _makematcher(repo, revs, pats, opts)
+    # TODO: might want to merge slowpath with wopts.force_changelog_traversal
+    match, pats, slowpath = _makematcher(repo, revs, wopts)
+    wopts = attr.evolve(wopts, pats=pats)
+
     filematcher = None
-    if follow:
+    if wopts.follow:
         if slowpath or match.always():
-            revs = dagop.revancestors(repo, revs, followfirst=followfirst)
+            revs = dagop.revancestors(repo, revs, followfirst=wopts.follow == 1)
         else:
-            revs, filematcher = _fileancestors(repo, revs, match, followfirst)
+            assert not wopts.force_changelog_traversal
+            revs, filematcher = _fileancestors(
+                repo, revs, match, followfirst=wopts.follow == 1
+            )
         revs.reverse()
     if filematcher is None:
-        filematcher = _makenofollowfilematcher(repo, pats, opts)
+        filematcher = _makenofollowfilematcher(repo, wopts.pats, wopts.opts)
     if filematcher is None:
 
         def filematcher(ctx):
             return match
 
-    expr = _makerevset(repo, match, pats, slowpath, opts)
-    if opts.get(b'graph'):
-        # User-specified revs might be unsorted, but don't sort before
-        # _makerevset because it might depend on the order of revs
-        if repo.ui.configbool(b'experimental', b'log.topo'):
+    expr = _makerevset(repo, wopts, slowpath)
+    if wopts.sort_revisions:
+        assert wopts.sort_revisions in {b'topo', b'desc'}
+        if wopts.sort_revisions == b'topo':
             if not revs.istopo():
                 revs = dagop.toposort(revs, repo.changelog.parentrevs)
                 # TODO: try to iterate the set lazily
                 revs = revset.baseset(list(revs), istopo=True)
         elif not (revs.isdescending() or revs.istopo()):
+            # User-specified revs might be unsorted
             revs.sort(reverse=True)
     if expr:
         matcher = revset.match(None, expr)
         revs = matcher(repo, revs)
-    if limit is not None:
-        revs = revs.slice(0, limit)
+    if wopts.limit is not None:
+        revs = revs.slice(0, wopts.limit)
+
+    return revs, filematcher
+
 
+def getrevs(repo, wopts):
+    # type: (Any, walkopts) -> Tuple[smartset.abstractsmartset, Optional[changesetdiffer]]
+    """Return (revs, differ) where revs is a smartset
+
+    differ is a changesetdiffer with pre-configured file matcher.
+    """
+    revs, filematcher = makewalker(repo, wopts)
+    if not revs:
+        return revs, None
     differ = changesetdiffer()
     differ._makefilematcher = filematcher
     return revs, differ
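
A sketch of the makewalker() contract described in its docstring; the second element may be None when there is nothing to traverse:

    revs, makefilematcher = logcmdutil.makewalker(repo, wopts)
    for rev in revs:
        ctx = repo[rev]
        match = makefilematcher(ctx) if makefilematcher else None
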
--- a/mercurial/manifest.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/manifest.py	Tue Oct 20 22:04:04 2020 +0530
@@ -315,16 +315,9 @@
                 b"Manifest values must be a tuple of (node, flags)."
             )
         hashval = value[0]
-        # hashes are either 20 or 32 bytes (sha1 or its replacement),
-        # and allow one extra byte that won't be persisted to disk but
-        # is sometimes used in memory.
-        if not isinstance(hashval, bytes) or not (
-            20 <= len(hashval) <= 22 or 32 <= len(hashval) <= 34
-        ):
+        if not isinstance(hashval, bytes) or len(hashval) not in (20, 32):
             raise TypeError(b"node must be a 20-byte or 32-byte byte string")
         flags = value[1]
-        if len(hashval) == 22:
-            hashval = hashval[:-1]
         if not isinstance(flags, bytes) or len(flags) > 1:
             raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
         needle, found = self.bsearch2(key)
--- a/mercurial/match.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/match.py	Tue Oct 20 22:04:04 2020 +0530
@@ -355,7 +355,10 @@
             except error.Abort as inst:
                 raise error.Abort(
                     b'%s: %s'
-                    % (pat, inst[0])  # pytype: disable=unsupported-operands
+                    % (
+                        pat,
+                        inst.message,
+                    )  # pytype: disable=unsupported-operands
                 )
             except IOError as inst:
                 if warn:
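
On Python 3, exception instances are not indexable, hence the switch from inst[0] to the Abort's message attribute. A sketch of the resulting pattern (parse_pattern is a hypothetical stand-in for the failing call):

    try:
        parse_pattern(pat)  # hypothetical
    except error.Abort as inst:
        raise error.Abort(b'%s: %s' % (pat, inst.message))
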
--- a/mercurial/merge.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/merge.py	Tue Oct 20 22:04:04 2020 +0530
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+import collections
 import errno
 import stat
 import struct
@@ -126,7 +127,7 @@
         return None
 
 
-def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
+def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
     """
     Considers any actions that care about the presence of conflicting unknown
     files. For some actions, the result is to abort; for others, it is to
@@ -150,20 +151,23 @@
                 warnconflicts.update(conflicts)
 
         checkunknowndirs = _unknowndirschecker()
-        for f, (m, args, msg) in pycompat.iteritems(actions):
-            if m in (
+        for f in mresult.files(
+            (
                 mergestatemod.ACTION_CREATED,
                 mergestatemod.ACTION_DELETED_CHANGED,
-            ):
-                if _checkunknownfile(repo, wctx, mctx, f):
-                    fileconflicts.add(f)
-                elif pathconfig and f not in wctx:
-                    path = checkunknowndirs(repo, wctx, f)
-                    if path is not None:
-                        pathconflicts.add(path)
-            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
-                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
-                    fileconflicts.add(f)
+            )
+        ):
+            if _checkunknownfile(repo, wctx, mctx, f):
+                fileconflicts.add(f)
+            elif pathconfig and f not in wctx:
+                path = checkunknowndirs(repo, wctx, f)
+                if path is not None:
+                    pathconflicts.add(path)
+        for f, args, msg in mresult.getactions(
+            [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
+        ):
+            if _checkunknownfile(repo, wctx, mctx, f, args[0]):
+                fileconflicts.add(f)
 
         allconflicts = fileconflicts | pathconflicts
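
This file migrates from a plain actions dict to the mergeresult object; the methods used throughout the hunks below form its core API (sketch):

    # filenames planned for the given action types
    for f in mresult.files((mergestatemod.ACTION_CREATED,)):
        ...
    # full (filename, args, message) records for the given action types
    for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
        ...
    # record or replace the planned action for a file
    mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), b'remote created')
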
         ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
@@ -171,49 +175,50 @@
         collectconflicts(ignoredconflicts, ignoredconfig)
         collectconflicts(unknownconflicts, unknownconfig)
     else:
-        for f, (m, args, msg) in pycompat.iteritems(actions):
-            if m == mergestatemod.ACTION_CREATED_MERGE:
-                fl2, anc = args
-                different = _checkunknownfile(repo, wctx, mctx, f)
-                if repo.dirstate._ignore(f):
-                    config = ignoredconfig
-                else:
-                    config = unknownconfig
+        for f, args, msg in list(
+            mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
+        ):
+            fl2, anc = args
+            different = _checkunknownfile(repo, wctx, mctx, f)
+            if repo.dirstate._ignore(f):
+                config = ignoredconfig
+            else:
+                config = unknownconfig
 
-                # The behavior when force is True is described by this table:
-                #  config  different  mergeforce  |    action    backup
-                #    *         n          *       |      get        n
-                #    *         y          y       |     merge       -
-                #   abort      y          n       |     merge       -   (1)
-                #   warn       y          n       |  warn + get     y
-                #  ignore      y          n       |      get        y
-                #
-                # (1) this is probably the wrong behavior here -- we should
-                #     probably abort, but some actions like rebases currently
-                #     don't like an abort happening in the middle of
-                #     merge.update.
-                if not different:
-                    actions[f] = (
-                        mergestatemod.ACTION_GET,
-                        (fl2, False),
-                        b'remote created',
-                    )
-                elif mergeforce or config == b'abort':
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f, f, None, False, anc),
-                        b'remote differs from untracked local',
-                    )
-                elif config == b'abort':
-                    abortconflicts.add(f)
-                else:
-                    if config == b'warn':
-                        warnconflicts.add(f)
-                    actions[f] = (
-                        mergestatemod.ACTION_GET,
-                        (fl2, True),
-                        b'remote created',
-                    )
+            # The behavior when force is True is described by this table:
+            #  config  different  mergeforce  |    action    backup
+            #    *         n          *       |      get        n
+            #    *         y          y       |     merge       -
+            #   abort      y          n       |     merge       -   (1)
+            #   warn       y          n       |  warn + get     y
+            #  ignore      y          n       |      get        y
+            #
+            # (1) this is probably the wrong behavior here -- we should
+            #     probably abort, but some actions like rebases currently
+            #     don't like an abort happening in the middle of
+            #     merge.update.
+            if not different:
+                mresult.addfile(
+                    f,
+                    mergestatemod.ACTION_GET,
+                    (fl2, False),
+                    b'remote created',
+                )
+            elif mergeforce or config == b'abort':
+                mresult.addfile(
+                    f,
+                    mergestatemod.ACTION_MERGE,
+                    (f, f, None, False, anc),
+                    b'remote differs from untracked local',
+                )
+            elif config == b'abort':
+                abortconflicts.add(f)
+            else:
+                if config == b'warn':
+                    warnconflicts.add(f)
+                mresult.addfile(
+                    f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
+                )
 
     for f in sorted(abortconflicts):
         warn = repo.ui.warn
@@ -238,18 +243,19 @@
         else:
             repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
 
-    for f, (m, args, msg) in pycompat.iteritems(actions):
-        if m == mergestatemod.ACTION_CREATED:
-            backup = (
-                f in fileconflicts
-                or f in pathconflicts
-                or any(p in pathconflicts for p in pathutil.finddirs(f))
-            )
-            (flags,) = args
-            actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
+    for f, args, msg in list(
+        mresult.getactions([mergestatemod.ACTION_CREATED])
+    ):
+        backup = (
+            f in fileconflicts
+            or f in pathconflicts
+            or any(p in pathconflicts for p in pathutil.finddirs(f))
+        )
+        (flags,) = args
+        mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
 
 
-def _forgetremoved(wctx, mctx, branchmerge):
+def _forgetremoved(wctx, mctx, branchmerge, mresult):
     """
     Forget removed files
 
@@ -264,27 +270,22 @@
     as removed.
     """
 
-    actions = {}
     m = mergestatemod.ACTION_FORGET
     if branchmerge:
         m = mergestatemod.ACTION_REMOVE
     for f in wctx.deleted():
         if f not in mctx:
-            actions[f] = m, None, b"forget deleted"
+            mresult.addfile(f, m, None, b"forget deleted")
 
     if not branchmerge:
         for f in wctx.removed():
             if f not in mctx:
-                actions[f] = (
-                    mergestatemod.ACTION_FORGET,
-                    None,
-                    b"forget removed",
+                mresult.addfile(
+                    f, mergestatemod.ACTION_FORGET, None, b"forget removed",
                 )
 
-    return actions
 
-
-def _checkcollision(repo, wmf, actions):
+def _checkcollision(repo, wmf, mresult):
     """
     Check for case-folding collisions.
     """
@@ -292,39 +293,38 @@
     narrowmatch = repo.narrowmatch()
     if not narrowmatch.always():
         pmmf = set(wmf.walk(narrowmatch))
-        if actions:
-            narrowactions = {}
-            for m, actionsfortype in pycompat.iteritems(actions):
-                narrowactions[m] = []
-                for (f, args, msg) in actionsfortype:
-                    if narrowmatch(f):
-                        narrowactions[m].append((f, args, msg))
-            actions = narrowactions
+        if mresult:
+            for f in list(mresult.files()):
+                if not narrowmatch(f):
+                    mresult.removefile(f)
     else:
         # build provisional merged manifest up
         pmmf = set(wmf)
 
-    if actions:
+    if mresult:
         # KEEP and EXEC are no-op
-        for m in (
-            mergestatemod.ACTION_ADD,
-            mergestatemod.ACTION_ADD_MODIFIED,
-            mergestatemod.ACTION_FORGET,
-            mergestatemod.ACTION_GET,
-            mergestatemod.ACTION_CHANGED_DELETED,
-            mergestatemod.ACTION_DELETED_CHANGED,
+        for f in mresult.files(
+            (
+                mergestatemod.ACTION_ADD,
+                mergestatemod.ACTION_ADD_MODIFIED,
+                mergestatemod.ACTION_FORGET,
+                mergestatemod.ACTION_GET,
+                mergestatemod.ACTION_CHANGED_DELETED,
+                mergestatemod.ACTION_DELETED_CHANGED,
+            )
         ):
-            for f, args, msg in actions[m]:
-                pmmf.add(f)
-        for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
+            pmmf.add(f)
+        for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
             pmmf.discard(f)
-        for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
+        for f, args, msg in mresult.getactions(
+            [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
+        ):
             f2, flags = args
             pmmf.discard(f2)
             pmmf.add(f)
-        for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
+        for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
             pmmf.add(f)
-        for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
+        for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
             f1, f2, fa, move, anc = args
             if move:
                 pmmf.discard(f1)
@@ -355,20 +355,6 @@
         lastfull = f
 
 
-def driverpreprocess(repo, ms, wctx, labels=None):
-    """run the preprocess step of the merge driver, if any
-
-    This is currently not implemented -- it's an extension point."""
-    return True
-
-
-def driverconclude(repo, ms, wctx, labels=None):
-    """run the conclude step of the merge driver, if any
-
-    This is currently not implemented -- it's an extension point."""
-    return True
-
-
 def _filesindirs(repo, manifest, dirs):
     """
     Generator that yields pairs of all the files in the manifest that are found
@@ -382,7 +368,7 @@
                 break
 
 
-def checkpathconflicts(repo, wctx, mctx, actions):
+def checkpathconflicts(repo, wctx, mctx, mresult):
     """
     Check if any actions introduce path conflicts in the repository, updating
     actions to record or handle the path conflict accordingly.
@@ -407,30 +393,33 @@
     # The set of files deleted by all the actions.
     deletedfiles = set()
 
-    for f, (m, args, msg) in actions.items():
-        if m in (
+    for f in mresult.files(
+        (
             mergestatemod.ACTION_CREATED,
             mergestatemod.ACTION_DELETED_CHANGED,
             mergestatemod.ACTION_MERGE,
             mergestatemod.ACTION_CREATED_MERGE,
-        ):
-            # This action may create a new local file.
-            createdfiledirs.update(pathutil.finddirs(f))
-            if mf.hasdir(f):
-                # The file aliases a local directory.  This might be ok if all
-                # the files in the local directory are being deleted.  This
-                # will be checked once we know what all the deleted files are.
-                remoteconflicts.add(f)
-        # Track the names of all deleted files.
-        if m == mergestatemod.ACTION_REMOVE:
-            deletedfiles.add(f)
-        if m == mergestatemod.ACTION_MERGE:
-            f1, f2, fa, move, anc = args
-            if move:
-                deletedfiles.add(f1)
-        if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
-            f2, flags = args
-            deletedfiles.add(f2)
+        )
+    ):
+        # This action may create a new local file.
+        createdfiledirs.update(pathutil.finddirs(f))
+        if mf.hasdir(f):
+            # The file aliases a local directory.  This might be ok if all
+            # the files in the local directory are being deleted.  This
+            # will be checked once we know what all the deleted files are.
+            remoteconflicts.add(f)
+    # Track the names of all deleted files.
+    for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
+        deletedfiles.add(f)
+    for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
+        f1, f2, fa, move, anc = args
+        if move:
+            deletedfiles.add(f1)
+    for (f, args, msg) in mresult.getactions(
+        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
+    ):
+        f2, flags = args
+        deletedfiles.add(f2)
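The sets built above (createdfiledirs, deletedfiles) drive the directory checks that follow. As a toy illustration of the core rule: a created file conflicts with any ancestor path that is an existing file, unless that file is itself being deleted. ``path_conflicts`` and its arguments are hypothetical names for this sketch:

    import posixpath

    def path_conflicts(created, existing, deleted):
        conflicts = set()
        for f in created:
            d = posixpath.dirname(f)
            while d:
                if d in existing and d not in deleted:
                    # f needs d to be a directory, but d is (still) a file
                    conflicts.add((f, d))
                d = posixpath.dirname(d)
        return conflicts

    # path_conflicts({b'a/b'}, {b'a'}, set()) -> {(b'a/b', b'a')}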
 
     # Check all directories that contain created files for path conflicts.
     for p in createdfiledirs:
@@ -444,7 +433,8 @@
                 # A file is in a directory which aliases a local file.
                 # We will need to rename the local file.
                 localconflicts.add(p)
-        if p in actions and actions[p][0] in (
+        pd = mresult.getfile(p)
+        if pd and pd[0] in (
             mergestatemod.ACTION_CREATED,
             mergestatemod.ACTION_DELETED_CHANGED,
             mergestatemod.ACTION_MERGE,
@@ -459,14 +449,16 @@
     for p in localconflicts:
         if p not in deletedfiles:
             ctxname = bytes(wctx).rstrip(b'+')
-            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
+            pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
             porig = wctx[p].copysource() or p
-            actions[pnew] = (
+            mresult.addfile(
+                pnew,
                 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                 (p, porig),
                 b'local path conflict',
             )
-            actions[p] = (
+            mresult.addfile(
+                p,
                 mergestatemod.ACTION_PATH_CONFLICT,
                 (pnew, b'l'),
                 b'path conflict',
@@ -477,23 +469,25 @@
         ctxname = bytes(mctx).rstrip(b'+')
         for f, p in _filesindirs(repo, mf, remoteconflicts):
             if f not in deletedfiles:
-                m, args, msg = actions[p]
-                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
+                m, args, msg = mresult.getfile(p)
+                pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
                 if m in (
                     mergestatemod.ACTION_DELETED_CHANGED,
                     mergestatemod.ACTION_MERGE,
                 ):
                     # Action was merge, just update target.
-                    actions[pnew] = (m, args, msg)
+                    mresult.addfile(pnew, m, args, msg)
                 else:
                     # Action was create, change to renamed get action.
                     fl = args[0]
-                    actions[pnew] = (
+                    mresult.addfile(
+                        pnew,
                         mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                         (p, fl),
                         b'remote path conflict',
                     )
-                actions[p] = (
+                mresult.addfile(
+                    p,
                     mergestatemod.ACTION_PATH_CONFLICT,
                     (pnew, mergestatemod.ACTION_REMOVE),
                     b'path conflict',
@@ -507,24 +501,33 @@
         raise error.Abort(_(b"destination manifest contains path conflicts"))
 
 
-def _filternarrowactions(narrowmatch, branchmerge, actions):
+def _filternarrowactions(narrowmatch, branchmerge, mresult):
     """
     Filters out actions that can be ignored because the repo is narrowed.
 
     Raise an exception if the merge cannot be completed because the repo is
     narrowed.
     """
-    nooptypes = {b'k'}  # TODO: handle with nonconflicttypes
-    nonconflicttypes = set(b'a am c cm f g gs r e'.split())
+    # TODO: handle with nonconflicttypes
+    nonconflicttypes = {
+        mergestatemod.ACTION_ADD,
+        mergestatemod.ACTION_ADD_MODIFIED,
+        mergestatemod.ACTION_CREATED,
+        mergestatemod.ACTION_CREATED_MERGE,
+        mergestatemod.ACTION_FORGET,
+        mergestatemod.ACTION_GET,
+        mergestatemod.ACTION_REMOVE,
+        mergestatemod.ACTION_EXEC,
+    }
     # We mutate the items in the dict during iteration, so iterate
     # over a copy.
-    for f, action in list(actions.items()):
+    for f, action in list(mresult.filemap()):
         if narrowmatch(f):
             pass
         elif not branchmerge:
-            del actions[f]  # just updating, ignore changes outside clone
-        elif action[0] in nooptypes:
-            del actions[f]  # merge does not affect file
+            mresult.removefile(f)  # just updating, ignore changes outside clone
+        elif action[0] in mergestatemod.NO_OP_ACTIONS:
+            mresult.removefile(f)  # merge does not affect file
         elif action[0] in nonconflicttypes:
             raise error.Abort(
                 _(
@@ -540,6 +543,171 @@
             )
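Spelled out, _filternarrowactions reduces to a per-file decision table for anything outside the narrow spec. A distilled restatement as a pure function (names invented for illustration; the two abort branches differ only in their message):

    def narrow_verdict(branchmerge, action, noop_actions, nonconflicttypes):
        # verdict for a file that does NOT match the narrow spec
        if not branchmerge:
            return 'drop'   # just updating; ignore changes outside the clone
        if action in noop_actions:
            return 'drop'   # merge does not affect the file
        if action in nonconflicttypes:
            return 'abort'  # merge would modify a file outside the narrow clone
        return 'abort'      # conflict on a file outside the clone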
 
 
+class mergeresult(object):
+    '''An object representing the result of merging manifests.
+
+    It has information about what actions need to be performed on the
+    dirstate, the mapping of divergent renames and other such cases.'''
+
+    def __init__(self):
+        """
+        filemapping: dict with filenames as keys and action-related info as
+                     values
+        diverge: mapping of source name -> list of dest name for
+                 divergent renames
+        renamedelete: mapping of source name -> list of destinations for files
+                      deleted on one side and renamed on other.
+        commitinfo: dict containing data which should be used on commit
+                    contains a filename -> info mapping
+        actionmapping: dict with action names as keys; each value is a dict
+                       mapping filenames to the related data
+        """
+        self._filemapping = {}
+        self._diverge = {}
+        self._renamedelete = {}
+        self._commitinfo = collections.defaultdict(dict)
+        self._actionmapping = collections.defaultdict(dict)
+
+    def updatevalues(self, diverge, renamedelete):
+        self._diverge = diverge
+        self._renamedelete = renamedelete
+
+    def addfile(self, filename, action, data, message):
+        """ adds a new file to the mergeresult object
+
+        filename: file which we are adding
+        action: one of mergestatemod.ACTION_*
+        data: a tuple of information like fctx and ctx related to this merge
+        message: a message about the merge
+        """
+        # if the file already existed, we need to delete its old
+        # entry from _actionmapping too
+        if filename in self._filemapping:
+            a, d, m = self._filemapping[filename]
+            del self._actionmapping[a][filename]
+
+        self._filemapping[filename] = (action, data, message)
+        self._actionmapping[action][filename] = (data, message)
+
+    def getfile(self, filename, default_return=None):
+        """ returns (action, args, msg) about this file
+
+        returns default_return if the file is not present """
+        if filename in self._filemapping:
+            return self._filemapping[filename]
+        return default_return
+
+    def files(self, actions=None):
+        """ returns files on which provided action needs to perfromed
+
+        If actions is None, all files are returned
+        """
+        # TODO: think whether we should return renamedelete and
+        # diverge filenames also
+        if actions is None:
+            for f in self._filemapping:
+                yield f
+
+        else:
+            for a in actions:
+                for f in self._actionmapping[a]:
+                    yield f
+
+    def removefile(self, filename):
+        """ removes a file from the mergeresult object as the file might
+        not merging anymore """
+        action, data, message = self._filemapping[filename]
+        del self._filemapping[filename]
+        del self._actionmapping[action][filename]
+
+    def getactions(self, actions, sort=False):
+        """ get list of files which are marked with these actions
+        if sort is true, files for each action is sorted and then added
+
+        Returns a list of tuple of form (filename, data, message)
+        """
+        for a in actions:
+            if sort:
+                for f in sorted(self._actionmapping[a]):
+                    args, msg = self._actionmapping[a][f]
+                    yield f, args, msg
+            else:
+                for f, (args, msg) in pycompat.iteritems(
+                    self._actionmapping[a]
+                ):
+                    yield f, args, msg
+
+    def len(self, actions=None):
+        """ returns number of files which needs actions
+
+        if actions is passed, total of number of files in that action
+        only is returned """
+
+        if actions is None:
+            return len(self._filemapping)
+
+        return sum(len(self._actionmapping[a]) for a in actions)
+
+    def filemap(self, sort=False):
+        if sort:
+            for key, val in sorted(pycompat.iteritems(self._filemapping)):
+                yield key, val
+        else:
+            for key, val in pycompat.iteritems(self._filemapping):
+                yield key, val
+
+    def addcommitinfo(self, filename, key, value):
+        """ adds key-value information about filename which will be required
+        while committing this merge """
+        self._commitinfo[filename][key] = value
+
+    @property
+    def diverge(self):
+        return self._diverge
+
+    @property
+    def renamedelete(self):
+        return self._renamedelete
+
+    @property
+    def commitinfo(self):
+        return self._commitinfo
+
+    @property
+    def actionsdict(self):
+        """ returns a dictionary of actions to be perfomed with action as key
+        and a list of files and related arguments as values """
+        res = collections.defaultdict(list)
+        for a, d in pycompat.iteritems(self._actionmapping):
+            for f, (args, msg) in pycompat.iteritems(d):
+                res[a].append((f, args, msg))
+        return res
+
+    def setactions(self, actions):
+        self._filemapping = actions
+        self._actionmapping = collections.defaultdict(dict)
+        for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
+            self._actionmapping[act][f] = data, msg
+
+    def hasconflicts(self):
+        """ tells whether this merge resulted in some actions which can
+        result in conflicts or not """
+        for a in self._actionmapping.keys():
+            if (
+                a
+                not in (
+                    mergestatemod.ACTION_GET,
+                    mergestatemod.ACTION_EXEC,
+                    mergestatemod.ACTION_REMOVE,
+                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+                )
+                and self._actionmapping[a]
+                and a not in mergestatemod.NO_OP_ACTIONS
+            ):
+                return True
+
+        return False
+
+
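The class keeps two synchronized indexes: by filename (_filemapping) and by action (_actionmapping), kept consistent by addfile()/removefile(). A short usage sketch against exactly the API above (imports assume this changeset's module layout):

    from mercurial import mergestate as mergestatemod
    from mercurial.merge import mergeresult

    mresult = mergeresult()
    mresult.addfile(
        b'a.txt', mergestatemod.ACTION_GET, (b'', False), b'remote is newer'
    )
    mresult.addfile(b'b.txt', mergestatemod.ACTION_REMOVE, None, b'other deleted')
    mresult.addcommitinfo(b'a.txt', b'filenode-source', b'other')

    assert mresult.len() == 2
    assert list(mresult.files((mergestatemod.ACTION_GET,))) == [b'a.txt']
    assert mresult.commitinfo[b'a.txt'] == {b'filenode-source': b'other'}

    # addfile() on an existing name replaces its entry in both indexes
    mresult.addfile(b'a.txt', mergestatemod.ACTION_REMOVE, None, b'other deleted')
    assert mresult.len((mergestatemod.ACTION_GET,)) == 0
    assert mresult.len((mergestatemod.ACTION_REMOVE,)) == 2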
 def manifestmerge(
     repo,
     wctx,
@@ -559,13 +727,9 @@
     matcher = matcher to filter file lists
     acceptremote = accept the incoming changes without prompting
 
-    Returns:
-
-    actions: dict of filename as keys and action related info as values
-    diverge: mapping of source name -> list of dest name for divergent renames
-    renamedelete: mapping of source name -> list of destinations for files
-                  deleted on one side and renamed on other.
+    Returns a mergeresult object
     """
+    mresult = mergeresult()
     if matcher is not None and matcher.always():
         matcher = None
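For callers, the old three-tuple collapses into this single object: divergence and rename/delete data move behind properties, and the legacy dict-of-lists shape remains reachable through actionsdict. A migration sketch (argument list as in the call sites below):

    # before: actions, diverge, renamedelete = manifestmerge(...)
    mresult = manifestmerge(
        repo, wctx, mctx, pa, branchmerge, force, matcher,
        acceptremote, followcopies,
    )
    diverge, renamedelete = mresult.diverge, mresult.renamedelete
    actions = mresult.actionsdict  # old {action: [(f, args, msg), ...]} shape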
 
@@ -578,6 +742,9 @@
     branch_copies1 = copies.branch_copies()
     branch_copies2 = copies.branch_copies()
     diverge = {}
+    # information from merge which is needed at commit time
+    # for example, choosing which parent's filelog to commit
+    # TODO: use specific constants in future for this mapping
     if followcopies:
         branch_copies1, branch_copies2, diverge = copies.mergecopies(
             repo, wctx, p2, pa
@@ -609,7 +776,16 @@
     # - ma is the same as m1 or m2, which we're just going to diff again later
     # - The caller specifically asks for a full diff, which is useful during bid
     #   merge.
-    if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
+    # - we are tracking salvaged files specifically, hence we should process
+    #   all files
+    if (
+        pa not in ([wctx, p2] + wctx.parents())
+        and not forcefulldiff
+        and not (
+            repo.ui.configbool(b'experimental', b'merge-track-salvaged')
+            or repo.filecopiesmode == b'changeset-sidedata'
+        )
+    ):
         # Identify which files are relevant to the merge, so we can limit the
         # total m1-vs-m2 diff to just those files. This has significant
         # performance benefits in large repositories.
@@ -626,7 +802,6 @@
 
     diff = m1.diff(m2, match=matcher)
 
-    actions = {}
     for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
         if n1 and n2:  # file exists on both local and remote side
             if f not in ma:
@@ -634,59 +809,60 @@
                 fa = branch_copies1.copy.get(
                     f, None
                 ) or branch_copies2.copy.get(f, None)
+                args, msg = None, None
                 if fa is not None:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f, f, fa, False, pa.node()),
-                        b'both renamed from %s' % fa,
-                    )
+                    args = (f, f, fa, False, pa.node())
+                    msg = b'both renamed from %s' % fa
                 else:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f, f, None, False, pa.node()),
-                        b'both created',
-                    )
+                    args = (f, f, None, False, pa.node())
+                    msg = b'both created'
+                mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
             else:
                 a = ma[f]
                 fla = ma.flags(f)
                 nol = b'l' not in fl1 + fl2 + fla
                 if n2 == a and fl2 == fla:
-                    actions[f] = (
-                        mergestatemod.ACTION_KEEP,
-                        (),
-                        b'remote unchanged',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
                     )
                 elif n1 == a and fl1 == fla:  # local unchanged - use remote
                     if n1 == n2:  # optimization: keep local content
-                        actions[f] = (
+                        mresult.addfile(
+                            f,
                             mergestatemod.ACTION_EXEC,
                             (fl2,),
                             b'update permissions',
                         )
                     else:
-                        actions[f] = (
-                            mergestatemod.ACTION_GET_OTHER_AND_STORE
-                            if branchmerge
-                            else mergestatemod.ACTION_GET,
+                        mresult.addfile(
+                            f,
+                            mergestatemod.ACTION_GET,
                             (fl2, False),
                             b'remote is newer',
                         )
+                        if branchmerge:
+                            mresult.addcommitinfo(
+                                f, b'filenode-source', b'other'
+                            )
                 elif nol and n2 == a:  # remote only changed 'x'
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_EXEC,
                         (fl2,),
                         b'update permissions',
                     )
                 elif nol and n1 == a:  # local only changed 'x'
-                    actions[f] = (
-                        mergestatemod.ACTION_GET_OTHER_AND_STORE
-                        if branchmerge
-                        else mergestatemod.ACTION_GET,
+                    mresult.addfile(
+                        f,
+                        mergestatemod.ACTION_GET,
                         (fl1, False),
                         b'remote is newer',
                     )
+                    if branchmerge:
+                        mresult.addcommitinfo(f, b'filenode-source', b'other')
                 else:  # both changed something
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_MERGE,
                         (f, f, f, False, pa.node()),
                         b'versions differ',
@@ -699,20 +875,23 @@
             ):  # directory rename, move local
                 f2 = branch_copies1.movewithdir[f]
                 if f2 in m2:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_MERGE,
                         (f, f2, None, True, pa.node()),
                         b'remote directory rename, both created',
                     )
                 else:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
                         (f, fl1),
                         b'remote directory rename - move from %s' % f,
                     )
             elif f in branch_copies1.copy:
                 f2 = branch_copies1.copy[f]
-                actions[f] = (
+                mresult.addfile(
+                    f,
                     mergestatemod.ACTION_MERGE,
                     (f, f2, f2, False, pa.node()),
                     b'local copied/moved from %s' % f2,
@@ -720,62 +899,79 @@
             elif f in ma:  # clean, a different, no remote
                 if n1 != ma[f]:
                     if acceptremote:
-                        actions[f] = (
+                        mresult.addfile(
+                            f,
                             mergestatemod.ACTION_REMOVE,
                             None,
                             b'remote delete',
                         )
                     else:
-                        actions[f] = (
+                        mresult.addfile(
+                            f,
                             mergestatemod.ACTION_CHANGED_DELETED,
                             (f, None, f, False, pa.node()),
                             b'prompt changed/deleted',
                         )
+                        if branchmerge:
+                            mresult.addcommitinfo(
+                                f, b'merge-removal-candidate', b'yes'
+                            )
                 elif n1 == addednodeid:
                     # This file was locally added. We should forget it instead of
                     # deleting it.
-                    actions[f] = (
-                        mergestatemod.ACTION_FORGET,
-                        None,
-                        b'remote deleted',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
                     )
                 else:
-                    actions[f] = (
-                        mergestatemod.ACTION_REMOVE,
-                        None,
-                        b'other deleted',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
                     )
+                    if branchmerge:
+                        # the file must be absent after merging,
+                        # however the user might make
+                        # the file reappear using revert, and if they do,
+                        # we force create a new node
+                        mresult.addcommitinfo(
+                            f, b'merge-removal-candidate', b'yes'
+                        )
+
+            else:  # file not in ancestor, not in remote
+                mresult.addfile(
+                    f,
+                    mergestatemod.ACTION_KEEP_NEW,
+                    None,
+                    b'ancestor missing, remote missing',
+                )
+
         elif n2:  # file exists only on remote side
             if f in copied1:
                 pass  # we'll deal with it on m1 side
             elif f in branch_copies2.movewithdir:
                 f2 = branch_copies2.movewithdir[f]
                 if f2 in m1:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_MERGE,
                         (f2, f, None, False, pa.node()),
                         b'local directory rename, both created',
                     )
                 else:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                         (f, fl2),
                         b'local directory rename - get from %s' % f,
                     )
             elif f in branch_copies2.copy:
                 f2 = branch_copies2.copy[f]
+                msg, args = None, None
                 if f2 in m2:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f2, f, f2, False, pa.node()),
-                        b'remote copied from %s' % f2,
-                    )
+                    args = (f2, f, f2, False, pa.node())
+                    msg = b'remote copied from %s' % f2
                 else:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f2, f, f2, True, pa.node()),
-                        b'remote moved from %s' % f2,
-                    )
+                    args = (f2, f, f2, True, pa.node())
+                    msg = b'remote moved from %s' % f2
+                mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
             elif f not in ma:
                 # local unknown, remote created: the logic is described by the
                 # following table:
@@ -789,19 +985,22 @@
                 # Checking whether the files are different is expensive, so we
                 # don't do that when we can avoid it.
                 if not force:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED,
                         (fl2,),
                         b'remote created',
                     )
                 elif not branchmerge:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED,
                         (fl2,),
                         b'remote created',
                     )
                 else:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED_MERGE,
                         (fl2, pa.node()),
                         b'remote created, get or merge',
@@ -814,60 +1013,77 @@
                         df = branch_copies1.dirmove[d] + f[len(d) :]
                         break
                 if df is not None and df in m1:
-                    actions[df] = (
+                    mresult.addfile(
+                        df,
                         mergestatemod.ACTION_MERGE,
                         (df, f, f, False, pa.node()),
                         b'local directory rename - respect move '
                         b'from %s' % f,
                     )
                 elif acceptremote:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED,
                         (fl2,),
                         b'remote recreating',
                     )
                 else:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_DELETED_CHANGED,
                         (None, f, f, False, pa.node()),
                         b'prompt deleted/changed',
                     )
+                    if branchmerge:
+                        mresult.addcommitinfo(
+                            f, b'merge-removal-candidate', b'yes'
+                        )
+            else:
+                mresult.addfile(
+                    f,
+                    mergestatemod.ACTION_KEEP_ABSENT,
+                    None,
+                    b'local not present, remote unchanged',
+                )
+                if branchmerge:
+                    # the file must be absent after merging
+                    # however the user might make
+                    # the file reappear using revert, and if they do,
+                    # we force create a new node
+                    mresult.addcommitinfo(f, b'merge-removal-candidate', b'yes')
 
     if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
         # If we are merging, look for path conflicts.
-        checkpathconflicts(repo, wctx, p2, actions)
+        checkpathconflicts(repo, wctx, p2, mresult)
 
     narrowmatch = repo.narrowmatch()
     if not narrowmatch.always():
         # Updates "actions" in place
-        _filternarrowactions(narrowmatch, branchmerge, actions)
+        _filternarrowactions(narrowmatch, branchmerge, mresult)
 
     renamedelete = branch_copies1.renamedelete
     renamedelete.update(branch_copies2.renamedelete)
 
-    return actions, diverge, renamedelete
+    mresult.updatevalues(diverge, renamedelete)
+    return mresult
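Two commit-time hints are recorded along the way above, and both travel through mresult.commitinfo into the mergestate (see applyupdates below). Collected in one place, with keys and values exactly as the hunks write them:

    # commitinfo written during manifestmerge (branch merges only)
    COMMITINFO_KEYS = {
        b'filenode-source': b'other',        # the other parent's filenode was
                                             # taken and should be committed
        b'merge-removal-candidate': b'yes',  # file should stay absent; a later
                                             # revert forces a new filenode
    }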
 
 
-def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
+def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
     """Resolves false conflicts where the nodeid changed but the content
        remained the same."""
     # We force a copy of the file lists because we're going to mutate
     # the mergeresult as we resolve trivial conflicts.
-    for f, (m, args, msg) in list(actions.items()):
-        if (
-            m == mergestatemod.ACTION_CHANGED_DELETED
-            and f in ancestor
-            and not wctx[f].cmp(ancestor[f])
-        ):
+    for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
+        if f in ancestor and not wctx[f].cmp(ancestor[f]):
             # local did change but ended up with same content
-            actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
-        elif (
-            m == mergestatemod.ACTION_DELETED_CHANGED
-            and f in ancestor
-            and not mctx[f].cmp(ancestor[f])
-        ):
+            mresult.addfile(
+                f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
+            )
+
+    for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
+        if f in ancestor and not mctx[f].cmp(ancestor[f]):
             # remote did change but ended up with same content
-            del actions[f]  # don't get = keep local deleted
+            mresult.removefile(f)  # don't get = keep local deleted
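The two loops above apply one symmetric rule. Restated as a pure function over a single file, where b'cd'/b'dc'/b'r' stand for ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED and ACTION_REMOVE:

    def resolve_trivial(kind, in_ancestor, same_as_ancestor):
        if kind == b'cd' and in_ancestor and same_as_ancestor:
            return b'r'   # local matches the ancestor: take the remote deletion
        if kind == b'dc' and in_ancestor and same_as_ancestor:
            return None   # remote matches the ancestor: keep the local deletion
        return kind       # genuine conflict, keep prompting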
 
 
 def calculateupdates(
@@ -891,13 +1107,14 @@
 
     Also filters out actions which are not required because the repository is
     sparse.
 
-    Returns same 3 element tuple as manifestmerge().
+    Returns a mergeresult object, the same as manifestmerge().
     """
     # Avoid cycle.
     from . import sparse
 
+    mresult = None
     if len(ancestors) == 1:  # default
-        actions, diverge, renamedelete = manifestmerge(
+        mresult = manifestmerge(
             repo,
             wctx,
             mctx,
@@ -908,7 +1125,7 @@
             acceptremote,
             followcopies,
         )
-        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
+        _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
 
     else:  # only when merge.preferancestor=* - the default
         repo.ui.note(
@@ -920,14 +1137,17 @@
             )
         )
 
-        # Call for bids
-        fbids = (
-            {}
-        )  # mapping filename to bids (action method to list af actions)
+        # mapping filename to bids (action method to list of actions)
+        # {FILENAME1 : BID1, FILENAME2 : BID2}
+        # BID is another dictionary which contains
+        # a mapping of the following form:
+        # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
+        fbids = {}
+        mresult = mergeresult()
         diverge, renamedelete = None, None
         for ancestor in ancestors:
             repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
-            actions, diverge1, renamedelete1 = manifestmerge(
+            mresult1 = manifestmerge(
                 repo,
                 wctx,
                 mctx,
@@ -939,19 +1159,25 @@
                 followcopies,
                 forcefulldiff=True,
             )
-            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
+            _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
 
             # Track the shortest set of warnings on the theory that bid
             # merge will correctly incorporate more information
-            if diverge is None or len(diverge1) < len(diverge):
-                diverge = diverge1
-            if renamedelete is None or len(renamedelete) < len(renamedelete1):
-                renamedelete = renamedelete1
+            if diverge is None or len(mresult1.diverge) < len(diverge):
+                diverge = mresult1.diverge
+            if renamedelete is None or len(renamedelete) < len(
+                mresult1.renamedelete
+            ):
+                renamedelete = mresult1.renamedelete
 
-            for f, a in sorted(pycompat.iteritems(actions)):
+            # blindly update final mergeresult commitinfo with what we get
+            # from mergeresult object for each ancestor
+            # TODO: some commitinfo depends on what bid merge chooses and hence
+            # we will need to make commitinfo also depend on bid merge logic
+            mresult._commitinfo.update(mresult1._commitinfo)
+
+            for f, a in mresult1.filemap(sort=True):
                 m, args, msg = a
-                if m == mergestatemod.ACTION_GET_OTHER_AND_STORE:
-                    m = mergestatemod.ACTION_GET
                 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                 if f in fbids:
                     d = fbids[f]
@@ -962,29 +1188,75 @@
                 else:
                     fbids[f] = {m: [a]}
 
+        # Call for bids
         # Pick the best bid for each file
-        repo.ui.note(_(b'\nauction for merging merge bids\n'))
-        actions = {}
+        repo.ui.note(
+            _(b'\nauction for merging merge bids (%d ancestors)\n')
+            % len(ancestors)
+        )
         for f, bids in sorted(fbids.items()):
+            if repo.ui.debugflag:
+                repo.ui.debug(b" list of bids for %s:\n" % f)
+                for m, l in sorted(bids.items()):
+                    for _f, args, msg in l:
+                        repo.ui.debug(b'   %s -> %s\n' % (msg, m))
             # bids is a mapping from action method to list of actions
             # Consensus?
             if len(bids) == 1:  # all bids are the same kind of method
                 m, l = list(bids.items())[0]
                 if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                     repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
-                    actions[f] = l[0]
+                    mresult.addfile(f, *l[0])
                     continue
             # If keep is an option, just do it.
             if mergestatemod.ACTION_KEEP in bids:
                 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
-                actions[f] = bids[mergestatemod.ACTION_KEEP][0]
+                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
+                continue
+            # If keep absent is an option, just do that
+            if mergestatemod.ACTION_KEEP_ABSENT in bids:
+                repo.ui.note(_(b" %s: picking 'keep absent' action\n") % f)
+                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_ABSENT][0])
+                continue
+            # ACTION_KEEP_NEW and ACTION_CHANGED_DELETED are conflicting actions
+            # as one says that the file is new while the other says that the
+            # file was present earlier too and has a change/delete conflict
+            # Let's fall back to conflicting ACTION_CHANGED_DELETED and let
+            # the user do the right thing
+            if (
+                mergestatemod.ACTION_CHANGED_DELETED in bids
+                and mergestatemod.ACTION_KEEP_NEW in bids
+            ):
+                repo.ui.note(_(b" %s: picking 'changed/deleted' action\n") % f)
+                mresult.addfile(
+                    f, *bids[mergestatemod.ACTION_CHANGED_DELETED][0]
+                )
+                continue
+            # If keep new is an option, let's just do that
+            if mergestatemod.ACTION_KEEP_NEW in bids:
+                repo.ui.note(_(b" %s: picking 'keep new' action\n") % f)
+                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_NEW][0])
+                continue
+            # ACTION_GET and ACTION_DELETED_CHANGED are conflicting actions as
+            # one action states the file is newer/created on the remote side
+            # and the other states that the file is deleted locally and changed
+            # on the remote side. Let's fall back and rely on a conflicting
+            # action to let the user do the right thing
+            if (
+                mergestatemod.ACTION_DELETED_CHANGED in bids
+                and mergestatemod.ACTION_GET in bids
+            ):
+                repo.ui.note(_(b" %s: picking 'delete/changed' action\n") % f)
+                mresult.addfile(
+                    f, *bids[mergestatemod.ACTION_DELETED_CHANGED][0]
+                )
                 continue
             # If there are gets and they all agree [how could they not?], do it.
             if mergestatemod.ACTION_GET in bids:
                 ga0 = bids[mergestatemod.ACTION_GET][0]
                 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
                     repo.ui.note(_(b" %s: picking 'get' action\n") % f)
-                    actions[f] = ga0
+                    mresult.addfile(f, *ga0)
                     continue
             # TODO: Consider other simple actions such as mode changes
             # Handle inefficient democrazy.
@@ -997,20 +1269,18 @@
             repo.ui.warn(
                 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
             )
-            actions[f] = l[0]
+            mresult.addfile(f, *l[0])
             continue
         repo.ui.note(_(b'end of auction\n\n'))
+        mresult.updatevalues(diverge, renamedelete)
 
     if wctx.rev() is None:
-        fractions = _forgetremoved(wctx, mctx, branchmerge)
-        actions.update(fractions)
+        _forgetremoved(wctx, mctx, branchmerge, mresult)
 
-    prunedactions = sparse.filterupdatesactions(
-        repo, wctx, mctx, branchmerge, actions
-    )
-    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
+    sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
+    _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
 
-    return prunedactions, diverge, renamedelete
+    return mresult
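The auction above resolves each file's bids in a fixed priority order. A distilled, runnable sketch of just that order; the single-letter byte codes are assumed to mirror the ACTION_* constants referenced in the hunk:

    def pick_bid(bids):
        # bids: {action: [(f, args, msg), ...]} collected across ancestors
        if len(bids) == 1:
            action, cands = list(bids.items())[0]
            if all(c == cands[0] for c in cands):
                return cands[0]                # consensus
        for action in (b'k', b'ka'):           # keep, then keep-absent
            if action in bids:
                return bids[action][0]
        if b'cd' in bids and b'kn' in bids:
            return bids[b'cd'][0]              # changed/deleted beats keep-new
        if b'kn' in bids:
            return bids[b'kn'][0]
        if b'dc' in bids and b'g' in bids:
            return bids[b'dc'][0]              # deleted/changed beats get
        if b'g' in bids and all(c == bids[b'g'][0] for c in bids[b'g'][1:]):
            return bids[b'g'][0]
        return sorted(bids.items())[0][1][0]   # ambiguous: first action wins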
 
 
 def _getcwd():
@@ -1039,7 +1309,8 @@
             wctx[f].remove(ignoremissing=True)
         except OSError as inst:
             repo.ui.warn(
-                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
+                _(b"update failed to remove %s: %s!\n")
+                % (f, pycompat.bytestr(inst.strerror))
             )
         if i == 100:
             yield i, f
@@ -1117,34 +1388,26 @@
     yield True, filedata
 
 
-def _prefetchfiles(repo, ctx, actions):
+def _prefetchfiles(repo, ctx, mresult):
     """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
     of merge actions.  ``ctx`` is the context being merged in."""
 
     # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
     # don't touch the context to be merged in.  'cd' is skipped, because
     # changed/deleted never resolves to something from the remote side.
-    oplist = [
-        actions[a]
-        for a in (
+    files = mresult.files(
+        [
             mergestatemod.ACTION_GET,
             mergestatemod.ACTION_DELETED_CHANGED,
             mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
             mergestatemod.ACTION_MERGE,
-        )
-    ]
+        ]
+    )
+
     prefetch = scmutil.prefetchfiles
     matchfiles = scmutil.matchfiles
     prefetch(
-        repo,
-        [
-            (
-                ctx.rev(),
-                matchfiles(
-                    repo, [f for sublist in oplist for f, args, msg in sublist]
-                ),
-            )
-        ],
+        repo, [(ctx.rev(), matchfiles(repo, files),)],
     )
 
 
@@ -1164,35 +1427,12 @@
         )
 
 
-def emptyactions():
-    """create an actions dict, to be populated and passed to applyupdates()"""
-    return {
-        m: []
-        for m in (
-            mergestatemod.ACTION_ADD,
-            mergestatemod.ACTION_ADD_MODIFIED,
-            mergestatemod.ACTION_FORGET,
-            mergestatemod.ACTION_GET,
-            mergestatemod.ACTION_CHANGED_DELETED,
-            mergestatemod.ACTION_DELETED_CHANGED,
-            mergestatemod.ACTION_REMOVE,
-            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
-            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
-            mergestatemod.ACTION_MERGE,
-            mergestatemod.ACTION_EXEC,
-            mergestatemod.ACTION_KEEP,
-            mergestatemod.ACTION_PATH_CONFLICT,
-            mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
-            mergestatemod.ACTION_GET_OTHER_AND_STORE,
-        )
-    }
-
-
 def applyupdates(
-    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
+    repo, mresult, wctx, mctx, overwrite, wantfiledata, labels=None,
 ):
     """apply the merge action list to the working directory
 
+    mresult is a mergeresult object representing the result of the merge
     wctx is the working copy context
     mctx is the context to be merged into the working copy
 
@@ -1202,25 +1442,171 @@
     batchget.
     """
 
-    _prefetchfiles(repo, mctx, actions)
+    _prefetchfiles(repo, mctx, mresult)
 
     updated, merged, removed = 0, 0, 0
-    ms = mergestatemod.mergestate.clean(
-        repo, wctx.p1().node(), mctx.node(), labels
+    ms = wctx.mergestate(clean=True)
+    ms.start(wctx.p1().node(), mctx.node(), labels)
+
+    for f, op in pycompat.iteritems(mresult.commitinfo):
+    # the other side of filenode was chosen while merging, store this in
+        # mergestate so that it can be reused on commit
+        ms.addcommitinfo(f, op)
+
+    numupdates = mresult.len() - mresult.len(mergestatemod.NO_OP_ACTIONS)
+    progress = repo.ui.makeprogress(
+        _(b'updating'), unit=_(b'files'), total=numupdates
     )
 
-    # add ACTION_GET_OTHER_AND_STORE to mergestate
-    for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
-        ms.addmergedother(e[0])
+    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
+        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
+
+    # record path conflicts
+    for f, args, msg in mresult.getactions(
+        [mergestatemod.ACTION_PATH_CONFLICT], sort=True
+    ):
+        f1, fo = args
+        s = repo.ui.status
+        s(
+            _(
+                b"%s: path conflict - a file or link has the same name as a "
+                b"directory\n"
+            )
+            % f
+        )
+        if fo == b'l':
+            s(_(b"the local file has been renamed to %s\n") % f1)
+        else:
+            s(_(b"the remote file has been renamed to %s\n") % f1)
+        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
+        ms.addpathconflict(f, f1, fo)
+        progress.increment(item=f)
+
+    # When merging in-memory, we can't support worker processes, so set the
+    # per-item cost at 0 in that case.
+    cost = 0 if wctx.isinmemory() else 0.001
+
+    # remove in parallel (must come before resolving path conflicts and getting)
+    prog = worker.worker(
+        repo.ui,
+        cost,
+        batchremove,
+        (repo, wctx),
+        list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
+    )
+    for i, item in prog:
+        progress.increment(step=i, item=item)
+    removed = mresult.len((mergestatemod.ACTION_REMOVE,))
+
+    # resolve path conflicts (must come before getting)
+    for f, args, msg in mresult.getactions(
+        [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
+    ):
+        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
+        (f0, origf0) = args
+        if wctx[f0].lexists():
+            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
+            wctx[f].audit()
+            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
+            wctx[f0].remove()
+        progress.increment(item=f)
+
+    # get in parallel.
+    threadsafe = repo.ui.configbool(
+        b'experimental', b'worker.wdir-get-thread-safe'
+    )
+    prog = worker.worker(
+        repo.ui,
+        cost,
+        batchget,
+        (repo, mctx, wctx, wantfiledata),
+        list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
+        threadsafe=threadsafe,
+        hasretval=True,
+    )
+    getfiledata = {}
+    for final, res in prog:
+        if final:
+            getfiledata = res
+        else:
+            i, item = res
+            progress.increment(step=i, item=item)
+
+    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
+        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
+
+    # forget (manifest only, just log it) (must come first)
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_FORGET,), sort=True
+    ):
+        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
+        progress.increment(item=f)
+
+    # re-add (manifest only, just log it)
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_ADD,), sort=True
+    ):
+        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
+        progress.increment(item=f)
+
+    # re-add/mark as modified (manifest only, just log it)
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
+    ):
+        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
+        progress.increment(item=f)
+
+    # keep (noop, just log it)
+    for a in mergestatemod.NO_OP_ACTIONS:
+        for f, args, msg in mresult.getactions((a,), sort=True):
+            repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a))
+            # no progress
+
+    # directory rename, move local
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
+    ):
+        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
+        progress.increment(item=f)
+        f0, flags = args
+        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
+        wctx[f].audit()
+        wctx[f].write(wctx.filectx(f0).data(), flags)
+        wctx[f0].remove()
+
+    # local directory rename, get
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
+    ):
+        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
+        progress.increment(item=f)
+        f0, flags = args
+        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
+        wctx[f].write(mctx.filectx(f0).data(), flags)
+
+    # exec
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_EXEC,), sort=True
+    ):
+        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
+        progress.increment(item=f)
+        (flags,) = args
+        wctx[f].audit()
+        wctx[f].setflags(b'l' in flags, b'x' in flags)
 
     moves = []
-    for m, l in actions.items():
-        l.sort()
 
     # 'cd' and 'dc' actions are treated like other merge conflicts
-    mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
-    mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
-    mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
+    mergeactions = list(
+        mresult.getactions(
+            [
+                mergestatemod.ACTION_CHANGED_DELETED,
+                mergestatemod.ACTION_DELETED_CHANGED,
+                mergestatemod.ACTION_MERGE,
+            ],
+            sort=True,
+        )
+    )
     for f, args, msg in mergeactions:
         f1, f2, fa, move, anc = args
         if f == b'.hgsubstate':  # merged internally
@@ -1251,165 +1637,15 @@
             wctx[f].audit()
             wctx[f].remove()
 
-    numupdates = sum(
-        len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
-    )
-    progress = repo.ui.makeprogress(
-        _(b'updating'), unit=_(b'files'), total=numupdates
-    )
-
-    if [
-        a
-        for a in actions[mergestatemod.ACTION_REMOVE]
-        if a[0] == b'.hgsubstate'
-    ]:
-        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
-
-    # record path conflicts
-    for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
-        f1, fo = args
-        s = repo.ui.status
-        s(
-            _(
-                b"%s: path conflict - a file or link has the same name as a "
-                b"directory\n"
-            )
-            % f
+    # these actions update the file
+    updated = mresult.len(
+        (
+            mergestatemod.ACTION_GET,
+            mergestatemod.ACTION_EXEC,
+            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
+            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
         )
-        if fo == b'l':
-            s(_(b"the local file has been renamed to %s\n") % f1)
-        else:
-            s(_(b"the remote file has been renamed to %s\n") % f1)
-        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
-        ms.addpathconflict(f, f1, fo)
-        progress.increment(item=f)
-
-    # When merging in-memory, we can't support worker processes, so set the
-    # per-item cost at 0 in that case.
-    cost = 0 if wctx.isinmemory() else 0.001
-
-    # remove in parallel (must come before resolving path conflicts and getting)
-    prog = worker.worker(
-        repo.ui,
-        cost,
-        batchremove,
-        (repo, wctx),
-        actions[mergestatemod.ACTION_REMOVE],
-    )
-    for i, item in prog:
-        progress.increment(step=i, item=item)
-    removed = len(actions[mergestatemod.ACTION_REMOVE])
-
-    # resolve path conflicts (must come before getting)
-    for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
-        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
-        (f0, origf0) = args
-        if wctx[f0].lexists():
-            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
-            wctx[f].audit()
-            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
-            wctx[f0].remove()
-        progress.increment(item=f)
-
-    # get in parallel.
-    threadsafe = repo.ui.configbool(
-        b'experimental', b'worker.wdir-get-thread-safe'
-    )
-    prog = worker.worker(
-        repo.ui,
-        cost,
-        batchget,
-        (repo, mctx, wctx, wantfiledata),
-        actions[mergestatemod.ACTION_GET],
-        threadsafe=threadsafe,
-        hasretval=True,
     )
-    getfiledata = {}
-    for final, res in prog:
-        if final:
-            getfiledata = res
-        else:
-            i, item = res
-            progress.increment(step=i, item=item)
-    updated = len(actions[mergestatemod.ACTION_GET])
-
-    if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
-        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
-
-    # forget (manifest only, just log it) (must come first)
-    for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
-        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
-        progress.increment(item=f)
-
-    # re-add (manifest only, just log it)
-    for f, args, msg in actions[mergestatemod.ACTION_ADD]:
-        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
-        progress.increment(item=f)
-
-    # re-add/mark as modified (manifest only, just log it)
-    for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
-        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
-        progress.increment(item=f)
-
-    # keep (noop, just log it)
-    for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
-        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
-        # no progress
-
-    # directory rename, move local
-    for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
-        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
-        progress.increment(item=f)
-        f0, flags = args
-        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
-        wctx[f].audit()
-        wctx[f].write(wctx.filectx(f0).data(), flags)
-        wctx[f0].remove()
-        updated += 1
-
-    # local directory rename, get
-    for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
-        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
-        progress.increment(item=f)
-        f0, flags = args
-        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
-        wctx[f].write(mctx.filectx(f0).data(), flags)
-        updated += 1
-
-    # exec
-    for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
-        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
-        progress.increment(item=f)
-        (flags,) = args
-        wctx[f].audit()
-        wctx[f].setflags(b'l' in flags, b'x' in flags)
-        updated += 1
-
-    # the ordering is important here -- ms.mergedriver will raise if the merge
-    # driver has changed, and we want to be able to bypass it when overwrite is
-    # True
-    usemergedriver = not overwrite and mergeactions and ms.mergedriver
-
-    if usemergedriver:
-        if wctx.isinmemory():
-            raise error.InMemoryMergeConflictsError(
-                b"in-memory merge does not support mergedriver"
-            )
-        ms.commit()
-        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
-        # the driver might leave some files unresolved
-        unresolvedf = set(ms.unresolved())
-        if not proceed:
-            # XXX setting unresolved to at least 1 is a hack to make sure we
-            # error out
-            return updateresult(
-                updated, merged, removed, max(len(unresolvedf), 1)
-            )
-        newactions = []
-        for f, args, msg in mergeactions:
-            if f in unresolvedf:
-                newactions.append((f, args, msg))
-        mergeactions = newactions
 
     try:
         # premerge
@@ -1439,18 +1675,6 @@
 
     unresolved = ms.unresolvedcount()
 
-    if (
-        usemergedriver
-        and not unresolved
-        and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
-    ):
-        if not driverconclude(repo, ms, wctx, labels=labels):
-            # XXX setting unresolved to at least 1 is a hack to make sure we
-            # error out
-            unresolved = max(unresolved, 1)
-
-        ms.commit()
-
     msupdated, msmerged, msremoved = ms.counts()
     updated += msupdated
     merged += msmerged
@@ -1458,38 +1682,17 @@
 
     extraactions = ms.actions()
     if extraactions:
-        mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
         for k, acts in pycompat.iteritems(extraactions):
-            actions[k].extend(acts)
+            for a in acts:
+                mresult.addfile(a[0], k, *a[1:])
             if k == mergestatemod.ACTION_GET and wantfiledata:
                 # no filedata until mergestate is updated to provide it
                 for a in acts:
                     getfiledata[a[0]] = None
-            # Remove these files from actions[ACTION_MERGE] as well. This is
-            # important because in recordupdates, files in actions[ACTION_MERGE]
-            # are processed after files in other actions, and the merge driver
-            # might add files to those actions via extraactions above. This can
-            # lead to a file being recorded twice, with poor results. This is
-            # especially problematic for actions[ACTION_REMOVE] (currently only
-            # possible with the merge driver in the initial merge process;
-            # interrupted merges don't go through this flow).
-            #
-            # The real fix here is to have indexes by both file and action so
-            # that when the action for a file is changed it is automatically
-            # reflected in the other action lists. But that involves a more
-            # complex data structure, so this will do for now.
-            #
-            # We don't need to do the same operation for 'dc' and 'cd' because
-            # those lists aren't consulted again.
-            mfiles.difference_update(a[0] for a in acts)
-
-        actions[mergestatemod.ACTION_MERGE] = [
-            a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
-        ]
 
     progress.complete()
     assert len(getfiledata) == (
-        len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
+        mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
     )
     return updateresult(updated, merged, removed, unresolved), getfiledata
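The restructured applyupdates() fixes a strict ordering of phases over the working copy, each phase consuming one or more action types. As a checklist, with the single-letter codes as printed in the debug output above (membership of the no-op keep codes in NO_OP_ACTIONS is assumed):

    # order in which applyupdates() touches the working copy
    APPLY_ORDER = [
        b'p',                # record path conflicts in the mergestate
        b'r',                # remove in parallel (before b'pr' and b'g')
        b'pr',               # resolve path conflicts (before b'g')
        b'g',                # get in parallel
        b'f', b'a', b'am',   # manifest-only bookkeeping, just logged
        b'k', b'ka', b'kn',  # no-op keeps, logged without progress
        b'dm',               # directory rename, move local
        b'dg',               # local directory rename, get
        b'e',                # update exec/link flags
        b'cd', b'dc', b'm',  # real merges, driven through the mergestate last
    ]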
 
@@ -1509,6 +1712,15 @@
     fsmonitorthreshold = repo.ui.configint(
         b'fsmonitor', b'warn_update_file_count'
     )
+    # avoid cycle dirstate -> sparse -> merge -> dirstate
+    from . import dirstate
+
+    if dirstate.rustmod is not None:
+        # When using rust status, fsmonitor becomes necessary at higher sizes
+        fsmonitorthreshold = repo.ui.configint(
+            b'fsmonitor', b'warn_update_file_count_rust',
+        )
+
     try:
         # avoid cycle: extensions -> cmdutil -> merge
         from . import extensions
@@ -1543,7 +1755,7 @@
 UPDATECHECK_NO_CONFLICT = b'noconflict'
 
 
-def update(
+def _update(
     repo,
     node,
     branchmerge,
@@ -1663,7 +1875,7 @@
         if not overwrite:
             if len(pl) > 1:
                 raise error.Abort(_(b"outstanding uncommitted merge"))
-            ms = mergestatemod.mergestate.read(repo)
+            ms = wc.mergestate()
             if list(ms.unresolved()):
                 raise error.Abort(
                     _(b"outstanding merge conflicts"),
@@ -1734,7 +1946,7 @@
             followcopies = False
 
         ### calculate phase
-        actionbyfile, diverge, renamedelete = calculateupdates(
+        mresult = calculateupdates(
             repo,
             wc,
             p2,
@@ -1748,25 +1960,18 @@
         )
 
         if updatecheck == UPDATECHECK_NO_CONFLICT:
-            for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
-                if m not in (
-                    mergestatemod.ACTION_GET,
-                    mergestatemod.ACTION_KEEP,
-                    mergestatemod.ACTION_EXEC,
-                    mergestatemod.ACTION_REMOVE,
-                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
-                    mergestatemod.ACTION_GET_OTHER_AND_STORE,
-                ):
-                    msg = _(b"conflicting changes")
-                    hint = _(b"commit or update --clean to discard changes")
-                    raise error.Abort(msg, hint=hint)
+            if mresult.hasconflicts():
+                msg = _(b"conflicting changes")
+                hint = _(b"commit or update --clean to discard changes")
+                raise error.Abort(msg, hint=hint)
 
         # Prompt and create actions. Most of this is in the resolve phase
         # already, but we can't handle .hgsubstate in filemerge or
         # subrepoutil.submerge yet so we have to keep prompting for it.
-        if b'.hgsubstate' in actionbyfile:
+        vals = mresult.getfile(b'.hgsubstate')
+        if vals:
             f = b'.hgsubstate'
-            m, args, msg = actionbyfile[f]
+            m, args, msg = vals
             prompts = filemerge.partextras(labels)
             prompts[b'f'] = f
             if m == mergestatemod.ACTION_CHANGED_DELETED:
@@ -1779,22 +1984,19 @@
                     % prompts,
                     0,
                 ):
-                    actionbyfile[f] = (
-                        mergestatemod.ACTION_REMOVE,
-                        None,
-                        b'prompt delete',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
                     )
                 elif f in p1:
-                    actionbyfile[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_ADD_MODIFIED,
                         None,
                         b'prompt keep',
                     )
                 else:
-                    actionbyfile[f] = (
-                        mergestatemod.ACTION_ADD,
-                        None,
-                        b'prompt keep',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_ADD, None, b'prompt keep',
                     )
             elif m == mergestatemod.ACTION_DELETED_CHANGED:
                 f1, f2, fa, move, anc = args
@@ -1811,24 +2013,14 @@
                     )
                     == 0
                 ):
-                    actionbyfile[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_GET,
                         (flags, False),
                         b'prompt recreating',
                     )
                 else:
-                    del actionbyfile[f]
-
-        # Convert to dictionary-of-lists format
-        actions = emptyactions()
-        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
-            if m not in actions:
-                actions[m] = []
-            actions[m].append((f, args, msg))
-
-        # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate
-        for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
-            actions[mergestatemod.ACTION_GET].append(e)
+                    mresult.removefile(f)
 
         if not util.fscasesensitive(repo.path):
             # check collision between files only in p2 for clean update
@@ -1837,10 +2029,10 @@
             ):
                 _checkcollision(repo, p2.manifest(), None)
             else:
-                _checkcollision(repo, wc.manifest(), actions)
+                _checkcollision(repo, wc.manifest(), mresult)
 
         # divergent renames
-        for f, fl in sorted(pycompat.iteritems(diverge)):
+        for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
             repo.ui.warn(
                 _(
                     b"note: possible conflict - %s was renamed "
@@ -1852,7 +2044,7 @@
                 repo.ui.warn(b" %s\n" % nf)
 
         # rename and delete
-        for f, fl in sorted(pycompat.iteritems(renamedelete)):
+        for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
             repo.ui.warn(
                 _(
                     b"note: possible conflict - %s was deleted "
@@ -1876,19 +2068,19 @@
             repo.vfs.write(b'updatestate', p2.hex())
 
         _advertisefsmonitor(
-            repo, len(actions[mergestatemod.ACTION_GET]), p1.node()
+            repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
         )
 
         wantfiledata = updatedirstate and not branchmerge
         stats, getfiledata = applyupdates(
-            repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
+            repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels,
         )
 
         if updatedirstate:
             with repo.dirstate.parentchange():
                 repo.setparents(fp1, fp2)
                 mergestatemod.recordupdates(
-                    repo, actions, branchmerge, getfiledata
+                    repo, mresult.actionsdict, branchmerge, getfiledata
                 )
                 # update completed, clear state
                 util.unlink(repo.vfs.join(b'updatestate'))
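
recordupdates() above still consumes the dictionary-of-lists form, which `mresult.actionsdict` rebuilds from the per-file state. For reference, a sketch of that shape; the file names, args and messages are made up:

    from mercurial import mergestate as mergestatemod

    # {action code: [(filename, action-specific args, message), ...]}
    actions = {
        mergestatemod.ACTION_GET: [
            (b'a.txt', (b'', False), b'remote is newer'),
        ],
        mergestatemod.ACTION_REMOVE: [
            (b'b.txt', None, b'other deleted'),
        ],
    }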
@@ -1914,7 +2106,7 @@
     force = whether the merge was run with 'merge --force' (deprecated)
     """
 
-    return update(
+    return _update(
         ctx.repo(),
         ctx.rev(),
         labels=labels,
@@ -1925,13 +2117,35 @@
     )
 
 
+def update(ctx, updatecheck=None, wc=None):
+    """Do a regular update to the given commit, aborting if there are conflicts.
+
+    The 'updatecheck' argument can be used to control what to do in case of
+    conflicts.
+
+    Note: This is a new, higher-level update() than the one that used to exist
+    in this module. That function is now called _update(). You can hopefully
+    migrate your callers to use this new update(), or clean_update(), merge(),
+    revert_to(), or graft().
+    """
+    return _update(
+        ctx.repo(),
+        ctx.rev(),
+        branchmerge=False,
+        force=False,
+        labels=[b'working copy', b'destination'],
+        updatecheck=updatecheck,
+        wc=wc,
+    )
+
+
 def clean_update(ctx, wc=None):
     """Do a clean update to the given commit.
 
     This involves updating to the commit and discarding any changes in the
     working copy.
     """
-    return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
+    return _update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
 
 
 def revert_to(ctx, matcher=None, wc=None):
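
Callers can exercise the new high-level helpers above roughly as follows; a hedged sketch, where `repo` and `node` are assumed to come from a command or extension:

    from mercurial import merge

    def goto(repo, node, discard=False):
        ctx = repo[node]
        if discard:
            # throw away working-copy changes
            return merge.clean_update(ctx)
        # regular update; aborts on conflicts per the updatecheck policy
        return merge.update(ctx)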
@@ -1941,7 +2155,7 @@
     be the same as in the given commit.
     """
 
-    return update(
+    return _update(
         ctx.repo(),
         ctx.rev(),
         branchmerge=False,
@@ -1992,7 +2206,7 @@
         or pctx.rev() == base.rev()
     )
 
-    stats = update(
+    stats = _update(
         repo,
         ctx.node(),
         True,
@@ -2028,6 +2242,23 @@
     return stats
 
 
+def back_out(ctx, parent=None, wc=None):
+    if parent is None:
+        if ctx.p2() is not None:
+            raise error.ProgrammingError(
+                b"must specify parent of merge commit to back out"
+            )
+        parent = ctx.p1()
+    return _update(
+        ctx.repo(),
+        parent,
+        branchmerge=True,
+        force=True,
+        ancestor=ctx.node(),
+        mergeancestor=False,
+    )
+
+
 def purge(
     repo,
     matcher,
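
The new back_out() prepares a reverse merge: it runs _update() toward the parent while passing the backed-out changeset itself as the ancestor, so that changeset's changes drop out of the working copy. A hedged usage sketch (`repo` and `badnode` are illustrative names):

    from mercurial import merge

    def undo(repo, badnode):
        ctx = repo[badnode]
        # for a merge changeset, the parent to back out against must be
        # given explicitly, e.g. merge.back_out(ctx, parent=ctx.p1())
        return merge.back_out(ctx)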
--- a/mercurial/mergestate.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/mergestate.py	Tue Oct 20 22:04:04 2020 +0530
@@ -1,5 +1,6 @@
 from __future__ import absolute_import
 
+import collections
 import errno
 import shutil
 import struct
@@ -11,7 +12,6 @@
     nullhex,
     nullid,
 )
-from .pycompat import delattr
 from . import (
     error,
     filemerge,
@@ -48,8 +48,6 @@
 RECORD_OTHER = b'O'
 # record merge labels
 RECORD_LABELS = b'l'
-# store info about merge driver used and it's state
-RECORD_MERGE_DRIVER_STATE = b'm'
 
 #####
 # record extra information about files, with one entry containing info about one
@@ -65,7 +63,6 @@
 #####
 RECORD_MERGED = b'F'
 RECORD_CHANGEDELETE_CONFLICT = b'C'
-RECORD_MERGE_DRIVER_MERGE = b'D'
 # the path was dir on one side of merge and file on another
 RECORD_PATH_CONFLICT = b'P'
 
@@ -77,9 +74,10 @@
 MERGE_RECORD_RESOLVED = b'r'
 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
 MERGE_RECORD_RESOLVED_PATH = b'pr'
-MERGE_RECORD_DRIVER_RESOLVED = b'd'
 # represents that the file was automatically merged in favor
 # of other version. This info is used on commit.
+# This is now deprecated and commit-related information is now
+# stored in RECORD_FILE_VALUES
 MERGE_RECORD_MERGED_OTHER = b'o'
 
 #####
@@ -89,18 +87,16 @@
 RECORD_OVERRIDE = b't'
 
 #####
-# possible states which a merge driver can have. These are stored inside a
-# RECORD_MERGE_DRIVER_STATE entry
-#####
-MERGE_DRIVER_STATE_UNMARKED = b'u'
-MERGE_DRIVER_STATE_MARKED = b'm'
-MERGE_DRIVER_STATE_SUCCESS = b's'
-
-#####
 # legacy records which are no longer used but kept to prevent breaking BC
 #####
 # This record was released in 5.4 and usage was removed in 5.5
 LEGACY_RECORD_RESOLVED_OTHER = b'R'
+# This record was released in 3.7 and usage was removed in 5.6
+LEGACY_RECORD_DRIVER_RESOLVED = b'd'
+# This record was released in 3.7 and usage was removed in 5.6
+LEGACY_MERGE_DRIVER_STATE = b'm'
+# This record was released in 3.7 and usage was removed in 5.6
+LEGACY_MERGE_DRIVER_MERGE = b'D'
 
 
 ACTION_FORGET = b'f'
@@ -117,13 +113,25 @@
 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
 ACTION_KEEP = b'k'
+# the file was absent on the local side before the merge and we should
+# keep it absent (absent means not present; this can result from a file
+# deletion, a rename, etc.)
+ACTION_KEEP_ABSENT = b'ka'
+# the file is absent on the ancestor and remote sides of the merge;
+# hence this file is new and we should keep it
+ACTION_KEEP_NEW = b'kn'
 ACTION_EXEC = b'e'
 ACTION_CREATED_MERGE = b'cm'
-# GET the other/remote side and store this info in mergestate
-ACTION_GET_OTHER_AND_STORE = b'gs'
+
+# actions that are no-ops
+NO_OP_ACTIONS = (
+    ACTION_KEEP,
+    ACTION_KEEP_ABSENT,
+    ACTION_KEEP_NEW,
+)
 
 
-class mergestate(object):
+class _mergestate_base(object):
     '''track 3-way merge state of individual files
 
     The merge state is stored on disk when needed. Two files are used: one with
@@ -143,40 +151,321 @@
     O: the node of the "other" part of the merge (hexified version)
     F: a file to be merged entry
     C: a change/delete or delete/change conflict
-    D: a file that the external merge driver will merge internally
-       (experimental)
     P: a path conflict (file vs directory)
-    m: the external merge driver defined for this merge plus its run state
-       (experimental)
     f: a (filename, dictionary) tuple of optional values for a given file
     l: the labels for the parts of the merge.
 
-    Merge driver run states (experimental):
-    u: driver-resolved files unmarked -- needs to be run next time we're about
-       to resolve or commit
-    m: driver-resolved files marked -- only needs to be run before commit
-    s: success/skipped -- does not need to be run any more
-
     Merge record states (stored in self._state, indexed by filename):
     u: unresolved conflict
     r: resolved conflict
     pu: unresolved path conflict (file conflicts with directory)
     pr: resolved path conflict
-    d: driver-resolved conflict
+    o: file was merged in favor of the other parent of the merge (DEPRECATED)
 
     The resolve command transitions between 'u' and 'r' for conflicts and
     'pu' and 'pr' for path conflicts.
     '''
 
+    def __init__(self, repo):
+        """Initialize the merge state.
+
+        Do not use this directly! Instead call read() or clean()."""
+        self._repo = repo
+        self._state = {}
+        self._stateextras = collections.defaultdict(dict)
+        self._local = None
+        self._other = None
+        self._labels = None
+        # contains a mapping of the form:
+        # {filename: (merge_return_value, action_to_be_performed)}
+        # these are the results of re-running the merge process;
+        # this dict is used to perform actions on the dirstate caused by
+        # re-running the merge
+        self._results = {}
+        self._dirty = False
+
+    def reset(self):
+        pass
+
+    def start(self, node, other, labels=None):
+        self._local = node
+        self._other = other
+        self._labels = labels
+
+    @util.propertycache
+    def local(self):
+        if self._local is None:
+            msg = b"local accessed but self._local isn't set"
+            raise error.ProgrammingError(msg)
+        return self._local
+
+    @util.propertycache
+    def localctx(self):
+        return self._repo[self.local]
+
+    @util.propertycache
+    def other(self):
+        if self._other is None:
+            msg = b"other accessed but self._other isn't set"
+            raise error.ProgrammingError(msg)
+        return self._other
+
+    @util.propertycache
+    def otherctx(self):
+        return self._repo[self.other]
+
+    def active(self):
+        """Whether mergestate is active.
+
+        Returns True if there appears to be mergestate. This is a rough proxy
+        for "is a merge in progress."
+        """
+        return bool(self._local) or bool(self._state)
+
+    def commit(self):
+        """Write current state on disk (if necessary)"""
+
+    @staticmethod
+    def getlocalkey(path):
+        """hash the path of a local file context for storage in the .hg/merge
+        directory."""
+
+        return hex(hashutil.sha1(path).digest())
+
+    def _make_backup(self, fctx, localkey):
+        raise NotImplementedError()
+
+    def _restore_backup(self, fctx, localkey, flags):
+        raise NotImplementedError()
+
+    def add(self, fcl, fco, fca, fd):
+        """add a new (potentially?) conflicting file the merge state
+        fcl: file context for local,
+        fco: file context for remote,
+        fca: file context for ancestors,
+        fd:  file path of the resulting merge.
+
+        note: also write the local version to the `.hg/merge` directory.
+        """
+        if fcl.isabsent():
+            localkey = nullhex
+        else:
+            localkey = mergestate.getlocalkey(fcl.path())
+            self._make_backup(fcl, localkey)
+        self._state[fd] = [
+            MERGE_RECORD_UNRESOLVED,
+            localkey,
+            fcl.path(),
+            fca.path(),
+            hex(fca.filenode()),
+            fco.path(),
+            hex(fco.filenode()),
+            fcl.flags(),
+        ]
+        self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
+        self._dirty = True
+
+    def addpathconflict(self, path, frename, forigin):
+        """add a new conflicting path to the merge state
+        path:    the path that conflicts
+        frename: the filename the conflicting file was renamed to
+        forigin: origin of the file ('l' or 'r' for local/remote)
+        """
+        self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
+        self._dirty = True
+
+    def addcommitinfo(self, path, data):
+        """ stores information which is required at commit
+        into _stateextras """
+        self._stateextras[path].update(data)
+        self._dirty = True
+
+    def __contains__(self, dfile):
+        return dfile in self._state
+
+    def __getitem__(self, dfile):
+        return self._state[dfile][0]
+
+    def __iter__(self):
+        return iter(sorted(self._state))
+
+    def files(self):
+        return self._state.keys()
+
+    def mark(self, dfile, state):
+        self._state[dfile][0] = state
+        self._dirty = True
+
+    def unresolved(self):
+        """Obtain the paths of unresolved files."""
+
+        for f, entry in pycompat.iteritems(self._state):
+            if entry[0] in (
+                MERGE_RECORD_UNRESOLVED,
+                MERGE_RECORD_UNRESOLVED_PATH,
+            ):
+                yield f
+
+    def allextras(self):
+        """ return all extras information stored with the mergestate """
+        return self._stateextras
+
+    def extras(self, filename):
+        """ return extras stored with the mergestate for the given filename """
+        return self._stateextras[filename]
+
+    def _resolve(self, preresolve, dfile, wctx):
+        """rerun merge process for file path `dfile`.
+        Returns whether the merge was completed and the return value of merge
+        obtained from filemerge._filemerge().
+        """
+        if self[dfile] in (
+            MERGE_RECORD_RESOLVED,
+            LEGACY_RECORD_DRIVER_RESOLVED,
+        ):
+            return True, 0
+        stateentry = self._state[dfile]
+        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
+        octx = self._repo[self._other]
+        extras = self.extras(dfile)
+        anccommitnode = extras.get(b'ancestorlinknode')
+        if anccommitnode:
+            actx = self._repo[anccommitnode]
+        else:
+            actx = None
+        fcd = _filectxorabsent(localkey, wctx, dfile)
+        fco = _filectxorabsent(onode, octx, ofile)
+        # TODO: move this to filectxorabsent
+        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
+        # "premerge" x flags
+        flo = fco.flags()
+        fla = fca.flags()
+        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
+            if fca.node() == nullid and flags != flo:
+                if preresolve:
+                    self._repo.ui.warn(
+                        _(
+                            b'warning: cannot merge flags for %s '
+                            b'without common ancestor - keeping local flags\n'
+                        )
+                        % afile
+                    )
+            elif flags == fla:
+                flags = flo
+        if preresolve:
+            # restore local
+            if localkey != nullhex:
+                self._restore_backup(wctx[dfile], localkey, flags)
+            else:
+                wctx[dfile].remove(ignoremissing=True)
+            complete, merge_ret, deleted = filemerge.premerge(
+                self._repo,
+                wctx,
+                self._local,
+                lfile,
+                fcd,
+                fco,
+                fca,
+                labels=self._labels,
+            )
+        else:
+            complete, merge_ret, deleted = filemerge.filemerge(
+                self._repo,
+                wctx,
+                self._local,
+                lfile,
+                fcd,
+                fco,
+                fca,
+                labels=self._labels,
+            )
+        if merge_ret is None:
+            # If the return value of merge is None, then there is no real conflict
+            del self._state[dfile]
+            self._stateextras.pop(dfile, None)
+            self._dirty = True
+        elif not merge_ret:
+            self.mark(dfile, MERGE_RECORD_RESOLVED)
+
+        if complete:
+            action = None
+            if deleted:
+                if fcd.isabsent():
+                    # dc: local picked. Need to drop if present, which may
+                    # happen on re-resolves.
+                    action = ACTION_FORGET
+                else:
+                    # cd: remote picked (or otherwise deleted)
+                    action = ACTION_REMOVE
+            else:
+                if fcd.isabsent():  # dc: remote picked
+                    action = ACTION_GET
+                elif fco.isabsent():  # cd: local picked
+                    if dfile in self.localctx:
+                        action = ACTION_ADD_MODIFIED
+                    else:
+                        action = ACTION_ADD
+                # else: regular merges (no action necessary)
+            self._results[dfile] = merge_ret, action
+
+        return complete, merge_ret
+
+    def preresolve(self, dfile, wctx):
+        """run premerge process for dfile
+
+        Returns whether the merge is complete, and the exit code."""
+        return self._resolve(True, dfile, wctx)
+
+    def resolve(self, dfile, wctx):
+        """run merge process (assuming premerge was run) for dfile
+
+        Returns the exit code of the merge."""
+        return self._resolve(False, dfile, wctx)[1]
+
+    def counts(self):
+        """return counts for updated, merged and removed files in this
+        session"""
+        updated, merged, removed = 0, 0, 0
+        for r, action in pycompat.itervalues(self._results):
+            if r is None:
+                updated += 1
+            elif r == 0:
+                if action == ACTION_REMOVE:
+                    removed += 1
+                else:
+                    merged += 1
+        return updated, merged, removed
+
+    def unresolvedcount(self):
+        """get unresolved count for this merge (persistent)"""
+        return len(list(self.unresolved()))
+
+    def actions(self):
+        """return lists of actions to perform on the dirstate"""
+        actions = {
+            ACTION_REMOVE: [],
+            ACTION_FORGET: [],
+            ACTION_ADD: [],
+            ACTION_ADD_MODIFIED: [],
+            ACTION_GET: [],
+        }
+        for f, (r, action) in pycompat.iteritems(self._results):
+            if action is not None:
+                actions[action].append((f, None, b"merge result"))
+        return actions
+
+
+class mergestate(_mergestate_base):
+
     statepathv1 = b'merge/state'
     statepathv2 = b'merge/state2'
 
     @staticmethod
-    def clean(repo, node=None, other=None, labels=None):
+    def clean(repo):
         """Initialize a brand new merge state, removing any existing state on
         disk."""
         ms = mergestate(repo)
-        ms.reset(node, other, labels)
+        ms.reset()
         return ms
 
     @staticmethod
@@ -186,50 +475,12 @@
         ms._read()
         return ms
 
-    def __init__(self, repo):
-        """Initialize the merge state.
-
-        Do not use this directly! Instead call read() or clean()."""
-        self._repo = repo
-        self._dirty = False
-        self._labels = None
-
-    def reset(self, node=None, other=None, labels=None):
-        self._state = {}
-        self._stateextras = {}
-        self._local = None
-        self._other = None
-        self._labels = labels
-        for var in ('localctx', 'otherctx'):
-            if var in vars(self):
-                delattr(self, var)
-        if node:
-            self._local = node
-            self._other = other
-        self._readmergedriver = None
-        if self.mergedriver:
-            self._mdstate = MERGE_DRIVER_STATE_SUCCESS
-        else:
-            self._mdstate = MERGE_DRIVER_STATE_UNMARKED
-        shutil.rmtree(self._repo.vfs.join(b'merge'), True)
-        self._results = {}
-        self._dirty = False
-
     def _read(self):
         """Analyse each record content to restore a serialized state from disk
 
         This function process "record" entry produced by the de-serialization
         of on disk file.
         """
-        self._state = {}
-        self._stateextras = {}
-        self._local = None
-        self._other = None
-        for var in ('localctx', 'otherctx'):
-            if var in vars(self):
-                delattr(self, var)
-        self._readmergedriver = None
-        self._mdstate = MERGE_DRIVER_STATE_SUCCESS
         unsupported = set()
         records = self._readrecords()
         for rtype, record in records:
@@ -237,28 +488,23 @@
                 self._local = bin(record)
             elif rtype == RECORD_OTHER:
                 self._other = bin(record)
-            elif rtype == RECORD_MERGE_DRIVER_STATE:
-                bits = record.split(b'\0', 1)
-                mdstate = bits[1]
-                if len(mdstate) != 1 or mdstate not in (
-                    MERGE_DRIVER_STATE_UNMARKED,
-                    MERGE_DRIVER_STATE_MARKED,
-                    MERGE_DRIVER_STATE_SUCCESS,
-                ):
-                    # the merge driver should be idempotent, so just rerun it
-                    mdstate = MERGE_DRIVER_STATE_UNMARKED
-
-                self._readmergedriver = bits[0]
-                self._mdstate = mdstate
+            elif rtype == LEGACY_MERGE_DRIVER_STATE:
+                pass
             elif rtype in (
                 RECORD_MERGED,
                 RECORD_CHANGEDELETE_CONFLICT,
                 RECORD_PATH_CONFLICT,
-                RECORD_MERGE_DRIVER_MERGE,
+                LEGACY_MERGE_DRIVER_MERGE,
                 LEGACY_RECORD_RESOLVED_OTHER,
             ):
                 bits = record.split(b'\0')
-                self._state[bits[0]] = bits[1:]
+                # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
+                # and we now store related information in _stateextras, so
+                # let's write to _stateextras directly
+                if bits[1] == MERGE_RECORD_MERGED_OTHER:
+                    self._stateextras[bits[0]][b'filenode-source'] = b'other'
+                else:
+                    self._state[bits[0]] = bits[1:]
             elif rtype == RECORD_FILE_VALUES:
                 filename, rawextras = record.split(b'\0', 1)
                 extraparts = rawextras.split(b'\0')
@@ -274,13 +520,6 @@
                 self._labels = [l for l in labels if len(l) > 0]
             elif not rtype.islower():
                 unsupported.add(rtype)
-        # contains a mapping of form:
-        # {filename : (merge_return_value, action_to_be_performed}
-        # these are results of re-running merge process
-        # this dict is used to perform actions on dirstate caused by re-running
-        # the merge
-        self._results = {}
-        self._dirty = False
 
         if unsupported:
             raise error.UnsupportedMergeRecords(unsupported)
@@ -395,62 +634,7 @@
                 raise
         return records
 
-    @util.propertycache
-    def mergedriver(self):
-        # protect against the following:
-        # - A configures a malicious merge driver in their hgrc, then
-        #   pauses the merge
-        # - A edits their hgrc to remove references to the merge driver
-        # - A gives a copy of their entire repo, including .hg, to B
-        # - B inspects .hgrc and finds it to be clean
-        # - B then continues the merge and the malicious merge driver
-        #  gets invoked
-        configmergedriver = self._repo.ui.config(
-            b'experimental', b'mergedriver'
-        )
-        if (
-            self._readmergedriver is not None
-            and self._readmergedriver != configmergedriver
-        ):
-            raise error.ConfigError(
-                _(b"merge driver changed since merge started"),
-                hint=_(b"revert merge driver change or abort merge"),
-            )
-
-        return configmergedriver
-
-    @util.propertycache
-    def local(self):
-        if self._local is None:
-            msg = b"local accessed but self._local isn't set"
-            raise error.ProgrammingError(msg)
-        return self._local
-
-    @util.propertycache
-    def localctx(self):
-        return self._repo[self.local]
-
-    @util.propertycache
-    def other(self):
-        if self._other is None:
-            msg = b"other accessed but self._other isn't set"
-            raise error.ProgrammingError(msg)
-        return self._other
-
-    @util.propertycache
-    def otherctx(self):
-        return self._repo[self.other]
-
-    def active(self):
-        """Whether mergestate is active.
-
-        Returns True if there appears to be mergestate. This is a rough proxy
-        for "is a merge in progress."
-        """
-        return bool(self._local) or bool(self._state)
-
     def commit(self):
-        """Write current state on disk (if necessary)"""
         if self._dirty:
             records = self._makerecords()
             self._writerecords(records)
@@ -460,25 +644,13 @@
         records = []
         records.append((RECORD_LOCAL, hex(self._local)))
         records.append((RECORD_OTHER, hex(self._other)))
-        if self.mergedriver:
-            records.append(
-                (
-                    RECORD_MERGE_DRIVER_STATE,
-                    b'\0'.join([self.mergedriver, self._mdstate]),
-                )
-            )
         # Write out state items. In all cases, the value of the state map entry
         # is written as the contents of the record. The record type depends on
         # the type of state that is stored, and capital-letter records are used
         # to prevent older versions of Mercurial that do not support the feature
         # from loading them.
         for filename, v in pycompat.iteritems(self._state):
-            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
-                # Driver-resolved merge. These are stored in 'D' records.
-                records.append(
-                    (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
-                )
-            elif v[0] in (
+            if v[0] in (
                 MERGE_RECORD_UNRESOLVED_PATH,
                 MERGE_RECORD_RESOLVED_PATH,
             ):
@@ -487,8 +659,6 @@
                 records.append(
                     (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                 )
-            elif v[0] == MERGE_RECORD_MERGED_OTHER:
-                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
             elif v[1] == nullhex or v[6] == nullhex:
                 # Change/Delete or Delete/Change conflicts. These are stored in
                 # 'C' records. v[1] is the local file, and is nullhex when the
@@ -544,250 +714,27 @@
             f.write(_pack(format, key, len(data), data))
         f.close()
 
-    @staticmethod
-    def getlocalkey(path):
-        """hash the path of a local file context for storage in the .hg/merge
-        directory."""
-
-        return hex(hashutil.sha1(path).digest())
-
-    def add(self, fcl, fco, fca, fd):
-        """add a new (potentially?) conflicting file the merge state
-        fcl: file context for local,
-        fco: file context for remote,
-        fca: file context for ancestors,
-        fd:  file path of the resulting merge.
-
-        note: also write the local version to the `.hg/merge` directory.
-        """
-        if fcl.isabsent():
-            localkey = nullhex
-        else:
-            localkey = mergestate.getlocalkey(fcl.path())
-            self._repo.vfs.write(b'merge/' + localkey, fcl.data())
-        self._state[fd] = [
-            MERGE_RECORD_UNRESOLVED,
-            localkey,
-            fcl.path(),
-            fca.path(),
-            hex(fca.filenode()),
-            fco.path(),
-            hex(fco.filenode()),
-            fcl.flags(),
-        ]
-        self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
-        self._dirty = True
-
-    def addpathconflict(self, path, frename, forigin):
-        """add a new conflicting path to the merge state
-        path:    the path that conflicts
-        frename: the filename the conflicting file was renamed to
-        forigin: origin of the file ('l' or 'r' for local/remote)
-        """
-        self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
-        self._dirty = True
+    def _make_backup(self, fctx, localkey):
+        self._repo.vfs.write(b'merge/' + localkey, fctx.data())
 
-    def addmergedother(self, path):
-        self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex]
-        self._dirty = True
-
-    def __contains__(self, dfile):
-        return dfile in self._state
-
-    def __getitem__(self, dfile):
-        return self._state[dfile][0]
-
-    def __iter__(self):
-        return iter(sorted(self._state))
-
-    def files(self):
-        return self._state.keys()
-
-    def mark(self, dfile, state):
-        self._state[dfile][0] = state
-        self._dirty = True
+    def _restore_backup(self, fctx, localkey, flags):
+        with self._repo.vfs(b'merge/' + localkey) as f:
+            fctx.write(f.read(), flags)
 
-    def mdstate(self):
-        return self._mdstate
-
-    def unresolved(self):
-        """Obtain the paths of unresolved files."""
+    def reset(self):
+        shutil.rmtree(self._repo.vfs.join(b'merge'), True)
 
-        for f, entry in pycompat.iteritems(self._state):
-            if entry[0] in (
-                MERGE_RECORD_UNRESOLVED,
-                MERGE_RECORD_UNRESOLVED_PATH,
-            ):
-                yield f
-
-    def driverresolved(self):
-        """Obtain the paths of driver-resolved files."""
-
-        for f, entry in self._state.items():
-            if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
-                yield f
-
-    def extras(self, filename):
-        return self._stateextras.setdefault(filename, {})
 
-    def _resolve(self, preresolve, dfile, wctx):
-        """rerun merge process for file path `dfile`.
-        Returns whether the merge was completed and the return value of merge
-        obtained from filemerge._filemerge().
-        """
-        if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
-            return True, 0
-        if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER:
-            return True, 0
-        stateentry = self._state[dfile]
-        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
-        octx = self._repo[self._other]
-        extras = self.extras(dfile)
-        anccommitnode = extras.get(b'ancestorlinknode')
-        if anccommitnode:
-            actx = self._repo[anccommitnode]
-        else:
-            actx = None
-        fcd = _filectxorabsent(localkey, wctx, dfile)
-        fco = _filectxorabsent(onode, octx, ofile)
-        # TODO: move this to filectxorabsent
-        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
-        # "premerge" x flags
-        flo = fco.flags()
-        fla = fca.flags()
-        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
-            if fca.node() == nullid and flags != flo:
-                if preresolve:
-                    self._repo.ui.warn(
-                        _(
-                            b'warning: cannot merge flags for %s '
-                            b'without common ancestor - keeping local flags\n'
-                        )
-                        % afile
-                    )
-            elif flags == fla:
-                flags = flo
-        if preresolve:
-            # restore local
-            if localkey != nullhex:
-                f = self._repo.vfs(b'merge/' + localkey)
-                wctx[dfile].write(f.read(), flags)
-                f.close()
-            else:
-                wctx[dfile].remove(ignoremissing=True)
-            complete, merge_ret, deleted = filemerge.premerge(
-                self._repo,
-                wctx,
-                self._local,
-                lfile,
-                fcd,
-                fco,
-                fca,
-                labels=self._labels,
-            )
-        else:
-            complete, merge_ret, deleted = filemerge.filemerge(
-                self._repo,
-                wctx,
-                self._local,
-                lfile,
-                fcd,
-                fco,
-                fca,
-                labels=self._labels,
-            )
-        if merge_ret is None:
-            # If return value of merge is None, then there are no real conflict
-            del self._state[dfile]
-            self._stateextras.pop(dfile, None)
-            self._dirty = True
-        elif not merge_ret:
-            self.mark(dfile, MERGE_RECORD_RESOLVED)
+class memmergestate(_mergestate_base):
+    def __init__(self, repo):
+        super(memmergestate, self).__init__(repo)
+        self._backups = {}
 
-        if complete:
-            action = None
-            if deleted:
-                if fcd.isabsent():
-                    # dc: local picked. Need to drop if present, which may
-                    # happen on re-resolves.
-                    action = ACTION_FORGET
-                else:
-                    # cd: remote picked (or otherwise deleted)
-                    action = ACTION_REMOVE
-            else:
-                if fcd.isabsent():  # dc: remote picked
-                    action = ACTION_GET
-                elif fco.isabsent():  # cd: local picked
-                    if dfile in self.localctx:
-                        action = ACTION_ADD_MODIFIED
-                    else:
-                        action = ACTION_ADD
-                # else: regular merges (no action necessary)
-            self._results[dfile] = merge_ret, action
-
-        return complete, merge_ret
-
-    def preresolve(self, dfile, wctx):
-        """run premerge process for dfile
-
-        Returns whether the merge is complete, and the exit code."""
-        return self._resolve(True, dfile, wctx)
-
-    def resolve(self, dfile, wctx):
-        """run merge process (assuming premerge was run) for dfile
-
-        Returns the exit code of the merge."""
-        return self._resolve(False, dfile, wctx)[1]
+    def _make_backup(self, fctx, localkey):
+        self._backups[localkey] = fctx.data()
 
-    def counts(self):
-        """return counts for updated, merged and removed files in this
-        session"""
-        updated, merged, removed = 0, 0, 0
-        for r, action in pycompat.itervalues(self._results):
-            if r is None:
-                updated += 1
-            elif r == 0:
-                if action == ACTION_REMOVE:
-                    removed += 1
-                else:
-                    merged += 1
-        return updated, merged, removed
-
-    def unresolvedcount(self):
-        """get unresolved count for this merge (persistent)"""
-        return len(list(self.unresolved()))
-
-    def actions(self):
-        """return lists of actions to perform on the dirstate"""
-        actions = {
-            ACTION_REMOVE: [],
-            ACTION_FORGET: [],
-            ACTION_ADD: [],
-            ACTION_ADD_MODIFIED: [],
-            ACTION_GET: [],
-        }
-        for f, (r, action) in pycompat.iteritems(self._results):
-            if action is not None:
-                actions[action].append((f, None, b"merge result"))
-        return actions
-
-    def queueremove(self, f):
-        """queues a file to be removed from the dirstate
-
-        Meant for use by custom merge drivers."""
-        self._results[f] = 0, ACTION_REMOVE
-
-    def queueadd(self, f):
-        """queues a file to be added to the dirstate
-
-        Meant for use by custom merge drivers."""
-        self._results[f] = 0, ACTION_ADD
-
-    def queueget(self, f):
-        """queues a file to be marked modified in the dirstate
-
-        Meant for use by custom merge drivers."""
-        self._results[f] = 0, ACTION_GET
+    def _restore_backup(self, fctx, localkey, flags):
+        fctx.write(self._backups[localkey], flags)
 
 
 def recordupdates(repo, actions, branchmerge, getfiledata):
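
memmergestate above overrides only where backups live; the resolve logic and results bookkeeping stay in _mergestate_base. The template-method split, distilled into a standalone sketch (the class names here are illustrative, not Mercurial API):

    class base(object):
        def _make_backup(self, key, data):
            raise NotImplementedError()

        def _restore_backup(self, key):
            raise NotImplementedError()

    class ondisk(base):
        """mirrors mergestate: backups go under .hg/merge/<key>"""

        def __init__(self, vfs):
            self._vfs = vfs  # a filesystem abstraction such as repo.vfs

        def _make_backup(self, key, data):
            self._vfs.write(b'merge/' + key, data)

        def _restore_backup(self, key):
            with self._vfs(b'merge/' + key) as f:
                return f.read()

    class inmemory(base):
        """mirrors memmergestate: backups stay in a dict"""

        def __init__(self):
            self._backups = {}

        def _make_backup(self, key, data):
            self._backups[key] = data

        def _restore_backup(self, key):
            return self._backups[key]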
@@ -832,6 +779,14 @@
     for f, args, msg in actions.get(ACTION_KEEP, []):
         pass
 
+    # keep deleted
+    for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
+        pass
+
+    # keep new
+    for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
+        pass
+
     # get
     for f, args, msg in actions.get(ACTION_GET, []):
         if branchmerge:
--- a/mercurial/mergeutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/mergeutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -17,8 +17,3 @@
         raise error.Abort(
             _(b"unresolved merge conflicts (see 'hg help resolve')")
         )
-    if ms.mdstate() != b's' or list(ms.driverresolved()):
-        raise error.Abort(
-            _(b'driver-resolved merge conflicts'),
-            hint=_(b'run "hg resolve --all" to resolve'),
-        )
--- a/mercurial/metadata.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/metadata.py	Tue Oct 20 22:04:04 2020 +0530
@@ -1,3 +1,4 @@
+# coding: utf8
 # metadata.py -- code related to various metadata computation and access.
 #
 # Copyright 2019 Google, Inc <martinvonz@google.com>
@@ -8,6 +9,7 @@
 from __future__ import absolute_import, print_function
 
 import multiprocessing
+import struct
 
 from . import (
     error,
@@ -22,6 +24,496 @@
 )
 
 
+class ChangingFiles(object):
+    """A class recording the changes made to files by a changeset
+
+    Actions performed on files are gathered into 5 sets:
+
+    - added:   files actively added in the changeset.
+    - merged:  files whose history got merged
+    - removed: files removed in the revision
+    - salvaged: files that might have been deleted by a merge but were not
+    - touched: files affected by the merge
+
+    and copy information is held by 2 mappings
+
+    - copied_from_p1: {"<new-name>": "<source-name-in-p1>"} mapping for copies
+    - copied_from_p2: {"<new-name>": "<source-name-in-p2>"} mapping for copies
+
+    See their inline help for details.
+    """
+
+    def __init__(
+        self,
+        touched=None,
+        added=None,
+        removed=None,
+        merged=None,
+        salvaged=None,
+        p1_copies=None,
+        p2_copies=None,
+    ):
+        self._added = set(() if added is None else added)
+        self._merged = set(() if merged is None else merged)
+        self._removed = set(() if removed is None else removed)
+        self._touched = set(() if touched is None else touched)
+        self._salvaged = set(() if salvaged is None else salvaged)
+        self._touched.update(self._added)
+        self._touched.update(self._merged)
+        self._touched.update(self._removed)
+        self._p1_copies = dict(() if p1_copies is None else p1_copies)
+        self._p2_copies = dict(() if p2_copies is None else p2_copies)
+
+    def __eq__(self, other):
+        return (
+            self.added == other.added
+            and self.merged == other.merged
+            and self.removed == other.removed
+            and self.salvaged == other.salvaged
+            and self.touched == other.touched
+            and self.copied_from_p1 == other.copied_from_p1
+            and self.copied_from_p2 == other.copied_from_p2
+        )
+
+    @property
+    def has_copies_info(self):
+        return bool(
+            self.removed
+            or self.merged
+            or self.salvaged
+            or self.copied_from_p1
+            or self.copied_from_p2
+        )
+
+    @util.propertycache
+    def added(self):
+        """files actively added in the changeset
+
+        Any file present in that revision that was absent in all the changeset's
+        parents.
+
+        In case of merge, this means a file absent in one of the parents but
+        existing in the other will *not* be contained in this set. (They were
+        added by an ancestor)
+        """
+        return frozenset(self._added)
+
+    def mark_added(self, filename):
+        if 'added' in vars(self):
+            del self.added
+        self._added.add(filename)
+        self.mark_touched(filename)
+
+    def update_added(self, filenames):
+        for f in filenames:
+            self.mark_added(f)
+
+    @util.propertycache
+    def merged(self):
+        """files actively merged during a merge
+
+        Any modified files which had modifications on both sides that needed merging.
+
+        In this case a new filenode was created and it has two parents.
+        """
+        return frozenset(self._merged)
+
+    def mark_merged(self, filename):
+        if 'merged' in vars(self):
+            del self.merged
+        self._merged.add(filename)
+        self.mark_touched(filename)
+
+    def update_merged(self, filenames):
+        for f in filenames:
+            self.mark_merged(f)
+
+    @util.propertycache
+    def removed(self):
+        """files actively removed by the changeset
+
+        In case of merge this will only contain the set of files removing "new"
+        content. For any file absent in the current changeset:
+
+        a) If the file exists in both parents, it is clearly "actively" removed
+        by this changeset.
+
+        b) If a file exists in only one parent and in none of the common
+        ancestors, then the file was newly added in one of the merged branches
+        and then got "actively" removed.
+
+        c) If a file exists in only one parent and at least one of the common
+        ancestors using the same filenode, then the file was unchanged on one
+        side and deleted on the other side. The merge "passively" propagated
+        that deletion, but didn't "actively" remove the file. In this case the
+        file is *not* included in the `removed` set.
+
+        d) If a file exists in only one parent and at least one of the common
+        ancestors using a different filenode, then the file was changed on one
+        side and removed on the other side. The merge process "actively"
+        decided to drop the new change and delete the file. Unlike in the
+        previous case, (c), the file is included in the `removed` set.
+
+        Summary table for merge:
+
+        case | exists in parents | exists in gca || removed
+         (a) |       both        |     *         ||   yes
+         (b) |       one         |     none      ||   yes
+         (c) |       one         | same filenode ||   no
+         (d) |       one         |  new filenode ||   yes
+        """
+        return frozenset(self._removed)
+
+    def mark_removed(self, filename):
+        if 'removed' in vars(self):
+            del self.removed
+        self._removed.add(filename)
+        self.mark_touched(filename)
+
+    def update_removed(self, filenames):
+        for f in filenames:
+            self.mark_removed(f)
+
+    @util.propertycache
+    def salvaged(self):
+        """files that might have been deleted by a merge, but still exists.
+
+        During a merge, the manifest merging might select some files for
+        removal, or for a removed/changed conflict. If at commit time the file
+        still exists, its removal was "reverted" and the file is "salvaged"
+        """
+        return frozenset(self._salvaged)
+
+    def mark_salvaged(self, filename):
+        if "salvaged" in vars(self):
+            del self.salvaged
+        self._salvaged.add(filename)
+        self.mark_touched(filename)
+
+    def update_salvaged(self, filenames):
+        for f in filenames:
+            self.mark_salvaged(f)
+
+    @util.propertycache
+    def touched(self):
+        """files either actively modified, added or removed"""
+        return frozenset(self._touched)
+
+    def mark_touched(self, filename):
+        if 'touched' in vars(self):
+            del self.touched
+        self._touched.add(filename)
+
+    def update_touched(self, filenames):
+        for f in filenames:
+            self.mark_touched(f)
+
+    @util.propertycache
+    def copied_from_p1(self):
+        return self._p1_copies.copy()
+
+    def mark_copied_from_p1(self, source, dest):
+        if 'copied_from_p1' in vars(self):
+            del self.copied_from_p1
+        self._p1_copies[dest] = source
+
+    def update_copies_from_p1(self, copies):
+        for dest, source in copies.items():
+            self.mark_copied_from_p1(source, dest)
+
+    @util.propertycache
+    def copied_from_p2(self):
+        return self._p2_copies.copy()
+
+    def mark_copied_from_p2(self, source, dest):
+        if 'copied_from_p2' in vars(self):
+            del self.copied_from_p2
+        self._p2_copies[dest] = source
+
+    def update_copies_from_p2(self, copies):
+        for dest, source in copies.items():
+            self.mark_copied_from_p2(source, dest)
+
+
+def compute_all_files_changes(ctx):
+    """compute the files changed by a revision"""
+    p1 = ctx.p1()
+    p2 = ctx.p2()
+    if p1.rev() == node.nullrev and p2.rev() == node.nullrev:
+        return _process_root(ctx)
+    elif p1.rev() != node.nullrev and p2.rev() == node.nullrev:
+        return _process_linear(p1, ctx)
+    elif p1.rev() == node.nullrev and p2.rev() != node.nullrev:
+        # In the wild, one can encounter changesets where p1 is null but p2 is not
+        return _process_linear(p1, ctx, parent=2)
+    elif p1.rev() == p2.rev():
+        # In the wild, one can encounter such "non-merge"
+        return _process_linear(p1, ctx)
+    else:
+        return _process_merge(p1, p2, ctx)
+
+
+def _process_root(ctx):
+    """compute the appropriate changed files for a changeset with no parents
+    """
+    # Simple, there was nothing before it, so everything is added.
+    md = ChangingFiles()
+    manifest = ctx.manifest()
+    for filename in manifest:
+        md.mark_added(filename)
+    return md
+
+
+def _process_linear(parent_ctx, children_ctx, parent=1):
+    """compute the appropriate changed files for a changeset with a single parent
+    """
+    md = ChangingFiles()
+    parent_manifest = parent_ctx.manifest()
+    children_manifest = children_ctx.manifest()
+
+    copies_candidate = []
+
+    for filename, d in parent_manifest.diff(children_manifest).items():
+        if d[1][0] is None:
+            # no filenode for the "new" value, file is absent
+            md.mark_removed(filename)
+        else:
+            copies_candidate.append(filename)
+            if d[0][0] is None:
+                # not filenode for the "old" value file was absent
+                md.mark_added(filename)
+            else:
+                # filenode for both "old" and "new"
+                md.mark_touched(filename)
+
+    if parent == 1:
+        copied = md.mark_copied_from_p1
+    elif parent == 2:
+        copied = md.mark_copied_from_p2
+    else:
+        assert False, "bad parent value %d" % parent
+
+    for filename in copies_candidate:
+        copy_info = children_ctx[filename].renamed()
+        if copy_info:
+            source, srcnode = copy_info
+            copied(source, filename)
+
+    return md
+
+
+def _process_merge(p1_ctx, p2_ctx, ctx):
+    """compute the appropriate changed files for a changeset with two parents
+
+    This is a more advanced case. The information we need to record is
+    summarised in the following table:
+
+    ┌──────────────┬──────────────┬──────────────┬──────────────┬──────────────┐
+    │ diff ╲  diff │       ø      │ (Some, None) │ (None, Some) │ (Some, Some) │
+    │  p2   ╲  p1  │              │              │              │              │
+    ├──────────────┼──────────────┼──────────────┼──────────────┼──────────────┤
+    │              │              │🄱  No Changes │🄳  No Changes │              │
+    │  ø           │🄰  No Changes │      OR      │     OR       │🄵  No Changes │
+    │              │              │🄲  Deleted[1] │🄴  Salvaged[2]│     [3]      │
+    ├──────────────┼──────────────┼──────────────┼──────────────┼──────────────┤
+    │              │🄶  No Changes │              │              │              │
+    │ (Some, None) │      OR      │🄻  Deleted    │       ø      │      ø       │
+    │              │🄷  Deleted[1] │              │              │              │
+    ├──────────────┼──────────────┼──────────────┼──────────────┼──────────────┤
+    │              │🄸  No Changes │              │              │              │
+    │ (None, Some) │     OR       │      ø       │🄼   Added     │🄽   Merged    │
+    │              │🄹  Salvaged[2]│              │   (copied?)  │   (copied?)  │
+    ├──────────────┼──────────────┼──────────────┼──────────────┼──────────────┤
+    │              │              │              │              │              │
+    │ (Some, Some) │🄺  No Changes │      ø       │🄾   Merged    │🄿   Merged    │
+    │              │     [3]      │              │   (copied?)  │   (copied?)  │
+    └──────────────┴──────────────┴──────────────┴──────────────┴──────────────┘
+
+    Special case [1]:
+
+      The situation is:
+        - parent-A:     file exists,
+        - parent-B:     no file,
+        - working-copy: no file.
+
+      Detecting a "deletion" will depend on the presence of actual change on
+      the "parent-A" branch:
+
+      Subcase 🄱 or 🄶 : if the state of the file in "parent-A" is unchanged
+      compared to the merge ancestors, then the parent-A branch left the file
+      untouched while parent-B deleted it. We simply apply the change from
+      the "parent-B" branch; the file was automatically dropped.
+      The result is:
+          - file is not recorded as touched by the merge.
+
+      Subcase 🄲 or 🄷 : otherwise, the changes from the parent-A branch were
+      explicitly dropped and the file was "deleted again". From a user
+      perspective, the message about "locally changed" while "remotely deleted"
+      (or the other way around) was issued and the user chose to delete the file.
+      The result:
+          - file is recorded as touched by the merge.
+
+
+    Special case [2]:
+
+      The situation is:
+        - parent-A:     no file,
+        - parent-B:     file,
+        - working-copy: file (same content as parent-B).
+
+      There are three subcases depending on the ancestors' contents:
+
+      - A) the file is missing in all ancestors,
+      - B) at least one ancestor has the file with a filenode ≠ parent-B's,
+      - C) all ancestors use the same filenode as parent-B,
+
+      Subcase (A) is the simplest: nothing happened on the parent-A side while
+      parent-B added the file.
+
+        The result:
+            - the file is not marked as touched by the merge.
+
+      Subcase (B) is the counterpart of "Special case [1]": the file was
+        modified on the parent-B side while the parent-A side deleted it.
+        This time, however, the conflict was resolved by keeping the file
+        (and its modification). We consider the file as "salvaged".
+
+        The result:
+            - the file is marked as "salvaged" by the merge.
+
+      Subcase (C) is a subtle variation of the case above. In this case, the
+        file is unchanged on the parent-B side and actively removed on the
+        parent-A side, so the merge machinery correctly decides it should be
+        removed. However, the file was explicitly restored to its parent-B
+        content before the merge was committed. The file is marked
+        as salvaged too. From the merge result perspective, this is similar to
+        subcase (B); however, from the merge resolution perspective they differ,
+        since in (C) there was a conflict with no obvious solution, and
+        the merge's decision got reversed.
+
+    Special case [3]:
+
+      The situation is:
+        - parent-A:     file,
+        - parent-B:     file (different filenode as parent-A),
+        - working-copy: file (same filenode as parent-B).
+
+      This case is in theory much simpler: for this to happen, the
+      filenode in parent-B must purely replace the one in parent-A (either a
+      descendant, or a full new file history, see changeset). So the merge
+      introduces no changes, and the file is not affected by the merge...
+
+      However, in the wild it is possible to find commits where the above is
+      not true. For example, repositories have some commits where the *new*
+      node is an ancestor of the node in parent-A, or where parent-A and
+      parent-B are two branches of the same file history, yet no merge-filenode
+      was created (while the "merge" should have led to a "modification").
+
+      Detecting such cases (and not recording the file as modified) would be a
+      nice bonus. However, we do not do any of this yet.
+    """
+
+    md = ChangingFiles()
+
+    m = ctx.manifest()
+    p1m = p1_ctx.manifest()
+    p2m = p2_ctx.manifest()
+    diff_p1 = p1m.diff(m)
+    diff_p2 = p2m.diff(m)
+
+    cahs = ctx.repo().changelog.commonancestorsheads(
+        p1_ctx.node(), p2_ctx.node()
+    )
+    if not cahs:
+        cahs = [node.nullrev]
+    mas = [ctx.repo()[r].manifest() for r in cahs]
+
+    copy_candidates = []
+
+    # Dealing with case 🄰 happens automatically.  Since there is no entry in
+    # d1 nor d2, we will never iterate over it.
+
+    # Iterating over d1's content deals with all cases but those in the
+    # first column of the table.
+    for filename, d1 in diff_p1.items():
+
+        d2 = diff_p2.pop(filename, None)
+
+        if d2 is None:
+            # this deals with the first line of the table.
+            _process_other_unchanged(md, mas, filename, d1)
+        else:
+
+            if d1[0][0] is None and d2[0][0] is None:
+                # case 🄼 — both sides added the file.
+                md.mark_added(filename)
+                copy_candidates.append(filename)
+            elif d1[1][0] is None and d2[1][0] is None:
+                # case 🄻 — both deleted the file.
+                md.mark_removed(filename)
+            elif d1[1][0] is not None and d2[1][0] is not None:
+                # case 🄽 🄾 🄿
+                md.mark_merged(filename)
+                copy_candidates.append(filename)
+            else:
+                # Impossible case: the post-merge file status cannot be None
+                # on one side and something else on the other side.
+                assert False, "unreachable"
+
+    # Iterating over the remaining diff_p2 content deals with the first column
+    # of the table.
+    for filename, d2 in diff_p2.items():
+        _process_other_unchanged(md, mas, filename, d2)
+
+    for filename in copy_candidates:
+        copy_info = ctx[filename].renamed()
+        if copy_info:
+            source, srcnode = copy_info
+            if source in p1_ctx and p1_ctx[source].filenode() == srcnode:
+                md.mark_copied_from_p1(source, filename)
+            elif source in p2_ctx and p2_ctx[source].filenode() == srcnode:
+                md.mark_copied_from_p2(source, filename)
+    return md
+
+
+def _find(manifest, filename):
+    """return the associate filenode or None"""
+    if filename not in manifest:
+        return None
+    return manifest.find(filename)[0]
+
+
+def _process_other_unchanged(md, mas, filename, diff):
+    source_node = diff[0][0]
+    target_node = diff[1][0]
+
+    if source_node is not None and target_node is None:
+        if any(not _find(ma, filename) == source_node for ma in mas):
+            # case 🄲 or 🄷
+            md.mark_removed(filename)
+        # else, we have case 🄱 or 🄶 : no change needs to be recorded
+    elif source_node is None and target_node is not None:
+        if any(_find(ma, filename) is not None for ma in mas):
+            # case 🄴 or 🄹
+            md.mark_salvaged(filename)
+        # else, we have case 🄳 or 🄸 : simple merge without intervention
+    elif source_node is not None and target_node is not None:
+        # case 🄵  or 🄺 : simple merge without intervention
+        #
+        # In the buggy case where source_node is not an ancestor of
+        # target_node, a new filenode should have been created and this
+        # recorded as "modified". We do not deal with that yet.
+        pass
+    else:
+        # An impossible case: the diff algorithm should not return an entry if
+        # the file is missing on both sides.
+        assert False, "unreachable"
+
+
+def _missing_from_all_ancestors(mas, filename):
+    return all(_find(ma, filename) is None for ma in mas)
+
+
 def computechangesetfilesadded(ctx):
     """return the list of files added in a changeset
     """
@@ -100,6 +592,21 @@
     return removed
 
 
+def computechangesetfilesmerged(ctx):
+    """return the list of files merged in a changeset
+    """
+    merged = []
+    if len(ctx.parents()) < 2:
+        return merged
+    for f in ctx.files():
+        if f in ctx:
+            fctx = ctx[f]
+            parents = fctx._filelog.parents(fctx._filenode)
+            if parents[1] != node.nullid:
+                merged.append(f)
+    return merged
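+
+# Note on the mechanism above (a sketch, not an exhaustive rule): a file-level
+# merge produces a filenode with two non-null parents, which is what the
+# ``parents[1] != node.nullid`` test detects; files taken verbatim from one
+# side keep a null second parent and are skipped.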
+
+
 def computechangesetcopies(ctx):
     """return the copies data for a changeset
 
@@ -181,28 +688,122 @@
         return None
 
 
+# see mercurial/helptext/internals/revlogs.txt for details about the format
+
+ACTION_MASK = int("111" "00", 2)
+# note: an untouched file used as a copy source will appear as `000` for this mask.
+ADDED_FLAG = int("001" "00", 2)
+MERGED_FLAG = int("010" "00", 2)
+REMOVED_FLAG = int("011" "00", 2)
+SALVAGED_FLAG = int("100" "00", 2)
+TOUCHED_FLAG = int("101" "00", 2)
+
+COPIED_MASK = int("11", 2)
+COPIED_FROM_P1_FLAG = int("10", 2)
+COPIED_FROM_P2_FLAG = int("11", 2)
+
+# structure is <flag><filename-end><copy-source>
+INDEX_HEADER = struct.Struct(">L")
+INDEX_ENTRY = struct.Struct(">bLL")
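+
+# For example (illustration only): a merged file that was also copied from p1
+# carries ``MERGED_FLAG | COPIED_FROM_P1_FLAG`` == 0b01010, and its index
+# entry is packed as ``INDEX_ENTRY.pack(flag, filename_end, copy_source_idx)``
+# where ``filename_end`` is the cumulative offset past this filename.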
+
+
+def encode_files_sidedata(files):
+    all_files = set(files.touched)
+    all_files.update(files.copied_from_p1.values())
+    all_files.update(files.copied_from_p2.values())
+    all_files = sorted(all_files)
+    file_idx = {f: i for (i, f) in enumerate(all_files)}
+    file_idx[None] = 0
+
+    chunks = [INDEX_HEADER.pack(len(all_files))]
+
+    filename_length = 0
+    for f in all_files:
+        filename_size = len(f)
+        filename_length += filename_size
+        flag = 0
+        if f in files.added:
+            flag |= ADDED_FLAG
+        elif f in files.merged:
+            flag |= MERGED_FLAG
+        elif f in files.removed:
+            flag |= REMOVED_FLAG
+        elif f in files.salvaged:
+            flag |= SALVAGED_FLAG
+        elif f in files.touched:
+            flag |= TOUCHED_FLAG
+
+        copy = None
+        if f in files.copied_from_p1:
+            flag |= COPIED_FROM_P1_FLAG
+            copy = files.copied_from_p1.get(f)
+        elif f in files.copied_from_p2:
+            copy = files.copied_from_p2.get(f)
+            flag |= COPIED_FROM_P2_FLAG
+        copy_idx = file_idx[copy]
+        chunks.append(INDEX_ENTRY.pack(flag, filename_length, copy_idx))
+    chunks.extend(all_files)
+    return {sidedatamod.SD_FILES: b''.join(chunks)}
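+
+# Round-trip sketch (hypothetical usage; assumes a populated ChangingFiles
+# instance exposing the ``added`` set used above):
+#
+#   files = ChangingFiles()
+#   files.mark_added(b'new-file')
+#   raw = encode_files_sidedata(files)
+#   assert b'new-file' in decode_files_sidedata(raw).added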
+
+
+def decode_files_sidedata(sidedata):
+    md = ChangingFiles()
+    raw = sidedata.get(sidedatamod.SD_FILES)
+
+    if raw is None:
+        return md
+
+    copies = []
+    all_files = []
+
+    assert len(raw) >= INDEX_HEADER.size
+    total_files = INDEX_HEADER.unpack_from(raw, 0)[0]
+
+    offset = INDEX_HEADER.size
+    file_offset_base = offset + (INDEX_ENTRY.size * total_files)
+    file_offset_last = file_offset_base
+
+    assert len(raw) >= file_offset_base
+
+    for idx in range(total_files):
+        flag, file_end, copy_idx = INDEX_ENTRY.unpack_from(raw, offset)
+        file_end += file_offset_base
+        filename = raw[file_offset_last:file_end]
+        filesize = file_end - file_offset_last
+        assert len(filename) == filesize
+        offset += INDEX_ENTRY.size
+        file_offset_last = file_end
+        all_files.append(filename)
+        if flag & ACTION_MASK == ADDED_FLAG:
+            md.mark_added(filename)
+        elif flag & ACTION_MASK == MERGED_FLAG:
+            md.mark_merged(filename)
+        elif flag & ACTION_MASK == REMOVED_FLAG:
+            md.mark_removed(filename)
+        elif flag & ACTION_MASK == SALVAGED_FLAG:
+            md.mark_salvaged(filename)
+        elif flag & ACTION_MASK == TOUCHED_FLAG:
+            md.mark_touched(filename)
+
+        copied = None
+        if flag & COPIED_MASK == COPIED_FROM_P1_FLAG:
+            copied = md.mark_copied_from_p1
+        elif flag & COPIED_MASK == COPIED_FROM_P2_FLAG:
+            copied = md.mark_copied_from_p2
+
+        if copied is not None:
+            copies.append((copied, filename, copy_idx))
+
+    for copied, filename, copy_idx in copies:
+        copied(all_files[copy_idx], filename)
+
+    return md
+
+
 def _getsidedata(srcrepo, rev):
     ctx = srcrepo[rev]
-    filescopies = computechangesetcopies(ctx)
-    filesadded = computechangesetfilesadded(ctx)
-    filesremoved = computechangesetfilesremoved(ctx)
-    sidedata = {}
-    if any([filescopies, filesadded, filesremoved]):
-        sortedfiles = sorted(ctx.files())
-        p1copies, p2copies = filescopies
-        p1copies = encodecopies(sortedfiles, p1copies)
-        p2copies = encodecopies(sortedfiles, p2copies)
-        filesadded = encodefileindices(sortedfiles, filesadded)
-        filesremoved = encodefileindices(sortedfiles, filesremoved)
-        if p1copies:
-            sidedata[sidedatamod.SD_P1COPIES] = p1copies
-        if p2copies:
-            sidedata[sidedatamod.SD_P2COPIES] = p2copies
-        if filesadded:
-            sidedata[sidedatamod.SD_FILESADDED] = filesadded
-        if filesremoved:
-            sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
-    return sidedata
+    files = compute_all_files_changes(ctx)
+    return encode_files_sidedata(files), files.has_copies_info
 
 
 def getsidedataadder(srcrepo, destrepo):
@@ -280,19 +881,23 @@
     staging = {}
 
     def sidedata_companion(revlog, rev):
-        sidedata = {}
+        data = {}, False
         if util.safehasattr(revlog, b'filteredrevs'):  # this is a changelog
             # Is the data previously shelved ?
-            sidedata = staging.pop(rev, None)
-            if sidedata is None:
+            data = staging.pop(rev, None)
+            if data is None:
                 # look at the queued result until we find the one we are
                 # looking for (shelve the other ones)
-                r, sidedata = sidedataq.get()
+                r, data = sidedataq.get()
                 while r != rev:
-                    staging[r] = sidedata
-                    r, sidedata = sidedataq.get()
+                    staging[r] = data
+                    r, data = sidedataq.get()
             tokens.release()
-        return False, (), sidedata
+        sidedata, has_copies_info = data
+        new_flag = 0
+        if has_copies_info:
+            new_flag = sidedataflag.REVIDX_HASCOPIESINFO
+        return False, (), sidedata, new_flag, 0
 
     return sidedata_companion
 
@@ -303,10 +908,14 @@
     It just compute it in the same thread on request"""
 
     def sidedatacompanion(revlog, rev):
-        sidedata = {}
+        sidedata, has_copies_info = {}, False
         if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
-            sidedata = _getsidedata(srcrepo, rev)
-        return False, (), sidedata
+            sidedata, has_copies_info = _getsidedata(srcrepo, rev)
+        new_flag = 0
+        if has_copies_info:
+            new_flag = sidedataflag.REVIDX_HASCOPIESINFO
+
+        return False, (), sidedata, new_flag, 0
 
     return sidedatacompanion
 
@@ -322,6 +931,6 @@
                     sidedatamod.SD_FILESADDED,
                     sidedatamod.SD_FILESREMOVED,
                 )
-        return False, f, {}
+        return False, f, {}, 0, sidedataflag.REVIDX_HASCOPIESINFO
 
     return sidedatacompanion
--- a/mercurial/narrowspec.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/narrowspec.py	Tue Oct 20 22:04:04 2020 +0530
@@ -9,12 +9,12 @@
 
 from .i18n import _
 from .pycompat import getattr
-from .interfaces import repository
 from . import (
     error,
     match as matchmod,
     merge,
     mergestate as mergestatemod,
+    requirements,
     scmutil,
     sparse,
     util,
@@ -186,7 +186,7 @@
 
 
 def savebackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     svfs = repo.svfs
     svfs.tryunlink(backupname)
@@ -194,13 +194,13 @@
 
 
 def restorebackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
 
 
 def savewcbackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     vfs = repo.vfs
     vfs.tryunlink(backupname)
@@ -212,7 +212,7 @@
 
 
 def restorewcbackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     # It may not exist in old repos
     if repo.vfs.exists(backupname):
@@ -220,7 +220,7 @@
 
 
 def clearwcbackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     repo.vfs.tryunlink(backupname)
 
@@ -272,15 +272,19 @@
 
 
 def _writeaddedfiles(repo, pctx, files):
-    actions = merge.emptyactions()
-    addgaction = actions[mergestatemod.ACTION_GET].append
+    mresult = merge.mergeresult()
     mf = repo[b'.'].manifest()
     for f in files:
         if not repo.wvfs.exists(f):
-            addgaction((f, (mf.flags(f), False), b"narrowspec updated"))
+            mresult.addfile(
+                f,
+                mergestatemod.ACTION_GET,
+                (mf.flags(f), False),
+                b"narrowspec updated",
+            )
     merge.applyupdates(
         repo,
-        actions,
+        mresult,
         wctx=repo[None],
         mctx=repo[b'.'],
         overwrite=False,
--- a/mercurial/obsolete.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/obsolete.py	Tue Oct 20 22:04:04 2020 +0530
@@ -328,7 +328,7 @@
 #
 # - remaining bytes: the metadata, each (key, value) pair after the other.
 _fm1version = 1
-_fm1fixed = b'>IdhHBBB20s'
+_fm1fixed = b'>IdhHBBB'
 _fm1nodesha1 = b'20s'
 _fm1nodesha256 = b'32s'
 _fm1nodesha1size = _calcsize(_fm1nodesha1)
@@ -360,48 +360,36 @@
     while off < stop:
         # read fixed part
         o1 = off + fsize
-        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
+        t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])
 
         if flags & sha2flag:
-            # FIXME: prec was read as a SHA1, needs to be amended
+            nodefmt = sha2fmt
+            nodesize = sha2size
+        else:
+            nodefmt = sha1fmt
+            nodesize = sha1size
 
-            # read 0 or more successors
-            if numsuc == 1:
-                o2 = o1 + sha2size
-                sucs = (data[o1:o2],)
-            else:
-                o2 = o1 + sha2size * numsuc
-                sucs = unpack(sha2fmt * numsuc, data[o1:o2])
+        (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
+        o1 += nodesize
 
-            # read parents
-            if numpar == noneflag:
-                o3 = o2
-                parents = None
-            elif numpar == 1:
-                o3 = o2 + sha2size
-                parents = (data[o2:o3],)
-            else:
-                o3 = o2 + sha2size * numpar
-                parents = unpack(sha2fmt * numpar, data[o2:o3])
+        # read 0 or more successors
+        if numsuc == 1:
+            o2 = o1 + nodesize
+            sucs = (data[o1:o2],)
         else:
-            # read 0 or more successors
-            if numsuc == 1:
-                o2 = o1 + sha1size
-                sucs = (data[o1:o2],)
-            else:
-                o2 = o1 + sha1size * numsuc
-                sucs = unpack(sha1fmt * numsuc, data[o1:o2])
+            o2 = o1 + nodesize * numsuc
+            sucs = unpack(nodefmt * numsuc, data[o1:o2])
 
-            # read parents
-            if numpar == noneflag:
-                o3 = o2
-                parents = None
-            elif numpar == 1:
-                o3 = o2 + sha1size
-                parents = (data[o2:o3],)
-            else:
-                o3 = o2 + sha1size * numpar
-                parents = unpack(sha1fmt * numpar, data[o2:o3])
+        # read parents
+        if numpar == noneflag:
+            o3 = o2
+            parents = None
+        elif numpar == 1:
+            o3 = o2 + nodesize
+            parents = (data[o2:o3],)
+        else:
+            o3 = o2 + nodesize * numpar
+            parents = unpack(nodefmt * numpar, data[o2:o3])
 
         # read metadata
         off = o3 + metasize * nummeta
@@ -423,7 +411,7 @@
     if flags & usingsha256:
         _fm1node = _fm1nodesha256
     numsuc = len(sucs)
-    numextranodes = numsuc
+    numextranodes = 1 + numsuc
     if parents is None:
         numpar = _fm1parentnone
     else:
@@ -624,6 +612,7 @@
         return True if a new marker has been added, False if the markers
         already existed (no op).
         """
+        flag = int(flag)
         if metadata is None:
             metadata = {}
         if date is None:
@@ -636,11 +625,18 @@
                     date = dateutil.makedate()
             else:
                 date = dateutil.makedate()
-        if len(prec) != 20:
-            raise ValueError(prec)
-        for succ in succs:
-            if len(succ) != 20:
-                raise ValueError(succ)
+        if flag & usingsha256:
+            if len(prec) != 32:
+                raise ValueError(prec)
+            for succ in succs:
+                if len(succ) != 32:
+                    raise ValueError(succ)
+        else:
+            if len(prec) != 20:
+                raise ValueError(prec)
+            for succ in succs:
+                if len(succ) != 20:
+                    raise ValueError(succ)
         if prec in succs:
             raise ValueError(
                 'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
@@ -659,7 +655,7 @@
                     % (pycompat.bytestr(k), pycompat.bytestr(v))
                 )
 
-        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
+        marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
         return bool(self.add(transaction, [marker]))
 
     def add(self, transaction, markers):
--- a/mercurial/phases.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/phases.py	Tue Oct 20 22:04:04 2020 +0530
@@ -121,6 +121,7 @@
 from . import (
     error,
     pycompat,
+    requirements,
     smartset,
     txnutil,
     util,
@@ -154,7 +155,7 @@
 
 def supportinternal(repo):
     """True if the internal phase can be used on a repository"""
-    return b'internal-phase' in repo.requirements
+    return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
 
 
 def _readroots(repo, phasedefaults=None):
@@ -281,26 +282,28 @@
     while low < high:
         mid = (low + high) // 2
         revs = data[mid][0]
+        revs_low = revs[0]
+        revs_high = revs[-1]
 
-        if rev in revs:
+        if rev >= revs_low and rev <= revs_high:
             _sortedrange_split(data, mid, rev, t)
             return
 
-        if revs[0] == rev + 1:
+        if revs_low == rev + 1:
             if mid and data[mid - 1][0][-1] == rev:
                 _sortedrange_split(data, mid - 1, rev, t)
             else:
                 _sortedrange_insert(data, mid, rev, t)
             return
 
-        if revs[-1] == rev - 1:
+        if revs_high == rev - 1:
             if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
                 _sortedrange_split(data, mid + 1, rev, t)
             else:
                 _sortedrange_insert(data, mid + 1, rev, t)
             return
 
-        if revs[0] > rev:
+        if revs_low > rev:
             high = mid
         else:
             low = mid + 1
--- a/mercurial/posix.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/posix.py	Tue Oct 20 22:04:04 2020 +0530
@@ -144,26 +144,24 @@
     if l:
         if not stat.S_ISLNK(s):
             # switch file to link
-            fp = open(f, b'rb')
-            data = fp.read()
-            fp.close()
+            with open(f, b'rb') as fp:
+                data = fp.read()
             unlink(f)
             try:
                 os.symlink(data, f)
             except OSError:
                 # failed to make a link, rewrite file
-                fp = open(f, b"wb")
-                fp.write(data)
-                fp.close()
+                with open(f, b"wb") as fp:
+                    fp.write(data)
+
         # no chmod needed at this point
         return
     if stat.S_ISLNK(s):
         # switch link to file
         data = os.readlink(f)
         unlink(f)
-        fp = open(f, b"wb")
-        fp.write(data)
-        fp.close()
+        with open(f, b"wb") as fp:
+            fp.write(data)
         s = 0o666 & ~umask  # avoid restatting for chmod
 
     sx = s & 0o100
@@ -766,10 +764,14 @@
     # platforms (see sys/un.h)
     dirname, basename = os.path.split(path)
     bakwdfd = None
-    if dirname:
-        bakwdfd = os.open(b'.', os.O_DIRECTORY)
-        os.chdir(dirname)
-    sock.bind(basename)
-    if bakwdfd:
-        os.fchdir(bakwdfd)
-        os.close(bakwdfd)
+
+    try:
+        if dirname:
+            bakwdfd = os.open(b'.', os.O_DIRECTORY)
+            os.chdir(dirname)
+        sock.bind(basename)
+        if bakwdfd:
+            os.fchdir(bakwdfd)
+    finally:
+        if bakwdfd:
+            os.close(bakwdfd)
--- a/mercurial/pure/parsers.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/pure/parsers.py	Tue Oct 20 22:04:04 2020 +0530
@@ -37,6 +37,7 @@
 indexfirst = struct.calcsize(b'Q')
 sizeint = struct.calcsize(b'i')
 indexsize = struct.calcsize(indexformatng)
+nullitem = (0, 0, 0, -1, -1, -1, -1, nullid)
 
 
 def gettype(q):
@@ -103,7 +104,7 @@
 
     def __getitem__(self, i):
         if i == -1:
-            return (0, 0, 0, -1, -1, -1, -1, nullid)
+            return nullitem
         self._check_index(i)
         if i >= self._lgt:
             return self._extra[i - self._lgt]
--- a/mercurial/pycompat.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/pycompat.py	Tue Oct 20 22:04:04 2020 +0530
@@ -506,7 +506,19 @@
     return tempfile.mkstemp(suffix, prefix, dir)
 
 
-# mode must include 'b'ytes as encoding= is not supported
+# TemporaryFile does not support an "encoding=" argument on python2.
+# This wrapper ensures the file is always opened in byte mode.
+def unnamedtempfile(mode=None, *args, **kwargs):
+    if mode is None:
+        mode = b'w+b'
+    else:
+        mode = sysstr(mode)
+    assert 'b' in mode
+    return tempfile.TemporaryFile(mode, *args, **kwargs)
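+
+# e.g. (sketch):
+#
+#   with unnamedtempfile() as fp:
+#       fp.write(b'data')
+#
+# the default mode is 'w+b'; passing a mode without 'b' trips the assertion
+# above.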
+
+
+# NamedTemporaryFile does not support an "encoding=" argument on python2.
+# This wrapper ensures the file is always opened in byte mode.
 def namedtempfile(
     mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, delete=True
 ):
--- a/mercurial/registrar.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/registrar.py	Tue Oct 20 22:04:04 2020 +0530
@@ -121,7 +121,7 @@
         return self._docformat % (decl, doc)
 
     def _extrasetup(self, name, func):
-        """Execute exra setup for registered function, if needed
+        """Execute extra setup for registered function, if needed
         """
 
 
--- a/mercurial/repair.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/repair.py	Tue Oct 20 22:04:04 2020 +0530
@@ -26,6 +26,8 @@
     pathutil,
     phases,
     pycompat,
+    requirements,
+    scmutil,
     util,
 )
 from .utils import (
@@ -418,7 +420,7 @@
 
 def manifestrevlogs(repo):
     yield repo.manifestlog.getstorage(b'')
-    if b'treemanifest' in repo.requirements:
+    if scmutil.istreemanifest(repo):
         # This logic is safe if treemanifest isn't enabled, but also
         # pointless, so we skip it if treemanifest isn't enabled.
         for unencoded, encoded, size in repo.store.datafiles():
@@ -476,7 +478,7 @@
 
         progress.complete()
 
-        if b'treemanifest' in repo.requirements:
+        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             # This logic is safe if treemanifest isn't enabled, but also
             # pointless, so we skip it if treemanifest isn't enabled.
             for dir in pathutil.dirs(seenfiles):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/requirements.py	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,75 @@
+# requirements.py - objects and functions related to repository requirements
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+NARROW_REQUIREMENT = b'narrowhg-experimental'
+
+# Enables sparse working directory usage
+SPARSE_REQUIREMENT = b'exp-sparse'
+
+# Enables the internal phase which is used to hide changesets instead
+# of stripping them
+INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
+
+# Stores manifest in Tree structure
+TREEMANIFEST_REQUIREMENT = b'treemanifest'
+
+# Increment the sub-version when the revlog v2 format changes to lock out old
+# clients.
+REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
+
+# A repository with the sparserevlog feature will have delta chains that
+# can spread over a larger span. Sparse reading cuts these large spans into
+# pieces, so that each piece isn't too big.
+# Without the sparserevlog capability, reading from the repository could use
+# huge amounts of memory, because the whole span would be read at once,
+# including all the intermediate revisions that aren't pertinent for the chain.
+# This is why once a repository has enabled sparse-read, it becomes required.
+SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
+
+# A repository with the sidedataflag requirement will allow storing extra
+# information for revisions without altering their original hashes.
+SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
+
+# A repository with the copies-sidedata-changeset requirement will store
+# copies-related information in the changeset's sidedata.
+COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
+
+# The repository uses a persistent nodemap for the changelog and the manifest.
+NODEMAP_REQUIREMENT = b'persistent-nodemap'
+
+# Denotes that the current repository is a share
+SHARED_REQUIREMENT = b'shared'
+
+# Denotes that current repository is a share and the shared source path is
+# relative to the current repository root path
+RELATIVE_SHARED_REQUIREMENT = b'relshared'
+
+# A repository with share implemented safely. The repository has different
+# store and working copy requirements i.e. both `.hg/requires` and
+# `.hg/store/requires` are present.
+SHARESAFE_REQUIREMENT = b'exp-sharesafe'
+
+# List of requirements which are working directory specific
+# These requirements cannot be shared between repositories if they
+# share the same store
+# * sparse is a working directory specific functionality and hence working
+#   directory specific requirement
+# * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
+#   represent that the current working copy/repository shares the store of
+#   another repo. Hence both of them should be stored in the working copy
+# * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that
+#   the rest of the requirements are stored in the store's requires
+WORKING_DIR_REQUIREMENTS = {
+    SPARSE_REQUIREMENT,
+    SHARED_REQUIREMENT,
+    RELATIVE_SHARED_REQUIREMENT,
+    SHARESAFE_REQUIREMENT,
+}
--- a/mercurial/revlog.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/revlog.py	Tue Oct 20 22:04:04 2020 +0530
@@ -53,6 +53,7 @@
     REVIDX_ELLIPSIS,
     REVIDX_EXTSTORED,
     REVIDX_FLAGS_ORDER,
+    REVIDX_HASCOPIESINFO,
     REVIDX_ISCENSORED,
     REVIDX_RAWTEXT_CHANGING_FLAGS,
     REVIDX_SIDEDATA,
@@ -98,6 +99,7 @@
 REVIDX_ISCENSORED
 REVIDX_ELLIPSIS
 REVIDX_SIDEDATA
+REVIDX_HASCOPIESINFO
 REVIDX_EXTSTORED
 REVIDX_DEFAULT_FLAGS
 REVIDX_FLAGS_ORDER
@@ -2703,14 +2705,16 @@
 
             (srcrevlog, rev)
 
-        and return a triplet that control changes to sidedata content from the
+        and return a quintet that control changes to sidedata content from the
         old revision to the new clone result:
 
-            (dropall, filterout, update)
+            (dropall, filterout, update, new_flags, dropped_flags)
 
         * if `dropall` is True, all sidedata should be dropped
         * `filterout` is a set of sidedata keys that should be dropped
+        * `update` is a mapping of additional/new key -> value
+        * `new_flags` is a bitfield of new flags that the revision should get
+        * `dropped_flags` is a bitfield of flags that the revision should no
+          longer have
         """
         if deltareuse not in self.DELTAREUSEALL:
             raise ValueError(
@@ -2781,7 +2785,7 @@
             p2 = index[entry[6]][7]
             node = entry[7]
 
-            sidedataactions = (False, [], {})
+            sidedataactions = (False, [], {}, 0, 0)
             if sidedatacompanion is not None:
                 sidedataactions = sidedatacompanion(self, rev)
 
@@ -2790,7 +2794,11 @@
             cachedelta = None
             rawtext = None
             if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
-                dropall, filterout, update = sidedataactions
+                dropall = sidedataactions[0]
+                filterout = sidedataactions[1]
+                update = sidedataactions[2]
+                new_flags = sidedataactions[3]
+                dropped_flags = sidedataactions[4]
                 text, sidedata = self._revisiondata(rev)
                 if dropall:
                     sidedata = {}
@@ -2799,6 +2807,10 @@
                 sidedata.update(update)
                 if not sidedata:
                     sidedata = None
+
+                flags |= new_flags
+                flags &= ~dropped_flags
+
                 destrevlog.addrevision(
                     text,
                     tr,
--- a/mercurial/revlogutils/constants.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/revlogutils/constants.py	Tue Oct 20 22:04:04 2020 +0530
@@ -40,6 +40,8 @@
 REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
 # revision data contains extra metadata not part of the official digest
 REVIDX_SIDEDATA = repository.REVISION_FLAG_SIDEDATA
+# revision changes files in a way that could affect copy tracing.
+REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
 REVIDX_DEFAULT_FLAGS = 0
 # stable order in which flags need to be processed and their processors applied
 REVIDX_FLAGS_ORDER = [
@@ -47,6 +49,7 @@
     REVIDX_ELLIPSIS,
     REVIDX_EXTSTORED,
     REVIDX_SIDEDATA,
+    REVIDX_HASCOPIESINFO,
 ]
 
 # bitmark for flags that could cause rawdata content change
--- a/mercurial/revlogutils/flagutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/revlogutils/flagutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -15,6 +15,7 @@
     REVIDX_ELLIPSIS,
     REVIDX_EXTSTORED,
     REVIDX_FLAGS_ORDER,
+    REVIDX_HASCOPIESINFO,
     REVIDX_ISCENSORED,
     REVIDX_RAWTEXT_CHANGING_FLAGS,
     REVIDX_SIDEDATA,
@@ -28,6 +29,7 @@
 REVIDX_ELLIPSIS
 REVIDX_EXTSTORED
 REVIDX_SIDEDATA
+REVIDX_HASCOPIESINFO
 REVIDX_DEFAULT_FLAGS
 REVIDX_FLAGS_ORDER
 REVIDX_RAWTEXT_CHANGING_FLAGS
@@ -37,6 +39,7 @@
 # Store flag processors (cf. 'addflagprocessor()' to register)
 flagprocessors = {
     REVIDX_ISCENSORED: None,
+    REVIDX_HASCOPIESINFO: None,
 }
 
 
--- a/mercurial/revlogutils/sidedata.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/revlogutils/sidedata.py	Tue Oct 20 22:04:04 2020 +0530
@@ -53,6 +53,7 @@
 SD_P2COPIES = 9
 SD_FILESADDED = 10
 SD_FILESREMOVED = 11
+SD_FILES = 12
 
 # internal format constant
 SIDEDATA_HEADER = struct.Struct('>H')
--- a/mercurial/revset.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/revset.py	Tue Oct 20 22:04:04 2020 +0530
@@ -17,6 +17,7 @@
     diffutil,
     encoding,
     error,
+    grep as grepmod,
     hbisect,
     match as matchmod,
     node,
@@ -411,7 +412,7 @@
     """
     # i18n: "adds" is a keyword
     pat = getstring(x, _(b"adds requires a pattern"))
-    return checkstatus(repo, subset, pat, 1)
+    return checkstatus(repo, subset, pat, 'added')
 
 
 @predicate(b'ancestor(*changeset)', safe=True, weight=0.5)
@@ -681,12 +682,8 @@
 
 def checkstatus(repo, subset, pat, field):
     """Helper for status-related revsets (adds, removes, modifies).
-    The field parameter says which kind is desired:
-    0: modified
-    1: added
-    2: removed
+    The field parameter says which kind is desired:
+    'modified', 'added' or 'removed'.
     """
-    label = {0: 'modified', 1: 'added', 2: 'removed'}[field]
     hasset = matchmod.patkind(pat) == b'set'
 
     mcache = [None]
@@ -707,7 +704,7 @@
         else:
             if not any(m(f) for f in c.files()):
                 return False
-        files = getattr(repo.status(c.p1().node(), c.node()), label)
+        files = getattr(repo.status(c.p1().node(), c.node()), field)
         if fname is not None:
             if fname in files:
                 return True
@@ -715,7 +712,9 @@
             if any(m(f) for f in files):
                 return True
 
-    return subset.filter(matches, condrepr=(b'<status[%r] %r>', field, pat))
+    return subset.filter(
+        matches, condrepr=(b'<status.%s %r>', pycompat.sysbytes(field), pat)
+    )
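+
+# For example (a sketch): ``checkstatus(repo, subset, b'*.py', 'added')``
+# keeps only the revisions whose status against their first parent lists a
+# matching file under ``repo.status(...).added``.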
 
 
 def _children(repo, subset, parentset):
@@ -995,6 +994,45 @@
     )
 
 
+@predicate(b'diff(pattern)', weight=110)
+def diff(repo, subset, x):
+    """Search revision differences for when the pattern was added or removed.
+
+    The pattern may be a substring literal or a regular expression. See
+    :hg:`help revisions.patterns`.
+    """
+    args = getargsdict(x, b'diff', b'pattern')
+    if b'pattern' not in args:
+        # i18n: "diff" is a keyword
+        raise error.ParseError(_(b'diff takes at least 1 argument'))
+
+    pattern = getstring(args[b'pattern'], _(b'diff requires a string pattern'))
+    regexp = stringutil.substringregexp(pattern, re.M)
+
+    # TODO: add support for file pattern and --follow. For example,
+    # diff(pattern[, set]) where set may be file(pattern) or follow(pattern),
+    # and we'll eventually add a support for narrowing files by revset?
+    fmatch = matchmod.always()
+
+    def makefilematcher(ctx):
+        return fmatch
+
+    # TODO: search in a windowed way
+    searcher = grepmod.grepsearcher(repo.ui, repo, regexp, diff=True)
+
+    def testdiff(rev):
+        # consume the generator to discard revfiles/matches cache
+        found = False
+        for fn, ctx, pstates, states in searcher.searchfiles(
+            baseset([rev]), makefilematcher
+        ):
+            if next(grepmod.difflinestates(pstates, states), None):
+                found = True
+        return found
+
+    return subset.filter(testdiff, condrepr=(b'<diff %r>', pattern))
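+
+# Usage sketch: ``hg log -r 'diff("frobnicate")'`` runs grepsearcher over each
+# revision's diff and keeps a revision when difflinestates() reports the
+# pattern being added or removed in it.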
+
+
 @predicate(b'contentdivergent()', safe=True)
 def contentdivergent(repo, subset, x):
     """
@@ -1631,7 +1669,7 @@
     """
     # i18n: "modifies" is a keyword
     pat = getstring(x, _(b"modifies requires a pattern"))
-    return checkstatus(repo, subset, pat, 0)
+    return checkstatus(repo, subset, pat, 'modified')
 
 
 @predicate(b'named(namespace)')
@@ -2090,7 +2128,7 @@
     """
     # i18n: "removes" is a keyword
     pat = getstring(x, _(b"removes requires a pattern"))
-    return checkstatus(repo, subset, pat, 2)
+    return checkstatus(repo, subset, pat, 'removed')
 
 
 @predicate(b'rev(number)', safe=True)
@@ -2289,12 +2327,13 @@
 
 
 _sortkeyfuncs = {
-    b'rev': lambda c: c.rev(),
+    b'rev': scmutil.intrev,
     b'branch': lambda c: c.branch(),
     b'desc': lambda c: c.description(),
     b'user': lambda c: c.user(),
     b'author': lambda c: c.user(),
     b'date': lambda c: c.date()[0],
+    b'node': scmutil.binnode,
 }
 
 
@@ -2358,6 +2397,7 @@
     - ``user`` for user name (``author`` can be used as an alias),
     - ``date`` for the commit date
     - ``topo`` for a reverse topological sort
+    - ``node`` for the nodeid of the revision
 
     The ``topo`` sort order cannot be combined with other sort keys. This sort
     takes one optional argument, ``topo.firstbranch``, which takes a revset that
--- a/mercurial/rewriteutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/rewriteutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -7,16 +7,23 @@
 
 from __future__ import absolute_import
 
+import re
+
 from .i18n import _
 
 from . import (
     error,
     node,
     obsolete,
+    obsutil,
     revset,
+    scmutil,
 )
 
 
+NODE_RE = re.compile(br'\b[0-9a-f]{6,64}\b')
+
+
 def precheck(repo, revs, action=b'rewrite'):
     """check if revs can be rewritten
     action is used to control the error message.
@@ -70,3 +77,59 @@
             )
             % (command, empty_successor)
         )
+
+
+def update_hash_refs(repo, commitmsg, pending=None):
+    """Replace all obsolete commit hashes in the message with the current hash.
+
+    If the obsolete commit was split or is divergent, the hash is not replaced
+    as there's no way to know which successor to choose.
+
+    For commands that update a series of commits in the current transaction, the
+    new obsolete markers can be considered by setting ``pending`` to a mapping
+    of ``pending[oldnode] = [successor_node1, successor_node2,..]``.
+    """
+    if not pending:
+        pending = {}
+    cache = {}
+    hashes = re.findall(NODE_RE, commitmsg)
+    unfi = repo.unfiltered()
+    for h in hashes:
+        fullnode = scmutil.resolvehexnodeidprefix(unfi, h)
+        if fullnode is None:
+            continue
+        ctx = unfi[fullnode]
+        if not ctx.obsolete():
+            successors = pending.get(fullnode)
+            if successors is None:
+                continue
+            # obsutil.successorssets() returns a list of list of nodes
+            successors = [successors]
+        else:
+            successors = obsutil.successorssets(repo, ctx.node(), cache=cache)
+
+        # We can't make any assumptions about how to update the hash if the
+        # cset in question was split or diverged.
+        if len(successors) == 1 and len(successors[0]) == 1:
+            successor = successors[0][0]
+            if successor is not None:
+                newhash = node.hex(successor)
+                commitmsg = commitmsg.replace(h, newhash[: len(h)])
+            else:
+                repo.ui.note(
+                    _(
+                        b'The stale commit message reference to %s could '
+                        b'not be updated\n(The referenced commit was dropped)\n'
+                    )
+                    % h
+                )
+        else:
+            repo.ui.note(
+                _(
+                    b'The stale commit message reference to %s could '
+                    b'not be updated\n'
+                )
+                % h
+            )
+
+    return commitmsg
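+
+# Illustrative call (hypothetical hashes): if ``abcdef123456`` was amended
+# into ``fedcba654321``, then
+#
+#   update_hash_refs(repo, b'see abcdef123456')
+#
+# returns b'see fedcba654321', replacing the reference at its original prefix
+# length.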
--- a/mercurial/scmutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/scmutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -38,6 +38,7 @@
     phases,
     policy,
     pycompat,
+    requirements as requirementsmod,
     revsetlang,
     similar,
     smartset,
@@ -215,7 +216,7 @@
     except error.WdirUnsupported:
         ui.error(_(b"abort: working directory revision cannot be specified\n"))
     except error.Abort as inst:
-        ui.error(_(b"abort: %s\n") % inst)
+        ui.error(_(b"abort: %s\n") % inst.message)
         if inst.hint:
             ui.error(_(b"(%s)\n") % inst.hint)
     except ImportError as inst:
@@ -363,13 +364,15 @@
     cl = repo.changelog
     if not cl.filteredrevs:
         return None
-    key = None
-    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
-    if revs:
-        s = hashutil.sha1()
-        for rev in revs:
-            s.update(b'%d;' % rev)
-        key = s.digest()
+    key = cl._filteredrevs_hashcache.get(maxrev)
+    if not key:
+        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
+        if revs:
+            s = hashutil.sha1()
+            for rev in revs:
+                s.update(b'%d;' % rev)
+            key = s.digest()
+            cl._filteredrevs_hashcache[maxrev] = key
     return key
 
 
@@ -757,6 +760,55 @@
     return repo.anyrevs(allspecs, user=True, localalias=localalias)
 
 
+def increasingwindows(windowsize=8, sizelimit=512):
+    while True:
+        yield windowsize
+        if windowsize < sizelimit:
+            windowsize *= 2
+
+
+def walkchangerevs(repo, revs, makefilematcher, prepare):
+    '''Iterate over files and the revs in a "windowed" way.
+
+    Callers most commonly need to iterate backwards over the history
+    in which they are interested. Doing so has awful (quadratic-looking)
+    performance, so we use iterators in a "windowed" way.
+
+    We walk a window of revisions in the desired order.  Within the
+    window, we first walk forwards to gather data, then in the desired
+    order (usually backwards) to display it.
+
+    This function returns an iterator yielding contexts. Before
+    yielding each context, the iterator will first call the prepare
+    function on each context in the window in forward order.'''
+
+    if not revs:
+        return []
+    change = repo.__getitem__
+
+    def iterate():
+        it = iter(revs)
+        stopiteration = False
+        for windowsize in increasingwindows():
+            nrevs = []
+            for i in pycompat.xrange(windowsize):
+                rev = next(it, None)
+                if rev is None:
+                    stopiteration = True
+                    break
+                nrevs.append(rev)
+            for rev in sorted(nrevs):
+                ctx = change(rev)
+                prepare(ctx, makefilematcher(ctx))
+            for rev in nrevs:
+                yield change(rev)
+
+            if stopiteration:
+                break
+
+    return iterate()
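+
+# Usage sketch (hypothetical caller): window sizes grow 8, 16, 32, ... up to
+# 512, so a loop such as
+#
+#   for ctx in walkchangerevs(repo, revs, lambda ctx: match, prepare):
+#       show(ctx)
+#
+# gathers data forward within each window while still yielding revisions in
+# the caller's requested (usually backward) order.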
+
+
 def meaningfulparents(repo, ctx):
     """Return list of meaningful (or all if debug) parentrevs for rev.
 
@@ -1470,11 +1522,39 @@
     repo._quick_access_changeid_invalidate()
 
 
+def filterrequirements(requirements):
+    """ filters the requirements into two sets:
+
+    wcreq: requirements which should be written in .hg/requires
+    storereq: which should be written in .hg/store/requires
+
+    Returns (wcreq, storereq)
+    """
+    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
+        wc, store = set(), set()
+        for r in requirements:
+            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
+                wc.add(r)
+            else:
+                store.add(r)
+        return wc, store
+    return requirements, None
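+
+# For example (a sketch; set contents chosen for illustration):
+#
+#   filterrequirements({b'exp-sharesafe', b'shared', b'store'})
+#
+# returns ({b'exp-sharesafe', b'shared'}, {b'store'}), while without
+# SHARESAFE_REQUIREMENT present it returns (requirements, None) and everything
+# is written to .hg/requires.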
+
+
+def istreemanifest(repo):
+    """ returns whether the repository is using treemanifest or not """
+    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
+
+
 def writereporequirements(repo, requirements=None):
     """ writes requirements for the repo to .hg/requires """
     if requirements:
         repo.requirements = requirements
-    writerequires(repo.vfs, repo.requirements)
+    wcreq, storereq = filterrequirements(repo.requirements)
+    if wcreq is not None:
+        writerequires(repo.vfs, wcreq)
+    if storereq is not None:
+        writerequires(repo.svfs, storereq)
 
 
 def writerequires(opener, requirements):
@@ -1711,29 +1791,6 @@
     return data
 
 
-def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
-    if lock is None:
-        raise error.LockInheritanceContractViolation(
-            b'lock can only be inherited while held'
-        )
-    if environ is None:
-        environ = {}
-    with lock.inherit() as locker:
-        environ[envvar] = locker
-        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
-
-
-def wlocksub(repo, cmd, *args, **kwargs):
-    """run cmd as a subprocess that allows inheriting repo's wlock
-
-    This can only be called while the wlock is held. This takes all the
-    arguments that ui.system does, and returns the exit code of the
-    subprocess."""
-    return _locksub(
-        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
-    )
-
-
 class progress(object):
     def __init__(self, ui, updatebar, topic, unit=b"", total=None):
         self.ui = ui
--- a/mercurial/shelve.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/shelve.py	Tue Oct 20 22:04:04 2020 +0530
@@ -772,7 +772,7 @@
     with ui.configoverride({(b'ui', b'quiet'): True}):
         hg.update(repo, wctx.node())
         ui.pushbuffer(True)
-        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents())
+        cmdutil.revert(ui, repo, shelvectx)
         ui.popbuffer()
 
 
@@ -839,7 +839,7 @@
             state.nodestoremove.append(newnode)
             shelvectx = repo[newnode]
 
-        hg.updaterepo(repo, pendingctx.node(), overwrite=False)
+        merge.update(pendingctx)
         mergefiles(ui, repo, state.wctx, shelvectx)
         restorebranch(ui, repo, state.branchtorestore)
 
@@ -1031,7 +1031,7 @@
             ui.status(msg)
         else:
             shelvectx = repo[newnode]
-            hg.updaterepo(repo, tmpwctx.node(), False)
+            merge.update(tmpwctx)
 
     return shelvectx, ispartialunshelve
 
--- a/mercurial/sparse.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/sparse.py	Tue Oct 20 22:04:04 2020 +0530
@@ -21,11 +21,13 @@
     mergestate as mergestatemod,
     pathutil,
     pycompat,
+    requirements,
     scmutil,
     util,
 )
 from .utils import hashutil
 
+
 # Whether sparse features are enabled. This variable is intended to be
 # temporary to facilitate porting sparse to core. It should eventually be
 # a per-repo option, possibly a repo requirement.
@@ -269,19 +271,17 @@
 
     sparsematch = matcher(repo, includetemp=False)
     dirstate = repo.dirstate
-    actions = []
+    mresult = mergemod.mergeresult()
     dropped = []
     tempincludes = readtemporaryincludes(repo)
     for file in tempincludes:
         if file in dirstate and not sparsematch(file):
             message = _(b'dropping temporarily included sparse files')
-            actions.append((file, None, message))
+            mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
             dropped.append(file)
 
-    typeactions = mergemod.emptyactions()
-    typeactions[b'r'] = actions
     mergemod.applyupdates(
-        repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+        repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
     )
 
     # Fix dirstate
@@ -366,16 +366,16 @@
     return result
 
 
-def filterupdatesactions(repo, wctx, mctx, branchmerge, actions):
+def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
     """Filter updates to only lay out files that match the sparse rules."""
     if not enabled:
-        return actions
+        return
 
     oldrevs = [pctx.rev() for pctx in wctx.parents()]
     oldsparsematch = matcher(repo, oldrevs)
 
     if oldsparsematch.always():
-        return actions
+        return
 
     files = set()
     prunedactions = {}
@@ -390,23 +390,29 @@
         sparsematch = matcher(repo, [mctx.rev()])
 
     temporaryfiles = []
-    for file, action in pycompat.iteritems(actions):
+    for file, action in mresult.filemap():
         type, args, msg = action
         files.add(file)
         if sparsematch(file):
             prunedactions[file] = action
-        elif type == b'm':
+        elif type == mergestatemod.ACTION_MERGE:
             temporaryfiles.append(file)
             prunedactions[file] = action
         elif branchmerge:
-            if type != b'k':
+            if type not in mergestatemod.NO_OP_ACTIONS:
                 temporaryfiles.append(file)
                 prunedactions[file] = action
-        elif type == b'f':
+        elif type == mergestatemod.ACTION_FORGET:
             prunedactions[file] = action
         elif file in wctx:
-            prunedactions[file] = (b'r', args, msg)
+            prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
 
+        # In case of a rename on one side, it is possible that f1 is not
+        # present in the sparse checkout, so we should include it.
+        # TODO: should we do the same for f2?
+        # This exists as a separate check because the file can be in the
+        # sparse checkout, and hence clubbing this condition into the
+        # `elif type == ACTION_MERGE` above would not trigger it.
         if branchmerge and type == mergestatemod.ACTION_MERGE:
             f1, f2, fa, move, anc = args
             if not sparsematch(f1):
@@ -423,22 +429,25 @@
         addtemporaryincludes(repo, temporaryfiles)
 
         # Add the new files to the working copy so they can be merged, etc
-        actions = []
+        tmresult = mergemod.mergeresult()
         message = b'temporarily adding to sparse checkout'
         wctxmanifest = repo[None].manifest()
         for file in temporaryfiles:
             if file in wctxmanifest:
                 fctx = repo[None][file]
-                actions.append((file, (fctx.flags(), False), message))
+                tmresult.addfile(
+                    file,
+                    mergestatemod.ACTION_GET,
+                    (fctx.flags(), False),
+                    message,
+                )
 
-        typeactions = mergemod.emptyactions()
-        typeactions[b'g'] = actions
         mergemod.applyupdates(
-            repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+            repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
         )
 
         dirstate = repo.dirstate
-        for file, flags, msg in actions:
+        for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
             dirstate.normal(file)
 
     profiles = activeconfig(repo)[2]
@@ -453,11 +462,15 @@
             new = sparsematch(file)
             if not old and new:
                 flags = mf.flags(file)
-                prunedactions[file] = (b'g', (flags, False), b'')
+                prunedactions[file] = (
+                    mergestatemod.ACTION_GET,
+                    (flags, False),
+                    b'',
+                )
             elif old and not new:
-                prunedactions[file] = (b'r', [], b'')
+                prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
 
-    return prunedactions
+    mresult.setactions(prunedactions)
 
 
 def refreshwdir(repo, origstatus, origsparsematch, force=False):
@@ -487,7 +500,7 @@
             _(b'could not update sparseness due to pending changes')
         )
 
-    # Calculate actions
+    # Calculate merge result
     dirstate = repo.dirstate
     ctx = repo[b'.']
     added = []
@@ -495,8 +508,7 @@
     dropped = []
     mf = ctx.manifest()
     files = set(mf)
-
-    actions = {}
+    mresult = mergemod.mergeresult()
 
     for file in files:
         old = origsparsematch(file)
@@ -506,17 +518,19 @@
         if (new and not old) or (old and new and not file in dirstate):
             fl = mf.flags(file)
             if repo.wvfs.exists(file):
-                actions[file] = (b'e', (fl,), b'')
+                mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
                 lookup.append(file)
             else:
-                actions[file] = (b'g', (fl, False), b'')
+                mresult.addfile(
+                    file, mergestatemod.ACTION_GET, (fl, False), b''
+                )
                 added.append(file)
         # Drop files that are newly excluded, or that still exist in
         # the dirstate.
         elif (old and not new) or (not old and not new and file in dirstate):
             dropped.append(file)
             if file not in pending:
-                actions[file] = (b'r', [], b'')
+                mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
 
     # Verify there are no pending changes in newly included files
     abort = False
@@ -540,13 +554,8 @@
             if old and not new:
                 dropped.append(file)
 
-    # Apply changes to disk
-    typeactions = mergemod.emptyactions()
-    for f, (m, args, msg) in pycompat.iteritems(actions):
-        typeactions[m].append((f, args, msg))
-
     mergemod.applyupdates(
-        repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+        repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
     )
 
     # Fix dirstate
@@ -599,11 +608,11 @@
     # updated. But this requires massive rework to matcher() and its
     # consumers.
 
-    if b'exp-sparse' in oldrequires and removing:
-        repo.requirements.discard(b'exp-sparse')
+    if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
+        repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
         scmutil.writereporequirements(repo)
-    elif b'exp-sparse' not in oldrequires:
-        repo.requirements.add(b'exp-sparse')
+    elif requirements.SPARSE_REQUIREMENT not in oldrequires:
+        repo.requirements.add(requirements.SPARSE_REQUIREMENT)
         scmutil.writereporequirements(repo)
 
     try:
--- a/mercurial/state.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/state.py	Tue Oct 20 22:04:04 2020 +0530
@@ -164,10 +164,17 @@
         operation
         """
         if not self._cmdhint:
-            return _(b"use 'hg %s --continue' or 'hg %s --abort'") % (
-                self._opname,
-                self._opname,
-            )
+            if not self._stopflag:
+                return _(b"use 'hg %s --continue' or 'hg %s --abort'") % (
+                    self._opname,
+                    self._opname,
+                )
+            else:
+                return _(
+                    b"use 'hg %s --continue', 'hg %s --abort', "
+                    b"or 'hg %s --stop'"
+                ) % (self._opname, self._opname, self._opname,)
+
         return self._cmdhint
 
     def msg(self):
--- a/mercurial/statichttprepo.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/statichttprepo.py	Tue Oct 20 22:04:04 2020 +0530
@@ -238,7 +238,12 @@
         )
 
     def lock(self, wait=True):
-        raise error.Abort(_(b'cannot lock static-http repository'))
+        raise error.LockUnavailable(
+            0,
+            _(b'lock not available'),
+            b'lock',
+            _(b'cannot lock static-http repository'),
+        )
 
     def _writecaches(self):
         pass  # statichttprepository are read only
--- a/mercurial/store.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/store.py	Tue Oct 20 22:04:04 2020 +0530
@@ -373,10 +373,19 @@
     return mode
 
 
-_data = (
-    b'bookmarks narrowspec data meta 00manifest.d 00manifest.i'
-    b' 00changelog.d 00changelog.i phaseroots obsstore'
-)
+_data = [
+    b'bookmarks',
+    b'narrowspec',
+    b'data',
+    b'meta',
+    b'00manifest.d',
+    b'00manifest.i',
+    b'00changelog.d',
+    b'00changelog.i',
+    b'phaseroots',
+    b'obsstore',
+    b'requires',
+]
 
 
 def isrevlog(f, kind, st):
@@ -447,7 +456,7 @@
             yield x
 
     def copylist(self):
-        return [b'requires'] + _data.split()
+        return _data
 
     def write(self, tr):
         pass
@@ -494,9 +503,7 @@
         return self.path + b'/' + encodefilename(f)
 
     def copylist(self):
-        return [b'requires', b'00changelog.i'] + [
-            b'store/' + f for f in _data.split()
-        ]
+        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
 
 
 class fncache(object):
@@ -686,12 +693,21 @@
 
     def copylist(self):
         d = (
-            b'bookmarks narrowspec data meta dh fncache phaseroots obsstore'
-            b' 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
+            b'bookmarks',
+            b'narrowspec',
+            b'data',
+            b'meta',
+            b'dh',
+            b'fncache',
+            b'phaseroots',
+            b'obsstore',
+            b'00manifest.d',
+            b'00manifest.i',
+            b'00changelog.d',
+            b'00changelog.i',
+            b'requires',
         )
-        return [b'requires', b'00changelog.i'] + [
-            b'store/' + f for f in d.split()
-        ]
+        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
 
     def write(self, tr):
         self.fncache.write(tr)
--- a/mercurial/subrepo.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/subrepo.py	Tue Oct 20 22:04:04 2020 +0530
@@ -25,6 +25,7 @@
     exchange,
     logcmdutil,
     match as matchmod,
+    merge as merge,
     node,
     pathutil,
     phases,
@@ -38,7 +39,6 @@
     dateutil,
     hashutil,
     procutil,
-    stringutil,
 )
 
 hg = None
@@ -83,9 +83,7 @@
         except error.Abort as ex:
             subrepo = subrelpath(self)
             errormsg = (
-                stringutil.forcebytestr(ex)
-                + b' '
-                + _(b'(in subrepository "%s")') % subrepo
+                ex.message + b' ' + _(b'(in subrepository "%s")') % subrepo
             )
             # avoid handling this exception by raising a SubrepoAbort exception
             raise SubrepoAbort(
@@ -783,7 +781,10 @@
                     % (revision[0:12], self._path)
                 )
                 repo = urepo
-        hg.updaterepo(repo, revision, overwrite)
+        if overwrite:
+            merge.clean_update(repo[revision])
+        else:
+            merge.update(repo[revision])
 
     @annotatesubrepoerror
     def merge(self, state):
@@ -986,12 +987,11 @@
 
     def filerevert(self, *pats, **opts):
         ctx = self._repo[opts['rev']]
-        parents = self._repo.dirstate.parents()
         if opts.get('all'):
             pats = [b'set:modified()']
         else:
             pats = []
-        cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
+        cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts)
 
     def shortid(self, revid):
         return revid[:12]
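
The subrepo hunk swaps the old hg.updaterepo(repo, rev, overwrite) wrapper for the two intent-revealing merge-module entry points. A sketch of the mapping, using the same names as the hunk:

    from mercurial import merge

    def update_subrepo(repo, revision, overwrite):
        ctx = repo[revision]
        if overwrite:
            merge.clean_update(ctx)  # discard local changes (update --clean)
        else:
            merge.update(ctx)        # normal, merging update
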
--- a/mercurial/tags.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/tags.py	Tue Oct 20 22:04:04 2020 +0530
@@ -838,7 +838,7 @@
         repo = self._repo
 
         try:
-            lock = repo.wlock(wait=False)
+            lock = repo.lock(wait=False)
         except error.LockError:
             repo.ui.log(
                 b'tagscache',
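
Switching the tags-cache writer from repo.wlock() to repo.lock() takes the store lock rather than the working-copy lock, presumably because the fnodes cache describes store-level data; wait=False keeps the write best-effort, falling through to the logged skip visible above. The pattern in isolation, with a hypothetical helper name:

    from mercurial import error

    def write_cache_best_effort(repo, write):
        try:
            lock = repo.lock(wait=False)  # store lock, non-blocking
        except error.LockError:
            repo.ui.log(b'tagscache', b'not writing cache: lock unavailable\n')
            return
        try:
            write()
        finally:
            lock.release()
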
--- a/mercurial/templatekw.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/templatekw.py	Tue Oct 20 22:04:04 2020 +0530
@@ -422,7 +422,7 @@
             from . import mergestate as mergestatemod
 
             mergestate = mergestatemod.mergestate.read(repo)
-            if mergestate.active():
+            if mergestate.unresolvedcount():
                 merge_nodes = (mergestate.local, mergestate.other)
             else:
                 merge_nodes = ()
@@ -712,21 +712,20 @@
     while also diverged into ctx3. (EXPERIMENTAL)"""
     repo = context.resource(mapping, b'repo')
     ctx = context.resource(mapping, b'ctx')
-    if not ctx.obsolete():
-        return b''
+    data = []
 
-    ssets = obsutil.successorssets(repo, ctx.node(), closest=True)
-    ssets = [[hex(n) for n in ss] for ss in ssets]
+    if ctx.obsolete():
+        ssets = obsutil.successorssets(repo, ctx.node(), closest=True)
+        ssets = [[hex(n) for n in ss] for ss in ssets]
 
-    data = []
-    for ss in ssets:
-        h = _hybrid(
-            None,
-            ss,
-            lambda x: {b'ctx': repo[x]},
-            lambda x: scmutil.formatchangeid(repo[x]),
-        )
-        data.append(h)
+        for ss in ssets:
+            h = _hybrid(
+                None,
+                ss,
+                lambda x: {b'ctx': repo[x]},
+                lambda x: scmutil.formatchangeid(repo[x]),
+            )
+            data.append(h)
 
     # Format the successorssets
     def render(d):
--- a/mercurial/templater.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/templater.py	Tue Oct 20 22:04:04 2020 +0530
@@ -800,10 +800,10 @@
 
 
 def stylelist():
-    paths = templatepaths()
-    if not paths:
+    path = templatedir()
+    if not path:
         return _(b'no templates found, try `hg debuginstall` for more info')
-    dirlist = os.listdir(paths[0])
+    dirlist = os.listdir(path)
     stylelist = []
     for file in dirlist:
         split = file.split(b".")
@@ -814,17 +814,46 @@
     return b", ".join(sorted(stylelist))
 
 
-def _readmapfile(mapfile):
+def _open_mapfile(mapfile):
+    if os.path.exists(mapfile):
+        return util.posixfile(mapfile, b'rb')
+    raise error.Abort(
+        _(b"style '%s' not found") % mapfile,
+        hint=_(b"available styles: %s") % stylelist(),
+    )
+
+
+def _readmapfile(fp, mapfile):
     """Load template elements from the given map file"""
-    if not os.path.exists(mapfile):
-        raise error.Abort(
-            _(b"style '%s' not found") % mapfile,
-            hint=_(b"available styles: %s") % stylelist(),
-        )
+    base = os.path.dirname(mapfile)
+    conf = config.config()
 
-    base = os.path.dirname(mapfile)
-    conf = config.config(includepaths=templatepaths())
-    conf.read(mapfile, remap={b'': b'templates'})
+    def include(rel, remap, sections):
+        subresource = None
+        if base:
+            abs = os.path.normpath(os.path.join(base, rel))
+            if os.path.isfile(abs):
+                subresource = util.posixfile(abs, b'rb')
+        if not subresource:
+            if pycompat.ossep not in rel:
+                abs = rel
+                subresource = resourceutil.open_resource(
+                    b'mercurial.templates', rel
+                )
+            else:
+                dir = templatedir()
+                if dir:
+                    abs = os.path.normpath(os.path.join(dir, rel))
+                    if os.path.isfile(abs):
+                        subresource = util.posixfile(abs, b'rb')
+        if subresource:
+            data = subresource.read()
+            conf.parse(
+                abs, data, sections=sections, remap=remap, include=include,
+            )
+
+    data = fp.read()
+    conf.parse(mapfile, data, remap={b'': b'templates'}, include=include)
 
     cache = {}
     tmap = {}
@@ -833,21 +862,22 @@
     val = conf.get(b'templates', b'__base__')
     if val and val[0] not in b"'\"":
         # treat as a pointer to a base class for this style
-        path = util.normpath(os.path.join(base, val))
+        path = os.path.normpath(os.path.join(base, val))
 
         # fallback check in template paths
         if not os.path.exists(path):
-            for p in templatepaths():
-                p2 = util.normpath(os.path.join(p, val))
+            dir = templatedir()
+            if dir is not None:
+                p2 = os.path.normpath(os.path.join(dir, val))
                 if os.path.isfile(p2):
                     path = p2
-                    break
-                p3 = util.normpath(os.path.join(p2, b"map"))
-                if os.path.isfile(p3):
-                    path = p3
-                    break
+                else:
+                    p3 = os.path.normpath(os.path.join(p2, b"map"))
+                    if os.path.isfile(p3):
+                        path = p3
 
-        cache, tmap, aliases = _readmapfile(path)
+        fp = _open_mapfile(path)
+        cache, tmap, aliases = _readmapfile(fp, path)
 
     for key, val in conf[b'templates'].items():
         if not val:
@@ -883,7 +913,8 @@
         """Get parsed tree for the given template name. Use a local cache."""
         if t not in self.cache:
             try:
-                self.cache[t] = util.readfile(self._map[t])
+                mapfile, fp = open_template(self._map[t])
+                self.cache[t] = fp.read()
             except KeyError as inst:
                 raise templateutil.TemplateNotFound(
                     _(b'"%s" not in template map') % inst.args[0]
@@ -975,6 +1006,7 @@
     def frommapfile(
         cls,
         mapfile,
+        fp=None,
         filters=None,
         defaults=None,
         resources=None,
@@ -984,7 +1016,9 @@
     ):
         """Create templater from the specified map file"""
         t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk)
-        cache, tmap, aliases = _readmapfile(mapfile)
+        if not fp:
+            fp = _open_mapfile(mapfile)
+        cache, tmap, aliases = _readmapfile(fp, mapfile)
         t._loader.cache.update(cache)
         t._loader._map = tmap
         t._loader._aliasmap = _aliasrules.buildmap(aliases)
@@ -1045,59 +1079,42 @@
         return stream
 
 
-def templatepaths():
-    '''return locations used for template files.'''
-    pathsrel = [b'templates']
-    paths = [
-        os.path.normpath(os.path.join(resourceutil.datapath, f))
-        for f in pathsrel
-    ]
-    return [p for p in paths if os.path.isdir(p)]
-
-
-def templatepath(name):
-    '''return location of template file. returns None if not found.'''
-    for p in templatepaths():
-        f = os.path.join(p, name)
-        if os.path.exists(f):
-            return f
-    return None
+def templatedir():
+    '''return the directory used for template files, or None.'''
+    path = os.path.normpath(os.path.join(resourceutil.datapath, b'templates'))
+    return path if os.path.isdir(path) else None
 
 
-def stylemap(styles, paths=None):
-    """Return path to mapfile for a given style.
+def open_template(name, templatepath=None):
+    '''returns the full path of the given template, and a file-like object for it
 
-    Searches mapfile in the following locations:
-    1. templatepath/style/map
-    2. templatepath/map-style
-    3. templatepath/map
-    """
-
-    if paths is None:
-        paths = templatepaths()
-    elif isinstance(paths, bytes):
-        paths = [paths]
-
-    if isinstance(styles, bytes):
-        styles = [styles]
+    If the name is a relative path and we're in a frozen binary, the template
+    will be read from the mercurial.templates package instead. The returned path
+    will then be the relative path.
+    '''
+    # Does the name point directly to a map file?
+    if os.path.isfile(name) or os.path.isabs(name):
+        return name, open(name, mode='rb')
 
-    for style in styles:
-        # only plain name is allowed to honor template paths
-        if (
-            not style
-            or style in (pycompat.oscurdir, pycompat.ospardir)
-            or pycompat.ossep in style
-            or pycompat.osaltsep
-            and pycompat.osaltsep in style
-        ):
-            continue
-        locations = [os.path.join(style, b'map'), b'map-' + style]
-        locations.append(b'map')
+    # Does the name point to a template in the provided templatepath, or
+    # in mercurial/templates/ if no path was provided?
+    if templatepath is None:
+        templatepath = templatedir()
+    if templatepath is not None:
+        f = os.path.join(templatepath, name)
+        return f, open(f, mode='rb')
 
-        for path in paths:
-            for location in locations:
-                mapfile = os.path.join(path, location)
-                if os.path.isfile(mapfile):
-                    return style, mapfile
+    # Otherwise try to read it using the resources API
+    name_parts = name.split(b'/')
+    package_name = b'.'.join([b'mercurial', b'templates'] + name_parts[:-1])
+    return (
+        name,
+        resourceutil.open_resource(package_name, name_parts[-1]),
+    )
 
-    raise RuntimeError(b"No hgweb templates found in %r" % paths)
+
+def try_open_template(name, templatepath=None):
+    try:
+        return open_template(name, templatepath)
+    except (EnvironmentError, ImportError):
+        return None, None
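
templatepaths() and stylemap() give way to a single templatedir() plus open_template(), which returns a (path, file object) pair and, for relative names, can fall back to the importlib-resources API so frozen builds (such as the PyOxidizer packaging used elsewhere in this release) need no template directory on disk. A usage sketch, assuming the functions defined above and one of the stock map files:

    from mercurial import templater

    # works from a filesystem template dir and, in frozen binaries,
    # from the bundled mercurial.templates package
    path, fp = templater.try_open_template(b'map-cmdline.default')
    if fp is not None:
        data = fp.read()
        fp.close()
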
--- a/mercurial/transaction.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/transaction.py	Tue Oct 20 22:04:04 2020 +0530
@@ -38,10 +38,8 @@
 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
-            raise error.Abort(
-                _(
-                    b'cannot use transaction when it is already committed/aborted'
-                )
+            raise error.ProgrammingError(
+                b'cannot use transaction when it is already committed/aborted'
             )
         return func(self, *args, **kwds)
 
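
Misusing a committed/aborted transaction is a bug in the calling code rather than a user-facing condition, so the @active guard now raises error.ProgrammingError, which is deliberately untranslated. A sketch of what a caller would trip over, assuming an open repo object; the function name is hypothetical:

    from mercurial import error

    def poke_finished_transaction(repo):
        tr = repo.transaction(b'demo')
        tr.close()
        try:
            tr.add(b'somefile', 0)  # any @active-guarded method after close()
        except error.ProgrammingError:
            pass  # an internal bug, deliberately not a translated user Abort
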
--- a/mercurial/upgrade.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/upgrade.py	Tue Oct 20 22:04:04 2020 +0530
@@ -20,6 +20,7 @@
     manifest,
     metadata,
     pycompat,
+    requirements,
     revlog,
     scmutil,
     util,
@@ -31,7 +32,7 @@
 # list of requirements that request a clone of all revlog if added/removed
 RECLONES_REQUIREMENTS = {
     b'generaldelta',
-    localrepo.SPARSEREVLOG_REQUIREMENT,
+    requirements.SPARSEREVLOG_REQUIREMENT,
 }
 
 
@@ -58,12 +59,12 @@
     return {
         # The upgrade code does not yet support these experimental features.
         # This is an artificial limitation.
-        b'treemanifest',
+        requirements.TREEMANIFEST_REQUIREMENT,
         # This was a precursor to generaldelta and was never enabled by default.
         # It should (hopefully) not exist in the wild.
         b'parentdelta',
         # Upgrade should operate on the actual store, not the shared link.
-        b'shared',
+        requirements.SHARED_REQUIREMENT,
     }
 
 
@@ -75,10 +76,10 @@
     to be allowed.
     """
     supported = {
-        localrepo.SPARSEREVLOG_REQUIREMENT,
-        localrepo.SIDEDATA_REQUIREMENT,
-        localrepo.COPIESSDC_REQUIREMENT,
-        localrepo.NODEMAP_REQUIREMENT,
+        requirements.SPARSEREVLOG_REQUIREMENT,
+        requirements.SIDEDATA_REQUIREMENT,
+        requirements.COPIESSDC_REQUIREMENT,
+        requirements.NODEMAP_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -103,10 +104,11 @@
         b'generaldelta',
         b'revlogv1',
         b'store',
-        localrepo.SPARSEREVLOG_REQUIREMENT,
-        localrepo.SIDEDATA_REQUIREMENT,
-        localrepo.COPIESSDC_REQUIREMENT,
-        localrepo.NODEMAP_REQUIREMENT,
+        requirements.SPARSEREVLOG_REQUIREMENT,
+        requirements.SIDEDATA_REQUIREMENT,
+        requirements.COPIESSDC_REQUIREMENT,
+        requirements.NODEMAP_REQUIREMENT,
+        requirements.SHARESAFE_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -131,10 +133,10 @@
         b'dotencode',
         b'fncache',
         b'generaldelta',
-        localrepo.SPARSEREVLOG_REQUIREMENT,
-        localrepo.SIDEDATA_REQUIREMENT,
-        localrepo.COPIESSDC_REQUIREMENT,
-        localrepo.NODEMAP_REQUIREMENT,
+        requirements.SPARSEREVLOG_REQUIREMENT,
+        requirements.SIDEDATA_REQUIREMENT,
+        requirements.COPIESSDC_REQUIREMENT,
+        requirements.NODEMAP_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -338,7 +340,7 @@
 class sparserevlog(requirementformatvariant):
     name = b'sparserevlog'
 
-    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
+    _requirement = requirements.SPARSEREVLOG_REQUIREMENT
 
     default = True
 
@@ -364,7 +366,7 @@
 class sidedata(requirementformatvariant):
     name = b'sidedata'
 
-    _requirement = localrepo.SIDEDATA_REQUIREMENT
+    _requirement = requirements.SIDEDATA_REQUIREMENT
 
     default = False
 
@@ -380,7 +382,7 @@
 class persistentnodemap(requirementformatvariant):
     name = b'persistent-nodemap'
 
-    _requirement = localrepo.NODEMAP_REQUIREMENT
+    _requirement = requirements.NODEMAP_REQUIREMENT
 
     default = False
 
@@ -395,7 +397,7 @@
 class copiessdc(requirementformatvariant):
     name = b'copies-sdc'
 
-    _requirement = localrepo.COPIESSDC_REQUIREMENT
+    _requirement = requirements.COPIESSDC_REQUIREMENT
 
     default = False
 
@@ -725,23 +727,26 @@
     sidedatacompanion = None
     removedreqs = srcrepo.requirements - dstrepo.requirements
     addedreqs = dstrepo.requirements - srcrepo.requirements
-    if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
+    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
 
         def sidedatacompanion(rl, rev):
             rl = getattr(rl, '_revlog', rl)
             if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
-                return True, (), {}
-            return False, (), {}
+                return True, (), {}, 0, 0
+            return False, (), {}, 0, 0
 
-    elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
+    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
         sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
-    elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
+    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
         sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
     return sidedatacompanion
 
 
 def matchrevlog(revlogfilter, entry):
-    """check is a revlog is selected for cloning
+    """check if a revlog is selected for cloning.
+
+    In other words, whether any updates need to be done on the revlog,
+    or whether it can be blindly copied.
 
     The store entry is checked against the passed filter"""
     if entry.endswith(b'00changelog.i'):
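
Two independent things happen in the upgrade.py hunks: the requirement constants move from localrepo into the new mercurial.requirements module, and sidedatacompanion callbacks grow from 3-tuples to 5-tuples. Only the arity is visible here; a plausible reading is that the two appended integers are revlog flag masks to set and to clear per revision, as in this sketch:

    def sidedatacompanion(rl, rev):
        strip_sidedata = False   # drop the revision's existing sidedata?
        keys_to_remove = ()      # sidedata keys to delete
        new_sidedata = {}        # sidedata entries to add or replace
        flags_to_add = 0         # revlog flag bits to set (assumed meaning)
        flags_to_remove = 0      # revlog flag bits to clear (assumed meaning)
        return (strip_sidedata, keys_to_remove, new_sidedata,
                flags_to_add, flags_to_remove)
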
--- a/mercurial/utils/resourceutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/utils/resourceutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -55,6 +55,8 @@
 
 
 try:
+    # importlib.resources exists from Python 3.7; see fallback in except clause
+    # further down
     from importlib import resources
 
     from .. import encoding
@@ -78,6 +80,8 @@
 
 
 except (ImportError, AttributeError):
+    # importlib.resources was not found (almost certainly because we're on a
+    # Python version before 3.7)
 
     def open_resource(package, name):
         path = os.path.join(_package_path(package), name)
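
The new comments pin down why the try/except exists: importlib.resources shipped with Python 3.7, and older interpreters fall through to a filesystem implementation like the one ending above. The probe pattern in isolation, as a sketch (hg's real version additionally handles bytes names and frozen-module quirks):

    import importlib
    import os

    try:
        from importlib import resources  # Python >= 3.7

        def open_resource(package, name):
            return resources.open_binary(package, name)

    except ImportError:
        def open_resource(package, name):  # Python < 3.7 fallback
            mod = importlib.import_module(package)
            path = os.path.join(os.path.dirname(mod.__file__), name)
            return open(path, 'rb')
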
--- a/mercurial/utils/storageutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/utils/storageutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -180,9 +180,9 @@
 
     ``fileid`` can be:
 
-    * A 20 byte binary node.
+    * A 20 or 32 byte binary node.
     * An integer revision number
-    * A 40 byte hex node.
+    * A 40 or 64 byte hex node.
     * A bytes that can be parsed as an integer representing a revision number.
 
     ``identifier`` is used to populate ``error.LookupError`` with an identifier
@@ -198,14 +198,14 @@
                 b'%d' % fileid, identifier, _(b'no match found')
             )
 
-    if len(fileid) == 20:
+    if len(fileid) in (20, 32):
         try:
             store.rev(fileid)
             return fileid
         except error.LookupError:
             pass
 
-    if len(fileid) == 40:
+    if len(fileid) in (40, 64):
         try:
             rawnode = bin(fileid)
             store.rev(rawnode)
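
Accepting 32- and 64-long identifiers alongside 20/40 is groundwork for SHA-256 hashing: a 32-byte binary digest hex-encodes to 64 characters, exactly as a 20-byte SHA-1 digest encodes to 40. A quick check:

    import hashlib

    sha1 = hashlib.sha1(b'x').digest()
    sha256 = hashlib.sha256(b'x').digest()
    assert (len(sha1), len(sha1.hex())) == (20, 40)
    assert (len(sha256), len(sha256.hex())) == (32, 64)
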
--- a/mercurial/utils/stringutil.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/utils/stringutil.py	Tue Oct 20 22:04:04 2020 +0530
@@ -307,6 +307,14 @@
     return bool(s and b'\0' in s)
 
 
+def _splitpattern(pattern):
+    if pattern.startswith(b're:'):
+        return b're', pattern[3:]
+    elif pattern.startswith(b'literal:'):
+        return b'literal', pattern[8:]
+    return b'literal', pattern
+
+
 def stringmatcher(pattern, casesensitive=True):
     """
     accepts a string, possibly starting with 're:' or 'literal:' prefix.
@@ -345,25 +353,79 @@
     >>> itest(b'ABCDEFG', b'abc', b'def', b'abcdefg')
     ('literal', 'ABCDEFG', [False, False, True])
     """
-    if pattern.startswith(b're:'):
-        pattern = pattern[3:]
+    kind, pattern = _splitpattern(pattern)
+    if kind == b're':
         try:
             flags = 0
             if not casesensitive:
                 flags = remod.I
             regex = remod.compile(pattern, flags)
         except remod.error as e:
-            raise error.ParseError(_(b'invalid regular expression: %s') % e)
-        return b're', pattern, regex.search
-    elif pattern.startswith(b'literal:'):
-        pattern = pattern[8:]
+            raise error.ParseError(
+                _(b'invalid regular expression: %s') % forcebytestr(e)
+            )
+        return kind, pattern, regex.search
+    elif kind == b'literal':
+        if casesensitive:
+            match = pattern.__eq__
+        else:
+            ipat = encoding.lower(pattern)
+            match = lambda s: ipat == encoding.lower(s)
+        return kind, pattern, match
+
+    raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
+
+
+def substringregexp(pattern, flags=0):
+    """Build a regexp object from a string pattern possibly starting with
+    're:' or 'literal:' prefix.
+
+    helper for tests:
+    >>> def test(pattern, *tests):
+    ...     regexp = substringregexp(pattern)
+    ...     return [bool(regexp.search(t)) for t in tests]
+    >>> def itest(pattern, *tests):
+    ...     regexp = substringregexp(pattern, remod.I)
+    ...     return [bool(regexp.search(t)) for t in tests]
+
+    substring matching (no prefix):
+    >>> test(b'bcde', b'abc', b'def', b'abcdefg')
+    [False, False, True]
 
-    match = pattern.__eq__
+    substring pattern should be escaped:
+    >>> substringregexp(b'.bc').pattern
+    '\\\\.bc'
+    >>> test(b'.bc', b'abc', b'def', b'abcdefg')
+    [False, False, False]
+
+    regex matching ('re:' prefix):
+    >>> test(b're:a.+b', b'nomatch', b'fooadef', b'fooadefbar')
+    [False, False, True]
+
+    force substring matches ('literal:' prefix):
+    >>> test(b'literal:re:foobar', b'foobar', b're:foobar')
+    [False, True]
 
-    if not casesensitive:
-        ipat = encoding.lower(pattern)
-        match = lambda s: ipat == encoding.lower(s)
-    return b'literal', pattern, match
+    case insensitive literal matches:
+    >>> itest(b'BCDE', b'abc', b'def', b'abcdefg')
+    [False, False, True]
+
+    case insensitive regex matches:
+    >>> itest(b're:A.+b', b'nomatch', b'fooadef', b'fooadefBar')
+    [False, False, True]
+    """
+    kind, pattern = _splitpattern(pattern)
+    if kind == b're':
+        try:
+            return remod.compile(pattern, flags)
+        except remod.error as e:
+            raise error.ParseError(
+                _(b'invalid regular expression: %s') % forcebytestr(e)
+            )
+    elif kind == b'literal':
+        return remod.compile(remod.escape(pattern), flags)
+
+    raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
 
 
 def shortuser(user):
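
_splitpattern gives stringmatcher and the new substringregexp one shared reading of the 're:' and 'literal:' prefixes; the behavioural difference is that stringmatcher compares literals for whole-string equality while substringregexp escapes them and searches for a substring. Side by side, using the functions from the hunks above:

    from mercurial.utils import stringutil

    kind, pat, matchfn = stringutil.stringmatcher(b'literal:hg')
    assert (matchfn(b'hg'), matchfn(b'hgext')) == (True, False)  # equality

    regexp = stringutil.substringregexp(b'literal:hg')
    assert bool(regexp.search(b'hgext'))  # substring match
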
--- a/mercurial/wireprotov1server.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/wireprotov1server.py	Tue Oct 20 22:04:04 2020 +0530
@@ -497,11 +497,11 @@
         # cleanly forward Abort error to the client
         if not exchange.bundle2requested(opts.get(b'bundlecaps')):
             if proto.name == b'http-v1':
-                return wireprototypes.ooberror(pycompat.bytestr(exc) + b'\n')
+                return wireprototypes.ooberror(exc.message + b'\n')
             raise  # cannot do better for bundle1 + ssh
         # bundle2 request expect a bundle2 reply
         bundler = bundle2.bundle20(repo.ui)
-        manargs = [(b'message', pycompat.bytestr(exc))]
+        manargs = [(b'message', exc.message)]
         advargs = []
         if exc.hint is not None:
             advargs.append((b'hint', exc.hint))
@@ -684,7 +684,7 @@
                     # We did not change it to minimise code change.
                     # This need to be moved to something proper.
                     # Feel free to do it.
-                    procutil.stderr.write(b"abort: %s\n" % exc)
+                    procutil.stderr.write(b"abort: %s\n" % exc.message)
                     if exc.hint is not None:
                         procutil.stderr.write(b"(%s)\n" % exc.hint)
                     procutil.stderr.flush()
@@ -733,7 +733,7 @@
                 if exc.params:
                     errpart.addparam(b'params', b'\0'.join(exc.params))
             except error.Abort as exc:
-                manargs = [(b'message', stringutil.forcebytestr(exc))]
+                manargs = [(b'message', exc.message)]
                 advargs = []
                 if exc.hint is not None:
                     advargs.append((b'hint', exc.hint))
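
These hunks lean on error.Abort now exposing its already-bytes message as exc.message, removing the pycompat.bytestr()/stringutil.forcebytestr() round-trips that could garble non-ASCII text on Python 3. The attribute in isolation:

    from mercurial import error

    try:
        raise error.Abort(b'push failed', hint=b'see server log')
    except error.Abort as exc:
        assert exc.message == b'push failed'  # bytes, no str() round-trip
        assert exc.hint == b'see server log'
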
--- a/mercurial/worker.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/mercurial/worker.py	Tue Oct 20 22:04:04 2020 +0530
@@ -71,8 +71,12 @@
         def __init__(self, wrapped):
             self._wrapped = wrapped
 
-        def __getattr__(self, attr):
-            return getattr(self._wrapped, attr)
+        # Do NOT implement readinto() by making it delegate to
+        # _wrapped.readinto(), since that is unbuffered. The unpickler is fine
+        # with just read() and readline(), so we don't need to implement it.
+
+        def readline(self):
+            return self._wrapped.readline()
 
         # issue multiple reads until size is fulfilled
         def read(self, size=-1):
@@ -91,7 +95,7 @@
 
             del view
             del buf[pos:]
-            return buf
+            return bytes(buf)
 
 
 else:
@@ -211,7 +215,7 @@
     parentpid = os.getpid()
     pipes = []
     retval = {}
-    for pargs in partition(args, workers):
+    for pargs in partition(args, min(workers, len(args))):
         # Every worker gets its own pipe to send results on, so we don't have to
         # implement atomic writes larger than PIPE_BUF. Each forked process has
         # its own pipe's descriptors in the local variables, and the parent
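
Clamping the slice count to min(workers, len(args)) stops the parent from forking children that would receive an empty share of the work, each costing a fork and a pipe for nothing. The effect, assuming worker.partition's round-robin slicing (restated in the sketch):

    def partition(lst, nslices):
        # same round-robin contract as worker.partition
        for i in range(nslices):
            yield lst[i::nslices]

    args = [1, 2, 3]
    assert [len(p) for p in partition(args, 8)] == [1, 1, 1, 0, 0, 0, 0, 0]
    assert [len(p) for p in partition(args, min(8, len(args)))] == [1, 1, 1]
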
--- a/relnotes/next	Thu Oct 08 13:45:56 2020 -0700
+++ b/relnotes/next	Tue Oct 20 22:04:04 2020 +0530
@@ -1,5 +1,9 @@
 == New Features ==
 
+ * `hg mv -A` can now be used with `--at-rev`. It behaves just like
+   `hg cp -A --at-rev`, i.e. it marks the destination as a copy of the
+   source whether or not the source still exists (but the source must
+   exist in the parent revision).
 
 
 == New Experimental Features ==
@@ -16,3 +20,7 @@
 
 == Internal API Changes ==
 
+ * `merge.update()` is now private (renamed to `_update()`). Hopefully
+   the higher-level functions available in the same module cover your
+   use cases.
+
--- a/rust/Cargo.lock	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/Cargo.lock	Tue Oct 20 22:04:04 2020 +0530
@@ -1,8 +1,13 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
 [[package]]
+name = "adler"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "aho-corasick"
-version = "0.7.10"
+version = "0.7.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -13,7 +18,7 @@
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -21,14 +26,14 @@
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "autocfg"
-version = "1.0.0"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -42,23 +47,21 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "cc"
+version = "1.0.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "cfg-if"
 version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "chrono"
-version = "0.4.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "clap"
-version = "2.33.1"
+version = "2.33.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -66,18 +69,8 @@
  "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "colored"
-version = "1.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -85,28 +78,36 @@
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "crc32fast"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "crossbeam"
 version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-channel"
-version = "0.4.2"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -128,22 +129,23 @@
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "crossbeam-queue"
-version = "0.2.1"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -151,18 +153,18 @@
 version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "ctor"
-version = "0.1.13"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -172,25 +174,64 @@
 
 [[package]]
 name = "either"
-version = "1.5.3"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "env_logger"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "flate2"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libz-sys 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "miniz_oxide 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "fuchsia-cprng"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "gcc"
+version = "0.3.55"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "getrandom"
-version = "0.1.14"
+version = "0.1.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
  "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "glob"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "hermit-abi"
-version = "0.1.8"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -203,23 +244,26 @@
 version = "0.1.0"
 dependencies = [
  "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "flate2 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
  "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "micro-timer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "zstd 0.5.3+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -227,10 +271,34 @@
 version = "0.1.0"
 dependencies = [
  "cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "hg-core 0.1.0",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "simple_logger 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "humantime"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "itertools"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "jobserver"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -240,12 +308,22 @@
 
 [[package]]
 name = "libc"
-version = "0.2.67"
+version = "0.2.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "libz-sys"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cc 1.0.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pkg-config 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vcpkg 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "log"
-version = "0.4.8"
+version = "0.4.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -266,62 +344,62 @@
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "memoffset"
-version = "0.5.3"
+version = "0.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "micro-timer"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "micro-timer-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer-macros 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "micro-timer-macros"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "num-integer"
-version = "0.1.42"
+name = "miniz_oxide"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "adler 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "num-traits"
-version = "0.2.11"
+version = "0.2.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "num_cpus"
-version = "1.12.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -329,12 +407,17 @@
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "pkg-config"
+version = "0.3.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "ppv-lite86"
-version = "0.2.6"
+version = "0.2.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -343,17 +426,17 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ctor 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ctor 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.9"
+version = "1.0.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -361,7 +444,7 @@
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -370,16 +453,42 @@
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "quick-error"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "quote"
-version = "1.0.3"
+version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.3.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -387,8 +496,8 @@
 version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -399,16 +508,29 @@
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rand_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand_core"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -437,29 +559,38 @@
 
 [[package]]
 name = "rayon"
-version = "1.3.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
+ "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.7.0"
+version = "1.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
+ "crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rdrand"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "redox_syscall"
-version = "0.1.56"
+version = "0.1.57"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -467,7 +598,7 @@
 version = "1.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick 0.7.13 (registry+https://github.com/rust-lang/crates.io-index)",
  "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -480,34 +611,46 @@
 
 [[package]]
 name = "remove_dir_all"
-version = "0.5.2"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rhg"
 version = "0.1.0"
 dependencies = [
- "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "hg-core 0.1.0",
+ "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "rustc_version"
-version = "0.2.3"
+name = "rust-crypto"
+version = "0.2.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "rustc-serialize"
+version = "0.3.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "same-file"
 version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -516,43 +659,18 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "semver"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "semver-parser"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "simple_logger"
-version = "1.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "colored 1.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "strsim"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "syn"
-version = "1.0.16"
+version = "1.0.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -561,11 +679,19 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
- "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)",
+ "remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -573,7 +699,7 @@
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -586,12 +712,12 @@
 
 [[package]]
 name = "time"
-version = "0.1.42"
+version = "0.1.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -604,17 +730,22 @@
 
 [[package]]
 name = "unicode-width"
-version = "0.1.7"
+version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "unicode-xid"
-version = "0.2.0"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "vcpkg"
+version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "vec_map"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -623,8 +754,13 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "wasi"
+version = "0.10.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "winapi"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -638,10 +774,10 @@
 
 [[package]]
 name = "winapi-util"
-version = "0.1.3"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -649,79 +785,128 @@
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "zstd"
+version = "0.5.3+zstd.1.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "zstd-safe 2.0.5+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "zstd-safe"
+version = "2.0.5+zstd.1.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "zstd-sys 1.4.17+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "zstd-sys"
+version = "1.4.17+zstd.1.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cc 1.0.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [metadata]
-"checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada"
+"checksum adler 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
+"checksum aho-corasick 0.7.13 (registry+https://github.com/rust-lang/crates.io-index)" = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86"
 "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
 "checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
+"checksum autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 "checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+"checksum cc 1.0.60 (registry+https://github.com/rust-lang/crates.io-index)" = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c"
 "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2"
-"checksum clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129"
-"checksum colored 1.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f4ffc801dacf156c5854b9df4f425a626539c3a6ef7893cc0c5084a23f0b6c59"
+"checksum clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)" = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
 "checksum cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bfaf3847ab963e40c4f6dd8d6be279bdf74007ae2413786a0dcbb28c52139a95"
+"checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"
 "checksum crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e"
-"checksum crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061"
+"checksum crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
 "checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
 "checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
-"checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db"
+"checksum crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
 "checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-"checksum ctor 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1"
+"checksum ctor 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
 "checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
-"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
-"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
-"checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8"
+"checksum either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
+"checksum flate2 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "766d0e77a2c1502169d4a93ff3b8c15a71fd946cd0126309752104e5f3c46d94"
+"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+"checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+"checksum getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
+"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
+"checksum hermit-abi 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c30f6d0bc6b00693347368a67d41b58f2fb851215ff1da49e90fe2c5c667151"
 "checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35"
+"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
+"checksum itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
+"checksum jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
 "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018"
-"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
+"checksum libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235"
+"checksum libz-sys 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
+"checksum log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
 "checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
 "checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
 "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
-"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9"
-"checksum micro-timer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "25b31d6cb9112984323d05d7a353f272ae5d7a307074f9ab9b25c00121b8c947"
-"checksum micro-timer-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5694085dd384bb9e824207facc040c248d9df653f55e28c3ad0686958b448504"
-"checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
-"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
-"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
+"checksum memoffset 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
+"checksum micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2620153e1d903d26b72b89f0e9c48d8c4756cba941c185461dddc234980c298c"
+"checksum micro-timer-macros 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e28a3473e6abd6e9aab36aaeef32ad22ae0bd34e79f376643594c2b152ec1c5d"
+"checksum miniz_oxide 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9"
+"checksum num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
+"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
 "checksum output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
-"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
+"checksum pkg-config 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)" = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33"
+"checksum ppv-lite86 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20"
 "checksum pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427"
-"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435"
+"checksum proc-macro2 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)" = "36e28516df94f3dd551a587da5357459d9b36d945a7c37c3557928c1c2ff2a2c"
 "checksum python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67cb041de8615111bf224dd75667af5f25c6e032118251426fed7f1b70ce4c8c"
 "checksum python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90af11779515a1e530af60782d273b59ac79d33b0e253c071a728563957c76d4"
-"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
+"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
+"checksum quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
+"checksum rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
+"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
 "checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 "checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
+"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
+"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
 "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
 "checksum rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
 "checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
 "checksum rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
-"checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
-"checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
-"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
+"checksum rayon 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cfd016f0c045ad38b5251be2c9c0ab806917f82da4d36b2a327e5166adad9270"
+"checksum rayon-core 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e8c4fec834fb6e6d2dd5eece3c7b432a52f0ba887cf40e595190c4107edc08bf"
+"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
+"checksum redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)" = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
 "checksum regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
 "checksum regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)" = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
-"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
-"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+"checksum remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
+"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
 "checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
 "checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
-"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
-"checksum simple_logger 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fea0c4611f32f4c2bac73754f22dca1f57e6c1945e0590dae4e5f2a077b92367"
 "checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-"checksum syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859"
+"checksum syn 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)" = "6690e3e9f692504b941dc6c3b188fd28df054f7fb8469ab40680df52fdcc842b"
 "checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
+"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
 "checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
 "checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
-"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f"
+"checksum time 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)" = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
 "checksum twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56"
-"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479"
-"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
-"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a"
+"checksum unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+"checksum unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+"checksum vcpkg 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"
+"checksum vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+"checksum wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
 "checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
-"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
+"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
+"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
 "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+"checksum zstd 0.5.3+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8"
+"checksum zstd-safe 2.0.5+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055"
+"checksum zstd-sys 1.4.17+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b"
--- a/rust/README.rst	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/README.rst	Tue Oct 20 22:04:04 2020 +0530
@@ -34,6 +34,15 @@
 One day we may use this environment variable to switch to new experimental
 binding crates like a hypothetical ``HGWITHRUSTEXT=hpy``.
 
+Special features
+================
+
+You might want to check the ``features`` section in ``hg-cpython/Cargo.toml``;
+it may contain optional features that are worth trying out.
+
+To enable features from the Makefile, set the ``HG_RUST_FEATURES``
+environment variable, for instance
+``HG_RUST_FEATURES="some-feature other-feature"``; see the example below.
+
 Profiling
 =========
 
@@ -52,8 +61,8 @@
 Developing Rust
 ===============
 
-The current version of Rust in use is ``1.34.2``, because it's what Debian
-stable has. You can use ``rustup override set 1.34.2`` at the root of the repo
+The current version of Rust in use is ``1.41.1``, because it's what Debian
+stable has. You can use ``rustup override set 1.41.1`` at the root of the repo
 to make it easier on you.
 
 Go to the ``hg-cpython`` folder::
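
For illustration, a hypothetical invocation (not part of this changeset) that
runs the Rust tests with the experimental ``dirstate-tree`` feature, assuming
the relevant ``Cargo.toml`` exposes a feature of that name::

    $ make rust-tests HG_RUST_FEATURES="dirstate-tree"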
--- a/rust/chg/src/clientext.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/chg/src/clientext.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -43,11 +43,20 @@
         stdout: &impl AsRawFd,
         stderr: &impl AsRawFd,
     ) -> io::Result<()> {
-        attachio::attach_io(self.client.borrow_protocol_mut(), stdin, stdout, stderr).await
+        attachio::attach_io(
+            self.client.borrow_protocol_mut(),
+            stdin,
+            stdout,
+            stderr,
+        )
+        .await
     }
 
     /// Changes the working directory of the server.
-    pub async fn set_current_dir(&mut self, dir: impl AsRef<Path>) -> io::Result<()> {
+    pub async fn set_current_dir(
+        &mut self,
+        dir: impl AsRef<Path>,
+    ) -> io::Result<()> {
         let dir_bytes = dir.as_ref().as_os_str().as_bytes().to_owned();
         self.client
             .borrow_protocol_mut()
@@ -67,7 +76,10 @@
     }
 
     /// Changes the process title of the server.
-    pub async fn set_process_name(&mut self, name: impl AsRef<OsStr>) -> io::Result<()> {
+    pub async fn set_process_name(
+        &mut self,
+        name: impl AsRef<OsStr>,
+    ) -> io::Result<()> {
         let name_bytes = name.as_ref().as_bytes().to_owned();
         self.client
             .borrow_protocol_mut()
--- a/rust/chg/src/locator.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/chg/src/locator.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -71,8 +71,12 @@
     }
 
     /// Specifies the arguments to be passed to the server at start.
-    pub fn set_early_args(&mut self, args: impl IntoIterator<Item = impl AsRef<OsStr>>) {
-        self.hg_early_args = args.into_iter().map(|a| a.as_ref().to_owned()).collect();
+    pub fn set_early_args(
+        &mut self,
+        args: impl IntoIterator<Item = impl AsRef<OsStr>>,
+    ) {
+        self.hg_early_args =
+            args.into_iter().map(|a| a.as_ref().to_owned()).collect();
     }
 
     /// Connects to the server.
@@ -104,7 +108,10 @@
     /// Runs instructions received from the server.
     ///
     /// Returns true if the client should try connecting to the other server.
-    fn run_instructions(&mut self, instructions: &[Instruction]) -> io::Result<bool> {
+    fn run_instructions(
+        &mut self,
+        instructions: &[Instruction],
+    ) -> io::Result<bool> {
         let mut reconnect = false;
         for inst in instructions {
             debug!("instruction: {:?}", inst);
@@ -123,7 +130,10 @@
                             "insecure redirect instruction from server: {}",
                             path.display()
                         );
-                        return Err(io::Error::new(io::ErrorKind::InvalidData, msg));
+                        return Err(io::Error::new(
+                            io::ErrorKind::InvalidData,
+                            msg,
+                        ));
                     }
                     self.redirect_sock_path = Some(path.to_owned());
                     reconnect = true;
@@ -134,7 +144,10 @@
                             "insecure unlink instruction from server: {}",
                             path.display()
                         );
-                        return Err(io::Error::new(io::ErrorKind::InvalidData, msg));
+                        return Err(io::Error::new(
+                            io::ErrorKind::InvalidData,
+                            msg,
+                        ));
                     }
                     fs::remove_file(path).unwrap_or(()); // may race
                 }
@@ -319,7 +332,10 @@
     P: AsRef<Path>,
 {
     let a = fs::symlink_metadata(path.as_ref())?;
-    if a.is_dir() && a.uid() == procutil::get_effective_uid() && (a.mode() & 0o777) == 0o700 {
+    if a.is_dir()
+        && a.uid() == procutil::get_effective_uid()
+        && (a.mode() & 0o777) == 0o700
+    {
         Ok(path)
     } else {
         Err(io::Error::new(io::ErrorKind::Other, "insecure directory"))
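
A minimal sketch, assuming plain ``std`` APIs (the helper below is
illustrative and not part of this changeset), of creating a directory that
the check above accepts: owned by the current user, with mode 0o700.

    use std::fs;
    use std::io;
    use std::os::unix::fs::PermissionsExt;
    use std::path::Path;

    /// Creates `path` if needed and forces its permissions to 0o700 so that
    /// the `is_dir() && uid matches && mode == 0o700` test above passes.
    fn create_secure_dir(path: &Path) -> io::Result<()> {
        fs::create_dir_all(path)?;
        fs::set_permissions(path, fs::Permissions::from_mode(0o700))
    }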
@@ -344,7 +360,9 @@
 }
 
 /// Collects arguments which need to be passed to the server at start.
-pub fn collect_early_args(args: impl IntoIterator<Item = impl AsRef<OsStr>>) -> Vec<OsString> {
+pub fn collect_early_args(
+    args: impl IntoIterator<Item = impl AsRef<OsStr>>,
+) -> Vec<OsString> {
     let mut args_iter = args.into_iter();
     let mut early_args = Vec::new();
     while let Some(arg) = args_iter.next() {
--- a/rust/chg/src/message.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/chg/src/message.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -32,11 +32,16 @@
 }
 
 /// Parses "S" channel request into command type and spec.
-pub fn parse_command_spec(data: Bytes) -> io::Result<(CommandType, CommandSpec)> {
+pub fn parse_command_spec(
+    data: Bytes,
+) -> io::Result<(CommandType, CommandSpec)> {
     let mut split = data.split(|&c| c == b'\0');
-    let ctype = parse_command_type(split.next().ok_or(new_parse_error("missing type"))?)?;
+    let ctype = parse_command_type(
+        split.next().ok_or(new_parse_error("missing type"))?,
+    )?;
     let command = split.next().ok_or(new_parse_error("missing command"))?;
-    let current_dir = split.next().ok_or(new_parse_error("missing current dir"))?;
+    let current_dir =
+        split.next().ok_or(new_parse_error("missing current dir"))?;
 
     let mut envs = Vec::new();
     for l in split {
@@ -89,14 +94,21 @@
             (b"exit", Some(arg)) => decode_latin1(arg)
                 .parse()
                 .map(Instruction::Exit)
-                .map_err(|_| new_parse_error(format!("invalid exit code: {:?}", arg)))?,
+                .map_err(|_| {
+                    new_parse_error(format!("invalid exit code: {:?}", arg))
+                })?,
             (b"reconnect", None) => Instruction::Reconnect,
             (b"redirect", Some(arg)) => {
                 Instruction::Redirect(OsStr::from_bytes(arg).to_owned().into())
             }
-            (b"unlink", Some(arg)) => Instruction::Unlink(OsStr::from_bytes(arg).to_owned().into()),
+            (b"unlink", Some(arg)) => {
+                Instruction::Unlink(OsStr::from_bytes(arg).to_owned().into())
+            }
             _ => {
-                return Err(new_parse_error(format!("unknown command: {:?}", l)));
+                return Err(new_parse_error(format!(
+                    "unknown command: {:?}",
+                    l
+                )));
             }
         };
         instructions.push(inst);
@@ -118,7 +130,8 @@
 ) -> Bytes {
     let mut vars_iter = vars.into_iter();
     if let Some((k, v)) = vars_iter.next() {
-        let mut dst = BytesMut::with_capacity(INITIAL_PACKED_ENV_VARS_CAPACITY);
+        let mut dst =
+            BytesMut::with_capacity(INITIAL_PACKED_ENV_VARS_CAPACITY);
         pack_env_into(&mut dst, k.as_ref(), v.as_ref());
         for (k, v) in vars_iter {
             dst.reserve(1);
@@ -145,7 +158,9 @@
     s.as_ref().iter().map(|&c| c as char).collect()
 }
 
-fn new_parse_error(error: impl Into<Box<dyn error::Error + Send + Sync>>) -> io::Error {
+fn new_parse_error(
+    error: impl Into<Box<dyn error::Error + Send + Sync>>,
+) -> io::Error {
     io::Error::new(io::ErrorKind::InvalidData, error)
 }
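
To make the format parsed by ``parse_command_spec`` concrete, a small sketch
(the field values are illustrative, not from the changeset): the payload is
the command type, the command, the current directory, and then ``KEY=VALUE``
environment entries, all NUL-separated.

    // A well-formed "S" channel request under the layout parsed above.
    let data = Bytes::from_static(b"pager\0less\0/tmp\0PAGER=less\0TERM=xterm");
    assert!(parse_command_spec(data).is_ok());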
 
@@ -183,17 +198,24 @@
     fn parse_command_spec_too_short() {
         assert!(parse_command_spec(Bytes::from_static(b"")).is_err());
         assert!(parse_command_spec(Bytes::from_static(b"pager")).is_err());
-        assert!(parse_command_spec(Bytes::from_static(b"pager\0less")).is_err());
+        assert!(
+            parse_command_spec(Bytes::from_static(b"pager\0less")).is_err()
+        );
     }
 
     #[test]
     fn parse_command_spec_malformed_env() {
-        assert!(parse_command_spec(Bytes::from_static(b"pager\0less\0/tmp\0HOME")).is_err());
+        assert!(parse_command_spec(Bytes::from_static(
+            b"pager\0less\0/tmp\0HOME"
+        ))
+        .is_err());
     }
 
     #[test]
     fn parse_command_spec_unknown_type() {
-        assert!(parse_command_spec(Bytes::from_static(b"paper\0less")).is_err());
+        assert!(
+            parse_command_spec(Bytes::from_static(b"paper\0less")).is_err()
+        );
     }
 
     #[test]
--- a/rust/chg/src/procutil.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/chg/src/procutil.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -44,7 +44,8 @@
     if flags < 0 {
         return Err(io::Error::last_os_error());
     }
-    let r = unsafe { libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_NONBLOCK) };
+    let r =
+        unsafe { libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_NONBLOCK) };
     if r < 0 {
         return Err(io::Error::last_os_error());
     }
@@ -69,7 +70,10 @@
 ///
 /// This touches global states, and thus synchronized as a one-time
 /// initialization function.
-pub fn setup_signal_handler_once(pid: u32, pgid: Option<u32>) -> io::Result<()> {
+pub fn setup_signal_handler_once(
+    pid: u32,
+    pgid: Option<u32>,
+) -> io::Result<()> {
     let pid_signed = pid as i32;
     let pgid_signed = pgid.map(|n| n as i32).unwrap_or(0);
     let mut r = 0;
--- a/rust/chg/src/runcommand.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/chg/src/runcommand.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -36,7 +36,8 @@
             ChannelMessage::Data(..) => {
                 // just ignores data sent to optional channel
             }
-            ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => {
+            ChannelMessage::InputRequest(..)
+            | ChannelMessage::LineRequest(..) => {
                 return Err(io::Error::new(
                     io::ErrorKind::InvalidData,
                     "unsupported request",
@@ -49,7 +50,8 @@
                         // server spins new command loop while pager request is
                         // in progress, which can be terminated by "" command.
                         let pin = handler.spawn_pager(&cmd_spec).await?;
-                        attachio::attach_io(proto, &io::stdin(), &pin, &pin).await?;
+                        attachio::attach_io(proto, &io::stdin(), &pin, &pin)
+                            .await?;
                         proto.send_command("").await?; // terminator
                     }
                     CommandType::System => {
--- a/rust/chg/src/uihandler.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/chg/src/uihandler.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -22,7 +22,10 @@
     /// Handles pager command request.
     ///
     /// Returns the pipe to be attached to the server if the pager is spawned.
-    async fn spawn_pager(&mut self, spec: &CommandSpec) -> io::Result<Self::PagerStdin>;
+    async fn spawn_pager(
+        &mut self,
+        spec: &CommandSpec,
+    ) -> io::Result<Self::PagerStdin>;
 
     /// Handles system command request.
     ///
@@ -53,8 +56,12 @@
 impl SystemHandler for ChgUiHandler {
     type PagerStdin = ChildStdin;
 
-    async fn spawn_pager(&mut self, spec: &CommandSpec) -> io::Result<Self::PagerStdin> {
-        let mut pager = new_shell_command(&spec).stdin(Stdio::piped()).spawn()?;
+    async fn spawn_pager(
+        &mut self,
+        spec: &CommandSpec,
+    ) -> io::Result<Self::PagerStdin> {
+        let mut pager =
+            new_shell_command(&spec).stdin(Stdio::piped()).spawn()?;
         let pin = pager.stdin.take().unwrap();
         procutil::set_blocking_fd(pin.as_raw_fd())?;
         // TODO: if pager exits, notify the server with SIGPIPE immediately.
--- a/rust/hg-core/Cargo.toml	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/Cargo.toml	Tue Oct 20 22:04:04 2020 +0530
@@ -23,9 +23,24 @@
 crossbeam = "0.7.3"
 micro-timer = "0.3.0"
 log = "0.4.8"
+memmap = "0.7.0"
+zstd = "0.5.3"
+rust-crypto = "0.2.36"
+
+# We don't use the `miniz-oxide` backend, so as not to change rhg benchmarks
+# until we have a clearer view of which backend is the fastest.
+[dependencies.flate2]
+version = "1.0.16"
+features = ["zlib"]
+default-features = false
 
 [dev-dependencies]
 clap = "*"
-memmap = "0.7.0"
 pretty_assertions = "0.6.1"
 tempfile = "3.1.0"
+
+[features]
+# Use a (still unoptimized) tree for the dirstate instead of the current flat
+# dirstate. This is not yet recommended for performance reasons. A future
+# version might make it the default, or make it a runtime option.
+dirstate-tree = []
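
As a sketch, the new feature can also be exercised directly with Cargo from
the ``rust/hg-core`` directory (the command below is illustrative, not part
of the changeset)::

    $ cargo test --features dirstate-tree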
--- a/rust/hg-core/rustfmt.toml	Thu Oct 08 13:45:56 2020 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-max_width = 79
-wrap_comments = true
-error_on_line_overflow = true
--- a/rust/hg-core/src/dirstate.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/dirstate.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -11,6 +11,8 @@
 
 pub mod dirs_multiset;
 pub mod dirstate_map;
+#[cfg(feature = "dirstate-tree")]
+pub mod dirstate_tree;
 pub mod parsers;
 pub mod status;
 
@@ -36,8 +38,15 @@
 /// merge.
 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
 
+#[cfg(not(feature = "dirstate-tree"))]
 pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
+#[cfg(not(feature = "dirstate-tree"))]
 pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
+
+#[cfg(feature = "dirstate-tree")]
+pub type StateMap = dirstate_tree::tree::Tree;
+#[cfg(feature = "dirstate-tree")]
+pub type StateMapIter<'a> = dirstate_tree::iter::Iter<'a>;
 pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
 pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
 
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -14,7 +14,7 @@
         files,
         hg_path::{HgPath, HgPathBuf, HgPathError},
     },
-    DirstateEntry, DirstateMapError, FastHashMap,
+    DirstateEntry, DirstateMapError, FastHashMap, StateMap,
 };
 use std::collections::{hash_map, hash_map::Entry, HashMap, HashSet};
 
@@ -30,15 +30,15 @@
     /// Initializes the multiset from a dirstate.
     ///
     /// If `skip_state` is provided, skips dirstate entries with equal state.
+    #[cfg(not(feature = "dirstate-tree"))]
     pub fn from_dirstate(
-        dirstate: &FastHashMap<HgPathBuf, DirstateEntry>,
+        dirstate: &StateMap,
         skip_state: Option<EntryState>,
     ) -> Result<Self, DirstateMapError> {
         let mut multiset = DirsMultiset {
             inner: FastHashMap::default(),
         };
-
-        for (filename, DirstateEntry { state, .. }) in dirstate {
+        for (filename, DirstateEntry { state, .. }) in dirstate.iter() {
             // This `if` is optimized out of the loop
             if let Some(skip) = skip_state {
                 if skip != *state {
@@ -51,6 +51,30 @@
 
         Ok(multiset)
     }
+    /// Initializes the multiset from a dirstate.
+    ///
+    /// If `skip_state` is provided, skips dirstate entries with equal state.
+    #[cfg(feature = "dirstate-tree")]
+    pub fn from_dirstate(
+        dirstate: &StateMap,
+        skip_state: Option<EntryState>,
+    ) -> Result<Self, DirstateMapError> {
+        let mut multiset = DirsMultiset {
+            inner: FastHashMap::default(),
+        };
+        for (filename, DirstateEntry { state, .. }) in dirstate.iter() {
+            // This `if` is optimized out of the loop
+            if let Some(skip) = skip_state {
+                if skip != state {
+                    multiset.add_path(filename)?;
+                }
+            } else {
+                multiset.add_path(filename)?;
+            }
+        }
+
+        Ok(multiset)
+    }
 
     /// Initializes the multiset from a manifest.
     pub fn from_manifest(
@@ -332,8 +356,8 @@
         };
         assert_eq!(expected, new);
 
-        let new = DirsMultiset::from_dirstate(&FastHashMap::default(), None)
-            .unwrap();
+        let new =
+            DirsMultiset::from_dirstate(&StateMap::default(), None).unwrap();
         let expected = DirsMultiset {
             inner: FastHashMap::default(),
         };
@@ -357,7 +381,7 @@
         };
         assert_eq!(expected, new);
 
-        let input_map = ["a/", "b/", "a/c", "a/d/"]
+        let input_map = ["b/x", "a/c", "a/d/x"]
             .iter()
             .map(|f| {
                 (
@@ -371,7 +395,7 @@
                 )
             })
             .collect();
-        let expected_inner = [("", 2), ("a", 3), ("b", 1), ("a/d", 1)]
+        let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)]
             .iter()
             .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
@@ -387,9 +411,9 @@
     fn test_dirsmultiset_new_skip() {
         let input_map = [
             ("a/", EntryState::Normal),
-            ("a/b/", EntryState::Normal),
+            ("a/b", EntryState::Normal),
             ("a/c", EntryState::Removed),
-            ("a/d/", EntryState::Merged),
+            ("a/d", EntryState::Merged),
         ]
         .iter()
         .map(|(f, state)| {
@@ -406,7 +430,7 @@
         .collect();
 
         // "a" incremented with "a/c" and "a/d/"
-        let expected_inner = [("", 1), ("a", 2), ("a/d", 1)]
+        let expected_inner = [("", 1), ("a", 2)]
             .iter()
             .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
--- a/rust/hg-core/src/dirstate/dirstate_map.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -5,6 +5,7 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
+use crate::revlog::node::NULL_NODE_ID;
 use crate::{
     dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
     pack_dirstate, parse_dirstate,
@@ -15,7 +16,7 @@
     CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
     DirstateParents, DirstateParseError, FastHashMap, StateMap,
 };
-use core::borrow::Borrow;
+use micro_timer::timed;
 use std::collections::HashSet;
 use std::convert::TryInto;
 use std::iter::FromIterator;
@@ -24,7 +25,6 @@
 
 pub type FileFoldMap = FastHashMap<HgPathBuf, HgPathBuf>;
 
-const NULL_ID: [u8; 20] = [0; 20];
 const MTIME_UNSET: i32 = -1;
 
 #[derive(Default)]
@@ -66,14 +66,14 @@
     }
 
     pub fn clear(&mut self) {
-        self.state_map.clear();
+        self.state_map = StateMap::default();
         self.copy_map.clear();
         self.file_fold_map = None;
         self.non_normal_set = None;
         self.other_parent_set = None;
         self.set_parents(&DirstateParents {
-            p1: NULL_ID,
-            p2: NULL_ID,
+            p1: NULL_NODE_ID,
+            p2: NULL_NODE_ID,
         })
     }
 
@@ -188,18 +188,15 @@
     ) {
         for filename in filenames {
             let mut changed = false;
-            self.state_map
-                .entry(filename.to_owned())
-                .and_modify(|entry| {
-                    if entry.state == EntryState::Normal && entry.mtime == now
-                    {
-                        changed = true;
-                        *entry = DirstateEntry {
-                            mtime: MTIME_UNSET,
-                            ..*entry
-                        };
-                    }
-                });
+            if let Some(entry) = self.state_map.get_mut(&filename) {
+                if entry.state == EntryState::Normal && entry.mtime == now {
+                    changed = true;
+                    *entry = DirstateEntry {
+                        mtime: MTIME_UNSET,
+                        ..*entry
+                    };
+                }
+            }
             if changed {
                 self.get_non_normal_other_parent_entries()
                     .0
@@ -256,6 +253,7 @@
         )
     }
 
+    #[cfg(not(feature = "dirstate-tree"))]
     pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
         if !force
             && self.non_normal_set.is_some()
@@ -284,6 +282,34 @@
         self.non_normal_set = Some(non_normal);
         self.other_parent_set = Some(other_parent);
     }
+    #[cfg(feature = "dirstate-tree")]
+    pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
+        if !force
+            && self.non_normal_set.is_some()
+            && self.other_parent_set.is_some()
+        {
+            return;
+        }
+        let mut non_normal = HashSet::new();
+        let mut other_parent = HashSet::new();
+
+        for (
+            filename,
+            DirstateEntry {
+                state, size, mtime, ..
+            },
+        ) in self.state_map.iter()
+        {
+            if state != EntryState::Normal || mtime == MTIME_UNSET {
+                non_normal.insert(filename.to_owned());
+            }
+            if state == EntryState::Normal && size == SIZE_FROM_OTHER_PARENT {
+                other_parent.insert(filename.to_owned());
+            }
+        }
+        self.non_normal_set = Some(non_normal);
+        self.other_parent_set = Some(other_parent);
+    }
 
     /// Both of these setters and their uses appear to be the simplest way to
     /// emulate a Python lazy property, but it is ugly and unidiomatic.
@@ -340,8 +366,8 @@
             };
         } else if file_contents.is_empty() {
             parents = DirstateParents {
-                p1: NULL_ID,
-                p2: NULL_ID,
+                p1: NULL_NODE_ID,
+                p2: NULL_NODE_ID,
             };
         } else {
             return Err(DirstateError::Parse(DirstateParseError::Damaged));
@@ -356,6 +382,7 @@
         self.dirty_parents = true;
     }
 
+    #[timed]
     pub fn read(
         &mut self,
         file_contents: &[u8],
@@ -364,11 +391,17 @@
             return Ok(None);
         }
 
-        let parents = parse_dirstate(
-            &mut self.state_map,
-            &mut self.copy_map,
-            file_contents,
-        )?;
+        let (parents, entries, copies) = parse_dirstate(file_contents)?;
+        self.state_map.extend(
+            entries
+                .into_iter()
+                .map(|(path, entry)| (path.to_owned(), entry)),
+        );
+        self.copy_map.extend(
+            copies
+                .into_iter()
+                .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
+        );
 
         if !self.dirty_parents {
             self.set_parents(&parents);
@@ -390,17 +423,33 @@
         self.set_non_normal_other_parent_entries(true);
         Ok(packed)
     }
-
+    #[cfg(not(feature = "dirstate-tree"))]
     pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
         if let Some(ref file_fold_map) = self.file_fold_map {
             return file_fold_map;
         }
         let mut new_file_fold_map = FileFoldMap::default();
-        for (filename, DirstateEntry { state, .. }) in self.state_map.borrow()
-        {
+
+        for (filename, DirstateEntry { state, .. }) in self.state_map.iter() {
             if *state == EntryState::Removed {
                 new_file_fold_map
-                    .insert(normalize_case(filename), filename.to_owned());
+                    .insert(normalize_case(&filename), filename.to_owned());
+            }
+        }
+        self.file_fold_map = Some(new_file_fold_map);
+        self.file_fold_map.as_ref().unwrap()
+    }
+    #[cfg(feature = "dirstate-tree")]
+    pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
+        if let Some(ref file_fold_map) = self.file_fold_map {
+            return file_fold_map;
+        }
+        let mut new_file_fold_map = FileFoldMap::default();
+
+        for (filename, DirstateEntry { state, .. }) in self.state_map.iter() {
+            if state == EntryState::Removed {
+                new_file_fold_map
+                    .insert(normalize_case(&filename), filename.to_owned());
             }
         }
         self.file_fold_map = Some(new_file_fold_map);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate/dirstate_tree.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,14 @@
+// dirstate_tree.rs
+//
+// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Special-case radix tree that matches a filesystem hierarchy for use in the
+//! dirstate.
+//! It has not been optimized at all yet.
+
+pub mod iter;
+pub mod node;
+pub mod tree;
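
As a conceptual aid (an illustrative sketch, not part of the changeset): with
tracked files ``foo/bar`` and ``foo2``, the tree mirrors the filesystem
hierarchy, roughly:

    // root: Node { kind: Directory { children: { "foo", "foo2" } } }
    //   "foo"  -> Node { kind: Directory { children: { "bar" } } }
    //     "bar"  -> Node { kind: File { entry: DirstateEntry { .. } } }
    //   "foo2" -> Node { kind: File { entry: DirstateEntry { .. } } }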
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate/dirstate_tree/iter.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,392 @@
+// iter.rs
+//
+// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use super::node::{Node, NodeKind};
+use super::tree::Tree;
+use crate::dirstate::dirstate_tree::node::Directory;
+use crate::dirstate::status::Dispatch;
+use crate::utils::hg_path::{hg_path_to_path_buf, HgPath, HgPathBuf};
+use crate::DirstateEntry;
+use std::borrow::Cow;
+use std::collections::VecDeque;
+use std::iter::{FromIterator, FusedIterator};
+use std::path::PathBuf;
+
+impl FromIterator<(HgPathBuf, DirstateEntry)> for Tree {
+    fn from_iter<T: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
+        iter: T,
+    ) -> Self {
+        let mut tree = Self::new();
+        for (path, entry) in iter {
+            tree.insert(path, entry);
+        }
+        tree
+    }
+}
+
+/// Iterator of all entries in the dirstate tree.
+///
+/// It has no particular ordering.
+pub struct Iter<'a> {
+    to_visit: VecDeque<(Cow<'a, [u8]>, &'a Node)>,
+}
+
+impl<'a> Iter<'a> {
+    pub fn new(node: &'a Node) -> Iter<'a> {
+        let mut to_visit = VecDeque::new();
+        to_visit.push_back((Cow::Borrowed(&b""[..]), node));
+        Self { to_visit }
+    }
+}
+
+impl<'a> Iterator for Iter<'a> {
+    type Item = (HgPathBuf, DirstateEntry);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        while let Some((base_path, node)) = self.to_visit.pop_front() {
+            match &node.kind {
+                NodeKind::Directory(dir) => {
+                    add_children_to_visit(
+                        &mut self.to_visit,
+                        &base_path,
+                        &dir,
+                    );
+                    if let Some(file) = &dir.was_file {
+                        return Some((
+                            HgPathBuf::from_bytes(&base_path),
+                            file.entry,
+                        ));
+                    }
+                }
+                NodeKind::File(file) => {
+                    if let Some(dir) = &file.was_directory {
+                        add_children_to_visit(
+                            &mut self.to_visit,
+                            &base_path,
+                            &dir,
+                        );
+                    }
+                    return Some((
+                        HgPathBuf::from_bytes(&base_path),
+                        file.entry,
+                    ));
+                }
+            }
+        }
+        None
+    }
+}
+
+impl<'a> FusedIterator for Iter<'a> {}
+
+/// Iterator of all entries in the dirstate tree, with a special filesystem
+/// handling for the directories containing said entries.
+///
+/// It checks every directory on-disk to see if it has become a symlink, to
+/// prevent a potential security issue.
+/// Using this information, it may dispatch `status` information early: it
+/// returns canonical paths along with `Shortcut`s, which are either a
+/// `DirstateEntry` or a `Dispatch`, if the fate of said path has already been
+/// determined.
+///
+/// Like `Iter`, it has no particular ordering.
+pub struct FsIter<'a> {
+    root_dir: PathBuf,
+    to_visit: VecDeque<(Cow<'a, [u8]>, &'a Node)>,
+    shortcuts: VecDeque<(HgPathBuf, StatusShortcut)>,
+}
+
+impl<'a> FsIter<'a> {
+    pub fn new(node: &'a Node, root_dir: PathBuf) -> FsIter<'a> {
+        let mut to_visit = VecDeque::new();
+        to_visit.push_back((Cow::Borrowed(&b""[..]), node));
+        Self {
+            root_dir,
+            to_visit,
+            shortcuts: Default::default(),
+        }
+    }
+
+    /// Mercurial tracks symlinks but *not* what they point to.
+    /// If a directory is moved and symlinked:
+    ///
+    /// ```bash
+    /// $ mkdir foo
+    /// $ touch foo/a
+    /// $ # commit...
+    /// $ mv foo bar
+    /// $ ln -s bar foo
+    /// ```
+    /// We need to dispatch the new symlink as `Unknown` and all the
+    /// descendants of the directory it replaces as `Deleted`.
+    fn dispatch_symlinked_directory(
+        &mut self,
+        path: impl AsRef<HgPath>,
+        node: &Node,
+    ) {
+        let path = path.as_ref();
+        self.shortcuts.push_back((
+            path.to_owned(),
+            StatusShortcut::Dispatch(Dispatch::Unknown),
+        ));
+        for (file, _) in node.iter() {
+            self.shortcuts.push_back((
+                path.join(&file),
+                StatusShortcut::Dispatch(Dispatch::Deleted),
+            ));
+        }
+    }
+
+    /// Returns `true` if the canonical `path` of a directory corresponds to a
+    /// symlink on disk. This means the directory was moved and symlinked
+    /// after the last dirstate update.
+    ///
+    /// # Special cases
+    ///
+    /// Returns `false` for the repository root.
+    /// Returns `false` on I/O error; error handling is outside of the
+    /// iterator.
+    fn directory_became_symlink(&mut self, path: &HgPath) -> bool {
+        if path.is_empty() {
+            return false;
+        }
+        let filename_as_path = match hg_path_to_path_buf(&path) {
+            Ok(p) => p,
+            _ => return false,
+        };
+        let meta = self.root_dir.join(filename_as_path).symlink_metadata();
+        match meta {
+            Ok(ref m) if m.file_type().is_symlink() => true,
+            _ => false,
+        }
+    }
+}
+
+/// Returned by `FsIter`, since the `Dispatch` of any given entry may already
+/// be determined during the iteration. This is necessary for performance
+/// reasons, since hierarchical information is needed to `Dispatch` an entire
+/// subtree efficiently.
+#[derive(Debug, Copy, Clone)]
+pub enum StatusShortcut {
+    /// An entry in the dirstate for further inspection
+    Entry(DirstateEntry),
+    /// The result of the status of the corresponding file
+    Dispatch(Dispatch),
+}
+
+impl<'a> Iterator for FsIter<'a> {
+    type Item = (HgPathBuf, StatusShortcut);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // If any paths have already been `Dispatch`-ed, return them
+        if let Some(res) = self.shortcuts.pop_front() {
+            return Some(res);
+        }
+
+        while let Some((base_path, node)) = self.to_visit.pop_front() {
+            match &node.kind {
+                NodeKind::Directory(dir) => {
+                    let canonical_path = HgPath::new(&base_path);
+                    if self.directory_became_symlink(canonical_path) {
+                        // Potential security issue, don't do a normal
+                        // traversal, force the results.
+                        self.dispatch_symlinked_directory(
+                            canonical_path,
+                            &node,
+                        );
+                        continue;
+                    }
+                    add_children_to_visit(
+                        &mut self.to_visit,
+                        &base_path,
+                        &dir,
+                    );
+                    if let Some(file) = &dir.was_file {
+                        return Some((
+                            HgPathBuf::from_bytes(&base_path),
+                            StatusShortcut::Entry(file.entry),
+                        ));
+                    }
+                }
+                NodeKind::File(file) => {
+                    if let Some(dir) = &file.was_directory {
+                        add_children_to_visit(
+                            &mut self.to_visit,
+                            &base_path,
+                            &dir,
+                        );
+                    }
+                    return Some((
+                        HgPathBuf::from_bytes(&base_path),
+                        StatusShortcut::Entry(file.entry),
+                    ));
+                }
+            }
+        }
+
+        None
+    }
+}
+
+impl<'a> FusedIterator for FsIter<'a> {}
+
+fn join_path<'a, 'b>(path: &'a [u8], other: &'b [u8]) -> Cow<'b, [u8]> {
+    if path.is_empty() {
+        other.into()
+    } else {
+        [path, &b"/"[..], other].concat().into()
+    }
+}
+
+/// Adds all children of a given directory `dir` to the visit queue `to_visit`
+/// prefixed by a `base_path`.
+fn add_children_to_visit<'a>(
+    to_visit: &mut VecDeque<(Cow<'a, [u8]>, &'a Node)>,
+    base_path: &[u8],
+    dir: &'a Directory,
+) {
+    to_visit.extend(dir.children.iter().map(|(path, child)| {
+        let full_path = join_path(&base_path, &path);
+        (full_path, child)
+    }));
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::utils::hg_path::HgPath;
+    use crate::{EntryState, FastHashMap};
+    use std::collections::HashSet;
+
+    #[test]
+    fn test_iteration() {
+        let mut tree = Tree::new();
+
+        assert_eq!(
+            tree.insert(
+                HgPathBuf::from_bytes(b"foo/bar"),
+                DirstateEntry {
+                    state: EntryState::Merged,
+                    mode: 41,
+                    mtime: 42,
+                    size: 43,
+                }
+            ),
+            None
+        );
+
+        assert_eq!(
+            tree.insert(
+                HgPathBuf::from_bytes(b"foo2"),
+                DirstateEntry {
+                    state: EntryState::Merged,
+                    mode: 40,
+                    mtime: 41,
+                    size: 42,
+                }
+            ),
+            None
+        );
+
+        assert_eq!(
+            tree.insert(
+                HgPathBuf::from_bytes(b"foo/baz"),
+                DirstateEntry {
+                    state: EntryState::Normal,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0,
+                }
+            ),
+            None
+        );
+
+        assert_eq!(
+            tree.insert(
+                HgPathBuf::from_bytes(b"foo/bap/nested"),
+                DirstateEntry {
+                    state: EntryState::Normal,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0,
+                }
+            ),
+            None
+        );
+
+        assert_eq!(tree.len(), 4);
+
+        let results: HashSet<_> =
+            tree.iter().map(|(c, _)| c.to_owned()).collect();
+        dbg!(&results);
+        assert!(results.contains(HgPath::new(b"foo2")));
+        assert!(results.contains(HgPath::new(b"foo/bar")));
+        assert!(results.contains(HgPath::new(b"foo/baz")));
+        assert!(results.contains(HgPath::new(b"foo/bap/nested")));
+
+        let mut iter = tree.iter();
+        assert!(iter.next().is_some());
+        assert!(iter.next().is_some());
+        assert!(iter.next().is_some());
+        assert!(iter.next().is_some());
+        assert_eq!(None, iter.next());
+        assert_eq!(None, iter.next());
+        drop(iter);
+
+        assert_eq!(
+            tree.insert(
+                HgPathBuf::from_bytes(b"foo/bap/nested/a"),
+                DirstateEntry {
+                    state: EntryState::Normal,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0,
+                }
+            ),
+            None
+        );
+
+        let results: FastHashMap<_, _> = tree.iter().collect();
+        assert!(results.contains_key(HgPath::new(b"foo2")));
+        assert!(results.contains_key(HgPath::new(b"foo/bar")));
+        assert!(results.contains_key(HgPath::new(b"foo/baz")));
+        // Is a dir, but has a `was_file`, so it's listed as a removed file
+        assert!(results.contains_key(HgPath::new(b"foo/bap/nested")));
+        assert!(results.contains_key(HgPath::new(b"foo/bap/nested/a")));
+
+        // insert removed file (now directory) after nested file
+        assert_eq!(
+            tree.insert(
+                HgPathBuf::from_bytes(b"a/a"),
+                DirstateEntry {
+                    state: EntryState::Normal,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0,
+                }
+            ),
+            None
+        );
+
+        // `insert` returns `None` for a directory
+        assert_eq!(
+            tree.insert(
+                HgPathBuf::from_bytes(b"a"),
+                DirstateEntry {
+                    state: EntryState::Removed,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0,
+                }
+            ),
+            None
+        );
+
+        let results: FastHashMap<_, _> = tree.iter().collect();
+        assert!(results.contains_key(HgPath::new(b"a")));
+        assert!(results.contains_key(HgPath::new(b"a/a")));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate/dirstate_tree/node.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,395 @@
+// node.rs
+//
+// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use super::iter::Iter;
+use crate::utils::hg_path::HgPathBuf;
+use crate::{DirstateEntry, EntryState, FastHashMap};
+
+/// Represents a filesystem directory in the dirstate tree
+#[derive(Debug, Default, Clone, PartialEq)]
+pub struct Directory {
+    /// Contains the old file information if it existed between changesets.
+    /// Happens if a file `foo` is marked as removed, removed from the
+    /// filesystem, then a directory `foo` is created and at least one of its
+    /// descendants is added to Mercurial.
+    pub(super) was_file: Option<Box<File>>,
+    pub(super) children: FastHashMap<Vec<u8>, Node>,
+}
+
+/// Represents a filesystem file (or symlink) in the dirstate tree
+#[derive(Debug, Clone, PartialEq)]
+pub struct File {
+    /// Contains the old structure if it existed between changesets.
+    /// Happens when all descendants of `foo` are marked as removed and
+    /// removed from the filesystem, then a file `foo` is created and added
+    /// to Mercurial.
+    pub(super) was_directory: Option<Box<Directory>>,
+    pub(super) entry: DirstateEntry,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum NodeKind {
+    Directory(Directory),
+    File(File),
+}
+
+#[derive(Debug, Default, Clone, PartialEq)]
+pub struct Node {
+    pub kind: NodeKind,
+}
+
+impl Default for NodeKind {
+    fn default() -> Self {
+        NodeKind::Directory(Default::default())
+    }
+}
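+// Note (sketch): a default `Node` is therefore an empty `Directory`, which
+// is exactly what `Tree::new` uses as its root.
+//
+//     let node = Node::default();
+//     assert!(matches!(node.kind, NodeKind::Directory(_)));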
+
+impl Node {
+    pub fn insert(
+        &mut self,
+        path: &[u8],
+        new_entry: DirstateEntry,
+    ) -> InsertResult {
+        let mut split = path.splitn(2, |&c| c == b'/');
+        let head = split.next().unwrap_or(b"");
+        let tail = split.next().unwrap_or(b"");
+
+        // Are we modifying the current file? Is this the end of the path?
+        let is_current_file = tail.is_empty() && head.is_empty();
+
+        if let NodeKind::File(file) = &mut self.kind {
+            if is_current_file {
+                let new = Self {
+                    kind: NodeKind::File(File {
+                        entry: new_entry,
+                        ..file.clone()
+                    }),
+                };
+                return InsertResult {
+                    did_insert: false,
+                    old_entry: Some(std::mem::replace(self, new)),
+                };
+            } else {
+                match file.entry.state {
+                    // Only replace the current file with a directory if it's
+                    // marked as `Removed`
+                    EntryState::Removed => {
+                        self.kind = NodeKind::Directory(Directory {
+                            was_file: Some(Box::from(file.clone())),
+                            children: Default::default(),
+                        })
+                    }
+                    _ => {
+                        return Node::insert_in_file(
+                            file, new_entry, head, tail,
+                        )
+                    }
+                }
+            }
+        }
+
+        match &mut self.kind {
+            NodeKind::Directory(directory) => {
+                Node::insert_in_directory(directory, new_entry, head, tail)
+            }
+            NodeKind::File(_) => {
+                unreachable!("The file case has already been handled")
+            }
+        }
+    }
+
+    /// The current file still exists and is not marked as `Removed`.
+    /// Insert the entry in its `was_directory`.
+    fn insert_in_file(
+        file: &mut File,
+        new_entry: DirstateEntry,
+        head: &[u8],
+        tail: &[u8],
+    ) -> InsertResult {
+        if let Some(d) = &mut file.was_directory {
+            Node::insert_in_directory(d, new_entry, head, tail)
+        } else {
+            let mut dir = Directory {
+                was_file: None,
+                children: FastHashMap::default(),
+            };
+            let res =
+                Node::insert_in_directory(&mut dir, new_entry, head, tail);
+            file.was_directory = Some(Box::new(dir));
+            res
+        }
+    }
+
+    /// Insert an entry in the subtree of `directory`
+    fn insert_in_directory(
+        directory: &mut Directory,
+        new_entry: DirstateEntry,
+        head: &[u8],
+        tail: &[u8],
+    ) -> InsertResult {
+        let mut res = InsertResult::default();
+
+        if let Some(node) = directory.children.get_mut(head) {
+            // Node exists
+            match &mut node.kind {
+                NodeKind::Directory(subdir) => {
+                    if tail.is_empty() {
+                        let becomes_file = Self {
+                            kind: NodeKind::File(File {
+                                was_directory: Some(Box::from(subdir.clone())),
+                                entry: new_entry,
+                            }),
+                        };
+                        let old_entry = directory
+                            .children
+                            .insert(head.to_owned(), becomes_file);
+                        return InsertResult {
+                            did_insert: true,
+                            old_entry,
+                        };
+                    } else {
+                        res = node.insert(tail, new_entry);
+                    }
+                }
+                NodeKind::File(_) => {
+                    res = node.insert(tail, new_entry);
+                }
+            }
+        } else if tail.is_empty() {
+            // File does not already exist
+            directory.children.insert(
+                head.to_owned(),
+                Self {
+                    kind: NodeKind::File(File {
+                        was_directory: None,
+                        entry: new_entry,
+                    }),
+                },
+            );
+            res.did_insert = true;
+        } else {
+            // Directory does not already exist
+            let mut nested = Self {
+                kind: NodeKind::Directory(Directory {
+                    was_file: None,
+                    children: Default::default(),
+                }),
+            };
+            res = nested.insert(tail, new_entry);
+            directory.children.insert(head.to_owned(), nested);
+        }
+        res
+    }
+
+    /// Removes an entry from the tree, returns a `RemoveResult`.
+    pub fn remove(&mut self, path: &[u8]) -> RemoveResult {
+        let empty_result = RemoveResult::default();
+        if path.is_empty() {
+            return empty_result;
+        }
+        let mut split = path.splitn(2, |&c| c == b'/');
+        let head = split.next();
+        let tail = split.next().unwrap_or(b"");
+
+        let head = match head {
+            None => {
+                return empty_result;
+            }
+            Some(h) => h,
+        };
+        if head == path {
+            match &mut self.kind {
+                NodeKind::Directory(d) => {
+                    return Node::remove_from_directory(head, d);
+                }
+                NodeKind::File(f) => {
+                    if let Some(d) = &mut f.was_directory {
+                        let RemoveResult { old_entry, .. } =
+                            Node::remove_from_directory(head, d);
+                        return RemoveResult {
+                            cleanup: false,
+                            old_entry,
+                        };
+                    }
+                }
+            }
+            empty_result
+        } else {
+            // Look into the dirs
+            match &mut self.kind {
+                NodeKind::Directory(d) => {
+                    if let Some(child) = d.children.get_mut(head) {
+                        let mut res = child.remove(tail);
+                        if res.cleanup {
+                            d.children.remove(head);
+                        }
+                        res.cleanup =
+                            d.children.is_empty() && d.was_file.is_none();
+                        res
+                    } else {
+                        empty_result
+                    }
+                }
+                NodeKind::File(f) => {
+                    if let Some(d) = &mut f.was_directory {
+                        if let Some(child) = d.children.get_mut(head) {
+                            let RemoveResult { cleanup, old_entry } =
+                                child.remove(tail);
+                            if cleanup {
+                                d.children.remove(head);
+                            }
+                            if d.children.is_empty() && d.was_file.is_none() {
+                                f.was_directory = None;
+                            }
+
+                            return RemoveResult {
+                                cleanup: false,
+                                old_entry,
+                            };
+                        }
+                    }
+                    empty_result
+                }
+            }
+        }
+    }
+
+    fn remove_from_directory(head: &[u8], d: &mut Directory) -> RemoveResult {
+        if let Some(node) = d.children.get_mut(head) {
+            return match &mut node.kind {
+                NodeKind::Directory(d) => {
+                    if let Some(f) = &mut d.was_file {
+                        let entry = f.entry;
+                        d.was_file = None;
+                        RemoveResult {
+                            cleanup: false,
+                            old_entry: Some(entry),
+                        }
+                    } else {
+                        RemoveResult::default()
+                    }
+                }
+                NodeKind::File(f) => {
+                    let entry = f.entry;
+                    let mut cleanup = false;
+                    match &f.was_directory {
+                        None => {
+                            if d.children.len() == 1 {
+                                cleanup = true;
+                            }
+                            d.children.remove(head);
+                        }
+                        Some(dir) => {
+                            node.kind = NodeKind::Directory(*dir.clone());
+                        }
+                    }
+
+                    RemoveResult {
+                        cleanup,
+                        old_entry: Some(entry),
+                    }
+                }
+            };
+        }
+        RemoveResult::default()
+    }
+
+    pub fn get(&self, path: &[u8]) -> Option<&Node> {
+        if path.is_empty() {
+            return Some(&self);
+        }
+        let mut split = path.splitn(2, |&c| c == b'/');
+        let head = split.next();
+        let tail = split.next().unwrap_or(b"");
+
+        let head = match head {
+            None => {
+                return Some(&self);
+            }
+            Some(h) => h,
+        };
+        match &self.kind {
+            NodeKind::Directory(d) => {
+                if let Some(child) = d.children.get(head) {
+                    return child.get(tail);
+                }
+            }
+            NodeKind::File(f) => {
+                if let Some(d) = &f.was_directory {
+                    if let Some(child) = d.children.get(head) {
+                        return child.get(tail);
+                    }
+                }
+            }
+        }
+
+        None
+    }
+
+    pub fn get_mut(&mut self, path: &[u8]) -> Option<&mut NodeKind> {
+        if path.is_empty() {
+            return Some(&mut self.kind);
+        }
+        let mut split = path.splitn(2, |&c| c == b'/');
+        let head = split.next();
+        let tail = split.next().unwrap_or(b"");
+
+        let head = match head {
+            None => {
+                return Some(&mut self.kind);
+            }
+            Some(h) => h,
+        };
+        match &mut self.kind {
+            NodeKind::Directory(d) => {
+                if let Some(child) = d.children.get_mut(head) {
+                    return child.get_mut(tail);
+                }
+            }
+            NodeKind::File(f) => {
+                if let Some(d) = &mut f.was_directory {
+                    if let Some(child) = d.children.get_mut(head) {
+                        return child.get_mut(tail);
+                    }
+                }
+            }
+        }
+
+        None
+    }
+
+    pub fn iter(&self) -> Iter {
+        Iter::new(self)
+    }
+}
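+// Rough usage sketch (illustrative; `entry` stands for any `DirstateEntry`):
+//
+//     let mut root = Node::default();          // an empty directory
+//     root.insert(b"foo/bar", entry);          // creates dir "foo", file "bar"
+//     assert!(root.get(b"foo/bar").is_some()); // lookup walks the path
+//     let res = root.remove(b"foo/bar");       // res.cleanup: drop empty dirs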
+
+/// Information returned to the caller of an `insert` operation for integrity.
+#[derive(Debug, Default)]
+pub struct InsertResult {
+    /// Whether the insertion resulted in an actual insertion and not an
+    /// update
+    pub(super) did_insert: bool,
+    /// The entry that was replaced, if it exists
+    pub(super) old_entry: Option<Node>,
+}
+
+/// Information returned to the caller of a `remove` operation for integrity.
+#[derive(Debug, Default)]
+pub struct RemoveResult {
+    /// Whether the caller needs to remove the current node
+    pub(super) cleanup: bool,
+    /// The entry that was replaced, if it exists
+    pub(super) old_entry: Option<DirstateEntry>,
+}
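+// How `cleanup` propagates (sketch): when removing `a/b/c` leaves directory
+// `b` empty, the recursive call returns `cleanup: true`, so the parent drops
+// `b` from its `children`; the parent then reports its own emptiness upwards,
+// unless it has a `was_file` to preserve.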
+
+impl<'a> IntoIterator for &'a Node {
+    type Item = (HgPathBuf, DirstateEntry);
+    type IntoIter = Iter<'a>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate/dirstate_tree/tree.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,682 @@
+// tree.rs
+//
+// Copyright 2020, Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use super::iter::Iter;
+use super::node::{Directory, Node, NodeKind};
+use crate::dirstate::dirstate_tree::iter::FsIter;
+use crate::dirstate::dirstate_tree::node::{InsertResult, RemoveResult};
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::DirstateEntry;
+use std::path::PathBuf;
+
+/// A specialized tree to represent the Mercurial dirstate.
+///
+/// # Advantages over a flat structure
+///
+/// The dirstate is inherently hierarchical, since it's a representation of the
+/// file structure of the project. The current dirstate format is flat, and
+/// while that affords us potentially fast (unordered) iteration, the need
+/// to retrieve a given path is frequent enough that some kind of hashmap or
+/// tree is needed in a lot of cases anyway.
+///
+/// Going with a tree allows us to be smarter:
+///   - Skipping an ignored directory means we don't visit its entire subtree
+///   - Security auditing does not need to reconstruct paths backwards to
+///     check for symlinked directories, since this can be done during the
+///     iteration in a very efficient fashion
+///   - We don't need to build the directory information in another struct,
+///     which simplifies the code a lot, reduces the memory footprint and is
+///     potentially faster depending on the implementation.
+///   - We can use it to store a (platform-dependent) caching mechanism [1]
+///   - And probably other types of optimizations.
+///
+/// Only the first two items in this list are implemented as of this commit.
+///
+/// [1]: https://www.mercurial-scm.org/wiki/DirsCachePlan
+///
+///
+/// # Structure
+///
+/// It's a prefix (radix) tree with no fixed arity, at the granularity of a
+/// folder, allowing it to mimic a filesystem hierarchy:
+///
+/// ```text
+/// foo/bar
+/// foo/baz
+/// test
+/// ```
+/// Will be represented (simplified) by:
+///
+/// ```text
+/// Directory(root):
+///   - File("test")
+///   - Directory("foo"):
+///     - File("bar")
+///     - File("baz")
+/// ```
+///
+/// Moreover, it is special-cased for storing the dirstate: it handles cases
+/// that a flat `HashMap` would handle trivially, while preserving the
+/// hierarchy.
+/// For example:
+///
+/// ```shell
+/// $ touch foo
+/// $ hg add foo
+/// $ hg commit -m "foo"
+/// $ hg remove foo
+/// $ rm foo
+/// $ mkdir foo
+/// $ touch foo/a
+/// $ hg add foo/a
+/// $ hg status
+///   R foo
+///   A foo/a
+/// ```
+/// To represent this in a tree, one needs to keep track of whether any given
+/// file was a directory and whether any given directory was a file at the last
+/// dirstate update. This tree stores that information, but only in the right
+/// circumstances, by respecting the high-level rules that prevent nonsensical
+/// structures from existing:
+///     - a file can only be added as a child of another file if the latter is
+///       marked as `Removed`
+///     - a file cannot replace a folder unless all its descendants are removed
+///
+/// This second rule is not checked by the tree for performance reasons, and
+/// because high-level logic already prevents that state from happening.
+///
+/// # Ordering
+///
+/// It makes no guarantee of ordering for now.
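+///
+/// # Example
+///
+/// A minimal usage sketch (illustrative; assumes this module is reachable
+/// from the caller and that iteration order is unspecified):
+///
+/// ```ignore
+/// let mut tree = Tree::new();
+/// let entry = DirstateEntry {
+///     state: EntryState::Normal,
+///     mode: 0,
+///     mtime: 0,
+///     size: 0,
+/// };
+/// assert_eq!(tree.insert(HgPathBuf::from_bytes(b"foo/bar"), entry), None);
+/// assert!(tree.contains_key(HgPath::new(b"foo/bar")));
+/// assert_eq!(tree.len(), 1);
+/// ```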
+#[derive(Debug, Default, Clone, PartialEq)]
+pub struct Tree {
+    pub root: Node,
+    files_count: usize,
+}
+
+impl Tree {
+    pub fn new() -> Self {
+        Self {
+            root: Node {
+                kind: NodeKind::Directory(Directory {
+                    was_file: None,
+                    children: Default::default(),
+                }),
+            },
+            files_count: 0,
+        }
+    }
+
+    /// How many files (not directories) are stored in the tree, including ones
+    /// marked as `Removed`.
+    pub fn len(&self) -> usize {
+        self.files_count
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Inserts a file in the tree and returns the previous entry if any.
+    pub fn insert(
+        &mut self,
+        path: impl AsRef<HgPath>,
+        kind: DirstateEntry,
+    ) -> Option<DirstateEntry> {
+        let old = self.insert_node(path, kind);
+        match old?.kind {
+            NodeKind::Directory(_) => None,
+            NodeKind::File(f) => Some(f.entry),
+        }
+    }
+
+    /// Low-level insertion method that returns the previous node (directories
+    /// included).
+    fn insert_node(
+        &mut self,
+        path: impl AsRef<HgPath>,
+        kind: DirstateEntry,
+    ) -> Option<Node> {
+        let InsertResult {
+            did_insert,
+            old_entry,
+        } = self.root.insert(path.as_ref().as_bytes(), kind);
+        self.files_count += if did_insert { 1 } else { 0 };
+        old_entry
+    }
+
+    /// Returns a reference to a node if it exists.
+    pub fn get_node(&self, path: impl AsRef<HgPath>) -> Option<&Node> {
+        self.root.get(path.as_ref().as_bytes())
+    }
+
+    /// Returns a reference to the entry corresponding to `path` if it exists.
+    pub fn get(&self, path: impl AsRef<HgPath>) -> Option<&DirstateEntry> {
+        if let Some(node) = self.get_node(&path) {
+            return match &node.kind {
+                NodeKind::Directory(d) => {
+                    d.was_file.as_ref().map(|f| &f.entry)
+                }
+                NodeKind::File(f) => Some(&f.entry),
+            };
+        }
+        None
+    }
+
+    /// Returns `true` if an entry is found for the given `path`.
+    pub fn contains_key(&self, path: impl AsRef<HgPath>) -> bool {
+        self.get(path).is_some()
+    }
+
+    /// Returns a mutable reference to the entry corresponding to `path` if it
+    /// exists.
+    pub fn get_mut(
+        &mut self,
+        path: impl AsRef<HgPath>,
+    ) -> Option<&mut DirstateEntry> {
+        if let Some(kind) = self.root.get_mut(path.as_ref().as_bytes()) {
+            return match kind {
+                NodeKind::Directory(d) => {
+                    d.was_file.as_mut().map(|f| &mut f.entry)
+                }
+                NodeKind::File(f) => Some(&mut f.entry),
+            };
+        }
+        None
+    }
+
+    /// Returns an iterator over the paths and corresponding entries in the
+    /// tree.
+    pub fn iter(&self) -> Iter {
+        Iter::new(&self.root)
+    }
+
+    /// Returns an iterator over all entries in the tree, with special
+    /// filesystem handling for the directories containing said entries. See
+    /// the documentation of `FsIter` for more.
+    pub fn fs_iter(&self, root_dir: PathBuf) -> FsIter {
+        FsIter::new(&self.root, root_dir)
+    }
+
+    /// Removes the entry at `path` and returns it, if it exists.
+    pub fn remove(
+        &mut self,
+        path: impl AsRef<HgPath>,
+    ) -> Option<DirstateEntry> {
+        let RemoveResult { old_entry, .. } =
+            self.root.remove(path.as_ref().as_bytes());
+        self.files_count = self
+            .files_count
+            .checked_sub(if old_entry.is_some() { 1 } else { 0 })
+            .expect("removed too many files");
+        old_entry
+    }
+}
+
+impl<P: AsRef<HgPath>> Extend<(P, DirstateEntry)> for Tree {
+    fn extend<T: IntoIterator<Item = (P, DirstateEntry)>>(&mut self, iter: T) {
+        for (path, entry) in iter {
+            self.insert(path, entry);
+        }
+    }
+}
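+// Sketch: `Extend` allows building a tree from any iterator of
+// `(path, entry)` pairs, e.g. (hypothetically) entries coming out of a
+// dirstate parse:
+//
+//     let mut tree = Tree::new();
+//     tree.extend(entries.into_iter().map(|(p, e)| (p.to_owned(), e)));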
+
+impl<'a> IntoIterator for &'a Tree {
+    type Item = (HgPathBuf, DirstateEntry);
+    type IntoIter = Iter<'a>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::dirstate::dirstate_tree::node::File;
+    use crate::{EntryState, FastHashMap};
+    use pretty_assertions::assert_eq;
+
+    impl Node {
+        /// Shortcut for getting children of a node in tests.
+        fn children(&self) -> Option<&FastHashMap<Vec<u8>, Node>> {
+            match &self.kind {
+                NodeKind::Directory(d) => Some(&d.children),
+                NodeKind::File(_) => None,
+            }
+        }
+    }
+
+    #[test]
+    fn test_dirstate_tree() {
+        let mut tree = Tree::new();
+
+        assert_eq!(
+            tree.insert_node(
+                HgPath::new(b"we/p"),
+                DirstateEntry {
+                    state: EntryState::Normal,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0
+                }
+            ),
+            None
+        );
+        dbg!(&tree);
+        assert!(tree.get_node(HgPath::new(b"we")).is_some());
+        let entry = DirstateEntry {
+            state: EntryState::Merged,
+            mode: 41,
+            mtime: 42,
+            size: 43,
+        };
+        assert_eq!(tree.insert_node(HgPath::new(b"foo/bar"), entry), None);
+        assert_eq!(
+            tree.get_node(HgPath::new(b"foo/bar")),
+            Some(&Node {
+                kind: NodeKind::File(File {
+                    was_directory: None,
+                    entry
+                })
+            })
+        );
+        // We didn't override the first entry we made
+        assert!(tree.get_node(HgPath::new(b"we")).is_some(),);
+        // Inserting the same key again
+        assert_eq!(
+            tree.insert_node(HgPath::new(b"foo/bar"), entry),
+            Some(Node {
+                kind: NodeKind::File(File {
+                    was_directory: None,
+                    entry
+                }),
+            })
+        );
+        // Inserting two levels deep
+        assert_eq!(tree.insert_node(HgPath::new(b"foo/bar/baz"), entry), None);
+        // Getting a file "inside a file" should return `None`
+        assert_eq!(tree.get_node(HgPath::new(b"foo/bar/baz/bap"),), None);
+
+        assert_eq!(
+            tree.insert_node(HgPath::new(b"wasdir/subfile"), entry),
+            None,
+        );
+        let removed_entry = DirstateEntry {
+            state: EntryState::Removed,
+            mode: 0,
+            mtime: 0,
+            size: 0,
+        };
+        assert!(tree
+            .insert_node(HgPath::new(b"wasdir"), removed_entry)
+            .is_some());
+
+        assert_eq!(
+            tree.get_node(HgPath::new(b"wasdir")),
+            Some(&Node {
+                kind: NodeKind::File(File {
+                    was_directory: Some(Box::new(Directory {
+                        was_file: None,
+                        children: [(
+                            b"subfile".to_vec(),
+                            Node {
+                                kind: NodeKind::File(File {
+                                    was_directory: None,
+                                    entry,
+                                })
+                            }
+                        )]
+                        .to_vec()
+                        .into_iter()
+                        .collect()
+                    })),
+                    entry: removed_entry
+                })
+            })
+        );
+
+        assert!(tree.get(HgPath::new(b"wasdir/subfile")).is_some())
+    }
+
+    #[test]
+    fn test_insert_removed() {
+        let mut tree = Tree::new();
+        let entry = DirstateEntry {
+            state: EntryState::Merged,
+            mode: 1,
+            mtime: 2,
+            size: 3,
+        };
+        let removed_entry = DirstateEntry {
+            state: EntryState::Removed,
+            mode: 10,
+            mtime: 20,
+            size: 30,
+        };
+        assert_eq!(tree.insert_node(HgPath::new(b"foo"), entry), None);
+        assert_eq!(
+            tree.insert_node(HgPath::new(b"foo/a"), removed_entry),
+            None
+        );
+        // The insert should not turn `foo` into a directory as `foo` is not
+        // `Removed`.
+        match tree.get_node(HgPath::new(b"foo")).unwrap().kind {
+            NodeKind::Directory(_) => panic!("should be a file"),
+            NodeKind::File(_) => {}
+        }
+
+        let mut tree = Tree::new();
+        let entry = DirstateEntry {
+            state: EntryState::Merged,
+            mode: 1,
+            mtime: 2,
+            size: 3,
+        };
+        let removed_entry = DirstateEntry {
+            state: EntryState::Removed,
+            mode: 10,
+            mtime: 20,
+            size: 30,
+        };
+        // The insert *should* turn `foo` into a directory as it is `Removed`.
+        assert_eq!(tree.insert_node(HgPath::new(b"foo"), removed_entry), None);
+        assert_eq!(tree.insert_node(HgPath::new(b"foo/a"), entry), None);
+        match tree.get_node(HgPath::new(b"foo")).unwrap().kind {
+            NodeKind::Directory(_) => {}
+            NodeKind::File(_) => panic!("should be a directory"),
+        }
+    }
+
+    #[test]
+    fn test_get() {
+        let mut tree = Tree::new();
+        let entry = DirstateEntry {
+            state: EntryState::Merged,
+            mode: 1,
+            mtime: 2,
+            size: 3,
+        };
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
+        assert_eq!(tree.files_count, 1);
+        assert_eq!(tree.get(HgPath::new(b"a/b/c")), Some(&entry));
+        assert_eq!(tree.get(HgPath::new(b"a/b")), None);
+        assert_eq!(tree.get(HgPath::new(b"a")), None);
+        assert_eq!(tree.get(HgPath::new(b"a/b/c/d")), None);
+        let entry2 = DirstateEntry {
+            state: EntryState::Removed,
+            mode: 0,
+            mtime: 5,
+            size: 1,
+        };
+        // was_directory
+        assert_eq!(tree.insert(HgPath::new(b"a/b"), entry2), None);
+        assert_eq!(tree.files_count, 2);
+        assert_eq!(tree.get(HgPath::new(b"a/b")), Some(&entry2));
+        assert_eq!(tree.get(HgPath::new(b"a/b/c")), Some(&entry));
+
+        let mut tree = Tree::new();
+
+        // was_file
+        assert_eq!(tree.insert_node(HgPath::new(b"a"), entry), None);
+        assert_eq!(tree.files_count, 1);
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b"), entry2), None);
+        assert_eq!(tree.files_count, 2);
+        assert_eq!(tree.get(HgPath::new(b"a/b")), Some(&entry2));
+    }
+
+    #[test]
+    fn test_get_mut() {
+        let mut tree = Tree::new();
+        let mut entry = DirstateEntry {
+            state: EntryState::Merged,
+            mode: 1,
+            mtime: 2,
+            size: 3,
+        };
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
+        assert_eq!(tree.files_count, 1);
+        assert_eq!(tree.get_mut(HgPath::new(b"a/b/c")), Some(&mut entry));
+        assert_eq!(tree.get_mut(HgPath::new(b"a/b")), None);
+        assert_eq!(tree.get_mut(HgPath::new(b"a")), None);
+        assert_eq!(tree.get_mut(HgPath::new(b"a/b/c/d")), None);
+        let mut entry2 = DirstateEntry {
+            state: EntryState::Removed,
+            mode: 0,
+            mtime: 5,
+            size: 1,
+        };
+        // was_directory
+        assert_eq!(tree.insert(HgPath::new(b"a/b"), entry2), None);
+        assert_eq!(tree.files_count, 2);
+        assert_eq!(tree.get_mut(HgPath::new(b"a/b")), Some(&mut entry2));
+        assert_eq!(tree.get_mut(HgPath::new(b"a/b/c")), Some(&mut entry));
+
+        let mut tree = Tree::new();
+
+        // was_file
+        assert_eq!(tree.insert_node(HgPath::new(b"a"), entry), None);
+        assert_eq!(tree.files_count, 1);
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b"), entry2), None);
+        assert_eq!(tree.files_count, 2);
+        assert_eq!(tree.get_mut(HgPath::new(b"a/b")), Some(&mut entry2));
+    }
+
+    #[test]
+    fn test_remove() {
+        let mut tree = Tree::new();
+        assert_eq!(tree.files_count, 0);
+        assert_eq!(tree.remove(HgPath::new(b"foo")), None);
+        assert_eq!(tree.files_count, 0);
+
+        let entry = DirstateEntry {
+            state: EntryState::Normal,
+            mode: 0,
+            mtime: 0,
+            size: 0,
+        };
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
+        assert_eq!(tree.files_count, 1);
+
+        assert_eq!(tree.remove(HgPath::new(b"a/b/c")), Some(entry));
+        assert_eq!(tree.files_count, 0);
+
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/x"), entry), None);
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/y"), entry), None);
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/z"), entry), None);
+        assert_eq!(tree.insert_node(HgPath::new(b"x"), entry), None);
+        assert_eq!(tree.insert_node(HgPath::new(b"y"), entry), None);
+        assert_eq!(tree.files_count, 5);
+
+        assert_eq!(tree.remove(HgPath::new(b"a/b/x")), Some(entry));
+        assert_eq!(tree.files_count, 4);
+        assert_eq!(tree.remove(HgPath::new(b"a/b/x")), None);
+        assert_eq!(tree.files_count, 4);
+        assert_eq!(tree.remove(HgPath::new(b"a/b/y")), Some(entry));
+        assert_eq!(tree.files_count, 3);
+        assert_eq!(tree.remove(HgPath::new(b"a/b/z")), Some(entry));
+        assert_eq!(tree.files_count, 2);
+
+        assert_eq!(tree.remove(HgPath::new(b"x")), Some(entry));
+        assert_eq!(tree.files_count, 1);
+        assert_eq!(tree.remove(HgPath::new(b"y")), Some(entry));
+        assert_eq!(tree.files_count, 0);
+
+        // `a` should have been cleaned up, no more files anywhere in its
+        // descendants
+        assert_eq!(tree.get_node(HgPath::new(b"a")), None);
+        assert_eq!(tree.root.children().unwrap().len(), 0);
+
+        let removed_entry = DirstateEntry {
+            state: EntryState::Removed,
+            ..entry
+        };
+        assert_eq!(tree.insert(HgPath::new(b"a"), removed_entry), None);
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/x"), entry), None);
+        assert_eq!(tree.files_count, 2);
+        dbg!(&tree);
+        assert_eq!(tree.remove(HgPath::new(b"a")), Some(removed_entry));
+        assert_eq!(tree.files_count, 1);
+        dbg!(&tree);
+        assert_eq!(tree.remove(HgPath::new(b"a/b/x")), Some(entry));
+        assert_eq!(tree.files_count, 0);
+
+        // The entire tree should have been cleaned up, no more files anywhere
+        // in its descendants
+        assert_eq!(tree.root.children().unwrap().len(), 0);
+
+        let removed_entry = DirstateEntry {
+            state: EntryState::Removed,
+            ..entry
+        };
+        assert_eq!(tree.insert(HgPath::new(b"a"), entry), None);
+        assert_eq!(
+            tree.insert_node(HgPath::new(b"a/b/x"), removed_entry),
+            None
+        );
+        assert_eq!(tree.files_count, 2);
+        dbg!(&tree);
+        assert_eq!(tree.remove(HgPath::new(b"a")), Some(entry));
+        assert_eq!(tree.files_count, 1);
+        dbg!(&tree);
+        assert_eq!(tree.remove(HgPath::new(b"a/b/x")), Some(removed_entry));
+        assert_eq!(tree.files_count, 0);
+
+        dbg!(&tree);
+        // The entire tree should have been cleaned up, no more files anywhere
+        // in its descendants
+        assert_eq!(tree.root.children().unwrap().len(), 0);
+
+        assert_eq!(tree.insert(HgPath::new(b"d"), entry), None);
+        assert_eq!(tree.insert(HgPath::new(b"d/d/d"), entry), None);
+        assert_eq!(tree.files_count, 2);
+
+        // Deleting the nested file should not delete the top directory as it
+        // used to be a file
+        assert_eq!(tree.remove(HgPath::new(b"d/d/d")), Some(entry));
+        assert_eq!(tree.files_count, 1);
+        assert!(tree.get_node(HgPath::new(b"d")).is_some());
+        assert!(tree.remove(HgPath::new(b"d")).is_some());
+        assert_eq!(tree.files_count, 0);
+
+        // Deleting the nested file should not delete the top file (the other
+        // way around from the last case)
+        assert_eq!(tree.insert(HgPath::new(b"a/a"), entry), None);
+        assert_eq!(tree.files_count, 1);
+        assert_eq!(tree.insert(HgPath::new(b"a"), entry), None);
+        assert_eq!(tree.files_count, 2);
+        dbg!(&tree);
+        assert_eq!(tree.remove(HgPath::new(b"a/a")), Some(entry));
+        assert_eq!(tree.files_count, 1);
+        dbg!(&tree);
+        assert!(tree.get_node(HgPath::new(b"a")).is_some());
+        assert!(tree.get_node(HgPath::new(b"a/a")).is_none());
+    }
+
+    #[test]
+    fn test_was_directory() {
+        let mut tree = Tree::new();
+
+        let entry = DirstateEntry {
+            state: EntryState::Removed,
+            mode: 0,
+            mtime: 0,
+            size: 0,
+        };
+        assert_eq!(tree.insert_node(HgPath::new(b"a/b/c"), entry), None);
+        assert_eq!(tree.files_count, 1);
+
+        assert!(tree.insert_node(HgPath::new(b"a"), entry).is_some());
+        let new_a = tree.root.children().unwrap().get(&b"a".to_vec()).unwrap();
+
+        match &new_a.kind {
+            NodeKind::Directory(_) => panic!(),
+            NodeKind::File(f) => {
+                let dir = f.was_directory.clone().unwrap();
+                let c = dir
+                    .children
+                    .get(&b"b".to_vec())
+                    .unwrap()
+                    .children()
+                    .unwrap()
+                    .get(&b"c".to_vec())
+                    .unwrap();
+
+                assert_eq!(
+                    match &c.kind {
+                        NodeKind::Directory(_) => panic!(),
+                        NodeKind::File(f) => f.entry,
+                    },
+                    entry
+                );
+            }
+        }
+        assert_eq!(tree.files_count, 2);
+        dbg!(&tree);
+        assert_eq!(tree.remove(HgPath::new(b"a/b/c")), Some(entry));
+        assert_eq!(tree.files_count, 1);
+        dbg!(&tree);
+        let a = tree.get_node(HgPath::new(b"a")).unwrap();
+        match &a.kind {
+            NodeKind::Directory(_) => panic!(),
+            NodeKind::File(f) => {
+                // Directory in `was_directory` was emptied, should be removed
+                assert_eq!(f.was_directory, None);
+            }
+        }
+    }
+
+    #[test]
+    fn test_extend() {
+        let insertions = [
+            (
+                HgPathBuf::from_bytes(b"d"),
+                DirstateEntry {
+                    state: EntryState::Added,
+                    mode: 0,
+                    mtime: -1,
+                    size: -1,
+                },
+            ),
+            (
+                HgPathBuf::from_bytes(b"b"),
+                DirstateEntry {
+                    state: EntryState::Normal,
+                    mode: 33188,
+                    mtime: 1599647984,
+                    size: 2,
+                },
+            ),
+            (
+                HgPathBuf::from_bytes(b"a/a"),
+                DirstateEntry {
+                    state: EntryState::Normal,
+                    mode: 33188,
+                    mtime: 1599647984,
+                    size: 2,
+                },
+            ),
+            (
+                HgPathBuf::from_bytes(b"d/d/d"),
+                DirstateEntry {
+                    state: EntryState::Removed,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0,
+                },
+            ),
+        ]
+        .to_vec();
+        let mut tree = Tree::new();
+
+        tree.extend(insertions.clone().into_iter());
+
+        for (path, _) in &insertions {
+            assert!(tree.contains_key(path));
+        }
+        assert_eq!(tree.files_count, 4);
+    }
+}
--- a/rust/hg-core/src/dirstate/parsers.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/dirstate/parsers.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -19,17 +19,21 @@
 /// Dirstate entries have a static part of 8 + 32 + 32 + 32 + 32 bits.
 const MIN_ENTRY_SIZE: usize = 17;
 
-// TODO parse/pack: is mutate-on-loop better for performance?
+type ParseResult<'a> = (
+    DirstateParents,
+    Vec<(&'a HgPath, DirstateEntry)>,
+    Vec<(&'a HgPath, &'a HgPath)>,
+);
 
 #[timed]
 pub fn parse_dirstate(
-    state_map: &mut StateMap,
-    copy_map: &mut CopyMap,
     contents: &[u8],
-) -> Result<DirstateParents, DirstateParseError> {
+) -> Result<ParseResult, DirstateParseError> {
     if contents.len() < PARENT_SIZE * 2 {
         return Err(DirstateParseError::TooLittleData);
     }
+    let mut copies = vec![];
+    let mut entries = vec![];
 
     let mut curr_pos = PARENT_SIZE * 2;
     let parents = DirstateParents {
@@ -63,27 +67,24 @@
         };
 
         if let Some(copy_path) = copy {
-            copy_map.insert(
-                HgPath::new(path).to_owned(),
-                HgPath::new(copy_path).to_owned(),
-            );
+            copies.push((HgPath::new(path), HgPath::new(copy_path)));
         };
-        state_map.insert(
-            HgPath::new(path).to_owned(),
+        entries.push((
+            HgPath::new(path),
             DirstateEntry {
                 state,
                 mode,
                 size,
                 mtime,
             },
-        );
+        ));
         curr_pos = curr_pos + MIN_ENTRY_SIZE + (path_len);
     }
-
-    Ok(parents)
+    Ok((parents, entries, copies))
 }
 
 /// `now` is the duration in seconds since the Unix epoch
+#[cfg(not(feature = "dirstate-tree"))]
 pub fn pack_dirstate(
     state_map: &mut StateMap,
     copy_map: &CopyMap,
@@ -106,6 +107,73 @@
     let expected_size = expected_size + PARENT_SIZE * 2;
 
     let mut packed = Vec::with_capacity(expected_size);
+
+    packed.extend(&parents.p1);
+    packed.extend(&parents.p2);
+
+    for (filename, entry) in state_map.iter_mut() {
+        let new_filename = filename.to_owned();
+        let mut new_mtime: i32 = entry.mtime;
+        if entry.state == EntryState::Normal && entry.mtime == now {
+            // The file was last modified "simultaneously" with the current
+            // write to dirstate (i.e. within the same second for file-
+            // systems with a granularity of 1 sec). This commonly happens
+            // for at least a couple of files on 'update'.
+            // The user could change the file without changing its size
+            // within the same second. Invalidate the file's mtime in
+            // dirstate, forcing future 'status' calls to compare the
+            // contents of the file if the size is the same. This prevents
+            // mistakenly treating such files as clean.
+            new_mtime = -1;
+            *entry = DirstateEntry {
+                mtime: new_mtime,
+                ..*entry
+            };
+        }
+        let mut new_filename = new_filename.into_vec();
+        if let Some(copy) = copy_map.get(filename) {
+            new_filename.push(b'\0');
+            new_filename.extend(copy.bytes());
+        }
+
+        packed.write_u8(entry.state.into())?;
+        packed.write_i32::<BigEndian>(entry.mode)?;
+        packed.write_i32::<BigEndian>(entry.size)?;
+        packed.write_i32::<BigEndian>(new_mtime)?;
+        packed.write_i32::<BigEndian>(new_filename.len() as i32)?;
+        packed.extend(new_filename)
+    }
+
+    if packed.len() != expected_size {
+        return Err(DirstatePackError::BadSize(expected_size, packed.len()));
+    }
+
+    Ok(packed)
+}
+
+/// `now` is the duration in seconds since the Unix epoch
+#[cfg(feature = "dirstate-tree")]
+pub fn pack_dirstate(
+    state_map: &mut StateMap,
+    copy_map: &CopyMap,
+    parents: DirstateParents,
+    now: Duration,
+) -> Result<Vec<u8>, DirstatePackError> {
+    // TODO move away from i32 before 2038.
+    let now: i32 = now.as_secs().try_into().expect("time overflow");
+
+    let expected_size: usize = state_map
+        .iter()
+        .map(|(filename, _)| {
+            let mut length = MIN_ENTRY_SIZE + filename.len();
+            if let Some(copy) = copy_map.get(&filename) {
+                length += copy.len() + 1;
+            }
+            length
+        })
+        .sum();
+    let expected_size = expected_size + PARENT_SIZE * 2;
+
+    let mut packed = Vec::with_capacity(expected_size);
     let mut new_state_map = vec![];
 
     packed.extend(&parents.p1);
@@ -129,12 +197,12 @@
                 filename.to_owned(),
                 DirstateEntry {
                     mtime: new_mtime,
-                    ..*entry
+                    ..entry
                 },
             ));
         }
         let mut new_filename = new_filename.into_vec();
-        if let Some(copy) = copy_map.get(filename) {
+        if let Some(copy) = copy_map.get(&filename) {
             new_filename.push(b'\0');
             new_filename.extend(copy.bytes());
         }
@@ -160,10 +228,11 @@
 mod tests {
     use super::*;
     use crate::{utils::hg_path::HgPathBuf, FastHashMap};
+    use pretty_assertions::assert_eq;
 
     #[test]
     fn test_pack_dirstate_empty() {
-        let mut state_map: StateMap = FastHashMap::default();
+        let mut state_map = StateMap::default();
         let copymap = FastHashMap::default();
         let parents = DirstateParents {
             p1: *b"12345678910111213141",
@@ -285,14 +354,17 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = FastHashMap::default();
-        let mut new_copy_map: CopyMap = FastHashMap::default();
-        let new_parents = parse_dirstate(
-            &mut new_state_map,
-            &mut new_copy_map,
-            result.as_slice(),
-        )
-        .unwrap();
+        let (new_parents, entries, copies) =
+            parse_dirstate(result.as_slice()).unwrap();
+        let new_state_map: StateMap = entries
+            .into_iter()
+            .map(|(path, entry)| (path.to_owned(), entry))
+            .collect();
+        let new_copy_map: CopyMap = copies
+            .into_iter()
+            .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+            .collect();
+
         assert_eq!(
             (parents, state_map, copymap),
             (new_parents, new_state_map, new_copy_map)
@@ -360,14 +432,17 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = FastHashMap::default();
-        let mut new_copy_map: CopyMap = FastHashMap::default();
-        let new_parents = parse_dirstate(
-            &mut new_state_map,
-            &mut new_copy_map,
-            result.as_slice(),
-        )
-        .unwrap();
+        let (new_parents, entries, copies) =
+            parse_dirstate(result.as_slice()).unwrap();
+        let new_state_map: StateMap = entries
+            .into_iter()
+            .map(|(path, entry)| (path.to_owned(), entry))
+            .collect();
+        let new_copy_map: CopyMap = copies
+            .into_iter()
+            .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+            .collect();
+
         assert_eq!(
             (parents, state_map, copymap),
             (new_parents, new_state_map, new_copy_map)
@@ -403,14 +478,16 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = FastHashMap::default();
-        let mut new_copy_map: CopyMap = FastHashMap::default();
-        let new_parents = parse_dirstate(
-            &mut new_state_map,
-            &mut new_copy_map,
-            result.as_slice(),
-        )
-        .unwrap();
+        let (new_parents, entries, copies) =
+            parse_dirstate(result.as_slice()).unwrap();
+        let new_state_map: StateMap = entries
+            .into_iter()
+            .map(|(path, entry)| (path.to_owned(), entry))
+            .collect();
+        let new_copy_map: CopyMap = copies
+            .into_iter()
+            .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+            .collect();
 
         assert_eq!(
             (
--- a/rust/hg-core/src/dirstate/status.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/dirstate/status.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -9,18 +9,20 @@
 //! It is currently missing a lot of functionality compared to the Python one
 //! and will only be triggered in narrow cases.
 
+#[cfg(feature = "dirstate-tree")]
+use crate::dirstate::dirstate_tree::iter::StatusShortcut;
+#[cfg(not(feature = "dirstate-tree"))]
+use crate::utils::path_auditor::PathAuditor;
 use crate::{
     dirstate::SIZE_FROM_OTHER_PARENT,
     filepatterns::PatternFileWarning,
     matchers::{get_ignore_function, Matcher, VisitChildrenSet},
-    operations::Operation,
     utils::{
         files::{find_dirs, HgMetadata},
         hg_path::{
             hg_path_to_path_buf, os_string_to_hg_path_buf, HgPath, HgPathBuf,
             HgPathError,
         },
-        path_auditor::PathAuditor,
     },
     CopyMap, DirstateEntry, DirstateMap, EntryState, FastHashMap,
     PatternError,
@@ -702,12 +704,131 @@
         })
     }
 
+    /// Add the files in the dirstate to the results.
+    ///
+    /// This takes a mutable reference to the results to account for the
+    /// `extend` in timings
+    #[cfg(feature = "dirstate-tree")]
+    #[timed]
+    pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
+        results.par_extend(
+            self.dmap
+                .fs_iter(self.root_dir.clone())
+                .par_bridge()
+                .filter(|(path, _)| self.matcher.matches(path))
+                .flat_map(move |(filename, shortcut)| {
+                    let entry = match shortcut {
+                        StatusShortcut::Entry(e) => e,
+                        StatusShortcut::Dispatch(d) => {
+                            return Ok((Cow::Owned(filename), d))
+                        }
+                    };
+                    let filename_as_path = hg_path_to_path_buf(&filename)?;
+                    let meta = self
+                        .root_dir
+                        .join(filename_as_path)
+                        .symlink_metadata();
+
+                    match meta {
+                        Ok(m)
+                            if !(m.file_type().is_file()
+                                || m.file_type().is_symlink()) =>
+                        {
+                            Ok((
+                                Cow::Owned(filename),
+                                dispatch_missing(entry.state),
+                            ))
+                        }
+                        Ok(m) => {
+                            let dispatch = dispatch_found(
+                                &filename,
+                                entry,
+                                HgMetadata::from_metadata(m),
+                                &self.dmap.copy_map,
+                                self.options,
+                            );
+                            Ok((Cow::Owned(filename), dispatch))
+                        }
+                        Err(e)
+                            if e.kind() == ErrorKind::NotFound
+                                || e.raw_os_error() == Some(20) =>
+                        {
+                            // Rust does not yet have an `ErrorKind` for
+                            // `NotADirectory` (errno 20). It happens if the
+                            // dirstate contains `foo/bar` and `foo` is not a
+                            // directory.
+                            Ok((
+                                Cow::Owned(filename),
+                                dispatch_missing(entry.state),
+                            ))
+                        }
+                        Err(e) => Err(e),
+                    }
+                }),
+        );
+    }
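+
+    // `StatusShortcut` (sketch of the intent): the tree iterator either
+    // yields a plain `Entry`, which still needs the `symlink_metadata`
+    // dispatch above, or a pre-computed `Dispatch` when iteration has
+    // already determined the status, in which case the filesystem call is
+    // skipped entirely.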
+
+    /// Add the files in the dirstate to the results.
+    ///
+    /// This takes a mutable reference to the results to account for the
+    /// `extend` in timings
+    #[cfg(not(feature = "dirstate-tree"))]
+    #[timed]
+    pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
+        results.par_extend(self.dmap.par_iter().flat_map(
+            move |(filename, entry)| {
+                let filename: &HgPath = filename;
+                let filename_as_path = hg_path_to_path_buf(filename)?;
+                let meta =
+                    self.root_dir.join(filename_as_path).symlink_metadata();
+                match meta {
+                    Ok(m)
+                        if !(m.file_type().is_file()
+                            || m.file_type().is_symlink()) =>
+                    {
+                        Ok((
+                            Cow::Borrowed(filename),
+                            dispatch_missing(entry.state),
+                        ))
+                    }
+                    Ok(m) => Ok((
+                        Cow::Borrowed(filename),
+                        dispatch_found(
+                            filename,
+                            *entry,
+                            HgMetadata::from_metadata(m),
+                            &self.dmap.copy_map,
+                            self.options,
+                        ),
+                    )),
+                    Err(e)
+                        if e.kind() == ErrorKind::NotFound
+                            || e.raw_os_error() == Some(20) =>
+                    {
+                        // Rust does not yet have an `ErrorKind` for
+                        // `NotADirectory` (errno 20). It happens if the
+                        // dirstate contains `foo/bar` and `foo` is not a
+                        // directory.
+                        Ok((
+                            Cow::Borrowed(filename),
+                            dispatch_missing(entry.state),
+                        ))
+                    }
+                    Err(e) => Err(e),
+                }
+            },
+        ));
+    }
+
     /// Checks all files that are in the dirstate but were not found during the
     /// working directory traversal. This means that the rest must
     /// be either ignored, under a symlink or under a new nested repo.
     ///
     /// This takes a mutable reference to the results to account for the
     /// `extend` in timings
+    #[cfg(not(feature = "dirstate-tree"))]
     #[timed]
     pub fn handle_unknowns(
         &self,
@@ -782,59 +903,6 @@
 
         Ok(())
     }
-
-    /// Add the files in the dirstate to the results.
-    ///
-    /// This takes a mutable reference to the results to account for the
-    /// `extend` in timings
-    #[timed]
-    pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
-        results.par_extend(self.dmap.par_iter().flat_map(
-            move |(filename, entry)| {
-                let filename: &HgPath = filename;
-                let filename_as_path = hg_path_to_path_buf(filename)?;
-                let meta =
-                    self.root_dir.join(filename_as_path).symlink_metadata();
-
-                match meta {
-                    Ok(ref m)
-                        if !(m.file_type().is_file()
-                            || m.file_type().is_symlink()) =>
-                    {
-                        Ok((
-                            Cow::Borrowed(filename),
-                            dispatch_missing(entry.state),
-                        ))
-                    }
-                    Ok(m) => Ok((
-                        Cow::Borrowed(filename),
-                        dispatch_found(
-                            filename,
-                            *entry,
-                            HgMetadata::from_metadata(m),
-                            &self.dmap.copy_map,
-                            self.options,
-                        ),
-                    )),
-                    Err(ref e)
-                        if e.kind() == ErrorKind::NotFound
-                            || e.raw_os_error() == Some(20) =>
-                    {
-                        // Rust does not yet have an `ErrorKind` for
-                        // `NotADirectory` (errno 20)
-                        // It happens if the dirstate contains `foo/bar`
-                        // and foo is not a
-                        // directory
-                        Ok((
-                            Cow::Borrowed(filename),
-                            dispatch_missing(entry.state),
-                        ))
-                    }
-                    Err(e) => Err(e),
-                }
-            },
-        ));
-    }
 }
 
 #[timed]
--- a/rust/hg-core/src/lib.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/lib.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -57,6 +57,7 @@
 pub enum DirstateParseError {
     TooLittleData,
     Overflow,
+    // TODO refactor to use bytes instead of String
     CorruptedEntry(String),
     Damaged,
 }
--- a/rust/hg-core/src/matchers.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/matchers.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -49,9 +49,9 @@
     /// Explicitly listed files
     fn file_set(&self) -> Option<&HashSet<&HgPath>>;
     /// Returns whether `filename` is in `file_set`
-    fn exact_match(&self, filename: impl AsRef<HgPath>) -> bool;
+    fn exact_match(&self, filename: &HgPath) -> bool;
     /// Returns whether `filename` is matched by this matcher
-    fn matches(&self, filename: impl AsRef<HgPath>) -> bool;
+    fn matches(&self, filename: &HgPath) -> bool;
     /// Decides whether a directory should be visited based on whether it
     /// has potential matches in it or one of its subdirectories, and
     /// potentially lists which subdirectories of that directory should be
@@ -89,10 +89,7 @@
     /// no files in this dir to investigate (or equivalently that if there are
     /// files to investigate in 'dir' that it will always return
     /// `VisitChildrenSet::This`).
-    fn visit_children_set(
-        &self,
-        directory: impl AsRef<HgPath>,
-    ) -> VisitChildrenSet;
+    fn visit_children_set(&self, directory: &HgPath) -> VisitChildrenSet;
     /// Matcher will match everything and `files_set()` will be empty:
     /// optimization might be possible.
     fn matches_everything(&self) -> bool;
@@ -119,16 +116,13 @@
     fn file_set(&self) -> Option<&HashSet<&HgPath>> {
         None
     }
-    fn exact_match(&self, _filename: impl AsRef<HgPath>) -> bool {
+    fn exact_match(&self, _filename: &HgPath) -> bool {
         false
     }
-    fn matches(&self, _filename: impl AsRef<HgPath>) -> bool {
+    fn matches(&self, _filename: &HgPath) -> bool {
         true
     }
-    fn visit_children_set(
-        &self,
-        _directory: impl AsRef<HgPath>,
-    ) -> VisitChildrenSet {
+    fn visit_children_set(&self, _directory: &HgPath) -> VisitChildrenSet {
         VisitChildrenSet::Recursive
     }
     fn matches_everything(&self) -> bool {
@@ -143,9 +137,9 @@
 /// patterns.
 ///
 ///```
-/// use hg::{ matchers::{Matcher, FileMatcher}, utils::hg_path::HgPath };
+/// use hg::{ matchers::{Matcher, FileMatcher}, utils::hg_path::{HgPath, HgPathBuf} };
 ///
-/// let files = [HgPath::new(b"a.txt"), HgPath::new(br"re:.*\.c$")];
+/// let files = [HgPathBuf::from_bytes(b"a.txt"), HgPathBuf::from_bytes(br"re:.*\.c$")];
 /// let matcher = FileMatcher::new(&files).unwrap();
 ///
 /// assert_eq!(matcher.matches(HgPath::new(b"a.txt")), true);
@@ -160,15 +154,13 @@
 }
 
 impl<'a> FileMatcher<'a> {
-    pub fn new(
-        files: &'a [impl AsRef<HgPath>],
-    ) -> Result<Self, DirstateMapError> {
+    pub fn new(files: &'a [HgPathBuf]) -> Result<Self, DirstateMapError> {
         Ok(Self {
             files: HashSet::from_iter(files.iter().map(AsRef::as_ref)),
             dirs: DirsMultiset::from_manifest(files)?,
         })
     }
-    fn inner_matches(&self, filename: impl AsRef<HgPath>) -> bool {
+    fn inner_matches(&self, filename: &HgPath) -> bool {
         self.files.contains(filename.as_ref())
     }
 }
@@ -177,16 +169,13 @@
     fn file_set(&self) -> Option<&HashSet<&HgPath>> {
         Some(&self.files)
     }
-    fn exact_match(&self, filename: impl AsRef<HgPath>) -> bool {
+    fn exact_match(&self, filename: &HgPath) -> bool {
         self.inner_matches(filename)
     }
-    fn matches(&self, filename: impl AsRef<HgPath>) -> bool {
+    fn matches(&self, filename: &HgPath) -> bool {
         self.inner_matches(filename)
     }
-    fn visit_children_set(
-        &self,
-        directory: impl AsRef<HgPath>,
-    ) -> VisitChildrenSet {
+    fn visit_children_set(&self, directory: &HgPath) -> VisitChildrenSet {
         if self.files.is_empty() || !self.dirs.contains(&directory) {
             return VisitChildrenSet::Empty;
         }
@@ -270,18 +259,15 @@
         None
     }
 
-    fn exact_match(&self, _filename: impl AsRef<HgPath>) -> bool {
+    fn exact_match(&self, _filename: &HgPath) -> bool {
         false
     }
 
-    fn matches(&self, filename: impl AsRef<HgPath>) -> bool {
+    fn matches(&self, filename: &HgPath) -> bool {
         (self.match_fn)(filename.as_ref())
     }
 
-    fn visit_children_set(
-        &self,
-        directory: impl AsRef<HgPath>,
-    ) -> VisitChildrenSet {
+    fn visit_children_set(&self, directory: &HgPath) -> VisitChildrenSet {
         let dir = directory.as_ref();
         if self.prefix && self.roots.contains(dir) {
             return VisitChildrenSet::Recursive;
@@ -725,7 +711,7 @@
     #[test]
     fn test_filematcher_visit_children_set() {
         // Visitchildrenset
-        let files = vec![HgPath::new(b"dir/subdir/foo.txt")];
+        let files = vec![HgPathBuf::from_bytes(b"dir/subdir/foo.txt")];
         let matcher = FileMatcher::new(&files).unwrap();
 
         let mut set = HashSet::new();
@@ -766,11 +752,11 @@
     #[test]
     fn test_filematcher_visit_children_set_files_and_dirs() {
         let files = vec![
-            HgPath::new(b"rootfile.txt"),
-            HgPath::new(b"a/file1.txt"),
-            HgPath::new(b"a/b/file2.txt"),
+            HgPathBuf::from_bytes(b"rootfile.txt"),
+            HgPathBuf::from_bytes(b"a/file1.txt"),
+            HgPathBuf::from_bytes(b"a/b/file2.txt"),
             // No file in a/b/c
-            HgPath::new(b"a/b/c/d/file4.txt"),
+            HgPathBuf::from_bytes(b"a/b/c/d/file4.txt"),
         ];
         let matcher = FileMatcher::new(&files).unwrap();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/cat.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,145 @@
+// cat.rs
+//
+// Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use std::convert::From;
+use std::path::PathBuf;
+
+use crate::revlog::changelog::Changelog;
+use crate::revlog::manifest::{Manifest, ManifestEntry};
+use crate::revlog::path_encode::path_encode;
+use crate::revlog::revlog::Revlog;
+use crate::revlog::revlog::RevlogError;
+use crate::revlog::Revision;
+use crate::utils::hg_path::HgPathBuf;
+
+/// Kind of error encountered by `CatRev`
+#[derive(Debug)]
+pub enum CatRevErrorKind {
+    /// Error when reading a `revlog` file.
+    IoError(std::io::Error),
+    /// The revision has not been found.
+    InvalidRevision,
+    /// A `revlog` file is corrupted.
+    CorruptedRevlog,
+    /// The `revlog` format version is not supported.
+    UnsuportedRevlogVersion(u16),
+    /// The `revlog` data format is not supported.
+    UnknowRevlogDataFormat(u8),
+}
+
+/// A `CatRev` error
+#[derive(Debug)]
+pub struct CatRevError {
+    /// Kind of error encountered by `CatRev`
+    pub kind: CatRevErrorKind,
+}
+
+impl From<CatRevErrorKind> for CatRevError {
+    fn from(kind: CatRevErrorKind) -> Self {
+        CatRevError { kind }
+    }
+}
+
+impl From<RevlogError> for CatRevError {
+    fn from(err: RevlogError) -> Self {
+        match err {
+            RevlogError::IoError(err) => CatRevErrorKind::IoError(err),
+            RevlogError::UnsuportedVersion(version) => {
+                CatRevErrorKind::UnsuportedRevlogVersion(version)
+            }
+            RevlogError::InvalidRevision => CatRevErrorKind::InvalidRevision,
+            RevlogError::Corrupted => CatRevErrorKind::CorruptedRevlog,
+            RevlogError::UnknowDataFormat(format) => {
+                CatRevErrorKind::UnknowRevlogDataFormat(format)
+            }
+        }
+        .into()
+    }
+}
+
+/// Output the content of files at a given revision.
+pub struct CatRev<'a> {
+    root: &'a PathBuf,
+    /// The revision to cat the files from.
+    rev: &'a str,
+    /// The files to output.
+    files: &'a [HgPathBuf],
+    /// The changelog file
+    changelog: Changelog,
+    /// The manifest file
+    manifest: Manifest,
+    /// The manifest entry corresponding to the revision.
+    ///
+    /// Used to hold the owner of the returned references.
+    manifest_entry: Option<ManifestEntry>,
+}
+
+impl<'a> CatRev<'a> {
+    pub fn new(
+        root: &'a PathBuf,
+        rev: &'a str,
+        files: &'a [HgPathBuf],
+    ) -> Result<Self, CatRevError> {
+        let changelog = Changelog::open(&root)?;
+        let manifest = Manifest::open(&root)?;
+        let manifest_entry = None;
+
+        Ok(Self {
+            root,
+            rev,
+            files,
+            changelog,
+            manifest,
+            manifest_entry,
+        })
+    }
+
+    pub fn run(&mut self) -> Result<Vec<u8>, CatRevError> {
+        let changelog_entry = match self.rev.parse::<Revision>() {
+            Ok(rev) => self.changelog.get_rev(rev)?,
+            _ => {
+                let changelog_node = hex::decode(&self.rev)
+                    .map_err(|_| CatRevErrorKind::InvalidRevision)?;
+                self.changelog.get_node(&changelog_node)?
+            }
+        };
+        let manifest_node = hex::decode(&changelog_entry.manifest_node()?)
+            .map_err(|_| CatRevErrorKind::CorruptedRevlog)?;
+
+        self.manifest_entry = Some(self.manifest.get_node(&manifest_node)?);
+        if let Some(ref manifest_entry) = self.manifest_entry {
+            let mut bytes = vec![];
+
+            for (manifest_file, node_bytes) in
+                manifest_entry.files_with_nodes()
+            {
+                for cat_file in self.files.iter() {
+                    if cat_file.as_bytes() == manifest_file.as_bytes() {
+                        let encoded_bytes =
+                            path_encode(manifest_file.as_bytes());
+                        let revlog_index_string = format!(
+                            ".hg/store/data/{}.i",
+                            String::from_utf8_lossy(&encoded_bytes),
+                        );
+                        let revlog_index_path =
+                            self.root.join(&revlog_index_string);
+                        let file_log = Revlog::open(&revlog_index_path)?;
+                        let file_node = hex::decode(&node_bytes)
+                            .map_err(|_| CatRevErrorKind::CorruptedRevlog)?;
+                        let file_rev = file_log.get_node_rev(&file_node)?;
+                        let data = file_log.get_rev_data(file_rev)?;
+                        bytes.extend(data);
+                    }
+                }
+            }
+
+            Ok(bytes)
+        } else {
+            unreachable!("manifest_entry should have been stored");
+        }
+    }
+}
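+
+// A minimal usage sketch, assuming a repository at `root` that tracks
+// `some/file.txt` at revision "0"; the path and revision below are
+// illustrative, not taken from any real repository:
+//
+//     let root = PathBuf::from("/path/to/repo");
+//     let files = vec![HgPathBuf::from_bytes(b"some/file.txt")];
+//     let mut cat = CatRev::new(&root, "0", &files)?;
+//     let contents: Vec<u8> = cat.run()?;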
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/debugdata.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,110 @@
+// debugdata.rs
+//
+// Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use super::find_root;
+use crate::revlog::revlog::{Revlog, RevlogError};
+use crate::revlog::Revision;
+
+/// Kind of data to debug
+#[derive(Debug, Copy, Clone)]
+pub enum DebugDataKind {
+    Changelog,
+    Manifest,
+}
+
+/// Kind of error encountered by DebugData
+#[derive(Debug)]
+pub enum DebugDataErrorKind {
+    FindRootError(find_root::FindRootError),
+    /// Error when reading a `revlog` file.
+    IoError(std::io::Error),
+    /// The revision has not been found.
+    InvalidRevision,
+    /// A `revlog` file is corrupted.
+    CorruptedRevlog,
+    /// The `revlog` format version is not supported.
+    UnsuportedRevlogVersion(u16),
+    /// The `revlog` data format is not supported.
+    UnknowRevlogDataFormat(u8),
+}
+
+/// A DebugData error
+#[derive(Debug)]
+pub struct DebugDataError {
+    /// Kind of error encountered by DebugData
+    pub kind: DebugDataErrorKind,
+}
+
+impl From<DebugDataErrorKind> for DebugDataError {
+    fn from(kind: DebugDataErrorKind) -> Self {
+        DebugDataError { kind }
+    }
+}
+
+impl From<find_root::FindRootError> for DebugDataError {
+    fn from(err: find_root::FindRootError) -> Self {
+        let kind = DebugDataErrorKind::FindRootError(err);
+        DebugDataError { kind }
+    }
+}
+
+impl From<std::io::Error> for DebugDataError {
+    fn from(err: std::io::Error) -> Self {
+        let kind = DebugDataErrorKind::IoError(err);
+        DebugDataError { kind }
+    }
+}
+
+impl From<RevlogError> for DebugDataError {
+    fn from(err: RevlogError) -> Self {
+        match err {
+            RevlogError::IoError(err) => DebugDataErrorKind::IoError(err),
+            RevlogError::UnsuportedVersion(version) => {
+                DebugDataErrorKind::UnsuportedRevlogVersion(version)
+            }
+            RevlogError::InvalidRevision => {
+                DebugDataErrorKind::InvalidRevision
+            }
+            RevlogError::Corrupted => DebugDataErrorKind::CorruptedRevlog,
+            RevlogError::UnknowDataFormat(format) => {
+                DebugDataErrorKind::UnknowRevlogDataFormat(format)
+            }
+        }
+        .into()
+    }
+}
+
+/// Dump the raw data of a revision.
+pub struct DebugData<'a> {
+    /// Revision or hash of the revision.
+    rev: &'a str,
+    /// Kind of data to debug.
+    kind: DebugDataKind,
+}
+
+impl<'a> DebugData<'a> {
+    pub fn new(rev: &'a str, kind: DebugDataKind) -> Self {
+        DebugData { rev, kind }
+    }
+
+    pub fn run(&mut self) -> Result<Vec<u8>, DebugDataError> {
+        let rev = self
+            .rev
+            .parse::<Revision>()
+            .or(Err(DebugDataErrorKind::InvalidRevision))?;
+
+        let root = find_root::FindRoot::new().run()?;
+        let index_file = match self.kind {
+            DebugDataKind::Changelog => root.join(".hg/store/00changelog.i"),
+            DebugDataKind::Manifest => root.join(".hg/store/00manifest.i"),
+        };
+        let revlog = Revlog::open(&index_file)?;
+        let data = revlog.get_rev_data(rev)?;
+
+        Ok(data)
+    }
+}
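+
+// A minimal usage sketch, assuming the process runs inside a repository
+// (so `FindRoot` can locate `.hg`); the revision number is illustrative:
+//
+//     let mut op = DebugData::new("0", DebugDataKind::Changelog);
+//     let data: Vec<u8> = op.run()?;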
--- a/rust/hg-core/src/operations/dirstate_status.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/operations/dirstate_status.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -7,7 +7,6 @@
 
 use crate::dirstate::status::{build_response, Dispatch, HgPathCow, Status};
 use crate::matchers::Matcher;
-use crate::operations::Operation;
 use crate::{DirstateStatus, StatusError};
 
 /// A tuple of the paths that need to be checked in the filelog because it's
@@ -15,10 +14,68 @@
 /// files.
 pub type LookupAndStatus<'a> = (Vec<HgPathCow<'a>>, DirstateStatus<'a>);
 
-impl<'a, M: Matcher + Sync> Operation<LookupAndStatus<'a>> for Status<'a, M> {
-    type Error = StatusError;
+#[cfg(feature = "dirstate-tree")]
+impl<'a, M: Matcher + Sync> Status<'a, M> {
+    pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
+        let (traversed_sender, traversed_receiver) =
+            crossbeam::channel::unbounded();
+
+        // Step 1: check the files explicitly mentioned by the user
+        let (work, mut results) = self.walk_explicit(traversed_sender.clone());
+
+        // Step 2: Check files in the dirstate
+        if !self.matcher.is_exact() {
+            self.extend_from_dmap(&mut results);
+        }
+        // Step 3: Check the working directory if listing unknowns
+        if !work.is_empty() {
+            // Hashmaps are quite a bit slower to build than vecs, so only
+            // build one if needed.
+            let mut old_results = None;
 
-    fn run(&self) -> Result<LookupAndStatus<'a>, Self::Error> {
+            // Recursively check the working directory for changes if
+            // needed
+            for (dir, dispatch) in work {
+                match dispatch {
+                    Dispatch::Directory { was_file } => {
+                        if was_file {
+                            results.push((dir.to_owned(), Dispatch::Removed));
+                        }
+                        if self.options.list_ignored
+                            || self.options.list_unknown
+                                && !self.dir_ignore(&dir)
+                        {
+                            if old_results.is_none() {
+                                old_results =
+                                    Some(results.iter().cloned().collect());
+                            }
+                            self.traverse(
+                                &dir,
+                                old_results
+                                    .as_ref()
+                                    .expect("old results should exist"),
+                                &mut results,
+                                traversed_sender.clone(),
+                            )?;
+                        }
+                    }
+                    _ => {
+                        unreachable!("There can only be directories in `work`")
+                    }
+                }
+            }
+        }
+
+        drop(traversed_sender);
+        let traversed = traversed_receiver.into_iter().collect();
+
+        Ok(build_response(results, traversed))
+    }
+}
+
+#[cfg(not(feature = "dirstate-tree"))]
+impl<'a, M: Matcher + Sync> Status<'a, M> {
+    pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
         let (traversed_sender, traversed_receiver) =
             crossbeam::channel::unbounded();
 
--- a/rust/hg-core/src/operations/find_root.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/operations/find_root.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -1,4 +1,3 @@
-use super::Operation;
 use std::fmt;
 use std::path::{Path, PathBuf};
 
@@ -45,12 +44,8 @@
             current_dir: Some(current_dir),
         }
     }
-}
 
-impl<'a> Operation<PathBuf> for FindRoot<'a> {
-    type Error = FindRootError;
-
-    fn run(&self) -> Result<PathBuf, Self::Error> {
+    pub fn run(&self) -> Result<PathBuf, FindRootError> {
         let current_dir = match self.current_dir {
             None => std::env::current_dir().or_else(|e| {
                 Err(FindRootError {
@@ -61,10 +56,10 @@
         };
 
         if current_dir.join(".hg").exists() {
-            return Ok(current_dir.into());
+            return Ok(current_dir);
         }
-        let mut ancestors = current_dir.ancestors();
-        while let Some(parent) = ancestors.next() {
+        let ancestors = current_dir.ancestors();
+        for parent in ancestors {
             if parent.join(".hg").exists() {
                 return Ok(parent.into());
             }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/list_tracked_files.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,187 @@
+// list_tracked_files.rs
+//
+// Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::dirstate::parsers::parse_dirstate;
+use crate::revlog::changelog::Changelog;
+use crate::revlog::manifest::{Manifest, ManifestEntry};
+use crate::revlog::revlog::RevlogError;
+use crate::revlog::Revision;
+use crate::utils::hg_path::HgPath;
+use crate::{DirstateParseError, EntryState};
+use rayon::prelude::*;
+use std::convert::From;
+use std::fs;
+use std::path::PathBuf;
+
+/// Kind of error encountered by `ListDirstateTrackedFiles`
+#[derive(Debug)]
+pub enum ListDirstateTrackedFilesErrorKind {
+    /// Error when reading the `dirstate` file
+    IoError(std::io::Error),
+    /// Error when parsing the `dirstate` file
+    ParseError(DirstateParseError),
+}
+
+/// A `ListDirstateTrackedFiles` error
+#[derive(Debug)]
+pub struct ListDirstateTrackedFilesError {
+    /// Kind of error encountered by `ListDirstateTrackedFiles`
+    pub kind: ListDirstateTrackedFilesErrorKind,
+}
+
+impl From<ListDirstateTrackedFilesErrorKind>
+    for ListDirstateTrackedFilesError
+{
+    fn from(kind: ListDirstateTrackedFilesErrorKind) -> Self {
+        ListDirstateTrackedFilesError { kind }
+    }
+}
+
+impl From<std::io::Error> for ListDirstateTrackedFilesError {
+    fn from(err: std::io::Error) -> Self {
+        let kind = ListDirstateTrackedFilesErrorKind::IoError(err);
+        ListDirstateTrackedFilesError { kind }
+    }
+}
+
+/// List files under Mercurial control in the working directory
+/// by reading the dirstate
+pub struct ListDirstateTrackedFiles {
+    /// The `dirstate` content.
+    content: Vec<u8>,
+}
+
+impl ListDirstateTrackedFiles {
+    pub fn new(root: &PathBuf) -> Result<Self, ListDirstateTrackedFilesError> {
+        let dirstate = root.join(".hg/dirstate");
+        let content = fs::read(&dirstate)?;
+        Ok(Self { content })
+    }
+
+    pub fn run(
+        &mut self,
+    ) -> Result<Vec<&HgPath>, ListDirstateTrackedFilesError> {
+        let (_, entries, _) = parse_dirstate(&self.content)
+            .map_err(ListDirstateTrackedFilesErrorKind::ParseError)?;
+        let mut files: Vec<&HgPath> = entries
+            .into_iter()
+            .filter_map(|(path, entry)| match entry.state {
+                EntryState::Removed => None,
+                _ => Some(path),
+            })
+            .collect();
+        files.par_sort_unstable();
+        Ok(files)
+    }
+}
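+
+// A minimal usage sketch, assuming a repository at `root` (illustrative
+// path); `run` borrows from the dirstate bytes held by the struct:
+//
+//     let root = PathBuf::from("/path/to/repo");
+//     let mut op = ListDirstateTrackedFiles::new(&root)?;
+//     for file in op.run()? {
+//         println!("{}", file);
+//     }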
+
+/// Kind of error encountered by `ListRevTrackedFiles`
+#[derive(Debug)]
+pub enum ListRevTrackedFilesErrorKind {
+    /// Error when reading a `revlog` file.
+    IoError(std::io::Error),
+    /// The revision has not been found.
+    InvalidRevision,
+    /// A `revlog` file is corrupted.
+    CorruptedRevlog,
+    /// The `revlog` format version is not supported.
+    UnsuportedRevlogVersion(u16),
+    /// The `revlog` data format is not supported.
+    UnknowRevlogDataFormat(u8),
+}
+
+/// A `ListRevTrackedFiles` error
+#[derive(Debug)]
+pub struct ListRevTrackedFilesError {
+    /// Kind of error encountered by `ListRevTrackedFiles`
+    pub kind: ListRevTrackedFilesErrorKind,
+}
+
+impl From<ListRevTrackedFilesErrorKind> for ListRevTrackedFilesError {
+    fn from(kind: ListRevTrackedFilesErrorKind) -> Self {
+        ListRevTrackedFilesError { kind }
+    }
+}
+
+impl From<RevlogError> for ListRevTrackedFilesError {
+    fn from(err: RevlogError) -> Self {
+        match err {
+            RevlogError::IoError(err) => {
+                ListRevTrackedFilesErrorKind::IoError(err)
+            }
+            RevlogError::UnsuportedVersion(version) => {
+                ListRevTrackedFilesErrorKind::UnsuportedRevlogVersion(version)
+            }
+            RevlogError::InvalidRevision => {
+                ListRevTrackedFilesErrorKind::InvalidRevision
+            }
+            RevlogError::Corrupted => {
+                ListRevTrackedFilesErrorKind::CorruptedRevlog
+            }
+            RevlogError::UnknowDataFormat(format) => {
+                ListRevTrackedFilesErrorKind::UnknowRevlogDataFormat(format)
+            }
+        }
+        .into()
+    }
+}
+
+/// List files under Mercurial control at a given revision.
+pub struct ListRevTrackedFiles<'a> {
+    /// The revision to list the files from.
+    rev: &'a str,
+    /// The changelog file
+    changelog: Changelog,
+    /// The manifest file
+    manifest: Manifest,
+    /// The manifest entry corresponding to the revision.
+    ///
+    /// Used to hold the owner of the returned references.
+    manifest_entry: Option<ManifestEntry>,
+}
+
+impl<'a> ListRevTrackedFiles<'a> {
+    pub fn new(
+        root: &PathBuf,
+        rev: &'a str,
+    ) -> Result<Self, ListRevTrackedFilesError> {
+        let changelog = Changelog::open(&root)?;
+        let manifest = Manifest::open(&root)?;
+
+        Ok(Self {
+            rev,
+            changelog,
+            manifest,
+            manifest_entry: None,
+        })
+    }
+
+    pub fn run(
+        &mut self,
+    ) -> Result<impl Iterator<Item = &HgPath>, ListRevTrackedFilesError> {
+        let changelog_entry = match self.rev.parse::<Revision>() {
+            Ok(rev) => self.changelog.get_rev(rev)?,
+            _ => {
+                let changelog_node = hex::decode(&self.rev)
+                    .or(Err(ListRevTrackedFilesErrorKind::InvalidRevision))?;
+                self.changelog.get_node(&changelog_node)?
+            }
+        };
+        let manifest_node = hex::decode(&changelog_entry.manifest_node()?)
+            .or(Err(ListRevTrackedFilesErrorKind::CorruptedRevlog))?;
+
+        self.manifest_entry = Some(self.manifest.get_node(&manifest_node)?);
+
+        if let Some(ref manifest_entry) = self.manifest_entry {
+            Ok(manifest_entry.files())
+        } else {
+            panic!(
+                "manifest entry should have been stored in self.manifest_node to ensure its lifetime since references are returned from it"
+            )
+        }
+    }
+}
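+
+// A minimal usage sketch, assuming a repository at `root` with a revision
+// "0" (both illustrative); the manifest entry is kept alive inside the
+// struct so the returned paths can borrow from it:
+//
+//     let root = PathBuf::from("/path/to/repo");
+//     let mut op = ListRevTrackedFiles::new(&root, "0")?;
+//     let files: Vec<&HgPath> = op.run()?.collect();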
--- a/rust/hg-core/src/operations/mod.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/operations/mod.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -1,13 +1,28 @@
+//! A distinction is made between operations and commands.
+//! An operation is what can be done whereas a command is what is exposed by
+//! the CLI. A single command can use several operations to achieve its goal.
+
+mod cat;
+mod debugdata;
 mod dirstate_status;
 mod find_root;
+mod list_tracked_files;
+pub use cat::{CatRev, CatRevError, CatRevErrorKind};
+pub use debugdata::{
+    DebugData, DebugDataError, DebugDataErrorKind, DebugDataKind,
+};
 pub use find_root::{FindRoot, FindRootError, FindRootErrorKind};
+pub use list_tracked_files::{
+    ListDirstateTrackedFiles, ListDirstateTrackedFilesError,
+    ListDirstateTrackedFilesErrorKind,
+};
+pub use list_tracked_files::{
+    ListRevTrackedFiles, ListRevTrackedFilesError,
+    ListRevTrackedFilesErrorKind,
+};
 
-/// An interface for high-level hg operations.
-///
-/// A distinction is made between operation and commands.
-/// An operation is what can be done whereas a command is what is exposed by
-/// the cli. A single command can use several operations to achieve its goal.
-pub trait Operation<T> {
-    type Error;
-    fn run(&self) -> Result<T, Self::Error>;
-}
+// TODO add an `Operation` trait when GATs have landed (rust #44265):
+// there is currently no way to define a trait which can both return
+// references to `self` and to passed data, which is what we would need.
+// Generic Associated Types may fix this and allow us to have a unified
+// interface.
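+//
+// A speculative sketch (not actual code) of what such a trait could look
+// like once GATs are available:
+//
+//     pub trait Operation {
+//         type Output<'a> where Self: 'a;
+//         type Error;
+//         fn run(&mut self) -> Result<Self::Output<'_>, Self::Error>;
+//     }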
--- a/rust/hg-core/src/revlog.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/revlog.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -7,7 +7,13 @@
 
 pub mod node;
 pub mod nodemap;
+pub mod path_encode;
 pub use node::{Node, NodeError, NodePrefix, NodePrefixRef};
+pub mod changelog;
+pub mod index;
+pub mod manifest;
+pub mod patch;
+pub mod revlog;
 
 /// Mercurial revision numbers
 ///
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/changelog.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,58 @@
+use crate::revlog::revlog::{Revlog, RevlogError};
+use crate::revlog::Revision;
+use std::path::PathBuf;
+
+/// A specialized `Revlog` to work with `changelog` data format.
+pub struct Changelog {
+    /// The generic `revlog` format.
+    revlog: Revlog,
+}
+
+impl Changelog {
+    /// Open the `changelog` of a repository given by its root.
+    pub fn open(root: &PathBuf) -> Result<Self, RevlogError> {
+        let index_file = root.join(".hg/store/00changelog.i");
+        let revlog = Revlog::open(&index_file)?;
+        Ok(Self { revlog })
+    }
+
+    /// Return the `ChangelogEntry` of a given node id.
+    pub fn get_node(
+        &self,
+        node: &[u8],
+    ) -> Result<ChangelogEntry, RevlogError> {
+        let rev = self.revlog.get_node_rev(node)?;
+        self.get_rev(rev)
+    }
+
+    /// Return the `ChangelogEntry` of a given revision.
+    pub fn get_rev(
+        &self,
+        rev: Revision,
+    ) -> Result<ChangelogEntry, RevlogError> {
+        let bytes = self.revlog.get_rev_data(rev)?;
+        Ok(ChangelogEntry { bytes })
+    }
+}
+
+/// `Changelog` entry which knows how to interpret the `changelog` data bytes.
+#[derive(Debug)]
+pub struct ChangelogEntry {
+    /// The data bytes of the `changelog` entry.
+    bytes: Vec<u8>,
+}
+
+impl ChangelogEntry {
+    /// Return an iterator over the lines of the entry.
+    pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
+        self.bytes
+            .split(|b| b == &b'\n')
+            .filter(|line| !line.is_empty())
+    }
+
+    /// Return the node id of the `manifest` referenced by this `changelog`
+    /// entry.
+    pub fn manifest_node(&self) -> Result<&[u8], RevlogError> {
+        self.lines().next().ok_or(RevlogError::Corrupted)
+    }
+}
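+
+// For reference, the decompressed bytes of a changelog entry start with
+// the hex manifest node on the first line, e.g. (illustrative data):
+//
+//     0123456789abcdef0123456789abcdef01234567
+//     Some Author <author@example.com>
+//     ...
+//
+// which is why `manifest_node` simply returns the first non-empty line.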
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/index.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,390 @@
+use std::ops::Deref;
+
+use byteorder::{BigEndian, ByteOrder};
+
+use crate::revlog::revlog::RevlogError;
+use crate::revlog::{Revision, NULL_REVISION};
+
+pub const INDEX_ENTRY_SIZE: usize = 64;
+
+/// A Revlog index
+pub struct Index {
+    bytes: Box<dyn Deref<Target = [u8]> + Send>,
+    /// Offsets of starts of index blocks.
+    /// Only needed when the index is interleaved with data.
+    offsets: Option<Vec<usize>>,
+}
+
+impl Index {
+    /// Create an index from bytes.
+    /// Calculate the start of each entry when is_inline is true.
+    pub fn new(
+        bytes: Box<dyn Deref<Target = [u8]> + Send>,
+    ) -> Result<Self, RevlogError> {
+        if is_inline(&bytes) {
+            let mut offset: usize = 0;
+            let mut offsets = Vec::new();
+
+            while offset + INDEX_ENTRY_SIZE <= bytes.len() {
+                offsets.push(offset);
+                let end = offset + INDEX_ENTRY_SIZE;
+                let entry = IndexEntry {
+                    bytes: &bytes[offset..end],
+                    offset_override: None,
+                };
+
+                offset += INDEX_ENTRY_SIZE + entry.compressed_len();
+            }
+
+            if offset == bytes.len() {
+                Ok(Self {
+                    bytes,
+                    offsets: Some(offsets),
+                })
+            } else {
+                Err(RevlogError::Corrupted)
+            }
+        } else {
+            Ok(Self {
+                bytes,
+                offsets: None,
+            })
+        }
+    }
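+
+    // NOTE: in an inline revlog, index entries and revision data are
+    // interleaved (entry 0, data 0, entry 1, data 1, ...), so the start
+    // of entry n + 1 is start(n) + INDEX_ENTRY_SIZE + compressed_len(n),
+    // which is what the loop above computes.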
+
+    /// Value of the inline flag.
+    pub fn is_inline(&self) -> bool {
+        is_inline(&self.bytes)
+    }
+
+    /// Return a slice of bytes if `revlog` is inline. Panic if not.
+    pub fn data(&self, start: usize, end: usize) -> &[u8] {
+        if !self.is_inline() {
+            panic!("tried to access data in the index of a revlog that is not inline");
+        }
+        &self.bytes[start..end]
+    }
+
+    /// Return number of entries of the revlog index.
+    pub fn len(&self) -> usize {
+        if let Some(offsets) = &self.offsets {
+            offsets.len()
+        } else {
+            self.bytes.len() / INDEX_ENTRY_SIZE
+        }
+    }
+
+    /// Returns `true` if the `Index` has zero `entries`.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Return the index entry corresponding to the given revision if it
+    /// exists.
+    pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
+        if rev == NULL_REVISION {
+            return None;
+        }
+        if let Some(offsets) = &self.offsets {
+            self.get_entry_inline(rev, offsets)
+        } else {
+            self.get_entry_separated(rev)
+        }
+    }
+
+    fn get_entry_inline(
+        &self,
+        rev: Revision,
+        offsets: &[usize],
+    ) -> Option<IndexEntry> {
+        let start = *offsets.get(rev as usize)?;
+        let end = start.checked_add(INDEX_ENTRY_SIZE)?;
+        let bytes = &self.bytes[start..end];
+
+        // See IndexEntry for an explanation of this override.
+        let offset_override = Some(end);
+
+        Some(IndexEntry {
+            bytes,
+            offset_override,
+        })
+    }
+
+    fn get_entry_separated(&self, rev: Revision) -> Option<IndexEntry> {
+        let max_rev = self.bytes.len() / INDEX_ENTRY_SIZE;
+        if rev as usize >= max_rev {
+            return None;
+        }
+        let start = rev as usize * INDEX_ENTRY_SIZE;
+        let end = start + INDEX_ENTRY_SIZE;
+        let bytes = &self.bytes[start..end];
+
+        // Override the offset of the first revision as its bytes are used
+        // for the index's metadata (saving space because it is always 0)
+        let offset_override = if rev == 0 { Some(0) } else { None };
+
+        Some(IndexEntry {
+            bytes,
+            offset_override,
+        })
+    }
+}
+
+#[derive(Debug)]
+pub struct IndexEntry<'a> {
+    bytes: &'a [u8],
+    /// Allows overriding the offset value of the entry.
+    ///
+    /// For interleaved index and data, the offset stored in the index
+    /// corresponds to the separated data offset.
+    /// It has to be overridden with the actual offset in the interleaved
+    /// index which is just after the index block.
+    ///
+    /// For separated index and data, the offset stored in the first index
+    /// entry is mixed with the index headers.
+    /// It has to be overridden with 0.
+    offset_override: Option<usize>,
+}
+
+impl<'a> IndexEntry<'a> {
+    /// Return the offset of the data.
+    pub fn offset(&self) -> usize {
+        if let Some(offset_override) = self.offset_override {
+            offset_override
+        } else {
+            let mut bytes = [0; 8];
+            bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
+            BigEndian::read_u64(&bytes[..]) as usize
+        }
+    }
+
+    /// Return the compressed length of the data.
+    pub fn compressed_len(&self) -> usize {
+        BigEndian::read_u32(&self.bytes[8..=11]) as usize
+    }
+
+    /// Return the uncompressed length of the data.
+    pub fn uncompressed_len(&self) -> usize {
+        BigEndian::read_u32(&self.bytes[12..=15]) as usize
+    }
+
+    /// Return the revision upon which the data has been derived.
+    pub fn base_revision(&self) -> Revision {
+        // TODO Maybe return an Option when base_revision == rev?
+        //      Requires to add rev to IndexEntry
+
+        BigEndian::read_i32(&self.bytes[16..])
+    }
+
+    pub fn p1(&self) -> Revision {
+        BigEndian::read_i32(&self.bytes[24..])
+    }
+
+    pub fn p2(&self) -> Revision {
+        BigEndian::read_i32(&self.bytes[28..])
+    }
+
+    /// Return the hash of revision's full text.
+    ///
+    /// Currently, SHA-1 is used and only the first 20 bytes of this field
+    /// are used.
+    pub fn hash(&self) -> &[u8] {
+        &self.bytes[32..52]
+    }
+}
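+
+// For reference, the accessors above read a 64-byte version-1 index
+// entry laid out as: bytes 0..6 data offset, 6..8 flags, 8..12
+// compressed length, 12..16 uncompressed length, 16..20 base revision,
+// 20..24 link revision (not read here), 24..28 p1, 28..32 p2, and
+// 32..52 the SHA-1 hash; the remaining bytes are padding.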
+
+/// Value of the inline flag.
+pub fn is_inline(index_bytes: &[u8]) -> bool {
+    match &index_bytes[0..=1] {
+        [0, 0] | [0, 2] => false,
+        _ => true,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[cfg(test)]
+    #[derive(Debug, Copy, Clone)]
+    pub struct IndexEntryBuilder {
+        is_first: bool,
+        is_inline: bool,
+        is_general_delta: bool,
+        version: u16,
+        offset: usize,
+        compressed_len: usize,
+        uncompressed_len: usize,
+        base_revision: Revision,
+    }
+
+    #[cfg(test)]
+    impl IndexEntryBuilder {
+        pub fn new() -> Self {
+            Self {
+                is_first: false,
+                is_inline: false,
+                is_general_delta: true,
+                version: 2,
+                offset: 0,
+                compressed_len: 0,
+                uncompressed_len: 0,
+                base_revision: 0,
+            }
+        }
+
+        pub fn is_first(&mut self, value: bool) -> &mut Self {
+            self.is_first = value;
+            self
+        }
+
+        pub fn with_inline(&mut self, value: bool) -> &mut Self {
+            self.is_inline = value;
+            self
+        }
+
+        pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
+            self.is_general_delta = value;
+            self
+        }
+
+        pub fn with_version(&mut self, value: u16) -> &mut Self {
+            self.version = value;
+            self
+        }
+
+        pub fn with_offset(&mut self, value: usize) -> &mut Self {
+            self.offset = value;
+            self
+        }
+
+        pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
+            self.compressed_len = value;
+            self
+        }
+
+        pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
+            self.uncompressed_len = value;
+            self
+        }
+
+        pub fn with_base_revision(&mut self, value: Revision) -> &mut Self {
+            self.base_revision = value;
+            self
+        }
+
+        pub fn build(&self) -> Vec<u8> {
+            let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
+            if self.is_first {
+                bytes.extend(&match (self.is_general_delta, self.is_inline) {
+                    (false, false) => [0u8, 0],
+                    (false, true) => [0u8, 1],
+                    (true, false) => [0u8, 2],
+                    (true, true) => [0u8, 3],
+                });
+                bytes.extend(&self.version.to_be_bytes());
+                // Remaining offset bytes.
+                bytes.extend(&[0u8; 2]);
+            } else {
+                // The offset is only 6 bytes while usize is 8.
+                bytes.extend(&self.offset.to_be_bytes()[2..]);
+            }
+            bytes.extend(&[0u8; 2]); // Revision flags.
+            bytes.extend(&self.compressed_len.to_be_bytes()[4..]);
+            bytes.extend(&self.uncompressed_len.to_be_bytes()[4..]);
+            bytes.extend(&self.base_revision.to_be_bytes());
+            bytes
+        }
+    }
+
+    #[test]
+    fn is_not_inline_when_no_inline_flag_test() {
+        let bytes = IndexEntryBuilder::new()
+            .is_first(true)
+            .with_general_delta(false)
+            .with_inline(false)
+            .build();
+
+        assert_eq!(is_inline(&bytes), false)
+    }
+
+    #[test]
+    fn is_inline_when_inline_flag_test() {
+        let bytes = IndexEntryBuilder::new()
+            .is_first(true)
+            .with_general_delta(false)
+            .with_inline(true)
+            .build();
+
+        assert_eq!(is_inline(&bytes), true)
+    }
+
+    #[test]
+    fn is_inline_when_inline_and_generaldelta_flags_test() {
+        let bytes = IndexEntryBuilder::new()
+            .is_first(true)
+            .with_general_delta(true)
+            .with_inline(true)
+            .build();
+
+        assert_eq!(is_inline(&bytes), true)
+    }
+
+    #[test]
+    fn test_offset() {
+        let bytes = IndexEntryBuilder::new().with_offset(1).build();
+        let entry = IndexEntry {
+            bytes: &bytes,
+            offset_override: None,
+        };
+
+        assert_eq!(entry.offset(), 1)
+    }
+
+    #[test]
+    fn test_with_overridden_offset() {
+        let bytes = IndexEntryBuilder::new().with_offset(1).build();
+        let entry = IndexEntry {
+            bytes: &bytes,
+            offset_override: Some(2),
+        };
+
+        assert_eq!(entry.offset(), 2)
+    }
+
+    #[test]
+    fn test_compressed_len() {
+        let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
+        let entry = IndexEntry {
+            bytes: &bytes,
+            offset_override: None,
+        };
+
+        assert_eq!(entry.compressed_len(), 1)
+    }
+
+    #[test]
+    fn test_uncompressed_len() {
+        let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
+        let entry = IndexEntry {
+            bytes: &bytes,
+            offset_override: None,
+        };
+
+        assert_eq!(entry.uncompressed_len(), 1)
+    }
+
+    #[test]
+    fn test_base_revision() {
+        let bytes = IndexEntryBuilder::new().with_base_revision(1).build();
+        let entry = IndexEntry {
+            bytes: &bytes,
+            offset_override: None,
+        };
+
+        assert_eq!(entry.base_revision(), 1)
+    }
+}
+
+#[cfg(test)]
+pub use tests::IndexEntryBuilder;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/manifest.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,73 @@
+use crate::revlog::revlog::{Revlog, RevlogError};
+use crate::revlog::Revision;
+use crate::utils::hg_path::HgPath;
+use std::path::PathBuf;
+
+/// A specialized `Revlog` to work with `manifest` data format.
+pub struct Manifest {
+    /// The generic `revlog` format.
+    revlog: Revlog,
+}
+
+impl Manifest {
+    /// Open the `manifest` of a repository given by its root.
+    pub fn open(root: &PathBuf) -> Result<Self, RevlogError> {
+        let index_file = root.join(".hg/store/00manifest.i");
+        let revlog = Revlog::open(&index_file)?;
+        Ok(Self { revlog })
+    }
+
+    /// Return the `ManifestEntry` of a given node id.
+    pub fn get_node(&self, node: &[u8]) -> Result<ManifestEntry, RevlogError> {
+        let rev = self.revlog.get_node_rev(node)?;
+        self.get_rev(rev)
+    }
+
+    /// Return the `ManifestEntry` of a given revision.
+    pub fn get_rev(
+        &self,
+        rev: Revision,
+    ) -> Result<ManifestEntry, RevlogError> {
+        let bytes = self.revlog.get_rev_data(rev)?;
+        Ok(ManifestEntry { bytes })
+    }
+}
+
+/// `Manifest` entry which knows how to interpret the `manifest` data bytes.
+#[derive(Debug)]
+pub struct ManifestEntry {
+    bytes: Vec<u8>,
+}
+
+impl ManifestEntry {
+    /// Return an iterator over the lines of the entry.
+    pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
+        self.bytes
+            .split(|b| b == &b'\n')
+            .filter(|line| !line.is_empty())
+    }
+
+    /// Return an iterator over the files of the entry.
+    pub fn files(&self) -> impl Iterator<Item = &HgPath> {
+        self.lines().filter(|line| !line.is_empty()).map(|line| {
+            let pos = line
+                .iter()
+                .position(|x| x == &b'\0')
+                .expect("manifest line should contain \\0");
+            HgPath::new(&line[..pos])
+        })
+    }
+
+    /// Return an iterator over the files of the entry and their node ids.
+    pub fn files_with_nodes(&self) -> impl Iterator<Item = (&HgPath, &[u8])> {
+        self.lines().filter(|line| !line.is_empty()).map(|line| {
+            let pos = line
+                .iter()
+                .position(|x| x == &b'\0')
+                .expect("manifest line should contain \\0");
+            let hash_start = pos + 1;
+            let hash_end = hash_start + 40;
+            (HgPath::new(&line[..pos]), &line[hash_start..hash_end])
+        })
+    }
+}
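+
+// For reference, each manifest line is `<path>\0<40 hex digit node>`,
+// optionally followed by a flag byte, e.g. (illustrative):
+//
+//     foo/bar.txt\x001234567890123456789012345678901234567890
+//
+// `files` keeps only the path part; `files_with_nodes` also returns the
+// 40 hex digits that follow the NUL byte.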
--- a/rust/hg-core/src/revlog/node.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/revlog/node.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -16,7 +16,12 @@
 /// are private so that calling code does not expect all nodes have
 /// the same size, should we support several formats concurrently in
 /// the future.
-const NODE_BYTES_LENGTH: usize = 20;
+pub const NODE_BYTES_LENGTH: usize = 20;
+
+/// Id of the null node.
+///
+/// Used to indicate the absence of a node.
+pub const NULL_NODE_ID: [u8; NODE_BYTES_LENGTH] = [0u8; NODE_BYTES_LENGTH];
 
 /// The length in bytes of a `Node`
 ///
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/patch.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,369 @@
+use byteorder::{BigEndian, ByteOrder};
+
+/// A chunk of data to insert, delete or replace in a patch
+///
+/// A chunk is:
+/// - an insertion when `!data.is_empty() && start == end`
+/// - a deletion when `data.is_empty() && start < end`
+/// - a replacement when `!data.is_empty() && start < end`
+/// - not doing anything when `data.is_empty() && start == end`
+#[derive(Debug, Clone)]
+struct Chunk<'a> {
+    /// The start position of the chunk of data to replace
+    start: u32,
+    /// The end position of the chunk of data to replace (open end interval)
+    end: u32,
+    /// The data replacing the chunk
+    data: &'a [u8],
+}
+
+impl Chunk<'_> {
+    /// Adjusted start of the chunk to replace.
+    ///
+    /// `offset` accounts for the growth/shrinkage of the data induced by
+    /// previously applied chunks.
+    fn start_offset_by(&self, offset: i32) -> u32 {
+        let start = self.start as i32 + offset;
+        assert!(start >= 0, "negative chunk start should never happen");
+        start as u32
+    }
+
+    /// Adjusted end of the chunk to replace: the adjusted start plus the
+    /// length of the replacing data.
+    ///
+    /// `offset` accounts for the growth/shrinkage of the data induced by
+    /// previously applied chunks.
+    fn end_offset_by(&self, offset: i32) -> u32 {
+        self.start_offset_by(offset) + self.data.len() as u32
+    }
+
+    /// Length of the replaced chunk.
+    fn replaced_len(&self) -> u32 {
+        self.end - self.start
+    }
+
+    /// Length difference between the replacing data and the replaced data.
+    fn len_diff(&self) -> i32 {
+        self.data.len() as i32 - self.replaced_len() as i32
+    }
+}
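+
+// A worked example (illustrative data): on b"abcd", the chunk
+// { start: 1, end: 3, data: b"XY" } replaces b"bc" and yields b"aXYd";
+// its `len_diff` is 0. The chunk { start: 2, end: 2, data: b"ZZ" } is a
+// pure insertion with `len_diff` 2, yielding b"abZZcd".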
+
+/// The delta between the data of two revisions.
+#[derive(Debug, Clone)]
+pub struct PatchList<'a> {
+    /// A collection of chunks to apply.
+    ///
+    /// Those chunks are:
+    /// - ordered from the left-most replacement to the right-most replacement
+    /// - non-overlapping, meaning that two chunks cannot change the same
+    ///   range of the patched data
+    chunks: Vec<Chunk<'a>>,
+}
+
+impl<'a> PatchList<'a> {
+    /// Create a `PatchList` from bytes.
+    pub fn new(data: &'a [u8]) -> Self {
+        let mut chunks = vec![];
+        let mut data = data;
+        while !data.is_empty() {
+            let start = BigEndian::read_u32(&data[0..]);
+            let end = BigEndian::read_u32(&data[4..]);
+            let len = BigEndian::read_u32(&data[8..]);
+            assert!(start <= end);
+            chunks.push(Chunk {
+                start,
+                end,
+                data: &data[12..12 + (len as usize)],
+            });
+            data = &data[12 + (len as usize)..];
+        }
+        PatchList { chunks }
+    }
+
+    /// Return the final length of data after patching
+    /// given its initial length.
+    fn size(&self, initial_size: i32) -> i32 {
+        self.chunks
+            .iter()
+            .fold(initial_size, |acc, chunk| acc + chunk.len_diff())
+    }
+
+    /// Apply the patch to some data.
+    pub fn apply(&self, initial: &[u8]) -> Vec<u8> {
+        let mut last: usize = 0;
+        let mut vec =
+            Vec::with_capacity(self.size(initial.len() as i32) as usize);
+        for Chunk { start, end, data } in self.chunks.iter() {
+            vec.extend(&initial[last..(*start as usize)]);
+            vec.extend(data.iter());
+            last = *end as usize;
+        }
+        vec.extend(&initial[last..]);
+        vec
+    }
+
+    /// Combine two patch lists into a single patch list.
+    ///
+    /// Applying consecutive patches can waste time and memory, as the
+    /// changes introduced by one patch can be overridden by the next.
+    /// Combining patches optimizes the whole patching sequence.
+    fn combine(&mut self, other: &mut Self) -> Self {
+        let mut chunks = vec![];
+
+        // Keep track of each growth/shrinkage resulting from applying a chunk
+        // in order to adjust the start/end of subsequent chunks.
+        let mut offset = 0i32;
+
+        // Keep track of the chunk of self.chunks to process.
+        let mut pos = 0;
+
+        // For each chunk of `other`, chunks of `self` are processed
+        // until they start after the end of the current chunk.
+        for Chunk { start, end, data } in other.chunks.iter() {
+            // Add chunks of `self` that start before this chunk of `other`
+            // without overlap.
+            while pos < self.chunks.len()
+                && self.chunks[pos].end_offset_by(offset) <= *start
+            {
+                let first = self.chunks[pos].clone();
+                offset += first.len_diff();
+                chunks.push(first);
+                pos += 1;
+            }
+
+            // The current chunk of `self` starts before this chunk of `other`
+            // with overlap.
+            // The left-most part of the data is added as an insertion chunk.
+            // The right-most part of the data is kept in the chunk.
+            if pos < self.chunks.len()
+                && self.chunks[pos].start_offset_by(offset) < *start
+            {
+                let first = &mut self.chunks[pos];
+
+                let (data_left, data_right) = first.data.split_at(
+                    (*start - first.start_offset_by(offset)) as usize,
+                );
+                let left = Chunk {
+                    start: first.start,
+                    end: first.start,
+                    data: data_left,
+                };
+
+                first.data = data_right;
+
+                offset += left.len_diff();
+
+                chunks.push(left);
+
+                // The index is not incremented because the right-most part
+                // needs further examination.
+            }
+
+            // At this point the remaining chunks of `self` start after
+            // the current chunk of `other`.
+
+            // `offset` will be used to adjust the start of the current
+            // chunk of `other`.
+            // Offset tracking continues with `next_offset` to adjust the end
+            // of the current chunk of `other`.
+            let mut next_offset = offset;
+
+            // Discard the chunks of `self` that are totally overridden
+            // by the current chunk of `other`
+            while pos < self.chunks.len()
+                && self.chunks[pos].end_offset_by(next_offset) <= *end
+            {
+                let first = &self.chunks[pos];
+                next_offset += first.len_diff();
+                pos += 1;
+            }
+
+            // Truncate the left-most part of chunk of `self` that overlaps
+            // the current chunk of `other`.
+            if pos < self.chunks.len()
+                && self.chunks[pos].start_offset_by(next_offset) < *end
+            {
+                let first = &mut self.chunks[pos];
+
+                let how_much_to_discard =
+                    *end - first.start_offset_by(next_offset);
+
+                first.data = &first.data[(how_much_to_discard as usize)..];
+
+                next_offset += how_much_to_discard as i32;
+            }
+
+            // Add the chunk of `other` with adjusted position.
+            chunks.push(Chunk {
+                start: (*start as i32 - offset) as u32,
+                end: (*end as i32 - next_offset) as u32,
+                data,
+            });
+
+            // Go back to normal offset tracking for the next `other` chunk
+            offset = next_offset;
+        }
+
+        // Add remaining chunks of `self`.
+        for elt in &self.chunks[pos..] {
+            chunks.push(elt.clone());
+        }
+        PatchList { chunks }
+    }
+}
+
+/// Combine a list of patch lists into a single, optimized patch list.
+pub fn fold_patch_lists<'a>(lists: &[PatchList<'a>]) -> PatchList<'a> {
+    if lists.len() <= 1 {
+        if lists.is_empty() {
+            PatchList { chunks: vec![] }
+        } else {
+            lists[0].clone()
+        }
+    } else {
+        let (left, right) = lists.split_at(lists.len() / 2);
+        let mut left_res = fold_patch_lists(left);
+        let mut right_res = fold_patch_lists(right);
+        left_res.combine(&mut right_res)
+    }
+}
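+
+// A minimal sketch of the binary patch format parsed by `PatchList::new`:
+// a sequence of records, each a big-endian u32 start, u32 end, u32 data
+// length, followed by that many data bytes (values illustrative):
+//
+//     let mut raw = Vec::new();
+//     raw.extend(&1u32.to_be_bytes()); // start
+//     raw.extend(&3u32.to_be_bytes()); // end
+//     raw.extend(&2u32.to_be_bytes()); // data length
+//     raw.extend(b"XY");               // replacement data
+//     assert_eq!(PatchList::new(&raw).apply(b"abcd"), b"aXYd".to_vec());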
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    struct PatchDataBuilder {
+        data: Vec<u8>,
+    }
+
+    impl PatchDataBuilder {
+        pub fn new() -> Self {
+            Self { data: vec![] }
+        }
+
+        pub fn replace(
+            &mut self,
+            start: usize,
+            end: usize,
+            data: &[u8],
+        ) -> &mut Self {
+            assert!(start <= end);
+            self.data.extend(&(start as i32).to_be_bytes());
+            self.data.extend(&(end as i32).to_be_bytes());
+            self.data.extend(&(data.len() as i32).to_be_bytes());
+            self.data.extend(data.iter());
+            self
+        }
+
+        pub fn get(&mut self) -> &[u8] {
+            &self.data
+        }
+    }
+
+    #[test]
+    fn test_ends_before() {
+        let data = vec![0u8, 0u8, 0u8];
+        let mut patch1_data = PatchDataBuilder::new();
+        patch1_data.replace(0, 1, &[1, 2]);
+        let mut patch1 = PatchList::new(patch1_data.get());
+
+        let mut patch2_data = PatchDataBuilder::new();
+        patch2_data.replace(2, 4, &[3, 4]);
+        let mut patch2 = PatchList::new(patch2_data.get());
+
+        let patch = patch1.combine(&mut patch2);
+
+        let result = patch.apply(&data);
+
+        assert_eq!(result, vec![1u8, 2, 3, 4]);
+    }
+
+    #[test]
+    fn test_starts_after() {
+        let data = vec![0u8, 0u8, 0u8];
+        let mut patch1_data = PatchDataBuilder::new();
+        patch1_data.replace(2, 3, &[3]);
+        let mut patch1 = PatchList::new(patch1_data.get());
+
+        let mut patch2_data = PatchDataBuilder::new();
+        patch2_data.replace(1, 2, &[1, 2]);
+        let mut patch2 = PatchList::new(patch2_data.get());
+
+        let patch = patch1.combine(&mut patch2);
+
+        let result = patch.apply(&data);
+
+        assert_eq!(result, vec![0u8, 1, 2, 3]);
+    }
+
+    #[test]
+    fn test_overridden() {
+        let data = vec![0u8, 0, 0];
+        let mut patch1_data = PatchDataBuilder::new();
+        patch1_data.replace(1, 2, &[3, 4]);
+        let mut patch1 = PatchList::new(patch1_data.get());
+
+        let mut patch2_data = PatchDataBuilder::new();
+        patch2_data.replace(1, 4, &[1, 2, 3]);
+        let mut patch2 = PatchList::new(patch2_data.get());
+
+        let patch = patch1.combine(&mut patch2);
+
+        let result = patch.apply(&data);
+
+        assert_eq!(result, vec![0u8, 1, 2, 3]);
+    }
+
+    #[test]
+    fn test_right_most_part_is_overridden() {
+        let data = vec![0u8, 0, 0];
+        let mut patch1_data = PatchDataBuilder::new();
+        patch1_data.replace(0, 1, &[1, 3]);
+        let mut patch1 = PatchList::new(patch1_data.get());
+
+        let mut patch2_data = PatchDataBuilder::new();
+        patch2_data.replace(1, 4, &[2, 3, 4]);
+        let mut patch2 = PatchList::new(patch2_data.get());
+
+        let patch = patch1.combine(&mut patch2);
+
+        let result = patch.apply(&data);
+
+        assert_eq!(result, vec![1u8, 2, 3, 4]);
+    }
+
+    #[test]
+    fn test_left_most_part_is_overridden() {
+        let data = vec![0u8, 0, 0];
+        let mut patch1_data = PatchDataBuilder::new();
+        patch1_data.replace(1, 3, &[1, 3, 4]);
+        let mut patch1 = PatchList::new(patch1_data.get());
+
+        let mut patch2_data = PatchDataBuilder::new();
+        patch2_data.replace(0, 2, &[1, 2]);
+        let mut patch2 = PatchList::new(patch2_data.get());
+
+        let patch = patch1.combine(&mut patch2);
+
+        let result = patch.apply(&data);
+
+        assert_eq!(result, vec![1u8, 2, 3, 4]);
+    }
+
+    #[test]
+    fn test_mid_is_overridden() {
+        let data = vec![0u8, 0, 0];
+        let mut patch1_data = PatchDataBuilder::new();
+        patch1_data.replace(0, 3, &[1, 3, 3, 4]);
+        let mut patch1 = PatchList::new(patch1_data.get());
+
+        let mut patch2_data = PatchDataBuilder::new();
+        patch2_data.replace(1, 3, &[2, 3]);
+        let mut patch2 = PatchList::new(patch2_data.get());
+
+        let patch = patch1.combine(&mut patch2);
+
+        let result = patch.apply(&data);
+
+        assert_eq!(result, vec![1u8, 2, 3, 4]);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/path_encode.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,653 @@
+use crypto::digest::Digest;
+use crypto::sha1::Sha1;
+
+#[derive(PartialEq, Debug)]
+#[allow(non_camel_case_types)]
+enum path_state {
+    START, /* first byte of a path component */
+    A,     /* "AUX" */
+    AU,
+    THIRD, /* third of a 3-byte sequence, e.g. "AUX", "NUL" */
+    C,     /* "CON" or "COMn" */
+    CO,
+    COMLPT, /* "COM" or "LPT" */
+    COMLPTn,
+    L,
+    LP,
+    N,
+    NU,
+    P, /* "PRN" */
+    PR,
+    LDOT, /* leading '.' */
+    DOT,  /* '.' in a non-leading position */
+    H,    /* ".h" */
+    HGDI, /* ".hg", ".d", or ".i" */
+    SPACE,
+    DEFAULT, /* byte of a path component after the first */
+}
+
+/* state machine for dir-encoding */
+#[allow(non_camel_case_types)]
+enum dir_state {
+    DDOT,
+    DH,
+    DHGDI,
+    DDEFAULT,
+}
+
+fn inset(bitset: &[u32; 8], c: u8) -> bool {
+    bitset[(c as usize) >> 5] & (1 << (c & 31)) != 0
+}
+
+fn charcopy(dest: Option<&mut [u8]>, destlen: &mut usize, c: u8) {
+    if let Some(slice) = dest {
+        slice[*destlen] = c
+    }
+    *destlen += 1
+}
+
+fn memcopy(dest: Option<&mut [u8]>, destlen: &mut usize, src: &[u8]) {
+    if let Some(slice) = dest {
+        slice[*destlen..*destlen + src.len()].copy_from_slice(src)
+    }
+    *destlen += src.len();
+}
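+
+// `charcopy` and `memcopy` support a two-pass pattern: with `dest == None`
+// they only advance `destlen`, so a caller can measure the encoded length
+// first and then encode again into a buffer of exactly that size. A
+// minimal sketch of the intended use:
+//
+//     let mut len = 0;
+//     memcopy(None, &mut len, b"abc"); // counts only, len == 3
+//     let mut buf = vec![0u8; len];
+//     let mut written = 0;
+//     memcopy(Some(&mut buf), &mut written, b"abc");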
+
+fn rewrap_option<'a, 'b: 'a>(
+    x: &'a mut Option<&'b mut [u8]>,
+) -> Option<&'a mut [u8]> {
+    match x {
+        None => None,
+        Some(y) => Some(y),
+    }
+}
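+
+// `rewrap_option` reborrows the mutable slice inside the `Option` for a
+// single call: `Option<&mut [u8]>` is not `Copy`, so passing `dest` by
+// value to `charcopy` or `memcopy` would move it and leave nothing for
+// subsequent calls. The shorter-lived reborrow avoids that.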
+
+fn hexencode<'a>(mut dest: Option<&'a mut [u8]>, destlen: &mut usize, c: u8) {
+    let hexdigit = b"0123456789abcdef";
+    charcopy(
+        rewrap_option(&mut dest),
+        destlen,
+        hexdigit[(c as usize) >> 4],
+    );
+    charcopy(dest, destlen, hexdigit[(c as usize) & 15]);
+}
+
+/* 3-byte escape: tilde followed by two hex digits */
+fn escape3(mut dest: Option<&mut [u8]>, destlen: &mut usize, c: u8) {
+    charcopy(rewrap_option(&mut dest), destlen, b'~');
+    hexencode(dest, destlen, c);
+}
+
+fn encode_dir(mut dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+    let mut state = dir_state::DDEFAULT;
+    let mut i = 0;
+    let mut destlen = 0;
+
+    while i < src.len() {
+        match state {
+            dir_state::DDOT => match src[i] {
+                b'd' | b'i' => {
+                    state = dir_state::DHGDI;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'h' => {
+                    state = dir_state::DH;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                _ => {
+                    state = dir_state::DDEFAULT;
+                }
+            },
+            dir_state::DH => {
+                if src[i] == b'g' {
+                    state = dir_state::DHGDI;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                } else {
+                    state = dir_state::DDEFAULT;
+                }
+            }
+            dir_state::DHGDI => {
+                if src[i] == b'/' {
+                    memcopy(rewrap_option(&mut dest), &mut destlen, b".hg");
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                state = dir_state::DDEFAULT;
+            }
+            dir_state::DDEFAULT => {
+                if src[i] == b'.' {
+                    state = dir_state::DDOT
+                }
+                charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                i += 1;
+            }
+        }
+    }
+    destlen
+}
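+
+// For illustration (behaviour inherited from Mercurial's `encodedir`):
+// path components ending in `.hg`, `.d` or `.i` gain an extra `.hg`
+// suffix so they cannot collide with store files, e.g. `encode_dir`
+// turns `data/foo.i/bla.i` into `data/foo.i.hg/bla.i`.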
+
+fn _encode(
+    twobytes: &[u32; 8],
+    onebyte: &[u32; 8],
+    mut dest: Option<&mut [u8]>,
+    src: &[u8],
+    encodedir: bool,
+) -> usize {
+    let mut state = path_state::START;
+    let mut i = 0;
+    let mut destlen = 0;
+    let len = src.len();
+
+    while i < len {
+        match state {
+            path_state::START => match src[i] {
+                b'/' => {
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'.' => {
+                    state = path_state::LDOT;
+                    escape3(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b' ' => {
+                    state = path_state::DEFAULT;
+                    escape3(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'a' => {
+                    state = path_state::A;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'c' => {
+                    state = path_state::C;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'l' => {
+                    state = path_state::L;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'n' => {
+                    state = path_state::N;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'p' => {
+                    state = path_state::P;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                _ => {
+                    state = path_state::DEFAULT;
+                }
+            },
+            path_state::A => {
+                if src[i] == b'u' {
+                    state = path_state::AU;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::AU => {
+                if src[i] == b'x' {
+                    state = path_state::THIRD;
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::THIRD => {
+                state = path_state::DEFAULT;
+                match src[i] {
+                    b'.' | b'/' | b'\0' => escape3(
+                        rewrap_option(&mut dest),
+                        &mut destlen,
+                        src[i - 1],
+                    ),
+                    _ => i -= 1,
+                }
+            }
+            path_state::C => {
+                if src[i] == b'o' {
+                    state = path_state::CO;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::CO => {
+                if src[i] == b'm' {
+                    state = path_state::COMLPT;
+                    i += 1;
+                } else if src[i] == b'n' {
+                    state = path_state::THIRD;
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::COMLPT => {
+                if src[i] >= b'1' && src[i] <= b'9' {
+                    state = path_state::COMLPTn;
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                    charcopy(
+                        rewrap_option(&mut dest),
+                        &mut destlen,
+                        src[i - 1],
+                    );
+                }
+            }
+            path_state::COMLPTn => {
+                state = path_state::DEFAULT;
+                match src[i] {
+                    b'.' | b'/' | b'\0' => {
+                        escape3(
+                            rewrap_option(&mut dest),
+                            &mut destlen,
+                            src[i - 2],
+                        );
+                        charcopy(
+                            rewrap_option(&mut dest),
+                            &mut destlen,
+                            src[i - 1],
+                        );
+                    }
+                    _ => {
+                        memcopy(
+                            rewrap_option(&mut dest),
+                            &mut destlen,
+                            &src[i - 2..i],
+                        );
+                    }
+                }
+            }
+            path_state::L => {
+                if src[i] == b'p' {
+                    state = path_state::LP;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::LP => {
+                if src[i] == b't' {
+                    state = path_state::COMLPT;
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::N => {
+                if src[i] == b'u' {
+                    state = path_state::NU;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::NU => {
+                if src[i] == b'l' {
+                    state = path_state::THIRD;
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::P => {
+                if src[i] == b'r' {
+                    state = path_state::PR;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::PR => {
+                if src[i] == b'n' {
+                    state = path_state::THIRD;
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::LDOT => match src[i] {
+                b'd' | b'i' => {
+                    state = path_state::HGDI;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'h' => {
+                    state = path_state::H;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                _ => {
+                    state = path_state::DEFAULT;
+                }
+            },
+            path_state::DOT => match src[i] {
+                b'/' | b'\0' => {
+                    state = path_state::START;
+                    memcopy(rewrap_option(&mut dest), &mut destlen, b"~2e");
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'd' | b'i' => {
+                    state = path_state::HGDI;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, b'.');
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                b'h' => {
+                    state = path_state::H;
+                    memcopy(rewrap_option(&mut dest), &mut destlen, b".h");
+                    i += 1;
+                }
+                _ => {
+                    state = path_state::DEFAULT;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, b'.');
+                }
+            },
+            path_state::H => {
+                if src[i] == b'g' {
+                    state = path_state::HGDI;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::HGDI => {
+                if src[i] == b'/' {
+                    state = path_state::START;
+                    if encodedir {
+                        memcopy(
+                            rewrap_option(&mut dest),
+                            &mut destlen,
+                            b".hg",
+                        );
+                    }
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1
+                } else {
+                    state = path_state::DEFAULT;
+                }
+            }
+            path_state::SPACE => match src[i] {
+                b'/' | b'\0' => {
+                    state = path_state::START;
+                    memcopy(rewrap_option(&mut dest), &mut destlen, b"~20");
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                _ => {
+                    state = path_state::DEFAULT;
+                    charcopy(rewrap_option(&mut dest), &mut destlen, b' ');
+                }
+            },
+            path_state::DEFAULT => {
+                while i != len && inset(onebyte, src[i]) {
+                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    i += 1;
+                }
+                if i == len {
+                    break;
+                }
+                match src[i] {
+                    b'.' => {
+                        state = path_state::DOT;
+                        i += 1
+                    }
+                    b' ' => {
+                        state = path_state::SPACE;
+                        i += 1
+                    }
+                    b'/' => {
+                        state = path_state::START;
+                        charcopy(rewrap_option(&mut dest), &mut destlen, b'/');
+                        i += 1;
+                    }
+                    _ => {
+                        if inset(onebyte, src[i]) {
+                            loop {
+                                charcopy(
+                                    rewrap_option(&mut dest),
+                                    &mut destlen,
+                                    src[i],
+                                );
+                                i += 1;
+                                if !(i < len && inset(onebyte, src[i])) {
+                                    break;
+                                }
+                            }
+                        } else if inset(twobytes, src[i]) {
+                            let c = src[i];
+                            i += 1;
+                            charcopy(
+                                rewrap_option(&mut dest),
+                                &mut destlen,
+                                b'_',
+                            );
+                            charcopy(
+                                rewrap_option(&mut dest),
+                                &mut destlen,
+                                if c == b'_' { b'_' } else { c + 32 },
+                            );
+                        } else {
+                            escape3(
+                                rewrap_option(&mut dest),
+                                &mut destlen,
+                                src[i],
+                            );
+                            i += 1;
+                        }
+                    }
+                }
+            }
+        }
+    }
+    match state {
+        path_state::START => (),
+        path_state::A => (),
+        path_state::AU => (),
+        path_state::THIRD => {
+            escape3(rewrap_option(&mut dest), &mut destlen, src[i - 1])
+        }
+        path_state::C => (),
+        path_state::CO => (),
+        path_state::COMLPT => {
+            charcopy(rewrap_option(&mut dest), &mut destlen, src[i - 1])
+        }
+        path_state::COMLPTn => {
+            escape3(rewrap_option(&mut dest), &mut destlen, src[i - 2]);
+            charcopy(rewrap_option(&mut dest), &mut destlen, src[i - 1]);
+        }
+        path_state::L => (),
+        path_state::LP => (),
+        path_state::N => (),
+        path_state::NU => (),
+        path_state::P => (),
+        path_state::PR => (),
+        path_state::LDOT => (),
+        path_state::DOT => {
+            memcopy(rewrap_option(&mut dest), &mut destlen, b"~2e");
+        }
+        path_state::H => (),
+        path_state::HGDI => (),
+        path_state::SPACE => {
+            memcopy(rewrap_option(&mut dest), &mut destlen, b"~20");
+        }
+        path_state::DEFAULT => (),
+    };
+    destlen
+}
+
+fn basic_encode(dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+    let twobytes: [u32; 8] = [0, 0, 0x87ff_fffe, 0, 0, 0, 0, 0];
+    let onebyte: [u32; 8] =
+        [1, 0x2bff_3bfa, 0x6800_0001, 0x2fff_ffff, 0, 0, 0, 0];
+    _encode(&twobytes, &onebyte, dest, src, true)
+}
+
+const MAXSTOREPATHLEN: usize = 120;
+
+fn lower_encode(mut dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+    let onebyte: [u32; 8] =
+        [1, 0x2bff_fbfb, 0xe800_0001, 0x2fff_ffff, 0, 0, 0, 0];
+    let lower: [u32; 8] = [0, 0, 0x07ff_fffe, 0, 0, 0, 0, 0];
+    let mut destlen = 0;
+    for c in src {
+        if inset(&onebyte, *c) {
+            charcopy(rewrap_option(&mut dest), &mut destlen, *c)
+        } else if inset(&lower, *c) {
+            charcopy(rewrap_option(&mut dest), &mut destlen, *c + 32)
+        } else {
+            escape3(rewrap_option(&mut dest), &mut destlen, *c)
+        }
+    }
+    destlen
+}
+
+fn aux_encode(dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+    let twobytes = [0; 8];
+    let onebyte: [u32; 8] = [!0, 0xffff_3ffe, !0, !0, !0, !0, !0, !0];
+    _encode(&twobytes, &onebyte, dest, src, false)
+}
+
+fn hash_mangle(src: &[u8], sha: &[u8]) -> Vec<u8> {
+    let dirprefixlen = 8;
+    let maxshortdirslen = 68;
+    let mut destlen = 0;
+
+    let last_slash = src.iter().rposition(|b| *b == b'/');
+    let last_dot: Option<usize> = {
+        let s = last_slash.unwrap_or(0);
+        src[s..]
+            .iter()
+            .rposition(|b| *b == b'.')
+            .map(|i| i + s)
+    };
+
+    let mut dest = vec![0; MAXSTOREPATHLEN];
+    memcopy(Some(&mut dest), &mut destlen, b"dh/");
+
+    {
+        let mut first = true;
+        for slice in src[..last_slash.unwrap_or_else(|| src.len())]
+            .split(|b| *b == b'/')
+        {
+            let slice = &slice[..std::cmp::min(slice.len(), dirprefixlen)];
+            if destlen + (slice.len() + if first { 0 } else { 1 })
+                > maxshortdirslen + 3
+            {
+                break;
+            } else {
+                if !first {
+                    charcopy(Some(&mut dest), &mut destlen, b'/')
+                };
+                memcopy(Some(&mut dest), &mut destlen, slice);
+                if dest[destlen - 1] == b'.' || dest[destlen - 1] == b' ' {
+                    dest[destlen - 1] = b'_'
+                }
+            }
+            first = false;
+        }
+        if !first {
+            charcopy(Some(&mut dest), &mut destlen, b'/');
+        }
+    }
+
+    let used = destlen + 40 + {
+        if let Some(l) = last_dot {
+            src.len() - l
+        } else {
+            0
+        }
+    };
+
+    if MAXSTOREPATHLEN > used {
+        let slop = MAXSTOREPATHLEN - used;
+        let basenamelen = match last_slash {
+            Some(l) => src.len() - l - 1,
+            None => src.len(),
+        };
+        let basenamelen = std::cmp::min(basenamelen, slop);
+        if basenamelen > 0 {
+            let start = match last_slash {
+                Some(l) => l + 1,
+                None => 0,
+            };
+            memcopy(
+                Some(&mut dest),
+                &mut destlen,
+                &src[start..][..basenamelen],
+            )
+        }
+    }
+    for c in sha {
+        hexencode(Some(&mut dest), &mut destlen, *c);
+    }
+    if let Some(l) = last_dot {
+        memcopy(Some(&mut dest), &mut destlen, &src[l..]);
+    }
+    if destlen == dest.len() {
+        dest
+    } else {
+        // sometimes the resulting path is shorter than MAXSTOREPATHLEN
+        dest[..destlen].to_vec()
+    }
+}
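+
+// Rough shape of the `dh/` paths built above, as read from the code:
+// "dh/", then up-to-8-byte prefixes of the leading directories (capped
+// around `maxshortdirslen` bytes), then as much of the basename as still
+// fits, then 40 hex digits of the SHA-1, then the original extension,
+// all within `MAXSTOREPATHLEN` bytes.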
+
+const MAXENCODE: usize = 4096 * 4;
+fn hash_encode(src: &[u8]) -> Vec<u8> {
+    let dired = &mut [0; MAXENCODE];
+    let lowered = &mut [0; MAXENCODE];
+    let auxed = &mut [0; MAXENCODE];
+    let baselen = (src.len() - 5) * 3;
+    if baselen >= MAXENCODE {
+        panic!("path_encode::hash_encode: string too long: {}", baselen)
+    };
+    let dirlen = encode_dir(Some(&mut dired[..]), src);
+    let sha = {
+        let mut hasher = Sha1::new();
+        hasher.input(&dired[..dirlen]);
+        let mut hash = vec![0; 20];
+        hasher.result(&mut hash);
+        hash
+    };
+    let lowerlen = lower_encode(Some(&mut lowered[..]), &dired[..dirlen][5..]);
+    let auxlen = aux_encode(Some(&mut auxed[..]), &lowered[..lowerlen]);
+    hash_mangle(&auxed[..auxlen], &sha)
+}
+
+pub fn path_encode(path: &[u8]) -> Vec<u8> {
+    let newlen = if path.len() <= MAXSTOREPATHLEN {
+        basic_encode(None, path)
+    } else {
+        MAXSTOREPATHLEN + 1
+    };
+    if newlen <= MAXSTOREPATHLEN {
+        if newlen == path.len() {
+            path.to_vec()
+        } else {
+            let mut res = vec![0; newlen];
+            basic_encode(Some(&mut res), path);
+            res
+        }
+    } else {
+        hash_encode(&path)
+    }
+}
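+
+// A hedged usage sketch; the expected values assume parity with the
+// store encoding in Mercurial's C and Python implementations, which this
+// module reimplements:
+//
+//     assert_eq!(path_encode(b"data/abc.i"), b"data/abc.i".to_vec());
+//     // Windows-reserved names get their third byte escaped:
+//     assert_eq!(path_encode(b"data/aux.i"), b"data/au~78.i".to_vec());
+//     // Paths longer than MAXSTOREPATHLEN fall back to hashed "dh/..."
+//     // names built by `hash_encode`.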
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/revlog.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,331 @@
+use std::borrow::Cow;
+use std::fs::File;
+use std::io::Read;
+use std::ops::Deref;
+use std::path::Path;
+
+use byteorder::{BigEndian, ByteOrder};
+use crypto::digest::Digest;
+use crypto::sha1::Sha1;
+use flate2::read::ZlibDecoder;
+use memmap::{Mmap, MmapOptions};
+use micro_timer::timed;
+use zstd;
+
+use super::index::Index;
+use super::node::{NODE_BYTES_LENGTH, NULL_NODE_ID};
+use super::patch;
+use crate::revlog::Revision;
+
+pub enum RevlogError {
+    IoError(std::io::Error),
+    UnsupportedVersion(u16),
+    InvalidRevision,
+    Corrupted,
+    UnknownDataFormat(u8),
+}
+
+fn mmap_open(path: &Path) -> Result<Mmap, std::io::Error> {
+    let file = File::open(path)?;
+    let mmap = unsafe { MmapOptions::new().map(&file) }?;
+    Ok(mmap)
+}
+
+/// Read-only implementation of revlog.
+pub struct Revlog {
+    /// When index and data are not interleaved: bytes of the revlog index.
+    /// When index and data are interleaved: bytes of the revlog index and
+    /// data.
+    index: Index,
+    /// When index and data are not interleaved: bytes of the revlog data
+    data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
+}
+
+impl Revlog {
+    /// Open a revlog index file.
+    ///
+    /// It will also open the associated data file if index and data are not
+    /// interleaved.
+    #[timed]
+    pub fn open(index_path: &Path) -> Result<Self, RevlogError> {
+        let index_mmap =
+            mmap_open(&index_path).map_err(RevlogError::IoError)?;
+
+        let version = get_version(&index_mmap);
+        if version != 1 {
+            return Err(RevlogError::UnsupportedVersion(version));
+        }
+
+        let index = Index::new(Box::new(index_mmap))?;
+
+        // TODO: load data only when needed.
+        // The type annotation is required: without it the compiler
+        // won't recognize Mmap as Deref<Target = [u8]>.
+        let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
+            if index.is_inline() {
+                None
+            } else {
+                let data_path = index_path.with_extension("d");
+                let data_mmap =
+                    mmap_open(&data_path).map_err(RevlogError::IoError)?;
+                Some(Box::new(data_mmap))
+            };
+
+        Ok(Revlog { index, data_bytes })
+    }
+
+    /// Return the number of entries in the `Revlog`.
+    pub fn len(&self) -> usize {
+        self.index.len()
+    }
+
+    /// Return `true` if the `Revlog` has zero entries.
+    pub fn is_empty(&self) -> bool {
+        self.index.is_empty()
+    }
+
+    /// Return the revision whose node (hash) matches the given bytes.
+    #[timed]
+    pub fn get_node_rev(&self, node: &[u8]) -> Result<Revision, RevlogError> {
+        // This is brute force. But it is fast enough for now.
+        // Optimization will come later.
+        for rev in (0..self.len() as Revision).rev() {
+            let index_entry =
+                self.index.get_entry(rev).ok_or(RevlogError::Corrupted)?;
+            if node == index_entry.hash() {
+                return Ok(rev);
+            }
+        }
+        Err(RevlogError::InvalidRevision)
+    }
+
+    /// Return the full data associated with a revision.
+    ///
+    /// All entries required to build the final data out of deltas will be
+    /// retrieved as needed, and the deltas will be applied to the initial
+    /// snapshot to rebuild the final data.
+    #[timed]
+    pub fn get_rev_data(&self, rev: Revision) -> Result<Vec<u8>, RevlogError> {
+        // TODO: return a Cow instead of an owned Vec.
+        let mut entry = self.get_entry(rev)?;
+        let mut delta_chain = vec![];
+        while let Some(base_rev) = entry.base_rev {
+            delta_chain.push(entry);
+            entry =
+                self.get_entry(base_rev).or(Err(RevlogError::Corrupted))?;
+        }
+
+        // TODO do not look twice in the index
+        let index_entry = self
+            .index
+            .get_entry(rev)
+            .ok_or(RevlogError::InvalidRevision)?;
+
+        let data: Vec<u8> = if delta_chain.is_empty() {
+            entry.data()?.into()
+        } else {
+            Revlog::build_data_from_deltas(entry, &delta_chain)?
+        };
+
+        if self.check_hash(
+            index_entry.p1(),
+            index_entry.p2(),
+            index_entry.hash(),
+            &data,
+        ) {
+            Ok(data)
+        } else {
+            Err(RevlogError::Corrupted)
+        }
+    }
+
+    /// Check the hash of some given data against the recorded hash.
+    pub fn check_hash(
+        &self,
+        p1: Revision,
+        p2: Revision,
+        expected: &[u8],
+        data: &[u8],
+    ) -> bool {
+        let e1 = self.index.get_entry(p1);
+        let h1 = match e1 {
+            Some(ref entry) => entry.hash(),
+            None => &NULL_NODE_ID,
+        };
+        let e2 = self.index.get_entry(p2);
+        let h2 = match e2 {
+            Some(ref entry) => entry.hash(),
+            None => &NULL_NODE_ID,
+        };
+
+        hash(data, &h1, &h2).as_slice() == expected
+    }
+
+    /// Build the full data of a revision out of its snapshot
+    /// and its deltas.
+    #[timed]
+    fn build_data_from_deltas(
+        snapshot: RevlogEntry,
+        deltas: &[RevlogEntry],
+    ) -> Result<Vec<u8>, RevlogError> {
+        let snapshot = snapshot.data()?;
+        let deltas = deltas
+            .iter()
+            .rev()
+            .map(RevlogEntry::data)
+            .collect::<Result<Vec<Cow<'_, [u8]>>, RevlogError>>()?;
+        let patches: Vec<_> =
+            deltas.iter().map(|d| patch::PatchList::new(d)).collect();
+        let patch = patch::fold_patch_lists(&patches);
+        Ok(patch.apply(&snapshot))
+    }
+
+    /// Return the revlog data.
+    fn data(&self) -> &[u8] {
+        match self.data_bytes {
+            Some(ref data_bytes) => &data_bytes,
+            None => panic!(
+                "forgot to load the data or trying to access inline data"
+            ),
+        }
+    }
+
+    /// Get an entry of the revlog.
+    fn get_entry(&self, rev: Revision) -> Result<RevlogEntry, RevlogError> {
+        let index_entry = self
+            .index
+            .get_entry(rev)
+            .ok_or(RevlogError::InvalidRevision)?;
+        let start = index_entry.offset();
+        let end = start + index_entry.compressed_len();
+        let data = if self.index.is_inline() {
+            self.index.data(start, end)
+        } else {
+            &self.data()[start..end]
+        };
+        let entry = RevlogEntry {
+            rev,
+            bytes: data,
+            compressed_len: index_entry.compressed_len(),
+            uncompressed_len: index_entry.uncompressed_len(),
+            base_rev: if index_entry.base_revision() == rev {
+                None
+            } else {
+                Some(index_entry.base_revision())
+            },
+        };
+        Ok(entry)
+    }
+}
+
+/// The revlog entry's bytes and the necessary information to extract
+/// the entry's data.
+#[derive(Debug)]
+pub struct RevlogEntry<'a> {
+    rev: Revision,
+    bytes: &'a [u8],
+    compressed_len: usize,
+    uncompressed_len: usize,
+    base_rev: Option<Revision>,
+}
+
+impl<'a> RevlogEntry<'a> {
+    /// Extract the data contained in the entry.
+    pub fn data(&self) -> Result<Cow<'_, [u8]>, RevlogError> {
+        if self.bytes.is_empty() {
+            return Ok(Cow::Borrowed(&[]));
+        }
+        match self.bytes[0] {
+            // Revision data is the entirety of the entry, including this
+            // header.
+            b'\0' => Ok(Cow::Borrowed(self.bytes)),
+            // Raw revision data follows.
+            b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
+            // zlib (RFC 1950) data.
+            b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
+            // zstd data.
+            b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
+            format_type => Err(RevlogError::UnknownDataFormat(format_type)),
+        }
+    }
+
+    fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, RevlogError> {
+        let mut decoder = ZlibDecoder::new(self.bytes);
+        if self.is_delta() {
+            let mut buf = Vec::with_capacity(self.compressed_len);
+            decoder
+                .read_to_end(&mut buf)
+                .or(Err(RevlogError::Corrupted))?;
+            Ok(buf)
+        } else {
+            let mut buf = vec![0; self.uncompressed_len];
+            decoder
+                .read_exact(&mut buf)
+                .or(Err(RevlogError::Corrupted))?;
+            Ok(buf)
+        }
+    }
+
+    fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, RevlogError> {
+        if self.is_delta() {
+            let mut buf = Vec::with_capacity(self.compressed_len);
+            zstd::stream::copy_decode(self.bytes, &mut buf)
+                .or(Err(RevlogError::Corrupted))?;
+            Ok(buf)
+        } else {
+            let mut buf = vec![0; self.uncompressed_len];
+            let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
+                .or(Err(RevlogError::Corrupted))?;
+            if len != self.uncompressed_len {
+                Err(RevlogError::Corrupted)
+            } else {
+                Ok(buf)
+            }
+        }
+    }
+
+    /// Tell if the entry is a snapshot or a delta
+    /// (this influences how the data is decompressed).
+    fn is_delta(&self) -> bool {
+        self.base_rev.is_some()
+    }
+}
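+
+// Hedged usage sketch of the API above (the store path is hypothetical):
+//
+//     let revlog = Revlog::open(Path::new(".hg/store/00changelog.i"))?;
+//     let rev = revlog.get_node_rev(&node_bytes)?;
+//     let data = revlog.get_rev_data(rev)?;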
+
+/// Format version of the revlog.
+pub fn get_version(index_bytes: &[u8]) -> u16 {
+    BigEndian::read_u16(&index_bytes[2..=3])
+}
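+
+// In the revlog v1 format, the first four bytes of the index double as a
+// header: the high two bytes carry flags (such as inline data) and the
+// low two bytes, read above, carry the format version, both big-endian.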
+
+/// Calculate the hash of a revision given its data and its parents.
+fn hash(data: &[u8], p1_hash: &[u8], p2_hash: &[u8]) -> Vec<u8> {
+    let mut hasher = Sha1::new();
+    let (a, b) = (p1_hash, p2_hash);
+    if a > b {
+        hasher.input(b);
+        hasher.input(a);
+    } else {
+        hasher.input(a);
+        hasher.input(b);
+    }
+    hasher.input(data);
+    let mut hash = vec![0; NODE_BYTES_LENGTH];
+    hasher.result(&mut hash);
+    hash
+}
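+
+// Illustrative formula: the node id is the SHA-1 of the two parent
+// hashes, sorted so the result is symmetric in p1 and p2, followed by
+// the full text:
+//
+//     node = sha1(min(p1, p2) + max(p1, p2) + text)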
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use super::super::index::IndexEntryBuilder;
+
+    #[test]
+    fn version_test() {
+        let bytes = IndexEntryBuilder::new()
+            .is_first(true)
+            .with_version(1)
+            .build();
+
+        assert_eq!(get_version(&bytes), 1)
+    }
+}
--- a/rust/hg-core/src/utils/files.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/utils/files.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -16,7 +16,7 @@
 };
 use lazy_static::lazy_static;
 use same_file::is_same_file;
-use std::borrow::ToOwned;
+use std::borrow::{Cow, ToOwned};
 use std::fs::Metadata;
 use std::iter::FusedIterator;
 use std::ops::Deref;
@@ -248,6 +248,66 @@
     }
 }
 
+/// Returns the representation of the path relative to the current working
+/// directory for display purposes.
+///
+/// `cwd` is a `HgPath`, so it is considered relative to the root directory
+/// of the repository.
+///
+/// # Examples
+///
+/// ```
+/// use hg::utils::hg_path::HgPath;
+/// use hg::utils::files::relativize_path;
+/// use std::borrow::Cow;
+///
+/// let file = HgPath::new(b"nested/file");
+/// let cwd = HgPath::new(b"");
+/// assert_eq!(relativize_path(file, cwd), Cow::Borrowed(b"nested/file"));
+///
+/// let cwd = HgPath::new(b"nested");
+/// assert_eq!(relativize_path(file, cwd), Cow::Borrowed(b"file"));
+///
+/// let cwd = HgPath::new(b"other");
+/// assert_eq!(relativize_path(file, cwd), Cow::Borrowed(b"../nested/file"));
+/// ```
+pub fn relativize_path(path: &HgPath, cwd: impl AsRef<HgPath>) -> Cow<[u8]> {
+    if cwd.as_ref().is_empty() {
+        Cow::Borrowed(path.as_bytes())
+    } else {
+        let mut res: Vec<u8> = Vec::new();
+        let mut path_iter = path.as_bytes().split(|b| *b == b'/').peekable();
+        let mut cwd_iter =
+            cwd.as_ref().as_bytes().split(|b| *b == b'/').peekable();
+        loop {
+            match (path_iter.peek(), cwd_iter.peek()) {
+                (Some(a), Some(b)) if a == b => (),
+                _ => break,
+            }
+            path_iter.next();
+            cwd_iter.next();
+        }
+        let mut need_sep = false;
+        for _ in cwd_iter {
+            if need_sep {
+                res.extend(b"/")
+            } else {
+                need_sep = true
+            };
+            res.extend(b"..");
+        }
+        for c in path_iter {
+            if need_sep {
+                res.extend(b"/")
+            } else {
+                need_sep = true
+            };
+            res.extend(c);
+        }
+        Cow::Owned(res)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
--- a/rust/hg-core/src/utils/hg_path.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-core/src/utils/hg_path.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -6,6 +6,7 @@
 // GNU General Public License version 2 or any later version.
 
 use std::borrow::Borrow;
+use std::convert::TryFrom;
 use std::ffi::{OsStr, OsString};
 use std::fmt;
 use std::ops::Deref;
@@ -515,6 +516,13 @@
     Ok(buf)
 }
 
+impl TryFrom<PathBuf> for HgPathBuf {
+    type Error = HgPathError;
+    fn try_from(path: PathBuf) -> Result<Self, Self::Error> {
+        path_to_hg_path_buf(path)
+    }
+}
+
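+// A hypothetical usage sketch of the conversion above:
+//
+//     use std::convert::TryFrom;
+//     let hg_path = HgPathBuf::try_from(PathBuf::from("foo/bar"))?;
+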
 #[cfg(test)]
 mod tests {
     use super::*;
--- a/rust/hg-cpython/Cargo.toml	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-cpython/Cargo.toml	Tue Oct 20 22:04:04 2020 +0530
@@ -10,6 +10,7 @@
 
 [features]
 default = ["python27"]
+dirstate-tree = ["hg-core/dirstate-tree"]
 
 # Features to build an extension module:
 python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
@@ -24,7 +25,7 @@
 hg-core = { path = "../hg-core"}
 libc = '*'
 log = "0.4.8"
-simple_logger = "1.6.0"
+env_logger = "0.7.1"
 
 [dependencies.cpython]
 version = "0.4.1"
--- a/rust/hg-cpython/rustfmt.toml	Thu Oct 08 13:45:56 2020 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-max_width = 79
-wrap_comments = true
-error_on_line_overflow = true
--- a/rust/hg-cpython/src/dirstate.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-cpython/src/dirstate.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -107,7 +107,7 @@
     let dotted_name = &format!("{}.dirstate", package);
     let m = PyModule::new(py, dotted_name)?;
 
-    simple_logger::init_by_env();
+    env_logger::init();
 
     m.add(py, "__package__", package)?;
     m.add(py, "__doc__", "Dirstate - Rust implementation")?;
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -142,10 +142,10 @@
                     })?,
             )
             .and_then(|b| Ok(b.to_py_object(py)))
-            .or_else(|_| {
+            .or_else(|e| {
                 Err(PyErr::new::<exc::OSError, _>(
                     py,
-                    "Dirstate error".to_string(),
+                    format!("Dirstate error: {}", e),
                 ))
             })
     }
@@ -549,12 +549,14 @@
     ) -> Ref<'a, RustDirstateMap> {
         self.inner(py).borrow()
     }
+    #[cfg(not(feature = "dirstate-tree"))]
     fn translate_key(
         py: Python,
         res: (&HgPathBuf, &DirstateEntry),
     ) -> PyResult<Option<PyBytes>> {
         Ok(Some(PyBytes::new(py, res.0.as_bytes())))
     }
+    #[cfg(not(feature = "dirstate-tree"))]
     fn translate_key_value(
         py: Python,
         res: (&HgPathBuf, &DirstateEntry),
@@ -562,7 +564,25 @@
         let (f, entry) = res;
         Ok(Some((
             PyBytes::new(py, f.as_bytes()),
-            make_dirstate_tuple(py, entry)?,
+            make_dirstate_tuple(py, &entry)?,
+        )))
+    }
+    #[cfg(feature = "dirstate-tree")]
+    fn translate_key(
+        py: Python,
+        res: (HgPathBuf, DirstateEntry),
+    ) -> PyResult<Option<PyBytes>> {
+        Ok(Some(PyBytes::new(py, res.0.as_bytes())))
+    }
+    #[cfg(feature = "dirstate-tree")]
+    fn translate_key_value(
+        py: Python,
+        res: (HgPathBuf, DirstateEntry),
+    ) -> PyResult<Option<(PyBytes, PyObject)>> {
+        let (f, entry) = res;
+        Ok(Some((
+            PyBytes::new(py, f.as_bytes()),
+            make_dirstate_tuple(py, &entry)?,
         )))
     }
 }
--- a/rust/hg-cpython/src/dirstate/status.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-cpython/src/dirstate/status.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -159,7 +159,7 @@
                 .collect();
 
             let files = files?;
-            let matcher = FileMatcher::new(&files)
+            let matcher = FileMatcher::new(files.as_ref())
                 .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
             let ((lookup, status_res), warnings) = status(
                 &dmap,
--- a/rust/hg-cpython/src/parsers.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hg-cpython/src/parsers.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -14,7 +14,7 @@
     PythonObject, ToPyObject,
 };
 use hg::{
-    pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf,
+    pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry,
     DirstatePackError, DirstateParents, DirstateParseError, FastHashMap,
     PARENT_SIZE,
 };
@@ -29,11 +29,17 @@
     copymap: PyDict,
     st: PyBytes,
 ) -> PyResult<PyTuple> {
-    let mut dirstate_map = FastHashMap::default();
-    let mut copies = FastHashMap::default();
+    match parse_dirstate(st.data(py)) {
+        Ok((parents, entries, copies)) => {
+            let dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries
+                .into_iter()
+                .map(|(path, entry)| (path.to_owned(), entry))
+                .collect();
+            let copy_map: FastHashMap<HgPathBuf, HgPathBuf> = copies
+                .into_iter()
+                .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+                .collect();
 
-    match parse_dirstate(&mut dirstate_map, &mut copies, st.data(py)) {
-        Ok(parents) => {
             for (filename, entry) in &dirstate_map {
                 dmap.set_item(
                     py,
@@ -41,7 +47,7 @@
                     make_dirstate_tuple(py, entry)?,
                 )?;
             }
-            for (path, copy_path) in copies {
+            for (path, copy_path) in copy_map {
                 copymap.set_item(
                     py,
                     PyBytes::new(py, path.as_bytes()),
@@ -113,11 +119,11 @@
         Duration::from_secs(now.as_object().extract::<u64>(py)?),
     ) {
         Ok(packed) => {
-            for (filename, entry) in &dirstate_map {
+            for (filename, entry) in dirstate_map.iter() {
                 dmap.set_item(
                     py,
                     PyBytes::new(py, filename.as_bytes()),
-                    make_dirstate_tuple(py, entry)?,
+                    make_dirstate_tuple(py, &entry)?,
                 )?;
             }
             Ok(PyBytes::new(py, &packed))
--- a/rust/hgcli/Cargo.lock	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hgcli/Cargo.lock	Tue Oct 20 22:04:04 2020 +0530
@@ -2,7 +2,7 @@
 # It is not intended for manual editing.
 [[package]]
 name = "aho-corasick"
-version = "0.7.10"
+version = "0.7.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -10,12 +10,25 @@
 
 [[package]]
 name = "anyhow"
-version = "1.0.28"
+version = "1.0.32"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "autocfg"
-version = "1.0.0"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "base64"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "base64"
+version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -25,7 +38,7 @@
 
 [[package]]
 name = "cc"
-version = "1.0.50"
+version = "1.0.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -34,28 +47,55 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "charset"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "encoding_rs 0.8.24 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "cpython"
-version = "0.4.1"
-source = "git+https://github.com/dgrunwald/rust-cpython?rev=387e87d9deb6b678508888239f9f87dc36973d3f#387e87d9deb6b678508888239f9f87dc36973d3f"
+version = "0.5.0"
+source = "git+https://github.com/dgrunwald/rust-cpython.git?rev=4283acd94f4e794fe03679efc7a6c18bc50938a8#4283acd94f4e794fe03679efc7a6c18bc50938a8"
 dependencies = [
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "paste 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.4.1 (git+https://github.com/dgrunwald/rust-cpython?rev=387e87d9deb6b678508888239f9f87dc36973d3f)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python3-sys 0.5.0 (git+https://github.com/dgrunwald/rust-cpython.git?rev=4283acd94f4e794fe03679efc7a6c18bc50938a8)",
+]
+
+[[package]]
+name = "either"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "encoding_rs"
+version = "0.8.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "fs_extra"
-version = "1.1.0"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "fuchsia-cprng"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "getrandom"
-version = "0.1.14"
+version = "0.1.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
  "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -64,7 +104,15 @@
 version = "0.1.0"
 dependencies = [
  "jemallocator-global 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "pyembed 0.7.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=c772a1379c3026314eda1c8ea244b86c0658951d)",
+ "pyembed 0.8.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08)",
+]
+
+[[package]]
+name = "itertools"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -72,9 +120,9 @@
 version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
- "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fs_extra 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -83,7 +131,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "jemalloc-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -102,110 +150,147 @@
 
 [[package]]
 name = "libc"
-version = "0.2.68"
+version = "0.2.78"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "mailparse"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "base64 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "charset 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quoted_printable 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "memchr"
 version = "2.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "memmap"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "memory-module-sys"
 version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "num-traits"
-version = "0.2.11"
+version = "0.2.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "paste"
-version = "0.1.9"
+version = "0.1.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "paste-impl 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "paste-impl 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.18 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "paste-impl"
-version = "0.1.9"
+version = "0.1.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.18 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "ppv-lite86"
-version = "0.2.6"
+version = "0.2.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "proc-macro-hack"
-version = "0.5.15"
+version = "0.5.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "proc-macro2"
-version = "1.0.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "pyembed"
-version = "0.7.0-pre"
-source = "git+https://github.com/indygreg/PyOxidizer.git?rev=c772a1379c3026314eda1c8ea244b86c0658951d#c772a1379c3026314eda1c8ea244b86c0658951d"
+version = "0.8.0-pre"
+source = "git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08#4697fb25918dfad6dc73288daeea501063963a08"
 dependencies = [
- "cpython 0.4.1 (git+https://github.com/dgrunwald/rust-cpython?rev=387e87d9deb6b678508888239f9f87dc36973d3f)",
+ "anyhow 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cpython 0.5.0 (git+https://github.com/dgrunwald/rust-cpython.git?rev=4283acd94f4e794fe03679efc7a6c18bc50938a8)",
  "jemalloc-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "memory-module-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "python-packed-resources 0.1.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=c772a1379c3026314eda1c8ea244b86c0658951d)",
- "python3-sys 0.4.1 (git+https://github.com/dgrunwald/rust-cpython?rev=387e87d9deb6b678508888239f9f87dc36973d3f)",
+ "python-packaging 0.1.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08)",
+ "python-packed-resources 0.2.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08)",
+ "python3-sys 0.5.0 (git+https://github.com/dgrunwald/rust-cpython.git?rev=4283acd94f4e794fe03679efc7a6c18bc50938a8)",
  "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "python-packaging"
+version = "0.1.0-pre"
+source = "git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08#4697fb25918dfad6dc73288daeea501063963a08"
+dependencies = [
+ "anyhow 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "encoding_rs 0.8.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mailparse 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python-packed-resources 0.2.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "python-packed-resources"
-version = "0.1.0-pre"
-source = "git+https://github.com/indygreg/PyOxidizer.git?rev=c772a1379c3026314eda1c8ea244b86c0658951d#c772a1379c3026314eda1c8ea244b86c0658951d"
+version = "0.2.0-pre"
+source = "git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08#4697fb25918dfad6dc73288daeea501063963a08"
 dependencies = [
- "anyhow 1.0.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "anyhow 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "python3-sys"
-version = "0.4.1"
-source = "git+https://github.com/dgrunwald/rust-cpython?rev=387e87d9deb6b678508888239f9f87dc36973d3f#387e87d9deb6b678508888239f9f87dc36973d3f"
+version = "0.5.0"
+source = "git+https://github.com/dgrunwald/rust-cpython.git?rev=4283acd94f4e794fe03679efc7a6c18bc50938a8#4283acd94f4e794fe03679efc7a6c18bc50938a8"
 dependencies = [
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "quote"
-version = "1.0.3"
+name = "quoted_printable"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -213,8 +298,8 @@
 version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -225,16 +310,29 @@
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rand_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand_core"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -246,29 +344,52 @@
 ]
 
 [[package]]
-name = "regex"
-version = "1.3.6"
+name = "rdrand"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "regex"
+version = "1.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "aho-corasick 0.7.13 (registry+https://github.com/rust-lang/crates.io-index)",
  "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.17"
+version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
-name = "syn"
-version = "1.0.17"
+name = "remove_dir_all"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tempdir"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -280,11 +401,6 @@
 ]
 
 [[package]]
-name = "unicode-xid"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "uuid"
 version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -293,13 +409,23 @@
 ]
 
 [[package]]
+name = "walkdir"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "wasi"
 version = "0.9.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "winapi"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -312,48 +438,72 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [metadata]
-"checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada"
-"checksum anyhow 1.0.28 (registry+https://github.com/rust-lang/crates.io-index)" = "d9a60d744a80c30fcb657dfe2c1b22bcb3e814c1a1e3674f32bf5820b570fbff"
-"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
+"checksum aho-corasick 0.7.13 (registry+https://github.com/rust-lang/crates.io-index)" = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86"
+"checksum anyhow 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)" = "6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b"
+"checksum autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
+"checksum base64 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"
 "checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
+"checksum cc 1.0.60 (registry+https://github.com/rust-lang/crates.io-index)" = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c"
 "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum cpython 0.4.1 (git+https://github.com/dgrunwald/rust-cpython?rev=387e87d9deb6b678508888239f9f87dc36973d3f)" = "<none>"
-"checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674"
-"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
+"checksum charset 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4f426e64df1c3de26cbf44593c6ffff5dbfd43bbf9de0d075058558126b3fc73"
+"checksum cpython 0.5.0 (git+https://github.com/dgrunwald/rust-cpython.git?rev=4283acd94f4e794fe03679efc7a6c18bc50938a8)" = "<none>"
+"checksum either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+"checksum encoding_rs 0.8.24 (registry+https://github.com/rust-lang/crates.io-index)" = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2"
+"checksum fs_extra 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394"
+"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+"checksum getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
+"checksum itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
 "checksum jemalloc-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0d3b9f3f5c9b31aa0f5ed3260385ac205db665baa41d49bb8338008ae94ede45"
 "checksum jemallocator 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "43ae63fcfc45e99ab3d1b29a46782ad679e98436c3169d15a167a1108a724b69"
 "checksum jemallocator-global 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "991b61de8365c8b5707cf6cabbff98cfd6eaca9b851948b883efea408c7f581e"
 "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0"
+"checksum libc 0.2.78 (registry+https://github.com/rust-lang/crates.io-index)" = "aa7087f49d294270db4e1928fc110c976cd4b9e5a16348e0a1df09afa99e6c98"
+"checksum mailparse 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "479b94621ea0fe875638d27f4a0b68213174b63e1ff9355d0948a04f71a5055a"
 "checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
+"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
 "checksum memory-module-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bbdce2925c681860b08875119254fb5543dbf6337c56ff93afebeed9c686da3"
-"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
-"checksum paste 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "092d791bf7847f70bbd49085489fba25fc2c193571752bff9e36e74e72403932"
-"checksum paste-impl 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "406c23fb4c45cc6f68a9bbabb8ec7bd6f8cfcbd17e9e8f72c2460282f8325729"
-"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
-"checksum proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63"
-"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3"
-"checksum pyembed 0.7.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=c772a1379c3026314eda1c8ea244b86c0658951d)" = "<none>"
-"checksum python-packed-resources 0.1.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=c772a1379c3026314eda1c8ea244b86c0658951d)" = "<none>"
-"checksum python3-sys 0.4.1 (git+https://github.com/dgrunwald/rust-cpython?rev=387e87d9deb6b678508888239f9f87dc36973d3f)" = "<none>"
-"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
+"checksum num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
+"checksum paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
+"checksum paste-impl 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
+"checksum ppv-lite86 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20"
+"checksum proc-macro-hack 0.5.18 (registry+https://github.com/rust-lang/crates.io-index)" = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598"
+"checksum pyembed 0.8.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08)" = "<none>"
+"checksum python-packaging 0.1.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08)" = "<none>"
+"checksum python-packed-resources 0.2.0-pre (git+https://github.com/indygreg/PyOxidizer.git?rev=4697fb25918dfad6dc73288daeea501063963a08)" = "<none>"
+"checksum python3-sys 0.5.0 (git+https://github.com/dgrunwald/rust-cpython.git?rev=4283acd94f4e794fe03679efc7a6c18bc50938a8)" = "<none>"
+"checksum quoted_printable 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "47b080c5db639b292ac79cbd34be0cfc5d36694768d8341109634d90b86930e2"
+"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
 "checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 "checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
+"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
+"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
 "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
 "checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
-"checksum regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3"
-"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
-"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03"
+"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
+"checksum regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
+"checksum regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)" = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
+"checksum remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
 "checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
-"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
 "checksum uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11"
+"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d"
 "checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
-"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
+"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
 "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- a/rust/hgcli/Cargo.toml	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hgcli/Cargo.toml	Tue Oct 20 22:04:04 2020 +0530
@@ -13,7 +13,11 @@
 
 [dependencies]
 jemallocator-global = { version = "0.3", optional = true }
-pyembed = { git = "https://github.com/indygreg/PyOxidizer.git", rev = "c772a1379c3026314eda1c8ea244b86c0658951d", default-features=false }
+
+[dependencies.pyembed]
+git = "https://github.com/indygreg/PyOxidizer.git"
+rev = "4697fb25918dfad6dc73288daeea501063963a08"
+default-features = false
 
 [features]
 default = ["build-mode-pyoxidizer-exe"]
--- a/rust/hgcli/build.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hgcli/build.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -5,7 +5,9 @@
 /*! Build script to integrate PyOxidizer. */
 
 fn main() {
-    if let Ok(config_rs) = std::env::var("DEP_PYTHONXY_DEFAULT_PYTHON_CONFIG_RS") {
+    if let Ok(config_rs) =
+        std::env::var("DEP_PYTHONXY_DEFAULT_PYTHON_CONFIG_RS")
+    {
         println!(
             "cargo:rustc-env=PYOXIDIZER_DEFAULT_PYTHON_CONFIG_RS={}",
             config_rs
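
The hunk above uses Cargo's standard build-script handoff: a dependency that declares `links` exports key/value pairs, Cargo re-exposes them to dependents' build scripts as `DEP_*` environment variables, and `cargo:rustc-env=...` re-emits one as a compile-time variable that `include!(env!(...))` can consume (exactly what `rust/hgcli/src/main.rs` does below). A minimal sketch of the pattern, with hypothetical names:

    // build.rs (sketch): DEP_FOO_CONFIG is assumed to be set by a
    // dependency whose Cargo.toml declares `links = "foo"`.
    fn main() {
        if let Ok(path) = std::env::var("DEP_FOO_CONFIG") {
            // Re-emit the value so this crate's sources see it at compile time.
            println!("cargo:rustc-env=FOO_CONFIG={}", path);
        }
    }

    // src/main.rs (sketch): include the generated file at compile time.
    include!(env!("FOO_CONFIG"));
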
--- a/rust/hgcli/pyoxidizer.bzl	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hgcli/pyoxidizer.bzl	Tue Oct 20 22:04:04 2020 +0530
@@ -1,5 +1,7 @@
 ROOT = CWD + "/../.."
 
+IS_WINDOWS = "windows" in BUILD_TARGET_TRIPLE
+
 # Code to run in Python interpreter.
 RUN_CODE = "import hgdemandimport; hgdemandimport.enable(); from mercurial import dispatch; dispatch.run()"
 
@@ -11,8 +13,35 @@
 def make_distribution_windows():
     return default_python_distribution(flavor = "standalone_dynamic")
 
+def resource_callback(policy, resource):
+    # We use a custom resource routing policy to influence where things are loaded
+    # from.
+    #
+    # For Python modules and resources, we load from memory if they are in
+    # the standard library and from the filesystem if not. This is because
+    # parts of Mercurial and some 3rd party packages aren't yet compatible
+    # with memory loading.
+    #
+    # For Python extension modules, we load from the filesystem because
+    # this yields the greatest compatibility.
+    if type(resource) in ("PythonModuleSource", "PythonPackageResource", "PythonPackageDistributionResource"):
+        if resource.is_stdlib:
+            resource.add_location = "in-memory"
+        else:
+            resource.add_location = "filesystem-relative:lib"
+
+    elif type(resource) == "PythonExtensionModule":
+        resource.add_location = "filesystem-relative:lib"
+
 def make_exe(dist):
     """Builds a Rust-wrapped Mercurial binary."""
+    packaging_policy = dist.make_python_packaging_policy()
+    # Extensions may depend on any Python functionality. Include all
+    # extensions.
+    packaging_policy.extension_module_filter = "all"
+    packaging_policy.resources_policy = "prefer-in-memory-fallback-filesystem-relative:lib"
+    packaging_policy.register_resource_callback(resource_callback)
+
     config = PythonInterpreterConfig(
         raw_allocator = "system",
         run_eval = RUN_CODE,
@@ -25,37 +54,17 @@
 
     exe = dist.to_python_executable(
         name = "hg",
-        resources_policy = "prefer-in-memory-fallback-filesystem-relative:lib",
+        packaging_policy = packaging_policy,
         config = config,
-        # Extension may depend on any Python functionality. Include all
-        # extensions.
-        extension_module_filter = "all",
     )
 
     # Add Mercurial to resources.
-    for resource in dist.pip_install(["--verbose", ROOT]):
-        # This is a bit wonky and worth explaining.
-        #
-        # Various parts of Mercurial don't yet support loading package
-        # resources via the ResourceReader interface. Or, not having
-        # file-based resources would be too inconvenient for users.
-        #
-        # So, for package resources, we package them both in the
-        # filesystem as well as in memory. If both are defined,
-        # PyOxidizer will prefer the in-memory location. So even
-        # if the filesystem file isn't packaged in the location
-        # specified here, we should never encounter an errors as the
-        # resource will always be available in memory.
-        if type(resource) == "PythonPackageResource":
-            exe.add_filesystem_relative_python_resource(".", resource)
-            exe.add_in_memory_python_resource(resource)
-        else:
-            exe.add_python_resource(resource)
+    exe.add_python_resources(exe.pip_install(["--verbose", ROOT]))
 
     # On Windows, we install extra packages for convenience.
-    if "windows" in BUILD_TARGET_TRIPLE:
+    if IS_WINDOWS:
         exe.add_python_resources(
-            dist.pip_install(["-r", ROOT + "/contrib/packaging/requirements_win32.txt"]),
+            exe.pip_install(["-r", ROOT + "/contrib/packaging/requirements_win32.txt"]),
         )
 
     return exe
@@ -95,4 +104,5 @@
 # Everything below this is typically managed by PyOxidizer and doesn't need
 # to be updated by people.
 
-PYOXIDIZER_VERSION = "0.7.0"
+PYOXIDIZER_VERSION = "0.8.0-pre"
+PYOXIDIZER_COMMIT = "4697fb25918dfad6dc73288daeea501063963a08"
--- a/rust/hgcli/src/main.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/hgcli/src/main.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -9,21 +9,22 @@
 include!(env!("PYOXIDIZER_DEFAULT_PYTHON_CONFIG_RS"));
 
 fn main() {
-    // The following code is in a block so the MainPythonInterpreter is destroyed in an
-    // orderly manner, before process exit.
+    // The following code is in a block so the MainPythonInterpreter is
+    // destroyed in an orderly manner, before process exit.
     let code = {
-        // Load the default Python configuration as derived by the PyOxidizer config
-        // file used at build time.
+        // Load the default Python configuration as derived by the PyOxidizer
+        // config file used at build time.
         let config = default_python_config();
 
-        // Construct a new Python interpreter using that config, handling any errors
-        // from construction.
+        // Construct a new Python interpreter using that config, handling any
+        // errors from construction.
         match MainPythonInterpreter::new(config) {
             Ok(mut interp) => {
-                // And run it using the default run configuration as specified by the
-                // configuration. If an uncaught Python exception is raised, handle it.
-                // This includes the special SystemExit, which is a request to terminate the
-                // process.
+                // And run it using the default run configuration as specified
+                // by the configuration. If an uncaught Python
+                // exception is raised, handle it.
+                // This includes the special SystemExit, which is a request to
+                // terminate the process.
                 interp.run_as_main()
             }
             Err(msg) => {
--- a/rust/rhg/Cargo.toml	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/rhg/Cargo.toml	Tue Oct 20 22:04:04 2020 +0530
@@ -7,4 +7,6 @@
 [dependencies]
 hg-core = { path = "../hg-core"}
 clap = "2.33.1"
-
+log = "0.4.11"
+micro-timer = "0.3.1"
+env_logger = "0.7.1"
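
The three new crates wire rhg for logging and timing: `log` is the facade, `env_logger` installs a backend configured through the `RUST_LOG` environment variable (initialized in `main.rs` below), and `micro-timer` supplies the `#[timed]` attribute the new commands use to log how long `run` took. A minimal sketch of how they combine; the function and values are illustrative:

    use micro_timer::timed;

    #[timed] // emits a log record with the elapsed time when the call returns
    fn expensive(n: u64) -> u64 {
        (0..n).sum()
    }

    fn main() {
        env_logger::init(); // e.g. run with RUST_LOG=trace to see the timings
        expensive(1_000_000);
    }
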
--- a/rust/rhg/rustfmt.toml	Thu Oct 08 13:45:56 2020 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-max_width = 79
-wrap_comments = true
-error_on_line_overflow = true
--- a/rust/rhg/src/commands.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/rhg/src/commands.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -1,9 +1,13 @@
+pub mod cat;
+pub mod debugdata;
+pub mod files;
 pub mod root;
 use crate::error::CommandError;
+use crate::ui::Ui;
 
 /// The common trait for rhg commands
 ///
 /// Normalize the interface of the commands provided by rhg
 pub trait Command {
-    fn run(&self) -> Result<(), CommandError>;
+    fn run(&self, ui: &Ui) -> Result<(), CommandError>;
 }
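
With `Ui` now passed in by the caller, a command holds only its parsed arguments. A hedged sketch of an implementation under the new trait (this command is made up; the real ones follow in the files below):

    struct HelloCommand;

    impl Command for HelloCommand {
        fn run(&self, ui: &Ui) -> Result<(), CommandError> {
            // All output goes through the shared Ui so buffering and error
            // handling stay consistent across commands.
            ui.write_stdout(b"hello\n")?;
            Ok(())
        }
    }
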
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/cat.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,99 @@
+use crate::commands::Command;
+use crate::error::{CommandError, CommandErrorKind};
+use crate::ui::utf8_to_local;
+use crate::ui::Ui;
+use hg::operations::FindRoot;
+use hg::operations::{CatRev, CatRevError, CatRevErrorKind};
+use hg::utils::hg_path::HgPathBuf;
+use micro_timer::timed;
+use std::convert::TryFrom;
+
+pub const HELP_TEXT: &str = "
+Output the current or given revision of files
+";
+
+pub struct CatCommand<'a> {
+    rev: Option<&'a str>,
+    files: Vec<&'a str>,
+}
+
+impl<'a> CatCommand<'a> {
+    pub fn new(rev: Option<&'a str>, files: Vec<&'a str>) -> Self {
+        Self { rev, files }
+    }
+
+    fn display(&self, ui: &Ui, data: &[u8]) -> Result<(), CommandError> {
+        ui.write_stdout(data)?;
+        Ok(())
+    }
+}
+
+impl<'a> Command for CatCommand<'a> {
+    #[timed]
+    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
+        let root = FindRoot::new().run()?;
+        let cwd = std::env::current_dir()
+            .or_else(|e| Err(CommandErrorKind::CurrentDirNotFound(e)))?;
+
+        let mut files = vec![];
+        for file in self.files.iter() {
+            let normalized = cwd.join(&file);
+            let stripped = normalized
+                .strip_prefix(&root)
+                .or(Err(CommandErrorKind::Abort(None)))?;
+            let hg_file = HgPathBuf::try_from(stripped.to_path_buf())
+                .or(Err(CommandErrorKind::Abort(None)))?;
+            files.push(hg_file);
+        }
+
+        match self.rev {
+            Some(rev) => {
+                let mut operation = CatRev::new(&root, rev, &files)
+                    .map_err(|e| map_rev_error(rev, e))?;
+                let data =
+                    operation.run().map_err(|e| map_rev_error(rev, e))?;
+                self.display(ui, &data)
+            }
+            None => Err(CommandErrorKind::Unimplemented.into()),
+        }
+    }
+}
+
+/// Convert `CatRevError` to `CommandError`
+fn map_rev_error(rev: &str, err: CatRevError) -> CommandError {
+    CommandError {
+        kind: match err.kind {
+            CatRevErrorKind::IoError(err) => CommandErrorKind::Abort(Some(
+                utf8_to_local(&format!("abort: {}\n", err)).into(),
+            )),
+            CatRevErrorKind::InvalidRevision => CommandErrorKind::Abort(Some(
+                utf8_to_local(&format!(
+                    "abort: invalid revision identifier{}\n",
+                    rev
+                ))
+                .into(),
+            )),
+            CatRevErrorKind::UnsuportedRevlogVersion(version) => {
+                CommandErrorKind::Abort(Some(
+                    utf8_to_local(&format!(
+                        "abort: unsupported revlog version {}\n",
+                        version
+                    ))
+                    .into(),
+                ))
+            }
+            CatRevErrorKind::CorruptedRevlog => CommandErrorKind::Abort(Some(
+                "abort: corrupted revlog\n".into(),
+            )),
+            CatRevErrorKind::UnknowRevlogDataFormat(format) => {
+                CommandErrorKind::Abort(Some(
+                    utf8_to_local(&format!(
+                        "abort: unknow revlog dataformat {:?}\n",
+                        format
+                    ))
+                    .into(),
+                ))
+            }
+        },
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/debugdata.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,82 @@
+use crate::commands::Command;
+use crate::error::{CommandError, CommandErrorKind};
+use crate::ui::utf8_to_local;
+use crate::ui::Ui;
+use hg::operations::{
+    DebugData, DebugDataError, DebugDataErrorKind, DebugDataKind,
+};
+use micro_timer::timed;
+
+pub const HELP_TEXT: &str = "
+Dump the contents of a data file revision
+";
+
+pub struct DebugDataCommand<'a> {
+    rev: &'a str,
+    kind: DebugDataKind,
+}
+
+impl<'a> DebugDataCommand<'a> {
+    pub fn new(rev: &'a str, kind: DebugDataKind) -> Self {
+        DebugDataCommand { rev, kind }
+    }
+}
+
+impl<'a> Command for DebugDataCommand<'a> {
+    #[timed]
+    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
+        let mut operation = DebugData::new(self.rev, self.kind);
+        let data =
+            operation.run().map_err(|e| to_command_error(self.rev, e))?;
+
+        let mut stdout = ui.stdout_buffer();
+        stdout.write_all(&data)?;
+        stdout.flush()?;
+
+        Ok(())
+    }
+}
+
+/// Convert operation errors to command errors
+fn to_command_error(rev: &str, err: DebugDataError) -> CommandError {
+    match err.kind {
+        DebugDataErrorKind::FindRootError(err) => CommandError::from(err),
+        DebugDataErrorKind::IoError(err) => CommandError {
+            kind: CommandErrorKind::Abort(Some(
+                utf8_to_local(&format!("abort: {}\n", err)).into(),
+            )),
+        },
+        DebugDataErrorKind::InvalidRevision => CommandError {
+            kind: CommandErrorKind::Abort(Some(
+                utf8_to_local(&format!(
+                    "abort: invalid revision identifier{}\n",
+                    rev
+                ))
+                .into(),
+            )),
+        },
+        DebugDataErrorKind::UnsuportedRevlogVersion(version) => CommandError {
+            kind: CommandErrorKind::Abort(Some(
+                utf8_to_local(&format!(
+                    "abort: unsupported revlog version {}\n",
+                    version
+                ))
+                .into(),
+            )),
+        },
+        DebugDataErrorKind::CorruptedRevlog => CommandError {
+            kind: CommandErrorKind::Abort(Some(
+                "abort: corrupted revlog\n".into(),
+            )),
+        },
+        DebugDataErrorKind::UnknowRevlogDataFormat(format) => CommandError {
+            kind: CommandErrorKind::Abort(Some(
+                utf8_to_local(&format!(
+                    "abort: unknow revlog dataformat {:?}\n",
+                    format
+                ))
+                .into(),
+            )),
+        },
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/files.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,136 @@
+use crate::commands::Command;
+use crate::error::{CommandError, CommandErrorKind};
+use crate::ui::utf8_to_local;
+use crate::ui::Ui;
+use hg::operations::FindRoot;
+use hg::operations::{
+    ListDirstateTrackedFiles, ListDirstateTrackedFilesError,
+    ListDirstateTrackedFilesErrorKind,
+};
+use hg::operations::{
+    ListRevTrackedFiles, ListRevTrackedFilesError,
+    ListRevTrackedFilesErrorKind,
+};
+use hg::utils::files::{get_bytes_from_path, relativize_path};
+use hg::utils::hg_path::{HgPath, HgPathBuf};
+use std::path::PathBuf;
+
+pub const HELP_TEXT: &str = "
+List tracked files.
+
+Returns 0 on success.
+";
+
+pub struct FilesCommand<'a> {
+    rev: Option<&'a str>,
+}
+
+impl<'a> FilesCommand<'a> {
+    pub fn new(rev: Option<&'a str>) -> Self {
+        FilesCommand { rev }
+    }
+
+    fn display_files(
+        &self,
+        ui: &Ui,
+        root: &PathBuf,
+        files: impl IntoIterator<Item = &'a HgPath>,
+    ) -> Result<(), CommandError> {
+        let cwd = std::env::current_dir()
+            .or_else(|e| Err(CommandErrorKind::CurrentDirNotFound(e)))?;
+        let rooted_cwd = cwd
+            .strip_prefix(&root)
+            .expect("cwd was already checked within the repository");
+        let rooted_cwd = HgPathBuf::from(get_bytes_from_path(rooted_cwd));
+
+        let mut stdout = ui.stdout_buffer();
+
+        for file in files {
+            stdout.write_all(relativize_path(file, &rooted_cwd).as_ref())?;
+            stdout.write_all(b"\n")?;
+        }
+        stdout.flush()?;
+        Ok(())
+    }
+}
+
+impl<'a> Command for FilesCommand<'a> {
+    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
+        let root = FindRoot::new().run()?;
+        if let Some(rev) = self.rev {
+            let mut operation = ListRevTrackedFiles::new(&root, rev)
+                .map_err(|e| map_rev_error(rev, e))?;
+            let files = operation.run().map_err(|e| map_rev_error(rev, e))?;
+            self.display_files(ui, &root, files)
+        } else {
+            let mut operation = ListDirstateTrackedFiles::new(&root)
+                .map_err(map_dirstate_error)?;
+            let files = operation.run().map_err(map_dirstate_error)?;
+            self.display_files(ui, &root, files)
+        }
+    }
+}
+
+/// Convert `ListRevTrackedFilesError` to `CommandError`
+fn map_rev_error(rev: &str, err: ListRevTrackedFilesError) -> CommandError {
+    CommandError {
+        kind: match err.kind {
+            ListRevTrackedFilesErrorKind::IoError(err) => {
+                CommandErrorKind::Abort(Some(
+                    utf8_to_local(&format!("abort: {}\n", err)).into(),
+                ))
+            }
+            ListRevTrackedFilesErrorKind::InvalidRevision => {
+                CommandErrorKind::Abort(Some(
+                    utf8_to_local(&format!(
+                        "abort: invalid revision identifier{}\n",
+                        rev
+                    ))
+                    .into(),
+                ))
+            }
+            ListRevTrackedFilesErrorKind::UnsuportedRevlogVersion(version) => {
+                CommandErrorKind::Abort(Some(
+                    utf8_to_local(&format!(
+                        "abort: unsupported revlog version {}\n",
+                        version
+                    ))
+                    .into(),
+                ))
+            }
+            ListRevTrackedFilesErrorKind::CorruptedRevlog => {
+                CommandErrorKind::Abort(Some(
+                    "abort: corrupted revlog\n".into(),
+                ))
+            }
+            ListRevTrackedFilesErrorKind::UnknowRevlogDataFormat(format) => {
+                CommandErrorKind::Abort(Some(
+                    utf8_to_local(&format!(
+                        "abort: unknow revlog dataformat {:?}\n",
+                        format
+                    ))
+                    .into(),
+                ))
+            }
+        },
+    }
+}
+
+/// Convert `ListDirstateTrackedFilesError` to `CommandError`
+fn map_dirstate_error(err: ListDirstateTrackedFilesError) -> CommandError {
+    CommandError {
+        kind: match err.kind {
+            ListDirstateTrackedFilesErrorKind::IoError(err) => {
+                CommandErrorKind::Abort(Some(
+                    utf8_to_local(&format!("abort: {}\n", err)).into(),
+                ))
+            }
+            ListDirstateTrackedFilesErrorKind::ParseError(_) => {
+                CommandErrorKind::Abort(Some(
+                    // TODO find a better error message
+                    b"abort: parse error\n".to_vec(),
+                ))
+            }
+        },
+    }
+}
--- a/rust/rhg/src/commands/root.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/rhg/src/commands/root.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -1,9 +1,8 @@
 use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
+use crate::error::CommandError;
 use crate::ui::Ui;
-use hg::operations::{FindRoot, FindRootError, FindRootErrorKind, Operation};
+use hg::operations::FindRoot;
 use hg::utils::files::get_bytes_from_path;
-use std::path::PathBuf;
 
 pub const HELP_TEXT: &str = "
 Print the root directory of the current repository.
@@ -11,66 +10,23 @@
 Returns 0 on success.
 ";
 
-pub struct RootCommand {
-    ui: Ui,
-}
+pub struct RootCommand {}
 
 impl RootCommand {
     pub fn new() -> Self {
-        RootCommand { ui: Ui::new() }
-    }
-
-    fn display_found_path(
-        &self,
-        path_buf: PathBuf,
-    ) -> Result<(), CommandError> {
-        let bytes = get_bytes_from_path(path_buf);
-
-        // TODO use formating macro
-        self.ui.write_stdout(&[bytes.as_slice(), b"\n"].concat())?;
-
-        Err(CommandErrorKind::Ok.into())
-    }
-
-    fn display_error(&self, error: FindRootError) -> Result<(), CommandError> {
-        match error.kind {
-            FindRootErrorKind::RootNotFound(path) => {
-                let bytes = get_bytes_from_path(path);
-
-                // TODO use formating macro
-                self.ui.write_stderr(
-                    &[
-                        b"abort: no repository found in '",
-                        bytes.as_slice(),
-                        b"' (.hg not found)!\n",
-                    ]
-                    .concat(),
-                )?;
-
-                Err(CommandErrorKind::RootNotFound.into())
-            }
-            FindRootErrorKind::GetCurrentDirError(e) => {
-                // TODO use formating macro
-                self.ui.write_stderr(
-                    &[
-                        b"abort: error getting current working directory: ",
-                        e.to_string().as_bytes(),
-                        b"\n",
-                    ]
-                    .concat(),
-                )?;
-
-                Err(CommandErrorKind::CurrentDirNotFound.into())
-            }
-        }
+        RootCommand {}
     }
 }
 
 impl Command for RootCommand {
-    fn run(&self) -> Result<(), CommandError> {
-        match FindRoot::new().run() {
-            Ok(path_buf) => self.display_found_path(path_buf),
-            Err(e) => self.display_error(e),
-        }
+    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
+        let path_buf = FindRoot::new().run()?;
+
+        let bytes = get_bytes_from_path(path_buf);
+
+        // TODO use formatting macro
+        ui.write_stdout(&[bytes.as_slice(), b"\n"].concat())?;
+
+        Ok(())
     }
 }
--- a/rust/rhg/src/error.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/rhg/src/error.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -1,45 +1,85 @@
 use crate::exitcode;
 use crate::ui::UiError;
+use hg::operations::{FindRootError, FindRootErrorKind};
+use hg::utils::files::get_bytes_from_path;
 use std::convert::From;
+use std::path::PathBuf;
 
 /// The kind of command error
-#[derive(Debug, PartialEq)]
+#[derive(Debug)]
 pub enum CommandErrorKind {
-    /// The command finished without error
-    Ok,
     /// The root of the repository cannot be found
-    RootNotFound,
+    RootNotFound(PathBuf),
     /// The current directory cannot be found
-    CurrentDirNotFound,
+    CurrentDirNotFound(std::io::Error),
     /// The standard output stream cannot be written to
     StdoutError,
     /// The standard error stream cannot be written to
     StderrError,
+    /// The command aborted
+    Abort(Option<Vec<u8>>),
+    /// A Mercurial capability has not been implemented.
+    Unimplemented,
 }
 
 impl CommandErrorKind {
     pub fn get_exit_code(&self) -> exitcode::ExitCode {
         match self {
-            CommandErrorKind::Ok => exitcode::OK,
-            CommandErrorKind::RootNotFound => exitcode::ABORT,
-            CommandErrorKind::CurrentDirNotFound => exitcode::ABORT,
+            CommandErrorKind::RootNotFound(_) => exitcode::ABORT,
+            CommandErrorKind::CurrentDirNotFound(_) => exitcode::ABORT,
             CommandErrorKind::StdoutError => exitcode::ABORT,
             CommandErrorKind::StderrError => exitcode::ABORT,
+            CommandErrorKind::Abort(_) => exitcode::ABORT,
+            CommandErrorKind::Unimplemented => exitcode::UNIMPLEMENTED_COMMAND,
+        }
+    }
+
+    /// Return the message corresponding to the error kind if any
+    pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
+        match self {
+            // TODO use formatting macro
+            CommandErrorKind::RootNotFound(path) => {
+                let bytes = get_bytes_from_path(path);
+                Some(
+                    [
+                        b"abort: no repository found in '",
+                        bytes.as_slice(),
+                        b"' (.hg not found)!\n",
+                    ]
+                    .concat(),
+                )
+            }
+            // TODO use formatting macro
+            CommandErrorKind::CurrentDirNotFound(e) => Some(
+                [
+                    b"abort: error getting current working directory: ",
+                    e.to_string().as_bytes(),
+                    b"\n",
+                ]
+                .concat(),
+            ),
+            CommandErrorKind::Abort(message) => message.to_owned(),
+            _ => None,
         }
     }
 }
 
 /// The error type for the Command trait
-#[derive(Debug, PartialEq)]
+#[derive(Debug)]
 pub struct CommandError {
     pub kind: CommandErrorKind,
 }
 
 impl CommandError {
     /// Exit the process with the corresponding exit code.
-    pub fn exit(&self) -> () {
+    pub fn exit(&self) {
         std::process::exit(self.kind.get_exit_code())
     }
+
+    /// Return the message corresponding to the command error if any
+    pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
+        self.kind.get_error_message_bytes()
+    }
 }
 
 impl From<CommandErrorKind> for CommandError {
@@ -58,3 +98,16 @@
         }
     }
 }
+
+impl From<FindRootError> for CommandError {
+    fn from(err: FindRootError) -> Self {
+        match err.kind {
+            FindRootErrorKind::RootNotFound(path) => CommandError {
+                kind: CommandErrorKind::RootNotFound(path),
+            },
+            FindRootErrorKind::GetCurrentDirError(e) => CommandError {
+                kind: CommandErrorKind::CurrentDirNotFound(e),
+            },
+        }
+    }
+}
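
This `From` impl is what lets the slimmed-down `root` command above write `FindRoot::new().run()?`: on failure, the `?` operator converts the `FindRootError` into a `CommandError`, and `main` (below) turns that into a message plus an exit code. A reduced sketch of the flow inside the rhg crate:

    fn root_like() -> Result<(), CommandError> {
        // `?` invokes CommandError::from(FindRootError) on the error path.
        let _root = FindRoot::new().run()?;
        Ok(())
    }

    // The caller then surfaces it, roughly as main.rs does below:
    // if let Err(e) = root_like() {
    //     if let Some(msg) = e.get_error_message_bytes() {
    //         let _ = ui.write_stderr(&msg);
    //     }
    //     e.exit();
    // }
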
--- a/rust/rhg/src/main.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/rhg/src/main.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -1,42 +1,177 @@
+extern crate log;
 use clap::App;
 use clap::AppSettings;
+use clap::Arg;
+use clap::ArgGroup;
+use clap::ArgMatches;
 use clap::SubCommand;
+use hg::operations::DebugDataKind;
+use std::convert::TryFrom;
 
 mod commands;
 mod error;
 mod exitcode;
 mod ui;
 use commands::Command;
+use error::CommandError;
 
 fn main() {
-    let mut app = App::new("rhg")
+    env_logger::init();
+    let app = App::new("rhg")
         .setting(AppSettings::AllowInvalidUtf8)
         .setting(AppSettings::SubcommandRequired)
         .setting(AppSettings::VersionlessSubcommands)
         .version("0.0.1")
         .subcommand(
             SubCommand::with_name("root").about(commands::root::HELP_TEXT),
+        )
+        .subcommand(
+            SubCommand::with_name("files")
+                .arg(
+                    Arg::with_name("rev")
+                        .help("search the repository as it is in REV")
+                        .short("-r")
+                        .long("--revision")
+                        .value_name("REV")
+                        .takes_value(true),
+                )
+                .about(commands::files::HELP_TEXT),
+        )
+        .subcommand(
+            SubCommand::with_name("cat")
+                .arg(
+                    Arg::with_name("rev")
+                        .help("search the repository as it is in REV")
+                        .short("-r")
+                        .long("--revision")
+                        .value_name("REV")
+                        .takes_value(true),
+                )
+                .arg(
+                    clap::Arg::with_name("files")
+                        .required(true)
+                        .multiple(true)
+                        .empty_values(false)
+                        .value_name("FILE")
+                        .help("Activity to start: activity@category"),
+                )
+                .about(commands::cat::HELP_TEXT),
+        )
+        .subcommand(
+            SubCommand::with_name("debugdata")
+                .about(commands::debugdata::HELP_TEXT)
+                .arg(
+                    Arg::with_name("changelog")
+                        .help("open changelog")
+                        .short("-c")
+                        .long("--changelog"),
+                )
+                .arg(
+                    Arg::with_name("manifest")
+                        .help("open manifest")
+                        .short("-m")
+                        .long("--manifest"),
+                )
+                .group(
+                    ArgGroup::with_name("")
+                        .args(&["changelog", "manifest"])
+                        .required(true),
+                )
+                .arg(
+                    Arg::with_name("rev")
+                        .help("revision")
+                        .required(true)
+                        .value_name("REV"),
+                ),
         );
 
-    let matches = app.clone().get_matches_safe().unwrap_or_else(|_| {
+    let matches = app.clone().get_matches_safe().unwrap_or_else(|err| {
+        let _ = ui::Ui::new().writeln_stderr_str(&err.message);
         std::process::exit(exitcode::UNIMPLEMENTED_COMMAND)
     });
 
-    let command_result = match matches.subcommand_name() {
-        Some(name) => match name {
-            "root" => commands::root::RootCommand::new().run(),
-            _ => std::process::exit(exitcode::UNIMPLEMENTED_COMMAND),
-        },
-        _ => {
-            match app.print_help() {
-                Ok(_) => std::process::exit(exitcode::OK),
-                Err(_) => std::process::exit(exitcode::ABORT),
-            };
-        }
-    };
+    let ui = ui::Ui::new();
+
+    let command_result = match_subcommand(matches, &ui);
 
     match command_result {
         Ok(_) => std::process::exit(exitcode::OK),
-        Err(e) => e.exit(),
+        Err(e) => {
+            let message = e.get_error_message_bytes();
+            if let Some(msg) = message {
+                match ui.write_stderr(&msg) {
+                    Ok(_) => (),
+                    Err(_) => std::process::exit(exitcode::ABORT),
+                };
+            };
+            e.exit()
+        }
+    }
+}
+
+fn match_subcommand(
+    matches: ArgMatches,
+    ui: &ui::Ui,
+) -> Result<(), CommandError> {
+    match matches.subcommand() {
+        ("root", _) => commands::root::RootCommand::new().run(&ui),
+        ("files", Some(matches)) => {
+            commands::files::FilesCommand::try_from(matches)?.run(&ui)
+        }
+        ("cat", Some(matches)) => {
+            commands::cat::CatCommand::try_from(matches)?.run(&ui)
+        }
+        ("debugdata", Some(matches)) => {
+            commands::debugdata::DebugDataCommand::try_from(matches)?.run(&ui)
+        }
+        _ => unreachable!(), // Because of AppSettings::SubcommandRequired
+    }
+}
+
+impl<'a> TryFrom<&'a ArgMatches<'_>> for commands::files::FilesCommand<'a> {
+    type Error = CommandError;
+
+    fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
+        let rev = args.value_of("rev");
+        Ok(commands::files::FilesCommand::new(rev))
     }
 }
+
+impl<'a> TryFrom<&'a ArgMatches<'_>> for commands::cat::CatCommand<'a> {
+    type Error = CommandError;
+
+    fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
+        let rev = args.value_of("rev");
+        let files = match args.values_of("files") {
+            Some(files) => files.collect(),
+            None => vec![],
+        };
+        Ok(commands::cat::CatCommand::new(rev, files))
+    }
+}
+
+impl<'a> TryFrom<&'a ArgMatches<'_>>
+    for commands::debugdata::DebugDataCommand<'a>
+{
+    type Error = CommandError;
+
+    fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
+        let rev = args
+            .value_of("rev")
+            .expect("rev should be a required argument");
+        let kind = match (
+            args.is_present("changelog"),
+            args.is_present("manifest"),
+        ) {
+            (true, false) => DebugDataKind::Changelog,
+            (false, true) => DebugDataKind::Manifest,
+            (true, true) => {
+                unreachable!("Should not happen since options are exclusive")
+            }
+            (false, false) => {
+                unreachable!("Should not happen since options are required")
+            }
+        };
+        Ok(commands::debugdata::DebugDataCommand::new(rev, kind))
+    }
+}
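
The `ArgGroup` with `required(true)` is what makes the two `unreachable!()` arms in the `debugdata` `TryFrom` impl safe: clap rejects the command line unless exactly one of `--changelog`/`--manifest` is given. A standalone sketch of the same guarantee, with illustrative names:

    use clap::{App, Arg, ArgGroup};

    fn main() {
        let matches = App::new("demo")
            .arg(Arg::with_name("changelog").long("changelog"))
            .arg(Arg::with_name("manifest").long("manifest"))
            .group(
                ArgGroup::with_name("kind")
                    .args(&["changelog", "manifest"])
                    .required(true),
            )
            .get_matches_from(vec!["demo", "--changelog"]);
        // clap guarantees exactly one of the two flags is present here.
        assert!(matches.is_present("changelog") != matches.is_present("manifest"));
    }
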
--- a/rust/rhg/src/ui.rs	Thu Oct 08 13:45:56 2020 -0700
+++ b/rust/rhg/src/ui.rs	Tue Oct 20 22:04:04 2020 +0530
@@ -1,7 +1,12 @@
+use std::borrow::Cow;
 use std::io;
-use std::io::Write;
+use std::io::{ErrorKind, Write};
 
-pub struct Ui {}
+#[derive(Debug)]
+pub struct Ui {
+    stdout: std::io::Stdout,
+    stderr: std::io::Stderr,
+}
 
 /// The kind of user interface error
 pub enum UiError {
@@ -14,41 +19,95 @@
 /// The commandline user interface
 impl Ui {
     pub fn new() -> Self {
-        Ui {}
+        Ui {
+            stdout: std::io::stdout(),
+            stderr: std::io::stderr(),
+        }
+    }
+
+    /// Returns a buffered handle on stdout for faster batch printing
+    /// operations.
+    pub fn stdout_buffer(&self) -> StdoutBuffer<std::io::StdoutLock> {
+        StdoutBuffer::new(self.stdout.lock())
     }
 
     /// Write bytes to stdout
     pub fn write_stdout(&self, bytes: &[u8]) -> Result<(), UiError> {
-        let mut stdout = io::stdout();
-
-        self.write_stream(&mut stdout, bytes)
-            .or_else(|e| self.into_stdout_error(e))?;
+        let mut stdout = self.stdout.lock();
 
-        stdout.flush().or_else(|e| self.into_stdout_error(e))
-    }
+        stdout.write_all(bytes).or_else(handle_stdout_error)?;
 
-    fn into_stdout_error(&self, error: io::Error) -> Result<(), UiError> {
-        self.write_stderr(
-            &[b"abort: ", error.to_string().as_bytes(), b"\n"].concat(),
-        )?;
-        Err(UiError::StdoutError(error))
+        stdout.flush().or_else(handle_stdout_error)
     }
 
     /// Write bytes to stderr
     pub fn write_stderr(&self, bytes: &[u8]) -> Result<(), UiError> {
-        let mut stderr = io::stderr();
+        let mut stderr = self.stderr.lock();
+
+        stderr.write_all(bytes).or_else(handle_stderr_error)?;
+
+        stderr.flush().or_else(handle_stderr_error)
+    }
+
+    /// Write string line to stderr
+    pub fn writeln_stderr_str(&self, s: &str) -> Result<(), UiError> {
+        self.write_stderr(&format!("{}\n", s).as_bytes())
+    }
+}
 
-        self.write_stream(&mut stderr, bytes)
-            .or_else(|e| Err(UiError::StderrError(e)))?;
+/// A buffered stdout writer for faster batch printing operations.
+pub struct StdoutBuffer<W: Write> {
+    buf: io::BufWriter<W>,
+}
 
-        stderr.flush().or_else(|e| Err(UiError::StderrError(e)))
+impl<W: Write> StdoutBuffer<W> {
+    pub fn new(writer: W) -> Self {
+        let buf = io::BufWriter::new(writer);
+        Self { buf }
+    }
+
+    /// Write bytes to stdout buffer
+    pub fn write_all(&mut self, bytes: &[u8]) -> Result<(), UiError> {
+        self.buf.write_all(bytes).or_else(handle_stdout_error)
     }
 
-    fn write_stream(
-        &self,
-        stream: &mut impl Write,
-        bytes: &[u8],
-    ) -> Result<(), io::Error> {
-        stream.write_all(bytes)
+    /// Flush bytes to stdout
+    pub fn flush(&mut self) -> Result<(), UiError> {
+        self.buf.flush().or_else(handle_stdout_error)
     }
 }
+
+/// Sometimes writing to stdout is not possible; try writing to stderr to
+/// signal that failure, otherwise just bail.
+fn handle_stdout_error(error: io::Error) -> Result<(), UiError> {
+    if let ErrorKind::BrokenPipe = error.kind() {
+        // This makes `| head` work, for example
+        return Ok(());
+    }
+    let mut stderr = io::stderr();
+
+    stderr
+        .write_all(&[b"abort: ", error.to_string().as_bytes(), b"\n"].concat())
+        .map_err(UiError::StderrError)?;
+
+    stderr.flush().map_err(UiError::StderrError)?;
+
+    Err(UiError::StdoutError(error))
+}
+
+/// Sometimes writing to stderr is not possible.
+fn handle_stderr_error(error: io::Error) -> Result<(), UiError> {
+    // A broken pipe should not result in an error,
+    // like with `| head` for example
+    if let ErrorKind::BrokenPipe = error.kind() {
+        return Ok(());
+    }
+    Err(UiError::StderrError(error))
+}
+
+/// Encode rust strings according to the user system.
+pub fn utf8_to_local(s: &str) -> Cow<[u8]> {
+    // TODO encode for the user's system
+    let bytes = s.as_bytes();
+    Cow::Borrowed(bytes)
+}
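
The buffered writer matters for commands such as `files` that print many lines: each `Ui::write_stdout` call locks and flushes stdout, while `stdout_buffer` batches writes behind one lock and a `BufWriter` and flushes once at the end. A hedged usage sketch; the loop body is illustrative:

    fn print_lines(ui: &Ui) -> Result<(), UiError> {
        let mut out = ui.stdout_buffer();
        for i in 0..1000 {
            out.write_all(format!("line {}\n", i).as_bytes())?;
        }
        out.flush() // one flush at the end instead of one per write
    }
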
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rustfmt.toml	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,4 @@
+edition = "2018"
+max_width = 79
+wrap_comments = true
+error_on_line_overflow = true
--- a/setup.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/setup.py	Tue Oct 20 22:04:04 2020 +0530
@@ -1268,6 +1268,7 @@
     'mercurial.hgweb',
     'mercurial.interfaces',
     'mercurial.pure',
+    'mercurial.templates',
     'mercurial.thirdparty',
     'mercurial.thirdparty.attr',
     'mercurial.thirdparty.zope',
@@ -1292,6 +1293,13 @@
     'hgext3rd',
     'hgdemandimport',
 ]
+
+for name in os.listdir(os.path.join('mercurial', 'templates')):
+    if name != '__pycache__' and os.path.isdir(
+        os.path.join('mercurial', 'templates', name)
+    ):
+        packages.append('mercurial.templates.%s' % name)
+
 if sys.version_info[0] == 2:
     packages.extend(
         [
@@ -1614,11 +1622,8 @@
     msvccompiler.MSVCCompiler = HackedMSVCCompiler
 
 packagedata = {
-    'mercurial': [
-        'locale/*/LC_MESSAGES/hg.mo',
-        'defaultrc/*.rc',
-        'dummycert.pem',
-    ],
+    'mercurial': ['locale/*/LC_MESSAGES/hg.mo', 'dummycert.pem',],
+    'mercurial.defaultrc': ['*.rc',],
     'mercurial.helptext': ['*.txt',],
     'mercurial.helptext.internals': ['*.txt',],
 }
@@ -1630,11 +1635,8 @@
 
 for root in ('templates',):
     for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
-        curdir = curdir.split(os.sep, 1)[1]
-        dirs[:] = filter(ordinarypath, dirs)
-        for f in filter(ordinarypath, files):
-            f = os.path.join(curdir, f)
-            packagedata['mercurial'].append(f)
+        packagename = curdir.replace(os.sep, '.')
+        packagedata[packagename] = list(filter(ordinarypath, files))
 
 datafiles = []
 
--- a/tests/bruterebase.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/bruterebase.py	Tue Oct 20 22:04:04 2020 +0530
@@ -52,7 +52,7 @@
             try:
                 rebase.rebase(ui, repo, dest=dest, rev=[spec])
             except error.Abort as ex:
-                summary = b'ABORT: %s' % ex
+                summary = b'ABORT: %s' % ex.message
             except Exception as ex:
                 summary = b'CRASH: %s' % ex
             else:
--- a/tests/failfilemerge.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/failfilemerge.py	Tue Oct 20 22:04:04 2020 +0530
@@ -12,7 +12,7 @@
 def failfilemerge(
     filemergefn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
 ):
-    raise error.Abort("^C")
+    raise error.Abort(b"^C")
     return filemergefn(premerge, repo, mynode, orig, fcd, fco, fca, labels)
 
 
--- a/tests/hghave.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/hghave.py	Tue Oct 20 22:04:04 2020 +0530
@@ -886,8 +886,11 @@
         return False
 
 
-@check("virtualenv", "Python virtualenv support")
-def has_virtualenv():
+@check("py2virtualenv", "Python2 virtualenv support")
+def has_py2virtualenv():
+    if sys.version_info[0] != 2:
+        return False
+
     try:
         import virtualenv
 
@@ -1063,6 +1066,11 @@
     )
 
 
+@check("cargo", "cargo tool")
+def has_cargo():
+    return matchoutput('`rustup which cargo` --version', b'cargo')
+
+
 @check("lzma", "python lzma module")
 def has_lzma():
     try:
--- a/tests/lockdelay.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/lockdelay.py	Tue Oct 20 22:04:04 2020 +0530
@@ -10,11 +10,11 @@
 
 def reposetup(ui, repo):
     class delayedlockrepo(repo.__class__):
-        def lock(self):
+        def lock(self, wait=True):
             delay = float(os.environ.get('HGPRELOCKDELAY', '0.0'))
             if delay:
                 time.sleep(delay)
-            res = super(delayedlockrepo, self).lock()
+            res = super(delayedlockrepo, self).lock(wait=wait)
             delay = float(os.environ.get('HGPOSTLOCKDELAY', '0.0'))
             if delay:
                 time.sleep(delay)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-hash-fixes.json	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,1096 @@
+{
+    "version": 1, 
+    "interactions": [
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:15 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "183"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22callsigns%22%3A+%5B%22HG%22%5D%7D%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":{\"diffid\":22437,\"phid\":\"PHID-DIFF-q7y7rru5hbxnq2mtosrf\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/22437\\/\"},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:15 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "1162"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22file.txt%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22file.txt%22%2C+%22delLines%22%3A+1%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22-mod3%5Cn%2Bcontent%5Cn%22%2C+%22delLines%22%3A+1%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+1%2C+%22oldOffset%22%3A+1%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%7D%2C+%22oldPath%22%3A+%22file.txt%22%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+2%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:16 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "connection": [
+                        "close"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "482"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:17 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "594"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"base review (generate test for phabsend)\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"base review (generate test for phabsend)\"}]},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:17 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "189"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22base+review+%28generate+test+for+phabsend%29%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":8945,\"phid\":\"PHID-DREV-suqt5s55kjw235uv2vcf\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-76klselssdel6vp\"},{\"phid\":\"PHID-XACT-DREV-atejrjnkqevgpnv\"},{\"phid\":\"PHID-XACT-DREV-wqkucxolugjm4yr\"},{\"phid\":\"PHID-XACT-DREV-pziu2ibzwaljzto\"},{\"phid\":\"PHID-XACT-DREV-k4o6ptid6jztdrx\"}]},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:18 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "342"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-q7y7rru5hbxnq2mtosrf%22%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22base+review+%28generate+test+for+phabsend%29%22%7D%5D%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":{\"diffid\":22438,\"phid\":\"PHID-DIFF-6lntv23mzadpzyeaizej\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/22438\\/\"},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:19 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "1170"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22file.txt%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22file.txt%22%2C+%22delLines%22%3A+1%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22-content%5Cn%2Bmore+content%5Cn%22%2C+%22delLines%22%3A+1%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+1%2C+%22oldOffset%22%3A+1%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%7D%2C+%22oldPath%22%3A+%22file.txt%22%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+2%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:20 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "482"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22c2874a398f7e0a139283fad3df053430dac536ff%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:20 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "594"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22c2874a398f7e0a139283fad3df053430dac536ff%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22c2874a398f7e0a139283fad3df053430dac536ff%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"133c1c6c6449 is my parent (generate test for phabsend)\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"133c1c6c6449 is my parent (generate test for phabsend)\"}]},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:21 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "203"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22133c1c6c6449+is+my+parent+%28generate+test+for+phabsend%29%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":8946,\"phid\":\"PHID-DREV-ja6bdevg5fbykjrpghj4\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-pupj6v3uzdeb6iu\"},{\"phid\":\"PHID-XACT-DREV-czsnsiuaxsecqf4\"},{\"phid\":\"PHID-XACT-DREV-qs6vcl5qj4cqyu2\"},{\"phid\":\"PHID-XACT-DREV-qig4ohigvfnr4h2\"},{\"phid\":\"PHID-XACT-DREV-iv6asp4osxnslvs\"},{\"phid\":\"PHID-XACT-DREV-jn3ojiw6yt3mzuz\"}]},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:22 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "458"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-6lntv23mzadpzyeaizej%22%7D%2C+%7B%22type%22%3A+%22parents.set%22%2C+%22value%22%3A+%5B%22PHID-DREV-suqt5s55kjw235uv2vcf%22%5D%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22133c1c6c6449+is+my+parent+%28generate+test+for+phabsend%29%22%7D%5D%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"8946\",\"phid\":\"PHID-DREV-ja6bdevg5fbykjrpghj4\",\"title\":\"133c1c6c6449 is my parent (generate test for phabsend)\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D8946\",\"dateCreated\":\"1598307502\",\"dateModified\":\"1598307502\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":1},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-6lntv23mzadpzyeaizej\",\"diffs\":[\"22438\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-nf7kno6lkl3fjsmo5pyp\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-suqt5s55kjw235uv2vcf\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"},{\"id\":\"8945\",\"phid\":\"PHID-DREV-suqt5s55kjw235uv2vcf\",\"title\":\"base review (generate test for phabsend)\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D8945\",\"dateCreated\":\"1598307498\",\"dateModified\":\"1598307502\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":1},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-q7y7rru5hbxnq2mtosrf\",\"diffs\":[\"22437\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-nf7kno6lkl3fjsmo5pyp\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:23 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "154"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B8945%2C+8946%5D%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:23 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "482"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:24 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "594"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:24 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "482"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%229c9290f945b15b9420fffd5f5fc59260c1cbbcf4%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }, 
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Mon, 24 Aug 2020 22:18:25 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "594"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%229c9290f945b15b9420fffd5f5fc59260c1cbbcf4%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%229c9290f945b15b9420fffd5f5fc59260c1cbbcf4%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "method": "POST"
+            }
+        }
+    ]
+}
\ No newline at end of file
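
For orientation, the fixture above is a record/replay cassette for the
Phabricator tests: a top-level version plus an "interactions" list pairing
each recorded Conduit request with its canned response. An illustrative
reader — the structure is taken directly from the fixture itself, nothing
else is assumed:

    import json

    with open('tests/phabricator/phabsend-hash-fixes.json') as f:
        cassette = json.load(f)

    assert cassette['version'] == 1
    for entry in cassette['interactions']:
        req, resp = entry['request'], entry['response']
        print(req['method'], req['uri'], '->', resp['status']['code'])
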
--- a/tests/printrevset.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/printrevset.py	Tue Oct 20 22:04:04 2020 +0530
@@ -1,4 +1,5 @@
 from __future__ import absolute_import
+from mercurial.thirdparty import attr
 from mercurial import (
     cmdutil,
     commands,
@@ -11,26 +12,27 @@
 from mercurial.utils import stringutil
 
 
-def logrevset(repo, pats, opts):
-    revs = logcmdutil._initialrevs(repo, opts)
+def logrevset(repo, wopts):
+    revs = logcmdutil._initialrevs(repo, wopts)
     if not revs:
         return None
-    match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
-    return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
+    match, pats, slowpath = logcmdutil._makematcher(repo, revs, wopts)
+    wopts = attr.evolve(wopts, pats=pats)
+    return logcmdutil._makerevset(repo, wopts, slowpath)
 
 
 def uisetup(ui):
-    def printrevset(orig, repo, pats, opts):
-        revs, filematcher = orig(repo, pats, opts)
-        if opts.get(b'print_revset'):
-            expr = logrevset(repo, pats, opts)
+    def printrevset(orig, repo, wopts):
+        revs, filematcher = orig(repo, wopts)
+        if wopts.opts.get(b'print_revset'):
+            expr = logrevset(repo, wopts)
             if expr:
                 tree = revsetlang.parse(expr)
                 tree = revsetlang.analyze(tree)
             else:
                 tree = []
             ui = repo.ui
-            ui.write(b'%s\n' % stringutil.pprint(opts.get(b'rev', [])))
+            ui.write(b'%s\n' % stringutil.pprint(wopts.opts.get(b'rev', [])))
             ui.write(revsetlang.prettyformat(tree) + b'\n')
             ui.write(stringutil.prettyrepr(revs) + b'\n')
             revs = smartset.baseset()  # display no revisions
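
The printrevset changes track logcmdutil's move from a (pats, opts) pair
to a single walkopts object. Since that object is immutable, the
recomputed pats is attached with attr.evolve(), which copies an attrs
instance with selected fields replaced. A minimal sketch of the idiom,
using a hypothetical stand-in class rather than the real
logcmdutil.walkopts:

    import attr

    @attr.s(frozen=True)
    class walkopts(object):  # stand-in, not the real logcmdutil.walkopts
        pats = attr.ib()
        opts = attr.ib(factory=dict)

    w = walkopts(pats=[b'file.txt'])
    w2 = attr.evolve(w, pats=[b'other.txt'])  # copy, one field replaced
    assert w.pats == [b'file.txt'] and w2.pats == [b'other.txt']
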
--- a/tests/pullext.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/pullext.py	Tue Oct 20 22:04:04 2020 +0530
@@ -13,8 +13,8 @@
     error,
     extensions,
     localrepo,
+    requirements,
 )
-from mercurial.interfaces import repository
 
 
 def clonecommand(orig, ui, repo, *args, **kwargs):
@@ -31,7 +31,7 @@
 
 
 def featuresetup(ui, features):
-    features.add(repository.NARROW_REQUIREMENT)
+    features.add(requirements.NARROW_REQUIREMENT)
 
 
 def extsetup(ui):
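
The pullext change reflects NARROW_REQUIREMENT moving from
mercurial.interfaces.repository to the new mercurial.requirements module.
An out-of-tree extension that must run against both layouts could hedge
the import — a compatibility sketch, not code from this changeset:

    try:
        # Mercurial 5.6+: requirement constants live in their own module.
        from mercurial.requirements import NARROW_REQUIREMENT
    except ImportError:
        # Older releases exposed the constant via the repository
        # interface module.
        from mercurial.interfaces.repository import NARROW_REQUIREMENT
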
--- a/tests/run-tests.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/run-tests.py	Tue Oct 20 22:04:04 2020 +0530
@@ -967,6 +967,7 @@
         if slowtimeout is None:
             slowtimeout = defaults['slowtimeout']
         self.path = path
+        self.relpath = os.path.relpath(path)
         self.bname = os.path.basename(path)
         self.name = _bytes2sys(self.bname)
         self._testdir = os.path.dirname(path)
@@ -2336,7 +2337,6 @@
         jobs=1,
         whitelist=None,
         blacklist=None,
-        retest=False,
         keywords=None,
         loop=False,
         runs_per_test=1,
@@ -2364,9 +2364,6 @@
         backwards compatible behavior which reports skipped tests as part
         of the results.
 
-        retest denotes whether to retest failed tests. This arguably belongs
-        outside of TestSuite.
-
         keywords denotes key words that will be used to filter which tests
         to execute. This arguably belongs outside of TestSuite.
 
@@ -2377,7 +2374,6 @@
         self._jobs = jobs
         self._whitelist = whitelist
         self._blacklist = blacklist
-        self._retest = retest
         self._keywords = keywords
         self._loop = loop
         self._runs_per_test = runs_per_test
@@ -2402,15 +2398,17 @@
                 result.addSkip(test, "Doesn't exist")
                 continue
 
-            if not (self._whitelist and test.bname in self._whitelist):
-                if self._blacklist and test.bname in self._blacklist:
+            is_whitelisted = self._whitelist and (
+                test.relpath in self._whitelist or test.bname in self._whitelist
+            )
+            if not is_whitelisted:
+                is_blacklisted = self._blacklist and (
+                    test.relpath in self._blacklist
+                    or test.bname in self._blacklist
+                )
+                if is_blacklisted:
                     result.addSkip(test, 'blacklisted')
                     continue
-
-                if self._retest and not os.path.exists(test.errpath):
-                    result.addIgnore(test, 'not retesting')
-                    continue
-
                 if self._keywords:
                     with open(test.path, 'rb') as f:
                         t = f.read().lower() + test.bname.lower()
@@ -3253,6 +3251,14 @@
                     tests.append({'path': t})
             else:
                 tests.append({'path': t})
+
+        if self.options.retest:
+            retest_args = []
+            for test in tests:
+                errpath = self._geterrpath(test)
+                if os.path.exists(errpath):
+                    retest_args.append(test)
+            tests = retest_args
         return tests
 
     def _runtests(self, testdescs):
@@ -3269,13 +3275,7 @@
                 orig = list(testdescs)
                 while testdescs:
                     desc = testdescs[0]
-                    # desc['path'] is a relative path
-                    if 'case' in desc:
-                        casestr = b'#'.join(desc['case'])
-                        errpath = b'%s#%s.err' % (desc['path'], casestr)
-                    else:
-                        errpath = b'%s.err' % desc['path']
-                    errpath = os.path.join(self._outputdir, errpath)
+                    errpath = self._geterrpath(desc)
                     if os.path.exists(errpath):
                         break
                     testdescs.pop(0)
@@ -3298,7 +3298,6 @@
                 jobs=jobs,
                 whitelist=self.options.whitelisted,
                 blacklist=self.options.blacklist,
-                retest=self.options.retest,
                 keywords=kws,
                 loop=self.options.loop,
                 runs_per_test=self.options.runs_per_test,
@@ -3346,6 +3345,19 @@
         if failed:
             return 1
 
+    def _geterrpath(self, test):
+        # test['path'] is a relative path
+        if 'case' in test:
+            # for multiple dimensions test cases
+            casestr = b'#'.join(test['case'])
+            errpath = b'%s#%s.err' % (test['path'], casestr)
+        else:
+            errpath = b'%s.err' % test['path']
+        if self.options.outputdir:
+            self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
+            errpath = os.path.join(self._outputdir, errpath)
+        return errpath
+
     def _getport(self, count):
         port = self._ports.get(count)  # do we have a cached entry?
         if port is None:
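
The run-tests.py hunks above move --retest handling out of TestSuite and
into test collection: _geterrpath() reconstructs the .err file a previous
run would have written, and the collected list is filtered down to tests
whose .err exists. A condensed sketch of that naming scheme (outputdir
handling elided; the dicts mirror the testdescs used above):

    import os.path

    def geterrpath(test):
        # "<path>.err" for plain tests; "<path>#<case1>#<case2>.err"
        # when the test ran with case dimensions.
        if 'case' in test:
            casestr = b'#'.join(test['case'])
            return b'%s#%s.err' % (test['path'], casestr)
        return b'%s.err' % test['path']

    tests = [{'path': b'test-a.t'},
             {'path': b'test-b.t', 'case': [b'flat']}]
    # --retest keeps only tests whose previous run recorded a failure:
    retest = [t for t in tests if os.path.exists(geterrpath(t))]
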
--- a/tests/test-absorb-unfinished.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-absorb-unfinished.t	Tue Oct 20 22:04:04 2020 +0530
@@ -25,6 +25,6 @@
 
   $ hg --config extensions.rebase= absorb
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
--- a/tests/test-absorb.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-absorb.t	Tue Oct 20 22:04:04 2020 +0530
@@ -603,21 +603,21 @@
   $ hg commit -m a -A a b
   $ hg branch foo -q
   $ echo b > b
-  $ hg commit -m foo  # will become empty
+  $ hg commit -m 'foo (child of 0cde1ae39321)'  # will become empty
   $ hg branch bar -q
-  $ hg commit -m bar  # is already empty
+  $ hg commit -m 'bar (child of e969dc86aefc)'  # is already empty
   $ echo a2 > a
   $ printf '' > b
   $ hg absorb --apply-changes --verbose | grep became
   0:0cde1ae39321: 1 file(s) changed, became 3:fc7fcdd90fdb
-  1:795dfb1adcef: 2 file(s) changed, became 4:a8740537aa53
-  2:b02935f68891: 2 file(s) changed, became 5:59533e01c707
-  $ hg log -T '{rev} (branch: {branch}) {desc}\n' -G --stat
-  @  5 (branch: bar) bar
+  1:e969dc86aefc: 2 file(s) changed, became 4:8fc6b2cb43a5
+  2:0298954ced32: 2 file(s) changed, became 5:ca8386dc4e2c
+  $ hg log -T '{rev}:{node|short} (branch: {branch}) {desc}\n' -G --stat
+  @  5:ca8386dc4e2c (branch: bar) bar (child of 8fc6b2cb43a5)
   |
-  o  4 (branch: foo) foo
+  o  4:8fc6b2cb43a5 (branch: foo) foo (child of fc7fcdd90fdb)
   |
-  o  3 (branch: default) a
+  o  3:fc7fcdd90fdb (branch: default) a
       a |  1 +
       b |  0
       2 files changed, 1 insertions(+), 0 deletions(-)
--- a/tests/test-amend.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-amend.t	Tue Oct 20 22:04:04 2020 +0530
@@ -93,6 +93,29 @@
   nothing changed
   [1]
 
+#if obsstore-on
+  $ hg init repo-merge-state
+  $ cd repo-merge-state
+  $ echo a > f
+  $ hg ci -Aqm a
+  $ echo b > f
+  $ hg ci -Aqm b
+  $ echo c > f
+  $ hg co -m '.^'
+  merging f
+  warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges
+  [1]
+  $ echo d > f
+  $ hg resolve -m f
+  (no more unresolved files)
+  $ hg ci --amend --config experimental.evolution.allowunstable=True
+  1 new orphan changesets
+  $ hg resolve -l
+  $ cd ..
+#endif
+
 Matcher and metadata options
 
   $ echo 3 > C
--- a/tests/test-annotate.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-annotate.t	Tue Oct 20 22:04:04 2020 +0530
@@ -479,26 +479,24 @@
 
   $ cat > ../legacyrepo.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import error, node
-  > def reposetup(ui, repo):
-  >     class legacyrepo(repo.__class__):
-  >         def _filecommit(self, fctx, manifest1, manifest2,
-  >                         linkrev, tr, changelist, includecopymeta):
-  >             fname = fctx.path()
-  >             text = fctx.data()
-  >             flog = self.file(fname)
-  >             fparent1 = manifest1.get(fname, node.nullid)
-  >             fparent2 = manifest2.get(fname, node.nullid)
-  >             meta = {}
-  >             copy = fctx.copysource()
-  >             if copy and copy != fname:
-  >                 raise error.Abort('copying is not supported')
-  >             if fparent2 != node.nullid:
-  >                 changelist.append(fname)
-  >                 return flog.add(text, meta, tr, linkrev,
-  >                                 fparent1, fparent2)
-  >             raise error.Abort('only merging is supported')
-  >     repo.__class__ = legacyrepo
+  > from mercurial import commit, error, extensions, node
+  > def _filecommit(orig, repo, fctx, manifest1, manifest2,
+  >                 linkrev, tr, includecopymeta, ms):
+  >     fname = fctx.path()
+  >     text = fctx.data()
+  >     flog = repo.file(fname)
+  >     fparent1 = manifest1.get(fname, node.nullid)
+  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     meta = {}
+  >     copy = fctx.copysource()
+  >     if copy and copy != fname:
+  >         raise error.Abort('copying is not supported')
+  >     if fparent2 != node.nullid:
+  >         return flog.add(text, meta, tr, linkrev,
+  >                         fparent1, fparent2), 'modified'
+  >     raise error.Abort('only merging is supported')
+  > def uisetup(ui):
+  >     extensions.wrapfunction(commit, '_filecommit', _filecommit)
   > EOF
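The rewrite reflects that _filecommit is no longer a repository method but a
function in the mercurial.commit module (it now also receives the merge state
and returns a (node, touched-kind) pair, hence the trailing 'modified'). The
wrapfunction pattern it relies on looks like this in general (a minimal sketch;
somemodule and somefunc are placeholders):

  from mercurial import extensions

  def wrapper(orig, *args, **kwargs):
      # 'orig' is the function being wrapped; inspect or adjust the
      # arguments here, then delegate
      return orig(*args, **kwargs)

  def uisetup(ui):
      # from now on, calls to somemodule.somefunc land in wrapper first
      extensions.wrapfunction(somemodule, 'somefunc', wrapper)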
 
   $ cat > baz <<EOF
--- a/tests/test-backout.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-backout.t	Tue Oct 20 22:04:04 2020 +0530
@@ -804,5 +804,5 @@
 --no-commit can't be used with --merge
 
   $ hg backout --merge --no-commit 2
-  abort: cannot use --merge with --no-commit
+  abort: cannot specify both --no-commit and --merge
   [255]
--- a/tests/test-bundle2-format.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-bundle2-format.t	Tue Oct 20 22:04:04 2020 +0530
@@ -22,6 +22,7 @@
   > from mercurial import changegroup
   > from mercurial import error
   > from mercurial import obsolete
+  > from mercurial import pycompat
   > from mercurial import registrar
   > from mercurial.utils import procutil
   > 
@@ -169,7 +170,7 @@
   >         for chunk in bundler.getchunks():
   >             file.write(chunk)
   >     except RuntimeError as exc:
-  >         raise error.Abort(exc)
+  >         raise error.Abort(pycompat.bytestr(exc))
   >     finally:
   >         file.flush()
   > 
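The bytestr conversion is needed because error.Abort expects a bytes message,
while a RuntimeError stringifies to str on Python 3. In isolation, the pattern
is (a minimal sketch):

  from mercurial import error, pycompat

  try:
      raise RuntimeError('payload too large')
  except RuntimeError as exc:
      # pycompat.bytestr() stringifies the exception and yields bytes on
      # Python 3 (and a str subclass on Python 2)
      raise error.Abort(pycompat.bytestr(exc))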
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-check-cargo-lock.t	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,11 @@
+#require cargo test-repo
+  $ . "$TESTDIR/helpers-testrepo.sh"
+  $ cd "$TESTDIR"/../rust
+
+Check that Cargo.lock is up-to-date. This will fail with exit code 101 if it is not.
+
+  $ cargo check --locked --all --quiet
+
+However, most CIs will run `cargo build` or similar before running the tests, so we also need to check whether it was modified
+
+  $ testrepohg diff Cargo.lock
--- a/tests/test-check-code.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-check-code.t	Tue Oct 20 22:04:04 2020 +0530
@@ -69,6 +69,7 @@
   hg
   hgeditor
   hgweb.cgi
+  rustfmt.toml
   setup.py
 
 Prevent adding modules which could be shadowed by ancient .so/.dylib.
--- a/tests/test-check-py3-compat.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-check-py3-compat.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,4 +1,4 @@
-#require test-repo
+#require test-repo pure
 
   $ . "$TESTDIR/helpers-testrepo.sh"
   $ cd "$TESTDIR"/..
--- a/tests/test-chg.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-chg.t	Tue Oct 20 22:04:04 2020 +0530
@@ -197,6 +197,14 @@
 
   $ cd ..
 
+missing stdio
+-------------
+
+  $ CHGDEBUG=1 chg version -q 0<&-
+  chg: debug: * stdio fds are missing (glob)
+  chg: debug: * execute original hg (glob)
+  Mercurial Distributed SCM * (glob)
+
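The new test closes stdin with `0<&-`; chg then notices that one of file
descriptors 0-2 is invalid and falls back to executing the original hg binary
instead of talking to the command server. The detection amounts to something
like the following (sketched in Python purely for illustration; chg itself is
written in C):

  import os

  def stdio_fds_missing():
      for fd in (0, 1, 2):
          try:
              os.fstat(fd)
          except OSError:
              return True  # fd is closed or otherwise invalid
      return False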
 server lifecycle
 ----------------
 
--- a/tests/test-clone.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-clone.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1241,6 +1241,7 @@
   $ cat >> $HGRCPATH << EOF
   > [fsmonitor]
   > warn_update_file_count = 2
+  > warn_update_file_count_rust = 2
   > EOF
 
 We should see a warning about no fsmonitor on supported platforms
--- a/tests/test-completion.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-completion.t	Tue Oct 20 22:04:04 2020 +0530
@@ -80,6 +80,7 @@
   debugbuilddag
   debugbundle
   debugcapabilities
+  debugchangedfiles
   debugcheckstate
   debugcolor
   debugcommands
@@ -258,7 +259,7 @@
   cat: output, rev, decode, include, exclude, template
   clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
   commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
-  config: untrusted, edit, local, global, template
+  config: untrusted, edit, local, shared, global, template
   continue: dry-run
   copy: forget, after, at-rev, force, include, exclude, dry-run
   debugancestor: 
@@ -268,6 +269,7 @@
   debugbuilddag: mergeable-file, overwritten-file, new-file
   debugbundle: all, part-type, spec
   debugcapabilities: 
+  debugchangedfiles: 
   debugcheckstate: 
   debugcolor: style
   debugcommands: 
@@ -353,7 +355,7 @@
   push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
   recover: verify
   remove: after, force, subrepos, include, exclude, dry-run
-  rename: after, force, include, exclude, dry-run
+  rename: after, at-rev, force, include, exclude, dry-run
   resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
   revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
   rollback: dry-run, force
--- a/tests/test-contrib-emacs.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-contrib-emacs.t	Tue Oct 20 22:04:04 2020 +0530
@@ -2,7 +2,7 @@
   $ emacs -q -no-site-file -batch -l $TESTDIR/../contrib/hg-test-mode.el \
   >  -f ert-run-tests-batch-and-exit
   Running 1 tests (*) (glob)
-     passed  1/1  hg-test-mode--compilation-mode-support
+     passed  1/1  hg-test-mode--compilation-mode-support* (glob)
   
-  Ran 1 tests, 1 results as expected (*) (glob)
+  Ran 1 tests, 1 results as expected* (glob)
   
--- a/tests/test-convert-git.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-convert-git.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,11 +1,7 @@
 #require git
 
-  $ echo "[init]" >> $HOME/.gitconfig
-  $ echo "defaultBranch = master" >> $HOME/.gitconfig
-  $ echo "[core]" >> $HOME/.gitconfig
-  $ echo "autocrlf = false" >> $HOME/.gitconfig
-  $ echo "[core]" >> $HOME/.gitconfig
-  $ echo "autocrlf = false" >> $HOME/.gitconfig
+  $ git config -f $HOME/.gitconfig init.defaultBranch master
+  $ git config -f $HOME/.gitconfig core.autocrlf false
   $ echo "[extensions]" >> $HGRCPATH
   $ echo "convert=" >> $HGRCPATH
   $ cat >> $HGRCPATH <<EOF
@@ -32,7 +28,7 @@
   > }
   $ mkdir git-repo
   $ cd git-repo
-  $ git init-db >/dev/null 2>/dev/null
+  $ git init >/dev/null 2>/dev/null
   $ echo a > a
   $ mkdir d
   $ echo b > d/b
@@ -123,7 +119,7 @@
   $ count=10
   $ mkdir git-repo2
   $ cd git-repo2
-  $ git init-db >/dev/null 2>/dev/null
+  $ git init >/dev/null 2>/dev/null
   $ echo foo > foo
   $ git add foo
   $ commit -a -m 'add foo'
@@ -421,7 +417,7 @@
   $ count=19
   $ mkdir git-repo3
   $ cd git-repo3
-  $ git init-db >/dev/null 2>/dev/null
+  $ git init >/dev/null 2>/dev/null
   $ "$PYTHON" -c 'import struct; open("b", "wb").write(b"".join([struct.Struct(">B").pack(i) for i in range(256)])*16)'
   $ git add b
   $ commit -a -m addbinary
@@ -447,7 +443,7 @@
 
   $ mkdir git-repo4
   $ cd git-repo4
-  $ git init-db >/dev/null 2>/dev/null
+  $ git init >/dev/null 2>/dev/null
   $ echo >> foo
   $ git add foo
   $ commit -a -m addfoo
@@ -715,7 +711,7 @@
 
   $ mkdir git-repo5
   $ cd git-repo5
-  $ git init-db >/dev/null 2>/dev/null
+  $ git init >/dev/null 2>/dev/null
   $ echo 'sub' >> foo
   $ git add foo
   $ commit -a -m 'addfoo'
@@ -723,7 +719,7 @@
   $ cd ..
   $ mkdir git-repo6
   $ cd git-repo6
-  $ git init-db >/dev/null 2>/dev/null
+  $ git init >/dev/null 2>/dev/null
   $ git submodule add ${BASE} >/dev/null 2>/dev/null
   $ commit -a -m 'addsubmodule' >/dev/null 2>/dev/null
 
--- a/tests/test-convert-identity.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-convert-identity.t	Tue Oct 20 22:04:04 2020 +0530
@@ -8,9 +8,10 @@
   > convert =
   > EOF
   $ cat <<'EOF' > changefileslist.py
-  > from mercurial import (changelog, extensions)
+  > from mercurial import (changelog, extensions, metadata)
   > def wrap(orig, clog, manifest, files, *args, **kwargs):
-  >   return orig(clog, manifest, [b"a"], *args, **kwargs)
+  >   files = metadata.ChangingFiles(touched=[b"a"])
+  >   return orig(clog, manifest, files, *args, **kwargs)
   > def extsetup(ui):
   >   extensions.wrapfunction(changelog.changelog, 'add', wrap)
   > EOF
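changelog.add now takes a mercurial.metadata.ChangingFiles object rather than a
bare list of touched file names, which is why the wrapper builds one. A minimal
sketch of the replacement call (only the touched= keyword is taken from the hunk
above; other categories such as added and removed show up in the
debugchangedfiles output elsewhere in this changeset):

  from mercurial import metadata

  # equivalent of the old files list [b"a"]: record "a" as touched
  files = metadata.ChangingFiles(touched=[b"a"])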
--- a/tests/test-copies-chain-merge.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-copies-chain-merge.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,4 +1,4 @@
-#testcases filelog compatibility sidedata
+#testcases filelog compatibility changeset sidedata upgraded
 
 =====================================================
 Test Copy tracing for chain of copies involving merge
@@ -28,6 +28,14 @@
   > EOF
 #endif
 
+#if changeset
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > copies.read-from = changeset-only
+  > copies.write-to = changeset-only
+  > EOF
+#endif
+
 #if sidedata
   $ cat >> $HGRCPATH << EOF
   > [format]
@@ -42,7 +50,9 @@
 
 Add some linear renames initially
 
-  $ touch a b h
+  $ echo a > a
+  $ echo b > b
+  $ echo h > h
   $ hg ci -Am 'i-0 initial commit: a b h'
   adding a
   adding b
@@ -154,6 +164,12 @@
   o  0 i-0 initial commit: a b h
   
 
+Set up all merges
+=================
+
+This is done beforehand to validate that the upgrade process creates valid copy
+information.
+
 merging with unrelated change does not interfere with the renames
 ---------------------------------------------------------------
 
@@ -191,6 +207,631 @@
   o  0 i-0 initial commit: a b h
   
 
+
+merging with the side having a delete
+-------------------------------------
+
+case summary:
+- one with change to an unrelated file
+- one deleting the change
+and recreating an unrelated file after the merge
+
+  $ hg up 'desc("b-1")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("c-1")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mBCm-0 simple merge - one way'
+  $ echo bar > d
+  $ hg add d
+  $ hg ci -m 'mBCm-1 re-add d'
+  $ hg up 'desc("c-1")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mCBm-0 simple merge - the other way'
+  created new head
+  $ echo bar > d
+  $ hg add d
+  $ hg ci -m 'mCBm-1 re-add d'
+  $ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
+  @  16 mCBm-1 re-add d
+  |
+  o    15 mCBm-0 simple merge - the other way
+  |\
+  | | o  14 mBCm-1 re-add d
+  | | |
+  +---o  13 mBCm-0 simple merge - one way
+  | |/
+  | o  6 c-1 delete d
+  | |
+  o |  5 b-1: b update
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+Comparing with a merge re-adding the file afterward
+---------------------------------------------------
+
+Merge:
+- one with change to an unrelated file
+- one deleting and recreating the change
+
+  $ hg up 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("d-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mBDm-0 simple merge - one way'
+  $ hg up 'desc("d-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mDBm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
+  @    18 mDBm-0 simple merge - the other way
+  |\
+  +---o  17 mBDm-0 simple merge - one way
+  | |/
+  | o  8 d-2 re-add d
+  | |
+  | o  7 d-1 delete d
+  | |
+  o |  5 b-1: b update
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+
+Comparing with a merge with colliding rename
+--------------------------------------------
+
+- the "e-" branch renaming b to f (through 'g')
+- the "a-" branch renaming d to f (through e)
+
+  $ hg up 'desc("a-2")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("e-2")' --tool :union
+  merging f
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mAEm-0 simple merge - one way'
+  $ hg up 'desc("e-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("a-2")' --tool :union
+  merging f
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mEAm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
+  @    20 mEAm-0 simple merge - the other way
+  |\
+  +---o  19 mAEm-0 simple merge - one way
+  | |/
+  | o  10 e-2 g -move-> f
+  | |
+  | o  9 e-1 b -move-> g
+  | |
+  o |  4 a-2: e -move-> f
+  | |
+  o |  3 a-1: d -move-> e
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (d) with a rename (from h to i to d)
+
+  $ hg up 'desc("i-2")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg mv h i
+  $ hg commit -m "f-1: rename h -> i"
+  created new head
+  $ hg mv --force i d
+  $ hg commit -m "f-2: rename i -> d"
+  $ hg debugindex d
+     rev linkrev nodeid       p1           p2
+       0       2 169be882533b 000000000000 000000000000 (no-changeset !)
+       0       2 b789fdd96dc2 000000000000 000000000000 (changeset !)
+       1       8 b004912a8510 000000000000 000000000000
+       2      22 4a067cf8965d 000000000000 000000000000 (no-changeset !)
+       2      22 fe6f8b4f507f 000000000000 000000000000 (changeset !)
+  $ hg up 'desc("b-1")'
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("f-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mBFm-0 simple merge - one way'
+  $ hg up 'desc("f-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mFBm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mBFm")+desc("mFBm"))'
+  @    24 mFBm-0 simple merge - the other way
+  |\
+  +---o  23 mBFm-0 simple merge - one way
+  | |/
+  | o  22 f-2: rename i -> d
+  | |
+  | o  21 f-1: rename h -> i
+  | |
+  o |  5 b-1: b update
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+
+Merge:
+- one with change to a file
+- one deleting and recreating the file
+
+Unlike in the 'BD/DB' cases, an actual merge happened here. So we should
+consider history and renames on both branches of the merge.
+
+  $ hg up 'desc("i-2")'
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo "some update" >> d
+  $ hg commit -m "g-1: update d"
+  created new head
+  $ hg up 'desc("d-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("g-1")' --tool :union
+  merging d
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mDGm-0 simple merge - one way'
+  $ hg up 'desc("g-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("d-2")' --tool :union
+  merging d
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mGDm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mDGm")+desc("mGDm"))'
+  @    27 mGDm-0 simple merge - the other way
+  |\
+  +---o  26 mDGm-0 simple merge - one way
+  | |/
+  | o  25 g-1: update d
+  | |
+  o |  8 d-2 re-add d
+  | |
+  o |  7 d-1 delete d
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+
+Merge:
+- one with change to a file (d)
+- one overwriting that file with a rename (from h to i, to d)
+
+This case is similar to BF/FB, but an actual merge happens, so both sides of the
+history are relevant.
+
+Note:
+| In this case, the merge gets conflicting information since on one side we
+| have "a -> c -> d" and on the other we have "h -> i -> d".
+|
+| The current code arbitrarily picks one side.
+
+  $ hg up 'desc("f-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("g-1")' --tool :union
+  merging d
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mFGm-0 simple merge - one way'
+  created new head
+  $ hg up 'desc("g-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("f-2")' --tool :union
+  merging d
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'mGFm-0 simple merge - the other way'
+  created new head
+  $ hg log -G --rev '::(desc("mGFm")+desc("mFGm"))'
+  @    29 mGFm-0 simple merge - the other way
+  |\
+  +---o  28 mFGm-0 simple merge - one way
+  | |/
+  | o  25 g-1: update d
+  | |
+  o |  22 f-2: rename i -> d
+  | |
+  o |  21 f-1: rename h -> i
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+
+Comparing with merging with a deletion (and keeping the file)
+-------------------------------------------------------------
+
+Merge:
+- one removing a file (d)
+- one updating that file
+- the merge keeps the modified version of the file (canceling the delete)
+
+In this case, the file keeps on living after the merge, so we should not drop
+its copy tracing chain.
+
+  $ hg up 'desc("c-1")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("g-1")'
+  file 'd' was deleted in local [working copy] but was modified in other [merge rev].
+  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
+  What do you want to do? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg resolve -t :other d
+  (no more unresolved files)
+  $ hg ci -m "mCGm-0"
+  created new head
+
+  $ hg up 'desc("g-1")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("c-1")'
+  file 'd' was deleted in other [merge rev] but was modified in local [working copy].
+  You can use (c)hanged version, (d)elete, or leave (u)nresolved.
+  What do you want to do? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg resolve -t :local d
+  (no more unresolved files)
+  $ hg ci -m "mGCm-0"
+  created new head
+
+  $ hg log -G --rev '::(desc("mCGm")+desc("mGCm"))'
+  @    31 mGCm-0
+  |\
+  +---o  30 mCGm-0
+  | |/
+  | o  25 g-1: update d
+  | |
+  o |  6 c-1 delete d
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+
+
+Comparing with merge restoring an untouched deleted file
+--------------------------------------------------------
+
+Merge:
+- one removing a file (d)
+- one leaving the file untouched
+- the merge actively restores the file to the same content.
+
+In this case, the file keeps on living after the merge, so we should not drop
+its copy tracing chain.
+
+  $ hg up 'desc("c-1")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg revert --rev 'desc("b-1")' d
+  $ hg ci -m "mCB-revert-m-0"
+  created new head
+
+  $ hg up 'desc("b-1")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("c-1")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg revert --rev 'desc("b-1")' d
+  $ hg ci -m "mBC-revert-m-0"
+  created new head
+
+  $ hg log -G --rev '::(desc("mCB-revert-m")+desc("mBC-revert-m"))'
+  @    33 mBC-revert-m-0
+  |\
+  +---o  32 mCB-revert-m-0
+  | |/
+  | o  6 c-1 delete d
+  | |
+  o |  5 b-1: b update
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+
+  $ hg up null --quiet
+
+
+Test that sidedata computations during upgrades are correct
+===========================================================
+
+We upgrade a repository that is not using sidedata (the filelog case) and
+check that the same sidedata has been generated as if it had been computed at
+commit time.
+
+
+#if upgraded
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-side-data = yes
+  > exp-use-copies-side-data-changeset = yes
+  > EOF
+  $ hg debugformat -v
+  format-variant     repo config default
+  fncache:            yes    yes     yes
+  dotencode:          yes    yes     yes
+  generaldelta:       yes    yes     yes
+  sparserevlog:       yes    yes     yes
+  sidedata:            no    yes      no
+  persistent-nodemap:  no     no      no
+  copies-sdc:          no    yes      no
+  plain-cl-delta:     yes    yes     yes
+  compression:        * (glob)
+  compression-level:  default default default
+  $ hg debugupgraderepo --run --quiet
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: * (glob)
+     added: exp-copies-sidedata-changeset, exp-sidedata-flag
+  
+#endif
+
+
+#if no-compatibility no-filelog no-changeset
+
+  $ for rev in `hg log --rev 'all()' -T '{rev}\n'`; do
+  >     echo "##### revision $rev #####"
+  >     hg debugsidedata -c -v -- $rev
+  >     hg debugchangedfiles $rev
+  > done
+  ##### revision 0 #####
+  1 sidedata entries
+   entry-0014 size 34
+    '\x00\x00\x00\x03\x04\x00\x00\x00\x01\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00abh'
+  added      : a, ;
+  added      : b, ;
+  added      : h, ;
+  ##### revision 1 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00ac'
+  removed    : a, ;
+  added    p1: c, a;
+  ##### revision 2 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00cd'
+  removed    : c, ;
+  added    p1: d, c;
+  ##### revision 3 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00de'
+  removed    : d, ;
+  added    p1: e, d;
+  ##### revision 4 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00ef'
+  removed    : e, ;
+  added    p1: f, e;
+  ##### revision 5 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00b'
+  touched    : b, ;
+  ##### revision 6 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x0c\x00\x00\x00\x01\x00\x00\x00\x00d'
+  removed    : d, ;
+  ##### revision 7 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x0c\x00\x00\x00\x01\x00\x00\x00\x00d'
+  removed    : d, ;
+  ##### revision 8 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
+  added      : d, ;
+  ##### revision 9 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00bg'
+  removed    : b, ;
+  added    p1: g, b;
+  ##### revision 10 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x06\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00fg'
+  added    p1: f, g;
+  removed    : g, ;
+  ##### revision 11 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 12 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 13 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 14 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
+  added      : d, ;
+  ##### revision 15 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 16 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
+  added      : d, ;
+  ##### revision 17 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 18 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 19 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
+  merged     : f, ;
+  ##### revision 20 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
+  merged     : f, ;
+  ##### revision 21 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00hi'
+  removed    : h, ;
+  added    p1: i, h;
+  ##### revision 22 #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x16\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00di'
+  touched  p1: d, i;
+  removed    : i, ;
+  ##### revision 23 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 24 #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision 25 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d'
+  touched    : d, ;
+  ##### revision 26 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
+  merged     : d, ;
+  ##### revision 27 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
+  merged     : d, ;
+  ##### revision 28 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
+  merged     : d, ;
+  ##### revision 29 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
+  merged     : d, ;
+  ##### revision 30 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
+  salvaged   : d, ;
+  ##### revision 31 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
+  salvaged   : d, ;
+  ##### revision 32 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
+  salvaged   : d, ;
+  ##### revision 33 #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
+  salvaged   : d, ;
+
+#endif
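The entry-0014 blobs above can be read off directly: a big-endian u32 entry
count, then one 9-byte record per file (one flag byte, a u32 end offset into a
trailing blob of concatenated file names, and a u32 index of the copy-source
entry), followed by the name blob itself. A decoder sketch reconstructed purely
from these dumps (the flag values are inferred from the debugchangedfiles
output; the authoritative encoding lives in mercurial/metadata.py and may carry
additional bits, e.g. for p2 copies, which do not appear here):

  import struct

  # flag-byte action values, inferred from the output above
  ACTIONS = {0x04: 'added', 0x08: 'merged', 0x0c: 'removed',
             0x10: 'salvaged', 0x14: 'touched'}
  COPIED_P1 = 0x02  # low bit marking "copied from a p1 entry"

  def decode_files_sidedata(data):
      (count,) = struct.unpack_from('>I', data, 0)
      entries = [struct.unpack_from('>BII', data, 4 + 9 * i)
                 for i in range(count)]
      blob = data[4 + 9 * count:]  # concatenated file names
      names, prev = [], 0
      for _, end, _ in entries:  # second field: end offset of the name
          names.append(blob[prev:end])
          prev = end
      for (flags, _, copyidx), name in zip(entries, names):
          action = ACTIONS.get(flags & ~0x03, 'unknown')
          if flags & COPIED_P1:  # third field points at the copy source
              print('%s p1: %s, %s;'
                    % (action, name.decode(), names[copyidx].decode()))
          else:
              print('%s: %s, ;' % (action, name.decode()))

Feeding it the revision 1 blob above prints "removed: a, ;" and
"added p1: c, a;", matching the debugchangedfiles output modulo column padding.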
+
+
+Test copy information chaining
+==============================
+
+merging with unrelated change does not interfere with the renames
+---------------------------------------------------------------
+
+- rename on one side
+- unrelated change on the other side
+
+  $ hg log -G --rev '::(desc("mABm")+desc("mBAm"))'
+  o    12 mABm-0 simple merge - the other way
+  |\
+  +---o  11 mBAm-0 simple merge - one way
+  | |/
+  | o  5 b-1: b update
+  | |
+  o |  4 a-2: e -move-> f
+  | |
+  o |  3 a-1: d -move-> e
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
   $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mABm")'
   A f
     d
@@ -232,27 +873,8 @@
 - one deleting the change
 and recreating an unrelated file after the merge
 
-  $ hg up 'desc("b-1")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg merge 'desc("c-1")'
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mBCm-0 simple merge - one way'
-  $ echo bar > d
-  $ hg add d
-  $ hg ci -m 'mBCm-1 re-add d'
-  $ hg up 'desc("c-1")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg merge 'desc("b-1")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mCBm-0 simple merge - the other way'
-  created new head
-  $ echo bar > d
-  $ hg add d
-  $ hg ci -m 'mCBm-1 re-add d'
   $ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
-  @  16 mCBm-1 re-add d
+  o  16 mCBm-1 re-add d
   |
   o    15 mCBm-0 simple merge - the other way
   |\
@@ -327,25 +949,8 @@
 - one with change to an unrelated file
 - one deleting and recreating the change
 
-Note:
-| In this case, one of the merge wrongly record a merge while there is none.
-| This lead to bad copy tracing information to be dug up.
-
-  $ hg up 'desc("b-1")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("d-2")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mBDm-0 simple merge - one way'
-  $ hg up 'desc("d-2")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("b-1")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mDBm-0 simple merge - the other way'
-  created new head
   $ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
-  @    18 mDBm-0 simple merge - the other way
+  o    18 mDBm-0 simple merge - the other way
   |\
   +---o  17 mBDm-0 simple merge - one way
   | |/
@@ -383,33 +988,44 @@
   $ hg manifest --debug --rev 'desc("mDBm-0")' | grep '644   d'
   b004912a8510032a0350a74daa2803dadfb00e12 644   d
 
-The 0bb5445dc4d02f4e0d86cf16f9f3a411d0f17744 entry is wrong, since the file was
-deleted on one side (then recreate) and untouched on the other side, no "merge"
-has happened. The resulting `d` file is the untouched version from branch `D`,
-not a merge.
-
   $ hg manifest --debug --rev 'desc("d-2")' | grep '644   d'
   b004912a8510032a0350a74daa2803dadfb00e12 644   d
   $ hg manifest --debug --rev 'desc("b-1")' | grep '644   d'
-  01c2f5eabdc4ce2bdee42b5f86311955e6c8f573 644   d
-  $ hg debugindex d
+  169be882533bc917905d46c0c951aa9a1e288dcf 644   d (no-changeset !)
+  b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644   d (changeset !)
+  $ hg debugindex d | head -n 4
      rev linkrev nodeid       p1           p2
-       0       2 01c2f5eabdc4 000000000000 000000000000
+       0       2 169be882533b 000000000000 000000000000 (no-changeset !)
+       0       2 b789fdd96dc2 000000000000 000000000000 (changeset !)
        1       8 b004912a8510 000000000000 000000000000
+       2      22 4a067cf8965d 000000000000 000000000000 (no-changeset !)
+       2      22 fe6f8b4f507f 000000000000 000000000000 (changeset !)
 
-(This `hg log` output if wrong, since no merge actually happened).
+Log output should not include a merge commit, as no merge actually happened.
 
+#if no-changeset
   $ hg log -Gfr 'desc("mBDm-0")' d
   o  8 d-2 re-add d
   |
   ~
+#else
+  $ hg log -Gfr 'desc("mBDm-0")' d
+  o  8 d-2 re-add d
+  |
+  ~
+#endif
 
-This `hg log` output is correct
-
+#if no-changeset
   $ hg log -Gfr 'desc("mDBm-0")' d
   o  8 d-2 re-add d
   |
   ~
+#else
+  $ hg log -Gfr 'desc("mDBm-0")' d
+  o  8 d-2 re-add d
+  |
+  ~
+#endif
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBDm-0")'
   M b
@@ -427,21 +1043,8 @@
 - the "e-" branch renaming b to f (through 'g')
 - the "a-" branch renaming d to f (through e)
 
-  $ hg up 'desc("a-2")'
-  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg merge 'desc("e-2")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mAEm-0 simple merge - one way'
-  $ hg up 'desc("e-2")'
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("a-2")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mEAm-0 simple merge - the other way'
-  created new head
   $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
-  @    20 mEAm-0 simple merge - the other way
+  o    20 mEAm-0 simple merge - the other way
   |\
   +---o  19 mAEm-0 simple merge - one way
   | |/
@@ -459,19 +1062,37 @@
   |
   o  0 i-0 initial commit: a b h
   
+#if no-changeset
   $ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644   f'
-  eb806e34ef6be4c264effd5933d31004ad15a793 644   f
+  c39c6083dad048d5138618a46f123e2f397f4f18 644   f
   $ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644   f'
-  eb806e34ef6be4c264effd5933d31004ad15a793 644   f
+  a9a8bc3860c9d8fa5f2f7e6ea8d40498322737fd 644   f
   $ hg manifest --debug --rev 'desc("a-2")' | grep '644   f'
-  0dd616bc7ab1a111921d95d76f69cda5c2ac539c 644   f
+  263ea25e220aaeb7b9bac551c702037849aa75e8 644   f
   $ hg manifest --debug --rev 'desc("e-2")' | grep '644   f'
-  6da5a2eecb9c833f830b67a4972366d49a9a142c 644   f
+  71b9b7e73d973572ade6dd765477fcee6890e8b1 644   f
   $ hg debugindex f
      rev linkrev nodeid       p1           p2
-       0       4 0dd616bc7ab1 000000000000 000000000000
-       1      10 6da5a2eecb9c 000000000000 000000000000
-       2      19 eb806e34ef6b 0dd616bc7ab1 6da5a2eecb9c
+       0       4 263ea25e220a 000000000000 000000000000
+       1      10 71b9b7e73d97 000000000000 000000000000
+       2      19 c39c6083dad0 263ea25e220a 71b9b7e73d97
+       3      20 a9a8bc3860c9 71b9b7e73d97 263ea25e220a
+#else
+  $ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644   f'
+  498e8799f49f9da1ca06bb2d6d4accf165c5b572 644   f
+  $ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644   f'
+  c5b506a7118667a38a9c9348a1f63b679e382f57 644   f
+  $ hg manifest --debug --rev 'desc("a-2")' | grep '644   f'
+  b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644   f
+  $ hg manifest --debug --rev 'desc("e-2")' | grep '644   f'
+  1e88685f5ddec574a34c70af492f95b6debc8741 644   f
+  $ hg debugindex f
+     rev linkrev nodeid       p1           p2
+       0       4 b789fdd96dc2 000000000000 000000000000
+       1      10 1e88685f5dde 000000000000 000000000000
+       2      19 498e8799f49f b789fdd96dc2 1e88685f5dde
+       3      20 c5b506a71186 1e88685f5dde b789fdd96dc2
+#endif
 
 # Here the filelog based implementation is not looking at the rename
 # information (because the file exists on both sides). However the changelog
@@ -546,33 +1167,8 @@
 - one with change to an unrelated file (b)
 - one overwriting a file (d) with a rename (from h to i to d)
 
-  $ hg up 'desc("i-2")'
-  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg mv h i
-  $ hg commit -m "f-1: rename h -> i"
-  created new head
-  $ hg mv --force i d
-  $ hg commit -m "f-2: rename i -> d"
-  $ hg debugindex d
-     rev linkrev nodeid       p1           p2
-       0       2 01c2f5eabdc4 000000000000 000000000000
-       1       8 b004912a8510 000000000000 000000000000
-       2      22 c72365ee036f 000000000000 000000000000
-  $ hg up 'desc("b-1")'
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("f-2")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mBFm-0 simple merge - one way'
-  $ hg up 'desc("f-2")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("b-1")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mFBm-0 simple merge - the other way'
-  created new head
   $ hg log -G --rev '::(desc("mBFm")+desc("mFBm"))'
-  @    24 mFBm-0 simple merge - the other way
+  o    24 mFBm-0 simple merge - the other way
   |\
   +---o  23 mBFm-0 simple merge - one way
   | |/
@@ -588,13 +1184,10 @@
   |
   o  0 i-0 initial commit: a b h
   
-The overwriting should take over. However, the behavior is currently buggy
-
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBFm-0")'
   M b
   A d
     h
-    h (false !)
   R a
   R h
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFBm-0")'
@@ -626,8 +1219,7 @@
     i (no-filelog !)
   R i
 
-The following graphlog is wrong, the "a -> c -> d" chain was overwritten and should not appear.
-
+#if no-changeset
   $ hg log -Gfr 'desc("mBFm-0")' d
   o  22 f-2: rename i -> d
   |
@@ -635,9 +1227,14 @@
   :
   o  0 i-0 initial commit: a b h
   
+#else
+  $ hg log -Gfr 'desc("mBFm-0")' d
+  o  22 f-2: rename i -> d
+  |
+  ~
+#endif
 
-The following output is correct.
-
+#if no-changeset
   $ hg log -Gfr 'desc("mFBm-0")' d
   o  22 f-2: rename i -> d
   |
@@ -645,6 +1242,12 @@
   :
   o  0 i-0 initial commit: a b h
   
+#else
+  $ hg log -Gfr 'desc("mFBm-0")' d
+  o  22 f-2: rename i -> d
+  |
+  ~
+#endif
 
 
 Merge:
@@ -654,28 +1257,8 @@
 Unlike in the 'BD/DB' cases, an actual merge happened here. So we should
 consider history and renames on both branches of the merge.
 
-  $ hg up 'desc("i-2")'
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ echo "some update" >> d
-  $ hg commit -m "g-1: update d"
-  created new head
-  $ hg up 'desc("d-2")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("g-1")' --tool :union
-  merging d
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mDGm-0 simple merge - one way'
-  $ hg up 'desc("g-1")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("d-2")' --tool :union
-  merging d
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mGDm-0 simple merge - the other way'
-  created new head
   $ hg log -G --rev '::(desc("mDGm")+desc("mGDm"))'
-  @    27 mGDm-0 simple merge - the other way
+  o    27 mGDm-0 simple merge - the other way
   |\
   +---o  26 mDGm-0 simple merge - one way
   | |/
@@ -714,6 +1297,7 @@
   $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGDm-0")'
   M d
 
+#if no-changeset
   $ hg log -Gfr 'desc("mDGm-0")' d
   o    26 mDGm-0 simple merge - one way
   |\
@@ -727,8 +1311,21 @@
   |
   o  0 i-0 initial commit: a b h
   
+#else
+  $ hg log -Gfr 'desc("mDGm-0")' d
+  o    26 mDGm-0 simple merge - one way
+  |\
+  | o  25 g-1: update d
+  | |
+  o |  8 d-2 re-add d
+  |/
+  o  2 i-2: c -move-> d
+  |
+  ~
+#endif
 
 
+#if no-changeset
   $ hg log -Gfr 'desc("mDGm-0")' d
   o    26 mDGm-0 simple merge - one way
   |\
@@ -742,6 +1339,18 @@
   |
   o  0 i-0 initial commit: a b h
   
+#else
+  $ hg log -Gfr 'desc("mDGm-0")' d
+  o    26 mDGm-0 simple merge - one way
+  |\
+  | o  25 g-1: update d
+  | |
+  o |  8 d-2 re-add d
+  |/
+  o  2 i-2: c -move-> d
+  |
+  ~
+#endif
 
 
 Merge:
@@ -757,24 +1366,8 @@
 |
 | The current code arbitrarily picks one side
 
-  $ hg up 'desc("f-2")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg merge 'desc("g-1")' --tool :union
-  merging d
-  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mFGm-0 simple merge - one way'
-  created new head
-  $ hg up 'desc("g-1")'
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("f-2")' --tool :union
-  merging d
-  0 files updated, 1 files merged, 1 files removed, 0 files unresolved
-  (branch merge, don't forget to commit)
-  $ hg ci -m 'mGFm-0 simple merge - the other way'
-  created new head
   $ hg log -G --rev '::(desc("mGFm")+desc("mFGm"))'
-  @    29 mGFm-0 simple merge - the other way
+  o    29 mGFm-0 simple merge - the other way
   |\
   +---o  28 mFGm-0 simple merge - one way
   | |/
@@ -792,13 +1385,13 @@
   
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm-0")'
   A d
-    h (no-filelog !)
-    a (filelog !)
+    h
   R a
   R h
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm-0")'
   A d
-    a
+    a (no-filelog !)
+    h (filelog !)
   R a
   R h
   $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFGm-0")'
@@ -822,6 +1415,7 @@
     h (no-filelog !)
   R h
 
+#if no-changeset
   $ hg log -Gfr 'desc("mFGm-0")' d
   o    28 mFGm-0 simple merge - one way
   |\
@@ -837,9 +1431,22 @@
   |
   o  0 i-0 initial commit: a b h
   
+#else
+  $ hg log -Gfr 'desc("mFGm-0")' d
+  o    28 mFGm-0 simple merge - one way
+  |\
+  | o  25 g-1: update d
+  | |
+  o |  22 f-2: rename i -> d
+  |/
+  o  2 i-2: c -move-> d
+  |
+  ~
+#endif
 
+#if no-changeset
   $ hg log -Gfr 'desc("mGFm-0")' d
-  @    29 mGFm-0 simple merge - the other way
+  o    29 mGFm-0 simple merge - the other way
   |\
   | o  25 g-1: update d
   | |
@@ -853,3 +1460,109 @@
   |
   o  0 i-0 initial commit: a b h
   
+#else
+  $ hg log -Gfr 'desc("mGFm-0")' d
+  o    29 mGFm-0 simple merge - the other way
+  |\
+  | o  25 g-1: update d
+  | |
+  o |  22 f-2: rename i -> d
+  |/
+  o  2 i-2: c -move-> d
+  |
+  ~
+#endif
+
+
+Comparing with merging with a deletion (and keeping the file)
+-------------------------------------------------------------
+
+Merge:
+- one removing a file (d)
+- one updating that file
+- the merge keeps the modified version of the file (canceling the delete)
+
+In this case, the file keeps on living after the merge, so we should not drop
+its copy tracing chain.
+
+  $ hg log -G --rev '::(desc("mCGm")+desc("mGCm"))'
+  o    31 mGCm-0
+  |\
+  +---o  30 mCGm-0
+  | |/
+  | o  25 g-1: update d
+  | |
+  o |  6 c-1 delete d
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+'a' is the copy source of 'd'
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCGm-0")'
+  A d
+    a (no-compatibility no-changeset !)
+  R a
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGCm-0")'
+  A d
+    a (no-compatibility no-changeset !)
+  R a
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCGm-0")'
+  A d
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mGCm-0")'
+  A d
+  $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mCGm-0")'
+  $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGCm-0")'
+
+
+Comparing with merge restoring an untouched deleted file
+--------------------------------------------------------
+
+Merge:
+- one removing a file (d)
+- one leaving the file untouched
+- the merge actively restores the file to the same content.
+
+In this case, the file keeps on living after the merge, so we should not drop
+its copy tracing chain.
+
+  $ hg log -G --rev '::(desc("mCB-revert-m")+desc("mBC-revert-m"))'
+  o    33 mBC-revert-m-0
+  |\
+  +---o  32 mCB-revert-m-0
+  | |/
+  | o  6 c-1 delete d
+  | |
+  o |  5 b-1: b update
+  |/
+  o  2 i-2: c -move-> d
+  |
+  o  1 i-1: a -move-> c
+  |
+  o  0 i-0 initial commit: a b h
+  
+
+'a' is the copy source of 'd'
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCB-revert-m-0")'
+  M b
+  A d
+    a (no-compatibility no-changeset !)
+  R a
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC-revert-m-0")'
+  M b
+  A d
+    a (no-compatibility no-changeset !)
+  R a
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCB-revert-m-0")'
+  M b
+  A d
+  $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mBC-revert-m-0")'
+  M b
+  A d
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mCB-revert-m-0")'
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBC-revert-m-0")'
--- a/tests/test-copies-in-changeset.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-copies-in-changeset.t	Tue Oct 20 22:04:04 2020 +0530
@@ -79,11 +79,9 @@
   2\x00a (esc)
 #else
   $ hg debugsidedata -c -v -- -1
-  2 sidedata entries
-   entry-0010 size 11
-    '0\x00a\n1\x00a\n2\x00a'
-   entry-0012 size 5
-    '0\n1\n2'
+  1 sidedata entries
+   entry-0014 size 44
+    '\x00\x00\x00\x04\x00\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00\x06\x00\x00\x00\x03\x00\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x00abcd'
 #endif
 
   $ hg showcopies
@@ -117,13 +115,9 @@
 
 #else
   $ hg debugsidedata -c -v -- -1
-  3 sidedata entries
-   entry-0010 size 3
-    '1\x00b'
-   entry-0012 size 1
-    '1'
-   entry-0013 size 1
-    '0'
+  1 sidedata entries
+   entry-0014 size 25
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x03\x00\x00\x00\x00bb2'
 #endif
 
   $ hg showcopies
@@ -165,8 +159,8 @@
 #else
   $ hg debugsidedata -c -v -- -1
   1 sidedata entries
-   entry-0010 size 4
-    '0\x00b2'
+   entry-0014 size 25
+    '\x00\x00\x00\x02\x00\x00\x00\x00\x02\x00\x00\x00\x00\x16\x00\x00\x00\x03\x00\x00\x00\x00b2c'
 #endif
 
   $ hg showcopies
@@ -221,13 +215,9 @@
 
 #else
   $ hg debugsidedata -c -v -- -1
-  3 sidedata entries
-   entry-0010 size 7
-    '0\x00a\n2\x00f'
-   entry-0011 size 3
-    '1\x00d'
-   entry-0012 size 5
-    '0\n1\n2'
+  1 sidedata entries
+   entry-0014 size 64
+    '\x00\x00\x00\x06\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x00\x07\x00\x00\x00\x05\x00\x00\x00\x01\x06\x00\x00\x00\x06\x00\x00\x00\x02adfghi'
 #endif
 
   $ hg showcopies
@@ -250,11 +240,9 @@
 #else
   $ hg ci -m 'copy a to j'
   $ hg debugsidedata -c -v -- -1
-  2 sidedata entries
-   entry-0010 size 3
-    '0\x00a'
-   entry-0012 size 1
-    '0'
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00aj'
 #endif
   $ hg debugdata j 0
   \x01 (esc)
@@ -281,11 +269,9 @@
   $ hg ci --amend -m 'copy a to j, v2'
   saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-*-amend.hg (glob)
   $ hg debugsidedata -c -v -- -1
-  2 sidedata entries
-   entry-0010 size 3
-    '0\x00a'
-   entry-0012 size 1
-    '0'
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00aj'
 #endif
   $ hg showcopies --config experimental.copies.read-from=filelog-only
   a -> j
@@ -304,6 +290,9 @@
 #else
   $ hg ci -m 'modify j'
   $ hg debugsidedata -c -v -- -1
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00j'
 #endif
 
 Test writing only to filelog
@@ -318,11 +307,9 @@
 #else
   $ hg ci -m 'copy a to k'
   $ hg debugsidedata -c -v -- -1
-  2 sidedata entries
-   entry-0010 size 3
-    '0\x00a'
-   entry-0012 size 1
-    '0'
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00ak'
 #endif
 
   $ hg debugdata k 0
@@ -439,10 +426,10 @@
   compression-level:  default default default
   $ hg debugsidedata -c -- 0
   1 sidedata entries
-   entry-0012 size 1
+   entry-0014 size 14
   $ hg debugsidedata -c -- 1
   1 sidedata entries
-   entry-0013 size 1
+   entry-0014 size 14
   $ hg debugsidedata -m -- 0
   $ cat << EOF > .hg/hgrc
   > [format]
@@ -463,7 +450,11 @@
   compression:        zlib   zlib    zlib
   compression-level:  default default default
   $ hg debugsidedata -c -- 0
+  1 sidedata entries
+   entry-0014 size 14
   $ hg debugsidedata -c -- 1
+  1 sidedata entries
+   entry-0014 size 14
   $ hg debugsidedata -m -- 0
 
 upgrading
@@ -487,10 +478,10 @@
   compression-level:  default default default
   $ hg debugsidedata -c -- 0
   1 sidedata entries
-   entry-0012 size 1
+   entry-0014 size 14
   $ hg debugsidedata -c -- 1
   1 sidedata entries
-   entry-0013 size 1
+   entry-0014 size 14
   $ hg debugsidedata -m -- 0
 
 #endif
--- a/tests/test-copies.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-copies.t	Tue Oct 20 22:04:04 2020 +0530
@@ -463,6 +463,53 @@
   x -> z
 
 
+Create x and y, then rename x to z on one side of the merge, and rename y to z
+and then delete z on the other side.
+  $ newrepo
+  $ echo x > x
+  $ echo y > y
+  $ hg ci -Aqm 'add x and y'
+  $ hg mv x z
+  $ hg ci -qm 'rename x to z'
+  $ hg co -q 0
+  $ hg mv y z
+  $ hg ci -qm 'rename y to z'
+  $ hg rm z
+  $ hg ci -m 'delete z'
+  $ hg merge -q 1
+  $ echo z > z
+  $ hg ci -m 'merge 1 into 3'
+Try merging the other direction too
+  $ hg co -q 1
+  $ hg merge -q 3
+  $ echo z > z
+  $ hg ci -m 'merge 3 into 1'
+  created new head
+  $ hg l
+  @    5 merge 3 into 1
+  |\   z
+  +---o  4 merge 1 into 3
+  | |/   z
+  | o  3 delete z
+  | |  z
+  | o  2 rename y to z
+  | |  y z
+  o |  1 rename x to z
+  |/   x z
+  o  0 add x and y
+     x y
+  $ hg debugpathcopies 1 4
+  $ hg debugpathcopies 2 4
+  x -> z (no-filelog !)
+  $ hg debugpathcopies 0 4
+  x -> z (filelog !)
+  $ hg debugpathcopies 1 5
+  $ hg debugpathcopies 2 5
+  x -> z (no-filelog !)
+  $ hg debugpathcopies 0 5
+  x -> z
+
+
 Test for a case in fullcopytracing algorithm where neither of the merging csets
 is a descendant of the merge base. This test reflects that the algorithm
 correctly finds the copies:
--- a/tests/test-dirstate.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-dirstate.t	Tue Oct 20 22:04:04 2020 +0530
@@ -74,7 +74,7 @@
   > )
   > 
   > def wraprecordupdates(*args):
-  >     raise error.Abort("simulated error while recording dirstateupdates")
+  >     raise error.Abort(b"simulated error while recording dirstateupdates")
   > 
   > def reposetup(ui, repo):
   >     extensions.wrapfunction(mergestatemod, 'recordupdates',
--- a/tests/test-fastannotate-hg.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-fastannotate-hg.t	Tue Oct 20 22:04:04 2020 +0530
@@ -481,26 +481,25 @@
 and its ancestor by overriding "repo._filecommit".
 
   $ cat > ../legacyrepo.py <<EOF
-  > from mercurial import error, node
-  > def reposetup(ui, repo):
-  >     class legacyrepo(repo.__class__):
-  >         def _filecommit(self, fctx, manifest1, manifest2,
-  >                         linkrev, tr, changelist, includecopymeta):
-  >             fname = fctx.path()
-  >             text = fctx.data()
-  >             flog = self.file(fname)
-  >             fparent1 = manifest1.get(fname, node.nullid)
-  >             fparent2 = manifest2.get(fname, node.nullid)
-  >             meta = {}
-  >             copy = fctx.renamed()
-  >             if copy and copy[0] != fname:
-  >                 raise error.Abort('copying is not supported')
-  >             if fparent2 != node.nullid:
-  >                 changelist.append(fname)
-  >                 return flog.add(text, meta, tr, linkrev,
-  >                                 fparent1, fparent2)
-  >             raise error.Abort('only merging is supported')
-  >     repo.__class__ = legacyrepo
+  > from __future__ import absolute_import
+  > from mercurial import commit, error, extensions, node
+  > def _filecommit(orig, repo, fctx, manifest1, manifest2,
+  >                 linkrev, tr, includecopymeta, ms):
+  >     fname = fctx.path()
+  >     text = fctx.data()
+  >     flog = repo.file(fname)
+  >     fparent1 = manifest1.get(fname, node.nullid)
+  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     meta = {}
+  >     copy = fctx.copysource()
+  >     if copy and copy != fname:
+  >         raise error.Abort('copying is not supported')
+  >     if fparent2 != node.nullid:
+  >         return flog.add(text, meta, tr, linkrev,
+  >                         fparent1, fparent2), 'modified'
+  >     raise error.Abort('only merging is supported')
+  > def uisetup(ui):
+  >     extensions.wrapfunction(commit, '_filecommit', _filecommit)
   > EOF
 
   $ cat > baz <<EOF
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fix-pickle.t	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,45 @@
+A script that implements uppercasing all letters in a file.
+
+  $ UPPERCASEPY="$TESTTMP/uppercase.py"
+  $ cat > $UPPERCASEPY <<EOF
+  > import sys
+  > from mercurial.utils.procutil import setbinary
+  > setbinary(sys.stdin)
+  > setbinary(sys.stdout)
+  > sys.stdout.write(sys.stdin.read().upper())
+  > EOF
+  $ TESTLINES="foo\nbar\nbaz\n"
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY
+  FOO
+  BAR
+  BAZ
+
+This file attempts to test our workarounds for pickle's lack of
+support for short reads.
+
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > fix =
+  > [fix]
+  > uppercase-whole-file:command="$PYTHON" $UPPERCASEPY
+  > uppercase-whole-file:pattern=set:**
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+# Create a file that's large enough that it seems to not fit in
+# pickle's buffer, making it use the code path that expects our
+# _blockingreader's read() method to return bytes.
+  $ echo "some stuff" > file
+  $ for i in $($TESTDIR/seq.py 13); do
+  >   cat file file > tmp
+  >   mv -f tmp file
+  > done
+  $ hg commit -Am "add large file"
+  adding file
+
+Check that we don't get a crash
+
+  $ hg fix -r .
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-fix.hg (glob)
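For context, the workaround this test exercises: pickle assumes that read(n)
returns exactly n bytes, but reads from a pipe may legitimately return fewer.
The idea behind Mercurial's _blockingreader, sketched (illustrative only, not
the actual implementation):

  class BlockingReader(object):
      """Wrap a file object so read(n) returns n bytes unless EOF hits."""

      def __init__(self, fh):
          self._fh = fh

      def read(self, size=-1):
          if size < 0:
              return self._fh.read()
          chunks, remaining = [], size
          while remaining > 0:
              chunk = self._fh.read(remaining)
              if not chunk:
                  break  # EOF before 'size' bytes became available
              chunks.append(chunk)
              remaining -= len(chunk)
          return b''.join(chunks)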
--- a/tests/test-fix-topology.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-fix-topology.t	Tue Oct 20 22:04:04 2020 +0530
@@ -85,7 +85,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ printf "hhhh\n" > h
-  $ hg commit -Am "change H"
+  $ hg commit -Am "change H (child of b53d63e816fb and 0e49f92ee6e9)"
   adding h
   $ hg merge -r 4
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -96,7 +96,7 @@
   $ hg checkout 2
   0 files updated, 0 files merged, 6 files removed, 0 files unresolved
   $ printf "jjjj\n" > j
-  $ hg commit -Am "change J"
+  $ hg commit -Am "change J (child of 7f371349286e)"
   adding j
   created new head
   $ hg checkout 7
@@ -105,26 +105,26 @@
   $ hg add
   adding k
 
-  $ hg log --graph --template '{rev} {desc}\n'
-  o  9 change J
+  $ hg log --graph --template '{rev}:{node|short} {desc}\n'
+  o  9:884041ccc490 change J (child of 7f371349286e)
   |
-  | o    8 change I
+  | o    8:b7c772105fd2 change I
   | |\
-  | | @    7 change H
+  | | @    7:4e7b9312dad2 change H (child of b53d63e816fb and 0e49f92ee6e9)
   | | |\
-  | | | o  6 change G
+  | | | o  6:0e49f92ee6e9 change G
   | | | |
-  | | o |  5 change F
+  | | o |  5:b53d63e816fb change F
   | | |/
-  | o |  4 change E
+  | o |  4:ddad58af5e51 change E
   |/| |
-  | o |  3 change D
+  | o |  3:c015ebfd2bfe change D
   | |/
-  o |  2 change C
+  o |  2:7f371349286e change C
   | |
-  o |  1 change B
+  o |  1:388fdd33fea0 change B
   |/
-  o  0 change A
+  o  0:a55a84d97a24 change A
   
 
 Fix all but the root revision and its four children.
@@ -137,26 +137,26 @@
 (though it is rendered in a slightly different order now).
 
 #if obsstore-on
-  $ hg log --graph --template '{rev} {desc}\n'
-  o  14 change J
+  $ hg log --graph --template '{rev}:{node|short} {desc}\n'
+  o  14:d8d0e7974598 change J (child of 89de0da1d5da)
   |
-  | o    13 change I
+  | o    13:4fc0b354461e change I
   | |\
-  | | @    12 change H
+  | | @    12:1c45f3923443 change H (child of b53d63e816fb and 0e49f92ee6e9)
   | | |\
-  | o | |  11 change E
+  | o | |  11:d75754455722 change E
   |/| | |
-  o | | |  10 change C
+  o | | |  10:89de0da1d5da change C
   | | | |
-  | | | o  6 change G
+  | | | o  6:0e49f92ee6e9 change G
   | | | |
-  | | o |  5 change F
+  | | o |  5:b53d63e816fb change F
   | | |/
-  | o /  3 change D
+  | o /  3:c015ebfd2bfe change D
   | |/
-  o /  1 change B
+  o /  1:388fdd33fea0 change B
   |/
-  o  0 change A
+  o  0:a55a84d97a24 change A
   
   $ C=10
   $ E=11
@@ -164,26 +164,26 @@
   $ I=13
   $ J=14
 #else
-  $ hg log --graph --template '{rev} {desc}\n'
-  o  9 change J
+  $ hg log --graph --template '{rev}:{node|short} {desc}\n'
+  o  9:d8d0e7974598 change J (child of 89de0da1d5da)
   |
-  | o    8 change I
+  | o    8:4fc0b354461e change I
   | |\
-  | | @    7 change H
+  | | @    7:1c45f3923443 change H (child of b53d63e816fb and 0e49f92ee6e9)
   | | |\
-  | o | |  6 change E
+  | o | |  6:d75754455722 change E
   |/| | |
-  o | | |  5 change C
+  o | | |  5:89de0da1d5da change C
   | | | |
-  | | | o  4 change G
+  | | | o  4:0e49f92ee6e9 change G
   | | | |
-  | | o |  3 change F
+  | | o |  3:b53d63e816fb change F
   | | |/
-  | o /  2 change D
+  | o /  2:c015ebfd2bfe change D
   | |/
-  o /  1 change B
+  o /  1:388fdd33fea0 change B
   |/
-  o  0 change A
+  o  0:a55a84d97a24 change A
   
   $ C=5
   $ E=6
--- a/tests/test-fix.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-fix.t	Tue Oct 20 22:04:04 2020 +0530
@@ -84,15 +84,15 @@
       lines of files, unless the --whole flag is used. Some tools may always
       affect the whole file regardless of --whole.
   
-      If revisions are specified with --rev, those revisions will be checked,
-      and they may be replaced with new revisions that have fixed file content.
-      It is desirable to specify all descendants of each specified revision, so
-      that the fixes propagate to the descendants. If all descendants are fixed
-      at the same time, no merging, rebasing, or evolution will be required.
+      If --working-dir is used, files with uncommitted changes in the working
+      copy will be fixed. Note that no backups are made.
   
-      If --working-dir is used, files with uncommitted changes in the working
-      copy will be fixed. If the checked-out revision is also fixed, the working
-      directory will update to the replacement revision.
+      If revisions are specified with --source, those revisions and their
+      descendants will be checked, and they may be replaced with new revisions
+      that have fixed file content. By automatically including the descendants,
+      no merging, rebasing, or evolution will be required. If an ancestor of the
+      working copy is included, then the working copy itself will also be fixed,
+      and the working copy will be updated to the fixed parent.
   
       When determining what lines of each file to fix at each revision, the
       whole set of revisions being fixed is considered, so that fixes to earlier
@@ -878,7 +878,7 @@
 
   $ hg --config extensions.rebase= fix -r .
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ cd ..
--- a/tests/test-fncache.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-fncache.t	Tue Oct 20 22:04:04 2020 +0530
@@ -238,7 +238,7 @@
   > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
   >     def releasewrap():
   >         l.held = False # ensure __del__ is a noop
-  >         raise error.Abort("forced lock failure")
+  >         raise error.Abort(b"forced lock failure")
   >     l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
   >     return l
   > 
--- a/tests/test-git-interop.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-git-interop.t	Tue Oct 20 22:04:04 2020 +0530
@@ -270,3 +270,89 @@
   +++ b/beta	Mon Jan 01 00:00:11 2007 +0000
   @@ -0,0 +1,1 @@
   +beta
+
+
+Interactive commit should work as expected
+
+  $ echo bar >> alpha
+  $ echo bar >> beta
+  $ hg commit -m "test interactive commit" -i --config ui.interactive=true --config ui.interface=text << EOF
+  > y
+  > y
+  > n
+  > EOF
+  diff --git a/alpha b/alpha
+  1 hunks, 1 lines changed
+  examine changes to 'alpha'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  @@ -1,3 +1,4 @@
+   alpha
+   a
+   a
+  +bar
+  record change 1/2 to 'alpha'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  diff --git a/beta b/beta
+  1 hunks, 1 lines changed
+  examine changes to 'beta'?
+  (enter ? for help) [Ynesfdaq?] n
+  
+Status should be consistent for both systems
+
+  $ hg status
+  heads mismatch, rebuilding dagcache
+  M beta
+  $ git status
+  On branch master
+  Changes not staged for commit:
+    (use "git add <file>..." to update what will be committed)
+    (use "git checkout -- <file>..." to discard changes in working directory)
+  
+  	modified:   beta
+  
+  no changes added to commit (use "git add" and/or "git commit -a")
+
+Contents of each commit should be the same
+
+  $ hg ex -r .
+  # HG changeset patch
+  # User test <test>
+  # Date 0 0
+  #      Thu Jan 01 00:00:00 1970 +0000
+  # Node ID 80adc61cf57e99f6a412d83fee6239d1556cefcf
+  # Parent  ae1ab744f95bfd5b07cf573baef98a778058537b
+  test interactive commit
+  
+  diff -r ae1ab744f95b -r 80adc61cf57e alpha
+  --- a/alpha	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/alpha	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,3 +1,4 @@
+   alpha
+   a
+   a
+  +bar
+  $ git show
+  commit 80adc61cf57e99f6a412d83fee6239d1556cefcf
+  Author: test <test>
+  Date:   Thu Jan 1 00:00:00 1970 +0000
+  
+      test interactive commit
+  
+  diff --git a/alpha b/alpha
+  index d112a75..d2a2e9a 100644
+  --- a/alpha
+  +++ b/alpha
+  @@ -1,3 +1,4 @@
+   alpha
+   a
+   a
+  +bar
+
+Deleting files should also work (this was issue6398)
+  $ hg revert -r . --all
+  reverting beta
+  $ hg rm beta
+  $ hg ci -m 'remove beta'
+
--- a/tests/test-graft-interrupted.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-graft-interrupted.t	Tue Oct 20 22:04:04 2020 +0530
@@ -622,7 +622,7 @@
   $ hg log -GT "{rev}:{node|short} {desc}\n"
   @  4:2aa9ad1006ff B in file a
   |
-  | %  3:09e253b87e17 A in file a
+  | o  3:09e253b87e17 A in file a
   | |
   | o  2:d36c0562f908 c
   | |
@@ -669,7 +669,7 @@
   $ hg log -GT "{rev}:{node|short} {desc}\n"
   @  4:2aa9ad1006ff B in file a
   |
-  | %  3:09e253b87e17 A in file a
+  | o  3:09e253b87e17 A in file a
   | |
   | o  2:d36c0562f908 c
   | |
@@ -712,7 +712,7 @@
   $ hg log -GT "{rev}:{node|short} {desc}\n"
   @  4:2aa9ad1006ff B in file a
   |
-  | %  3:09e253b87e17 A in file a
+  | o  3:09e253b87e17 A in file a
   | |
   | o  2:d36c0562f908 c
   | |
--- a/tests/test-graft.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-graft.t	Tue Oct 20 22:04:04 2020 +0530
@@ -247,9 +247,9 @@
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: 4c60f11aa304, local: 1905859650ec+, remote: 9c233e8e184d
-   preserving e for resolve of e
    d: remote is newer -> g
   getting d
+   preserving e for resolve of e
    e: versions differ -> m (premerge)
   picked tool ':merge' for e (binary False symlink False changedelete False)
   merging e
--- a/tests/test-grep.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-grep.t	Tue Oct 20 22:04:04 2020 +0530
@@ -21,6 +21,18 @@
   grep: invalid match pattern: nothing to repeat* (glob)
   [1]
 
+invalid revset syntax
+
+  $ hg log -r 'diff()'
+  hg: parse error: diff takes at least 1 argument
+  [255]
+  $ hg log -r 'diff(:)'
+  hg: parse error: diff requires a string pattern
+  [255]
+  $ hg log -r 'diff("re:**test**")'
+  hg: parse error: invalid regular expression: nothing to repeat* (glob)
+  [255]
+
 simple
 
   $ hg grep -r tip:0 '.*'
@@ -321,14 +333,61 @@
    }
   ]
 
+diff of each revision for reference
+
+  $ hg log -p -T'== rev: {rev} ==\n'
+  == rev: 4 ==
+  diff -r 95040cfd017d -r 914fa752cdea port
+  --- a/port	Thu Jan 01 00:00:03 1970 +0000
+  +++ b/port	Thu Jan 01 00:00:04 1970 +0000
+  @@ -1,4 +1,3 @@
+   export
+   vaportight
+   import/export
+  -import/export
+  
+  == rev: 3 ==
+  diff -r 3b325e3481a1 -r 95040cfd017d port
+  --- a/port	Thu Jan 01 00:00:02 1970 +0000
+  +++ b/port	Thu Jan 01 00:00:03 1970 +0000
+  @@ -1,3 +1,4 @@
+   export
+   vaportight
+   import/export
+  +import/export
+  
+  == rev: 2 ==
+  diff -r 8b20f75c1585 -r 3b325e3481a1 port
+  --- a/port	Thu Jan 01 00:00:01 1970 +0000
+  +++ b/port	Thu Jan 01 00:00:02 1970 +0000
+  @@ -1,2 +1,3 @@
+  -import
+   export
+  +vaportight
+  +import/export
+  
+  == rev: 1 ==
+  diff -r f31323c92170 -r 8b20f75c1585 port
+  --- a/port	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/port	Thu Jan 01 00:00:01 1970 +0000
+  @@ -1,1 +1,2 @@
+   import
+  +export
+  
+  == rev: 0 ==
+  diff -r 000000000000 -r f31323c92170 port
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/port	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +import
+  
+
 all
 
   $ hg grep --traceback --all -nu port port
   port:4:4:-:spam:import/export
   port:3:4:+:eggs:import/export
   port:2:1:-:spam:import
-  port:2:2:-:spam:export
-  port:2:1:+:spam:export
   port:2:2:+:spam:vaportight
   port:2:3:+:spam:import/export
   port:1:2:+:eggs:export
@@ -369,26 +428,6 @@
     "user": "spam"
    },
    {
-    "change": "-",
-    "date": [2, 0],
-    "lineno": 2,
-    "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
-    "path": "port",
-    "rev": 2,
-    "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
-    "user": "spam"
-   },
-   {
-    "change": "+",
-    "date": [2, 0],
-    "lineno": 1,
-    "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
-    "path": "port",
-    "rev": 2,
-    "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
-    "user": "spam"
-   },
-   {
     "change": "+",
     "date": [2, 0],
     "lineno": 2,
@@ -460,8 +499,6 @@
   port:4:4:-:spam:import/export
   port:3:4:+:eggs:import/export
   port:2:1:-:spam:import
-  port:2:2:-:spam:export
-  port:2:1:+:spam:export
   port:2:2:+:spam:vaportight
   port:2:3:+:spam:import/export
   port:1:2:+:eggs:export
@@ -528,6 +565,18 @@
   color:2:-:orange
   color:1:+:orange
 
+revset predicate for "grep --diff"
+
+  $ hg log -qr 'diff("re:^bl...$")'
+  0:203191eb5e21
+  $ hg log -qr 'diff("orange")'
+  1:7c585a21e0d1
+  2:11bd8bc8d653
+  3:e0116d3829f8
+  $ hg log -qr '2:0 & diff("orange")'
+  2:11bd8bc8d653
+  1:7c585a21e0d1
+
 test substring match: '^' should only match at the beginning
 
   $ hg grep -r tip:0 '^.' --config extensions.color= --color debug
@@ -640,6 +689,49 @@
 
   $ cd ..
 
+Moved lines may not be collected by "grep --diff" since it first filters
+the contents to be diffed by the pattern (i.e.
+"diff <(grep pat a) <(grep pat b)", not "diff a b | grep pat").
+This is much faster than generating a full diff per revision. (See the
+sketch after this example.)
+
+  $ hg init moved-line
+  $ cd moved-line
+  $ cat <<'EOF' > a
+  > foo
+  > bar
+  > baz
+  > EOF
+  $ hg ci -Am initial
+  adding a
+  $ cat <<'EOF' > a
+  > bar
+  > baz
+  > foo
+  > EOF
+  $ hg ci -m reorder
+
+  $ hg diff -c 1
+  diff -r a593cc55e81b -r 69789a3b6e80 a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,3 +1,3 @@
+  -foo
+   bar
+   baz
+  +foo
+
+ can't find the move of "foo" at revision 1:
+
+  $ hg grep --diff foo -r1
+  [1]
+
+ "bar" isn't moved at the revisoin 1:
+
+  $ hg grep --diff bar -r1
+  [1]
+
+  $ cd ..
+
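+
A standalone sketch of that filtering (using difflib for illustration, not
Mercurial's implementation): because only the pattern-matching lines are
diffed, a line that merely moves produces identical filtered sequences and
hence no hunks.

  import difflib

  old = ["foo\n", "bar\n", "baz\n"]
  new = ["bar\n", "baz\n", "foo\n"]  # "foo" moved to the end

  def filtered_diff(pattern, a, b):
      fa = [l for l in a if pattern in l]  # grep pattern a
      fb = [l for l in b if pattern in l]  # grep pattern b
      return list(difflib.unified_diff(fa, fb))

  print(filtered_diff("foo", old, new))  # [] -- the move is invisible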
 Test for showing working of allfiles flag
 
   $ hg init sng
@@ -745,3 +837,628 @@
   um:1:unmod
   $ cd ..
 
+--follow with/without --diff and/or paths
+-----------------------------------------
+
+For each test case, we compare the history traversal of "hg log",
+"hg grep --diff", and "hg grep" (--all-files).
+
+"hg grep --diff" should traverse the log in the same way as "hg log".
+"hg grep" (--all-files) is slightly different in that it includes
+unmodified changes.
+
+  $ hg init follow
+  $ cd follow
+
+  $ cat <<'EOF' >> .hg/hgrc
+  > [ui]
+  > logtemplate = '{rev}: {join(files % "{status} {path}", ", ")}\n'
+  > EOF
+
+  $ for f in add0 add0-mod1 add0-rm1 add0-mod2 add0-rm2 add0-mod3 add0-mod4 add0-rm4; do
+  > echo data0 >> $f
+  > done
+  $ hg ci -qAm0
+
+  $ hg cp add0 add0-cp1
+  $ hg cp add0 add0-cp1-mod1
+  $ hg cp add0 add0-cp1-mod1-rm3
+  $ hg rm add0-rm1
+  $ for f in *mod1*; do
+  > echo data1 >> $f
+  > done
+  $ hg ci -qAm1
+
+  $ hg update -q 0
+  $ hg cp add0 add0-cp2
+  $ hg cp add0 add0-cp2-mod2
+  $ hg rm add0-rm2
+  $ for f in *mod2*; do
+  > echo data2 >> $f
+  > done
+  $ hg ci -qAm2
+
+  $ hg update -q 1
+  $ hg cp add0-cp1 add0-cp1-cp3
+  $ hg cp add0-cp1-mod1 add0-cp1-mod1-cp3-mod3
+  $ hg rm add0-cp1-mod1-rm3
+  $ for f in *mod3*; do
+  > echo data3 >> $f
+  > done
+  $ hg ci -qAm3
+
+  $ hg cp add0 add0-cp4
+  $ hg cp add0 add0-cp4-mod4
+  $ hg rm add0-rm4
+  $ for f in *mod4*; do
+  > echo data4 >> $f
+  > done
+
+  $ hg log -Gr':wdir()'
+  o  2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
+  |
+  @  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  |
+  | o  2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
+  | |
+  o |  1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
+  |/
+  o  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+  
+
+follow revision history from wdir parent:
+
+  $ hg log -f
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -f data
+  add0-cp1-mod1-cp3-mod3:3:+:data3
+  add0-mod3:3:+:data3
+  add0-cp1-mod1:1:+:data1
+  add0-cp1-mod1-rm3:1:+:data1
+  add0-mod1:1:+:data1
+  add0:0:+:data0
+  add0-mod1:0:+:data0
+  add0-mod2:0:+:data0
+  add0-mod3:0:+:data0
+  add0-mod4:0:+:data0
+  add0-rm1:0:+:data0
+  add0-rm2:0:+:data0
+  add0-rm4:0:+:data0
+
+  $ hg grep -f data
+  add0:3:data0
+  add0-cp1:3:data0
+  add0-cp1-cp3:3:data0
+  add0-cp1-mod1:3:data0
+  add0-cp1-mod1:3:data1
+  add0-cp1-mod1-cp3-mod3:3:data0
+  add0-cp1-mod1-cp3-mod3:3:data1
+  add0-cp1-mod1-cp3-mod3:3:data3
+  add0-mod1:3:data0
+  add0-mod1:3:data1
+  add0-mod2:3:data0
+  add0-mod3:3:data0
+  add0-mod3:3:data3
+  add0-mod4:3:data0
+  add0-rm2:3:data0
+  add0-rm4:3:data0
+  add0:1:data0
+  add0-cp1:1:data0
+  add0-cp1-mod1:1:data0
+  add0-cp1-mod1:1:data1
+  add0-cp1-mod1-rm3:1:data0
+  add0-cp1-mod1-rm3:1:data1
+  add0-mod1:1:data0
+  add0-mod1:1:data1
+  add0-mod2:1:data0
+  add0-mod3:1:data0
+  add0-mod4:1:data0
+  add0-rm2:1:data0
+  add0-rm4:1:data0
+  add0:0:data0
+  add0-mod1:0:data0
+  add0-mod2:0:data0
+  add0-mod3:0:data0
+  add0-mod4:0:data0
+  add0-rm1:0:data0
+  add0-rm2:0:data0
+  add0-rm4:0:data0
+
+follow revision history from specified revision:
+
+  $ hg log -fr2
+  2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr2 data
+  add0-cp2-mod2:2:+:data2
+  add0-mod2:2:+:data2
+  add0:0:+:data0
+  add0-mod1:0:+:data0
+  add0-mod2:0:+:data0
+  add0-mod3:0:+:data0
+  add0-mod4:0:+:data0
+  add0-rm1:0:+:data0
+  add0-rm2:0:+:data0
+  add0-rm4:0:+:data0
+
+  $ hg grep -fr2 data
+  add0:2:data0
+  add0-cp2:2:data0
+  add0-cp2-mod2:2:data0
+  add0-cp2-mod2:2:data2
+  add0-mod1:2:data0
+  add0-mod2:2:data0
+  add0-mod2:2:data2
+  add0-mod3:2:data0
+  add0-mod4:2:data0
+  add0-rm1:2:data0
+  add0-rm4:2:data0
+  add0:0:data0
+  add0-mod1:0:data0
+  add0-mod2:0:data0
+  add0-mod3:0:data0
+  add0-mod4:0:data0
+  add0-rm1:0:data0
+  add0-rm2:0:data0
+  add0-rm4:0:data0
+
+follow revision history from wdir:
+
+  $ hg log -fr'wdir()'
+  2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+ BROKEN: should not abort because of removed file
+  $ hg grep --diff -fr'wdir()' data
+  add0-cp4-mod4:2147483647:+:data4
+  add0-mod4:2147483647:+:data4
+  add0-rm4:2147483647:-:abort: add0-rm4@None: not found in manifest!
+  [255]
+
+  $ hg grep -fr'wdir()' data
+  add0:2147483647:data0
+  add0-cp1:2147483647:data0
+  add0-cp1-cp3:2147483647:data0
+  add0-cp1-mod1:2147483647:data0
+  add0-cp1-mod1:2147483647:data1
+  add0-cp1-mod1-cp3-mod3:2147483647:data0
+  add0-cp1-mod1-cp3-mod3:2147483647:data1
+  add0-cp1-mod1-cp3-mod3:2147483647:data3
+  add0-cp4:2147483647:data0
+  add0-cp4-mod4:2147483647:data0
+  add0-cp4-mod4:2147483647:data4
+  add0-mod1:2147483647:data0
+  add0-mod1:2147483647:data1
+  add0-mod2:2147483647:data0
+  add0-mod3:2147483647:data0
+  add0-mod3:2147483647:data3
+  add0-mod4:2147483647:data0
+  add0-mod4:2147483647:data4
+  add0-rm2:2147483647:data0
+  add0:3:data0
+  add0-cp1:3:data0
+  add0-cp1-cp3:3:data0
+  add0-cp1-mod1:3:data0
+  add0-cp1-mod1:3:data1
+  add0-cp1-mod1-cp3-mod3:3:data0
+  add0-cp1-mod1-cp3-mod3:3:data1
+  add0-cp1-mod1-cp3-mod3:3:data3
+  add0-mod1:3:data0
+  add0-mod1:3:data1
+  add0-mod2:3:data0
+  add0-mod3:3:data0
+  add0-mod3:3:data3
+  add0-mod4:3:data0
+  add0-rm2:3:data0
+  add0-rm4:3:data0
+  add0:1:data0
+  add0-cp1:1:data0
+  add0-cp1-mod1:1:data0
+  add0-cp1-mod1:1:data1
+  add0-cp1-mod1-rm3:1:data0
+  add0-cp1-mod1-rm3:1:data1
+  add0-mod1:1:data0
+  add0-mod1:1:data1
+  add0-mod2:1:data0
+  add0-mod3:1:data0
+  add0-mod4:1:data0
+  add0-rm2:1:data0
+  add0-rm4:1:data0
+  add0:0:data0
+  add0-mod1:0:data0
+  add0-mod2:0:data0
+  add0-mod3:0:data0
+  add0-mod4:0:data0
+  add0-rm1:0:data0
+  add0-rm2:0:data0
+  add0-rm4:0:data0
+
+follow revision history from multiple revisions:
+
+  $ hg log -fr'1+2'
+  2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
+  1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr'1+2' data
+  add0-cp2-mod2:2:+:data2
+  add0-mod2:2:+:data2
+  add0-cp1-mod1:1:+:data1
+  add0-cp1-mod1-rm3:1:+:data1
+  add0-mod1:1:+:data1
+  add0:0:+:data0
+  add0-mod1:0:+:data0
+  add0-mod2:0:+:data0
+  add0-mod3:0:+:data0
+  add0-mod4:0:+:data0
+  add0-rm1:0:+:data0
+  add0-rm2:0:+:data0
+  add0-rm4:0:+:data0
+
+  $ hg grep -fr'1+2' data
+  add0:2:data0
+  add0-cp2:2:data0
+  add0-cp2-mod2:2:data0
+  add0-cp2-mod2:2:data2
+  add0-mod1:2:data0
+  add0-mod2:2:data0
+  add0-mod2:2:data2
+  add0-mod3:2:data0
+  add0-mod4:2:data0
+  add0-rm1:2:data0
+  add0-rm4:2:data0
+  add0:1:data0
+  add0-cp1:1:data0
+  add0-cp1-mod1:1:data0
+  add0-cp1-mod1:1:data1
+  add0-cp1-mod1-rm3:1:data0
+  add0-cp1-mod1-rm3:1:data1
+  add0-mod1:1:data0
+  add0-mod1:1:data1
+  add0-mod2:1:data0
+  add0-mod3:1:data0
+  add0-mod4:1:data0
+  add0-rm2:1:data0
+  add0-rm4:1:data0
+  add0:0:data0
+  add0-mod1:0:data0
+  add0-mod2:0:data0
+  add0-mod3:0:data0
+  add0-mod4:0:data0
+  add0-rm1:0:data0
+  add0-rm2:0:data0
+  add0-rm4:0:data0
+
+follow file history from wdir parent, unmodified in wdir:
+
+  $ hg log -f add0-mod3
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -f data add0-mod3
+  add0-mod3:3:+:data3
+  add0-mod3:0:+:data0
+
+  $ hg grep -f data add0-mod3
+  add0-mod3:3:data0
+  add0-mod3:3:data3
+  add0-mod3:1:data0
+  add0-mod3:0:data0
+
+follow file history from wdir parent, modified in wdir:
+
+  $ hg log -f add0-mod4
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -f data add0-mod4
+  add0-mod4:0:+:data0
+
+  $ hg grep -f data add0-mod4
+  add0-mod4:3:data0
+  add0-mod4:1:data0
+  add0-mod4:0:data0
+
+follow file history from wdir parent, copied but unmodified:
+
+  $ hg log -f add0-cp1-cp3
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -f data add0-cp1-cp3
+  add0:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -f data add0-cp1-cp3
+  add0-cp1-cp3:3:data0
+
+follow file history from wdir parent, copied and modified:
+
+  $ hg log -f add0-cp1-mod1-cp3-mod3
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -f data add0-cp1-mod1-cp3-mod3
+  add0-cp1-mod1-cp3-mod3:3:+:data3
+  add0-cp1-mod1:1:+:data1
+  add0:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -f data add0-cp1-mod1-cp3-mod3
+  add0-cp1-mod1-cp3-mod3:3:data0
+  add0-cp1-mod1-cp3-mod3:3:data1
+  add0-cp1-mod1-cp3-mod3:3:data3
+
+follow file history from wdir parent, copied in wdir:
+
+  $ hg log -f add0-cp4
+  abort: cannot follow nonexistent file: "add0-cp4"
+  [255]
+
+  $ hg grep --diff -f data add0-cp4
+  abort: cannot follow nonexistent file: "add0-cp4"
+  [255]
+
+ BROKEN: maybe better to abort
+  $ hg grep -f data add0-cp4
+  [1]
+
+follow file history from wdir parent, removed:
+
+  $ hg log -f add0-cp1-mod1-rm3
+  abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
+  [255]
+
+  $ hg grep --diff -f data add0-cp1-mod1-rm3
+  abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
+  [255]
+
+ BROKEN: maybe better to abort
+  $ hg grep -f data add0-cp1-mod1-rm3
+  add0-cp1-mod1-rm3:1:data0
+  add0-cp1-mod1-rm3:1:data1
+
+follow file history from wdir parent (explicit), removed:
+
+  $ hg log -fr. add0-cp1-mod1-rm3
+  abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
+  [255]
+
+  $ hg grep --diff -fr. data add0-cp1-mod1-rm3
+  abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
+  [255]
+
+ BROKEN: should abort
+  $ hg grep -fr. data add0-cp1-mod1-rm3
+  add0-cp1-mod1-rm3:1:data0
+  add0-cp1-mod1-rm3:1:data1
+
+follow file history from wdir parent, removed in wdir:
+
+  $ hg log -f add0-rm4
+  abort: cannot follow file not in parent revision: "add0-rm4"
+  [255]
+
+  $ hg grep --diff -f data add0-rm4
+  abort: cannot follow file not in parent revision: "add0-rm4"
+  [255]
+
+ BROKEN: should abort
+  $ hg grep -f data add0-rm4
+  add0-rm4:3:data0
+  add0-rm4:1:data0
+  add0-rm4:0:data0
+
+follow file history from wdir parent (explicit), removed in wdir:
+
+  $ hg log -fr. add0-rm4
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr. data add0-rm4
+  add0-rm4:0:+:data0
+
+  $ hg grep -fr. data add0-rm4
+  add0-rm4:3:data0
+  add0-rm4:1:data0
+  add0-rm4:0:data0
+
+follow file history from wdir parent, multiple files:
+
+  $ hg log -f add0-mod3 add0-cp1-mod1
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -f data add0-mod3 add0-cp1-mod1
+  add0-mod3:3:+:data3
+  add0-cp1-mod1:1:+:data1
+  add0:0:+:data0
+  add0-mod3:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -f data add0-mod3 add0-cp1-mod1
+  add0-cp1-mod1:3:data0
+  add0-cp1-mod1:3:data1
+  add0-mod3:3:data0
+  add0-mod3:3:data3
+  add0-cp1-mod1:1:data0
+  add0-cp1-mod1:1:data1
+  add0-mod3:1:data0
+  add0-mod3:0:data0
+
+follow file history from specified revision, modified:
+
+  $ hg log -fr2 add0-mod2
+  2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr2 data add0-mod2
+  add0-mod2:2:+:data2
+  add0-mod2:0:+:data0
+
+  $ hg grep -fr2 data add0-mod2
+  add0-mod2:2:data0
+  add0-mod2:2:data2
+  add0-mod2:0:data0
+
+follow file history from specified revision, copied but unmodified:
+
+  $ hg log -fr2 add0-cp2
+  2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr2 data add0-cp2
+  add0:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -fr2 data add0-cp2
+  add0-cp2:2:data0
+
+follow file history from specified revision, copied and modified:
+
+  $ hg log -fr2 add0-cp2-mod2
+  2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr2 data add0-cp2-mod2
+  add0-cp2-mod2:2:+:data2
+  add0:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -fr2 data add0-cp2-mod2
+  add0-cp2-mod2:2:data0
+  add0-cp2-mod2:2:data2
+
+follow file history from specified revision, removed:
+
+  $ hg log -fr2 add0-rm2
+  abort: cannot follow file not in any of the specified revisions: "add0-rm2"
+  [255]
+
+  $ hg grep --diff -fr2 data add0-rm2
+  abort: cannot follow file not in any of the specified revisions: "add0-rm2"
+  [255]
+
+ BROKEN: should abort
+  $ hg grep -fr2 data add0-rm2
+  add0-rm2:0:data0
+
+follow file history from specified revision, multiple files:
+
+  $ hg log -fr2 add0-cp2 add0-mod2
+  2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr2 data add0-cp2 add0-mod2
+  add0-mod2:2:+:data2
+  add0:0:+:data0
+  add0-mod2:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -fr2 data add0-cp2 add0-mod2
+  add0-cp2:2:data0
+  add0-mod2:2:data0
+  add0-mod2:2:data2
+  add0-mod2:0:data0
+
+follow file history from wdir, unmodified:
+
+  $ hg log -fr'wdir()' add0-mod3
+  2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr'wdir()' data add0-mod3
+  add0-mod3:3:+:data3
+  add0-mod3:0:+:data0
+
+  $ hg grep -fr'wdir()' data add0-mod3
+  add0-mod3:2147483647:data0
+  add0-mod3:2147483647:data3
+  add0-mod3:3:data0
+  add0-mod3:3:data3
+  add0-mod3:1:data0
+  add0-mod3:0:data0
+
+follow file history from wdir, modified:
+
+  $ hg log -fr'wdir()' add0-mod4
+  2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr'wdir()' data add0-mod4
+  add0-mod4:2147483647:+:data4
+  add0-mod4:0:+:data0
+
+  $ hg grep -fr'wdir()' data add0-mod4
+  add0-mod4:2147483647:data0
+  add0-mod4:2147483647:data4
+  add0-mod4:3:data0
+  add0-mod4:1:data0
+  add0-mod4:0:data0
+
+follow file history from wdir, copied but unmodified:
+
+  $ hg log -fr'wdir()' add0-cp4
+  2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr'wdir()' data add0-cp4
+  add0:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -fr'wdir()' data add0-cp4
+  add0-cp4:2147483647:data0
+
+follow file history from wdir, copied and modified:
+
+  $ hg log -fr'wdir()' add0-cp4-mod4
+  2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr'wdir()' data add0-cp4-mod4
+  add0-cp4-mod4:2147483647:+:data4
+  add0:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -fr'wdir()' data add0-cp4-mod4
+  add0-cp4-mod4:2147483647:data0
+  add0-cp4-mod4:2147483647:data4
+
+follow file history from wdir, multiple files:
+
+  $ hg log -fr'wdir()' add0-cp4 add0-mod4 add0-mod3
+  2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
+  3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
+  0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
+
+  $ hg grep --diff -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3
+  add0-mod4:2147483647:+:data4
+  add0-mod3:3:+:data3
+  add0:0:+:data0
+  add0-mod3:0:+:data0
+  add0-mod4:0:+:data0
+
+ BROKEN: should follow history across renames
+  $ hg grep -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3
+  add0-cp4:2147483647:data0
+  add0-mod3:2147483647:data0
+  add0-mod3:2147483647:data3
+  add0-mod4:2147483647:data0
+  add0-mod4:2147483647:data4
+  add0-mod3:3:data0
+  add0-mod3:3:data3
+  add0-mod4:3:data0
+  add0-mod3:1:data0
+  add0-mod4:1:data0
+  add0-mod3:0:data0
+  add0-mod4:0:data0
+
+  $ cd ..
--- a/tests/test-help.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-help.t	Tue Oct 20 22:04:04 2020 +0530
@@ -983,6 +983,8 @@
    debugbundle   lists the contents of a bundle
    debugcapabilities
                  lists the capabilities of a remote peer
+   debugchangedfiles
+                 lists the stored file changes for a revision
    debugcheckstate
                  validate the correctness of the current dirstate
    debugcolor    show available color, effects or style
--- a/tests/test-histedit-edit.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-histedit-edit.t	Tue Oct 20 22:04:04 2020 +0530
@@ -311,7 +311,7 @@
   > def reposetup(ui, repo):
   >     class commitfailure(repo.__class__):
   >         def commit(self, *args, **kwargs):
-  >             raise error.Abort('emulating unexpected abort')
+  >             raise error.Abort(b'emulating unexpected abort')
   >     repo.__class__ = commitfailure
   > EOF
   $ cat >> .hg/hgrc <<EOF
--- a/tests/test-import-bypass.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-import-bypass.t	Tue Oct 20 22:04:04 2020 +0530
@@ -26,7 +26,7 @@
 and '--edit')
 
   $ hg import --bypass --exact --edit ../test.diff
-  abort: cannot use --exact with --edit
+  abort: cannot specify both --exact and --edit
   [255]
   $ hg import --bypass --exact ../test.diff
   applying ../test.diff
@@ -188,13 +188,13 @@
 Test unsupported combinations
 
   $ hg import --bypass --no-commit ../test.diff
-  abort: cannot use --no-commit with --bypass
+  abort: cannot specify both --no-commit and --bypass
   [255]
   $ hg import --bypass --similarity 50 ../test.diff
   abort: cannot use --similarity with --bypass
   [255]
   $ hg import --exact --prefix dir/ ../test.diff
-  abort: cannot use --exact with --prefix
+  abort: cannot specify both --exact and --prefix
   [255]
 
 Test commit editor
--- a/tests/test-import.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-import.t	Tue Oct 20 22:04:04 2020 +0530
@@ -444,7 +444,7 @@
 
   $ hg clone -r0 a b -q
   $ hg --cwd b import --no-commit --secret ../exported-tip.patch
-  abort: cannot use --no-commit with --secret
+  abort: cannot specify both --no-commit and --secret
   [255]
   $ hg --cwd b import --secret ../exported-tip.patch
   applying ../exported-tip.patch
--- a/tests/test-install.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-install.t	Tue Oct 20 22:04:04 2020 +0530
@@ -187,6 +187,14 @@
 #if py3 ensurepip
   $ "$PYTHON" -m venv installenv >> pip.log
 
+Hack: Debian does something a bit different in ensurepip.bootstrap. It makes
+pip think the 'wheel' wheel is installed, so pip offers to build wheels; when
+it actually tries, it shells out to run `python3 -u <setup.py>`, which
+*doesn't* see the 'wheel' wheel, and so it fails with an invalid command
+'bdist_wheel'. To fix this, we just delete the wheel from where Debian put it
+in our virtual env. Then pip doesn't think it's installed and doesn't try to
+build.
+  $ rm installenv/share/python-wheels/wheel-*.whl >/dev/null 2>&1 || true
+
 Note: we use this weird path to run pip and hg to avoid platform differences,
 since it's bin on most platforms but Scripts on Windows.
   $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
@@ -214,7 +222,7 @@
   no problems detected
 #endif
 
-#if no-py3 virtualenv
+#if py2virtualenv
 
 Note: --no-site-packages is deprecated, but some places have an
 ancient virtualenv from their linux distro or similar and it's not yet
--- a/tests/test-journal-share.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-journal-share.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,3 +1,10 @@
+#testcases safe normal
+
+#if safe
+  $ echo "[format]"         >> $HGRCPATH
+  $ echo "exp-share-safe = True" >> $HGRCPATH
+#endif
+
 Journal extension test: tests the share extension support
 
   $ cat >> testmocks.py << EOF
--- a/tests/test-largefiles.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-largefiles.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1005,7 +1005,7 @@
   getting changed largefiles
   3 largefiles updated, 0 removed
   5 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  8 additional largefiles cached
+  7 additional largefiles cached
 
   $ rm "${USERCACHE}"/*
   $ hg clone --all-largefiles -u 0 a a-clone0
@@ -1013,7 +1013,7 @@
   getting changed largefiles
   2 largefiles updated, 0 removed
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  9 additional largefiles cached
+  8 additional largefiles cached
   $ hg -R a-clone0 sum
   parent: 0:30d30fe6a5be 
    add files
@@ -1047,7 +1047,7 @@
 
   $ rm "${USERCACHE}"/*
   $ hg clone --all-largefiles -U a a-clone-u
-  11 additional largefiles cached
+  10 additional largefiles cached
   $ hg -R a-clone-u sum
   parent: -1:000000000000  (no revision checked out)
   branch: default
--- a/tests/test-lfconvert.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-lfconvert.t	Tue Oct 20 22:04:04 2020 +0530
@@ -389,17 +389,17 @@
   $ rm largefiles-repo/.hg/largefiles/*
   $ hg lfconvert --to-normal issue3519 normalized3519
   initializing destination normalized3519
-  anotherlarge: largefile 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3 not available from file:/*/$TESTTMP/largefiles-repo (glob)
-  stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
-  stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
-  sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
+  large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob)
   large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob)
-  sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
+  sub/maybelarge.dat: largefile 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c not available from file:/*/$TESTTMP/largefiles-repo (glob)
   large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob)
   stuff/maybelarge.dat: largefile 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c not available from file:/*/$TESTTMP/largefiles-repo (glob)
   large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob)
-  sub/maybelarge.dat: largefile 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c not available from file:/*/$TESTTMP/largefiles-repo (glob)
-  large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob)
+  sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
+  sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
+  stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
+  anotherlarge: largefile 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3 not available from file:/*/$TESTTMP/largefiles-repo (glob)
+  stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob)
   0 additional largefiles cached
   11 largefiles failed to download
   abort: all largefiles must be present locally
--- a/tests/test-lfs-serve.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-lfs-serve.t	Tue Oct 20 22:04:04 2020 +0530
@@ -360,9 +360,11 @@
   # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
   *** runcommand debugprocessors lfs.bin -R ../server
   registered processor '0x8000'
+  registered processor '0x800'
   registered processor '0x2000'
   *** runcommand debugprocessors nonlfs2.txt -R ../server
   registered processor '0x8000'
+  registered processor '0x800'
   registered processor '0x2000'
   *** runcommand config extensions --cwd ../server
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -371,6 +373,7 @@
   # LFS not enabled- revlogs don't have 0x2000 flag
   *** runcommand debugprocessors nonlfs3.txt
   registered processor '0x8000'
+  registered processor '0x800'
   *** runcommand config extensions
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
 
@@ -413,9 +416,11 @@
   # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
   *** runcommand debugprocessors lfs.bin -R ../server
   registered processor '0x8000'
+  registered processor '0x800'
   registered processor '0x2000'
   *** runcommand debugprocessors nonlfs2.txt -R ../server
   registered processor '0x8000'
+  registered processor '0x800'
   registered processor '0x2000'
   *** runcommand config extensions --cwd ../server
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -424,6 +429,7 @@
   # LFS enabled without requirement- revlogs have 0x2000 flag
   *** runcommand debugprocessors nonlfs3.txt
   registered processor '0x8000'
+  registered processor '0x800'
   registered processor '0x2000'
   *** runcommand config extensions
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -432,6 +438,7 @@
   # LFS disabled locally- revlogs don't have 0x2000 flag
   *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
   registered processor '0x8000'
+  registered processor '0x800'
   *** runcommand config extensions --cwd ../nonlfs
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
   extensions.lfs=!
--- a/tests/test-lock.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-lock.py	Tue Oct 20 22:04:04 2020 +0530
@@ -169,121 +169,6 @@
         state.assertpostreleasecalled(True)
         state.assertlockexists(False)
 
-    def testinheritlock(self):
-        d = tempfile.mkdtemp(dir=encoding.getcwd())
-        parentstate = teststate(self, d)
-        parentlock = parentstate.makelock()
-        parentstate.assertacquirecalled(True)
-
-        # set up lock inheritance
-        with parentlock.inherit() as lockname:
-            parentstate.assertreleasecalled(True)
-            parentstate.assertpostreleasecalled(False)
-            parentstate.assertlockexists(True)
-
-            childstate = teststate(self, d, pidoffset=1)
-            childlock = childstate.makelock(parentlock=lockname)
-            childstate.assertacquirecalled(True)
-
-            childlock.release()
-            childstate.assertreleasecalled(True)
-            childstate.assertpostreleasecalled(False)
-            childstate.assertlockexists(True)
-
-            parentstate.resetacquirefn()
-
-        parentstate.assertacquirecalled(True)
-
-        parentlock.release()
-        parentstate.assertreleasecalled(True)
-        parentstate.assertpostreleasecalled(True)
-        parentstate.assertlockexists(False)
-
-    def testmultilock(self):
-        d = tempfile.mkdtemp(dir=encoding.getcwd())
-        state0 = teststate(self, d)
-        lock0 = state0.makelock()
-        state0.assertacquirecalled(True)
-
-        with lock0.inherit() as lock0name:
-            state0.assertreleasecalled(True)
-            state0.assertpostreleasecalled(False)
-            state0.assertlockexists(True)
-
-            state1 = teststate(self, d, pidoffset=1)
-            lock1 = state1.makelock(parentlock=lock0name)
-            state1.assertacquirecalled(True)
-
-            # from within lock1, acquire another lock
-            with lock1.inherit() as lock1name:
-                # since the file on disk is lock0's this should have the same
-                # name
-                self.assertEqual(lock0name, lock1name)
-
-                state2 = teststate(self, d, pidoffset=2)
-                lock2 = state2.makelock(parentlock=lock1name)
-                state2.assertacquirecalled(True)
-
-                lock2.release()
-                state2.assertreleasecalled(True)
-                state2.assertpostreleasecalled(False)
-                state2.assertlockexists(True)
-
-                state1.resetacquirefn()
-
-            state1.assertacquirecalled(True)
-
-            lock1.release()
-            state1.assertreleasecalled(True)
-            state1.assertpostreleasecalled(False)
-            state1.assertlockexists(True)
-
-        lock0.release()
-
-    def testinheritlockfork(self):
-        d = tempfile.mkdtemp(dir=encoding.getcwd())
-        parentstate = teststate(self, d)
-        parentlock = parentstate.makelock()
-        parentstate.assertacquirecalled(True)
-
-        # set up lock inheritance
-        with parentlock.inherit() as lockname:
-            childstate = teststate(self, d, pidoffset=1)
-            childlock = childstate.makelock(parentlock=lockname)
-            childstate.assertacquirecalled(True)
-
-            # fork the child lock
-            forkchildlock = copy.copy(childlock)
-            forkchildlock._pidoffset += 1
-            forkchildlock.release()
-            childstate.assertreleasecalled(False)
-            childstate.assertpostreleasecalled(False)
-            childstate.assertlockexists(True)
-
-            # release the child lock
-            childlock.release()
-            childstate.assertreleasecalled(True)
-            childstate.assertpostreleasecalled(False)
-            childstate.assertlockexists(True)
-
-        parentlock.release()
-
-    def testinheritcheck(self):
-        d = tempfile.mkdtemp(dir=encoding.getcwd())
-        state = teststate(self, d)
-
-        def check():
-            raise error.LockInheritanceContractViolation('check failed')
-
-        lock = state.makelock(inheritchecker=check)
-        state.assertacquirecalled(True)
-
-        with self.assertRaises(error.LockInheritanceContractViolation):
-            with lock.inherit():
-                pass
-
-        lock.release()
-
     def testfrequentlockunlock(self):
         """This tests whether lock acquisition fails as expected, even if
         (1) lock can't be acquired (makelock fails by EEXIST), and
--- a/tests/test-log.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-log.t	Tue Oct 20 22:04:04 2020 +0530
@@ -504,14 +504,50 @@
   0 (false !)
 
 follow files from the specified revisions with missing patterns
-(BROKEN: should follow copies from e@4)
 
   $ hg log -T '{rev}\n' -fr4 e x
-  4
-  2 (false !)
+  abort: cannot follow file not in any of the specified revisions: "x"
+  [255]
+
+follow files from the specified revisions with directory patterns
+(BROKEN: should follow copies from dir/b@2)
+
+  $ hg log -T '{rev}\n' -fr2 dir/b dir
+  2
   1 (false !)
   0 (false !)
 
+follow files from multiple revisions, but the pattern is missing in
+one of the specified revisions
+
+  $ hg log -T '{rev}\n' -fr'2+4' dir/b e
+  e: no such file in rev f8954cd4dc1f
+  dir/b: no such file in rev 7e4639b4691b
+  4
+  2
+  1
+  0
+
+follow files from multiple revisions, and the pattern matches a file in
+one revision but matches a directory in another:
+(BROKEN: should follow copies from dir/b@2 and dir/b/g@5)
+(BROKEN: revision 4 should not be included since dir/b/g@5 is unchanged)
+
+  $ mkdir -p dir/b
+  $ hg mv g dir/b
+  $ hg ci -m 'make dir/b a directory'
+
+  $ hg log -T '{rev}\n' -fr'2+5' dir/b
+  5
+  4
+  3 (false !)
+  2
+  1 (false !)
+  0 (false !)
+
+  $ hg --config extensions.strip= strip -r. --no-backup
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
 follow files from the specified revisions across copies with -p/--patch
 
   $ hg log -T '== rev: {rev},{file_copies % " {source}->{name}"} ==\n' -fpr 4 e g
@@ -2295,18 +2331,46 @@
    1 files changed, 1 insertions(+), 0 deletions(-)
   
 
- BROKEN: added file should exist in wdir
   $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat d1/f2
+  == 2147483647 ==
+   d1/f2 |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
+  
+
+  $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat f1-copy
+  == 2147483647 ==
+   f1-copy |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
+  
+  == 0 ==
+   d1/f1 |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
+  
+
+  $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat notfound
+  abort: cannot follow file not in any of the specified revisions: "notfound"
+  [255]
+
+follow files from wdir and non-wdir revision:
+
+  $ hg log -T '{rev}\n' -fr'wdir()+.' f1-copy
+  f1-copy: no such file in rev 65624cd9070a
+  2147483647
+  0
+
+follow added/removed files from wdir parent
+
+  $ hg log -T '{rev}\n' -f d1/f2
   abort: cannot follow nonexistent file: "d1/f2"
   [255]
 
- BROKEN: copied file should exist in wdir
-  $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat f1-copy
+  $ hg log -T '{rev}\n' -f f1-copy
   abort: cannot follow nonexistent file: "f1-copy"
   [255]
 
-  $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat notfound
-  notfound: $ENOENT$
+  $ hg log -T '{rev}\n' -f .d6/f1
+  abort: cannot follow file not in parent revision: ".d6/f1"
+  [255]
 
   $ hg revert -aqC
 
--- a/tests/test-merge-changedelete.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-merge-changedelete.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,3 +1,15 @@
+#testcases newfilenode old
+
+#if newfilenode
+Enable the config option
+------------------------
+
+  $ cat >> $HGRCPATH <<EOF
+  > [experimental]
+  > merge-track-salvaged = True
+  > EOF
+#endif
+
 Tests for change/delete conflicts, including:
 b5605d88dc27: Make ui.prompt repeat on "unrecognized response" again
  (issue897)
@@ -83,11 +95,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "u")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -148,11 +162,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "r")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -226,11 +242,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "r")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -288,11 +306,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "u")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -337,11 +357,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "r")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "r")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -382,11 +404,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "r")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "r")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -428,11 +452,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "u")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -485,11 +511,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "u")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -544,11 +572,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "u")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
@@ -600,11 +630,13 @@
     ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be)
     other path: file1 (node 0000000000000000000000000000000000000000)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file2 (state "u")
     local path: file2 (hash 0000000000000000000000000000000000000000, flags "")
     ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
     other path: file2 (node e7c1328648519852e723de86c0c0525acd779257)
     extra: ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff
+    extra: merge-removal-candidate = yes
   file: file3 (state "u")
     local path: file3 (hash d5b0a58bc47161b1b8a831084b366f757c4f0b11, flags "")
     ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4)
--- a/tests/test-merge-criss-cross.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-merge-criss-cross.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,3 +1,15 @@
+#testcases old newfilenode
+
+#if newfilenode
+Enable the config option
+------------------------
+
+  $ cat >> $HGRCPATH <<EOF
+  > [experimental]
+  > merge-track-salvaged = True
+  > EOF
+#endif
+
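+(Reader's note: in the output below, lines ending in "(newfilenode !)" or
+"(old !)" are only required when the corresponding testcase is active; this
+is the test runner's usual way of sharing one transcript between variants.)
+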
 Criss cross merging
 
   $ hg init criss-cross
@@ -78,9 +90,9 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 0f6b37dbe527, local: 3b08d01b0ab5+, remote: adfe50279922
-   preserving f2 for resolve of f2
    f1: remote is newer -> g
   getting f1
+   preserving f2 for resolve of f2
    f2: versions differ -> m (premerge)
   picked tool ':dump' for f2 (binary False symlink False changedelete False)
   merging f2
@@ -149,8 +161,14 @@
    f1: versions differ -> m
    f2: remote unchanged -> k
   
-  auction for merging merge bids
+  auction for merging merge bids (2 ancestors)
+   list of bids for f1:
+     remote is newer -> g
+     versions differ -> m
    f1: picking 'get' action
+   list of bids for f2:
+     remote unchanged -> k
+     versions differ -> m
    f2: picking 'keep' action
   end of auction
   
@@ -192,8 +210,14 @@
    f1: versions differ -> m
    f2: remote is newer -> g
   
-  auction for merging merge bids
+  auction for merging merge bids (2 ancestors)
+   list of bids for f1:
+     remote unchanged -> k
+     versions differ -> m
    f1: picking 'keep' action
+   list of bids for f2:
+     remote is newer -> g
+     versions differ -> m
    f2: picking 'get' action
   end of auction
   
@@ -230,7 +254,7 @@
   calculating bids for ancestor 40663881a6dd
   resolving manifests
   
-  auction for merging merge bids
+  auction for merging merge bids (2 ancestors)
    f1: picking 'get' action
    f2: picking 'keep' action
   end of auction
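
(Editorial note: with two ancestors, the bid merge now prints the ancestor
count and the per-file list of bids before picking an action, hence the
expanded auction output in these hunks.)
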
@@ -257,8 +281,14 @@
    f1: versions differ -> m
    f2: remote unchanged -> k
   
-  auction for merging merge bids
+  auction for merging merge bids (2 ancestors)
+   list of bids for f1:
+     remote is newer -> g
+     versions differ -> m
    f1: picking 'get' action
+   list of bids for f2:
+     remote unchanged -> k
+     versions differ -> m
    f2: picking 'keep' action
   end of auction
   
@@ -343,7 +373,7 @@
   calculating bids for ancestor b211bbc6eb3c
   resolving manifests
   
-  auction for merging merge bids
+  auction for merging merge bids (2 ancestors)
    x: multiple bids for merge action:
     versions differ -> m
     versions differ -> m
@@ -413,6 +443,8 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 11b5b303e36c, local: c0ef19750a22+, remote: 6ca01f7342b9
+   d1/a: ancestor missing, remote missing -> kn
+   d1/b: ancestor missing, remote missing -> kn
    d2/b: remote created -> g
   
   calculating bids for ancestor 154e6000f54e
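
(Editorial note: "kn" is the new "keep new" action: files missing from both
the ancestor and the remote are kept instead of removed, which is what flips
the d1/a and d1/b decisions in the auction below.)
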
@@ -430,17 +462,470 @@
    d1/b: other deleted -> r
    d2/b: remote created -> g
   
-  auction for merging merge bids
-   d1/a: consensus for r
-   d1/b: consensus for r
+  auction for merging merge bids (2 ancestors)
+   list of bids for d1/a:
+     ancestor missing, remote missing -> kn
+     other deleted -> r
+   d1/a: picking 'keep new' action
+   list of bids for d1/b:
+     ancestor missing, remote missing -> kn
+     other deleted -> r
+   d1/b: picking 'keep new' action
+   list of bids for d2/b:
+     remote created -> g
+     remote created -> g
    d2/b: consensus for g
   end of auction
   
-   d1/a: other deleted -> r
-  removing d1/a
-   d1/b: other deleted -> r
-  removing d1/b
    d2/b: remote created -> g
   getting d2/b
-  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+   d1/a: ancestor missing, remote missing -> kn
+   d1/b: ancestor missing, remote missing -> kn
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+
+Check that removal reversion does not go unnoticed
+==================================================
+
+On a merge, a file can be removed and the user can revert that removal. This
+means the user has made an explicit choice to keep the file, reverting the
+removal even though the merge algorithm wanted to remove it.
+Based on this, when we do criss-cross merges, the merge algorithm should not
+choose to remove the file again, since in one of the merges the user made an
+explicit choice to revert the removal.
+The following test cases demonstrate how the merge algorithm fails to take
+into account the explicit choice made by the user to revert the removal, and
+removes the file again on criss-cross merging.
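+
+(Sketch of the shape built below: from root-commit, one head updates the-file
+while the other deletes it; the two heads are then merged in all four
+combinations (from either side, keeping or deleting the file). A later merge
+of any two of those merges sees two greatest common ancestors, the
+criss-cross case, as the "hg log -G" output further down shows.)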
+
+"Simple" case where the filenode changes
+----------------------------------------
+
+  $ cd ..
+  $ hg init criss-cross-merge-reversal-with-update
+  $ cd criss-cross-merge-reversal-with-update
+  $ echo the-file > the-file
+  $ echo other-file > other-file
+  $ hg add the-file other-file
+  $ hg ci -m 'root-commit'
+  $ echo foo >> the-file
+  $ echo bar >> other-file
+  $ hg ci -m 'updating-both-file'
+  $ hg up 'desc("root-commit")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg rm the-file
+  $ hg ci -m 'delete-the-file'
+  created new head
+  $ hg log -G -T '{node|short} {desc}\n'
+  @  7801bc9b9899 delete-the-file
+  |
+  | o  9b610631ab29 updating-both-file
+  |/
+  o  955800955977 root-commit
+  
+
+Do all the merge combinations (from the deleted or the updated side × keeping or deleting the file)
+
+  $ hg update 'desc("delete-the-file")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("updating-both-file")' -t :local
+  1 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg debugmergestate
+  local (working copy): 7801bc9b9899de5e304bd162cafde9b78e10ab9b
+  other (merge rev): 9b610631ab29024c5f44af7d2c19658ef8f8f071
+  file: the-file (state "r")
+    local path: the-file (hash 0000000000000000000000000000000000000000, flags "")
+    ancestor path: the-file (node 4b69178b9bdae28b651393b46e631427a72f217a)
+    other path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
+    extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
+    extra: merge-removal-candidate = yes
+  extra: other-file (filenode-source = other)
+  $ hg ci -m "merge-deleting-the-file-from-deleted"
+  $ hg manifest
+  other-file
+  $ hg debugrevlogindex the-file
+     rev linkrev nodeid       p1           p2
+       0       0 4b69178b9bda 000000000000 000000000000
+       1       1 59e363a07dc8 4b69178b9bda 000000000000
+
+  $ hg update 'desc("updating-both-file")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("delete-the-file")' -t :other
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg debugmergestate
+  local (working copy): 9b610631ab29024c5f44af7d2c19658ef8f8f071
+  other (merge rev): 7801bc9b9899de5e304bd162cafde9b78e10ab9b
+  file: the-file (state "r")
+    local path: the-file (hash 6d2e02da5a9fe0691363dc6b573845fa271eaa35, flags "")
+    ancestor path: the-file (node 4b69178b9bdae28b651393b46e631427a72f217a)
+    other path: the-file (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
+    extra: merge-removal-candidate = yes
+  $ hg ci -m "merge-deleting-the-file-from-updated"
+  created new head
+  $ hg manifest
+  other-file
+  $ hg debugrevlogindex the-file
+     rev linkrev nodeid       p1           p2
+       0       0 4b69178b9bda 000000000000 000000000000
+       1       1 59e363a07dc8 4b69178b9bda 000000000000
+
+  $ hg update 'desc("delete-the-file")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("updating-both-file")' -t :other
+  1 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg debugmergestate
+  local (working copy): 7801bc9b9899de5e304bd162cafde9b78e10ab9b
+  other (merge rev): 9b610631ab29024c5f44af7d2c19658ef8f8f071
+  file: the-file (state "r")
+    local path: the-file (hash 0000000000000000000000000000000000000000, flags "")
+    ancestor path: the-file (node 4b69178b9bdae28b651393b46e631427a72f217a)
+    other path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
+    extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
+    extra: merge-removal-candidate = yes
+  extra: other-file (filenode-source = other)
+  $ hg ci -m "merge-keeping-the-file-from-deleted"
+  created new head
+  $ hg manifest
+  other-file
+  the-file
+
+  $ hg debugrevlogindex the-file
+     rev linkrev nodeid       p1           p2
+       0       0 4b69178b9bda 000000000000 000000000000
+       1       1 59e363a07dc8 4b69178b9bda 000000000000
+       2       5 885af55420b3 59e363a07dc8 000000000000 (newfilenode !)
+
+  $ hg update 'desc("updating-both-file")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (newfilenode !)
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  $ hg merge 'desc("delete-the-file")' -t :local
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg debugmergestate
+  local (working copy): 9b610631ab29024c5f44af7d2c19658ef8f8f071
+  other (merge rev): 7801bc9b9899de5e304bd162cafde9b78e10ab9b
+  file: the-file (state "r")
+    local path: the-file (hash 6d2e02da5a9fe0691363dc6b573845fa271eaa35, flags "")
+    ancestor path: the-file (node 4b69178b9bdae28b651393b46e631427a72f217a)
+    other path: the-file (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = 955800955977bd6c103836ee3e437276e940a589
+    extra: merge-removal-candidate = yes
+  $ hg ci -m "merge-keeping-the-file-from-updated"
+  created new head
+  $ hg manifest
+  other-file
+  the-file
+
+XXX: This should create a new filenode because the user explicitly decided to
+keep the file. If we reuse the same filenode, future merges (mostly criss-cross
+ones) will think that the file remained unchanged and the user's explicit
+choice will not be taken into consideration.
+  $ hg debugrevlogindex the-file
+     rev linkrev nodeid       p1           p2
+       0       0 4b69178b9bda 000000000000 000000000000
+       1       1 59e363a07dc8 4b69178b9bda 000000000000
+       2       5 885af55420b3 59e363a07dc8 000000000000 (newfilenode !)
+
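+(Reading the index above: in the newfilenode variant the kept file gains a
+third filelog revision, 885af55420b3 with parent 59e363a07dc8, so later
+criss-cross merges can tell the salvaged file apart from an unchanged one;
+in the old variant the index stops at rev 1 and the explicit choice stays
+invisible, which is what the XXX above complains about.)
+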
+  $ hg log -G -T '{node|short} {desc}\n'
+  @    5e3eccec60d8 merge-keeping-the-file-from-updated
+  |\
+  +---o  38a4c3e7cac8 merge-keeping-the-file-from-deleted (newfilenode !)
+  +---o  e9b708131723 merge-keeping-the-file-from-deleted (old !)
+  | |/
+  +---o  a4e0e44229dc merge-deleting-the-file-from-updated
+  | |/
+  +---o  adfd88e5d7d3 merge-deleting-the-file-from-deleted
+  | |/
+  | o  7801bc9b9899 delete-the-file
+  | |
+  o |  9b610631ab29 updating-both-file
+  |/
+  o  955800955977 root-commit
+  
+
+Then merge the resulting merges together (leading to a criss-cross situation).
+Check that the conflict is properly detected.
+
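+(Summary of the expectations checked below: deletion × deletion and
+keeping × keeping merge cleanly, while any deletion × keeping pair should
+conflict; the pairs marked BROKEN are deletion × keeping cases where the
+conflict is currently missed.)
+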
+(merging two deletions together → no conflict)
+
+  $ hg update --clean 'desc("merge-deleting-the-file-from-deleted")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge          'desc("merge-deleting-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  $ hg debugmergestate
+  no merge state found
+
+(merging a deletion with keeping → conflict)
+
+  $ hg update --clean 'desc("merge-deleting-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+#if newfilenode
+  $ hg merge          'desc("merge-keeping-the-file-from-deleted")'
+  file 'the-file' was deleted in local [working copy] but was modified in other [merge rev].
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
+  What do you want to do? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+#else
+  $ hg merge          'desc("merge-keeping-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
+#endif
+  $ ls -1
+  other-file
+  the-file (newfilenode !)
+
+#if newfilenode
+  $ hg debugmergestate
+  local (working copy): adfd88e5d7d3d3e22bdd26512991ee64d59c1d8f
+  other (merge rev): 38a4c3e7cac8c294ecb0a7a85a05464e9836ca78
+  file: the-file (state "u")
+    local path: the-file (hash 0000000000000000000000000000000000000000, flags "")
+    ancestor path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
+    other path: the-file (node 885af55420b35d7bf3bbd6f546615295bfe6544a)
+    extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
+    extra: merge-removal-candidate = yes
+#else
+  $ hg debugmergestate
+  local (working copy): adfd88e5d7d3d3e22bdd26512991ee64d59c1d8f
+  other (merge rev): e9b7081317232edce73f7ad5ae0b7807ff5c326a
+  extra: the-file (merge-removal-candidate = yes)
+#endif
+
+(merging a deletion with keeping → conflict)
+BROKEN: this should result in a conflict
+
+  $ hg update --clean 'desc("merge-deleting-the-file-from-deleted")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (newfilenode !)
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  $ hg merge          'desc("merge-keeping-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  $ hg debugmergestate
+  local (working copy): adfd88e5d7d3d3e22bdd26512991ee64d59c1d8f
+  other (merge rev): 5e3eccec60d88f94a7ba57c351f32cb24c15fe0c
+  extra: the-file (merge-removal-candidate = yes)
+
+(merging two deletions together → no conflict)
+
+  $ hg update --clean 'desc("merge-deleting-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge          'desc("merge-deleting-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  $ hg debugmergestate
+  no merge state found
+
+(merging a deletion with keeping → conflict)
+
+  $ hg update --clean 'desc("merge-deleting-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+#if newfilenode
+  $ hg merge          'desc("merge-keeping-the-file-from-deleted")'
+  file 'the-file' was deleted in local [working copy] but was modified in other [merge rev].
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
+  What do you want to do? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+#else
+  $ hg merge          'desc("merge-keeping-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+#endif
+
+  $ ls -1
+  other-file
+  the-file (newfilenode !)
+#if newfilenode
+  $ hg debugmergestate
+  local (working copy): a4e0e44229dc130be2915b92c957c093f8c7ee3e
+  other (merge rev): 38a4c3e7cac8c294ecb0a7a85a05464e9836ca78
+  file: the-file (state "u")
+    local path: the-file (hash 0000000000000000000000000000000000000000, flags "")
+    ancestor path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
+    other path: the-file (node 885af55420b35d7bf3bbd6f546615295bfe6544a)
+    extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
+    extra: merge-removal-candidate = yes
+#else
+  $ hg debugmergestate
+  local (working copy): a4e0e44229dc130be2915b92c957c093f8c7ee3e
+  other (merge rev): e9b7081317232edce73f7ad5ae0b7807ff5c326a
+  extra: the-file (merge-removal-candidate = yes)
+#endif
+
+(merging a deletion with keeping → conflict)
+BROKEN: this should result in a conflict
+
+  $ hg update --clean 'desc("merge-deleting-the-file-from-updated")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (newfilenode !)
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  $ hg merge          'desc("merge-keeping-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  $ hg debugmergestate
+  local (working copy): a4e0e44229dc130be2915b92c957c093f8c7ee3e
+  other (merge rev): 5e3eccec60d88f94a7ba57c351f32cb24c15fe0c
+  extra: the-file (merge-removal-candidate = yes)
+
+(merging two "keeping" together → no conflict)
+
+  $ hg update --clean 'desc("merge-keeping-the-file-from-updated")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge          'desc("merge-keeping-the-file-from-deleted")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (newfilenode !)
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  the-file
+#if newfilenode
+  $ hg debugmergestate
+  local (working copy): 5e3eccec60d88f94a7ba57c351f32cb24c15fe0c
+  other (merge rev): 38a4c3e7cac8c294ecb0a7a85a05464e9836ca78
+  extra: the-file (filenode-source = other)
+#else
+  $ hg debugmergestate
+  no merge state found
+#endif
+
+(merging a deletion with keeping → conflict)
+BROKEN: this should result in a conflict
+
+  $ hg update --clean 'desc("merge-keeping-the-file-from-updated")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (newfilenode !)
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  $ hg merge          'desc("merge-deleting-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  the-file
+  $ hg debugmergestate
+  local (working copy): 5e3eccec60d88f94a7ba57c351f32cb24c15fe0c
+  other (merge rev): adfd88e5d7d3d3e22bdd26512991ee64d59c1d8f
+  extra: the-file (merge-removal-candidate = yes)
+
+(merging a deletion with keeping → conflict)
+BROKEN: this should result in a conflict
+
+  $ hg update --clean 'desc("merge-keeping-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge          'desc("merge-deleting-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  the-file
+  $ hg debugmergestate
+  local (working copy): 5e3eccec60d88f94a7ba57c351f32cb24c15fe0c
+  other (merge rev): a4e0e44229dc130be2915b92c957c093f8c7ee3e
+  extra: the-file (merge-removal-candidate = yes)
+
+(merging two "keeping" together → no conflict)
+
+  $ hg update --clean 'desc("merge-keeping-the-file-from-deleted")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (newfilenode !)
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved (old !)
+  $ hg merge          'desc("merge-keeping-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ ls -1
+  other-file
+  the-file
+  $ hg debugmergestate
+  no merge state found
+
+(merging a deletion with keeping → conflict)
+
+  $ hg update --clean 'desc("merge-keeping-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+#if newfilenode
+  $ hg merge          'desc("merge-deleting-the-file-from-deleted")'
+  file 'the-file' was deleted in other [merge rev] but was modified in local [working copy].
+  You can use (c)hanged version, (d)elete, or leave (u)nresolved.
+  What do you want to do? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+#else
+  $ hg merge          'desc("merge-deleting-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+#endif
+  $ ls -1
+  other-file
+  the-file
+
+#if newfilenode
+  $ hg debugmergestate
+  local (working copy): 38a4c3e7cac8c294ecb0a7a85a05464e9836ca78 (newfilenode !)
+  local (working copy): e9b7081317232edce73f7ad5ae0b7807ff5c326a (old !)
+  other (merge rev): adfd88e5d7d3d3e22bdd26512991ee64d59c1d8f
+  file: the-file (state "u")
+    local path: the-file (hash 6d2e02da5a9fe0691363dc6b573845fa271eaa35, flags "")
+    ancestor path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
+    other path: the-file (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
+    extra: merge-removal-candidate = yes
+#else
+  $ hg debugmergestate
+  local (working copy): e9b7081317232edce73f7ad5ae0b7807ff5c326a
+  other (merge rev): adfd88e5d7d3d3e22bdd26512991ee64d59c1d8f
+  extra: the-file (merge-removal-candidate = yes)
+#endif
+
+(merging a deletion with keeping → conflict)
+
+  $ hg update --clean 'desc("merge-keeping-the-file-from-deleted")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+#if newfilenode
+  $ hg merge          'desc("merge-deleting-the-file-from-updated")'
+  file 'the-file' was deleted in other [merge rev] but was modified in local [working copy].
+  You can use (c)hanged version, (d)elete, or leave (u)nresolved.
+  What do you want to do? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+#else
+  $ hg merge          'desc("merge-deleting-the-file-from-updated")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+#endif
+  $ ls -1
+  other-file
+  the-file
+#if newfilenode
+  $ hg debugmergestate
+  local (working copy): 38a4c3e7cac8c294ecb0a7a85a05464e9836ca78
+  other (merge rev): a4e0e44229dc130be2915b92c957c093f8c7ee3e
+  file: the-file (state "u")
+    local path: the-file (hash 6d2e02da5a9fe0691363dc6b573845fa271eaa35, flags "")
+    ancestor path: the-file (node 59e363a07dc876278f0e41756236f30213b6b460)
+    other path: the-file (node 0000000000000000000000000000000000000000)
+    extra: ancestorlinknode = 9b610631ab29024c5f44af7d2c19658ef8f8f071
+    extra: merge-removal-candidate = yes
+#else
+  $ hg debugmergestate
+  local (working copy): e9b7081317232edce73f7ad5ae0b7807ff5c326a
+  other (merge rev): a4e0e44229dc130be2915b92c957c093f8c7ee3e
+  extra: the-file (merge-removal-candidate = yes)
+#endif
--- a/tests/test-merge1.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-merge1.t	Tue Oct 20 22:04:04 2020 +0530
@@ -360,7 +360,7 @@
   > )
   > def applyupdates(orig, *args, **kwargs):
   >     orig(*args, **kwargs)
-  >     raise error.Abort('intentional aborting')
+  >     raise error.Abort(b'intentional aborting')
   > def extsetup(ui):
   >     extensions.wrapfunction(merge, "applyupdates", applyupdates)
   > EOF
--- a/tests/test-mq-qfold.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-mq-qfold.t	Tue Oct 20 22:04:04 2020 +0530
@@ -153,7 +153,7 @@
   > def reposetup(ui, repo):
   >     class commitfailure(repo.__class__):
   >         def commit(self, *args, **kwargs):
-  >             raise error.Abort('emulating unexpected abort')
+  >             raise error.Abort(b'emulating unexpected abort')
   >     repo.__class__ = commitfailure
   > EOF
 
--- a/tests/test-mq-qnew.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-mq-qnew.t	Tue Oct 20 22:04:04 2020 +0530
@@ -256,7 +256,7 @@
   > def reposetup(ui, repo):
   >     class commitfailure(repo.__class__):
   >         def commit(self, *args, **kwargs):
-  >             raise error.Abort('emulating unexpected abort')
+  >             raise error.Abort(b'emulating unexpected abort')
   >     repo.__class__ = commitfailure
   > EOF
   $ cat >> .hg/hgrc <<EOF
--- a/tests/test-narrow-share.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-narrow-share.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,4 +1,10 @@
 #testcases flat tree
+#testcases safe normal
+
+#if safe
+  $ echo "[format]"         >> $HGRCPATH
+  $ echo "exp-share-safe = True" >> $HGRCPATH
+#endif
 
   $ . "$TESTDIR/narrow-library.sh"
 
--- a/tests/test-obsmarker-template.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-obsmarker-template.t	Tue Oct 20 22:04:04 2020 +0530
@@ -20,13 +20,13 @@
   > obsfatetempl = "{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}; "
   > [alias]
   > tlog = log -G -T '{node|short}\
-  >     {if(predecessors, "\n  Predecessors: {predecessors}")}\
-  >     {if(predecessors, "\n  semi-colon: {join(predecessors, "; ")}")}\
-  >     {if(predecessors, "\n  json: {predecessors|json}")}\
-  >     {if(predecessors, "\n  map: {join(predecessors % "{rev}:{node}", " ")}")}\
-  >     {if(successorssets, "\n  Successors: {successorssets}")}\
-  >     {if(successorssets, "\n  multi-line: {join(successorssets, "\n  multi-line: ")}")}\
-  >     {if(successorssets, "\n  json: {successorssets|json}")}\n'
+  >     \n  Predecessors: {predecessors}\
+  >     \n  semi-colon: {join(predecessors, "; ")}\
+  >     \n  json: {predecessors|json}\
+  >     \n  map: {join(predecessors % "{rev}:{node}", " ")}\
+  >     \n  Successors: {successorssets}\
+  >     \n  multi-line: {join(successorssets, "\n  multi-line: ")}\
+  >     \n  json: {successorssets|json}\n'
   > fatelog = log -G -T '{node|short}\n{if(succsandmarkers, "  Obsfate: {succsandmarkers % "{obsfatetempl}"} \n" )}'
   > fatelogjson = log -G -T '{node|short}\n{if(succsandmarkers, "  Obsfate: {succsandmarkers|json}\n")}'
   > fatelogkw = log -G -T '{node|short}\n{if(obsfate, "{obsfate % "  Obsfate: {fate}\n"}")}'
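
(Editorial note: the tlog alias above now prints the Predecessors/Successors
fields unconditionally instead of guarding them with {if(...)}, which is why
the hunks below gain blocks of empty "Predecessors:" and "Successors:" lines.)
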
@@ -86,12 +86,26 @@
   |    semi-colon: 1:471f378eab4c
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  471f378eab4c
-  |/     Successors: 3:d004c8f274b9
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 3:d004c8f274b9
   |      multi-line: 3:d004c8f274b9
   |      json: [["d004c8f274b9ec480a47a93c10dac5eee63adb78"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
+
   $ hg fatelog
   o  d004c8f274b9
   |
@@ -159,12 +173,26 @@
   |    semi-colon: 2:a468dc9b3633
   |    json: ["a468dc9b36338b14fdb7825f55ce3df4e71517ad"]
   |    map: 2:a468dc9b36338b14fdb7825f55ce3df4e71517ad
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  a468dc9b3633
-  |/     Successors: 3:d004c8f274b9
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 3:d004c8f274b9
   |      multi-line: 3:d004c8f274b9
   |      json: [["d004c8f274b9ec480a47a93c10dac5eee63adb78"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
+
   $ hg fatelog
   o  d004c8f274b9
   |
@@ -180,6 +208,9 @@
   |    semi-colon: 2:a468dc9b3633
   |    json: ["a468dc9b36338b14fdb7825f55ce3df4e71517ad"]
   |    map: 2:a468dc9b36338b14fdb7825f55ce3df4e71517ad
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  a468dc9b3633
   |/     Predecessors: 1:471f378eab4c
   |      semi-colon: 1:471f378eab4c
@@ -189,11 +220,22 @@
   |      multi-line: 3:d004c8f274b9
   |      json: [["d004c8f274b9ec480a47a93c10dac5eee63adb78"]]
   | x  471f378eab4c
-  |/     Successors: 2:a468dc9b3633
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:a468dc9b3633
   |      multi-line: 2:a468dc9b3633
   |      json: [["a468dc9b36338b14fdb7825f55ce3df4e71517ad"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
+
   $ hg fatelog --hidden
   o  d004c8f274b9
   |
@@ -210,15 +252,31 @@
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg tlog
   @  d004c8f274b9
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
+
   $ hg tlog --hidden
   @  d004c8f274b9
   |    Predecessors: 2:a468dc9b3633
   |    semi-colon: 2:a468dc9b3633
   |    json: ["a468dc9b36338b14fdb7825f55ce3df4e71517ad"]
   |    map: 2:a468dc9b36338b14fdb7825f55ce3df4e71517ad
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  a468dc9b3633
   |/     Predecessors: 1:471f378eab4c
   |      semi-colon: 1:471f378eab4c
@@ -228,11 +286,22 @@
   |      multi-line: 3:d004c8f274b9
   |      json: [["d004c8f274b9ec480a47a93c10dac5eee63adb78"]]
   | x  471f378eab4c
-  |/     Successors: 2:a468dc9b3633
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:a468dc9b3633
   |      multi-line: 2:a468dc9b3633
   |      json: [["a468dc9b36338b14fdb7825f55ce3df4e71517ad"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
+
   $ hg fatelog
   @  d004c8f274b9
   |
@@ -429,17 +498,33 @@
   |    semi-colon: 1:471597cad322
   |    json: ["471597cad322d1f659bb169751be9133dad92ef3"]
   |    map: 1:471597cad322d1f659bb169751be9133dad92ef3
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  337fec4d2edc
   |    Predecessors: 1:471597cad322
   |    semi-colon: 1:471597cad322
   |    json: ["471597cad322d1f659bb169751be9133dad92ef3"]
   |    map: 1:471597cad322d1f659bb169751be9133dad92ef3
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  471597cad322
-  |/     Successors: 2:337fec4d2edc 3:f257fde29c7a
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:337fec4d2edc 3:f257fde29c7a
   |      multi-line: 2:337fec4d2edc 3:f257fde29c7a
   |      json: [["337fec4d2edcf0e7a467e35f818234bc620068b5", "f257fde29c7a847c9b607f6e958656d0df0fb15c"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  f257fde29c7a
@@ -457,11 +542,30 @@
 the log
   $ hg tlog
   @  f257fde29c7a
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  337fec4d2edc
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
+
 Predecessors template should show both predecessors as we force their display
 with --hidden
   $ hg tlog --hidden
@@ -470,17 +574,33 @@
   |    semi-colon: 1:471597cad322
   |    json: ["471597cad322d1f659bb169751be9133dad92ef3"]
   |    map: 1:471597cad322d1f659bb169751be9133dad92ef3
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  337fec4d2edc
   |    Predecessors: 1:471597cad322
   |    semi-colon: 1:471597cad322
   |    json: ["471597cad322d1f659bb169751be9133dad92ef3"]
   |    map: 1:471597cad322d1f659bb169751be9133dad92ef3
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  471597cad322
-  |/     Successors: 2:337fec4d2edc 3:f257fde29c7a
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:337fec4d2edc 3:f257fde29c7a
   |      multi-line: 2:337fec4d2edc 3:f257fde29c7a
   |      json: [["337fec4d2edcf0e7a467e35f818234bc620068b5", "f257fde29c7a847c9b607f6e958656d0df0fb15c"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog --hidden
   @  f257fde29c7a
@@ -641,12 +761,25 @@
   |    semi-colon: 1:471f378eab4c
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  471f378eab4c
-  |/     Successors: 3:eb5a0daa2192
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 3:eb5a0daa2192
   |      multi-line: 3:eb5a0daa2192
   |      json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  eb5a0daa2192
@@ -668,16 +801,33 @@
   |    semi-colon: 2:0dec01379d3b; 1:471f378eab4c
   |    json: ["0dec01379d3be6318c470ead31b1fe7ae7cb53d5", "471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 2:0dec01379d3be6318c470ead31b1fe7ae7cb53d5 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  0dec01379d3b
+  | |    Predecessors:
+  | |    semi-colon:
+  | |    json: []
+  | |    map:
   | |    Successors: 3:eb5a0daa2192
   | |    multi-line: 3:eb5a0daa2192
   | |    json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   | x  471f378eab4c
-  |/     Successors: 3:eb5a0daa2192
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 3:eb5a0daa2192
   |      multi-line: 3:eb5a0daa2192
   |      json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  eb5a0daa2192
@@ -695,9 +845,21 @@
 the log
   $ hg tlog
   @  eb5a0daa2192
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 Predecessors template should show both predecessors as we force their display
 with --hidden
   $ hg tlog --hidden
@@ -706,16 +868,33 @@
   |    semi-colon: 2:0dec01379d3b; 1:471f378eab4c
   |    json: ["0dec01379d3be6318c470ead31b1fe7ae7cb53d5", "471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 2:0dec01379d3be6318c470ead31b1fe7ae7cb53d5 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  0dec01379d3b
+  | |    Predecessors:
+  | |    semi-colon:
+  | |    json: []
+  | |    map:
   | |    Successors: 3:eb5a0daa2192
   | |    multi-line: 3:eb5a0daa2192
   | |    json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   | x  471f378eab4c
-  |/     Successors: 3:eb5a0daa2192
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 3:eb5a0daa2192
   |      multi-line: 3:eb5a0daa2192
   |      json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog --hidden
   @  eb5a0daa2192
@@ -908,18 +1087,34 @@
   |    semi-colon: 1:471f378eab4c
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | *  fdf9bde5129a
   |/     Predecessors: 1:471f378eab4c
   |      semi-colon: 1:471f378eab4c
   |      json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |      map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |      Successors:
+  |      multi-line:
+  |      json: []
   | @  471f378eab4c
-  |/     Successors: 2:fdf9bde5129a; 4:019fadeab383
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:fdf9bde5129a; 4:019fadeab383
   |      multi-line: 2:fdf9bde5129a
   |      multi-line: 4:019fadeab383
   |      json: [["fdf9bde5129a28d4548fadd3f62b265cdd3b7a2e"], ["019fadeab383f6699fa83ad7bdb4d82ed2c0e5ab"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
   $ hg fatelog
   *  019fadeab383
   |
@@ -936,11 +1131,29 @@
 the log
   $ hg tlog
   *  019fadeab383
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  fdf9bde5129a
-  |/
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors:
+  |      multi-line:
+  |      json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   *  019fadeab383
@@ -957,6 +1170,9 @@
   |    semi-colon: 3:65b757b745b9
   |    json: ["65b757b745b935093c87a2bccd877521cccffcbd"]
   |    map: 3:65b757b745b935093c87a2bccd877521cccffcbd
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  65b757b745b9
   |/     Predecessors: 1:471f378eab4c
   |      semi-colon: 1:471f378eab4c
@@ -970,13 +1186,26 @@
   |      semi-colon: 1:471f378eab4c
   |      json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |      map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |      Successors:
+  |      multi-line:
+  |      json: []
   | x  471f378eab4c
-  |/     Successors: 2:fdf9bde5129a; 3:65b757b745b9
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:fdf9bde5129a; 3:65b757b745b9
   |      multi-line: 2:fdf9bde5129a
   |      multi-line: 3:65b757b745b9
   |      json: [["fdf9bde5129a28d4548fadd3f62b265cdd3b7a2e"], ["65b757b745b935093c87a2bccd877521cccffcbd"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog --hidden
   *  019fadeab383
@@ -1177,12 +1406,25 @@
   |    semi-colon: 1:471f378eab4c
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  471f378eab4c
-  |/     Successors: 4:eb5a0daa2192
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 4:eb5a0daa2192
   |      multi-line: 4:eb5a0daa2192
   |      json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  eb5a0daa2192
@@ -1203,16 +1445,33 @@
   |    semi-colon: 2:0dec01379d3b; 1:471f378eab4c
   |    json: ["0dec01379d3be6318c470ead31b1fe7ae7cb53d5", "471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 2:0dec01379d3be6318c470ead31b1fe7ae7cb53d5 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  0dec01379d3b
+  | |    Predecessors:
+  | |    semi-colon:
+  | |    json: []
+  | |    map:
   | |    Successors: 4:eb5a0daa2192
   | |    multi-line: 4:eb5a0daa2192
   | |    json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   | x  471f378eab4c
-  |/     Successors: 4:eb5a0daa2192
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 4:eb5a0daa2192
   |      multi-line: 4:eb5a0daa2192
   |      json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  eb5a0daa2192
@@ -1235,16 +1494,33 @@
   |    semi-colon: 1:471f378eab4c; 3:b7ea6d14e664
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874", "b7ea6d14e664bdc8922221f7992631b50da3fb07"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874 3:b7ea6d14e664bdc8922221f7992631b50da3fb07
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  b7ea6d14e664
+  | |    Predecessors:
+  | |    semi-colon:
+  | |    json: []
+  | |    map:
   | |    Successors: 4:eb5a0daa2192
   | |    multi-line: 4:eb5a0daa2192
   | |    json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   | x  471f378eab4c
-  |/     Successors: 4:eb5a0daa2192
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 4:eb5a0daa2192
   |      multi-line: 4:eb5a0daa2192
   |      json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  eb5a0daa2192
@@ -1261,9 +1537,21 @@
 Predecessors template should show no predecessors as they are both non visible
   $ hg tlog
   @  eb5a0daa2192
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   @  eb5a0daa2192
@@ -1278,6 +1566,9 @@
   |    semi-colon: 1:471f378eab4c; 3:b7ea6d14e664
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874", "b7ea6d14e664bdc8922221f7992631b50da3fb07"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874 3:b7ea6d14e664bdc8922221f7992631b50da3fb07
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  b7ea6d14e664
   | |    Predecessors: 2:0dec01379d3b
   | |    semi-colon: 2:0dec01379d3b
@@ -1287,15 +1578,29 @@
   | |    multi-line: 4:eb5a0daa2192
   | |    json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   | | x  0dec01379d3b
-  | |/     Successors: 3:b7ea6d14e664
+  | |/     Predecessors:
+  | |      semi-colon:
+  | |      json: []
+  | |      map:
+  | |      Successors: 3:b7ea6d14e664
   | |      multi-line: 3:b7ea6d14e664
   | |      json: [["b7ea6d14e664bdc8922221f7992631b50da3fb07"]]
   | x  471f378eab4c
-  |/     Successors: 4:eb5a0daa2192
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 4:eb5a0daa2192
   |      multi-line: 4:eb5a0daa2192
   |      json: [["eb5a0daa21923bbf8caeb2c42085b9e463861fd0"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog --hidden
   @  eb5a0daa2192
@@ -1491,12 +1796,25 @@
   |    semi-colon: 1:471f378eab4c
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  471f378eab4c
-  |/     Successors: 2:7a230b46bf61
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:7a230b46bf61
   |      multi-line: 2:7a230b46bf61
   |      json: [["7a230b46bf61e50b30308c6cfd7bd1269ef54702"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  7a230b46bf61
@@ -1511,9 +1829,21 @@
 Predecessors template should show no predecessors as they are non visible
   $ hg tlog
   @  7a230b46bf61
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   @  7a230b46bf61
@@ -1528,12 +1858,25 @@
   |    semi-colon: 1:471f378eab4c
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  471f378eab4c
-  |/     Successors: 2:7a230b46bf61
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 2:7a230b46bf61
   |      multi-line: 2:7a230b46bf61
   |      json: [["7a230b46bf61e50b30308c6cfd7bd1269ef54702"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog --hidden
   @  7a230b46bf61
@@ -1620,9 +1963,21 @@
 
   $ hg tlog
   @  f897c6137566
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   @  f897c6137566
@@ -1640,6 +1995,9 @@
   |    semi-colon: 2:0dec01379d3b
   |    json: ["0dec01379d3be6318c470ead31b1fe7ae7cb53d5"]
   |    map: 2:0dec01379d3be6318c470ead31b1fe7ae7cb53d5
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  0dec01379d3b
   | |    Predecessors: 1:471f378eab4c
   | |    semi-colon: 1:471f378eab4c
@@ -1658,7 +2016,13 @@
   |      multi-line: 2:0dec01379d3b
   |      json: [["0dec01379d3be6318c470ead31b1fe7ae7cb53d5"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  f897c6137566
@@ -1678,10 +2042,25 @@
   |    semi-colon: 1:471f378eab4c
   |    json: ["471f378eab4c5e25f6c77f785b27c936efb22874"]
   |    map: 1:471f378eab4c5e25f6c77f785b27c936efb22874
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  471f378eab4c
-  |/
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors:
+  |      multi-line:
+  |      json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  f897c6137566
@@ -1695,9 +2074,21 @@
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg tlog
   o  f897c6137566
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   @  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   o  f897c6137566
@@ -1711,6 +2102,9 @@
   |    semi-colon: 2:0dec01379d3b
   |    json: ["0dec01379d3be6318c470ead31b1fe7ae7cb53d5"]
   |    map: 2:0dec01379d3be6318c470ead31b1fe7ae7cb53d5
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  0dec01379d3b
   | |    Predecessors: 1:471f378eab4c
   | |    semi-colon: 1:471f378eab4c
@@ -1729,7 +2123,13 @@
   |      multi-line: 2:0dec01379d3b
   |      json: [["0dec01379d3be6318c470ead31b1fe7ae7cb53d5"]]
   @  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
 Check other fatelog implementations
 -----------------------------------
@@ -1962,24 +2362,58 @@
   |    semi-colon: 6:4a004186e638
   |    json: ["4a004186e63889f20cb16434fcbd72220bd1eace"]
   |    map: 6:4a004186e63889f20cb16434fcbd72220bd1eace
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | *  b18bc8331526
   |/     Predecessors: 6:4a004186e638
   |      semi-colon: 6:4a004186e638
   |      json: ["4a004186e63889f20cb16434fcbd72220bd1eace"]
   |      map: 6:4a004186e63889f20cb16434fcbd72220bd1eace
+  |      Successors:
+  |      multi-line:
+  |      json: []
   | *  ba2ed02b0c9a
-  | |
+  | |    Predecessors:
+  | |    semi-colon:
+  | |    json: []
+  | |    map:
+  | |    Successors:
+  | |    multi-line:
+  | |    json: []
   | x  4a004186e638
-  |/     Successors: 8:b18bc8331526; 9:0b997eb7ceee
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 8:b18bc8331526; 9:0b997eb7ceee
   |      multi-line: 8:b18bc8331526
   |      multi-line: 9:0b997eb7ceee
   |      json: [["b18bc8331526a22cbb1801022bd1555bf291c48b"], ["0b997eb7ceeee06200a02f8aab185979092d514e"]]
   *  dd800401bd8c
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  f897c6137566
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
   $ hg fatelog
   @  0b997eb7ceee
   |
@@ -2001,16 +2435,25 @@
   |    semi-colon: 6:4a004186e638
   |    json: ["4a004186e63889f20cb16434fcbd72220bd1eace"]
   |    map: 6:4a004186e63889f20cb16434fcbd72220bd1eace
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | *  b18bc8331526
   |/     Predecessors: 6:4a004186e638
   |      semi-colon: 6:4a004186e638
   |      json: ["4a004186e63889f20cb16434fcbd72220bd1eace"]
   |      map: 6:4a004186e63889f20cb16434fcbd72220bd1eace
+  |      Successors:
+  |      multi-line:
+  |      json: []
   | *  ba2ed02b0c9a
   | |    Predecessors: 4:9bd10a0775e4
   | |    semi-colon: 4:9bd10a0775e4
   | |    json: ["9bd10a0775e478708cada5f176ec6de654359ce7"]
   | |    map: 4:9bd10a0775e478708cada5f176ec6de654359ce7
+  | |    Successors:
+  | |    multi-line:
+  | |    json: []
   | x  4a004186e638
   |/     Predecessors: 4:9bd10a0775e4
   |      semi-colon: 4:9bd10a0775e4
@@ -2025,8 +2468,15 @@
   |    semi-colon: 4:9bd10a0775e4
   |    json: ["9bd10a0775e478708cada5f176ec6de654359ce7"]
   |    map: 4:9bd10a0775e478708cada5f176ec6de654359ce7
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  9bd10a0775e4
-  |/     Successors: 5:dd800401bd8c 6:4a004186e638 7:ba2ed02b0c9a
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 5:dd800401bd8c 6:4a004186e638 7:ba2ed02b0c9a
   |      multi-line: 5:dd800401bd8c 6:4a004186e638 7:ba2ed02b0c9a
   |      json: [["dd800401bd8c79d815329277739e433e883f784e", "4a004186e63889f20cb16434fcbd72220bd1eace", "ba2ed02b0c9a56b9fdbc4e79c7e57866984d8a1f"]]
   o  f897c6137566
@@ -2034,6 +2484,9 @@
   |    semi-colon: 2:0dec01379d3b
   |    json: ["0dec01379d3be6318c470ead31b1fe7ae7cb53d5"]
   |    map: 2:0dec01379d3be6318c470ead31b1fe7ae7cb53d5
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | x  0dec01379d3b
   | |    Predecessors: 1:471f378eab4c
   | |    semi-colon: 1:471f378eab4c
@@ -2052,7 +2505,13 @@
   |      multi-line: 2:0dec01379d3b
   |      json: [["0dec01379d3be6318c470ead31b1fe7ae7cb53d5"]]
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
   $ hg fatelog --hidden
   @  0b997eb7ceee
   |
@@ -2107,30 +2566,58 @@
   |    semi-colon: 4:9bd10a0775e4
   |    json: ["9bd10a0775e478708cada5f176ec6de654359ce7"]
   |    map: 4:9bd10a0775e478708cada5f176ec6de654359ce7
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | *  0b997eb7ceee
   | |    Predecessors: 4:9bd10a0775e4
   | |    semi-colon: 4:9bd10a0775e4
   | |    json: ["9bd10a0775e478708cada5f176ec6de654359ce7"]
   | |    map: 4:9bd10a0775e478708cada5f176ec6de654359ce7
+  | |    Successors:
+  | |    multi-line:
+  | |    json: []
   * |  b18bc8331526
   |/     Predecessors: 4:9bd10a0775e4
   |      semi-colon: 4:9bd10a0775e4
   |      json: ["9bd10a0775e478708cada5f176ec6de654359ce7"]
   |      map: 4:9bd10a0775e478708cada5f176ec6de654359ce7
+  |      Successors:
+  |      multi-line:
+  |      json: []
   *  dd800401bd8c
   |    Predecessors: 4:9bd10a0775e4
   |    semi-colon: 4:9bd10a0775e4
   |    json: ["9bd10a0775e478708cada5f176ec6de654359ce7"]
   |    map: 4:9bd10a0775e478708cada5f176ec6de654359ce7
+  |    Successors:
+  |    multi-line:
+  |    json: []
   | @  9bd10a0775e4
-  |/     Successors: 5:dd800401bd8c 9:0b997eb7ceee 10:eceed8f98ffc; 5:dd800401bd8c 8:b18bc8331526 10:eceed8f98ffc
+  |/     Predecessors:
+  |      semi-colon:
+  |      json: []
+  |      map:
+  |      Successors: 5:dd800401bd8c 9:0b997eb7ceee 10:eceed8f98ffc; 5:dd800401bd8c 8:b18bc8331526 10:eceed8f98ffc
   |      multi-line: 5:dd800401bd8c 9:0b997eb7ceee 10:eceed8f98ffc
   |      multi-line: 5:dd800401bd8c 8:b18bc8331526 10:eceed8f98ffc
   |      json: [["dd800401bd8c79d815329277739e433e883f784e", "0b997eb7ceeee06200a02f8aab185979092d514e", "eceed8f98ffc4186032e29a6542ab98888ebf68d"], ["dd800401bd8c79d815329277739e433e883f784e", "b18bc8331526a22cbb1801022bd1555bf291c48b", "eceed8f98ffc4186032e29a6542ab98888ebf68d"]]
   o  f897c6137566
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
 
   $ hg fatelog
   *  eceed8f98ffc
@@ -2320,9 +2807,21 @@
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg tlog
   @  471f378eab4c
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
   $ hg fatelog
   @  471f378eab4c
   |    Obsfate: pruned by test (at 1970-01-01 00:00 +0000);
@@ -2361,9 +2860,22 @@
   (hidden revision '471f378eab4c' is pruned)
   $ hg tlog
   @  471f378eab4c
-  |
+  |    Predecessors:
+  |    semi-colon:
+  |    json: []
+  |    map:
+  |    Successors:
+  |    multi-line:
+  |    json: []
   o  ea207398892e
-  
+       Predecessors:
+       semi-colon:
+       json: []
+       map:
+       Successors:
+       multi-line:
+       json: []
+
 # todo: the obsfate output is not ideal
   $ hg fatelog
   @  471f378eab4c
--- a/tests/test-phabricator.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-phabricator.t	Tue Oct 20 22:04:04 2020 +0530
@@ -24,6 +24,11 @@
   > EOF
   $ VCR="$TESTDIR/phabricator"
 
+Without --test-vcr, debugcallconduit reports the missing config instead of claiming invalid arguments:
+  $ echo '{}' | HGRCSKIPREPO= hg debugcallconduit 'conduit.ping'
+  abort: config phabricator.url is required
+  [255]
+
 Error is handled reasonably. We override the phabtoken here so that
 when you're developing changes to phabricator.py you can edit the
 above config and have a real token in the test but not have to edit
@@ -965,5 +970,37 @@
   Differential Revision: https://phab.mercurial-scm.org/D8388
   
   
+Hashes in the messages are updated automatically as phabsend amends and restacks
+them.  This covers both commits that are posted and descendants that are
+restacked.
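+
+(Concretely: the literal hash "133c1c6c6449" in the second commit message is
+rewritten to "f444f060f4d6", the hash of the amended base commit, and the
+untouched descendant is restacked on top, as the log output below shows.)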
 
+  $ cat >> .hg/hgrc << EOF
+  > [experimental]
+  > evolution = all
+  > EOF
+
+  $ echo content > file.txt
+  $ hg ci -m 'base review (generate test for phabsend)'
+  $ echo 'more content' > file.txt
+  $ hg ci -m '133c1c6c6449 is my parent (generate test for phabsend)'
+  $ echo 'even more content' > file.txt
+  $ hg ci -m 'c2874a398f7e is my parent (generate test for phabsend)'
+
+  $ hg phabsend -r 17::18  --test-vcr "$VCR/phabsend-hash-fixes.json"
+  D8945 - created - 133c1c6c6449: base review (generate test for phabsend)
+  D8946 - created - c2874a398f7e: 133c1c6c6449 is my parent (generate test for phabsend)
+  new commits: ['f444f060f4d6']
+  new commits: ['9c9290f945b1']
+  restabilizing 1528c12fa2e4 as b28b20212bd4
+
+  $ hg log -l 3 -Tcompact
+  22[tip]   b28b20212bd4   1970-01-01 00:00 +0000   test
+    9c9290f945b1 is my parent (generate test for phabsend)
+  
+  21   9c9290f945b1   1970-01-01 00:00 +0000   test
+    f444f060f4d6 is my parent (generate test for phabsend)
+  
+  20:16   f444f060f4d6   1970-01-01 00:00 +0000   test
+    base review (generate test for phabsend)
+  
   $ cd ..
--- a/tests/test-profile.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-profile.t	Tue Oct 20 22:04:04 2020 +0530
@@ -87,7 +87,7 @@
 Various statprof formatters work
 
   $ hg --profile --config profiling.statformat=byline sleep 2>../out || cat ../out
-  $ head -n 3 ../out
+  $ grep -v _path_stat ../out | head -n 3
     %   cumulative      self          
    time    seconds   seconds  name    
   * sleepext.py:*:sleep (glob)
--- a/tests/test-pull-bundle.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-pull-bundle.t	Tue Oct 20 22:04:04 2020 +0530
@@ -52,7 +52,7 @@
   > 1.hg BUNDLESPEC=bzip2-v2 heads=ed1b79f46b9a29f5a6efa59cf12fcfca43bead5a bases=bbd179dfa0a71671c253b3ae0aa1513b60d199fa
   > 0.hg BUNDLESPEC=gzip-v2 heads=bbd179dfa0a71671c253b3ae0aa1513b60d199fa
   > EOF
-  $ hg --config blackbox.track=debug --debug serve -p $HGPORT2 -d --pid-file=../repo.pid
+  $ hg --config blackbox.track=debug --debug serve -p $HGPORT2 -d --pid-file=../repo.pid -E ../error.txt
   listening at http://*:$HGPORT2/ (bound to $LOCALIP:$HGPORT2) (glob) (?)
   $ cat ../repo.pid >> $DAEMON_PIDS
   $ cd ..
@@ -64,6 +64,7 @@
   new changesets bbd179dfa0a7 (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat error.txt
   $ cd repo.pullbundle
   $ hg pull -r 1
   pulling from http://localhost:$HGPORT2/
--- a/tests/test-rebase-abort.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rebase-abort.t	Tue Oct 20 22:04:04 2020 +0530
@@ -327,7 +327,7 @@
   $ echo new > a
   $ hg up 1               # user gets an error saying to run hg rebase --abort
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ cat a
@@ -397,20 +397,20 @@
 
   $ hg rebase -s 3 -d tip
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg up .
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg up -C .
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ hg graft 3
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ hg abort
--- a/tests/test-rebase-collapse.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rebase-collapse.t	Tue Oct 20 22:04:04 2020 +0530
@@ -762,7 +762,7 @@
   abort: edit failed: false exited with status 1
   [255]
   $ hg tglog
-  %  3: 63668d570d21 'C'
+  o  3: 63668d570d21 'C'
   |
   | @  2: 82b8abf9c185 'D'
   | |
--- a/tests/test-rebase-inmemory.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rebase-inmemory.t	Tue Oct 20 22:04:04 2020 +0530
@@ -41,7 +41,7 @@
   $ hg cat -r 2 b
   b (no-eol)
   $ hg rebase --debug -r b -d c | grep rebasing
-  rebasing in-memory
+  rebasing in memory
   rebasing 2:db0e82a16a62 "b" (b)
   $ hg tglog
   o  3: ca58782ad1e4 'b'
@@ -101,7 +101,7 @@
   $ hg cat -r 3 e
   somefile (no-eol)
   $ hg rebase --debug -s b -d a | grep rebasing
-  rebasing in-memory
+  rebasing in memory
   rebasing 2:db0e82a16a62 "b" (b)
   $ hg tglog
   o  3: fc055c3b4d33 'b'
@@ -117,7 +117,7 @@
   $ hg cat -r 3 b
   b (no-eol)
   $ hg rebase --debug -s 1 -d 3 | grep rebasing
-  rebasing in-memory
+  rebasing in memory
   rebasing 1:02952614a83d "d" (d)
   rebasing 2:f56b71190a8f "c"
   $ hg tglog
@@ -148,7 +148,7 @@
   $ hg up -C 3
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg rebase -r 3 -d 0 --debug | grep rebasing
-  rebasing in-memory
+  rebasing in memory
   rebasing 3:753feb6fd12a "c" (tip)
   $ hg tglog
   @  3: 844a7de3e617 'c'
@@ -415,8 +415,6 @@
   rebasing 3:055a42cdd887 "d"
   rebasing 4:e860deea161a "e"
   merging e
-  transaction abort!
-  rollback completed
   hit a merge conflict
   [1]
   $ hg diff
@@ -463,12 +461,7 @@
   rebasing 3:055a42cdd887 "d"
   rebasing 4:e860deea161a "e"
   merging e
-  transaction abort!
-  rollback completed
-  hit merge conflicts; re-running rebase without in-memory merge
-  rebasing 2:177f92b77385 "c"
-  rebasing 3:055a42cdd887 "d"
-  rebasing 4:e860deea161a "e"
+  hit merge conflicts; rebasing that commit again in the working copy
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
   unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
@@ -487,9 +480,9 @@
   rebasing 3:055a42cdd887 "d"
   rebasing 4:e860deea161a "e"
   merging e
+  hit merge conflicts; rebasing that commit again in the working copy
   transaction abort!
   rollback completed
-  hit merge conflicts; re-running rebase without in-memory merge
   abort: uncommitted changes
   [255]
   $ cat a
@@ -859,8 +852,7 @@
   $ hg rebase -r . -d 1 --config ui.merge=internal:merge3
   rebasing 2:fb62b706688e "add b to foo" (tip)
   merging foo
-  hit merge conflicts; re-running rebase without in-memory merge
-  rebasing 2:fb62b706688e "add b to foo" (tip)
+  hit merge conflicts; rebasing that commit again in the working copy
   merging foo
   warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
   unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
@@ -893,15 +885,14 @@
   $ hg rebase -r 2 -d 1 -t:merge3
   rebasing 2:b4d249fbf8dd "bye from foo"
   merging foo
-  hit merge conflicts; re-running rebase without in-memory merge
-  rebasing 2:b4d249fbf8dd "bye from foo"
+  hit merge conflicts; rebasing that commit again in the working copy
   merging foo
   warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
   unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
   [1]
   $ hg rebase -r 3 -d 1 -t:merge3
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg resolve --list
   U foo
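
The updated output above reflects a behavior change in in-memory rebase: on a
merge conflict it now redoes only the conflicting commit in the working copy,
instead of aborting the transaction and restarting the whole rebase on disk.
A hedged sketch of that control flow (names are illustrative, not mercurial's
internals):

    class MergeConflict(Exception):
        pass

    def rebase_all(commits, rebase_in_memory, rebase_on_disk):
        # Try each commit in memory; on conflict, fall back to the
        # working copy for that single commit only.
        for commit in commits:
            try:
                rebase_in_memory(commit)
            except MergeConflict:
                print('hit merge conflicts; rebasing that commit again '
                      'in the working copy')
                rebase_on_disk(commit)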
--- a/tests/test-rebase-obsolete.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rebase-obsolete.t	Tue Oct 20 22:04:04 2020 +0530
@@ -2057,7 +2057,7 @@
 
   $ hg rebase -s 3 -d 5
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg rebase --stop --continue
   abort: cannot specify both --stop and --continue
--- a/tests/test-remotefilelog-share.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-remotefilelog-share.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,5 +1,12 @@
 #require no-windows
 
+#testcases safe normal
+
+#if safe
+  $ echo "[format]"         >> $HGRCPATH
+  $ echo "exp-share-safe = True" >> $HGRCPATH
+#endif
+
   $ . "$TESTDIR/remotefilelog-library.sh"
 
   $ cat >> $HGRCPATH <<EOF
--- a/tests/test-rename-merge1.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rename-merge1.t	Tue Oct 20 22:04:04 2020 +0530
@@ -40,10 +40,10 @@
   note: possible conflict - a2 was renamed multiple times to:
    b2
    c2
+   b2: remote created -> g
+  getting b2
    preserving a for resolve of b
   removing a
-   b2: remote created -> g
-  getting b2
    b: remote moved from a -> m (premerge)
   picked tool ':merge' for b (binary False symlink False changedelete False)
   merging a and b to b
--- a/tests/test-rename-merge2.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rename-merge2.t	Tue Oct 20 22:04:04 2020 +0530
@@ -89,6 +89,7 @@
    preserving rev for resolve of rev
   starting 4 threads for background file closing (?)
    b: remote copied from a -> m (premerge)
+  starting 4 threads for background file closing (?)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging a and b to b
   my b@e300d1c794ec+ other b@4ce40f5aca24 ancestor a@924404dff337
@@ -124,10 +125,10 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 86a2aa42fc76+, remote: f4db7e329e71
+   a: remote is newer -> g
+  getting a
    preserving b for resolve of b
    preserving rev for resolve of rev
-   a: remote is newer -> g
-  getting a
    b: local copied/moved from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b and a to b
@@ -241,9 +242,9 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 94b33a1b7f2d+, remote: 4ce40f5aca24
-   preserving rev for resolve of rev
    b: remote created -> g
   getting b
+   preserving rev for resolve of rev
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
@@ -306,11 +307,11 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 94b33a1b7f2d+, remote: bdb19105162a
-   preserving rev for resolve of rev
    a: other deleted -> r
   removing a
    b: remote created -> g
   getting b
+   preserving rev for resolve of rev
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
@@ -422,9 +423,9 @@
   note: possible conflict - a was renamed multiple times to:
    b
    c
-   preserving rev for resolve of rev
    c: remote created -> g
   getting c
+   preserving rev for resolve of rev
    rev: versions differ -> m (premerge)
   picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
   merging rev
@@ -493,10 +494,10 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a
+   a: other deleted -> r
+  removing a
    preserving b for resolve of b
    preserving rev for resolve of rev
-   a: other deleted -> r
-  removing a
   starting 4 threads for background file closing (?)
    b: both created -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
@@ -534,10 +535,10 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a
+   a: remote is newer -> g
+  getting a
    preserving b for resolve of b
    preserving rev for resolve of rev
-   a: remote is newer -> g
-  getting a
    b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
@@ -571,10 +572,10 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a
+   a: other deleted -> r
+  removing a
    preserving b for resolve of b
    preserving rev for resolve of rev
-   a: other deleted -> r
-  removing a
   starting 4 threads for background file closing (?)
    b: both created -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
@@ -612,10 +613,10 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a
+   a: remote is newer -> g
+  getting a
    preserving b for resolve of b
    preserving rev for resolve of rev
-   a: remote is newer -> g
-  getting a
    b: both renamed from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
@@ -653,6 +654,7 @@
    preserving rev for resolve of rev
   starting 4 threads for background file closing (?)
    b: both renamed from a -> m (premerge)
+  starting 4 threads for background file closing (?)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b
   my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor a@924404dff337
@@ -848,10 +850,10 @@
   resolving manifests
    branchmerge: True, force: False, partial: False
    ancestor: 924404dff337, local: 02963e448370+, remote: 2b958612230f
+   c: remote created -> g
+  getting c
    preserving b for resolve of b
    preserving rev for resolve of rev
-   c: remote created -> g
-  getting c
    b: local copied/moved from a -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
   merging b and a to b
--- a/tests/test-rename-rev.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rename-rev.t	Tue Oct 20 22:04:04 2020 +0530
@@ -43,7 +43,7 @@
   A d1/d
     d1/b
 
-Test moved file (not copied)
+Test moved file (not copied) using 'hg cp' command
 
   $ hg co 0
   0 files updated, 0 files merged, 2 files removed, 0 files unresolved
@@ -59,10 +59,40 @@
     d1/b
   R d1/b
 
+Test moved file (not copied) using 'hg mv' command
+
+  $ hg co 0
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mv d1/b d1/d
+  $ hg rm -A d1/b
+  $ hg add d1/d
+  $ hg ci -m 'move d1/b to d1/d'
+  created new head
+  $ hg mv -A --at-rev . d1/b d1/d
+  saved backup bundle to $TESTTMP/.hg/strip-backup/519850c3ea27-153c8fbb-copy.hg
+  $ hg st -C --change .
+  A d1/d
+    d1/b
+  R d1/b
+
+Test moved file (not copied) for which source still exists
+
+  $ hg co 0
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ cp d1/b d1/d
+  $ hg add d1/d
+  $ hg ci -m 'copy d1/b to d1/d'
+  created new head
+  $ hg mv -A --at-rev . d1/b d1/d
+  saved backup bundle to $TESTTMP/.hg/strip-backup/c8d0f6bcf7ca-1c9bb53e-copy.hg
+  $ hg st -C --change .
+  A d1/d
+    d1/b
+
 Test using directory as destination
 
   $ hg co 0
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ cp -R d1 d3
   $ hg add d3
   adding d3/a
--- a/tests/test-resolve.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-resolve.t	Tue Oct 20 22:04:04 2020 +0530
@@ -87,64 +87,6 @@
   $ cd ..
   $ rmdir nested
 
-don't allow marking or unmarking driver-resolved files
-
-  $ cat > $TESTTMP/markdriver.py << EOF
-  > '''mark and unmark files as driver-resolved'''
-  > from mercurial import (
-  >    mergestate,
-  >    pycompat,
-  >    registrar,
-  >    scmutil,
-  > )
-  > cmdtable = {}
-  > command = registrar.command(cmdtable)
-  > @command(b'markdriver',
-  >   [(b'u', b'unmark', None, b'')],
-  >   b'FILE...')
-  > def markdriver(ui, repo, *pats, **opts):
-  >     wlock = repo.wlock()
-  >     opts = pycompat.byteskwargs(opts)
-  >     try:
-  >         ms = mergestate.mergestate.read(repo)
-  >         m = scmutil.match(repo[None], pats, opts)
-  >         for f in ms:
-  >             if not m(f):
-  >                 continue
-  >             if not opts[b'unmark']:
-  >                 ms.mark(f, b'd')
-  >             else:
-  >                 ms.mark(f, b'u')
-  >         ms.commit()
-  >     finally:
-  >         wlock.release()
-  > EOF
-  $ hg --config extensions.markdriver=$TESTTMP/markdriver.py markdriver file1
-  $ hg resolve --list
-  D file1
-  U file2
-  $ hg resolve --mark file1
-  not marking file1 as it is driver-resolved
-this should not print out file1
-  $ hg resolve --mark --all
-  (no more unresolved files -- run "hg resolve --all" to conclude)
-  $ hg resolve --mark 'glob:file*'
-  (no more unresolved files -- run "hg resolve --all" to conclude)
-  $ hg resolve --list
-  D file1
-  R file2
-  $ hg resolve --unmark file1
-  not unmarking file1 as it is driver-resolved
-  (no more unresolved files -- run "hg resolve --all" to conclude)
-  $ hg resolve --unmark --all
-  $ hg resolve --list
-  D file1
-  U file2
-  $ hg --config extensions.markdriver=$TESTTMP/markdriver.py markdriver --unmark file1
-  $ hg resolve --list
-  U file1
-  U file2
-
 resolve the failure
 
   $ echo resolved > file1
@@ -328,6 +270,7 @@
   [
    {
     "commits": [{"label": "working copy", "name": "local", "node": "57653b9f834a4493f7240b0681efcb9ae7cab745"}, {"label": "merge rev", "name": "other", "node": "dc77451844e37f03f5c559e3b8529b2b48d381d1"}],
+    "extras": [],
     "files": [{"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file1", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "60b27f004e454aca81b0480209cce5081ec52390", "local_path": "file1", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file1", "path": "file1", "state": "r"}, {"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file2", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523", "local_path": "file2", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file2", "path": "file2", "state": "u"}]
    }
   ]
--- a/tests/test-revlog-v2.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-revlog-v2.t	Tue Oct 20 22:04:04 2020 +0530
@@ -32,10 +32,10 @@
 Unknown flags to revlog are rejected
 
   >>> with open('.hg/store/00changelog.i', 'wb') as fh:
-  ...     fh.write(b'\x00\x04\xde\xad') and None
+  ...     fh.write(b'\xff\x00\xde\xad') and None
 
   $ hg log
-  abort: unknown flags (0x04) in version 57005 revlog 00changelog.i!
+  abort: unknown flags (0xff00) in version 57005 revlog 00changelog.i!
   [255]
 
   $ cd ..
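
The corrupted header written above decodes as two big-endian 16-bit fields,
flags followed by version, which is what the new abort message reports:

    import struct

    # First four bytes of a revlog index: u16 flags, then u16 version.
    flags, version = struct.unpack('>HH', b'\xff\x00\xde\xad')
    assert flags == 0xff00   # 'unknown flags (0xff00)'
    assert version == 57005  # 0xdead, 'version 57005'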
--- a/tests/test-revset.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-revset.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1448,6 +1448,9 @@
     (string '('))
   hg: parse error: invalid match pattern: (unbalanced parenthesis|missing \),.*) (re)
   [255]
+  $ log 'desc("re:(")'
+  hg: parse error: invalid regular expression: (unbalanced parenthesis|missing \),.*) (re)
+  [255]
   $ try 'grep("\bissue\d+")'
   (func
     (symbol 'grep')
@@ -2885,7 +2888,7 @@
   3 b112 m111 u11  120 0
   0 b12  m111 u112 111 10800
 
-  $ hg log -r 'sort(all(), "-desc -date")'
+  $ hg log -r 'sort(all(), "-desc -node")'
   1 b11  m12  u111 112 7200
   4 b111 m112 u111 110 14400
   3 b112 m111 u11  120 0
@@ -2899,6 +2902,29 @@
   0 b12  m111 u112 111 10800
   2 b111 m11  u12  111 3600
 
+ sort including wdir (rev/-rev has fast path):
+
+  $ hg log -r 'sort(. + wdir(), rev)' -T '{rev}\n'
+  4
+  2147483647
+  $ hg log -r 'sort(. + wdir(), -rev)' -T '{rev}\n'
+  2147483647
+  4
+
+  $ hg log -r 'sort(. + wdir(), "branch rev")' -T '{rev}\n'
+  4
+  2147483647
+  $ hg log -r 'sort(. + wdir(), "branch -rev")' -T '{rev}\n'
+  2147483647
+  4
+
+  $ hg log -r 'sort(. + wdir(), node)' -T '{node}\n'
+  ec7c1c90b589ade8603d5fb619dc6c25173a723f
+  ffffffffffffffffffffffffffffffffffffffff
+  $ hg log -r 'sort(. + wdir(), -node)' -T '{node}\n'
+  ffffffffffffffffffffffffffffffffffffffff
+  ec7c1c90b589ade8603d5fb619dc6c25173a723f
+
  toposort prioritises graph branches
 
   $ hg up 2
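
The wdir() pseudo-revision used in the sort tests above is the fixed sentinel
0x7fffffff, so the rev/-rev fast path can simply treat it as the largest
revision number:

    # Why wdir() sorts last under 'rev': it is represented by the
    # sentinel 2147483647 (0x7fffffff), as the test output shows.
    WDIR_REV = 0x7fffffff
    revs = [4, WDIR_REV]
    assert sorted(revs) == [4, 2147483647]
    assert sorted(revs, reverse=True) == [2147483647, 4]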
--- a/tests/test-rhg.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-rhg.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,5 +1,6 @@
 #require rust
 
+Define an rhg function that will only run if rhg exists
   $ rhg() {
   > if [ -f "$RUNTESTDIR/../rust/target/debug/rhg" ]; then
   >   "$RUNTESTDIR/../rust/target/debug/rhg" "$@"
@@ -8,19 +9,84 @@
   >   exit 80
   > fi
   > }
+
+Unimplemented command
   $ rhg unimplemented-command
+  error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context
+  
+  USAGE:
+      rhg <SUBCOMMAND>
+  
+  For more information try --help
   [252]
+
+Finding root
   $ rhg root
   abort: no repository found in '$TESTTMP' (.hg not found)!
   [255]
+
   $ hg init repository
   $ cd repository
   $ rhg root
   $TESTTMP/repository
+
+Unwritable file descriptor
   $ rhg root > /dev/full
   abort: No space left on device (os error 28)
   [255]
+
+Deleted repository
   $ rm -rf `pwd`
   $ rhg root
   abort: error getting current working directory: $ENOENT$
   [255]
+
+Listing tracked files
+  $ cd $TESTTMP
+  $ hg init repository
+  $ cd repository
+  $ for i in 1 2 3; do
+  >   echo $i >> file$i
+  >   hg add file$i
+  > done
+  > hg commit -m "commit $i" -q
+
+Listing tracked files from root
+  $ rhg files
+  file1
+  file2
+  file3
+
+Listing tracked files from subdirectory
+  $ mkdir -p path/to/directory
+  $ cd path/to/directory
+  $ rhg files
+  ../../../file1
+  ../../../file2
+  ../../../file3
+
+Listing tracked files through broken pipe
+  $ rhg files | head -n 1
+  ../../../file1
+
+Debugging data in an inline index
+  $ cd $TESTTMP
+  $ rm -rf repository
+  $ hg init repository
+  $ cd repository
+  $ for i in 1 2 3; do
+  >   echo $i >> file$i
+  >   hg add file$i
+  >   hg commit -m "commit $i" -q
+  > done
+  $ rhg debugdata -c 2
+  e36fa63d37a576b27a69057598351db6ee5746bd
+  test
+  0 0
+  file3
+  
+  commit 3 (no-eol)
+  $ rhg debugdata -m 2
+  file1\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
+  file2\x005d9299349fc01ddd25d0070d149b124d8f10411e (esc)
+  file3\x002661d26c649684b482d10f91960cc3db683c38b4 (esc)
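
The relative paths printed by 'rhg files' above are the repo-root-relative
names rewritten against the current directory; os.path.relpath expresses the
same transformation (the paths here are illustrative):

    import os

    # From path/to/directory inside the repo, the tracked file 'file1'
    # at the repo root is reported as ../../../file1, as above.
    rel = os.path.relpath('/repo/file1', start='/repo/path/to/directory')
    assert rel == os.path.join('..', '..', '..', 'file1')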
--- a/tests/test-run-tests.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-run-tests.t	Tue Oct 20 22:04:04 2020 +0530
@@ -497,7 +497,7 @@
 ====================
 
   $ rt --retest
-  running 2 tests using 1 parallel processes 
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -512,7 +512,7 @@
   ERROR: test-failure.t output changed
   !
   Failed test-failure.t: output changed
-  # Ran 2 tests, 1 skipped, 1 failed.
+  # Ran 1 tests, 0 skipped, 1 failed.
   python hash seed: * (glob)
   [1]
 
@@ -521,7 +521,7 @@
   $ mkdir output
   $ mv test-failure.t.err output
   $ rt --retest --outputdir output
-  running 2 tests using 1 parallel processes 
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/output/test-failure.t.err
@@ -536,7 +536,7 @@
   ERROR: test-failure.t output changed
   !
   Failed test-failure.t: output changed
-  # Ran 2 tests, 1 skipped, 1 failed.
+  # Ran 1 tests, 0 skipped, 1 failed.
   python hash seed: * (glob)
   [1]
 
@@ -844,6 +844,8 @@
     $ echo 'saved backup bundle to $TESTTMP/foo.hg'
     saved backup bundle to $TESTTMP/*.hg (glob)<
 
+  $ rm test-failure.t
+
 Race condition - test file was modified when test is running
 
   $ TESTRACEDIR=`pwd`
@@ -972,6 +974,25 @@
   python hash seed: * (glob)
   [1]
 
+  $ rt --retest
+  running 1 tests using 1 parallel processes 
+  
+  --- $TESTTMP/test-cases.t
+  +++ $TESTTMP/test-cases.t#b#c.err
+  @@ -6,5 +6,5 @@
+   #endif
+   #if b c
+     $ echo yes
+  -  no
+  +  yes
+   #endif
+  
+  ERROR: test-cases.t#b#c output changed
+  !
+  Failed test-cases.t#b#c: output changed
+  # Ran 1 tests, 0 skipped, 1 failed.
+  python hash seed: * (glob)
+  [1]
   $ rm test-cases.t#b#c.err
   $ rm test-cases.t
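
The changed counts above ('running 1 tests' rather than 2) come from --retest
now limiting the run to tests that previously failed, i.e. those that left an
.err file behind (in --outputdir when one is given). A hedged sketch of that
selection, not run-tests.py's actual code:

    import os

    def retest_selection(tests, outputdir='.'):
        # Keep only tests whose previous run left a .err file behind.
        return [
            t for t in tests
            if os.path.exists(os.path.join(outputdir, t + '.err'))
        ]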
 
--- a/tests/test-share-bookmarks.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-share-bookmarks.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,4 +1,10 @@
 #testcases vfs svfs
+#testcases safe normal
+
+#if safe
+  $ echo "[format]"         >> $HGRCPATH
+  $ echo "exp-share-safe = True" >> $HGRCPATH
+#endif
 
   $ echo "[extensions]"      >> $HGRCPATH
   $ echo "share = "          >> $HGRCPATH
@@ -224,7 +230,7 @@
   > )
   > def _pullbookmarks(orig, pullop):
   >     orig(pullop)
-  >     raise error.HookAbort('forced failure by extension')
+  >     raise error.HookAbort(b'forced failure by extension')
   > def extsetup(ui):
   >     extensions.wrapfunction(exchange, '_pullbookmarks', _pullbookmarks)
   > EOF
@@ -279,3 +285,9 @@
      bm3                       4:62f4ded848e4
      bm4                       5:92793bfc8cad
   $ cd ..
+
+Test that if store is disabled, we drop the bookmarksinstore requirement
+
+  $ hg init brokenrepo --config format.bookmarks-in-store=True --config format.usestore=false
+  ignoring enabled 'format.bookmarks-in-store' config because it is incompatible with disabled 'format.usestore' config
+  ignoring enabled 'format.exp-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-share-safe.t	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,243 @@
+setup
+
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > share =
+  > [format]
+  > exp-share-safe = True
+  > EOF
+
+prepare source repo
+
+  $ hg init source
+  $ cd source
+  $ cat .hg/requires
+  exp-sharesafe
+  $ cat .hg/store/requires
+  dotencode
+  fncache
+  generaldelta
+  revlogv1
+  sparserevlog
+  store
+  $ hg debugrequirements
+  dotencode
+  exp-sharesafe
+  fncache
+  generaldelta
+  revlogv1
+  sparserevlog
+  store
+
+  $ echo a > a
+  $ hg ci -Aqm "added a"
+  $ echo b > b
+  $ hg ci -Aqm "added b"
+
+  $ HGEDITOR=cat hg config --shared
+  abort: repository is not shared; can't use --shared
+  [255]
+  $ cd ..
+
+Create a shared repo and check the requirements are shared and read correctly
+  $ hg share source shared1
+  updating working directory
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd shared1
+  $ cat .hg/requires
+  exp-sharesafe
+  shared
+
+  $ hg debugrequirements -R ../source
+  dotencode
+  exp-sharesafe
+  fncache
+  generaldelta
+  revlogv1
+  sparserevlog
+  store
+
+  $ hg debugrequirements
+  dotencode
+  exp-sharesafe
+  fncache
+  generaldelta
+  revlogv1
+  shared
+  sparserevlog
+  store
+
+  $ echo c > c
+  $ hg ci -Aqm "added c"
+
+Check that config of the source repository is also loaded
+
+  $ hg showconfig ui.curses
+  [1]
+
+  $ echo "[ui]" >> ../source/.hg/hgrc
+  $ echo "curses=true" >> ../source/.hg/hgrc
+
+  $ hg showconfig ui.curses
+  true
+
+However, local .hg/hgrc should override the config set by share source
+
+  $ echo "[ui]" >> .hg/hgrc
+  $ echo "curses=false" >> .hg/hgrc
+
+  $ hg showconfig ui.curses
+  false
+
+  $ HGEDITOR=cat hg config --shared
+  [ui]
+  curses=true
+
+  $ HGEDITOR=cat hg config --local
+  [ui]
+  curses=false
+
+Test that hooks set in the source repository also run in the shared repo
+
+  $ cd ../source
+  $ cat <<EOF >> .hg/hgrc
+  > [extensions]
+  > hooklib=
+  > [hooks]
+  > pretxnchangegroup.reject_merge_commits = \
+  >   python:hgext.hooklib.reject_merge_commits.hook
+  > EOF
+
+  $ cd ..
+  $ hg clone source cloned
+  updating to branch default
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd cloned
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ echo bar > bar
+  $ hg ci -Aqm "added bar"
+  $ hg merge
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "merge commit"
+
+  $ hg push ../source
+  pushing to ../source
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
+  transaction abort!
+  rollback completed
+  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
+  [255]
+
+  $ hg push ../shared1
+  pushing to ../shared1
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
+  transaction abort!
+  rollback completed
+  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
+  [255]
+
+Test that if the share source config is untrusted, we don't read it
+
+  $ cd ../shared1
+
+  $ cat << EOF > $TESTTMP/untrusted.py
+  > from mercurial import scmutil, util
+  > def uisetup(ui):
+  >     class untrustedui(ui.__class__):
+  >         def _trusted(self, fp, f):
+  >             if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
+  >                 return False
+  >             return super(untrustedui, self)._trusted(fp, f)
+  >     ui.__class__ = untrustedui
+  > EOF
+
+  $ hg showconfig hooks
+  hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
+
+  $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
+  [1]
+
+Update the source repository format and check that the shared repo still works
+
+  $ cd ../source
+
+Disable zstd-related tests because zstd is not present in the pure version
+#if zstd
+  $ echo "[format]" >> .hg/hgrc
+  $ echo "revlog-compression=zstd" >> .hg/hgrc
+
+  $ hg debugupgraderepo --run -q
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
+     added: revlog-compression-zstd
+  
+  $ hg log -r .
+  changeset:   1:5f6d8a4bf34a
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     added b
+  
+#endif
+  $ echo "[format]" >> .hg/hgrc
+  $ echo "use-persistent-nodemap=True" >> .hg/hgrc
+
+  $ hg debugupgraderepo --run -q -R ../shared1
+  abort: cannot upgrade repository; unsupported source requirement: shared
+  [255]
+
+  $ hg debugupgraderepo --run -q
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
+     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+     added: persistent-nodemap
+  
+  $ hg log -r .
+  changeset:   1:5f6d8a4bf34a
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     added b
+  
+
+The shared repo should still work
+  $ cd ../shared1
+  $ hg log -r .
+  changeset:   2:155349b645be
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     added c
+  
+Unsharing works
+
+  $ hg unshare
+
+Test that the source config is added to the shared repo after unshare, and that
+the current repo's config still takes precedence over the config which came from
+the source
+  $ cd ../cloned
+  $ hg push ../shared1
+  pushing to ../shared1
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
+  transaction abort!
+  rollback completed
+  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
+  [255]
+  $ hg showconfig ui.curses -R ../shared1
+  false
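
The requirement split that this new test checks throughout is just two plain
text files on disk, so it can be read back outside of hg; a minimal sketch
assuming only the layout shown above (this is not mercurial's API):

    import os

    def read_requirements(repo_root):
        # Working-copy requirements live in .hg/requires; with
        # share-safe, store requirements live in .hg/store/requires.
        def read(path):
            try:
                with open(path) as fh:
                    return set(fh.read().split())
            except FileNotFoundError:
                return set()

        wc = read(os.path.join(repo_root, '.hg', 'requires'))
        store = read(os.path.join(repo_root, '.hg', 'store', 'requires'))
        return wc, store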
--- a/tests/test-share.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-share.t	Tue Oct 20 22:04:04 2020 +0530
@@ -1,3 +1,10 @@
+#testcases safe normal
+
+#if safe
+  $ echo "[format]"         >> $HGRCPATH
+  $ echo "exp-share-safe = True" >> $HGRCPATH
+#endif
+
   $ echo "[extensions]"      >> $HGRCPATH
   $ echo "share = "          >> $HGRCPATH
 
@@ -252,3 +259,10 @@
 
   $ killdaemons.py
 
+Test sharing a repository which was created with the store requirement disabled
+
+  $ hg init nostore --config format.usestore=false
+  ignoring enabled 'format.exp-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
+  $ hg share nostore sharednostore
+  abort: cannot create shared repository as source was created with 'format.usestore' config disabled
+  [255]
--- a/tests/test-simple-update.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-simple-update.t	Tue Oct 20 22:04:04 2020 +0530
@@ -19,7 +19,7 @@
   $ hg co
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ echo bar>>foo
-  $ hg commit -m "2"
+  $ hg commit -m "2" -d '1 0'
 
   $ cd ../test
 
@@ -30,7 +30,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 30aff43faee1
+  new changesets 84b9316f7b31
   1 local changesets published
   (run 'hg update' to get a working copy)
 
@@ -57,25 +57,47 @@
   abort: you can't specify a revision and a date
   [255]
 
+update by date
+
+  $ hg update -d '<1970-01-01 00:00:02 +0000'
+  found revision 1 from Thu Jan 01 00:00:01 1970 +0000
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg update -d '<1970-01-01 00:00:01 +0000'
+  found revision 1 from Thu Jan 01 00:00:01 1970 +0000
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg update -d '<1970-01-01 00:00:00 +0000'
+  found revision 0 from Thu Jan 01 00:00:00 1970 +0000
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ hg update -d '>1970-01-01 00:00:02 +0000'
+  abort: revision matching date not found
+  [255]
+  $ hg update -d '>1970-01-01 00:00:01 +0000'
+  found revision 1 from Thu Jan 01 00:00:01 1970 +0000
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg update -d '>1970-01-01 00:00:00 +0000'
+  found revision 1 from Thu Jan 01 00:00:01 1970 +0000
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
 update to default destination (with empty revspec)
 
   $ hg update -q null
   $ hg update
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg id
-  30aff43faee1 tip
+  84b9316f7b31 tip
 
   $ hg update -q null
   $ hg update -r ''
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg id
-  30aff43faee1 tip
+  84b9316f7b31 tip
 
   $ hg update -q null
   $ hg update ''
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg id
-  30aff43faee1 tip
+  84b9316f7b31 tip
 
   $ cd ..
 
--- a/tests/test-sparse-import.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-sparse-import.t	Tue Oct 20 22:04:04 2020 +0530
@@ -153,7 +153,7 @@
   > from mercurial import error, sparse
   > def extsetup(ui):
   >     def abort_refresh(*args, **kwargs):
-  >         raise error.Abort('sparse._refresh called!')
+  >         raise error.Abort(b'sparse._refresh called!')
   >     sparse.refreshwdir = abort_refresh
   > EOF
   $ cat >> $HGRCPATH <<EOF
--- a/tests/test-subrepo-recursion.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-subrepo-recursion.t	Tue Oct 20 22:04:04 2020 +0530
@@ -681,7 +681,7 @@
   no changes found
 
   $ hg incoming -S --bundle incoming.hg
-  abort: cannot combine --bundle and --subrepos
+  abort: cannot specify both --subrepos and --bundle
   [255]
 
 Test missing subrepo:
--- a/tests/test-tags.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-tags.t	Tue Oct 20 22:04:04 2020 +0530
@@ -156,7 +156,7 @@
 Failure to acquire lock results in no write
 
   $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
-  $ echo 'foo:1' > .hg/wlock
+  $ echo 'foo:1' > .hg/store/lock
   $ hg identify
   b9154636be93 tip
   $ hg blackbox -l 6
@@ -170,7 +170,7 @@
   $ fnodescacheexists
   no fnodes cache
 
-  $ rm .hg/wlock
+  $ rm .hg/store/lock
 
   $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
   $ hg identify
--- a/tests/test-template-map.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-template-map.t	Tue Oct 20 22:04:04 2020 +0530
@@ -125,6 +125,54 @@
   date:        Wed Jan 01 10:01:00 2020 +0000
   summary:     third
   
+Test map inheritance with non-existent base
+
+  $ echo "__base__ = non-existent" > map-base-nonexistent
+  $ hg log -l1 -T./map-base-nonexistent
+  abort: style '$TESTTMP/a/non-existent' not found
+  (available styles: bisect, changelog, compact, default, phases, show, status, xml)
+  [255]
+
+Test map inheritance with directory as base
+
+  $ mkdir somedir
+  $ echo "__base__ = somedir" > map-base-dir
+  $ hg log -l1 -T./map-base-dir
+  abort: Is a directory: '$TESTTMP/a/somedir'
+  [255]
+
+Test including a built-in template map
+
+  $ cat <<'EOF' > map-include-builtin
+  > %include map-cmdline.default
+  > [templates]
+  > changeset = "{changeset_quiet}\n"
+  > EOF
+  $ hg log -l1 -T./map-include-builtin
+  8:95c24699272e
+  
+
+Test including a nonexistent template map
+BROKEN: This should probably be an error just like the bad __base__ above
+
+  $ cat <<'EOF' > map-include-nonexistent
+  > %include nonexistent
+  > [templates]
+  > changeset = "test\n"
+  > EOF
+  $ hg log -l1 -T./map-include-nonexistent
+  test
+
+Test including a directory as template map
+BROKEN: This should probably be an error just like the bad __base__ above
+
+  $ cat <<'EOF' > map-include-dir
+  > %include somedir
+  > [templates]
+  > changeset = "test\n"
+  > EOF
+  $ hg log -l1 -T./map-include-dir
+  test
 
 Test docheader, docfooter and separator in template map
 
@@ -1227,6 +1275,19 @@
   abort: specify a template
   [255]
 
+Error if style is a directory:
+
+  $ hg log --style somedir
+  abort: Is a directory: 'somedir'
+  [255]
+
+Error if style is a directory whose name is a built-in style:
+
+  $ hg log --style coal
+  abort: style 'coal' not found
+  (available styles: bisect, changelog, compact, default, phases, show, status, xml)
+  [255]
+
 Error if style missing key:
 
   $ echo 'q = q' > t
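
The map-file tests above probe the template loader's inheritance: a map file
may name a __base__ whose entries it overrides, a bad __base__ aborts, and (as
the BROKEN notes say) a bad %include currently does not. An illustrative
loader sketch, not mercurial's actual one:

    import os

    def load_map(path):
        # Parse 'key = value' lines; resolve __base__ recursively, with
        # the child's entries overriding the base's.
        if not os.path.isfile(path):
            raise IOError('style %r not found' % path)
        entries = {}
        with open(path) as fh:
            for line in fh:
                if '=' in line:
                    key, value = line.split('=', 1)
                    entries[key.strip()] = value.strip()
        base = entries.pop('__base__', None)
        if base is None:
            return entries
        merged = load_map(os.path.join(os.path.dirname(path), base))
        merged.update(entries)
        return merged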
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-transaction-rollback-on-sigpipe.t	Tue Oct 20 22:04:04 2020 +0530
@@ -0,0 +1,67 @@
+Test that, when an hg push is interrupted and the remote side receives SIGPIPE,
+the remote hg is able to successfully roll back the transaction.
+
+  $ hg init -q remote
+  $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -q ssh://user@dummy/`pwd`/remote local
+
+  $ check_for_abandoned_transaction() {
+  >     [[ -f $TESTTMP/remote/.hg/store/journal ]] && echo "Abandoned transaction!"
+  > }
+
+  $ pidfile=`pwd`/pidfile
+  $ >$pidfile
+
+  $ script() {
+  >     cat >"$1"
+  >     chmod +x "$1"
+  > }
+
+On the remote end, run hg, piping stdout and stderr through processes that we
+know the PIDs of. We will later kill these to simulate an ssh client
+disconnecting.
+
+  $ killable_pipe=`pwd`/killable_pipe.sh
+  $ script $killable_pipe <<EOF
+  > #!/bin/bash
+  > echo \$\$ >> $pidfile
+  > exec cat
+  > EOF
+
+  $ remotecmd=`pwd`/remotecmd.sh
+  $ script $remotecmd <<EOF
+  > #!/bin/bash
+  > hg "\$@" 1> >($killable_pipe) 2> >($killable_pipe >&2)
+  > EOF
+
+In the pretxnchangegroup hook, kill the PIDs recorded above to simulate ssh
+disconnecting. Then exit nonzero, to force a transaction rollback.
+
+  $ hook_script=`pwd`/pretxnchangegroup.sh
+  $ script $hook_script <<EOF
+  > #!/bin/bash
+  > for pid in \$(cat $pidfile) ; do
+  >   kill \$pid
+  >   while kill -0 \$pid 2>/dev/null ; do
+  >     sleep 0.1
+  >   done
+  > done
+  > exit 1
+  > EOF
+
+  $ cat >remote/.hg/hgrc <<EOF
+  > [hooks]
+  > pretxnchangegroup.break-things=$hook_script
+  > EOF
+
+  $ cd local
+  $ echo foo > foo ; hg commit -qAm "commit"
+  $ hg push -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --remotecmd $remotecmd 2>&1 | grep -v $killable_pipe
+  pushing to ssh://user@dummy/$TESTTMP/remote
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  abort: stream ended unexpectedly (got 0 bytes, expected 4)
+
+  $ check_for_abandoned_transaction
+  [1]
--- a/tests/test-up-local-change.t	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-up-local-change.t	Tue Oct 20 22:04:04 2020 +0530
@@ -43,9 +43,9 @@
   resolving manifests
    branchmerge: False, force: False, partial: False
    ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb
-   preserving a for resolve of a
    b: remote created -> g
   getting b
+   preserving a for resolve of a
    a: versions differ -> m (premerge)
   picked tool 'true' for a (binary False symlink False changedelete False)
   merging a
@@ -68,9 +68,9 @@
   resolving manifests
    branchmerge: False, force: False, partial: False
    ancestor: 1e71731e6fbb, local: 1e71731e6fbb+, remote: c19d34741b0a
-   preserving a for resolve of a
    b: other deleted -> r
   removing b
+   preserving a for resolve of a
   starting 4 threads for background file closing (?)
    a: versions differ -> m (premerge)
   picked tool 'true' for a (binary False symlink False changedelete False)
@@ -92,9 +92,9 @@
   resolving manifests
    branchmerge: False, force: False, partial: False
    ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb
-   preserving a for resolve of a
    b: remote created -> g
   getting b
+   preserving a for resolve of a
    a: versions differ -> m (premerge)
   picked tool 'true' for a (binary False symlink False changedelete False)
   merging a
--- a/tests/test-url.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/test-url.py	Tue Oct 20 22:04:04 2020 +0530
@@ -392,7 +392,7 @@
     >>> try:
     ...   u = url(b'file://mercurial-scm.org/foo')
     ... except error.Abort as e:
-    ...   forcebytestr(e)
+    ...   pycompat.bytestr(e.message)
     'file:// URLs can only refer to localhost'
 
     Empty URL:
--- a/tests/testlib/ext-sidedata.py	Thu Oct 08 13:45:56 2020 -0700
+++ b/tests/testlib/ext-sidedata.py	Tue Oct 20 22:04:04 2020 +0530
@@ -12,8 +12,8 @@
 
 from mercurial import (
     extensions,
-    localrepo,
     node,
+    requirements,
     revlog,
     upgrade,
 )
@@ -54,7 +54,7 @@
 def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
     sidedatacompanion = orig(srcrepo, dstrepo)
     addedreqs = dstrepo.requirements - srcrepo.requirements
-    if localrepo.SIDEDATA_REQUIREMENT in addedreqs:
+    if requirements.SIDEDATA_REQUIREMENT in addedreqs:
         assert sidedatacompanion is None  # deal with composition later
 
         def sidedatacompanion(revlog, rev):
@@ -70,7 +70,7 @@
             # and sha2 hashes
             sha256 = hashlib.sha256(text).digest()
             update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
-            return False, (), update
+            return False, (), update, 0, 0
 
     return sidedatacompanion