--- a/.hgignore Thu Jan 11 20:37:34 2024 +0100
+++ b/.hgignore Sat Oct 26 04:16:00 2024 +0200
@@ -40,10 +40,17 @@
dist
packages
doc/common.txt
+doc/commandlist.txt
+doc/extensionlist.txt
+doc/topiclist.txt
+doc/*.mk
doc/*.[0-9]
doc/*.[0-9].txt
doc/*.[0-9].gendoc.txt
doc/*.[0-9].{x,ht}ml
+doc/build
+doc/html
+doc/man
MANIFEST
MANIFEST.in
patches
--- a/contrib/automation/hgautomation/cli.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/automation/hgautomation/cli.py Sat Oct 26 04:16:00 2024 +0200
@@ -201,7 +201,6 @@
with aws.temporary_linux_dev_instances(
c, image, instance_type, ensure_extra_volume=ensure_extra_volume
) as insts:
-
instance = insts[0]
linux.prepare_exec_environment(
@@ -447,9 +446,6 @@
help='Python version to use',
choices={
'system3',
- '3.5',
- '3.6',
- '3.7',
'3.8',
'pypy',
'pypy3.5',
@@ -476,7 +472,7 @@
sp.add_argument(
'--python-version',
help='Python version to use',
- choices={'3.5', '3.6', '3.7', '3.8', '3.9', '3.10'},
+ choices={'3.8', '3.9', '3.10'},
default='3.9',
)
sp.add_argument(
--- a/contrib/automation/hgautomation/linux.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/automation/hgautomation/linux.py Sat Oct 26 04:16:00 2024 +0200
@@ -25,7 +25,7 @@
}
INSTALL_PYTHONS = r'''
-PYENV3_VERSIONS="3.5.10 3.6.13 3.7.10 3.8.10 3.9.5 pypy3.5-7.0.0 pypy3.6-7.3.3 pypy3.7-7.3.3"
+PYENV3_VERSIONS="3.8.10 3.9.5 pypy3.5-7.0.0 pypy3.6-7.3.3 pypy3.7-7.3.3"
git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
pushd /hgdev/pyenv
--- a/contrib/automation/linux-requirements-py3.5.txt Thu Jan 11 20:37:34 2024 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,194 +0,0 @@
-#
-# This file is autogenerated by pip-compile
-# To update, run:
-#
-# pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.5.txt contrib/automation/linux-requirements.txt.in
-#
-astroid==2.4.2 \
- --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
- --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386
- # via pylint
-docutils==0.17.1 \
- --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
- --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
- # via -r contrib/automation/linux-requirements.txt.in
-fuzzywuzzy==0.18.0 \
- --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
- --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
- # via -r contrib/automation/linux-requirements.txt.in
-idna==3.1 \
- --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
- --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
- # via yarl
-isort==4.3.21 \
- --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
- --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
- # via
- # -r contrib/automation/linux-requirements.txt.in
- # pylint
-lazy-object-proxy==1.4.3 \
- --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
- --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
- --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
- --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
- --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
- --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
- --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
- --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
- --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
- --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
- --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
- --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
- --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
- --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
- --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
- --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
- --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
- --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
- --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
- --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
- --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0
- # via astroid
-mccabe==0.6.1 \
- --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
- --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
- # via pylint
-multidict==5.0.2 \
- --hash=sha256:060d68ae3e674c913ec41a464916f12c4d7ff17a3a9ebbf37ba7f2c681c2b33e \
- --hash=sha256:06f39f0ddc308dab4e5fa282d145f90cd38d7ed75390fc83335636909a9ec191 \
- --hash=sha256:17847fede1aafdb7e74e01bb34ab47a1a1ea726e8184c623c45d7e428d2d5d34 \
- --hash=sha256:1cd102057b09223b919f9447c669cf2efabeefb42a42ae6233f25ffd7ee31a79 \
- --hash=sha256:20cc9b2dd31761990abff7d0e63cd14dbfca4ebb52a77afc917b603473951a38 \
- --hash=sha256:2576e30bbec004e863d87216bc34abe24962cc2e964613241a1c01c7681092ab \
- --hash=sha256:2ab9cad4c5ef5c41e1123ed1f89f555aabefb9391d4e01fd6182de970b7267ed \
- --hash=sha256:359ea00e1b53ceef282232308da9d9a3f60d645868a97f64df19485c7f9ef628 \
- --hash=sha256:3e61cc244fd30bd9fdfae13bdd0c5ec65da51a86575ff1191255cae677045ffe \
- --hash=sha256:43c7a87d8c31913311a1ab24b138254a0ee89142983b327a2c2eab7a7d10fea9 \
- --hash=sha256:4a3f19da871befa53b48dd81ee48542f519beffa13090dc135fffc18d8fe36db \
- --hash=sha256:4df708ef412fd9b59b7e6c77857e64c1f6b4c0116b751cb399384ec9a28baa66 \
- --hash=sha256:59182e975b8c197d0146a003d0f0d5dc5487ce4899502061d8df585b0f51fba2 \
- --hash=sha256:6128d2c0956fd60e39ec7d1c8f79426f0c915d36458df59ddd1f0cff0340305f \
- --hash=sha256:6168839491a533fa75f3f5d48acbb829475e6c7d9fa5c6e245153b5f79b986a3 \
- --hash=sha256:62abab8088704121297d39c8f47156cb8fab1da731f513e59ba73946b22cf3d0 \
- --hash=sha256:653b2bbb0bbf282c37279dd04f429947ac92713049e1efc615f68d4e64b1dbc2 \
- --hash=sha256:6566749cd78cb37cbf8e8171b5cd2cbfc03c99f0891de12255cf17a11c07b1a3 \
- --hash=sha256:76cbdb22f48de64811f9ce1dd4dee09665f84f32d6a26de249a50c1e90e244e0 \
- --hash=sha256:8efcf070d60fd497db771429b1c769a3783e3a0dd96c78c027e676990176adc5 \
- --hash=sha256:8fa4549f341a057feec4c3139056ba73e17ed03a506469f447797a51f85081b5 \
- --hash=sha256:9380b3f2b00b23a4106ba9dd022df3e6e2e84e1788acdbdd27603b621b3288df \
- --hash=sha256:9ed9b280f7778ad6f71826b38a73c2fdca4077817c64bc1102fdada58e75c03c \
- --hash=sha256:a7b8b5bd16376c8ac2977748bd978a200326af5145d8d0e7f799e2b355d425b6 \
- --hash=sha256:af271c2540d1cd2a137bef8d95a8052230aa1cda26dd3b2c73d858d89993d518 \
- --hash=sha256:b561e76c9e21402d9a446cdae13398f9942388b9bff529f32dfa46220af54d00 \
- --hash=sha256:b82400ef848bbac6b9035a105ac6acaa1fb3eea0d164e35bbb21619b88e49fed \
- --hash=sha256:b98af08d7bb37d3456a22f689819ea793e8d6961b9629322d7728c4039071641 \
- --hash=sha256:c58e53e1c73109fdf4b759db9f2939325f510a8a5215135330fe6755921e4886 \
- --hash=sha256:cbabfc12b401d074298bfda099c58dfa5348415ae2e4ec841290627cb7cb6b2e \
- --hash=sha256:d4a6fb98e9e9be3f7d70fd3e852369c00a027bd5ed0f3e8ade3821bcad257408 \
- --hash=sha256:d99da85d6890267292065e654a329e1d2f483a5d2485e347383800e616a8c0b1 \
- --hash=sha256:e58db0e0d60029915f7fc95a8683fa815e204f2e1990f1fb46a7778d57ca8c35 \
- --hash=sha256:e5bf89fe57f702a046c7ec718fe330ed50efd4bcf74722940db2eb0919cddb1c \
- --hash=sha256:f612e8ef8408391a4a3366e3508bab8ef97b063b4918a317cb6e6de4415f01af \
- --hash=sha256:f65a2442c113afde52fb09f9a6276bbc31da71add99dc76c3adf6083234e07c6 \
- --hash=sha256:fa0503947a99a1be94f799fac89d67a5e20c333e78ddae16e8534b151cdc588a
- # via yarl
-pyflakes==2.3.1 \
- --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
- --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
- # via -r contrib/automation/linux-requirements.txt.in
-pygments==2.9.0 \
- --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
- --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
- # via -r contrib/automation/linux-requirements.txt.in
-pylint==2.6.2 \
- --hash=sha256:718b74786ea7ed07aa0c58bf572154d4679f960d26e9641cc1de204a30b87fc9 \
- --hash=sha256:e71c2e9614a4f06e36498f310027942b0f4f2fde20aebb01655b31edc63b9eaf
- # via -r contrib/automation/linux-requirements.txt.in
-python-levenshtein==0.12.2 \
- --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
- # via -r contrib/automation/linux-requirements.txt.in
-pyyaml==5.3.1 \
- --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
- --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
- --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
- --hash=sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e \
- --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
- --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
- --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
- --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
- --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
- --hash=sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a \
- --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
- --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
- --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a
- # via vcrpy
-six==1.16.0 \
- --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
- --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
- # via
- # astroid
- # vcrpy
-toml==0.10.2 \
- --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
- --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
- # via pylint
-typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
- --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
- --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
- --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
- --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
- --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
- --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
- --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
- --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
- --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
- --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
- --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
- --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
- --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
- --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
- --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
- --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
- --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
- --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
- --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
- --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
- --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
- --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
- --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
- --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
- --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
- --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
- --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
- --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
- --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
- --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
- # via
- # -r contrib/automation/linux-requirements.txt.in
- # astroid
-vcrpy==4.1.1 \
- --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
- --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
- # via -r contrib/automation/linux-requirements.txt.in
-wrapt==1.12.1 \
- --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
- # via
- # astroid
- # vcrpy
-yarl==1.3.0 \
- --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
- --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
- --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
- --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
- --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
- --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
- --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
- --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
- --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
- --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
- --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1
- # via vcrpy
-
-# WARNING: The following packages were not pinned, but pip requires them to be
-# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
-# setuptools
--- a/contrib/automation/linux-requirements-py3.txt Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/automation/linux-requirements-py3.txt Sat Oct 26 04:16:00 2024 +0200
@@ -16,7 +16,7 @@
--hash=sha256:3901be1cb7c2a780f14668691474d9252c070a756be0a9ead98cfeabfa11aeb8 \
--hash=sha256:8ee1e5f5a1afc5b19bdfae4fdf0c35ed324074bdce3500c939842c8f818645d9
# via black
-black==19.10b0 ; python_version >= "3.6" and platform_python_implementation != "PyPy" \
+black==19.10b0 ; python_version >= "3.8" and platform_python_implementation != "PyPy" \
--hash=sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b \
--hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539
# via -r contrib/automation/linux-requirements.txt.in
--- a/contrib/automation/linux-requirements.txt.in Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/automation/linux-requirements.txt.in Sat Oct 26 04:16:00 2024 +0200
@@ -1,5 +1,5 @@
# black pulls in typed-ast, which doesn't install on PyPy.
-black==19.10b0 ; python_version >= '3.6' and platform_python_implementation != 'PyPy'
+black==19.10b0 ; python_version >= '3.8' and platform_python_implementation != 'PyPy'
# Bazaar doesn't work with Python 3 nor PyPy.
bzr ; python_version <= '2.7' and platform_python_implementation == 'CPython'
docutils
@@ -13,5 +13,5 @@
# Needed to avoid warnings from fuzzywuzzy.
python-Levenshtein
# typed-ast dependency doesn't install on PyPy.
-typed-ast ; python_version >= '3.0' and platform_python_implementation != 'PyPy'
+typed-ast ; python_version >= '3.8' and platform_python_implementation != 'PyPy'
vcrpy
--- a/contrib/check-config.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/check-config.py Sat Oct 26 04:16:00 2024 +0200
@@ -57,7 +57,6 @@
return b
return b.decode('utf8')
-
else:
mkstr = lambda x: x
--- a/contrib/check-py3-compat.py Thu Jan 11 20:37:34 2024 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,92 +0,0 @@
-#!/usr/bin/env python3
-#
-# check-py3-compat - check Python 3 compatibility of Mercurial files
-#
-# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-
-import ast
-import importlib
-import os
-import sys
-import traceback
-import warnings
-
-
-def check_compat_py3(f):
- """Check Python 3 compatibility of a file with Python 3."""
- with open(f, 'rb') as fh:
- content = fh.read()
-
- try:
- ast.parse(content, filename=f)
- except SyntaxError as e:
- print('%s: invalid syntax: %s' % (f, e))
- return
-
- # Try to import the module.
- # For now we only support modules in packages because figuring out module
- # paths for things not in a package can be confusing.
- if f.startswith(
- ('hgdemandimport/', 'hgext/', 'mercurial/')
- ) and not f.endswith('__init__.py'):
- assert f.endswith('.py')
- name = f.replace('/', '.')[:-3]
- try:
- importlib.import_module(name)
- except Exception as e:
- exc_type, exc_value, tb = sys.exc_info()
- # We walk the stack and ignore frames from our custom importer,
- # import mechanisms, and stdlib modules. This kinda/sorta
- # emulates CPython behavior in import.c while also attempting
- # to pin blame on a Mercurial file.
- for frame in reversed(traceback.extract_tb(tb)):
- if frame.name == '_call_with_frames_removed':
- continue
- if 'importlib' in frame.filename:
- continue
- if 'mercurial/__init__.py' in frame.filename:
- continue
- if frame.filename.startswith(sys.prefix):
- continue
- break
-
- if frame.filename:
- filename = os.path.basename(frame.filename)
- print(
- '%s: error importing: <%s> %s (error at %s:%d)'
- % (f, type(e).__name__, e, filename, frame.lineno)
- )
- else:
- print(
- '%s: error importing module: <%s> %s (line %d)'
- % (f, type(e).__name__, e, frame.lineno)
- )
-
-
-if __name__ == '__main__':
- # check_compat_py3 will import every filename we specify as long as it
- # starts with one of a few prefixes. It does this by converting
- # specified filenames like 'mercurial/foo.py' to 'mercurial.foo' and
- # importing that. When running standalone (not as part of a test), this
- # means we actually import the installed versions, not the files we just
- # specified. When running as test-check-py3-compat.t, we technically
- # would import the correct paths, but it's cleaner to have both cases
- # use the same import logic.
- sys.path.insert(0, os.getcwd())
-
- for f in sys.argv[1:]:
- with warnings.catch_warnings(record=True) as warns:
- check_compat_py3(f)
-
- for w in warns:
- print(
- warnings.formatwarning(
- w.message, w.category, w.filename, w.lineno
- ).rstrip()
- )
-
- sys.exit(0)
--- a/contrib/check-pytype.sh Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/check-pytype.sh Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,9 @@
cd "$(hg root)"
+printf "pytype version: "
+pytype --version
+
# Many of the individual files that are excluded here confuse pytype
# because they do a mix of Python 2 and Python 3 things
# conditionally. There's no good way to help it out with that as far as
@@ -41,7 +44,6 @@
# hgext/sqlitestore.py # [attribute-error]
# hgext/zeroconf/__init__.py # bytes vs str; tests fail on macOS
#
-# mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo
# mercurial/context.py # many [attribute-error]
# mercurial/crecord.py # tons of [attribute-error], [module-attr]
# mercurial/debugcommands.py # [wrong-arg-types]
@@ -54,13 +56,10 @@
# mercurial/interfaces # No attribute 'capabilities' on peer [attribute-error]
# mercurial/keepalive.py # [attribute-error]
# mercurial/localrepo.py # [attribute-error]
-# mercurial/manifest.py # [unsupported-operands], [wrong-arg-types]
# mercurial/minirst.py # [unsupported-operands], [attribute-error]
-# mercurial/pure/osutil.py # [invalid-typevar], [not-callable]
# mercurial/pure/parsers.py # [attribute-error]
# mercurial/repoview.py # [attribute-error]
# mercurial/testing/storage.py # tons of [attribute-error]
-# mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error]
# mercurial/win32.py # [not-callable]
# mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
# mercurial/wireprotov1peer.py # [attribute-error]
@@ -101,7 +100,6 @@
-x hgext/remotefilelog/shallowbundle.py \
-x hgext/sqlitestore.py \
-x hgext/zeroconf/__init__.py \
- -x mercurial/bundlerepo.py \
-x mercurial/context.py \
-x mercurial/crecord.py \
-x mercurial/debugcommands.py \
@@ -114,14 +112,11 @@
-x mercurial/interfaces \
-x mercurial/keepalive.py \
-x mercurial/localrepo.py \
- -x mercurial/manifest.py \
-x mercurial/minirst.py \
- -x mercurial/pure/osutil.py \
-x mercurial/pure/parsers.py \
-x mercurial/repoview.py \
-x mercurial/testing/storage.py \
-x mercurial/thirdparty \
- -x mercurial/unionrepo.py \
-x mercurial/win32.py \
-x mercurial/wireprotoframing.py \
-x mercurial/wireprotov1peer.py \
--- a/contrib/docker/pytype/Dockerfile Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/docker/pytype/Dockerfile Sat Oct 26 04:16:00 2024 +0200
@@ -1,4 +1,4 @@
-FROM registry.heptapod.net/mercurial/ci-images/mercurial-core:v2.0
+FROM registry.heptapod.net/mercurial/ci-images/mercurial-core:v2.1
USER ci-runner
--- a/contrib/fuzz/mpatch_corpus.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/fuzz/mpatch_corpus.py Sat Oct 26 04:16:00 2024 +0200
@@ -25,7 +25,6 @@
"""Py2 calls __repr__ for `bytes(foo)`, forward to __bytes__"""
return self.__bytes__()
-
else:
class py2reprhack:
--- a/contrib/heptapod-ci.yml Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/heptapod-ci.yml Sat Oct 26 04:16:00 2024 +0200
@@ -26,8 +26,9 @@
variables:
PYTHON: python
- HG_CI_IMAGE_TAG: "v1.0"
+ HG_CI_IMAGE_TAG: "v2.1"
TEST_HGTESTS_ALLOW_NETIO: "0"
+ SHOW_VERSION_OF: "$PYTHON"
.all_template: &all
when: on_success
@@ -38,83 +39,107 @@
# The runner made a clone as root.
# We make a new clone owned by user used to run the step.
before_script:
+ - echo "python used, $PYTHON"
+ - for tool in $SHOW_VERSION_OF ; do echo '#' version of $tool; $tool --version; done
+ - rm -rf /tmp/mercurial-ci/ # Clean slate if not using containers
- hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
- hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
- cd /tmp/mercurial-ci/
- ls -1 tests/test-check-*.* > /tmp/check-tests.txt
- - black --version
- - clang-format --version
script:
- - echo "python used, $PYTHON"
- - $PYTHON --version
- echo "$RUNTEST_ARGS"
- HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
checks:
<<: *runtests
variables:
+ SHOW_VERSION_OF: "$PYTHON black clang-format"
RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
- PYTHON: python3
CI_CLEVER_CLOUD_FLAVOR: S
rust-cargo-test:
<<: *all
stage: tests
script:
- - echo "python used, $PYTHON"
- make rust-tests
- make cargo-clippy
variables:
- PYTHON: python3
CI_CLEVER_CLOUD_FLAVOR: S
-test-c:
+test-c: &test_c
<<: *runtests
variables:
RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
- PYTHON: python3
TEST_HGTESTS_ALLOW_NETIO: "1"
test-pure:
<<: *runtests
variables:
RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
- PYTHON: python3
-test-rust:
+test-rust: &test_rust
<<: *runtests
variables:
HGWITHRUSTEXT: cpython
RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
- PYTHON: python3
test-rhg:
<<: *runtests
variables:
HGWITHRUSTEXT: cpython
RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
- PYTHON: python3
test-chg:
<<: *runtests
variables:
- PYTHON: python3
RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
+# note: we should probably get a full matrix for flavor × py-version, but this
+# is a simple start that lets us check whether we break the lowest supported
+# version (and 3.12 has been giving us various troubles)
+test-3.8-c:
+ <<: *test_c
+ when: manual # avoid overloading the CI by default
+ variables:
+ PYTHON: python3.8
+
+test-3.12-c:
+ <<: *test_c
+ when: manual # avoid overloading the CI by default
+ variables:
+ PYTHON: python3.12
+
+test-3.12-rust:
+ <<: *test_rust
+ when: manual # avoid overloading the CI by default
+ variables:
+ PYTHON: python3.12
+
+test-3.13-c:
+ <<: *test_c
+ when: manual # avoid overloading the CI by default
+ variables:
+ PYTHON: python3.13
+
+test-3.13-rust:
+ <<: *test_rust
+ when: manual # avoid overloading the CI by default
+ variables:
+ PYTHON: python3.13
+
check-pytype:
extends: .runtests_template
before_script:
+ - export PATH="/home/ci-runner/vendor/pyenv/pyenv-2.4.7-adf3c2bccf09cdb81febcfd15b186711a33ac7a8/shims:/home/ci-runner/vendor/pyenv/pyenv-2.4.7-adf3c2bccf09cdb81febcfd15b186711a33ac7a8/bin:$PATH"
+ - echo "PATH, $PATH"
- hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
- hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
- cd /tmp/mercurial-ci/
- make local PYTHON=$PYTHON
- - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.11.18
- ./contrib/setup-pytype.sh
script:
- echo "Entering script section"
- sh contrib/check-pytype.sh
- variables:
- PYTHON: python3
# `sh.exe --login` sets a couple of extra environment variables that are defined
# in the MinGW shell, but switches CWD to /home/$username. The previous value
@@ -155,3 +180,9 @@
variables:
RUNTEST_ARGS: "--blacklist C:/Temp/check-tests.txt --pyoxidized"
PYTHON: py -3
+
+macos:
+ <<: *test_c
+ when: manual # avoid overloading the CI by default
+ tags:
+ - macos
--- a/contrib/hgclient.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/hgclient.py Sat Oct 26 04:16:00 2024 +0200
@@ -21,7 +21,6 @@
pargs = [re.sub(br'''\bb(['"])''', br'\1', b'%s' % a) for a in args]
stdout.write(b' '.join(pargs) + b'\n')
-
else:
import cStringIO
--- a/contrib/install-windows-dependencies.ps1 Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/install-windows-dependencies.ps1 Sat Oct 26 04:16:00 2024 +0200
@@ -19,29 +19,40 @@
$VS_BUILD_TOOLS_URL = "https://download.visualstudio.microsoft.com/download/pr/a1603c02-8a66-4b83-b821-811e3610a7c4/aa2db8bb39e0cbd23e9940d8951e0bc3/vs_buildtools.exe"
$VS_BUILD_TOOLS_SHA256 = "911E292B8E6E5F46CBC17003BDCD2D27A70E616E8D5E6E69D5D489A605CAA139"
-$PYTHON37_x86_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9.exe"
-$PYTHON37_x86_SHA256 = "769bb7c74ad1df6d7d74071cc16a984ff6182e4016e11b8949b93db487977220"
-$PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9-amd64.exe"
-$PYTHON37_x64_SHA256 = "e69ed52afb5a722e5c56f6c21d594e85c17cb29f12f18bb69751cf1714e0f987"
-
$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10.exe"
$PYTHON38_x86_SHA256 = "ad07633a1f0cd795f3bf9da33729f662281df196b4567fa795829f3bb38a30ac"
$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe"
$PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a"
-$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.12/python-3.9.12.exe"
-$PYTHON39_x86_SHA256 = "3d883326f30ac231c06b33f2a8ea700a185c20bf98d01da118079e9134d5fd20"
-$PYTHON39_X64_URL = "https://www.python.org/ftp/python/3.9.12/python-3.9.12-amd64.exe"
-$PYTHON39_x64_SHA256 = "2ba57ab2281094f78fc0227a27f4d47c90d94094e7cca35ce78419e616b3cb63"
+$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.13/python-3.9.13.exe"
+$PYTHON39_x86_SHA256 = "F363935897BF32ADF6822BA15ED1BFED7AE2AE96477F0262650055B6E9637C35"
+$PYTHON39_X64_URL = "https://www.python.org/ftp/python/3.9.13/python-3.9.13-amd64.exe"
+$PYTHON39_x64_SHA256 = "FB3D0466F3754752CA7FD839A09FFE53375FF2C981279FD4BC23A005458F7F5D"
+
+$PYTHON310_x86_URL = "https://www.python.org/ftp/python/3.10.11/python-3.10.11.exe"
+$PYTHON310_x86_SHA256 = "BD115A575E86E61CEA9136C5A2C47E090BA484DC2DEE8B51A34111BB094266D5"
+$PYTHON310_X64_URL = "https://www.python.org/ftp/python/3.10.11/python-3.10.11-amd64.exe"
+$PYTHON310_x64_SHA256 = "D8DEDE5005564B408BA50317108B765ED9C3C510342A598F9FD42681CBE0648B"
-$PYTHON310_x86_URL = "https://www.python.org/ftp/python/3.10.4/python-3.10.4.exe"
-$PYTHON310_x86_SHA256 = "97c37c53c7a826f5b00e185754ab2a324a919f7afc469b20764b71715c80041d"
-$PYTHON310_X64_URL = "https://www.python.org/ftp/python/3.10.4/python-3.10.4-amd64.exe"
-$PYTHON310_x64_SHA256 = "a81fc4180f34e5733c3f15526c668ff55de096366f9006d8a44c0336704e50f1"
+# Final installer release for the 3.11 series
+$PYTHON311_x86_URL = "https://www.python.org/ftp/python/3.11.9/python-3.11.9.exe"
+$PYTHON311_x86_SHA256 = "AF19E5E2F03E715A822181F2CB7D4EFEF4EDA13FA4A2DB6DA12E998E46F5CBF9"
+$PYTHON311_X64_URL = "https://www.python.org/ftp/python/3.11.9/python-3.11.9-amd64.exe"
+$PYTHON311_x64_SHA256 = "5EE42C4EEE1E6B4464BB23722F90B45303F79442DF63083F05322F1785F5FDDE"
-# PIP 22.0.4.
-$PIP_URL = "https://github.com/pypa/get-pip/raw/38e54e5de07c66e875c11a1ebbdb938854625dd8/public/get-pip.py"
-$PIP_SHA256 = "e235c437e5c7d7524fbce3880ca39b917a73dc565e0c813465b7a7a329bb279a"
+$PYTHON312_X86_URL = "https://www.python.org/ftp/python/3.12.7/python-3.12.7.exe"
+$PYTHON312_x86_SHA256 = "5BF4F3F0A58E1661A26754AE2FF0C2499EFFF093F34833EE0921922887FB3851"
+$PYTHON312_x64_URL = "https://www.python.org/ftp/python/3.12.7/python-3.12.7-amd64.exe"
+$PYTHON312_x64_SHA256 = "1206721601A62C925D4E4A0DCFC371E88F2DDBE8C0C07962EBB2BE9B5BDE4570"
+
+$PYTHON313_x86_URL = "https://www.python.org/ftp/python/3.13.0/python-3.13.0.exe"
+$PYTHON313_x86_SHA256 = "A9BE7082CCD3D0B947D14A87BCEADB1A3551382A68FCB64D245A2EBCC779B272"
+$PYTHON313_X64_URL = "https://www.python.org/ftp/python/3.13.0/python-3.13.0-amd64.exe"
+$PYTHON313_x64_SHA256 = "78156AD0CF0EC4123BFB5333B40F078596EBF15F2D062A10144863680AFBDEFC"
+
+# PIP 24.2.
+$PIP_URL = "https://github.com/pypa/get-pip/raw/66d8a0f637083e2c3ddffc0cb1e65ce126afb856/public/get-pip.py"
+$PIP_SHA256 = "6FB7B781206356F45AD79EFBB19322CAA6C2A5AD39092D0D44D0FEC94117E118"
$INNO_SETUP_URL = "http://files.jrsoftware.org/is/5/innosetup-5.6.1-unicode.exe"
$INNO_SETUP_SHA256 = "27D49E9BC769E9D1B214C153011978DB90DC01C2ACD1DDCD9ED7B3FE3B96B538"
@@ -137,14 +148,19 @@
$pip = "${prefix}\assets\get-pip.py"
- Secure-Download $PYTHON37_x86_URL ${prefix}\assets\python37-x86.exe $PYTHON37_x86_SHA256
- Secure-Download $PYTHON37_x64_URL ${prefix}\assets\python37-x64.exe $PYTHON37_x64_SHA256
Secure-Download $PYTHON38_x86_URL ${prefix}\assets\python38-x86.exe $PYTHON38_x86_SHA256
Secure-Download $PYTHON38_x64_URL ${prefix}\assets\python38-x64.exe $PYTHON38_x64_SHA256
Secure-Download $PYTHON39_x86_URL ${prefix}\assets\python39-x86.exe $PYTHON39_x86_SHA256
Secure-Download $PYTHON39_x64_URL ${prefix}\assets\python39-x64.exe $PYTHON39_x64_SHA256
Secure-Download $PYTHON310_x86_URL ${prefix}\assets\python310-x86.exe $PYTHON310_x86_SHA256
Secure-Download $PYTHON310_x64_URL ${prefix}\assets\python310-x64.exe $PYTHON310_x64_SHA256
+ Secure-Download $PYTHON311_x86_URL ${prefix}\assets\python311-x86.exe $PYTHON311_x86_SHA256
+ Secure-Download $PYTHON311_x64_URL ${prefix}\assets\python311-x64.exe $PYTHON311_x64_SHA256
+ Secure-Download $PYTHON312_x86_URL ${prefix}\assets\python312-x86.exe $PYTHON312_x86_SHA256
+ Secure-Download $PYTHON312_x64_URL ${prefix}\assets\python312-x64.exe $PYTHON312_x64_SHA256
+ Secure-Download $PYTHON313_x86_URL ${prefix}\assets\python313-x86.exe $PYTHON313_x86_SHA256
+ Secure-Download $PYTHON313_x64_URL ${prefix}\assets\python313-x64.exe $PYTHON313_x64_SHA256
+
Secure-Download $PIP_URL ${pip} $PIP_SHA256
Secure-Download $VS_BUILD_TOOLS_URL ${prefix}\assets\vs_buildtools.exe $VS_BUILD_TOOLS_SHA256
Secure-Download $INNO_SETUP_URL ${prefix}\assets\InnoSetup.exe $INNO_SETUP_SHA256
@@ -153,14 +169,18 @@
Secure-Download $RUSTUP_INIT_URL ${prefix}\assets\rustup-init.exe $RUSTUP_INIT_SHA256
Secure-Download $PYOXIDIZER_URL ${prefix}\assets\PyOxidizer.msi $PYOXIDIZER_SHA256
- Install-Python3 "Python 3.7 32-bit" ${prefix}\assets\python37-x86.exe ${prefix}\python37-x86 ${pip}
- Install-Python3 "Python 3.7 64-bit" ${prefix}\assets\python37-x64.exe ${prefix}\python37-x64 ${pip}
Install-Python3 "Python 3.8 32-bit" ${prefix}\assets\python38-x86.exe ${prefix}\python38-x86 ${pip}
Install-Python3 "Python 3.8 64-bit" ${prefix}\assets\python38-x64.exe ${prefix}\python38-x64 ${pip}
Install-Python3 "Python 3.9 32-bit" ${prefix}\assets\python39-x86.exe ${prefix}\python39-x86 ${pip}
Install-Python3 "Python 3.9 64-bit" ${prefix}\assets\python39-x64.exe ${prefix}\python39-x64 ${pip}
Install-Python3 "Python 3.10 32-bit" ${prefix}\assets\python310-x86.exe ${prefix}\python310-x86 ${pip}
Install-Python3 "Python 3.10 64-bit" ${prefix}\assets\python310-x64.exe ${prefix}\python310-x64 ${pip}
+ Install-Python3 "Python 3.11 32-bit" ${prefix}\assets\python311-x86.exe ${prefix}\python311-x86 ${pip}
+ Install-Python3 "Python 3.11 64-bit" ${prefix}\assets\python311-x64.exe ${prefix}\python311-x64 ${pip}
+ Install-Python3 "Python 3.12 32-bit" ${prefix}\assets\python312-x86.exe ${prefix}\python312-x86 ${pip}
+ Install-Python3 "Python 3.12 64-bit" ${prefix}\assets\python312-x64.exe ${prefix}\python312-x64 ${pip}
+ Install-Python3 "Python 3.13 32-bit" ${prefix}\assets\python313-x86.exe ${prefix}\python313-x86 ${pip}
+ Install-Python3 "Python 3.13 64-bit" ${prefix}\assets\python313-x64.exe ${prefix}\python313-x64 ${pip}
Write-Output "installing Visual Studio 2017 Build Tools and SDKs"
Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
--- a/contrib/nix/flake.nix Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/nix/flake.nix Sat Oct 26 04:16:00 2024 +0200
@@ -53,7 +53,7 @@
# but uses formatter features from nightly.
# TODO: make cargo use the formatter from nightly automatically
# (not supported by rustup/cargo yet? workaround?)
- # rustPlatform = pkgs.rust-bin.stable."1.61.0".default;
+ # rustPlatform = pkgs.rust-bin.stable."1.79.0".default;
# rustPlatformFormatter = pkgs.rust-bin.nightly."2023-04-20".default;
# The CI uses an old version of the Black code formatter,
--- a/contrib/packaging/debian/rules Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/packaging/debian/rules Sat Oct 26 04:16:00 2024 +0200
@@ -15,7 +15,7 @@
# the actual versions that are installed, see the comment above where we set
# DEB_HG_PYTHON_VERSIONS below. If you choose to set `DEB_HG_PYTHON_VERSIONS`
# yourself, set it to a space-separated string of python version numbers, like:
-# DEB_HG_PYTHON_VERSIONS="3.7 3.8" make deb
+# DEB_HG_PYTHON_VERSIONS="3.8" make deb
DEB_HG_MULTI_VERSION?=0
# Set to 1 to make /usr/bin/hg a symlink to chg, and move hg to
--- a/contrib/packaging/inno/readme.rst Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/packaging/inno/readme.rst Sat Oct 26 04:16:00 2024 +0200
@@ -8,7 +8,7 @@
* Inno Setup (http://jrsoftware.org/isdl.php) version 5.4 or newer.
Be sure to install the optional Inno Setup Preprocessor feature,
which is required.
-* Python 3.6+ (to run the ``packaging.py`` script)
+* Python 3.8+ (to run the ``packaging.py`` script)
Building
========
--- a/contrib/packaging/wix/readme.rst Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/packaging/wix/readme.rst Sat Oct 26 04:16:00 2024 +0200
@@ -16,7 +16,7 @@
The following system dependencies must be installed:
-* Python 3.6+ (to run the ``packaging.py`` script)
+* Python 3.8+ (to run the ``packaging.py`` script)
Building
========
--- a/contrib/perf-utils/compare-discovery-case Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/perf-utils/compare-discovery-case Sat Oct 26 04:16:00 2024 +0200
@@ -205,7 +205,6 @@
if __name__ == '__main__':
-
argv = sys.argv[:]
kwargs = {}
--- a/contrib/perf.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/perf.py Sat Oct 26 04:16:00 2024 +0200
@@ -130,7 +130,6 @@
def revlog(opener, *args, **kwargs):
return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
-
except (ImportError, AttributeError):
perf_rl_kind = None
@@ -261,7 +260,6 @@
commands.norepo += b' %s' % b' '.join(parsealiases(name))
return _command(name, list(options), synopsis)
-
else:
# for "historical portability":
# define "@command" annotation locally, because cmdutil.command
@@ -1926,7 +1924,7 @@
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
- mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
+ mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
if opts[b'no_lookup']:
if opts['rev']:
raise error.Abort('--no-lookup and --rev are mutually exclusive')
@@ -1985,7 +1983,7 @@
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
- mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
+ mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
unfi = repo.unfiltered()
clearcaches = opts[b'clear_caches']
@@ -2389,7 +2387,7 @@
timer, fm = gettimer(ui, opts)
import mercurial.revlog
- mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
+ mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
n = scmutil.revsingle(repo, rev).node()
try:
@@ -3102,7 +3100,7 @@
# disable inlining
old_max_inline = mercurial.revlog._maxinline
# large enough to never happen
- mercurial.revlog._maxinline = 2 ** 50
+ mercurial.revlog._maxinline = 2**50
with repo.lock():
bundle = [None, None]
@@ -4511,7 +4509,7 @@
sets=10000,
mixed=10000,
mixedgetfreq=50,
- **opts
+ **opts,
):
opts = _byteskwargs(opts)
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py Sat Oct 26 04:16:00 2024 +0200
@@ -137,7 +137,6 @@
def test_buffer_source_read_variance(
self, original, level, source_read_size, read_sizes
):
-
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
@@ -203,7 +202,6 @@
def test_buffer_source_readinto(
self, original, level, source_read_size, read_size
):
-
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
@@ -273,7 +271,6 @@
def test_buffer_source_readinto_variance(
self, original, level, source_read_size, read_sizes
):
-
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
@@ -410,7 +407,6 @@
def test_buffer_source_read1_variance(
self, original, level, source_read_size, read_sizes
):
-
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
@@ -551,7 +547,6 @@
def test_buffer_source_readinto1_variance(
self, original, level, source_read_size, read_sizes
):
-
refctx = zstd.ZstdCompressor(level=level)
ref_frame = refctx.compress(original)
--- a/contrib/python-zstandard/tests/test_decompressor.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/python-zstandard/tests/test_decompressor.py Sat Oct 26 04:16:00 2024 +0200
@@ -189,7 +189,7 @@
# Will get OverflowError on some Python distributions that can't
# handle really large integers.
with self.assertRaises((MemoryError, OverflowError)):
- dctx.decompress(compressed, max_output_size=2 ** 62)
+ dctx.decompress(compressed, max_output_size=2**62)
def test_dictionary(self):
samples = []
@@ -238,7 +238,7 @@
cctx = zstd.ZstdCompressor(write_content_size=False)
frame = cctx.compress(source)
- dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
+ dctx = zstd.ZstdDecompressor(max_window_size=2**zstd.WINDOWLOG_MIN)
with self.assertRaisesRegex(
zstd.ZstdError,
--- a/contrib/python-zstandard/tests/test_decompressor_fuzzing.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py Sat Oct 26 04:16:00 2024 +0200
@@ -353,7 +353,6 @@
def test_multiple_frames(
self, originals, frame_count, level, source_read_size, read_sizes
):
-
cctx = zstd.ZstdCompressor(level=level)
source = io.BytesIO()
buffer = io.BytesIO()
--- a/contrib/python-zstandard/zstandard/cffi.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/python-zstandard/zstandard/cffi.py Sat Oct 26 04:16:00 2024 +0200
@@ -273,7 +273,6 @@
ldm_hash_every_log=-1,
threads=0,
):
-
params = lib.ZSTD_createCCtxParams()
if params == ffi.NULL:
raise MemoryError()
@@ -1423,7 +1422,6 @@
read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
):
-
if not hasattr(ifh, "read"):
raise ValueError("first argument must have a read() method")
if not hasattr(ofh, "write"):
@@ -1523,7 +1521,6 @@
write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
write_return_read=False,
):
-
if not hasattr(writer, "write"):
raise ValueError("must pass an object with a write() method")
--- a/contrib/python3-ratchet.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/python3-ratchet.py Sat Oct 26 04:16:00 2024 +0200
@@ -93,23 +93,6 @@
'--working-tests must be from that repo'
)
sys.exit(1)
- try:
- subprocess.check_call(
- [
- opts.python3,
- '-c',
- 'import sys ; '
- 'assert ((3, 5) <= sys.version_info < (3, 6) '
- 'or sys.version_info >= (3, 6, 2))',
- ]
- )
- except subprocess.CalledProcessError:
- print(
- 'warning: Python 3.6.0 and 3.6.1 have '
- 'a bug which breaks Mercurial'
- )
- print('(see https://bugs.python.org/issue29714 for details)')
- sys.exit(1)
rt = subprocess.Popen(
[
--- a/contrib/python3-whitelist Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/python3-whitelist Sat Oct 26 04:16:00 2024 +0200
@@ -74,7 +74,6 @@
test-check-help.t
test-check-interfaces.py
test-check-module-imports.t
-test-check-py3-compat.t
test-check-pyflakes.t
test-check-pylint.t
test-check-shbang.t
--- a/contrib/revsetbenchmarks.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/revsetbenchmarks.py Sat Oct 26 04:16:00 2024 +0200
@@ -191,7 +191,7 @@
def formattiming(value):
"""format a value to strictly 8 char, dropping some precision if needed"""
- if value < 10 ** 7:
+ if value < 10**7:
return ('%.6f' % value)[:8]
else:
# value is HUGE very unlikely to happen (4+ month run)
@@ -371,7 +371,6 @@
print()
for ridx, rset in enumerate(revsets):
-
print("revset #%i: %s" % (ridx, rset))
printheader(variants, len(results), verbose=options.verbose, relative=True)
ref = None
--- a/contrib/setup-pytype.sh Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/setup-pytype.sh Sat Oct 26 04:16:00 2024 +0200
@@ -5,7 +5,7 @@
# Find the python3 setup that would run pytype
PYTYPE=`which pytype`
-PYTHON3=`head -n1 ${PYTYPE} | sed -s 's/#!//'`
+PYTHON3=${PYTHON:-`head -n1 ${PYTYPE} | sed -s 's/#!//'`}
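+# If $PYTHON is set in the environment (e.g. by the CI), it takes precedence;
+# otherwise we fall back to the interpreter named in pytype's shebang line.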
# Existing stubs that pytype processes live here
TYPESHED=$(${PYTHON3} -c "import pytype; print(pytype.__path__[0])")/typeshed/stubs
--- a/contrib/win32/hgwebdir_wsgi.py Thu Jan 11 20:37:34 2024 +0100
+++ b/contrib/win32/hgwebdir_wsgi.py Sat Oct 26 04:16:00 2024 +0200
@@ -101,6 +101,7 @@
import isapi_wsgi
from mercurial.hgweb.hgwebdir_mod import hgwebdir
+
# Example tweak: Replace isapi_wsgi's handler to provide better error message
# Other stuff could also be done here, like logging errors etc.
class WsgiHandler(isapi_wsgi.IsapiWsgiHandler):
@@ -114,7 +115,6 @@
def handler(environ, start_response):
-
# Translate IIS's weird URLs
url = environ['SCRIPT_NAME'] + environ['PATH_INFO']
paths = url[1:].split('/')[path_strip:]
--- a/doc/Makefile Thu Jan 11 20:37:34 2024 +0100
+++ b/doc/Makefile Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,7 @@
HTML=$(SOURCES:%.txt=%.html)
GENDOC=gendoc.py ../mercurial/commands.py ../mercurial/help.py \
../mercurial/helptext/*.txt ../hgext/*.py ../hgext/*/__init__.py
+RUNRST=runrst
PREFIX=/usr/local
MANDIR=$(PREFIX)/share/man
INSTALL=install -m 644
@@ -14,10 +15,150 @@
else
PYTHON?=python3
endif
+
RSTARGS=
+GENDOCARGS=
+GENDOCCMD=$(PYTHON) gendoc.py $(GENDOCARGS)
+
+# Output directories for individual help pages.
+MANOUT=man
+HTMLOUT=html
+BUILDDIR=build
export HGENCODING=UTF-8
+.PHONY: all man html install clean knownrefs
+
+# Generate a list of hg commands and extensions.
+commandlist.txt: $(GENDOC)
+ ${GENDOCCMD} commandlist > $@.tmp
+ mv $@.tmp $@
+
+topiclist.txt: $(GENDOC)
+ ${GENDOCCMD} topiclist > $@.tmp
+ mv $@.tmp $@
+
+extensionlist.txt: $(GENDOC)
+ ${GENDOCCMD} extensionlist > $@.tmp
+ mv $@.tmp $@
+
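+# Each of these lists comes out as a single line of space-separated names;
+# commandlist.txt, for instance, looks roughly like "add addremove annotate
+# archive ..." (commands with "::" in their name are skipped; see gendoc.py).
+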
+# Convenience target for running runrst by hand
+knownrefs: commandlist.txt topiclist.txt extensionlist.txt
+
+BUILDFILES=commandlist.txt topiclist.txt extensionlist.txt
+
+# We want to generate a sub-Makefile that can build the RST/man/html doc for
+# each hg command. Here are templates that we'll use to generate this
+# sub-Makefile.
+HGCMDTPL=templates/cmdheader.txt
+TOPICTPL=templates/topicheader.txt
+EXTTPL=templates/extheader.txt
+
+define RuleAllCommandsTemplate
+HG_COMMANDS=$(1)
+all-commands: $$(HG_COMMANDS:%=$$(BUILDDIR)/hg-%.gendoc.txt)
+endef
+
+define RuleAllTopicsTemplate
+HG_TOPICS=$(1)
+all-topics: $$(HG_TOPICS:%=$$(BUILDDIR)/%.gendoc.txt)
+endef
+
+define RuleAllExtensionsTemplate
+HG_EXTENSIONS=$(1)
+all-extensions: $$(HG_EXTENSIONS:%=$$(BUILDDIR)/%.gendoc.txt)
+endef
+
+define RuleCommandTemplate
+$$(BUILDDIR)/hg-$C.gendoc.txt: $$(GENDOC) $$(HGCMDTPL)
+ mkdir -p $$(@D)
+ $${GENDOCCMD} cmd-$C > $$@.tmp
+ mv $$@.tmp $$@
+endef
+
+define RuleTopicTemplate
+$$(BUILDDIR)/topic-$T.gendoc.txt: $$(GENDOC) $$(TOPICTPL)
+ mkdir -p $$(@D)
+ $${GENDOCCMD} topic-$T > $$@.tmp
+ mv $$@.tmp $$@
+endef
+
+define RuleExtensionTemplate
+$$(BUILDDIR)/ext-$E.gendoc.txt: $$(GENDOC) $$(EXTTPL)
+ mkdir -p $$(@D)
+ $${GENDOCCMD} ext-$E > $$@.tmp
+ mv $$@.tmp $$@
+endef
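+
+# For illustration (with a hypothetical command "foo"), RuleCommandTemplate
+# expands into a rule along these lines in the generated sub-Makefile:
+#
+#   $(BUILDDIR)/hg-foo.gendoc.txt: $(GENDOC) $(HGCMDTPL)
+#           mkdir -p $(@D)
+#           ${GENDOCCMD} cmd-foo > $@.tmp
+#           mv $@.tmp $@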
+
+# Actually generate the sub-Makefile.
+# The $file function is only supported by GNU Make 4 and above.
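+# (The ifeq below is a version check: $(sort) orders "$(MAKE_VERSION) 4.0"
+# textually, so 4.0 sorts first exactly when, for realistic version numbers,
+# MAKE_VERSION is 4.0 or newer.)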
+CommandsTopicsExtensions.mk: commandlist.txt topiclist.txt extensionlist.txt Makefile
+ifeq (4.0,$(firstword $(sort $(MAKE_VERSION) 4.0)))
+ $(file > $@.tmp,# Generated by Makefile)
+ $(file >> $@.tmp,$(call RuleAllCommandsTemplate,$(file < commandlist.txt)))
+ $(file >> $@.tmp,$(call RuleAllTopicsTemplate,$(file < topiclist.txt)))
+ $(file >> $@.tmp,$(call RuleAllExtensionsTemplate,$(file < extensionlist.txt)))
+ $(foreach C,$(file < commandlist.txt),$(file >> $@.tmp,$(RuleCommandTemplate)))
+ $(foreach T,$(file < topiclist.txt),$(file >> $@.tmp,$(RuleTopicTemplate)))
+ $(foreach E,$(file < extensionlist.txt),$(file >> $@.tmp,$(RuleExtensionTemplate)))
+ mv $@.tmp $@
+else
+ @echo "You are running make ${MAKE_VERSION} but you need make 4.0 or above"
+endif
+
+BUILDFILES+=CommandsTopicsExtensions.mk
+
+# Include the sub-Makefile that contains rules for generating each individual
+# command/help-topic/extension help page. This sub-Makefile is created by the
+# rule above (CommandsTopicsExtensions.mk) which in turn is created from the
+# plain-text lists of commands/help-topics/extensions.
+#
+# Any time the source code changes, these plain-text lists and this
+# sub-Makefile will get regenerated. Make will then restart itself to take
+# into account the rules inside the sub-Makefile.
+#
+# We want to avoid doing all this work for targets that we know don't need it,
+# however. For example, running `make clean` would only generate these files
+# in order to delete them immediately. As a result, we don't include the
+# sub-Makefile (and therefore don't require generating it) if clean is one of
+# the targets. This might not do the right thing when clean is combined with
+# other targets, but it covers the common case.
+ifeq (,$(filter clean,$(MAKECMDGOALS)))
+-include CommandsTopicsExtensions.mk
+endif
+
+# If the sub-Makefile is available, add all the hg commands, help-topics, and
+# extensions to the list of things to generate html and man pages for.
+#
+# Naming convention:
+# - commands: hg-foo (html and man)
+# - help topics: topic-foo (html), hgfoo (man)
+# - extensions: ext-foo (html), hgext-foo (man)
+#
+# Man pages for commands are in section 1 (user commands), topics and
+# extensions are in section 7 (miscellanea)
+#
+# NOTE: topics and extensions are temporarily disabled for man pages because
+# they make docutils' RST converter crash.
+ifdef HG_COMMANDS
+HTML+=$(HG_COMMANDS:%=$(HTMLOUT)/hg-%.html)
+MAN+=$(HG_COMMANDS:%=$(MANOUT)/hg-%.1)
+endif
+
+ifdef HG_TOPICS
+HTML+=$(HG_TOPICS:%=$(HTMLOUT)/topic-%.html)
+#MAN+=$(HG_TOPICS:%=$(MANOUT)/hg%.7)
+endif
+
+ifdef HG_EXTENSIONS
+HTML+=$(HG_EXTENSIONS:%=$(HTMLOUT)/ext-%.html)
+#MAN+=$(HG_EXTENSIONS:%=$(MANOUT)/hgext-%.7)
+endif
+
+# Also add the HTML index page
+HTML+=$(HTMLOUT)/index.html
+
+
all: man html
man: $(MAN)
@@ -26,17 +167,45 @@
# This logic is duplicated in setup.py:hgbuilddoc()
common.txt $(SOURCES) $(SOURCES:%.txt=%.gendoc.txt): $(GENDOC)
- ${PYTHON} gendoc.py "$(basename $@)" > $@.tmp
+ ${GENDOCCMD} "$(basename $@)" > $@.tmp
mv $@.tmp $@
-%: %.txt %.gendoc.txt common.txt
+%: %.txt %.gendoc.txt common.txt $(RUNRST)
$(PYTHON) runrst hgmanpage $(RSTARGS) --halt warning \
--strip-elements-with-class htmlonly $*.txt $*
-%.html: %.txt %.gendoc.txt common.txt
+%.html: %.txt %.gendoc.txt common.txt $(RUNRST)
$(PYTHON) runrst html $(RSTARGS) --halt warning \
--link-stylesheet --stylesheet-path style.css $*.txt $*.html
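+
+# For example, `make hg.1` regenerates hg.1.gendoc.txt via gendoc.py and then
+# runs runrst to render the man page; `make hg.1.html` does the same for HTML.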
+# Rules for index page and individual command/help-topic/extension pages
+# Because the naming isn't the same between html and man pages, we need to
+# break down the man page rules a bit more.
+$(BUILDDIR)/index.gendoc.txt: $(GENDOC)
+ mkdir -p $(@D)
+ ${GENDOCCMD} index > $@.tmp
+ mv $@.tmp $@
+
+$(MANOUT)/hg-%.1: $(BUILDDIR)/hg-%.gendoc.txt common.txt $(RUNRST)
+ mkdir -p $(@D)
+ $(PYTHON) runrst hgmanpage --hg-individual-pages $(RSTARGS) --halt warning \
+ --strip-elements-with-class htmlonly $(BUILDDIR)/hg-$*.gendoc.txt $@
+
+$(MANOUT)/hg%.7: $(BUILDDIR)/topic-%.gendoc.txt common.txt $(RUNRST)
+ mkdir -p $(@D)
+ $(PYTHON) runrst hgmanpage --hg-individual-pages $(RSTARGS) --halt warning \
+ --strip-elements-with-class htmlonly $(BUILDDIR)/topic-$*.gendoc.txt $@
+
+$(MANOUT)/hgext-%.7: $(BUILDDIR)/ext-%.gendoc.txt common.txt $(RUNRST)
+ mkdir -p $(@D)
+ $(PYTHON) runrst hgmanpage --hg-individual-pages $(RSTARGS) --halt warning \
+ --strip-elements-with-class htmlonly $(BUILDDIR)/ext-$*.gendoc.txt $@
+
+$(HTMLOUT)/%.html: $(BUILDDIR)/%.gendoc.txt common.txt $(RUNRST)
+ mkdir -p $(@D)
+ $(PYTHON) runrst html --hg-individual-pages $(RSTARGS) --halt warning \
+ --link-stylesheet --stylesheet-path style.css $(BUILDDIR)/$*.gendoc.txt $@
+
MANIFEST: man html
# tracked files are already in the main MANIFEST
$(RM) $@
@@ -51,5 +220,9 @@
$(INSTALL) $$i "$(DESTDIR)$(MANDIR)"/$$subdir ; \
done
+# The clean target explicitly doesn't bother with the sub-Makefile, so we don't
+# know anything about all the command/topic/extension targets and files.
+# $(HTML) only has the basic topics, so we need to delete $(HTMLOUT)/*.html and
+# other similar files "by hand" here.
clean:
- $(RM) $(MAN) $(HTML) common.txt $(SOURCES) $(SOURCES:%.txt=%.gendoc.txt) MANIFEST
+ $(RM) $(MAN) $(HTML) common.txt $(SOURCES) MANIFEST *.gendoc.txt $(BUILDFILES) $(BUILDDIR)/*.gendoc.* $(HTMLOUT)/*.html
--- a/doc/gendoc.py Thu Jan 11 20:37:34 2024 +0100
+++ b/doc/gendoc.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
import os
import sys
import textwrap
+import argparse
try:
import msvcrt
@@ -115,7 +116,7 @@
return d
-def showdoc(ui):
+def showdoc(ui, debugcmds=False):
# print options
ui.write(minirst.section(_(b"Options")))
multioccur = False
@@ -129,11 +130,18 @@
# print cmds
ui.write(minirst.section(_(b"Commands")))
- commandprinter(ui, table, minirst.subsection, minirst.subsubsection)
+ commandprinter(
+ ui,
+ table,
+ minirst.subsection,
+ minirst.subsubsection,
+ debugcmds=debugcmds,
+ )
# print help topics
# The config help topic is included in the hgrc.5 man page.
- helpprinter(ui, helptable, minirst.section, exclude=[b'config'])
+ topics = findtopics(helptable, exclude=[b'config'])
+ helpprinter(ui, topics, minirst.section)
ui.write(minirst.section(_(b"Extensions")))
ui.write(
@@ -156,7 +164,8 @@
for extensionname in sorted(allextensionnames()):
mod = extensions.load(ui, extensionname, None)
ui.write(minirst.subsection(extensionname))
- ui.write(b"%s\n\n" % gettext(pycompat.getdoc(mod)))
+ ext_doc = help.ext_help(ui, mod)
+ ui.write(b"%s\n\n" % ext_doc)
cmdtable = getattr(mod, 'cmdtable', None)
if cmdtable:
ui.write(minirst.subsubsection(_(b'Commands')))
@@ -165,10 +174,207 @@
cmdtable,
minirst.subsubsubsection,
minirst.subsubsubsubsection,
+ debugcmds=debugcmds,
)
-def showtopic(ui, topic):
+def showcommandlist(ui, debugcmds=False):
+ """Render a plain text list of all command names
+
+ Args:
+ ui: the UI object to output to
+ debugcmds: whether to include debug commands
+ """
+ cmdnames = allcommandnames(table, debugcmds=debugcmds)
+ for mainname in cmdnames.keys():
+        # Make does not like colons in filenames (or in anything it
+        # considers a filename). We use command names as targets, so
+        # the restriction applies here. For now, let's skip commands
+        # with colons in them (at this time that is only the
+        # `admin::verify` advanced command).
+ if b'::' in mainname:
+ continue
+ ui.write(mainname)
+ ui.write(b" ")
+
+
+def showtopiclist(ui):
+ """Render a plain text list of all help topic names
+
+ Args:
+ ui: the UI object to output to
+ """
+ for topic in helptable:
+ topicname = topic[0][0]
+ if help.filtertopic(ui, topicname):
+ continue
+ ui.write(topicname)
+ ui.write(b" ")
+
+
+def showextensionlist(ui):
+ """Render a plain text list of all extension names
+
+ Args:
+ ui: the UI object to output to
+ """
+ for extensionname in allextensionnames():
+ ui.write(extensionname)
+ ui.write(b" ")
+
+
+def showhelpindex(ui, debugcmds=False):
+ """Render restructured text for a complete mercurial help index
+
+ This index will show a list of commands, followed by a list of help topics,
+ and finally a list of extensions. These lists are split in categories and
+ ordered 'nicely' as defined by alphabetical and categeory order.
+
+ Each entry in this index is a reference to the specific help page of the
+ command, topic, or extension at hand.
+ """
+ ui.write(minirst.section(_(b"Mercurial Distributed SCM")))
+
+ missingdoc = _(b"(no help text available)")
+
+ cats, h, syns = help._getcategorizedhelpcmds(ui, table, None)
+ ui.write(minirst.subsection(_(b"Commands")))
+
+ for cat in help.CATEGORY_ORDER:
+ catfns = sorted(cats.get(cat, []))
+ if not catfns:
+ continue
+
+ catname = gettext(help.CATEGORY_NAMES[cat])
+ ui.write(minirst.subsubsection(catname))
+ for c in catfns:
+ url = b'hg-%s.html' % c
+ ui.write(b" :`%s <%s>`__: %s" % (c, url, h[c]))
+ syns[c].remove(c)
+ if syns[c]:
+ ui.write(_(b" (aliases: *%s*)") % (b', '.join(syns[c])))
+ ui.write(b"\n")
+ ui.write(b"\n\n")
+
+ ui.write(b"\n\n")
+
+ ui.write(minirst.subsection(_(b"Additional Help Topics")))
+ topiccats, topicsyns = help._getcategorizedhelptopics(ui, helptable)
+ for cat in help.TOPIC_CATEGORY_ORDER:
+ topics = topiccats.get(cat, [])
+ if not topics:
+ continue
+
+ catname = gettext(help.TOPIC_CATEGORY_NAMES[cat])
+ ui.write(minirst.subsubsection(catname))
+ for t, desc in topics:
+ url = b'topic-%s.html' % t
+ ui.write(b" :`%s <%s>`__: %s" % (t, url, desc))
+ topicsyns[t].remove(t)
+ if topicsyns[t]:
+ ui.write(_(b" (aliases: *%s*)") % (b', '.join(topicsyns[t])))
+ ui.write(b"\n")
+ ui.write(b"\n\n")
+
+ ui.write(b"\n\n")
+
+ # Add an alphabetical list of extensions, categorized by group.
+ sectionkeywords = [
+ (b"(ADVANCED)", _(b"(ADVANCED)")),
+ (b"(EXPERIMENTAL)", _(b"(EXPERIMENTAL)")),
+ (b"(DEPRECATED)", _(b"(DEPRECATED)")),
+ ]
+ extensionsections = [
+ (b"Extensions", []),
+ (b"Advanced Extensions", []),
+ (b"Experimental Extensions", []),
+ (b"Deprecated Extensions", []),
+ ]
+ for extensionname in allextensionnames():
+ mod = extensions.load(ui, extensionname, None)
+ shortdoc, longdoc = _splitdoc(mod)
+ for i, kwds in enumerate(sectionkeywords):
+ if any([kwd in shortdoc for kwd in kwds]):
+ extensionsections[i + 1][1].append(
+ (extensionname, mod, shortdoc)
+ )
+ break
+ else:
+ extensionsections[0][1].append((extensionname, mod, shortdoc))
+ for sectiontitle, extinfos in extensionsections:
+ ui.write(minirst.subsection(_(sectiontitle)))
+ for extinfo in sorted(extinfos, key=lambda ei: ei[0]):
+ extensionname, mod, shortdoc = extinfo
+ url = b'ext-%s.html' % extensionname
+ ui.write(
+ minirst.subsubsection(b'`%s <%s>`__' % (extensionname, url))
+ )
+ ui.write(shortdoc)
+ ui.write(b'\n\n')
+ cmdtable = getattr(mod, 'cmdtable', None)
+ if cmdtable:
+ cmdnames = allcommandnames(cmdtable, debugcmds=debugcmds)
+ for f in sorted(cmdnames.keys()):
+ d = get_cmd(cmdnames[f], cmdtable)
+ ui.write(b':%s: ' % d[b'cmd'])
+ ui.write(d[b'desc'][0] or (missingdoc + b"\n"))
+ ui.write(b'\n')
+ ui.write(b'\n')
+
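
For orientation, a schematic sketch of the reST this index emits (one entry
shown; underline characters assume the usual minirst section/subsection/
subsubsection mapping, and the real output has many entries per category):

    Mercurial Distributed SCM
    """""""""""""""""""""""""

    Commands
    ========

    Repository creation
    -------------------

     :`init <hg-init.html>`__: create a new repository in the given directory
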
+
+def showcommand(ui, mainname):
+ # Always pass debugcmds=True so that we find whatever command we are told
+ # to display.
+ cmdnames = allcommandnames(table, debugcmds=True)
+ allnames = cmdnames[mainname]
+ d = get_cmd(allnames, table)
+
+ header = _rendertpl(
+ 'cmdheader.txt',
+ {
+ 'cmdname': mainname,
+ 'cmdtitle': minirst.section(b'hg ' + mainname),
+ 'cmdshortdesc': minirst.subsection(d[b'desc'][0]),
+ 'cmdlongdesc': d[b'desc'][1],
+ 'cmdsynopsis': d[b'synopsis'],
+ },
+ )
+ ui.write(header.encode())
+
+ _optionsprinter(ui, d, minirst.subsubsection)
+ if d[b'aliases']:
+ ui.write(minirst.subsubsection(_(b"Aliases")))
+ ui.write(b"::\n\n ")
+ ui.write(b", ".join(d[b'aliases']))
+ ui.write(b"\n")
+
+
+def _splitdoc(obj):
+ objdoc = pycompat.getdoc(obj)
+ firstnl = objdoc.find(b'\n')
+ if firstnl > 0:
+ shortdoc = objdoc[:firstnl]
+ longdoc = objdoc[firstnl + 1 :]
+ else:
+ shortdoc = objdoc
+ longdoc = b''
+ return shortdoc.lstrip(), longdoc.lstrip()
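
A hedged example with a stand-in module object (pycompat.getdoc returns the
docstring as bytes; the first line becomes the short help, the remainder the
long help):

    class _fakemod:
        """do something useful (EXPERIMENTAL)

        A longer description shown on the extension's own page.
        """

    short, rest = _splitdoc(_fakemod)
    # short == b'do something useful (EXPERIMENTAL)'
    # rest starts with b'A longer description ...'
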
+
+
+def _rendertpl(tplname, data):
+ tplpath = os.path.join(os.path.dirname(__file__), 'templates', tplname)
+ with open(tplpath, 'r') as f:
+ tpl = f.read()
+
+ if isinstance(tpl, bytes):
+ tpl = tpl.decode()
+ for k in data:
+ data[k] = data[k].decode()
+
+ return tpl % data
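
Usage mirrors showtopic() below: each %(key)s slot in the template file is
filled from `data`, with bytes values decoded first. A hedged sketch:

    page = _rendertpl(
        'topicheader.txt',
        {'topicname': b'revsets', 'topictitle': minirst.section(b'Revsets')},
    )
    ui.write(page.encode())
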
+
+
+def gettopicstable():
extrahelptable = [
([b"common"], b'', loaddoc(b'common'), help.TOPIC_CATEGORY_MISC),
([b"hg.1"], b'', loaddoc(b'hg.1'), help.TOPIC_CATEGORY_CONFIG),
@@ -180,6 +386,7 @@
help.TOPIC_CATEGORY_CONFIG,
),
([b"hgrc.5"], b'', loaddoc(b'hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hg-ssh.8.gendoc"], b'', b'', help.TOPIC_CATEGORY_CONFIG),
(
[b"hgignore.5.gendoc"],
b'',
@@ -193,16 +400,59 @@
help.TOPIC_CATEGORY_CONFIG,
),
]
- helpprinter(ui, helptable + extrahelptable, None, include=[topic])
+ return helptable + extrahelptable
-def helpprinter(ui, helptable, sectionfunc, include=[], exclude=[]):
+def findtopics(helptable, include=[], exclude=[]):
+ """Find topics whose names match the given include/exclude rules
+
+ Note that exclude rules take precedence over include rules.
+ """
+ found = []
for h in helptable:
names, sec, doc = h[0:3]
if exclude and names[0] in exclude:
continue
if include and names[0] not in include:
continue
+ found.append((names, sec, doc))
+ return found
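
Note the precedence: a name appearing in both lists is dropped. A quick
illustration (topic name taken from the table above):

    # the exclude rule fires before include is consulted
    assert findtopics(gettopicstable(),
                      include=[b"hgrc.5"], exclude=[b"hgrc.5"]) == []
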
+
+
+def showtopic(ui, topic, wraptpl=False):
+ """Render a help topic
+
+ Args:
+ ui: the UI object to output to
+ topic: the topic name to output
+ wraptpl: whether to wrap the output in the individual help topic
+ pages' header/footer
+ """
+ found = findtopics(gettopicstable(), include=[topic])
+ if not found:
+ ui.write_err(_(b"ERROR: no such topic: %s\n") % topic)
+ sys.exit(1)
+
+ if wraptpl:
+ header = _rendertpl(
+ 'topicheader.txt',
+ {'topicname': topic, 'topictitle': minirst.section(found[0][1])},
+ )
+ ui.write(header.encode())
+ helpprinter(ui, found, None)
+ return True
+
+
+def helpprinter(ui, topics, sectionfunc):
+ """Print a help topic
+
+ Args:
+ ui: the UI object to output to
+ topics: a list of help topics to output
+ sectionfunc: a callback to write the section title
+ """
+ for h in topics:
+ names, sec, doc = h[0:3]
for name in names:
ui.write(b".. _%s:\n" % name)
ui.write(b"\n")
@@ -214,7 +464,42 @@
ui.write(b"\n")
-def commandprinter(ui, cmdtable, sectionfunc, subsectionfunc):
+def showextension(ui, extensionname, debugcmds=False):
+ """Render the help text for an extension
+
+ Args:
+ ui: the UI object to output to
+ extensionname: the name of the extension to output
+ debugcmds: whether to include the extension's debug commands, if any
+ """
+ mod = extensions.load(ui, extensionname, None)
+
+ header = _rendertpl(
+ 'extheader.txt',
+ {'extname': extensionname, 'exttitle': minirst.section(extensionname)},
+ )
+ ui.write(header.encode())
+
+ shortdoc, longdoc = _splitdoc(mod)
+ if shortdoc:
+ ui.write(b"%s\n\n" % gettext(shortdoc))
+ if longdoc:
+ ui.write(minirst.subsection(_(b"Description")))
+ ui.write(b"%s\n\n" % gettext(longdoc))
+
+ cmdtable = getattr(mod, 'cmdtable', None)
+ if cmdtable:
+ ui.write(minirst.subsection(_(b'Commands')))
+ commandprinter(
+ ui,
+ cmdtable,
+ minirst.subsubsection,
+ minirst.subsubsubsection,
+ debugcmds=debugcmds,
+ )
+
+
+def commandprinter(ui, cmdtable, sectionfunc, subsectionfunc, debugcmds=False):
"""Render restructuredtext describing a list of commands and their
documentation, grouped by command category.
@@ -235,11 +520,7 @@
sectionfunc: minirst function to format command category headers
subsectionfunc: minirst function to format command headers
"""
- h = {}
- for c, attr in cmdtable.items():
- f = c.split(b"|")[0]
- f = f.lstrip(b"^")
- h[f] = c
+ h = allcommandnames(cmdtable, debugcmds=debugcmds)
cmds = h.keys()
def helpcategory(cmd):
@@ -276,8 +557,6 @@
ui.write(sectionfunc(help.CATEGORY_NAMES[category]))
# Print each command in the category
for f in sorted(categorycmds):
- if f.startswith(b"debug"):
- continue
d = get_cmd(h[f], cmdtable)
ui.write(subsectionfunc(d[b'cmd']))
# short description
@@ -292,28 +571,12 @@
ui.write(b'\n')
# description
ui.write(b"%s\n\n" % d[b'desc'][1])
+
# options
- opt_output = list(d[b'opts'])
- if opt_output:
- opts_len = max([len(line[0]) for line in opt_output])
- ui.write(_(b"Options:\n\n"))
- multioccur = False
- for optstr, desc in opt_output:
- if desc:
- s = b"%-*s %s" % (opts_len, optstr, desc)
- else:
- s = optstr
- ui.write(b"%s\n" % s)
- if optstr.endswith(b"[+]>"):
- multioccur = True
- if multioccur:
- ui.write(
- _(
- b"\n[+] marked option can be specified"
- b" multiple times\n"
- )
- )
- ui.write(b"\n")
+ def _optsection(s):
+ return b"%s:\n\n" % s
+
+ _optionsprinter(ui, d, _optsection)
# aliases
if d[b'aliases']:
# Note the empty comment, this is required to separate this
@@ -324,14 +587,67 @@
)
+def _optionsprinter(ui, cmd, sectionfunc):
+ """Outputs the list of options for a given command object"""
+ opt_output = list(cmd[b'opts'])
+ if opt_output:
+ opts_len = max([len(line[0]) for line in opt_output])
+ ui.write(sectionfunc(_(b"Options")))
+ multioccur = False
+ for optstr, desc in opt_output:
+ if desc:
+ s = b"%-*s %s" % (opts_len, optstr, desc)
+ else:
+ s = optstr
+ ui.write(b"%s\n" % s)
+ if optstr.endswith(b"[+]>"):
+ multioccur = True
+ if multioccur:
+ ui.write(
+ _(b"\n[+] marked option can be specified multiple times\n")
+ )
+ ui.write(b"\n")
+
+
+def allcommandnames(cmdtable, debugcmds=False):
+ """Get a collection of all command names in the given command table
+
+ Args:
+ cmdtable: the command table to get the names from
+ debugcmds: whether to include debug commands
+
+ Returns a dictionary where the keys are the main command names, and the
+ values are the "raw" names (in the form of `name|alias1|alias2`).
+ """
+ allcmdnames = {}
+ for rawnames, attr in cmdtable.items():
+ mainname = rawnames.split(b"|")[0].lstrip(b"^")
+ if not debugcmds and mainname.startswith(b"debug"):
+ continue
+ allcmdnames[mainname] = rawnames
+ return allcmdnames
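
For example (values are placeholders; real cmdtable values are option tuples),
a key b"^clone|cl" produces the mapping below, and a hypothetical b"debugfoo"
only appears when debug commands are requested:

    table_ = {b"^clone|cl": None, b"debugfoo": None}
    assert allcommandnames(table_) == {b"clone": b"^clone|cl"}
    assert b"debugfoo" in allcommandnames(table_, debugcmds=True)
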
+
+
def allextensionnames():
+ """Get a set of all known extension names"""
return set(extensions.enabled().keys()) | set(extensions.disabled().keys())
if __name__ == "__main__":
- doc = b'hg.1.gendoc'
- if len(sys.argv) > 1:
- doc = encoding.strtolocal(sys.argv[1])
+ parser = argparse.ArgumentParser(
+ prog='gendoc', description="Generate Mercurial documentation files"
+ )
+ parser.add_argument('doc', default='hg.1.gendoc', nargs='?')
+ parser.add_argument(
+ '-d',
+ '--debug-cmds',
+ action='store_true',
+ help="Show debug commands in help pages",
+ )
+ args = parser.parse_args()
+
+ doc = encoding.strtolocal(args.doc)
+ debugcmds = args.debug_cmds
ui = uimod.ui.load()
# Trigger extensions to load. This is disabled by default because it uses
@@ -339,7 +655,30 @@
if encoding.environ.get(b'GENDOC_LOAD_CONFIGURED_EXTENSIONS', b'0') != b'0':
extensions.loadall(ui)
+ # ui.debugflag determines if the help module returns debug commands to us.
+ ui.debugflag = debugcmds
+
+ # Render the 'all-in-one' giant documentation file
if doc == b'hg.1.gendoc':
showdoc(ui)
+ # Render a command/help-topic/extension name list (for internal use)
+ elif doc == b'commandlist':
+ showcommandlist(ui, debugcmds=debugcmds)
+ elif doc == b'topiclist':
+ showtopiclist(ui)
+ elif doc == b'extensionlist':
+ showextensionlist(ui)
+ # Render the help index/main page
+ elif doc == b'index':
+ showhelpindex(ui, debugcmds=debugcmds)
+ # Render an individual command/help-topic/extension page
+ elif doc.startswith(b'cmd-'):
+ showcommand(ui, doc[4:])
+ elif doc.startswith(b'topic-'):
+ showtopic(ui, doc[6:], wraptpl=True)
+ elif doc.startswith(b'ext-'):
+ showextension(ui, doc[4:], debugcmds=debugcmds)
+ # Render a help-topic page without any title/footer, for later inclusion
+ # into a hand-written help text file
else:
- showtopic(ui, encoding.strtolocal(sys.argv[1]))
+ showtopic(ui, doc)
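
With the dispatch above, each invocation renders exactly one output. Hedged
examples (assuming this script is doc/gendoc.py, normally driven by the
generated doc/*.mk makefiles):

    # python gendoc.py commandlist    -> space-separated command names
    # python gendoc.py index          -> reST for the help index page
    # python gendoc.py cmd-clone      -> reST page for `hg clone`
    # python gendoc.py topic-revsets  -> reST page for the revsets topic
    # python gendoc.py ext-churn      -> reST page for the churn extension
    # python gendoc.py --debug-cmds hg.1.gendoc  (include debug commands)
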
--- a/doc/hgmanpage.py Thu Jan 11 20:37:34 2024 +0100
+++ b/doc/hgmanpage.py Sat Oct 26 04:16:00 2024 +0200
@@ -95,7 +95,6 @@
class Writer(writers.Writer):
-
supported = 'manpage'
"""Formats this writer supports."""
@@ -297,7 +296,7 @@
(u'´', u"\\'"),
(u'`', u'\\(ga'),
]
- for (in_char, out_markup) in replace_pairs:
+ for in_char, out_markup in replace_pairs:
text = text.replace(in_char, out_markup)
# unicode
text = self.deunicode(text)
--- a/doc/runrst Thu Jan 11 20:37:34 2024 +0100
+++ b/doc/runrst Sat Oct 26 04:16:00 2024 +0200
@@ -13,6 +13,7 @@
"""
+import re
import sys
try:
@@ -31,13 +32,63 @@
)
sys.exit(-1)
+# Whether we are rendering individual help pages (one per command, topic, or
+# extension). If false, we are rendering a monolithic page with all topics
+# together.
+is_individual_pages_mode = False
+
+
+def make_cmd_ref_uri(cmd):
+ if is_individual_pages_mode:
+ return "hg-%s.html" % cmd
+ else:
+ return "hg.1.html#%s" % cmd
+
+
+known_refs = None
+
+
+def load_known_refs(fname):
+ try:
+ with open(fname, 'r') as fp:
+ text = fp.read()
+ return re.split(r'[ \n]+', text)
+ except OSError:
+ sys.stderr.write(
+ "abort: couldn't find '%', please run documentation generation "
+ "through the Makefile, or run 'make knownrefs'\n"
+ )
+ sys.exit(-1)
+
+
+def find_known_ref(ref):
+ global known_refs
+ if known_refs is None:
+ cmds = load_known_refs('commandlist.txt')
+ topics = load_known_refs('topiclist.txt')
+ exts = load_known_refs('extensionlist.txt')
+ known_refs = {'hg': cmds, 'topic': topics, 'ext': exts}
+ for reftype, refnames in known_refs.items():
+ if ref in refnames:
+ return reftype
+ return None
+
+
+def make_any_ref_uri(ref):
+ if is_individual_pages_mode:
+ # Try to find if ref is a command, topic, or extension. If not,
+ # reference the anchor in the main hg.1 help page.
+ reftype = find_known_ref(ref)
+ if reftype:
+ return '%s-%s.html' % (reftype, ref)
+ return "hg.1.html#%s" % ref
+
def role_hg(name, rawtext, text, lineno, inliner, options=None, content=None):
text = "hg " + utils.unescape(text)
linktext = nodes.literal(rawtext, text)
parts = text.split()
cmd, args = parts[1], parts[2:]
- refuri = "hg.1.html#%s" % cmd
+ refuri = make_cmd_ref_uri(cmd)
if cmd == 'help' and args:
if args[0] == 'config':
# :hg:`help config`
@@ -48,9 +99,9 @@
elif len(args) >= 2 and args[0] == '-c':
# :hg:`help -c COMMAND ...` is equivalent to :hg:`COMMAND`
# (mainly for :hg:`help -c config`)
- refuri = "hg.1.html#%s" % args[1]
+ refuri = make_cmd_ref_uri(args[1])
else:
- refuri = "hg.1.html#%s" % args[0]
+ refuri = make_any_ref_uri(args[0])
node = nodes.reference(rawtext, '', linktext, refuri=refuri)
return [node], []
@@ -65,4 +116,8 @@
writer = sys.argv[1]
del sys.argv[1]
+ if sys.argv[1] == '--hg-individual-pages':
+ is_individual_pages_mode = True
+ del sys.argv[1]
+
core.publish_cmdline(writer_name=writer)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/templates/cmdheader.txt Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,22 @@
+.. _hg-%(cmdname)s.1:
+
+%(cmdtitle)s
+
+%(cmdshortdesc)s
+
+.. contents::
+ :backlinks: top
+ :class: htmlonly
+ :depth: 1
+
+Synopsis
+--------
+
+::
+
+ %(cmdsynopsis)s
+
+Description
+-----------
+%(cmdlongdesc)s
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/templates/extheader.txt Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,9 @@
+.. _ext-%(extname)s:
+
+%(exttitle)s
+
+.. contents::
+ :backlinks: top
+ :class: htmlonly
+ :depth: 2
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/templates/topicheader.txt Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,9 @@
+.. _topic-%(topicname)s:
+
+%(topictitle)s
+
+.. contents::
+ :backlinks: top
+ :class: htmlonly
+ :depth: 2
+
--- a/hg Thu Jan 11 20:37:34 2024 +0100
+++ b/hg Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
import sys
--- a/hgdemandimport/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgdemandimport/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,6 +11,7 @@
# demand loading is per-package. Keeping demandimport in the mercurial package
# would disable demand loading for any modules in mercurial.
+from __future__ import annotations
import os
import sys
--- a/hgdemandimport/demandimportpy3.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgdemandimport/demandimportpy3.py Sat Oct 26 04:16:00 2024 +0200
@@ -23,6 +23,8 @@
enabled.
"""
+from __future__ import annotations
+
import contextlib
import importlib.util
import sys
--- a/hgdemandimport/tracing.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgdemandimport/tracing.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import contextlib
import os
--- a/hgext/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
--- a/hgext/absorb.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/absorb.py Sat Oct 26 04:16:00 2024 +0200
@@ -31,6 +31,7 @@
# * Converge getdraftstack() with other code in core
# * move many attributes on fixupstate to be private
+from __future__ import annotations
import collections
--- a/hgext/acl.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/acl.py Sat Oct 26 04:16:00 2024 +0200
@@ -213,6 +213,7 @@
'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
@@ -279,7 +280,6 @@
def _getusers(ui, group):
-
# First, try to use group definition from section [acl.groups]
hgrcusers = ui.configlist(b'acl.groups', group)
if hgrcusers:
@@ -294,12 +294,10 @@
def _usermatch(ui, user, usersorgroups):
-
if usersorgroups == b'*':
return True
for ug in usersorgroups.replace(b',', b' ').split():
-
if ug.startswith(b'!'):
# Test for excluded user or group. Format:
# if ug is a user name: !username
@@ -368,7 +366,6 @@
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
-
ensureenabled(ui)
if hooktype not in [b'pretxnchangegroup', b'pretxncommit', b'prepushkey']:
--- a/hgext/amend.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/amend.py Sat Oct 26 04:16:00 2024 +0200
@@ -10,6 +10,7 @@
``commit --amend`` but does not prompt an editor.
"""
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/automv.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/automv.py Sat Oct 26 04:16:00 2024 +0200
@@ -24,6 +24,7 @@
#
# See http://markmail.org/thread/5pxnljesvufvom57 for context.
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/beautifygraph.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/beautifygraph.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,6 +11,7 @@
A terminal with UTF-8 support and monospace narrow text are required.
'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/blackbox.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/blackbox.py Sat Oct 26 04:16:00 2024 +0200
@@ -42,6 +42,7 @@
"""
+from __future__ import annotations
import re
--- a/hgext/bookflow.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/bookflow.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,12 +1,12 @@
"""implements bookmark-based branching (EXPERIMENTAL)
- - Disables creation of new branches (config: enable_branches=False).
- - Requires an active bookmark on commit (config: require_bookmark=True).
- - Doesn't move the active bookmark on update, only on commit.
- - Requires '--rev' for moving an existing bookmark.
- - Protects special bookmarks (config: protect=@).
+- Disables creation of new branches (config: enable_branches=False).
+- Requires an active bookmark on commit (config: require_bookmark=True).
+- Doesn't move the active bookmark on update, only on commit.
+- Requires '--rev' for moving an existing bookmark.
+- Protects special bookmarks (config: protect=@).
- flow related commands
+flow related commands
:hg book NAME: create a new bookmark
:hg book NAME -r REV: move bookmark to revision (fast-forward)
@@ -14,6 +14,8 @@
:hg push -B .: push active bookmark
"""
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial import (
bookmarks,
--- a/hgext/bugzilla.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/bugzilla.py Sat Oct 26 04:16:00 2024 +0200
@@ -291,6 +291,7 @@
Changeset commit comment. Bug 1234.
'''
+from __future__ import annotations
import json
import re
--- a/hgext/censor.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/censor.py Sat Oct 26 04:16:00 2024 +0200
@@ -24,12 +24,14 @@
function in a meaningful way. Such commands only tolerate censored file
revisions if they are allowed by the "censor.policy=ignore" config option.

As having a censored version in a checkout is impractical, the current head
revisions of the repository are checked. If the revision to be censored is in
-any of them the command will abort.
+any of them the command will abort. You can configure this behavior using the
+following option:
-A few informative commands such as ``hg grep`` will unconditionally
-ignore censored data and merely report that it was encountered.
+ `censor.policy`
+ :config-doc:`censor.policy`
"""
+from __future__ import annotations
from mercurial.i18n import _
from mercurial.node import short
--- a/hgext/children.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/children.py Sat Oct 26 04:16:00 2024 +0200
@@ -14,6 +14,7 @@
"children(REV)"` instead.
'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/churn.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/churn.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
'''command to display statistics about repository history'''
+from __future__ import annotations
import datetime
import os
--- a/hgext/clonebundles.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/clonebundles.py Sat Oct 26 04:16:00 2024 +0200
@@ -308,6 +308,7 @@
If the file was already deleted, the command must still succeed.
"""
+from __future__ import annotations
import os
import weakref
@@ -785,9 +786,8 @@
else:
cmd = repo.ui.config(b'clone-bundles', b'upload-command')
url = repo.ui.config(b'clone-bundles', b'url-template')
- filepath = procutil.shellquote(bundle.filepath)
variables = {
- b'HGCB_BUNDLE_PATH': filepath,
+ b'HGCB_BUNDLE_PATH': bundle.filepath,
b'HGCB_BUNDLE_BASENAME': basename,
}
env = procutil.shellenviron(environ=variables)
--- a/hgext/closehead.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/closehead.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
'''close arbitrary heads without checking them out first'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/commitextras.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/commitextras.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
'''adds a new flag extras to commit (ADVANCED)'''
+from __future__ import annotations
import re
--- a/hgext/convert/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
'''import revisions from foreign VCS repositories into Mercurial'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import registrar
--- a/hgext/convert/bzr.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/bzr.py Sat Oct 26 04:16:00 2024 +0200
@@ -9,6 +9,8 @@
# with Bazaar or `bzr`, that was formerly known as Bazaar-NG;
# it cannot access `bar` repositories, but they were never used very much.
+from __future__ import annotations
+
import os
from mercurial.i18n import _
--- a/hgext/convert/common.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/common.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,13 +5,21 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import base64
-import datetime
import os
import pickle
import re
import shlex
import subprocess
+import typing
+
+from typing import (
+ Any,
+ AnyStr,
+ Optional,
+)
from mercurial.i18n import _
from mercurial.pycompat import open
@@ -22,11 +30,33 @@
pycompat,
util,
)
-from mercurial.utils import procutil
+from mercurial.utils import (
+ dateutil,
+ procutil,
+)
+
+if typing.TYPE_CHECKING:
+ from typing import (
+ overload,
+ )
+ from mercurial import (
+ ui as uimod,
+ )
propertycache = util.propertycache
+if typing.TYPE_CHECKING:
+
+ @overload
+ def _encodeornone(d: str) -> bytes:
+ pass
+
+ @overload
+ def _encodeornone(d: None) -> None:
+ pass
+
+
def _encodeornone(d):
if d is None:
return
@@ -34,7 +64,7 @@
class _shlexpy3proxy:
- def __init__(self, l):
+ def __init__(self, l: shlex.shlex) -> None:
self._l = l
def __iter__(self):
@@ -44,15 +74,22 @@
return _encodeornone(self._l.get_token())
@property
- def infile(self):
- return self._l.infile or b'<unknown>'
+ def infile(self) -> bytes:
+ if self._l.infile is not None:
+ return encoding.strtolocal(self._l.infile)
+ return b'<unknown>'
@property
- def lineno(self):
+ def lineno(self) -> int:
return self._l.lineno
-def shlexer(data=None, filepath=None, wordchars=None, whitespace=None):
+def shlexer(
+ data=None,
+ filepath: Optional[bytes] = None,
+ wordchars: Optional[bytes] = None,
+ whitespace: Optional[bytes] = None,
+):
if data is None:
data = open(filepath, b'r', encoding='latin1')
else:
@@ -61,7 +98,8 @@
b'shlexer only accepts data or filepath, not both'
)
data = data.decode('latin1')
- l = shlex.shlex(data, infile=filepath, posix=True)
+ infile = encoding.strfromlocal(filepath) if filepath is not None else None
+ l = shlex.shlex(data, infile=infile, posix=True)
if whitespace is not None:
l.whitespace_split = True
l.whitespace += whitespace.decode('latin1')
@@ -70,8 +108,8 @@
return _shlexpy3proxy(l)
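
A hedged token sketch: passing whitespace=b',' makes commas act as separators
too, which is how convcmd's splicemap parser tokenizes its lines further down:

    lex = shlexer(data=b'child parent1,parent2', whitespace=b',')
    assert list(lex) == [b'child', b'parent1', b'parent2']
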
-def encodeargs(args):
- def encodearg(s):
+def encodeargs(args: Any) -> bytes:
+ def encodearg(s: bytes) -> bytes:
lines = base64.encodebytes(s)
lines = [l.splitlines()[0] for l in pycompat.iterbytestr(lines)]
return b''.join(lines)
@@ -80,7 +118,7 @@
return encodearg(s)
-def decodeargs(s):
+def decodeargs(s: bytes) -> Any:
s = base64.decodebytes(s)
return pickle.loads(s)
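
encodeargs/decodeargs form a pickle-plus-base64 round trip, used to pass
arguments to a helper process. A hedged sanity sketch:

    payload = [b'log', {b'rev': b'tip'}]
    assert decodeargs(encodeargs(payload)) == payload
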
@@ -89,7 +127,9 @@
pass
-def checktool(exe, name=None, abort=True):
+def checktool(
+ exe: bytes, name: Optional[bytes] = None, abort: bool = True
+) -> None:
name = name or exe
if not procutil.findexe(exe):
if abort:
@@ -103,25 +143,25 @@
pass
-SKIPREV = b'SKIP'
+SKIPREV: bytes = b'SKIP'
class commit:
def __init__(
self,
- author,
- date,
- desc,
+ author: bytes,
+ date: bytes,
+ desc: bytes,
parents,
- branch=None,
+ branch: Optional[bytes] = None,
rev=None,
extra=None,
sortkey=None,
saverev=True,
- phase=phases.draft,
+ phase: int = phases.draft,
optparents=None,
ctx=None,
- ):
+ ) -> None:
self.author = author or b'unknown'
self.date = date or b'0 0'
self.desc = desc
@@ -139,7 +179,13 @@
class converter_source:
"""Conversion source interface"""
- def __init__(self, ui, repotype, path=None, revs=None):
+ def __init__(
+ self,
+ ui: "uimod.ui",
+ repotype: bytes,
+ path: Optional[bytes] = None,
+ revs=None,
+ ) -> None:
"""Initialize conversion source (or raise NoRepo("message")
exception if path is not a valid repository)"""
self.ui = ui
@@ -149,7 +195,9 @@
self.encoding = b'utf-8'
- def checkhexformat(self, revstr, mapname=b'splicemap'):
+ def checkhexformat(
+ self, revstr: bytes, mapname: bytes = b'splicemap'
+ ) -> None:
"""fails if revstr is not a 40 byte hex. mercurial and git both uses
such format for their revision numbering
"""
@@ -159,10 +207,10 @@
% (mapname, revstr)
)
- def before(self):
+ def before(self) -> None:
pass
- def after(self):
+ def after(self) -> None:
pass
def targetfilebelongstosource(self, targetfilename):
@@ -221,7 +269,7 @@
"""
raise NotImplementedError
- def recode(self, s, encoding=None):
+ def recode(self, s: AnyStr, encoding: Optional[bytes] = None) -> bytes:
if not encoding:
encoding = self.encoding or b'utf-8'
@@ -250,17 +298,17 @@
"""
raise NotImplementedError
- def converted(self, rev, sinkrev):
+ def converted(self, rev, sinkrev) -> None:
'''Notify the source that a revision has been converted.'''
- def hasnativeorder(self):
+ def hasnativeorder(self) -> bool:
"""Return true if this source has a meaningful, native revision
order. For instance, Mercurial revisions are store sequentially
while there is no such global ordering with Darcs.
"""
return False
- def hasnativeclose(self):
+ def hasnativeclose(self) -> bool:
"""Return true if this source has ability to close branch."""
return False
@@ -278,7 +326,7 @@
"""
return {}
- def checkrevformat(self, revstr, mapname=b'splicemap'):
+ def checkrevformat(self, revstr, mapname: bytes = b'splicemap') -> bool:
"""revstr is a string that describes a revision in the given
source control system. Return true if revstr has correct
format.
@@ -289,7 +337,7 @@
class converter_sink:
"""Conversion sink (target) interface"""
- def __init__(self, ui, repotype, path):
+ def __init__(self, ui: "uimod.ui", repotype: bytes, path: bytes) -> None:
"""Initialize conversion sink (or raise NoRepo("message")
exception if path is not a valid repository)
@@ -357,10 +405,10 @@
filter empty revisions.
"""
- def before(self):
+ def before(self) -> None:
pass
- def after(self):
+ def after(self) -> None:
pass
def putbookmarks(self, bookmarks):
@@ -383,17 +431,17 @@
class commandline:
- def __init__(self, ui, command):
+ def __init__(self, ui: "uimod.ui", command: bytes) -> None:
self.ui = ui
self.command = command
- def prerun(self):
+ def prerun(self) -> None:
pass
- def postrun(self):
+ def postrun(self) -> None:
pass
- def _cmdline(self, cmd, *args, **kwargs):
+ def _cmdline(self, cmd: bytes, *args: bytes, **kwargs) -> bytes:
kwargs = pycompat.byteskwargs(kwargs)
cmdline = [self.command, cmd] + list(args)
for k, v in kwargs.items():
@@ -414,7 +462,7 @@
cmdline = b' '.join(cmdline)
return cmdline
- def _run(self, cmd, *args, **kwargs):
+ def _run(self, cmd: bytes, *args: bytes, **kwargs):
def popen(cmdline):
p = subprocess.Popen(
procutil.tonativestr(cmdline),
@@ -427,13 +475,13 @@
return self._dorun(popen, cmd, *args, **kwargs)
- def _run2(self, cmd, *args, **kwargs):
+ def _run2(self, cmd: bytes, *args: bytes, **kwargs):
return self._dorun(procutil.popen2, cmd, *args, **kwargs)
- def _run3(self, cmd, *args, **kwargs):
+ def _run3(self, cmd: bytes, *args: bytes, **kwargs):
return self._dorun(procutil.popen3, cmd, *args, **kwargs)
- def _dorun(self, openfunc, cmd, *args, **kwargs):
+ def _dorun(self, openfunc, cmd: bytes, *args: bytes, **kwargs):
cmdline = self._cmdline(cmd, *args, **kwargs)
self.ui.debug(b'running: %s\n' % (cmdline,))
self.prerun()
@@ -442,20 +490,20 @@
finally:
self.postrun()
- def run(self, cmd, *args, **kwargs):
+ def run(self, cmd: bytes, *args: bytes, **kwargs):
p = self._run(cmd, *args, **kwargs)
output = p.communicate()[0]
self.ui.debug(output)
return output, p.returncode
- def runlines(self, cmd, *args, **kwargs):
+ def runlines(self, cmd: bytes, *args: bytes, **kwargs):
p = self._run(cmd, *args, **kwargs)
output = p.stdout.readlines()
p.wait()
self.ui.debug(b''.join(output))
return output, p.returncode
- def checkexit(self, status, output=b''):
+ def checkexit(self, status, output: bytes = b'') -> None:
if status:
if output:
self.ui.warn(_(b'%s error:\n') % self.command)
@@ -463,12 +511,12 @@
msg = procutil.explainexit(status)
raise error.Abort(b'%s %s' % (self.command, msg))
- def run0(self, cmd, *args, **kwargs):
+ def run0(self, cmd: bytes, *args: bytes, **kwargs):
output, status = self.run(cmd, *args, **kwargs)
self.checkexit(status, output)
return output
- def runlines0(self, cmd, *args, **kwargs):
+ def runlines0(self, cmd: bytes, *args: bytes, **kwargs):
output, status = self.runlines(cmd, *args, **kwargs)
self.checkexit(status, b''.join(output))
return output
@@ -491,7 +539,7 @@
# (and make happy Windows shells while doing this).
return argmax // 2 - 1
- def _limit_arglist(self, arglist, cmd, *args, **kwargs):
+ def _limit_arglist(self, arglist, cmd: bytes, *args: bytes, **kwargs):
cmdlen = len(self._cmdline(cmd, *args, **kwargs))
limit = self.argmax - cmdlen
numbytes = 0
@@ -508,13 +556,13 @@
if fl:
yield fl
- def xargs(self, arglist, cmd, *args, **kwargs):
+ def xargs(self, arglist, cmd: bytes, *args: bytes, **kwargs):
for l in self._limit_arglist(arglist, cmd, *args, **kwargs):
self.run0(cmd, *(list(args) + l), **kwargs)
class mapfile(dict):
- def __init__(self, ui, path):
+ def __init__(self, ui: "uimod.ui", path: bytes) -> None:
super(mapfile, self).__init__()
self.ui = ui
self.path = path
@@ -522,31 +570,34 @@
self.order = []
self._read()
- def _read(self):
+ def _read(self) -> None:
if not self.path:
return
try:
fp = open(self.path, b'rb')
except FileNotFoundError:
return
- for i, line in enumerate(fp):
- line = line.splitlines()[0].rstrip()
- if not line:
- # Ignore blank lines
- continue
- try:
- key, value = line.rsplit(b' ', 1)
- except ValueError:
- raise error.Abort(
- _(b'syntax error in %s(%d): key/value pair expected')
- % (self.path, i + 1)
- )
- if key not in self:
- self.order.append(key)
- super(mapfile, self).__setitem__(key, value)
- fp.close()
- def __setitem__(self, key, value):
+ try:
+ for i, line in enumerate(fp):
+ line = line.splitlines()[0].rstrip()
+ if not line:
+ # Ignore blank lines
+ continue
+ try:
+ key, value = line.rsplit(b' ', 1)
+ except ValueError:
+ raise error.Abort(
+ _(b'syntax error in %s(%d): key/value pair expected')
+ % (self.path, i + 1)
+ )
+ if key not in self:
+ self.order.append(key)
+ super(mapfile, self).__setitem__(key, value)
+ finally:
+ fp.close()
+
+ def __setitem__(self, key, value) -> None:
if self.fp is None:
try:
self.fp = open(self.path, b'ab')
@@ -559,18 +610,11 @@
self.fp.flush()
super(mapfile, self).__setitem__(key, value)
- def close(self):
+ def close(self) -> None:
if self.fp:
self.fp.close()
self.fp = None
-def makedatetimestamp(t):
- """Like dateutil.makedate() but for time t instead of current time"""
- tz = round(
- t
- - datetime.datetime.fromtimestamp(t)
- .replace(tzinfo=datetime.timezone.utc)
- .timestamp()
- )
- return t, tz
+def makedatetimestamp(t: float) -> dateutil.hgdate:
+ return dateutil.makedate(t)
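
dateutil.makedate(t) already returns the hgdate pair (unixtime, tzoffset)
using the local offset in effect at time t, which the removed code computed by
hand. A hedged usage sketch (offset is seconds west of UTC):

    when = makedatetimestamp(1700000000.0)
    # e.g. (1700000000, -3600) on a UTC+1 host, (1700000000, 0) under UTC
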
--- a/hgext/convert/convcmd.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/convcmd.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,10 +5,22 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
import heapq
import os
import shutil
+import typing
+
+from typing import (
+ AnyStr,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Union,
+)
from mercurial.i18n import _
from mercurial.pycompat import open
@@ -36,6 +48,11 @@
subversion,
)
+if typing.TYPE_CHECKING:
+ from mercurial import (
+ ui as uimod,
+ )
+
mapfile = common.mapfile
MissingTool = common.MissingTool
NoRepo = common.NoRepo
@@ -53,15 +70,14 @@
svn_sink = subversion.svn_sink
svn_source = subversion.svn_source
-orig_encoding = b'ascii'
+orig_encoding: bytes = b'ascii'
-def readauthormap(ui, authorfile, authors=None):
+def readauthormap(ui: "uimod.ui", authorfile, authors=None):
if authors is None:
authors = {}
with open(authorfile, b'rb') as afile:
for line in afile:
-
line = line.strip()
if not line or line.startswith(b'#'):
continue
@@ -86,7 +102,7 @@
return authors
-def recode(s):
+def recode(s: AnyStr) -> bytes:
if isinstance(s, str):
return s.encode(pycompat.sysstr(orig_encoding), 'replace')
else:
@@ -95,7 +111,7 @@
)
-def mapbranch(branch, branchmap):
+def mapbranch(branch: bytes, branchmap: Mapping[bytes, bytes]) -> bytes:
"""
>>> bmap = {b'default': b'branch1'}
>>> for i in [b'', None]:
@@ -147,7 +163,7 @@
]
-def convertsource(ui, path, type, revs):
+def convertsource(ui: "uimod.ui", path: bytes, type: bytes, revs):
exceptions = []
if type and type not in [s[0] for s in source_converters]:
raise error.Abort(_(b'%s: invalid source repository type') % type)
@@ -163,7 +179,9 @@
raise error.Abort(_(b'%s: missing or unsupported repository') % path)
-def convertsink(ui, path, type):
+def convertsink(
+ ui: "uimod.ui", path: bytes, type: bytes
+) -> Union[hgconvert.mercurial_sink, subversion.svn_sink]:
if type and type not in [s[0] for s in sink_converters]:
raise error.Abort(_(b'%s: invalid destination repository type') % type)
for name, sink in sink_converters:
@@ -178,7 +196,9 @@
class progresssource:
- def __init__(self, ui, source, filecount):
+ def __init__(
+ self, ui: "uimod.ui", source, filecount: Optional[int]
+ ) -> None:
self.ui = ui
self.source = source
self.progress = ui.makeprogress(
@@ -253,8 +273,7 @@
class converter:
- def __init__(self, ui, source, dest, revmapfile, opts):
-
+ def __init__(self, ui: "uimod.ui", source, dest, revmapfile, opts) -> None:
self.source = source
self.dest = dest
self.ui = ui
@@ -280,7 +299,7 @@
self.splicemap = self.parsesplicemap(opts.get(b'splicemap'))
self.branchmap = mapfile(ui, opts.get(b'branchmap'))
- def parsesplicemap(self, path):
+ def parsesplicemap(self, path: bytes) -> Dict[bytes, List[bytes]]:
"""check and validate the splicemap format and
return a child/parents dictionary.
Format checking has two parts.
@@ -295,31 +314,31 @@
return {}
m = {}
try:
- fp = open(path, b'rb')
- for i, line in enumerate(fp):
- line = line.splitlines()[0].rstrip()
- if not line:
- # Ignore blank lines
- continue
- # split line
- lex = common.shlexer(data=line, whitespace=b',')
- line = list(lex)
- # check number of parents
- if not (2 <= len(line) <= 3):
- raise error.Abort(
- _(
- b'syntax error in %s(%d): child parent1'
- b'[,parent2] expected'
+ with open(path, b'rb') as fp:
+ for i, line in enumerate(fp):
+ line = line.splitlines()[0].rstrip()
+ if not line:
+ # Ignore blank lines
+ continue
+ # split line
+ lex = common.shlexer(data=line, whitespace=b',')
+ line = list(lex)
+ # check number of parents
+ if not (2 <= len(line) <= 3):
+ raise error.Abort(
+ _(
+ b'syntax error in %s(%d): child parent1'
+ b'[,parent2] expected'
+ )
+ % (path, i + 1)
)
- % (path, i + 1)
- )
- for part in line:
- self.source.checkrevformat(part)
- child, p1, p2 = line[0], line[1:2], line[2:]
- if p1 == p2:
- m[child] = p1
- else:
- m[child] = p1 + p2
+ for part in line:
+ self.source.checkrevformat(part)
+ child, p1, p2 = line[0], line[1:2], line[2:]
+ if p1 == p2:
+ m[child] = p1
+ else:
+ m[child] = p1 + p2
# if file does not exist or error reading, exit
except IOError:
raise error.Abort(
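
For reference, a hedged illustration of the accepted splicemap line format and
the parent map it produces (identifiers shown schematically; each token must
pass source.checkrevformat()):

    #   child parent1          parses to  {b'child': [b'parent1']}
    #   child parent1,parent2  parses to  {b'child': [b'parent1', b'parent2']}
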
@@ -356,7 +375,7 @@
return parents
- def mergesplicemap(self, parents, splicemap):
+ def mergesplicemap(self, parents, splicemap) -> None:
"""A splicemap redefines child/parent relationships. Check the
map contains valid revision identifiers and merge the new
links in the source graph.
@@ -488,20 +507,19 @@
return s
- def writeauthormap(self):
+ def writeauthormap(self) -> None:
authorfile = self.authorfile
if authorfile:
self.ui.status(_(b'writing author map file %s\n') % authorfile)
- ofile = open(authorfile, b'wb+')
- for author in self.authors:
- ofile.write(
- util.tonativeeol(
- b"%s=%s\n" % (author, self.authors[author])
+ with open(authorfile, b'wb+') as ofile:
+ for author in self.authors:
+ ofile.write(
+ util.tonativeeol(
+ b"%s=%s\n" % (author, self.authors[author])
+ )
)
- )
- ofile.close()
- def readauthormap(self, authorfile):
+ def readauthormap(self, authorfile) -> None:
self.authors = readauthormap(self.ui, authorfile, self.authors)
def cachecommit(self, rev):
@@ -511,7 +529,7 @@
self.commitcache[rev] = commit
return commit
- def copy(self, rev):
+ def copy(self, rev) -> None:
commit = self.commitcache[rev]
full = self.opts.get(b'full')
changes = self.source.getchanges(rev, full)
@@ -563,7 +581,7 @@
self.source.converted(rev, newnode)
self.map[rev] = newnode
- def convert(self, sortmode):
+ def convert(self, sortmode) -> None:
try:
self.source.before()
self.dest.before()
@@ -628,7 +646,7 @@
finally:
self.cleanup()
- def cleanup(self):
+ def cleanup(self) -> None:
try:
self.dest.after()
finally:
@@ -636,7 +654,9 @@
self.map.close()
-def convert(ui, src, dest=None, revmapfile=None, **opts):
+def convert(
+ ui: "uimod.ui", src, dest: Optional[bytes] = None, revmapfile=None, **opts
+) -> None:
opts = pycompat.byteskwargs(opts)
global orig_encoding
orig_encoding = encoding.encoding
--- a/hgext/convert/cvs.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/cvs.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,15 +5,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import errno
import os
import re
import socket
from mercurial.i18n import _
-from mercurial.pycompat import (
- open,
-)
from mercurial import (
encoding,
error,
@@ -52,8 +51,8 @@
self.tags = {}
self.lastbranch = {}
self.socket = None
- self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1]
- self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1]
+ self.cvsroot = util.readfile(os.path.join(cvs, b"Root"))[:-1]
+ self.cvsrepo = util.readfile(os.path.join(cvs, b"Repository"))[:-1]
self.encoding = encoding.encoding
self._connect()
@@ -160,8 +159,7 @@
passw = b"A"
cvspass = os.path.expanduser(b"~/.cvspass")
try:
- pf = open(cvspass, b'rb')
- for line in pf.read().splitlines():
+ for line in util.readfile(cvspass).splitlines():
part1, part2 = line.split(b' ', 1)
# /1 :pserver:user@example.com:2401/cvsroot/foo
# Ah<Z
@@ -174,7 +172,6 @@
if part1 == format:
passw = part2
break
- pf.close()
except IOError as inst:
if inst.errno != errno.ENOENT:
if not getattr(inst, 'filename', None):
--- a/hgext/convert/cvsps.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/cvsps.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import functools
import os
import pickle
@@ -161,7 +163,7 @@
# Use the Root file in the sandbox, if it exists
try:
- root = open(os.path.join(b'CVS', b'Root'), b'rb').read().strip()
+ root = util.readfile(os.path.join(b'CVS', b'Root')).strip()
except IOError:
pass
@@ -195,16 +197,17 @@
if cache == b'update':
try:
ui.note(_(b'reading cvs log cache %s\n') % cachefile)
- oldlog = pickle.load(open(cachefile, b'rb'))
- for e in oldlog:
- if not (
- hasattr(e, 'branchpoints')
- and hasattr(e, 'commitid')
- and hasattr(e, 'mergepoint')
- ):
- ui.status(_(b'ignoring old cache\n'))
- oldlog = []
- break
+ with open(cachefile, b'rb') as fp:
+ oldlog = pickle.load(fp)
+ for e in oldlog:
+ if not (
+ hasattr(e, 'branchpoints')
+ and hasattr(e, 'commitid')
+ and hasattr(e, 'mergepoint')
+ ):
+ ui.status(_(b'ignoring old cache\n'))
+ oldlog = []
+ break
ui.note(_(b'cache has %d log entries\n') % len(oldlog))
except Exception as e:
@@ -526,7 +529,9 @@
# write the new cachefile
ui.note(_(b'writing cvs log cache %s\n') % cachefile)
- pickle.dump(log, open(cachefile, b'wb'))
+
+ with open(cachefile, b'wb') as fp:
+ pickle.dump(log, fp)
else:
log = oldlog
@@ -636,7 +641,6 @@
files = set()
c = None
for i, e in enumerate(log):
-
# Check if log entry belongs to the current changeset or not.
# Since CVS is file-centric, two different file revisions with
@@ -980,7 +984,6 @@
branches = {} # latest version number in each branch
ancestors = {} # parent branch
for cs in changesets:
-
if opts[b"ancestors"]:
if cs.branch not in branches and cs.parents and cs.parents[0].id:
ancestors[cs.branch] = (
--- a/hgext/convert/darcs.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/darcs.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
import re
import shutil
--- a/hgext/convert/filemap.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/filemap.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,8 +4,20 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import posixpath
+import typing
+
+from typing import (
+ Iterator,
+ Mapping,
+ MutableMapping,
+ Optional,
+ Set,
+ Tuple,
+ overload,
+)
from mercurial.i18n import _
from mercurial import (
@@ -14,10 +26,15 @@
)
from . import common
+if typing.TYPE_CHECKING:
+ from mercurial import (
+ ui as uimod,
+ )
+
SKIPREV = common.SKIPREV
-def rpairs(path):
+def rpairs(path: bytes) -> Iterator[Tuple[bytes, bytes]]:
"""Yield tuples with path split at '/', starting with the full path.
No leading, trailing or double '/', please.
>>> for x in rpairs(b'foo/bar/baz'): print(x)
@@ -33,6 +50,17 @@
yield b'.', path
+if typing.TYPE_CHECKING:
+
+ @overload
+ def normalize(path: bytes) -> bytes:
+ pass
+
+ @overload
+ def normalize(path: None) -> None:
+ pass
+
+
def normalize(path):
"""We use posixpath.normpath to support cross-platform path format.
However, it doesn't handle None input. So we wrap it up."""
@@ -46,7 +74,10 @@
A name can be mapped to itself, a new name, or None (omit from new
repository)."""
- def __init__(self, ui, path=None):
+ rename: MutableMapping[bytes, bytes]
+ targetprefixes: Optional[Set[bytes]]
+
+ def __init__(self, ui: "uimod.ui", path: Optional[bytes] = None) -> None:
self.ui = ui
self.include = {}
self.exclude = {}
@@ -56,10 +87,10 @@
if self.parse(path):
raise error.Abort(_(b'errors in filemap'))
- def parse(self, path):
+ def parse(self, path: Optional[bytes]) -> int:
errs = 0
- def check(name, mapping, listname):
+ def check(name: bytes, mapping, listname: bytes):
if not name:
self.ui.warn(
_(b'%s:%d: path to %s is missing\n')
@@ -110,7 +141,9 @@
cmd = lex.get_token()
return errs
- def lookup(self, name, mapping):
+ def lookup(
+ self, name: bytes, mapping: Mapping[bytes, bytes]
+ ) -> Tuple[bytes, bytes, bytes]:
name = normalize(name)
for pre, suf in rpairs(name):
try:
@@ -119,7 +152,7 @@
pass
return b'', name, b''
- def istargetfile(self, filename):
+ def istargetfile(self, filename: bytes) -> bool:
"""Return true if the given target filename is covered as a destination
of the filemap. This is useful for identifying what parts of the target
repo belong to the source repo and what parts don't."""
@@ -143,7 +176,7 @@
return True
return False
- def __call__(self, name):
+ def __call__(self, name: bytes) -> Optional[bytes]:
if self.include:
inc = self.lookup(name, self.include)[0]
else:
@@ -165,7 +198,7 @@
return newpre
return name
- def active(self):
+ def active(self) -> bool:
return bool(self.include or self.exclude or self.rename)
@@ -185,7 +218,9 @@
class filemap_source(common.converter_source):
- def __init__(self, ui, baseconverter, filemap):
+ def __init__(
+ self, ui: "uimod.ui", baseconverter, filemap: Optional[bytes]
+ ) -> None:
super(filemap_source, self).__init__(ui, baseconverter.repotype)
self.base = baseconverter
self.filemapper = filemapper(ui, filemap)
@@ -206,10 +241,10 @@
b'convert', b'ignoreancestorcheck'
)
- def before(self):
+ def before(self) -> None:
self.base.before()
- def after(self):
+ def after(self) -> None:
self.base.after()
def setrevmap(self, revmap):
@@ -243,7 +278,7 @@
self.convertedorder = converted
return self.base.setrevmap(revmap)
- def rebuild(self):
+ def rebuild(self) -> bool:
if self._rebuilt:
return True
self._rebuilt = True
@@ -276,7 +311,7 @@
def getheads(self):
return self.base.getheads()
- def getcommit(self, rev):
+ def getcommit(self, rev: bytes):
# We want to save a reference to the commit objects to be able
# to rewrite their parents later on.
c = self.commits[rev] = self.base.getcommit(rev)
@@ -292,7 +327,7 @@
return self.commits[rev]
return self.base.getcommit(rev)
- def _discard(self, *revs):
+ def _discard(self, *revs) -> None:
for r in revs:
if r is None:
continue
@@ -304,7 +339,7 @@
if self._rebuilt:
del self.children[r]
- def wanted(self, rev, i):
+ def wanted(self, rev, i) -> bool:
# Return True if we're directly interested in rev.
#
# i is an index selecting one of the parents of rev (if rev
@@ -332,7 +367,7 @@
# doesn't consider it significant, and this revision should be dropped.
return not files and b'close' not in self.commits[rev].extra
- def mark_not_wanted(self, rev, p):
+ def mark_not_wanted(self, rev, p) -> None:
# Mark rev as not interesting and update data structures.
if p is None:
@@ -347,7 +382,7 @@
self.parentmap[rev] = self.parentmap[p]
self.wantedancestors[rev] = self.wantedancestors[p]
- def mark_wanted(self, rev, parents):
+ def mark_wanted(self, rev, parents) -> None:
# Mark rev as wanted and update data structures.
# rev will be in the restricted graph, so children of rev in
@@ -474,7 +509,7 @@
return files, ncopies, ncleanp2
- def targetfilebelongstosource(self, targetfilename):
+ def targetfilebelongstosource(self, targetfilename: bytes) -> bool:
return self.filemapper.istargetfile(targetfilename)
def getfile(self, name, rev):
@@ -484,7 +519,7 @@
def gettags(self):
return self.base.gettags()
- def hasnativeorder(self):
+ def hasnativeorder(self) -> bool:
return self.base.hasnativeorder()
def lookuprev(self, rev):
--- a/hgext/convert/git.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/git.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
from mercurial.i18n import _
--- a/hgext/convert/gnuarch.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/gnuarch.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
import shutil
import stat
--- a/hgext/convert/hg.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/hg.py Sat Oct 26 04:16:00 2024 +0200
@@ -17,6 +17,8 @@
# the converted revision to have a different identity than the
# source.
+from __future__ import annotations
+
import os
import re
import time
--- a/hgext/convert/monotone.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/monotone.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,8 +6,13 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
import re
+from typing import (
+ Tuple,
+)
from mercurial.i18n import _
from mercurial.pycompat import open
@@ -43,9 +48,8 @@
if not os.path.exists(os.path.join(path, b'_MTN')):
# Could be a monotone repository (SQLite db file)
try:
- f = open(path, b'rb')
- header = f.read(16)
- f.close()
+ with open(path, b'rb') as f:
+ header = f.read(16)
except IOError:
header = b''
if header != b'SQLite format 3\x00':
@@ -122,7 +126,7 @@
return self.mtnstdioreadcommandoutput(command)
- def mtnstdioreadpacket(self):
+ def mtnstdioreadpacket(self) -> Tuple[bytes, bytes, int, bytes]:
read = None
commandnbr = b''
while read != b':':
@@ -161,14 +165,14 @@
raise error.Abort(
_(
b"bad mtn packet - unable to read full packet "
- b"read %s of %s"
+ b"read %s of %d"
)
% (len(read), length)
)
return (commandnbr, stream, length, read)
- def mtnstdioreadcommandoutput(self, command):
+ def mtnstdioreadcommandoutput(self, command) -> bytes:
retval = []
while True:
commandnbr, stream, length, output = self.mtnstdioreadpacket()
@@ -399,7 +403,7 @@
)
else:
self.ui.debug(
- b"mtn automate version %s - not using automate stdio "
+ b"mtn automate version %f - not using automate stdio "
b"(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version
)
--- a/hgext/convert/p4.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/p4.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import marshal
import re
--- a/hgext/convert/subversion.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/subversion.py Sat Oct 26 04:16:00 2024 +0200
@@ -2,6 +2,8 @@
#
# Copyright(C) 2007 Daniel Holth et al
+from __future__ import annotations
+
import codecs
import locale
import os
@@ -1425,7 +1427,6 @@
return self.join(b'hg-authormap')
def __init__(self, ui, repotype, path):
-
converter_sink.__init__(self, ui, repotype, path)
commandline.__init__(self, ui, b'svn')
self.delete = []
@@ -1488,9 +1489,11 @@
prop_actions_allowed.append((b'M', b'svn:date'))
hook = os.path.join(created, b'hooks', b'pre-revprop-change')
- fp = open(hook, b'wb')
- fp.write(gen_pre_revprop_change_hook(prop_actions_allowed))
- fp.close()
+
+ util.writefile(
+ hook, gen_pre_revprop_change_hook(prop_actions_allowed)
+ )
+
util.setflags(hook, False, True)
output = self.run0(b'info')
--- a/hgext/convert/transport.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/convert/transport.py Sat Oct 26 04:16:00 2024 +0200
@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
+from __future__ import annotations
+
# pytype: disable=import-error
import svn.client
import svn.core
--- a/hgext/eol.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/eol.py Sat Oct 26 04:16:00 2024 +0200
@@ -91,6 +91,7 @@
used.
"""
+from __future__ import annotations
import os
import re
@@ -100,6 +101,7 @@
error as errormod,
extensions,
match,
+ merge,
pycompat,
registrar,
scmutil,
@@ -375,6 +377,7 @@
def reposetup(ui, repo):
uisetup(repo.ui)
+ merge.MAYBE_USE_RUST_UPDATE = False
if not repo.local():
return
--- a/hgext/extdiff.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/extdiff.py Sat Oct 26 04:16:00 2024 +0200
@@ -81,12 +81,19 @@
pretty fast (at least faster than having to compare the entire tree).
'''
+from __future__ import annotations
import os
import re
import shutil
import stat
import subprocess
+import typing
+from typing import (
+ List,
+ Optional,
+ Tuple,
+)
from mercurial.i18n import _
from mercurial.node import (
@@ -111,6 +118,12 @@
stringutil,
)
+if typing.TYPE_CHECKING:
+ from mercurial import (
+ localrepo,
+ ui as uimod,
+ )
+
cmdtable = {}
command = registrar.command(cmdtable)
@@ -150,7 +163,14 @@
testedwith = b'ships-with-hg-core'
-def snapshot(ui, repo, files, node, tmproot, listsubrepos):
+def snapshot(
+ ui: "uimod.ui",
+ repo: "localrepo.localrepository",
+ files,
+ node: Optional[bytes],
+ tmproot: bytes,
+ listsubrepos: bool,
+) -> Tuple[bytes, List[Tuple[bytes, bytes, os.stat_result]]]:
"""snapshot files as of some revision
if not using snapshot, -I/-X does not work and recursive diff
in tools like kdiff3 and meld displays too many files."""
@@ -405,7 +425,6 @@
guitool,
opts,
):
-
subrepos = opts.get(b'subrepos')
# calculate list of files changed between both revs
@@ -710,16 +729,24 @@
to its parent.
"""
- def __init__(self, path, cmdline, isgui):
+ def __init__(self, cmd, path, cmdline, isgui):
# We can't pass non-ASCII through docstrings (and path is
# in an unknown encoding anyway), but avoid double separators on
# Windows
docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))}
+ self._name = cmd
self._cmdline = cmdline
self._isgui = isgui
def __call__(self, ui, repo, *pats, **opts):
+ if self._isgui and not procutil.gui():
+ msg = _(b"tool '%s' requires a GUI") % self._name
+ hint = (
+ _(b"to override, use: --config diff-tools.%s.gui=False")
+ % self._name
+ )
+ raise error.Abort(msg, hint=hint)
opts = pycompat.byteskwargs(opts)
options = b' '.join(map(procutil.shellquote, opts[b'option']))
if options:
@@ -779,9 +806,15 @@
args = ui.config(section, key)
if args:
cmdline += b' ' + args
- if isgui is None:
- isgui = ui.configbool(section, cmd + b'.gui') or False
break
+ if isgui is None:
+ key = cmd + b'.gui'
+ for section in (b'diff-tools', b'merge-tools'):
+ isgui = ui.configbool(section, key)
+ if isgui is not None:
+ break
+ if isgui is None:
+ isgui = False
return cmd, path, cmdline, isgui
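
The per-tool gui flag is now resolved independently of which section supplied
the command line: [diff-tools] is consulted first, then [merge-tools], then a
False default. A hedged restatement of the loop above:

    def _isgui(ui, cmd):
        for section in (b'diff-tools', b'merge-tools'):
            isgui = ui.configbool(section, cmd + b'.gui')
            if isgui is not None:
                return isgui
        return False

Together with the new GUI guard in savedcmd.__call__, this is what makes the
suggested override --config diff-tools.<tool>.gui=False effective from the
command line.
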
@@ -796,7 +829,7 @@
_(b'hg %s [OPTION]... [FILE]...') % cmd,
helpcategory=command.CATEGORY_FILE_CONTENTS,
inferrepo=True,
- )(savedcmd(path, cmdline, isgui))
+ )(savedcmd(cmd, path, cmdline, isgui))
# tell hggettext to extract docstrings from these functions:
--- a/hgext/factotum.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/factotum.py Sat Oct 26 04:16:00 2024 +0200
@@ -45,6 +45,7 @@
'''
+from __future__ import annotations
import os
from mercurial.i18n import _
--- a/hgext/fastannotate/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -102,6 +102,8 @@
# * format changes to the revmap file (maybe use length-encoding
# instead of null-terminated file paths at least?)
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial import (
error as hgerror,
--- a/hgext/fastannotate/commands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/commands.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,8 +5,12 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
+from typing import (
+ Set,
+)
from mercurial.i18n import _
from mercurial import (
@@ -254,7 +258,7 @@
_newopts = set()
-_knownopts = {
+_knownopts: Set[bytes] = {
opt[1].replace(b'-', b'_')
for opt in (fastannotatecommandargs['options'] + commands.globalopts)
}
--- a/hgext/fastannotate/context.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/context.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import contextlib
@@ -38,6 +39,7 @@
revmap as revmapmod,
)
+
# given path, get filelog, cached
@util.lrucachefunc
def _getflog(repo, path):
@@ -173,12 +175,16 @@
'followmerge': True,
}
+ diffopts: mdiff.diffopts
+ followrename: bool
+ followmerge: bool
+
def __init__(self, **opts):
for k, v in self.defaults.items():
setattr(self, k, opts.get(k, v))
@util.propertycache
- def shortstr(self):
+ def shortstr(self) -> bytes:
"""represent opts in a short string, suitable for a directory name"""
result = b''
if not self.followrename:
--- a/hgext/fastannotate/error.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/error.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,6 +4,7 @@
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
class CorruptedFileError(Exception):
--- a/hgext/fastannotate/formatter.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/formatter.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,6 +4,7 @@
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from mercurial.node import (
hex,
@@ -17,6 +18,7 @@
)
from mercurial.utils import dateutil
+
# imitating mercurial.commands.annotate, not using the vanilla formatter since
# the data structures are a bit different, and we have some fast paths.
class defaultformatter:
--- a/hgext/fastannotate/protocol.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/protocol.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,6 +4,7 @@
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import os
--- a/hgext/fastannotate/revmap.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/revmap.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import bisect
import io
--- a/hgext/fastannotate/support.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastannotate/support.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from mercurial import (
context as hgcontext,
--- a/hgext/fastexport.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fastexport.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,8 @@
# The format specification for fast-import streams can be found at
# https://git-scm.com/docs/git-fast-import#_input_format
+from __future__ import annotations
+
import re
from mercurial.i18n import _
--- a/hgext/fetch.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fetch.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
'''pull, update and merge in one command (DEPRECATED)'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial.node import short
--- a/hgext/fix.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fix.py Sat Oct 26 04:16:00 2024 +0200
@@ -122,6 +122,7 @@
previously passed to the "postfixfile" hook.
"""
+from __future__ import annotations
import collections
import itertools
@@ -339,7 +340,7 @@
repo.hook(
b'postfixfile',
throw=False,
- **pycompat.strkwargs(hookargs)
+ **pycompat.strkwargs(hookargs),
)
numitems[rev] -= 1
# Apply the fixes for this and any other revisions that are
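
fix.py can now write **pycompat.strkwargs(hookargs) with a trailing comma, and later hunks do the same in function definitions. A trailing comma after ** unpacking is legal in calls since Python 3.5 (PEP 448) and after **kwargs in definitions since Python 3.6, which is presumably why it only appears now that older interpreters were dropped; it also keeps future one-line additions from touching the existing line in a diff. A small demonstration:

    def frob(widget, *, force=False, **kwargs,):  # def form: 3.6+
        # Accepting and forwarding **kwargs verbatim keeps wrappers in
        # sync with whatever the wrapped function grows later.
        return widget, force, kwargs


    result = frob(
        'w',
        force=True,
        **{'retries': 3},  # call form after ** unpacking: 3.5+
    )
    print(result)  # ('w', True, {'retries': 3})
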
--- a/hgext/fsmonitor/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -107,6 +107,7 @@
# The issues related to nested repos and subrepos are probably not fundamental
# ones. Patches to fix them are welcome.
+from __future__ import annotations
import codecs
import os
@@ -891,9 +892,8 @@
mergeancestor=False,
labels=None,
matcher=None,
- **kwargs
+ **kwargs,
):
-
distance = 0
partial = True
oldnode = repo[b'.'].node()
@@ -919,7 +919,7 @@
mergeancestor,
labels,
matcher,
- **kwargs
+ **kwargs,
)
--- a/hgext/fsmonitor/pywatchman/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/pywatchman/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,8 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
+
import inspect
import math
import os
@@ -210,7 +212,6 @@
)
)
-
else:
def log(fmt, *args):
@@ -806,7 +807,7 @@
cmd = bser.dumps(
*args,
version=self.bser_version,
- capabilities=self.bser_capabilities
+ capabilities=self.bser_capabilities,
)
else:
cmd = bser.dumps(*args)
--- a/hgext/fsmonitor/pywatchman/capabilities.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/pywatchman/capabilities.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,8 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
+
def parse_version(vstr):
res = 0
--- a/hgext/fsmonitor/pywatchman/compat.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/pywatchman/compat.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,8 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
+
import sys
--- a/hgext/fsmonitor/pywatchman/encoding.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/pywatchman/encoding.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,8 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
+
import sys
from . import compat
@@ -46,7 +48,6 @@
# returns None.
return sys.getfilesystemencoding()
-
else:
# Python 2 doesn't support surrogateescape, so use 'strict' by
# default. Users can register a custom surrogateescape error handler and use
--- a/hgext/fsmonitor/pywatchman/load.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/pywatchman/load.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,8 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
+
import ctypes
--- a/hgext/fsmonitor/pywatchman/pybser.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/pywatchman/pybser.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,8 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
+
import binascii
import collections
import ctypes
--- a/hgext/fsmonitor/state.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/state.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
import os
--- a/hgext/fsmonitor/watchmanclient.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/fsmonitor/watchmanclient.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import getpass
--- a/hgext/git/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/git/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,6 +4,7 @@
firstborn a la Rumpelstiltskin, etc.
"""
+from __future__ import annotations
import os
--- a/hgext/git/dirstate.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/git/dirstate.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,6 +1,18 @@
+from __future__ import annotations
+
import contextlib
import os
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+)
+
from mercurial.node import sha1nodeconstants
from mercurial import (
dirstatemap,
@@ -16,7 +28,6 @@
)
from mercurial.interfaces import (
dirstate as intdirstate,
- util as interfaceutil,
)
from . import gitutil
@@ -71,8 +82,7 @@
}
-@interfaceutil.implementer(intdirstate.idirstate)
-class gitdirstate:
+class gitdirstate(intdirstate.idirstate):
def __init__(self, ui, vfs, gitrepo, use_dirstate_v2):
self._ui = ui
self._root = os.path.dirname(vfs.base)
@@ -96,7 +106,7 @@
)
return self._map
- def p1(self):
+ def p1(self) -> bytes:
try:
return self.git.head.peel().id.raw
except pygit2.GitError:
@@ -104,11 +114,11 @@
# empty repository.
return sha1nodeconstants.nullid
- def p2(self):
+ def p2(self) -> bytes:
# TODO: MERGE_HEAD? something like that, right?
return sha1nodeconstants.nullid
- def setparents(self, p1, p2=None):
+ def setparents(self, p1: bytes, p2: Optional[bytes] = None):
if p2 is None:
p2 = sha1nodeconstants.nullid
assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
@@ -120,17 +130,17 @@
os.path.join(self._root, b'.git', b'index')
)
- def branch(self):
+ def branch(self) -> bytes:
return b'default'
- def parents(self):
+ def parents(self) -> List[bytes]:
# TODO how on earth do we find p2 if a merge is in flight?
- return self.p1(), sha1nodeconstants.nullid
+ return [self.p1(), sha1nodeconstants.nullid]
- def __iter__(self):
+ def __iter__(self) -> Iterator[bytes]:
return (pycompat.fsencode(f.path) for f in self.git.index)
- def items(self):
+ def items(self) -> Iterator[Tuple[bytes, intdirstate.DirstateItemT]]:
for ie in self.git.index:
yield ie.path, None # value should be a DirstateItem
@@ -144,14 +154,21 @@
return b'?'
return _STATUS_MAP[gs]
- def __contains__(self, filename):
+ def __contains__(self, filename: Any) -> bool:
try:
gs = self.git.status_file(filename)
return _STATUS_MAP[gs] != b'?'
except KeyError:
return False
- def status(self, match, subrepos, ignored, clean, unknown):
+ def status(
+ self,
+ match: matchmod.basematcher,
+ subrepos: bool,
+ ignored: bool,
+ clean: bool,
+ unknown: bool,
+ ) -> intdirstate.StatusReturnT:
listclean = clean
# TODO handling of clean files - can we get that from git.status()?
modified, added, removed, deleted, unknown, ignored, clean = (
@@ -224,24 +241,28 @@
mtime_boundary,
)
- def flagfunc(self, buildfallback):
+ def flagfunc(
+ self, buildfallback: intdirstate.FlagFuncFallbackT
+ ) -> intdirstate.FlagFuncReturnT:
# TODO we can do better
return buildfallback()
- def getcwd(self):
+ def getcwd(self) -> bytes:
# TODO is this a good way to do this?
return os.path.dirname(
os.path.dirname(pycompat.fsencode(self.git.path))
)
- def get_entry(self, path):
+ def get_entry(self, path: bytes) -> intdirstate.DirstateItemT:
"""return a DirstateItem for the associated path"""
entry = self._map.get(path)
if entry is None:
return DirstateItem()
return entry
- def normalize(self, path):
+ def normalize(
+ self, path: bytes, isknown: bool = False, ignoremissing: bool = False
+ ) -> bytes:
normed = util.normcase(path)
assert normed == path, b"TODO handling of case folding: %s != %s" % (
normed,
@@ -250,27 +271,29 @@
return path
@property
- def _checklink(self):
+ def _checklink(self) -> bool:
return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))
- def copies(self):
+ def copies(self) -> Dict[bytes, bytes]:
# TODO support copies?
return {}
# # TODO what the heck is this
_filecache = set()
- def is_changing_parents(self):
+ @property
+ def is_changing_parents(self) -> bool:
# TODO: we need to implement the context manager bits and
# correctly stage/revert index edits.
return False
- def is_changing_any(self):
+ @property
+ def is_changing_any(self) -> bool:
# TODO: we need to implement the context manager bits and
# correctly stage/revert index edits.
return False
- def write(self, tr):
+ def write(self, tr: Optional[intdirstate.TransactionT]) -> None:
# TODO: call parent change callbacks
if tr:
@@ -282,7 +305,7 @@
else:
self.git.index.write()
- def pathto(self, f, cwd=None):
+ def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
if cwd is None:
cwd = self.getcwd()
# TODO core dirstate does something about slashes here
@@ -290,11 +313,11 @@
r = util.pathto(self._root, cwd, f)
return r
- def matches(self, match):
+ def matches(self, match: matchmod.basematcher) -> Iterable[bytes]:
for x in self.git.index:
p = pycompat.fsencode(x.path)
if match(p):
- yield p
+ yield p # TODO: return list instead of yielding?
def set_clean(self, f, parentfiledata):
"""Mark a file normal and clean."""
@@ -306,7 +329,14 @@
# TODO: for now we just let libgit2 re-stat the file. We can
# clearly do better.
- def walk(self, match, subrepos, unknown, ignored, full=True):
+ def walk(
+ self,
+ match: matchmod.basematcher,
+ subrepos: Any,
+ unknown: bool,
+ ignored: bool,
+ full: bool = True,
+ ) -> intdirstate.WalkReturnT:
# TODO: we need to use .status() and not iterate the index,
# because the index doesn't force a re-walk and so `hg add` of
# a new file without an intervening call to status will
@@ -368,7 +398,7 @@
index.remove(pycompat.fsdecode(f))
index.write()
- def copied(self, path):
+ def copied(self, file: bytes) -> Optional[bytes]:
# TODO: track copies?
return None
@@ -385,11 +415,15 @@
# TODO: track this maybe?
yield
- def addparentchangecallback(self, category, callback):
+ def addparentchangecallback(
+ self, category: bytes, callback: intdirstate.AddParentChangeCallbackT
+ ) -> None:
# TODO: should this be added to the dirstate interface?
self._plchangecallbacks[category] = callback
- def setbranch(self, branch, transaction):
+ def setbranch(
+ self, branch: bytes, transaction: Optional[intdirstate.TransactionT]
+ ) -> None:
raise error.Abort(
b'git repos do not support branches. try using bookmarks'
)
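
git/dirstate.py stops declaring conformance with the zope-style @interfaceutil.implementer decorator and subclasses intdirstate.idirstate directly, and is_changing_parents / is_changing_any become properties to match the base class. Assuming idirstate now behaves like an ordinary abstract base class (the sketch below invents a tiny one), the migration looks like this:

    import abc


    class idirstate(abc.ABC):
        @property
        @abc.abstractmethod
        def is_changing_parents(self) -> bool:
            ...

        @abc.abstractmethod
        def branch(self) -> bytes:
            ...


    class gitdirstate(idirstate):
        # Overriding the abstract property as a property, not a plain
        # method, keeps call sites like ds.is_changing_parents working.
        @property
        def is_changing_parents(self) -> bool:
            return False

        def branch(self) -> bytes:
            return b'default'


    print(gitdirstate().branch())  # b'default'
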
--- a/hgext/git/gitlog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/git/gitlog.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial.node import (
--- a/hgext/git/gitutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/git/gitutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,5 +1,7 @@
"""utilities to assist in working with pygit2"""
+from __future__ import annotations
+
from mercurial.node import bin, sha1nodeconstants
pygit2_module = None
--- a/hgext/git/index.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/git/index.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import collections
import os
import sqlite3
--- a/hgext/git/manifest.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/git/manifest.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from mercurial import (
match as matchmod,
pathutil,
--- a/hgext/githelp.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/githelp.py Sat Oct 26 04:16:00 2024 +0200
@@ -15,6 +15,7 @@
produced.
"""
+from __future__ import annotations
import getopt
import re
--- a/hgext/gpg.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/gpg.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
'''commands to sign and verify changesets'''
+from __future__ import annotations
import binascii
import os
--- a/hgext/graphlog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/graphlog.py Sat Oct 26 04:16:00 2024 +0200
@@ -15,6 +15,7 @@
revision graph is also shown.
'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/hgk.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/hgk.py Sat Oct 26 04:16:00 2024 +0200
@@ -34,6 +34,7 @@
vdiff on hovered and selected revisions.
'''
+from __future__ import annotations
import os
--- a/hgext/highlight/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/highlight/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,7 @@
match (even matches with a low confidence score) will be used.
"""
+from __future__ import annotations
from . import highlight
from mercurial.hgweb import (
--- a/hgext/highlight/highlight.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/highlight/highlight.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
# The original module was split in an interface and an implementation
# file to defer pygments loading and speedup extension setup.
+from __future__ import annotations
from mercurial import demandimport
@@ -43,7 +44,6 @@
def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):
-
# append a <link ...> to the syntax highlighting css
tmpl.load(b'header')
old_header = tmpl.cache[b'header']
--- a/hgext/histedit.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/histedit.py Sat Oct 26 04:16:00 2024 +0200
@@ -190,6 +190,7 @@
"""
+from __future__ import annotations
# chistedit dependencies that are not available everywhere
try:
@@ -1526,7 +1527,8 @@
def move_cursor(self, oldpos, newpos):
"""Change the rule/changeset that the cursor is pointing to, regardless of
- current mode (you can switch between patches from the view patch window)."""
+ current mode (you can switch between patches from the view patch window).
+ """
self.pos = newpos
mode, _ = self.mode
@@ -1605,7 +1607,8 @@
def change_view(self, delta, unit):
"""Change the region of whatever is being viewed (a patch or the list of
- changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
+ changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.
+ """
mode, _ = self.mode
if mode != MODE_PATCH:
return
--- a/hgext/hooklib/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/hooklib/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -13,6 +13,9 @@
extension as option. The functionality itself is planned to be supported
long-term.
"""
+
+from __future__ import annotations
+
from . import (
changeset_obsoleted,
changeset_published,
--- a/hgext/hooklib/changeset_obsoleted.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/hooklib/changeset_obsoleted.py Sat Oct 26 04:16:00 2024 +0200
@@ -17,6 +17,7 @@
python:hgext.hooklib.changeset_obsoleted.hook
"""
+from __future__ import annotations
import email.errors as emailerrors
import email.utils as emailutils
--- a/hgext/hooklib/changeset_published.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/hooklib/changeset_published.py Sat Oct 26 04:16:00 2024 +0200
@@ -17,6 +17,7 @@
python:hgext.hooklib.changeset_published.hook
"""
+from __future__ import annotations
import email.errors as emailerrors
import email.utils as emailutils
--- a/hgext/hooklib/enforce_draft_commits.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/hooklib/enforce_draft_commits.py Sat Oct 26 04:16:00 2024 +0200
@@ -14,6 +14,7 @@
python:hgext.hooklib.enforce_draft_commits.hook
"""
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/hooklib/reject_merge_commits.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/hooklib/reject_merge_commits.py Sat Oct 26 04:16:00 2024 +0200
@@ -14,6 +14,7 @@
python:hgext.hooklib.reject_merge_commits.hook
"""
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/hooklib/reject_new_heads.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/hooklib/reject_new_heads.py Sat Oct 26 04:16:00 2024 +0200
@@ -14,6 +14,7 @@
python:hgext.hooklib.reject_new_heads.hook
"""
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/journal.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/journal.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,6 +11,7 @@
"""
+from __future__ import annotations
import collections
import os
@@ -33,6 +34,7 @@
localrepo,
lock,
logcmdutil,
+ merge,
pycompat,
registrar,
util,
@@ -64,8 +66,10 @@
bookmarktype: hg.sharedbookmarks,
}
+
# Journal recording, register hooks and storage object
def extsetup(ui):
+ merge.MAYBE_USE_RUST_UPDATE = False
extensions.wrapfunction(dispatch, 'runcommand', runcommand)
extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
extensions.wrapfilecache(
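
journal.py sets merge.MAYBE_USE_RUST_UPDATE = False in extsetup(), and keyword, largefiles and lfs below do the same. A plausible reading is that these extensions hook into working-copy update internals in Python, so they opt out of a Rust fast path that would otherwise bypass their wrappers. A toy version of such an opt-out flag; every name except MAYBE_USE_RUST_UPDATE itself is invented:

    # Stand-in for a core module exposing an opt-out flag.
    MAYBE_USE_RUST_UPDATE = True


    def update():
        if MAYBE_USE_RUST_UPDATE:
            return 'fast native path'
        return 'python path (wrappable by extensions)'


    def extsetup():
        # Flip the flag before installing wrappers, so the core never
        # takes the shortcut that would skip them.
        global MAYBE_USE_RUST_UPDATE
        MAYBE_USE_RUST_UPDATE = False


    extsetup()
    print(update())  # python path (wrappable by extensions)
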
--- a/hgext/keyword.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/keyword.py Sat Oct 26 04:16:00 2024 +0200
@@ -83,6 +83,8 @@
'''
+from __future__ import annotations
+
import os
import re
import weakref
@@ -100,6 +102,7 @@
localrepo,
logcmdutil,
match,
+ merge,
patch,
pathutil,
pycompat,
@@ -160,6 +163,8 @@
b'svn',
default=False,
)
+
+
# date like in cvs' $Date
@templatefilter(b'utcdate', intype=templateutil.date)
def utcdate(date):
@@ -814,6 +819,7 @@
extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
for c in nokwwebcommands.split():
extensions.wrapfunction(webcommands, c, kwweb_skip)
+ merge.MAYBE_USE_RUST_UPDATE = False
def reposetup(ui, repo):
--- a/hgext/largefiles/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -105,6 +105,8 @@
command.
'''
+from __future__ import annotations
+
from mercurial import (
cmdutil,
configitems,
@@ -112,6 +114,7 @@
exthelper,
hg,
localrepo,
+ merge,
wireprotov1server,
)
@@ -163,6 +166,7 @@
@eh.uisetup
def _uisetup(ui):
+ merge.MAYBE_USE_RUST_UPDATE = False
localrepo.featuresetupfuncs.add(featuresetup)
hg.wirepeersetupfuncs.append(proto.wirereposetup)
--- a/hgext/largefiles/basestore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/basestore.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,8 @@
'''base class for store implementations and store-related utility code'''
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial.node import short
--- a/hgext/largefiles/lfcommands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/lfcommands.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,8 @@
'''High-level command function for lfconvert, plus the cmdtable.'''
+from __future__ import annotations
+
import binascii
import os
import shutil
--- a/hgext/largefiles/lfutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/lfutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,8 @@
'''largefiles utility code: must not import other modules in this package.'''
+from __future__ import annotations
+
import contextlib
import copy
import os
@@ -162,36 +164,18 @@
_large_file_dirstate = True
_tr_key_suffix = b'-large-files'
- def __getitem__(self, key):
- return super(largefilesdirstate, self).__getitem__(unixpath(key))
+ # XXX: why are there overrides to fix the path, if the path should already
+ # be in unix form for the superclass?
- def set_tracked(self, f):
- return super(largefilesdirstate, self).set_tracked(unixpath(f))
+ def set_tracked(self, f, reset_copy=False):
+ return super(largefilesdirstate, self).set_tracked(
+ unixpath(f), reset_copy=reset_copy
+ )
def set_untracked(self, f):
return super(largefilesdirstate, self).set_untracked(unixpath(f))
- def normal(self, f, parentfiledata=None):
- # not sure if we should pass the `parentfiledata` down or throw it
- # away. So throwing it away to stay on the safe side.
- return super(largefilesdirstate, self).normal(unixpath(f))
-
- def remove(self, f):
- return super(largefilesdirstate, self).remove(unixpath(f))
-
- def add(self, f):
- return super(largefilesdirstate, self).add(unixpath(f))
-
- def drop(self, f):
- return super(largefilesdirstate, self).drop(unixpath(f))
-
- def forget(self, f):
- return super(largefilesdirstate, self).forget(unixpath(f))
-
- def normallookup(self, f):
- return super(largefilesdirstate, self).normallookup(unixpath(f))
-
- def _ignore(self, f):
+ def _dirignore(self, f):
return False
def write(self, tr):
--- a/hgext/largefiles/localstore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/localstore.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,8 @@
'''store class for local filesystem'''
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import util
--- a/hgext/largefiles/overrides.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/overrides.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,9 +8,14 @@
'''Overridden Mercurial commands and functions for the largefiles extension'''
+from __future__ import annotations
+
import contextlib
import copy
import os
+from typing import (
+ Optional,
+)
from mercurial.i18n import _
@@ -827,11 +832,11 @@
if not os.path.isdir(makestandin(dest)):
os.makedirs(makestandin(dest))
+ # When we call orig below it creates the standins but we don't add
+ # them to the dir state until later so lock during that time.
+ wlock = repo.wlock()
+
try:
- # When we call orig below it creates the standins but we don't add
- # them to the dir state until later so lock during that time.
- wlock = repo.wlock()
-
manifest = repo[None].manifest()
def overridematch(
@@ -899,7 +904,7 @@
result += orig(ui, repo, listpats, opts, rename)
lfdirstate = lfutil.openlfdirstate(ui, repo)
- for (src, dest) in copiedfiles:
+ for src, dest in copiedfiles:
if lfutil.shortname in src and dest.startswith(
repo.wjoin(lfutil.shortname)
):
@@ -1229,7 +1234,7 @@
node,
kind,
decode=True,
- match=None,
+ match: Optional[matchmod.basematcher] = None,
prefix=b'',
mtime=None,
subrepos=None,
@@ -1249,32 +1254,56 @@
if kind not in archival.archivers:
raise error.Abort(_(b"unknown archive type '%s'") % kind)
- ctx = repo[node]
-
if kind == b'files':
if prefix:
raise error.Abort(_(b'cannot give prefix when archiving to files'))
else:
prefix = archival.tidyprefix(dest, kind, prefix)
+ if not match:
+ match = scmutil.matchall(repo)
+ archiver = None
+ ctx = repo[node]
+
+ def opencallback():
+ """Return the archiver instance, creating it if necessary.
+
+ This function is called when the first actual entry is created.
+ It may be called multiple times from different layers.
+ When serving the archive via hgweb, no errors should happen after
+ this point.
+ """
+ nonlocal archiver
+ if archiver is None:
+ if callable(dest):
+ output = dest()
+ else:
+ output = dest
+ archiver = archival.archivers[kind](output, mtime or ctx.date()[0])
+ assert archiver is not None
+
+ if repo.ui.configbool(b"ui", b"archivemeta"):
+ metaname = b'.hg_archival.txt'
+ if match(metaname):
+ write(
+ metaname,
+ 0o644,
+ False,
+ lambda: archival.buildmetadata(ctx),
+ )
+ return archiver
+
def write(name, mode, islink, getdata):
- if match and not match(name):
+ if not match(name):
return
data = getdata()
if decode:
data = repo.wwritedata(name, data)
+ if archiver is None:
+ opencallback()
+ assert archiver is not None, "archive should be opened by now"
archiver.addfile(prefix + name, mode, islink, data)
- archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
-
- if repo.ui.configbool(b"ui", b"archivemeta"):
- write(
- b'.hg_archival.txt',
- 0o644,
- False,
- lambda: archival.buildmetadata(ctx),
- )
-
for f in ctx:
ff = ctx.flags(f)
getdata = ctx[f].data
@@ -1313,16 +1342,19 @@
and lfstatus(sub._repo)
or util.nullcontextmanager()
):
- sub.archive(archiver, subprefix, submatch)
+ sub.archive(opencallback, subprefix, submatch)
- archiver.done()
+ if archiver:
+ archiver.done()
@eh.wrapfunction(subrepo.hgsubrepo, 'archive')
-def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
+def hgsubrepoarchive(
+ orig, repo, opener, prefix, match: matchmod.basematcher, decode=True
+):
lfenabled = hasattr(repo._repo, '_largefilesenabled')
if not lfenabled or not repo._repo.lfstatus:
- return orig(repo, archiver, prefix, match, decode)
+ return orig(repo, opener, prefix, match, decode)
repo._get(repo._state + (b'hg',))
rev = repo._state[1]
@@ -1331,15 +1363,20 @@
if ctx.node() is not None:
lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
+ archiver = None
+
def write(name, mode, islink, getdata):
+ nonlocal archiver
# At this point, the standin has been replaced with the largefile name,
# so the normal matcher works here without the lfutil variants.
- if match and not match(f):
+ if not match(f):
return
data = getdata()
if decode:
data = repo._repo.wwritedata(name, data)
+ if archiver is None:
+ archiver = opener()
archiver.addfile(prefix + name, mode, islink, data)
for f in ctx:
@@ -1379,7 +1416,7 @@
and lfstatus(sub._repo)
or util.nullcontextmanager()
):
- sub.archive(archiver, subprefix, submatch, decode)
+ sub.archive(opener, subprefix, submatch, decode)
# If a largefile is modified, the change is not reflected in its
@@ -1622,7 +1659,7 @@
m,
uipathfn,
opts.get(b'dry_run'),
- **pycompat.strkwargs(opts)
+ **pycompat.strkwargs(opts),
)
# Call into the normal add code, and any files that *should* be added as
# largefiles will be
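
The largest change in largefiles/overrides.py restructures archiving so the archiver is created lazily: opencallback() memoizes it in the enclosing scope with nonlocal, the first write triggers the open, and, per the new docstring, no errors should happen after that point when serving the archive via hgweb. The control flow, reduced to a sketch:

    def make_archive(dest_factory, entries):
        """Write entries lazily; dest is only created on first write."""
        archiver = None

        def opencallback():
            # Memoized open: safe to call from several layers; only
            # the first call pays the cost (and can still fail before
            # any output has been produced).
            nonlocal archiver
            if archiver is None:
                archiver = dest_factory()
            return archiver

        for name, data in entries:
            if archiver is None:
                opencallback()
            archiver.append((name, data))

        # Stays None if there was nothing to write at all.
        return archiver


    print(make_archive(list, [(b'a.txt', b'hello')]))
    # [(b'a.txt', b'hello')]
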
--- a/hgext/largefiles/proto.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/proto.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
from mercurial.i18n import _
--- a/hgext/largefiles/remotestore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/remotestore.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,8 @@
'''remote largefile store; the base class for wirestore'''
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial import (
--- a/hgext/largefiles/reposetup.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/reposetup.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,8 @@
'''setup for largefiles repositories: reposetup'''
+from __future__ import annotations
+
import copy
from mercurial.i18n import _
@@ -140,7 +142,6 @@
wlock = util.nullcontextmanager()
gotlock = False
with wlock, self.dirstate.running_status(self):
-
# First check if paths or patterns were specified on the
# command line. If there were, and they don't match any
# largefiles, we should just bail here and let super
--- a/hgext/largefiles/storefactory.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/storefactory.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,6 +1,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
--- a/hgext/largefiles/wirestore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/largefiles/wirestore.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
'''largefile store working over Mercurial's wire protocol'''
+from __future__ import annotations
+
from . import (
lfutil,
remotestore,
--- a/hgext/lfs/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/lfs/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -120,6 +120,7 @@
usercache = /path/to/global/cache
"""
+from __future__ import annotations
import sys
@@ -137,6 +138,7 @@
filesetlang,
localrepo,
logcmdutil,
+ merge,
minifileset,
pycompat,
revlog,
@@ -233,6 +235,7 @@
@eh.uisetup
def _uisetup(ui):
+ merge.MAYBE_USE_RUST_UPDATE = False
localrepo.featuresetupfuncs.add(featuresetup)
--- a/hgext/lfs/blobstore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/lfs/blobstore.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import errno
@@ -14,6 +15,10 @@
import re
import socket
+from typing import (
+ Optional,
+)
+
from mercurial.i18n import _
from mercurial.node import hex
@@ -41,11 +46,11 @@
class lfsvfs(vfsmod.vfs):
- def join(self, path):
+ def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
"""split the path at first two characters, like: XX/XXXXX..."""
if not _lfsre.match(path):
raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
- return super(lfsvfs, self).join(path[0:2], path[2:])
+ return super(lfsvfs, self).join(path[0:2], path[2:], *insidef)
def walk(self, path=None, onerror=None):
"""Yield (dirpath, [], oids) tuple for blobs under path
@@ -76,7 +81,7 @@
def __init__(self):
pass
- def exists(self, oid):
+ def exists(self, path: Optional[bytes] = None) -> bool:
return False
def read(self, oid):
@@ -92,8 +97,8 @@
def walk(self, path=None, onerror=None):
return (b'', [], [])
- def write(self, oid, data):
- pass
+ def write(self, *args, **kwargs) -> int:
+ return 0
class lfsuploadfile(httpconnectionmod.httpsendfile):
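
In lfs/blobstore.py the overrides are widened to match their base classes: join() grows *insidef, exists() takes an optional path, and the null store's write() returns an int. With annotations in play, a subclass that accepts less than its parent now fails type checking (the substitutability rule). A sketch of the join() case, with a toy base class standing in for Mercurial's vfs:

    from typing import Optional


    class vfs:
        def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
            parts = [p for p in (path,) + insidef if p]
            return b'/'.join(parts)


    class shardedvfs(vfs):
        # The override must keep *insidef even if the common caller
        # only passes path; dropping it would break substitutability.
        def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
            assert path is not None and len(path) > 2
            return super().join(path[:2], path[2:], *insidef)


    print(shardedvfs().join(b'abcdef'))  # b'ab/cdef'
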
--- a/hgext/lfs/pointer.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/lfs/pointer.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
--- a/hgext/lfs/wireprotolfsserver.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/lfs/wireprotolfsserver.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import datetime
import errno
--- a/hgext/lfs/wrapper.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/lfs/wrapper.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import hashlib
@@ -169,7 +170,7 @@
cachedelta=None,
node=None,
flags=revlog.REVIDX_DEFAULT_FLAGS,
- **kwds
+ **kwds,
):
# The matcher isn't available if reposetup() wasn't called.
lfstrack = self._revlog.opener.options.get(b'lfstrack')
@@ -194,7 +195,7 @@
cachedelta=cachedelta,
node=node,
flags=flags,
- **kwds
+ **kwds,
)
--- a/hgext/logtoprocess.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/logtoprocess.py Sat Oct 26 04:16:00 2024 +0200
@@ -32,6 +32,7 @@
"""
+from __future__ import annotations
import os
--- a/hgext/mq.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/mq.py Sat Oct 26 04:16:00 2024 +0200
@@ -62,6 +62,7 @@
in the strip extension.
'''
+from __future__ import annotations
import os
import re
--- a/hgext/narrow/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/narrow/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# GNU General Public License version 2 or any later version.
'''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
+from __future__ import annotations
from mercurial import (
localrepo,
--- a/hgext/narrow/narrowbundle2.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/narrow/narrowbundle2.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import struct
@@ -37,6 +38,7 @@
_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
+
# Serve a changegroup for a client with a narrow clone.
def getbundlechangegrouppart_narrow(
bundler,
@@ -46,7 +48,7 @@
b2caps=None,
heads=None,
common=None,
- **kwargs
+ **kwargs,
):
assert repo.ui.configbool(b'experimental', b'narrowservebrokenellipses')
--- a/hgext/narrow/narrowcommands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/narrow/narrowcommands.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import itertools
import os
--- a/hgext/narrow/narrowdirstate.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/narrow/narrowdirstate.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import error
--- a/hgext/narrow/narrowrepo.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/narrow/narrowrepo.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from mercurial import wireprototypes
--- a/hgext/narrow/narrowtemplates.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/narrow/narrowtemplates.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from mercurial import (
registrar,
--- a/hgext/narrow/narrowwirepeer.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/narrow/narrowwirepeer.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from mercurial.i18n import _
--- a/hgext/notify.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/notify.py Sat Oct 26 04:16:00 2024 +0200
@@ -155,6 +155,8 @@
'''
+from __future__ import annotations
+
import email.errors as emailerrors
import email.utils as emailutils
import fnmatch
@@ -416,7 +418,7 @@
baseurl=self.ui.config(b'web', b'baseurl'),
root=self.repo.root,
webroot=self.root,
- **props
+ **props,
)
return True
@@ -543,7 +545,6 @@
)
def diff(self, ctx, ref=None):
-
maxdiff = int(self.ui.config(b'notify', b'maxdiff'))
prev = ctx.p1().node()
if ref:
--- a/hgext/pager.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/pager.py Sat Oct 26 04:16:00 2024 +0200
@@ -22,6 +22,8 @@
attend-cat = false
'''
+from __future__ import annotations
+
from mercurial import (
cmdutil,
commands,
--- a/hgext/patchbomb.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/patchbomb.py Sat Oct 26 04:16:00 2024 +0200
@@ -72,6 +72,8 @@
``patchbomb.confirm`` to true.
'''
+from __future__ import annotations
+
import email.encoders as emailencoders
import email.mime.base as emimebase
import email.mime.multipart as emimemultipart
@@ -261,7 +263,6 @@
numbered,
patchname=None,
):
-
desc = []
node = None
body = b''
--- a/hgext/phabricator.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/phabricator.py Sat Oct 26 04:16:00 2024 +0200
@@ -57,6 +57,7 @@
example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
"""
+from __future__ import annotations
import base64
import contextlib
@@ -68,10 +69,17 @@
import operator
import re
import time
+import typing
from mercurial.node import bin, short
from mercurial.i18n import _
from mercurial.thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from mercurial import (
cmdutil,
context,
@@ -698,7 +706,7 @@
oldLength = attr.ib(default=0) # camelcase-required
newOffset = attr.ib(default=0) # camelcase-required
newLength = attr.ib(default=0) # camelcase-required
- corpus = attr.ib(default='')
+ corpus = attr.ib(default=b'')
# These get added to the phabchange's equivalents
addLines = attr.ib(default=0) # camelcase-required
delLines = attr.ib(default=0) # camelcase-required
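
phabricator.py keeps importing the vendored mercurial.thirdparty.attr for runtime use but re-imports plain attr under typing.TYPE_CHECKING, which, per the in-tree comment, forces pytype to resolve the names against the real installed package instead of the stub-less vendored copy; the conditional import never runs. The pattern, assuming the Mercurial tree and the attrs package are importable (sqlitestore.py below does exactly the same):

    import typing

    from mercurial.thirdparty import attr  # runtime: vendored copy

    if typing.TYPE_CHECKING:
        # Static analysis only: rebind the name to the installed
        # package so the checker sees real signatures. Never executed.
        import attr


    @attr.s
    class hunk:
        corpus = attr.ib(default=b'')
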
--- a/hgext/purge.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/purge.py Sat Oct 26 04:16:00 2024 +0200
@@ -36,4 +36,7 @@
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
+
+from __future__ import annotations
+
testedwith = b'ships-with-hg-core'
--- a/hgext/rebase.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/rebase.py Sat Oct 26 04:16:00 2024 +0200
@@ -14,6 +14,7 @@
https://mercurial-scm.org/wiki/RebaseExtension
'''
+from __future__ import annotations
import os
@@ -830,7 +831,6 @@
cleanup = False
if cleanup:
-
if rebased:
strippoints = [
c.node() for c in repo.set(b'roots(%ld)', rebased)
--- a/hgext/record.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/record.py Sat Oct 26 04:16:00 2024 +0200
@@ -10,6 +10,7 @@
The feature provided by this extension has been moved into core Mercurial as
:hg:`commit --interactive`.'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
@@ -136,7 +137,7 @@
False,
cmdutil.recordfilter,
*pats,
- **opts
+ **opts,
)
--- a/hgext/releasenotes.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/releasenotes.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,6 +11,7 @@
process simpler by automating it.
"""
+from __future__ import annotations
import difflib
import re
--- a/hgext/relink.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/relink.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,8 @@
"""recreates hardlinks between repository clones"""
+from __future__ import annotations
+
import os
import stat
--- a/hgext/remotefilelog/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -125,6 +125,8 @@
"""
+from __future__ import annotations
+
import os
import time
import traceback
@@ -619,7 +621,7 @@
copy,
getfilectx,
*args,
- **kwargs
+ **kwargs,
):
if isenabled(repo):
prefetch = []
@@ -648,7 +650,7 @@
copy,
getfilectx,
*args,
- **kwargs
+ **kwargs,
)
--- a/hgext/remotefilelog/basepack.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/basepack.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import collections
import errno
import mmap
@@ -45,7 +47,7 @@
# bisect) with (8 step fanout scan + 1 step bisect)
# 5 step bisect = log(2^16 / 8 / 255) # fanout
# 10 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
-SMALLFANOUTCUTOFF = 2 ** 16 // 8
+SMALLFANOUTCUTOFF = 2**16 // 8
# The amount of time to wait between checking for new packs. This prevents an
# exception when data is moved to a new pack after the process has already
@@ -275,7 +277,7 @@
class basepack(versionmixin):
# The maximum amount we should read via mmap before remmaping so the old
# pages can be released (100MB)
- MAXPAGEDIN = 100 * 1024 ** 2
+ MAXPAGEDIN = 100 * 1024**2
SUPPORTED_VERSIONS = [2]
--- a/hgext/remotefilelog/basestore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/basestore.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import os
import shutil
import stat
--- a/hgext/remotefilelog/connectionpool.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/connectionpool.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from mercurial import (
hg,
@@ -38,7 +39,6 @@
pass
if conn is None:
-
peer = hg.peer(self._repo.ui, {}, path)
if hasattr(peer, '_cleanup'):
--- a/hgext/remotefilelog/constants.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/constants.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import struct
from mercurial.i18n import _
--- a/hgext/remotefilelog/contentstore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/contentstore.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import threading
from mercurial.node import (
--- a/hgext/remotefilelog/datapack.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/datapack.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import struct
import zlib
@@ -414,7 +416,7 @@
def add(self, name, node, deltabasenode, delta, metadata=None):
# metadata is a dict, ex. {METAKEYFLAG: flag}
- if len(name) > 2 ** 16:
+ if len(name) > 2**16:
raise RuntimeError(_(b"name too long %s") % name)
if len(node) != 20:
raise RuntimeError(_(b"node should be 20 bytes %s") % node)
--- a/hgext/remotefilelog/debugcommands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/debugcommands.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
import zlib
@@ -34,7 +36,7 @@
)
-def debugremotefilelog(ui, path, **opts):
+def debugremotefilelog(ui, path, **opts) -> None:
decompress = opts.get('decompress')
size, firstnode, mapping = parsefileblob(path, decompress)
@@ -62,7 +64,7 @@
queue.append(p2)
-def buildtemprevlog(repo, file):
+def buildtemprevlog(repo, file) -> filelog.filelog:
# get filename key
filekey = hex(hashutil.sha1(file).digest())
filedir = os.path.join(repo.path, b'store/data', filekey)
@@ -115,11 +117,11 @@
r = buildtemprevlog(repo, file_)
# debugindex like normal
- format = opts.get(b'format', 0)
+ format = opts.get('format', 0)
if format not in (0, 1):
raise error.Abort(_(b"unknown format %d") % format)
- generaldelta = r.version & revlog.FLAG_GENERALDELTA
+ generaldelta = r.get_revlog()._format_flags & revlog.FLAG_GENERALDELTA
if generaldelta:
basehdr = b' delta'
else:
@@ -144,9 +146,9 @@
for i in r:
node = r.node(i)
if generaldelta:
- base = r.deltaparent(i)
+ base = r.get_revlog().deltaparent(i)
else:
- base = r.chainbase(i)
+ base = r.get_revlog().chainbase(i)
if format == 0:
try:
pp = r.parents(node)
@@ -156,8 +158,8 @@
b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
% (
i,
- r.start(i),
- r.length(i),
+ r.get_revlog().start(i),
+ r.get_revlog().length(i),
base,
r.linkrev(i),
short(node),
@@ -171,10 +173,10 @@
b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n"
% (
i,
- r.flags(i),
- r.start(i),
- r.length(i),
- r.rawsize(i),
+ r.get_revlog().flags(i),
+ r.get_revlog().start(i),
+ r.get_revlog().length(i),
+ r.get_revlog().rawsize(i),
base,
r.linkrev(i),
pr[0],
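
In debugcommands.py, opts.get(b'format') becomes opts.get('format'): the options arrive as **opts, keyword arguments are always str-keyed in Python 3, and Mercurial converts its bytes-keyed option dicts with pycompat.strkwargs() before the call, so the bytes lookup could never match. A simplified stand-in for that conversion (the real helper differs in details):

    def strkwargs(dic):
        # bytes keys -> str keys, so the dict can be splatted as
        # **kwargs, which Python requires to be str-keyed.
        return {k.decode('latin-1'): v for k, v in dic.items()}


    def debugcmd(ui, path, **opts):
        # Inside the function the keys are str, never bytes.
        return opts.get('format', 0)


    byteopts = {b'format': 1}
    print(debugcmd(None, b'some/path', **strkwargs(byteopts)))  # 1
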
--- a/hgext/remotefilelog/fileserverclient.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/fileserverclient.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import io
import os
--- a/hgext/remotefilelog/historypack.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/historypack.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import struct
from mercurial.node import (
--- a/hgext/remotefilelog/metadatastore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/metadatastore.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from mercurial.node import (
hex,
sha1nodeconstants,
--- a/hgext/remotefilelog/remotefilectx.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/remotefilectx.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
import time
@@ -100,7 +102,11 @@
if path in data[3]: # checking the 'files' field.
# The file has been touched, check if the hash is what we're
# looking for.
- if fileid == mfl[data[0]].readfast().get(path):
+ #
+ # The change has to be against a parent, otherwise we might be
+ # missing linkrev worthy changes.
+ m = mfl[data[0]].read_delta_parents(exact=False)
+ if fileid == m.get(path):
return rev
# Couldn't find the linkrev. This should generally not happen, and will
@@ -199,8 +205,10 @@
manifestnode, files = ancctx[0], ancctx[3]
# If the file was touched in this ancestor, and the content is similar
# to the one we are searching for.
- if path in files and fnode == mfl[manifestnode].readfast().get(path):
- return cl.node(ancrev)
+ if path in files:
+ m = mfl[manifestnode].read_delta_parents(exact=False)
+ if fnode == m.get(path):
+ return cl.node(ancrev)
return None
def _adjustlinknode(self, path, filelog, fnode, srcrev, inclusive=False):
@@ -345,7 +353,7 @@
b'linkrevfixup',
logmsg + b'\n',
elapsed=elapsed * 1000,
- **commonlogkwargs
+ **commonlogkwargs,
)
def _verifylinknode(self, revs, linknode):
--- a/hgext/remotefilelog/remotefilelog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/remotefilelog.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,8 +6,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
+from typing import (
+ Iterator,
+)
+
from mercurial.node import bin
from mercurial.i18n import _
from mercurial import (
@@ -41,7 +47,6 @@
class remotefilelog:
-
_flagserrorclass = error.RevlogError
def __init__(self, opener, path, repo):
@@ -297,7 +302,7 @@
deltamode=None,
sidedata_helpers=None,
debug_info=None,
- ):
+ ) -> Iterator[revlog.revlogrevisiondelta]:
# we don't use any of these parameters here
del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
del deltamode
--- a/hgext/remotefilelog/remotefilelogserver.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/remotefilelogserver.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
import stat
import time
--- a/hgext/remotefilelog/repack.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/repack.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import os
import time
@@ -468,18 +470,14 @@
# process the commits in toposorted order starting from the oldest
for r in reversed(keep._list):
- if repo[r].p1().rev() in processed:
- # if the direct parent has already been processed
- # then we only need to process the delta
- m = repo[r].manifestctx().readdelta()
- else:
- # otherwise take the manifest and diff it
- # with the previous manifest if one exists
+ delta_from, m = repo[r].manifestctx().read_any_fast_delta(processed)
+ if delta_from is None and lastmanifest is not None:
+ # could not find a delta, compute one.
+ # XXX (is this really faster?)
+ full = m
if lastmanifest:
- m = repo[r].manifest().diff(lastmanifest)
- else:
- m = repo[r].manifest()
- lastmanifest = repo[r].manifest()
+ m = m.diff(lastmanifest)
+ lastmanifest = full
processed.add(r)
# populate keepkeys with keys from the current manifest
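
repack.py (and shallowrepo.py above) replace the hand-rolled choice between readdelta() against an already-processed parent and a full manifest diff with the newer read_any_fast_delta() manifest API, which picks the cheapest available representation itself and reports which revision the delta is against. Since this is an internal Mercurial API, the following is only a hypothetical stand-in showing the shape of the contract, not the real implementation:

    def read_any_fast_delta(store, rev, acceptable_bases):
        """Return (base, mapping): a delta against any acceptable base
        if one is cheaply available, else (None, full_mapping)."""
        base = store['delta_base'].get(rev)
        if base in acceptable_bases:
            return base, store['deltas'][rev]
        return None, store['full'][rev]


    store = {
        'delta_base': {2: 1},
        'deltas': {2: {b'f.txt': b'node2'}},
        'full': {2: {b'f.txt': b'node2', b'g.txt': b'node0'}},
    }
    print(read_any_fast_delta(store, 2, {1}))  # delta against rev 1
    print(read_any_fast_delta(store, 2, {0}))  # no usable base: full
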
--- a/hgext/remotefilelog/shallowbundle.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/shallowbundle.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial.node import bin, hex
from mercurial import (
--- a/hgext/remotefilelog/shallowrepo.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/shallowrepo.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import os
from mercurial.i18n import _
@@ -32,6 +34,7 @@
shallowutil,
)
+
# These make*stores functions are global so that other extensions can replace
# them.
def makelocalstores(repo):
@@ -291,11 +294,7 @@
# Decompressing manifests is expensive.
# When possible, only read the deltas.
- p1, p2 = mfrevlog.parentrevs(mfrev)
- if p1 in visited and p2 in visited:
- mfdict = mfl[mfnode].readfast()
- else:
- mfdict = mfl[mfnode].read()
+ mfdict = mfl[mfnode].read_any_fast_delta(visited)[1]
diff = mfdict.items()
if pats:
--- a/hgext/remotefilelog/shallowstore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/shallowstore.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
def wrapstore(store):
class shallowstore(store.__class__):
--- a/hgext/remotefilelog/shallowutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/shallowutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
import os
import stat
--- a/hgext/remotefilelog/shallowverifier.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotefilelog/shallowverifier.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
from mercurial.i18n import _
from mercurial import verify
--- a/hgext/remotenames.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/remotenames.py Sat Oct 26 04:16:00 2024 +0200
@@ -24,6 +24,7 @@
namespace (default: 'default')
"""
+from __future__ import annotations
import collections.abc
@@ -259,7 +260,6 @@
def reposetup(ui, repo):
-
# set the config option to store remotenames
repo.ui.setconfig(b'experimental', b'remotenames', True, b'remotenames-ext')
--- a/hgext/schemes.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/schemes.py Sat Oct 26 04:16:00 2024 +0200
@@ -40,6 +40,8 @@
same name.
"""
+from __future__ import annotations
+
import os
import re
--- a/hgext/share.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/share.py Sat Oct 26 04:16:00 2024 +0200
@@ -65,6 +65,7 @@
and there are no untracked files, delete that share and create a new share.
'''
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/show.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/show.py Sat Oct 26 04:16:00 2024 +0200
@@ -25,6 +25,7 @@
performed.
"""
+from __future__ import annotations
from mercurial.i18n import _
from mercurial.node import nullrev
@@ -513,7 +514,7 @@
entries = []
for key in sorted(showview._table.keys()):
entries.append(
- r' %s %s'
+ r'%s %s'
% (
pycompat.sysstr(key.ljust(longest)),
showview._table[key]._origdoc,
@@ -521,7 +522,7 @@
)
cmdtable[b'show'][0].__doc__ = pycompat.sysstr(b'%s\n\n%s\n ') % (
- cmdtable[b'show'][0].__doc__.rstrip(),
+ pycompat.cleandoc(cmdtable[b'show'][0].__doc__),
pycompat.sysstr(b'\n\n').join(entries),
)
--- a/hgext/sparse.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/sparse.py Sat Oct 26 04:16:00 2024 +0200
@@ -71,6 +71,7 @@
tools/tests/**
"""
+from __future__ import annotations
from mercurial.i18n import _
from mercurial import (
--- a/hgext/split.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/split.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
# GNU General Public License version 2 or any later version.
"""command to split a changeset into smaller ones (EXPERIMENTAL)"""
+from __future__ import annotations
from mercurial.i18n import _
--- a/hgext/sqlitestore.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/sqlitestore.py Sat Oct 26 04:16:00 2024 +0200
@@ -43,10 +43,12 @@
# --extra-config-opt extensions.sqlitestore= \
# --extra-config-opt storage.new-repo-backend=sqlite
+from __future__ import annotations
import sqlite3
import struct
import threading
+import typing
import zlib
from mercurial.i18n import _
@@ -56,6 +58,12 @@
short,
)
from mercurial.thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from mercurial import (
ancestor,
dagop,
@@ -649,7 +657,6 @@
deltamode=deltamode,
sidedata_helpers=sidedata_helpers,
):
-
yield delta
# End of ifiledata interface.
--- a/hgext/strip.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/strip.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,8 @@
repository. See the command help for details.
"""
+from __future__ import annotations
+
from mercurial import commands
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
--- a/hgext/transplant.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/transplant.py Sat Oct 26 04:16:00 2024 +0200
@@ -14,6 +14,8 @@
map from a changeset hash to its hash in the source repository.
'''
+from __future__ import annotations
+
import os
from mercurial.i18n import _
--- a/hgext/uncommit.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/uncommit.py Sat Oct 26 04:16:00 2024 +0200
@@ -17,6 +17,7 @@
added and removed in the working directory.
"""
+from __future__ import annotations
from mercurial.i18n import _
@@ -154,7 +155,6 @@
cmdutil.resolve_commit_options(ui, opts)
with repo.wlock(), repo.lock():
-
st = repo.status()
m, a, r, d = st.modified, st.added, st.removed, st.deleted
isdirtypath = any(set(m + a + r + d) & set(pats))
@@ -264,7 +264,6 @@
unfi = repo.unfiltered()
with repo.wlock(), repo.lock(), repo.transaction(b'unamend'):
-
# identify the commit from which to unamend
curctx = repo[b'.']
--- a/hgext/win32mbcs.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/win32mbcs.py Sat Oct 26 04:16:00 2024 +0200
@@ -45,6 +45,8 @@
It is useful for the users who want to commit with UTF-8 log message.
'''
+from __future__ import annotations
+
import os
import sys
--- a/hgext/win32text.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/win32text.py Sat Oct 26 04:16:00 2024 +0200
@@ -41,6 +41,7 @@
# or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
'''
+from __future__ import annotations
import re
from mercurial.i18n import _
--- a/hgext/zeroconf/Zeroconf.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/zeroconf/Zeroconf.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
""" Multicast DNS Service Discovery for Python, v0.12
Copyright (C) 2003, Paul Scott-Murphy
@@ -93,7 +95,7 @@
# hook for threads
-globals()[b'_GLOBAL_DONE'] = 0
+globals()['_GLOBAL_DONE'] = 0
# Some timing constants
@@ -230,6 +232,16 @@
# implementation classes
+_SOL_IP = socket.SOL_IP
+
+if pycompat.iswindows:
+ # XXX: Not sure if there are newer versions of python where this would fail,
+ # but apparently socket.SOL_IP used to be 0, and socket.IPPROTO_IP is 0, so
+ # this would work with older versions of python.
+ #
+ # https://github.com/python/cpython/issues/101960
+ _SOL_IP = socket.IPPROTO_IP
+
class DNSEntry:
"""A DNS entry"""
@@ -367,7 +379,7 @@
"""Abstract method"""
raise AbstractMethodException
- def toString(self, other):
+ def toString(self, hdr, other):
"""String representation with additional information"""
arg = b"%s/%s,%s" % (
self.ttl,
@@ -445,7 +457,7 @@
def __repr__(self):
"""String representation"""
- return self.toString(self.alias)
+ return self.toString(b'', self.alias)
class DNSText(DNSRecord):
@@ -468,9 +480,9 @@
def __repr__(self):
"""String representation"""
if len(self.text) > 10:
- return self.toString(self.text[:7] + b"...")
+ return self.toString(b'', self.text[:7] + b"...")
else:
- return self.toString(self.text)
+ return self.toString(b'', self.text)
class DNSService(DNSRecord):
@@ -503,7 +515,7 @@
def __repr__(self):
"""String representation"""
- return self.toString(b"%s:%s" % (self.server, self.port))
+ return self.toString(b'', b"%s:%s" % (self.server, self.port))
class DNSIncoming:
@@ -937,7 +949,7 @@
self.start()
def run(self):
- while not globals()[b'_GLOBAL_DONE']:
+ while not globals()['_GLOBAL_DONE']:
rs = self.getReaders()
if len(rs) == 0:
# No sockets to manage, but we wait for the timeout
@@ -953,7 +965,7 @@
try:
self.readers[sock].handle_read()
except Exception:
- if not globals()[b'_GLOBAL_DONE']:
+ if not globals()['_GLOBAL_DONE']:
traceback.print_exc()
except Exception:
pass
@@ -1033,7 +1045,7 @@
def run(self):
while True:
self.zeroconf.wait(10 * 1000)
- if globals()[b'_GLOBAL_DONE']:
+ if globals()['_GLOBAL_DONE']:
return
now = currentTimeMillis()
for record in self.zeroconf.cache.entries():
@@ -1106,7 +1118,7 @@
now = currentTimeMillis()
if len(self.list) == 0 and self.nexttime > now:
self.zeroconf.wait(self.nexttime - now)
- if globals()[b'_GLOBAL_DONE'] or self.done:
+ if globals()['_GLOBAL_DONE'] or self.done:
return
now = currentTimeMillis()
@@ -1307,6 +1319,7 @@
delay = _LISTENER_TIME
next = now + delay
last = now + timeout
+ result = False
try:
zeroconf.addListener(
self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN)
@@ -1352,7 +1365,7 @@
zeroconf.wait(min(next, last) - now)
now = currentTimeMillis()
- result = 1
+ result = True
finally:
zeroconf.removeListener(self)
@@ -1395,7 +1408,7 @@
def __init__(self, bindaddress=None):
"""Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads."""
- globals()[b'_GLOBAL_DONE'] = 0
+ globals()['_GLOBAL_DONE'] = 0
if bindaddress is None:
self.intf = socket.gethostbyname(socket.gethostname())
else:
@@ -1416,8 +1429,8 @@
# work as expected.
#
pass
- self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, b"\xff")
- self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, b"\x01")
+ self.socket.setsockopt(_SOL_IP, socket.IP_MULTICAST_TTL, b"\xff")
+ self.socket.setsockopt(_SOL_IP, socket.IP_MULTICAST_LOOP, b"\x01")
try:
self.socket.bind(self.group)
except Exception:
@@ -1425,7 +1438,7 @@
# SO_REUSEADDR and SO_REUSEPORT have been set, so ignore it
pass
self.socket.setsockopt(
- socket.SOL_IP,
+ _SOL_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'),
)
@@ -1835,13 +1848,13 @@
def close(self):
"""Ends the background threads, and prevent this instance from
servicing further queries."""
- if globals()[b'_GLOBAL_DONE'] == 0:
- globals()[b'_GLOBAL_DONE'] = 1
+ if globals()['_GLOBAL_DONE'] == 0:
+ globals()['_GLOBAL_DONE'] = 1
self.notifyAll()
self.engine.notify()
self.unregisterAllServices()
self.socket.setsockopt(
- socket.SOL_IP,
+ _SOL_IP,
socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'),
)
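(Aside on the `_SOL_IP` switch above: the same fallback can be sketched standalone. A minimal illustration assuming only the stdlib, using `sys.platform` where the patch uses `pycompat.iswindows`:)

```python
import socket
import sys

# SOL_IP and IPPROTO_IP historically shared the value 0, which is no longer
# true everywhere (https://github.com/python/cpython/issues/101960), so
# prefer IPPROTO_IP on Windows.
_SOL_IP = socket.SOL_IP
if sys.platform == 'win32':
    _SOL_IP = socket.IPPROTO_IP

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(_SOL_IP, socket.IP_MULTICAST_TTL, b"\xff")
s.close()
```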
--- a/hgext/zeroconf/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext/zeroconf/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -23,6 +23,8 @@
zc-test = http://example.com:8000/test
'''
+from __future__ import annotations
+
import os
import socket
import time
--- a/hgext3rd/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/hgext3rd/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,4 +1,7 @@
# name space package to host third party extensions
+
+from __future__ import annotations
+
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
--- a/i18n/check-translation.py Thu Jan 11 20:37:34 2024 +0100
+++ b/i18n/check-translation.py Sat Oct 26 04:16:00 2024 +0200
@@ -2,6 +2,8 @@
#
# check-translation.py - check Mercurial specific translation problems
+from __future__ import annotations
+
import re
import polib
--- a/i18n/de.po Thu Jan 11 20:37:34 2024 +0100
+++ b/i18n/de.po Sat Oct 26 04:16:00 2024 +0200
@@ -10044,11 +10044,11 @@
msgstr " Merkt Dateien zur Versionskontrolle im Projektarchiv vor."
msgid ""
-" The files will be added to the repository at the next commit. To\n"
-" undo an add before that, see :hg:`forget`."
-msgstr ""
-" Die Dateien werden dem Projektarchiv beim nächsten Übernehmen (commit)\n"
-" hinzugefügt. Um dies vorher rückgängig zu machen, siehe:hg:`forget`."
+"The files will be added to the repository at the next commit. To\n"
+"undo an add before that, see :hg:`forget`."
+msgstr ""
+"Die Dateien werden dem Projektarchiv beim nächsten Übernehmen (commit)\n"
+"hinzugefügt. Um dies vorher rückgängig zu machen, siehe:hg:`forget`."
msgid " If no names are given, add all files to the repository."
msgstr ""
@@ -13297,37 +13297,37 @@
msgid "pull changes from the specified source"
msgstr "Ruft Änderungen von der angegebenen Quelle ab"
-msgid " Pull changes from a remote repository to a local one."
-msgstr " Überträgt Änderungen aus einem entfernten Archiv in das lokale."
-
-msgid ""
-" This finds all changes from the repository at the specified path\n"
-" or URL and adds them to a local repository (the current one unless\n"
-" -R is specified). By default, this does not update the copy of the\n"
-" project in the working directory."
-msgstr ""
-" Dabei werden alle Änderungen vom Archiv am angegebenen Pfad oder\n"
-" URL gesucht und dem lokalen Archiv hinzugefügt (das aktuelle, oder das\n"
-" mit -R gegebene). Standardmäßig wird die Kopie des Projektes im\n"
-" Arbeitsverzeichnis nicht aktualisiert."
-
-msgid ""
-" Use :hg:`incoming` if you want to see what would have been added\n"
-" by a pull at the time you issued this command. If you then decide\n"
-" to add those changes to the repository, you should use :hg:`pull\n"
-" -r X` where ``X`` is the last changeset listed by :hg:`incoming`."
-msgstr ""
-" Um zu sehen, was beim nächsten 'pull' geholt würde, ohne dem Archiv\n"
-" tatsächlich Änderungen hinzuzufügen, nutze :hg:`incoming`. Wenn diese\n"
-" dann hinzugefügt werden sollen, kann mit :hg:`pull -r X` als X der\n"
-" letzte von incoming gezeigte Änderungssatz angegeben werden."
-
-msgid ""
-" Returns 0 on success, 1 if an update had unresolved files.\n"
-" "
-msgstr ""
-" Gibt 0 bei Erfolg zurück, 1 bei Konfliken während der Aktualisierung.\n"
-" "
+msgid "Pull changes from a remote repository to a local one."
+msgstr "Überträgt Änderungen aus einem entfernten Archiv in das lokale."
+
+msgid ""
+"This finds all changes from the repository at the specified path\n"
+"or URL and adds them to a local repository (the current one unless\n"
+"-R is specified). By default, this does not update the copy of the\n"
+"project in the working directory."
+msgstr ""
+"Dabei werden alle Änderungen vom Archiv am angegebenen Pfad oder\n"
+"URL gesucht und dem lokalen Archiv hinzugefügt (das aktuelle, oder das\n"
+"mit -R gegebene). Standardmäßig wird die Kopie des Projektes im\n"
+"Arbeitsverzeichnis nicht aktualisiert."
+
+msgid ""
+"Use :hg:`incoming` if you want to see what would have been added\n"
+"by a pull at the time you issued this command. If you then decide\n"
+"to add those changes to the repository, you should use :hg:`pull\n"
+"-r X` where ``X`` is the last changeset listed by :hg:`incoming`."
+msgstr ""
+"Um zu sehen, was beim nächsten 'pull' geholt würde, ohne dem Archiv\n"
+"tatsächlich Änderungen hinzuzufügen, nutze :hg:`incoming`. Wenn diese\n"
+"dann hinzugefügt werden sollen, kann mit :hg:`pull -r X` als X der\n"
+"letzte von incoming gezeigte Änderungssatz angegeben werden."
+
+msgid ""
+"Returns 0 on success, 1 if an update had unresolved files.\n"
+""
+msgstr ""
+"Gibt 0 bei Erfolg zurück, 1 bei Konfliken während der Aktualisierung.\n"
+""
#, python-format
msgid "remote bookmark %s not found!"
--- a/i18n/hggettext Thu Jan 11 20:37:34 2024 +0100
+++ b/i18n/hggettext Sat Oct 26 04:16:00 2024 +0200
@@ -20,6 +20,7 @@
join the message catalogs to get the final catalog.
"""
+from __future__ import annotations
import inspect
import os
--- a/i18n/ja.po Thu Jan 11 20:37:34 2024 +0100
+++ b/i18n/ja.po Sat Oct 26 04:16:00 2024 +0200
@@ -8388,16 +8388,16 @@
" 入力したメッセージは ``.hg/last-message.txt`` に保存されます。"
msgid ""
-" hg add/remove/copy/rename work as usual, though you might want to\n"
-" use git-style patches (-g/--git or [diff] git=1) to track copies\n"
-" and renames. See the diffs help topic for more information on the\n"
-" git diff format."
-msgstr ""
-" 複製/改名に関する履歴保存のために git 差分形式を使用(-g/--git 指定や\n"
-" 設定ファイルでの [diff] git=1 記述)するのであれば、\n"
-" add/remove/copy/rename といった hg のコマンドによる履歴記録も、\n"
-" 通常と変わりなく機能します。 git 差分形式の詳細に関しては、\n"
-" 'help diffs' を参照してください。"
+"hg add/remove/copy/rename work as usual, though you might want to\n"
+"use git-style patches (-g/--git or [diff] git=1) to track copies\n"
+"and renames. See the diffs help topic for more information on the\n"
+"git diff format."
+msgstr ""
+"複製/改名に関する履歴保存のために git 差分形式を使用(-g/--git 指定や\n"
+"設定ファイルでの [diff] git=1 記述)するのであれば、\n"
+"add/remove/copy/rename といった hg のコマンドによる履歴記録も、\n"
+"通常と変わりなく機能します。 git 差分形式の詳細に関しては、\n"
+"'help diffs' を参照してください。"
msgid "hg qdiff [OPTION]... [FILE]..."
msgstr "hg qdiff [OPTION]... [FILE]..."
--- a/i18n/polib.py Thu Jan 11 20:37:34 2024 +0100
+++ b/i18n/polib.py Sat Oct 26 04:16:00 2024 +0200
@@ -13,6 +13,7 @@
:func:`~polib.mofile` convenience functions.
"""
+from __future__ import annotations
__author__ = 'David Jean Louis <izimobil@gmail.com>'
__version__ = '1.0.7'
@@ -64,7 +65,6 @@
def u(s):
return unicode(s, "unicode_escape")
-
else:
PY3 = True
text_type = str
@@ -1889,7 +1889,6 @@
chunks.reverse()
while chunks:
-
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
--- a/i18n/posplit Thu Jan 11 20:37:34 2024 +0100
+++ b/i18n/posplit Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# license: MIT/X11/Expat
#
+from __future__ import annotations
import polib
import re
--- a/mercurial/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
# Allow 'from mercurial import demandimport' to keep working.
import hgdemandimport
--- a/mercurial/__main__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/__main__.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,6 @@
+from __future__ import annotations
+
+
def run():
from . import demandimport
--- a/mercurial/admin/chainsaw.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/admin/chainsaw.py Sat Oct 26 04:16:00 2024 +0200
@@ -19,6 +19,8 @@
variable (see :hg:`help scripting`).
"""
+from __future__ import annotations
+
import shutil
from ..i18n import _
--- a/mercurial/admin/verify.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/admin/verify.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
import copy
import functools
--- a/mercurial/admin_commands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/admin_commands.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
from .i18n import _
from .admin import chainsaw, verify
from . import error, registrar, transaction
--- a/mercurial/ancestor.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/ancestor.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import heapq
@@ -88,7 +89,7 @@
depth = [0] * count
seen = [0] * count
mapping = []
- for (i, n) in enumerate(sorted(nodes)):
+ for i, n in enumerate(sorted(nodes)):
depth[n] = 1
b = 1 << i
seen[n] = b
--- a/mercurial/archival.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/archival.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,15 +5,21 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import gzip
import os
import struct
import tarfile
import time
+import typing
import zipfile
import zlib
+from typing import (
+ Optional,
+)
+
from .i18n import _
from .node import nullrev
from .pycompat import open
@@ -30,6 +36,11 @@
from .utils import stringutil
+if typing.TYPE_CHECKING:
+ from . import (
+ localrepo,
+ )
+
stringio = util.stringio
# from unzip source code:
@@ -153,16 +164,7 @@
mtime=mtime,
)
self.fileobj = gzfileobj
- return (
- # taropen() wants Literal['a', 'r', 'w', 'x'] for the mode,
- # but Literal[] is only available in 3.8+ without the
- # typing_extensions backport.
- # pytype: disable=wrong-arg-types
- tarfile.TarFile.taropen( # pytype: disable=attribute-error
- name, pycompat.sysstr(mode), gzfileobj
- )
- # pytype: enable=wrong-arg-types
- )
+ return tarfile.TarFile.taropen(name, "w", gzfileobj)
else:
try:
return tarfile.open(
@@ -281,20 +283,21 @@
def archive(
- repo,
- dest,
+ repo: "localrepo.localrepository",
+ dest, # TODO: should be bytes, but could be Callable
node,
- kind,
- decode=True,
+ kind: bytes,
+ decode: bool = True,
match=None,
- prefix=b'',
- mtime=None,
- subrepos=False,
-):
+ prefix: bytes = b'',
+ mtime: Optional[float] = None,
+ subrepos: bool = False,
+) -> int:
"""create archive of repo as it was at node.
- dest can be name of directory, name of archive file, or file
- object to write archive to.
+    dest can be the name of a directory, the name of an archive file, a
+    callable, or a file object to write the archive to. If it is a callable,
+    it will be called to open the actual file object before the first archive
+    member is written.
kind is type of archive to create.
@@ -316,7 +319,37 @@
else:
prefix = tidyprefix(dest, kind, prefix)
+ archiver = None
+ ctx = repo[node]
+
+ def opencallback():
+ """Return the archiver instance, creating it if necessary.
+
+ This function is called when the first actual entry is created.
+ It may be called multiple times from different layers.
+ When serving the archive via hgweb, no errors should happen after
+ this point.
+ """
+ nonlocal archiver
+ if archiver is None:
+ if callable(dest):
+ output = dest()
+ else:
+ output = dest
+ archiver = archivers[kind](output, mtime or ctx.date()[0])
+ assert archiver is not None
+
+ if repo.ui.configbool(b"ui", b"archivemeta"):
+ metaname = b'.hg_archival.txt'
+ if match(metaname):
+ write(metaname, 0o644, False, lambda: buildmetadata(ctx))
+ return archiver
+
def write(name, mode, islink, getdata):
+ if archiver is None:
+ opencallback()
+ assert archiver is not None, "archive should be opened by now"
+
data = getdata()
if decode:
data = repo.wwritedata(name, data)
@@ -325,17 +358,9 @@
if kind not in archivers:
raise error.Abort(_(b"unknown archive type '%s'") % kind)
- ctx = repo[node]
- archiver = archivers[kind](dest, mtime or ctx.date()[0])
-
if not match:
match = scmutil.matchall(repo)
- if repo.ui.configbool(b"ui", b"archivemeta"):
- name = b'.hg_archival.txt'
- if match(name):
- write(name, 0o644, False, lambda: buildmetadata(ctx))
-
files = list(ctx.manifest().walk(match))
total = len(files)
if total:
@@ -358,10 +383,11 @@
sub = ctx.workingsub(subpath)
submatch = matchmod.subdirmatcher(subpath, match)
subprefix = prefix + subpath + b'/'
- total += sub.archive(archiver, subprefix, submatch, decode)
+ total += sub.archive(opencallback, subprefix, submatch, decode)
if total == 0:
raise error.Abort(_(b'no files match the archive pattern'))
+ assert archiver is not None, "archive should have been opened before"
archiver.done()
return total
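The `opencallback`/`write` pair above implements a deferred-open pattern: the archiver is only created when the first member is written, so that nothing reaches the client (e.g. when hgweb serves the archive) before errors can still be reported cleanly. A stripped-down sketch of the idea; `archive_files` and `members` are hypothetical names:

```python
def archive_files(dest, members):
    """dest is a path or a zero-argument callable returning a writable file
    object; it is only opened once there is actually something to write."""
    writer = None

    def opencallback():
        nonlocal writer
        if writer is None:
            writer = dest() if callable(dest) else open(dest, 'wb')
        return writer

    total = 0
    for _name, data in members:
        opencallback().write(data)
        total += 1
    if total == 0:
        raise ValueError('no files match the archive pattern')
    writer.close()
    return total
```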
--- a/mercurial/bookmarks.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/bookmarks.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import struct
@@ -685,7 +686,7 @@
remotemarks"""
changed = []
localmarks = repo._bookmarks
- for (b, id) in remotemarks.items():
+ for b, id in remotemarks.items():
if id != localmarks.get(b, None) and id in repo:
changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b))
for b in localmarks:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/branching/rev_cache.py Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,442 @@
+# rev_cache.py - caching branch information per revision
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
+import os
+import struct
+
+from ..node import (
+ nullrev,
+)
+
+from .. import (
+ encoding,
+ error,
+ util,
+)
+
+from ..utils import (
+ stringutil,
+)
+
+calcsize = struct.calcsize
+pack_into = struct.pack_into
+unpack_from = struct.unpack_from
+
+
+# Revision branch info cache
+
+# The "V2" version use the same format as the "V1" but garantee it won't be
+# truncated, preventing SIGBUS when it is mmap-ed
+_rbcversion = b'-v2'
+_rbcnames = b'rbc-names' + _rbcversion
+_rbcrevs = b'rbc-revs' + _rbcversion
+_rbc_legacy_version = b'-v1'
+_rbc_legacy_names = b'rbc-names' + _rbc_legacy_version
+_rbc_legacy_revs = b'rbc-revs' + _rbc_legacy_version
+# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
+_rbcrecfmt = b'>4sI'
+_rbcrecsize = calcsize(_rbcrecfmt)
+_rbcmininc = 64 * _rbcrecsize
+_rbcnodelen = 4
+_rbcbranchidxmask = 0x7FFFFFFF
+_rbccloseflag = 0x80000000
+
+
+# If a write would touch more than this ratio of the existing file, rewrite
+# the whole file from scratch with atomic replacement instead of patching it
+# in place.
+REWRITE_RATIO = 0.2
+
+
+class rbcrevs:
+ """a byte string consisting of an immutable prefix followed by a mutable suffix"""
+
+ def __init__(self, revs):
+ self._prefix = revs
+ self._rest = bytearray()
+
+ @property
+ def len_prefix(self):
+ size = len(self._prefix)
+ return size - (size % _rbcrecsize)
+
+ def __len__(self):
+ return self.len_prefix + len(self._rest)
+
+ def unpack_record(self, rbcrevidx):
+ if rbcrevidx < self.len_prefix:
+ return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
+ else:
+ return unpack_from(
+ _rbcrecfmt,
+ util.buffer(self._rest),
+ rbcrevidx - self.len_prefix,
+ )
+
+ def make_mutable(self):
+ if self.len_prefix > 0:
+ entirety = bytearray()
+ entirety[:] = self._prefix[: self.len_prefix]
+ entirety.extend(self._rest)
+ self._rest = entirety
+ self._prefix = bytearray()
+
+ def truncate(self, pos):
+ self.make_mutable()
+ del self._rest[pos:]
+
+ def pack_into(self, rbcrevidx, node, branchidx):
+ if rbcrevidx < self.len_prefix:
+ self.make_mutable()
+ buf = self._rest
+ start_offset = rbcrevidx - self.len_prefix
+ end_offset = start_offset + _rbcrecsize
+
+ if len(self._rest) < end_offset:
+ # bytearray doesn't allocate extra space at least in Python 3.7.
+ # When multiple changesets are added in a row, precise resize would
+ # result in quadratic complexity. Overallocate to compensate by
+ # using the classic doubling technique for dynamic arrays instead.
+ # If there was a gap in the map before, less space will be reserved.
+ self._rest.extend(b'\0' * end_offset)
+ return pack_into(
+ _rbcrecfmt,
+ buf,
+ start_offset,
+ node,
+ branchidx,
+ )
+
+ def extend(self, extension):
+ return self._rest.extend(extension)
+
+ def slice(self, begin, end):
+ if begin < self.len_prefix:
+ acc = bytearray()
+ acc[:] = self._prefix[begin : min(end, self.len_prefix)]
+ acc.extend(
+ self._rest[begin - self.len_prefix : end - self.len_prefix]
+ )
+ return acc
+ return self._rest[begin - self.len_prefix : end - self.len_prefix]
+
+
+class revbranchcache:
+ """Persistent cache, mapping from revision number to branch name and close.
+ This is a low level cache, independent of filtering.
+
+ Branch names are stored in rbc-names in internal encoding separated by 0.
+ rbc-names is append-only, and each branch name is only stored once and will
+ thus have a unique index.
+
+ The branch info for each revision is stored in rbc-revs as constant size
+ records. The whole file is read into memory, but it is only 'parsed' on
+ demand. The file is usually append-only but will be truncated if repo
+ modification is detected.
+ The record for each revision contains the first 4 bytes of the
+ corresponding node hash, and the record is only used if it still matches.
+    Even a completely trashed rbc-revs will thus still give the right result
+ while converging towards full recovery ... assuming no incorrectly matching
+ node hashes.
+ The record also contains 4 bytes where 31 bits contains the index of the
+ branch and the last bit indicate that it is a branch close commit.
+ The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
+ and will grow with it but be 1/8th of its size.
+ """
+
+ def __init__(self, repo, readonly=True):
+ assert repo.filtername is None
+ self._repo = repo
+ self._names = [] # branch names in local encoding with static index
+ self._rbcrevs = rbcrevs(bytearray())
+ self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
+ self._force_overwrite = False
+ v1_fallback = False
+ try:
+ try:
+ bndata = repo.cachevfs.read(_rbcnames)
+ except (IOError, OSError):
+ # If we don't have "v2" data, we might have "v1" data worth
+ # using.
+ #
+                # Consider dropping this fallback some versions after the
+                # hg-6.9 release.
+ bndata = repo.cachevfs.read(_rbc_legacy_names)
+ v1_fallback = True
+ self._force_overwrite = True
+ self._rbcsnameslen = len(bndata) # for verification before writing
+ if bndata:
+ self._names = [
+ encoding.tolocal(bn) for bn in bndata.split(b'\0')
+ ]
+ except (IOError, OSError):
+ if readonly:
+ # don't try to use cache - fall back to the slow path
+ self.branchinfo = self._branchinfo
+
+ if self._names:
+ try:
+ usemmap = repo.ui.configbool(b'storage', b'revbranchcache.mmap')
+ if not v1_fallback:
+ with repo.cachevfs(_rbcrevs) as fp:
+ if usemmap and repo.cachevfs.is_mmap_safe(_rbcrevs):
+ data = util.buffer(util.mmapread(fp))
+ else:
+ data = fp.read()
+ else:
+ # If we don't have "v2" data, we might have "v1" data worth
+ # using.
+ #
+                        # Consider dropping this fallback some versions
+                        # after the hg-6.9 release.
+ with repo.cachevfs(_rbc_legacy_revs) as fp:
+ data = fp.read()
+ self._rbcrevs = rbcrevs(data)
+ except (IOError, OSError) as inst:
+ repo.ui.debug(
+ b"couldn't read revision branch cache: %s\n"
+ % stringutil.forcebytestr(inst)
+ )
+ # remember number of good records on disk
+ self._rbcrevslen = min(
+ len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
+ )
+ if self._rbcrevslen == 0:
+ self._names = []
+ self._rbcnamescount = len(self._names) # number of names read at
+ # _rbcsnameslen
+
+ def _clear(self):
+ self._rbcsnameslen = 0
+ del self._names[:]
+ self._rbcnamescount = 0
+ self._rbcrevslen = len(self._repo.changelog)
+ self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
+ util.clearcachedproperty(self, b'_namesreverse')
+ self._force_overwrite = True
+
+ def invalidate(self, rev=0):
+ self._rbcrevslen = rev
+ self._rbcrevs.truncate(rev)
+ self._force_overwrite = True
+
+ @util.propertycache
+ def _namesreverse(self):
+ return {b: r for r, b in enumerate(self._names)}
+
+ def branchinfo(self, rev):
+ """Return branch name and close flag for rev, using and updating
+ persistent cache."""
+ changelog = self._repo.changelog
+ rbcrevidx = rev * _rbcrecsize
+
+ # avoid negative index, changelog.read(nullrev) is fast without cache
+ if rev == nullrev:
+ return changelog.branchinfo(rev)
+
+ # if requested rev isn't allocated, grow and cache the rev info
+ if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
+ return self._branchinfo(rev)
+
+ # fast path: extract data from cache, use it if node is matching
+ reponode = changelog.node(rev)[:_rbcnodelen]
+ cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
+ close = bool(branchidx & _rbccloseflag)
+ if close:
+ branchidx &= _rbcbranchidxmask
+ if cachenode == b'\0\0\0\0':
+ pass
+ elif cachenode == reponode:
+ try:
+ return self._names[branchidx], close
+ except IndexError:
+ # recover from invalid reference to unknown branch
+ self._repo.ui.debug(
+ b"referenced branch names not found"
+ b" - rebuilding revision branch cache from scratch\n"
+ )
+ self._clear()
+ else:
+ # rev/node map has changed, invalidate the cache from here up
+ self._repo.ui.debug(
+ b"history modification detected - truncating "
+ b"revision branch cache to revision %d\n" % rev
+ )
+ truncate = rbcrevidx + _rbcrecsize
+ self._rbcrevs.truncate(truncate)
+ self._rbcrevslen = min(self._rbcrevslen, truncate)
+
+ # fall back to slow path and make sure it will be written to disk
+ return self._branchinfo(rev)
+
+ def _branchinfo(self, rev):
+ """Retrieve branch info from changelog and update _rbcrevs"""
+ changelog = self._repo.changelog
+ b, close = changelog.branchinfo(rev)
+ if b in self._namesreverse:
+ branchidx = self._namesreverse[b]
+ else:
+ branchidx = len(self._names)
+ self._names.append(b)
+ self._namesreverse[b] = branchidx
+ reponode = changelog.node(rev)
+ if close:
+ branchidx |= _rbccloseflag
+ self._setcachedata(rev, reponode, branchidx)
+ return b, close
+
+ def setdata(self, rev, changelogrevision):
+ """add new data information to the cache"""
+ branch, close = changelogrevision.branchinfo
+
+ if branch in self._namesreverse:
+ branchidx = self._namesreverse[branch]
+ else:
+ branchidx = len(self._names)
+ self._names.append(branch)
+ self._namesreverse[branch] = branchidx
+ if close:
+ branchidx |= _rbccloseflag
+ self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
+        # If no cache data was readable (none exists, bad permissions, etc.)
+ # the cache was bypassing itself by setting:
+ #
+ # self.branchinfo = self._branchinfo
+ #
+ # Since we now have data in the cache, we need to drop this bypassing.
+ if 'branchinfo' in vars(self):
+ del self.branchinfo
+
+ def _setcachedata(self, rev, node, branchidx):
+ """Writes the node's branch data to the in-memory cache data."""
+ if rev == nullrev:
+ return
+ rbcrevidx = rev * _rbcrecsize
+ self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
+ self._rbcrevslen = min(self._rbcrevslen, rev)
+
+ tr = self._repo.currenttransaction()
+ if tr:
+ tr.addfinalize(b'write-revbranchcache', self.write)
+
+ def write(self, tr=None):
+ """Save branch cache if it is dirty."""
+ repo = self._repo
+ wlock = None
+ step = b''
+ try:
+ # write the new names
+ if self._force_overwrite or self._rbcnamescount < len(self._names):
+ wlock = repo.wlock(wait=False)
+ step = b' names'
+ self._writenames(repo)
+
+ # write the new revs
+ start = self._rbcrevslen * _rbcrecsize
+ if self._force_overwrite or start != len(self._rbcrevs):
+ step = b''
+ if wlock is None:
+ wlock = repo.wlock(wait=False)
+ self._writerevs(repo, start)
+
+ except (IOError, OSError, error.Abort, error.LockError) as inst:
+ repo.ui.debug(
+ b"couldn't write revision branch cache%s: %s\n"
+ % (step, stringutil.forcebytestr(inst))
+ )
+ finally:
+ if wlock is not None:
+ wlock.release()
+
+ def _writenames(self, repo):
+ """write the new branch names to revbranchcache"""
+ f = None
+ if self._force_overwrite:
+ self._rbcsnameslen = 0
+ self._rbcnamescount = 0
+ try:
+ if self._force_overwrite or self._rbcnamescount != 0:
+ f = repo.cachevfs.open(_rbcnames, b'ab')
+ current_size = f.tell()
+ if current_size == self._rbcsnameslen:
+ f.write(b'\0')
+ else:
+ f.close()
+ if self._force_overwrite:
+ dbg = b"resetting content of %s\n"
+ elif current_size > 0:
+ dbg = b"%s changed - rewriting it\n"
+ else:
+ dbg = b"%s is missing - rewriting it\n"
+ repo.ui.debug(dbg % _rbcnames)
+ self._rbcnamescount = 0
+ self._rbcrevslen = 0
+ if self._rbcnamescount == 0:
+ # before rewriting names, make sure references are removed
+ repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
+ f = repo.cachevfs.open(_rbcnames, b'wb')
+ names = self._names[self._rbcnamescount :]
+ from_local = encoding.fromlocal
+ data = b'\0'.join(from_local(b) for b in names)
+ f.write(data)
+ self._rbcsnameslen = f.tell()
+ finally:
+ if f is not None:
+ f.close()
+ self._rbcnamescount = len(self._names)
+
+ def _writerevs(self, repo, start):
+ """write the new revs to revbranchcache"""
+ revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
+
+ end = revs * _rbcrecsize
+ if self._force_overwrite:
+ start = 0
+
+ # align start on entry boundary
+ start = _rbcrecsize * (start // _rbcrecsize)
+
+ with repo.cachevfs.open(_rbcrevs, b'a+b') as f:
+            pass  # this makes sure the file exists
+ with repo.cachevfs.open(_rbcrevs, b'r+b') as f:
+ f.seek(0, os.SEEK_END)
+ current_size = f.tell()
+ if current_size < start:
+ start = 0
+ if current_size != start:
+ threshold = current_size * REWRITE_RATIO
+ overwritten = min(end, current_size) - start
+ if (max(end, current_size) - start) >= threshold:
+ start = 0
+ dbg = b"resetting content of cache/%s\n" % _rbcrevs
+ repo.ui.debug(dbg)
+ elif overwritten > 0:
+ # end affected, let us overwrite the bad value
+ dbg = b"overwriting %d bytes from %d in cache/%s"
+ dbg %= (current_size - start, start, _rbcrevs)
+ if end < current_size:
+ extra = b" leaving (%d trailing bytes)"
+ extra %= current_size - end
+ dbg += extra
+ dbg += b'\n'
+ repo.ui.debug(dbg)
+ else:
+                    # extra untouched data at the end, let's warn about it
+                    assert start == end  # since we don't write anything
+ dbg = b"cache/%s contains %d unknown trailing bytes\n"
+ dbg %= (_rbcrevs, current_size - start)
+ repo.ui.debug(dbg)
+
+ if start > 0:
+ f.seek(start)
+ f.write(self._rbcrevs.slice(start, end))
+ else:
+ f.close()
+ with repo.cachevfs.open(
+ _rbcrevs,
+ b'wb',
+ atomictemp=True,
+ ) as rev_file:
+ rev_file.write(self._rbcrevs.slice(start, end))
+ self._rbcrevslen = revs
+ self._force_overwrite = False
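The on-disk record layout documented in the `revbranchcache` docstring is compact enough to demonstrate directly. The constants below mirror `_rbcrecfmt`, `_rbccloseflag` and `_rbcbranchidxmask` from the new module; the helper names are illustrative:

```python
import struct

RECFMT = b'>4sI'        # 4-byte node prefix + 4-byte branch index
CLOSEFLAG = 0x80000000  # top bit flags a branch-closing commit
IDXMASK = 0x7FFFFFFF    # remaining 31 bits index into rbc-names

def pack_record(node, branchidx, close):
    if close:
        branchidx |= CLOSEFLAG
    return struct.pack(RECFMT, node[:4], branchidx)

def unpack_record(rec):
    node4, branchidx = struct.unpack(RECFMT, rec)
    return node4, branchidx & IDXMASK, bool(branchidx & CLOSEFLAG)

node = b'\xde\xad\xbe\xef' + b'\x00' * 16  # a fake 20-byte node
rec = pack_record(node, 5, close=True)
assert unpack_record(rec) == (b'\xde\xad\xbe\xef', 5, True)
assert len(rec) == struct.calcsize(RECFMT) == 8  # 1/8th of a revlog entry
```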
--- a/mercurial/branchmap.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/branchmap.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,8 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-
-import struct
+from __future__ import annotations
from .node import (
bin,
@@ -48,10 +47,6 @@
subsettable = repoviewutil.subsettable
-calcsize = struct.calcsize
-pack_into = struct.pack_into
-unpack_from = struct.unpack_from
-
class BranchMapCache:
"""mapping of filtered views of repo with their branchcache"""
@@ -889,8 +884,7 @@
elif self.tiprev == cl.tiprev():
return cl.headrevs()
else:
- # XXX passing tiprev as ceiling of cl.headrevs could be faster
- heads = cl.headrevs(cl.revs(stop=self.tiprev))
+ heads = cl.headrevs(stop_rev=self.tiprev + 1)
return heads
def _write_header(self, fp) -> None:
@@ -915,11 +909,13 @@
"""write list of heads to a file
Return the number of heads written."""
+ to_node = repo.changelog.node
nodecount = 0
topo_heads = None
if self._pure_topo_branch is None:
- topo_heads = set(self._get_topo_heads(repo))
- to_rev = repo.changelog.index.rev
+            # we match using nodes because it is faster to build the set of
+            # nodes than to resolve node → rev later.
+ topo_heads = set(to_node(r) for r in self._get_topo_heads(repo))
for label, nodes in sorted(self._entries.items()):
if label == self._pure_topo_branch:
# no need to write anything, the header took care of that
@@ -927,8 +923,7 @@
label = encoding.fromlocal(label)
for node in nodes:
if topo_heads is not None:
- rev = to_rev(node)
- if rev in topo_heads:
+ if node in topo_heads:
continue
if node in self._closednodes:
state = b'c'
@@ -942,6 +937,10 @@
def _load_header(cls, repo, lineiter):
header_line = next(lineiter)
pieces = header_line.rstrip(b'\n').split(b" ")
+ for p in pieces:
+ if b'=' not in p:
+ msg = b"invalid header_line: %r" % header_line
+ raise ValueError(msg)
cache_keys = dict(p.split(b'=', 1) for p in pieces)
args = {}
@@ -1071,6 +1070,7 @@
for branch, heads in self._entries.items():
if heads == topo_heads:
self._pure_topo_branch = branch
+ self._state = STATE_DIRTY
break
@@ -1086,328 +1086,3 @@
closednodes: Optional[Set[bytes]] = None,
) -> None:
super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
-
-
-# Revision branch info cache
-
-_rbcversion = b'-v1'
-_rbcnames = b'rbc-names' + _rbcversion
-_rbcrevs = b'rbc-revs' + _rbcversion
-# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
-_rbcrecfmt = b'>4sI'
-_rbcrecsize = calcsize(_rbcrecfmt)
-_rbcmininc = 64 * _rbcrecsize
-_rbcnodelen = 4
-_rbcbranchidxmask = 0x7FFFFFFF
-_rbccloseflag = 0x80000000
-
-
-class rbcrevs:
- """a byte string consisting of an immutable prefix followed by a mutable suffix"""
-
- def __init__(self, revs):
- self._prefix = revs
- self._rest = bytearray()
-
- def __len__(self):
- return len(self._prefix) + len(self._rest)
-
- def unpack_record(self, rbcrevidx):
- if rbcrevidx < len(self._prefix):
- return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
- else:
- return unpack_from(
- _rbcrecfmt,
- util.buffer(self._rest),
- rbcrevidx - len(self._prefix),
- )
-
- def make_mutable(self):
- if len(self._prefix) > 0:
- entirety = bytearray()
- entirety[:] = self._prefix
- entirety.extend(self._rest)
- self._rest = entirety
- self._prefix = bytearray()
-
- def truncate(self, pos):
- self.make_mutable()
- del self._rest[pos:]
-
- def pack_into(self, rbcrevidx, node, branchidx):
- if rbcrevidx < len(self._prefix):
- self.make_mutable()
- buf = self._rest
- start_offset = rbcrevidx - len(self._prefix)
- end_offset = start_offset + _rbcrecsize
-
- if len(self._rest) < end_offset:
- # bytearray doesn't allocate extra space at least in Python 3.7.
- # When multiple changesets are added in a row, precise resize would
- # result in quadratic complexity. Overallocate to compensate by
- # using the classic doubling technique for dynamic arrays instead.
- # If there was a gap in the map before, less space will be reserved.
- self._rest.extend(b'\0' * end_offset)
- return pack_into(
- _rbcrecfmt,
- buf,
- start_offset,
- node,
- branchidx,
- )
-
- def extend(self, extension):
- return self._rest.extend(extension)
-
- def slice(self, begin, end):
- if begin < len(self._prefix):
- acc = bytearray()
- acc[:] = self._prefix[begin:end]
- acc.extend(
- self._rest[begin - len(self._prefix) : end - len(self._prefix)]
- )
- return acc
- return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
-
-
-class revbranchcache:
- """Persistent cache, mapping from revision number to branch name and close.
- This is a low level cache, independent of filtering.
-
- Branch names are stored in rbc-names in internal encoding separated by 0.
- rbc-names is append-only, and each branch name is only stored once and will
- thus have a unique index.
-
- The branch info for each revision is stored in rbc-revs as constant size
- records. The whole file is read into memory, but it is only 'parsed' on
- demand. The file is usually append-only but will be truncated if repo
- modification is detected.
- The record for each revision contains the first 4 bytes of the
- corresponding node hash, and the record is only used if it still matches.
- Even a completely trashed rbc-revs fill thus still give the right result
- while converging towards full recovery ... assuming no incorrectly matching
- node hashes.
- The record also contains 4 bytes where 31 bits contains the index of the
- branch and the last bit indicate that it is a branch close commit.
- The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
- and will grow with it but be 1/8th of its size.
- """
-
- def __init__(self, repo, readonly=True):
- assert repo.filtername is None
- self._repo = repo
- self._names = [] # branch names in local encoding with static index
- self._rbcrevs = rbcrevs(bytearray())
- self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
- try:
- bndata = repo.cachevfs.read(_rbcnames)
- self._rbcsnameslen = len(bndata) # for verification before writing
- if bndata:
- self._names = [
- encoding.tolocal(bn) for bn in bndata.split(b'\0')
- ]
- except (IOError, OSError):
- if readonly:
- # don't try to use cache - fall back to the slow path
- self.branchinfo = self._branchinfo
-
- if self._names:
- try:
- usemmap = repo.ui.configbool(b'storage', b'revbranchcache.mmap')
- with repo.cachevfs(_rbcrevs) as fp:
- if usemmap and repo.cachevfs.is_mmap_safe(_rbcrevs):
- data = util.buffer(util.mmapread(fp))
- else:
- data = fp.read()
- self._rbcrevs = rbcrevs(data)
- except (IOError, OSError) as inst:
- repo.ui.debug(
- b"couldn't read revision branch cache: %s\n"
- % stringutil.forcebytestr(inst)
- )
- # remember number of good records on disk
- self._rbcrevslen = min(
- len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
- )
- if self._rbcrevslen == 0:
- self._names = []
- self._rbcnamescount = len(self._names) # number of names read at
- # _rbcsnameslen
-
- def _clear(self):
- self._rbcsnameslen = 0
- del self._names[:]
- self._rbcnamescount = 0
- self._rbcrevslen = len(self._repo.changelog)
- self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
- util.clearcachedproperty(self, b'_namesreverse')
-
- @util.propertycache
- def _namesreverse(self):
- return {b: r for r, b in enumerate(self._names)}
-
- def branchinfo(self, rev):
- """Return branch name and close flag for rev, using and updating
- persistent cache."""
- changelog = self._repo.changelog
- rbcrevidx = rev * _rbcrecsize
-
- # avoid negative index, changelog.read(nullrev) is fast without cache
- if rev == nullrev:
- return changelog.branchinfo(rev)
-
- # if requested rev isn't allocated, grow and cache the rev info
- if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
- return self._branchinfo(rev)
-
- # fast path: extract data from cache, use it if node is matching
- reponode = changelog.node(rev)[:_rbcnodelen]
- cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
- close = bool(branchidx & _rbccloseflag)
- if close:
- branchidx &= _rbcbranchidxmask
- if cachenode == b'\0\0\0\0':
- pass
- elif cachenode == reponode:
- try:
- return self._names[branchidx], close
- except IndexError:
- # recover from invalid reference to unknown branch
- self._repo.ui.debug(
- b"referenced branch names not found"
- b" - rebuilding revision branch cache from scratch\n"
- )
- self._clear()
- else:
- # rev/node map has changed, invalidate the cache from here up
- self._repo.ui.debug(
- b"history modification detected - truncating "
- b"revision branch cache to revision %d\n" % rev
- )
- truncate = rbcrevidx + _rbcrecsize
- self._rbcrevs.truncate(truncate)
- self._rbcrevslen = min(self._rbcrevslen, truncate)
-
- # fall back to slow path and make sure it will be written to disk
- return self._branchinfo(rev)
-
- def _branchinfo(self, rev):
- """Retrieve branch info from changelog and update _rbcrevs"""
- changelog = self._repo.changelog
- b, close = changelog.branchinfo(rev)
- if b in self._namesreverse:
- branchidx = self._namesreverse[b]
- else:
- branchidx = len(self._names)
- self._names.append(b)
- self._namesreverse[b] = branchidx
- reponode = changelog.node(rev)
- if close:
- branchidx |= _rbccloseflag
- self._setcachedata(rev, reponode, branchidx)
- return b, close
-
- def setdata(self, rev, changelogrevision):
- """add new data information to the cache"""
- branch, close = changelogrevision.branchinfo
-
- if branch in self._namesreverse:
- branchidx = self._namesreverse[branch]
- else:
- branchidx = len(self._names)
- self._names.append(branch)
- self._namesreverse[branch] = branchidx
- if close:
- branchidx |= _rbccloseflag
- self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
- # If no cache data were readable (non exists, bad permission, etc)
- # the cache was bypassing itself by setting:
- #
- # self.branchinfo = self._branchinfo
- #
- # Since we now have data in the cache, we need to drop this bypassing.
- if 'branchinfo' in vars(self):
- del self.branchinfo
-
- def _setcachedata(self, rev, node, branchidx):
- """Writes the node's branch data to the in-memory cache data."""
- if rev == nullrev:
- return
- rbcrevidx = rev * _rbcrecsize
- self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
- self._rbcrevslen = min(self._rbcrevslen, rev)
-
- tr = self._repo.currenttransaction()
- if tr:
- tr.addfinalize(b'write-revbranchcache', self.write)
-
- def write(self, tr=None):
- """Save branch cache if it is dirty."""
- repo = self._repo
- wlock = None
- step = b''
- try:
- # write the new names
- if self._rbcnamescount < len(self._names):
- wlock = repo.wlock(wait=False)
- step = b' names'
- self._writenames(repo)
-
- # write the new revs
- start = self._rbcrevslen * _rbcrecsize
- if start != len(self._rbcrevs):
- step = b''
- if wlock is None:
- wlock = repo.wlock(wait=False)
- self._writerevs(repo, start)
-
- except (IOError, OSError, error.Abort, error.LockError) as inst:
- repo.ui.debug(
- b"couldn't write revision branch cache%s: %s\n"
- % (step, stringutil.forcebytestr(inst))
- )
- finally:
- if wlock is not None:
- wlock.release()
-
- def _writenames(self, repo):
- """write the new branch names to revbranchcache"""
- if self._rbcnamescount != 0:
- f = repo.cachevfs.open(_rbcnames, b'ab')
- if f.tell() == self._rbcsnameslen:
- f.write(b'\0')
- else:
- f.close()
- repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
- self._rbcnamescount = 0
- self._rbcrevslen = 0
- if self._rbcnamescount == 0:
- # before rewriting names, make sure references are removed
- repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
- f = repo.cachevfs.open(_rbcnames, b'wb')
- f.write(
- b'\0'.join(
- encoding.fromlocal(b)
- for b in self._names[self._rbcnamescount :]
- )
- )
- self._rbcsnameslen = f.tell()
- f.close()
- self._rbcnamescount = len(self._names)
-
- def _writerevs(self, repo, start):
- """write the new revs to revbranchcache"""
- revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
- with repo.cachevfs.open(_rbcrevs, b'ab') as f:
- if f.tell() != start:
- repo.ui.debug(
- b"truncating cache/%s to %d\n" % (_rbcrevs, start)
- )
- f.seek(start)
- if f.tell() != start:
- start = 0
- f.seek(start)
- f.truncate()
- end = revs * _rbcrecsize
- f.write(self._rbcrevs.slice(start, end))
- self._rbcrevslen = revs
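The header guard added to `_load_header` above rejects malformed cache headers with a `ValueError` instead of failing obscurely inside `dict()`. A self-contained sketch of the same parsing; the function name and the keys in the example are illustrative:

```python
def parse_header(header_line: bytes) -> dict:
    pieces = header_line.rstrip(b'\n').split(b' ')
    for p in pieces:
        if b'=' not in p:
            # reject the whole header rather than crash in dict() below
            raise ValueError(b"invalid header_line: %r" % header_line)
    return dict(p.split(b'=', 1) for p in pieces)

assert parse_header(b'tip-rev=5 tip-node=cafe\n') == {
    b'tip-rev': b'5',
    b'tip-node': b'cafe',
}
```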
--- a/mercurial/bundle2.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/bundle2.py Sat Oct 26 04:16:00 2024 +0200
@@ -145,6 +145,7 @@
preserve.
"""
+from __future__ import annotations
import collections
import errno
@@ -153,6 +154,7 @@
import string
import struct
import sys
+import typing
from .i18n import _
from .node import (
@@ -181,6 +183,17 @@
)
from .interfaces import repository
+if typing.TYPE_CHECKING:
+ from typing import (
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ )
+
+ Capabilities = Dict[bytes, Union[List[bytes], Tuple[bytes, ...]]]
+
urlerr = util.urlerr
urlreq = util.urlreq
@@ -602,7 +615,7 @@
)
-def decodecaps(blob):
+def decodecaps(blob: bytes) -> "Capabilities":
"""decode a bundle2 caps bytes blob into a dictionary
The blob is a list of capabilities (one per line)
@@ -662,11 +675,14 @@
_magicstring = b'HG20'
- def __init__(self, ui, capabilities=()):
+ def __init__(self, ui, capabilities: "Optional[Capabilities]" = None):
+ if capabilities is None:
+ capabilities = {}
+
self.ui = ui
self._params = []
self._parts = []
- self.capabilities = dict(capabilities)
+ self.capabilities: "Capabilities" = dict(capabilities)
self._compengine = util.compengines.forbundletype(b'UN')
self._compopts = None
# If compression is being handled by a consumer of the raw
@@ -1271,7 +1287,6 @@
return None
def __call__(self):
-
self.ui.debug(
b'bundle2-input-stream-interrupt: opening out of band context\n'
)
@@ -1612,7 +1627,7 @@
# These are only the static capabilities.
# Check the 'getrepocaps' function for the rest.
-capabilities = {
+capabilities: "Capabilities" = {
b'HG20': (),
b'bookmarks': (),
b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
@@ -1626,7 +1641,8 @@
}
-def getrepocaps(repo, allowpushback=False, role=None):
+# TODO: drop the default value for 'role'
+def getrepocaps(repo, allowpushback: bool = False, role=None) -> "Capabilities":
"""return the bundle2 capabilities for a given repo
Exists to allow extensions (like evolution) to mutate the capabilities.
@@ -1675,7 +1691,7 @@
return caps
-def bundle2caps(remote):
+def bundle2caps(remote) -> "Capabilities":
"""return the bundle capabilities of a peer as dict"""
raw = remote.capable(b'bundle2')
if not raw and raw != b'':
@@ -1684,7 +1700,7 @@
return decodecaps(capsblob)
-def obsmarkersversion(caps):
+def obsmarkersversion(caps: "Capabilities"):
"""extract the list of supported obsmarkers versions from a bundle2caps dict"""
obscaps = caps.get(b'obsmarkers', ())
return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
@@ -1725,7 +1741,7 @@
msg %= count
raise error.ProgrammingError(msg)
- caps = {}
+ caps: "Capabilities" = {}
if opts.get(b'obsolescence', False):
caps[b'obsmarkers'] = (b'V1',)
stream_version = opts.get(b'stream', b"")
@@ -1788,7 +1804,7 @@
addpartrevbranchcache(repo, bundler, outgoing)
if opts.get(b'obsolescence', False):
- obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
+ obsmarkers = repo.obsstore.relevantmarkers(nodes=outgoing.missing)
buildobsmarkerspart(
bundler,
obsmarkers,
@@ -2101,7 +2117,7 @@
op.source,
b'bundle2',
expectedtotal=nbchangesets,
- **extrakwargs
+ **extrakwargs,
)
if op.reply is not None:
# This is definitely not the final form of this
@@ -2598,7 +2614,6 @@
@parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
def handlestreamv2bundle(op, part):
-
requirements = urlreq.unquote(part.params[b'requirements'])
requirements = requirements.split(b',') if requirements else []
filecount = int(part.params[b'filecount'])
--- a/mercurial/bundlecaches.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/bundlecaches.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,7 +3,11 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
+import re
+import typing
from typing import (
Dict,
@@ -15,10 +19,16 @@
from .thirdparty import attr
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
error,
requirements as requirementsmod,
sslutil,
+ url as urlmod,
util,
)
from .utils import stringutil
@@ -30,7 +40,7 @@
CLONEBUNDLESCHEME = b"peer-bundle-cache://"
-def get_manifest(repo):
+def get_manifest(repo) -> bytes:
"""get the bundle manifest to be served to a client from a server"""
raw_text = repo.vfs.tryread(CB_MANIFEST_FILE)
entries = [e.split(b' ', 1) for e in raw_text.splitlines()]
@@ -46,7 +56,7 @@
return b''.join(new_lines)
-def alter_bundle_url(repo, url):
+def alter_bundle_url(repo, url: bytes) -> bytes:
"""a function that exist to help extension and hosting to alter the url
This will typically be used to inject authentication information in the url
@@ -398,6 +408,9 @@
return False
+digest_regex = re.compile(b'^[a-z0-9]+:[0-9a-f]+(,[a-z0-9]+:[0-9a-f]+)*$')
+
+
def filterclonebundleentries(
repo, entries, streamclonerequested=False, pullbundles=False
):
@@ -473,6 +486,43 @@
)
continue
+ if b'DIGEST' in entry:
+ if not digest_regex.match(entry[b'DIGEST']):
+ repo.ui.debug(
+ b'filtering %s due to a bad DIGEST attribute\n' % url
+ )
+ continue
+ supported = 0
+ seen = {}
+ for digest_entry in entry[b'DIGEST'].split(b','):
+ algo, digest = digest_entry.split(b':')
+ if algo not in seen:
+ seen[algo] = digest
+ elif seen[algo] != digest:
+ repo.ui.debug(
+ b'filtering %s due to conflicting %s digests\n'
+ % (url, algo)
+ )
+ supported = 0
+ break
+ digester = urlmod.digesthandler.digest_algorithms.get(algo)
+ if digester is None:
+ continue
+ if len(digest) != digester().digest_size * 2:
+ repo.ui.debug(
+ b'filtering %s due to a bad %s digest\n' % (url, algo)
+ )
+ supported = 0
+ break
+ supported += 1
+ else:
+ if supported == 0:
+ repo.ui.debug(
+ b'filtering %s due to lack of supported digest\n' % url
+ )
+ if supported == 0:
+ continue
+
newentries.append(entry)
return newentries
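The `DIGEST` filtering above validates the attribute's shape, rejects conflicting digests for the same algorithm, and checks each digest's length against the algorithm's output size. A sketch of the same checks, with `hashlib` standing in for `urlmod.digesthandler.digest_algorithms` (an assumption made for illustration):

```python
import hashlib
import re

digest_regex = re.compile(b'^[a-z0-9]+:[0-9a-f]+(,[a-z0-9]+:[0-9a-f]+)*$')

def supported_digests(value: bytes) -> int:
    """Return how many digests we can verify; 0 means filter the entry out."""
    if not digest_regex.match(value):
        raise ValueError('bad DIGEST attribute')
    seen = {}
    supported = 0
    for entry in value.split(b','):
        algo, digest = entry.split(b':')
        if seen.setdefault(algo, digest) != digest:
            return 0  # conflicting digests for one algorithm
        digester = getattr(hashlib, algo.decode(), None)
        if digester is None:
            continue  # unsupported algorithm: skipped, not fatal
        if len(digest) != digester().digest_size * 2:
            return 0  # hex digest has the wrong length for this algorithm
        supported += 1
    return supported

assert supported_digests(b'sha256:' + b'0' * 64) == 1
```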
--- a/mercurial/bundlerepo.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/bundlerepo.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,10 +11,12 @@
were part of the actual repository.
"""
+from __future__ import annotations
import contextlib
import os
import shutil
+import typing
from .i18n import _
from .node import (
@@ -53,7 +55,11 @@
class bundlerevlog(revlog.revlog):
- def __init__(self, opener, target, radix, cgunpacker, linkmapper):
+ def __init__(
+ self, opener: typing.Any, target, radix, cgunpacker, linkmapper
+ ):
+ # TODO: figure out real type of opener
+ #
# How it works:
# To retrieve a revision, we need to know the offset of the revision in
# the bundle (an unbundle object). We store this offset in the index
@@ -122,7 +128,7 @@
# delta base, not against rev - 1
# XXX: could use some caching
if rev <= self.repotiprev:
- return revlog.revlog._chunk(self, rev)
+ return super(bundlerevlog, self)._inner._chunk(rev)
self.bundle.seek(self.start(rev))
return self.bundle.read(self.length(rev))
@@ -263,10 +269,10 @@
if hasattr(self, 'opener'):
self.opener = vfsmod.readonlyvfs(self.opener)
- def write(self):
+ def write(self, repo):
raise NotImplementedError
- def _write(self, fp):
+ def _write(self, repo, fp):
raise NotImplementedError
def _updateroots(self, repo, phase, newroots, tr, invalidate=True):
@@ -286,7 +292,13 @@
return filespos
-class bundlerepository:
+_bundle_repo_baseclass = object
+
+if typing.TYPE_CHECKING:
+ _bundle_repo_baseclass = localrepo.localrepository
+
+
+class bundlerepository(_bundle_repo_baseclass):
"""A repository instance that is a union of a local repo and a bundle.
Instances represent a read-only repository composed of a local repository
@@ -408,7 +420,7 @@
with os.fdopen(fdtemp, 'wb') as fptemp:
fptemp.write(header)
while True:
- chunk = readfn(2 ** 18)
+ chunk = readfn(2**18)
if not chunk:
break
fptemp.write(chunk)
--- a/mercurial/cacheutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cacheutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
from . import repoview
@@ -14,9 +16,9 @@
# ones. Therefore copy all branch caches over.
cachefiles = [b'branch2']
cachefiles += [b'branch2-%s' % f for f in repoview.filtertable]
- cachefiles += [b'branch3']
- cachefiles += [b'branch3-%s' % f for f in repoview.filtertable]
- cachefiles += [b'rbc-names-v1', b'rbc-revs-v1']
+ cachefiles += [b'branch3-exp']
+ cachefiles += [b'branch3-exp-%s' % f for f in repoview.filtertable]
+ cachefiles += [b'rbc-names-v2', b'rbc-revs-v2']
cachefiles += [b'tags2']
cachefiles += [b'tags2-%s' % f for f in repoview.filtertable]
cachefiles += [b'hgtagsfnodes1']
--- a/mercurial/cext/base85.c Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cext/base85.c Sat Oct 26 04:16:00 2024 +0200
@@ -38,7 +38,7 @@
unsigned int acc, val, ch;
int pad = 0;
- if (!PyArg_ParseTuple(args, "y#|i", &text, &len, &pad)) {
+ if (!PyArg_ParseTuple(args, "y#|p", &text, &len, &pad)) {
return NULL;
}
--- a/mercurial/cext/base85.pyi Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cext/base85.pyi Sat Oct 26 04:16:00 2024 +0200
@@ -1,6 +1,4 @@
-from typing import Optional
-
version: int
-def b85encode(text: bytes, pad: Optional[int]) -> bytes: ...
+def b85encode(text: bytes, pad: bool = False) -> bytes: ...
def b85decode(text: bytes) -> bytes: ...
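Switching the format string from `"y#|i"` to `"y#|p"` makes `pad` a true boolean in the C parser, which is what the updated stub declares. A minimal round-trip check, assuming the compiled extension is available (the argument stays positional since the C function takes no keywords):

```python
from mercurial.cext import base85  # assumes the compiled extension is present

data = b'hello'
# any truthy/falsy object is accepted for pad now that it is parsed with 'p'
assert base85.b85decode(base85.b85encode(data, False)) == data
padded = base85.b85encode(data, True)
assert len(padded) % 5 == 0  # padding completes the last 5-character group
```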
--- a/mercurial/cext/osutil.c Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cext/osutil.c Sat Oct 26 04:16:00 2024 +0200
@@ -36,6 +36,12 @@
#endif
#endif
+#ifndef _WIN32
+#include <sys/mman.h>
+#include <pthread.h>
+#endif
+
+
#ifdef __APPLE__
#include <sys/attr.h>
#include <sys/vnode.h>
@@ -1203,6 +1209,49 @@
}
#endif
+#ifdef MADV_POPULATE_READ
+
+typedef struct {
+ void * mmap_address;
+ size_t length;
+} mmap_info;
+
+static void _mmap_populate(mmap_info *info) {
+    /* We explicitly do not check the return value as we don't care about it;
+ * the madvise is here to help performance and we don't care if it fails
+ * (for example because the mapping is no longer valid) */
+ void * mmap_address = info->mmap_address;
+ size_t length = info->length;
+ free(info);
+ madvise(mmap_address, length, MADV_POPULATE_READ);
+}
+
+static PyObject *background_mmap_populate(PyObject *self, PyObject *mmap) {
+ Py_buffer b;
+ pthread_t thread_id;
+ mmap_info *info;
+ if (PyObject_GetBuffer(mmap, &b, PyBUF_CONTIG_RO | PyBUF_C_CONTIGUOUS) == -1) {
+ return NULL;
+ }
+ info = (mmap_info *)malloc(sizeof(mmap_info));
+ info->mmap_address=b.buf;
+ info->length=b.len;
+    /* note: for very large maps, we could spin up multiple threads populating
+     * different areas */
+ pthread_create(&thread_id, NULL, (void *) &_mmap_populate, info);
+ /* We don't keep track of this thread as it is fine for it to die when we
+ * exit. */
+ pthread_detach(thread_id);
+ /* We release the PyBuffer in the main thread to let the object be garbage
+ * collected as soon as possible. This might result in the memory map being
+ * closed while the background thread is working. That will result in a
+     * closed while the background thread is working. That will result in an
+     * error in the background thread, which we can ignore. */
+ Py_RETURN_NONE;
+}
+
+#endif
+
static char osutil_doc[] = "Native operating system services.";
static PyMethodDef methods[] = {
@@ -1237,6 +1286,10 @@
"Is a CoreGraphics session available?"
},
#endif
+#ifdef MADV_POPULATE_READ
+ {"background_mmap_populate", (PyCFunction)background_mmap_populate, METH_O,
+ "populate a mmap in the background\n"},
+#endif
{NULL, NULL}
};
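The new `background_mmap_populate` takes a single buffer-exporting object (it is registered with `METH_O`) and returns immediately while a detached thread runs `madvise(MADV_POPULATE_READ)` to fault the pages in. A hypothetical usage sketch; the file name is illustrative, and the helper only exists where the extension was built on a platform defining `MADV_POPULATE_READ` (Linux 5.14+):

```python
import mmap

from mercurial.cext import osutil  # assumes the compiled extension is present

with open('some-cache-file', 'rb') as fp:  # hypothetical file
    mm = mmap.mmap(fp.fileno(), 0, prot=mmap.PROT_READ)  # POSIX-only flag
    if hasattr(osutil, 'background_mmap_populate'):
        osutil.background_mmap_populate(mm)  # returns immediately
    # ... read from mm while the pages populate in the background ...
    mm.close()
```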
--- a/mercurial/cext/parsers.pyi Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cext/parsers.pyi Sat Oct 26 04:16:00 2024 +0200
@@ -12,6 +12,21 @@
version: int
versionerrortext: str
+# From charencode.c
+
+# Note: the first 'bytes' arg of isasciistr() can be typed as 'Buffer' from the
+# typing_extensions backport, once the related bug is fixed in pytype, or pytype
+# is run under py3.12. The arg for asciilower(), asciiupper() and
+# jsonescapeu8fast() is checked specifically for the 'bytes' type. The 'bytes'
+# return is an instance of 'bytes', not an alias for 'Buffer'.
+#
+# https://github.com/google/pytype/issues/1772
+def isasciistr(s: bytes) -> bool: ...
+def asciilower(s: bytes) -> bytes: ...
+def asciiupper(s: bytes) -> bytes: ...
+def jsonescapeu8fast(u8chars: bytes, paranoid: bool) -> bytes: ...
+
+
class DirstateItem:
__doc__: str
@@ -61,7 +76,6 @@
def computephasesmapsets(self, root: Dict[int, Set[int]]) -> Tuple[int, Dict[int, Set[bytes]]]: ...
def reachableroots2(self, minroot: int, heads: List[int], roots: List[int], includepath: bool) -> List[int]: ...
def headrevs(self, filteredrevs: Optional[List[int]]) -> List[int]: ...
- def headrevsfiltered(self, filteredrevs: Optional[List[int]]) -> List[int]: ...
def issnapshot(self, value: int) -> bool: ...
def findsnapshots(self, cache: Dict[int, List[int]], start_rev: int) -> None: ...
def deltachain(self, rev: int, stop: int, generaldelta: bool) -> Tuple[List[int], bool]: ...
--- a/mercurial/cext/revlog.c Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cext/revlog.c Sat Oct 26 04:16:00 2024 +0200
@@ -1237,22 +1237,36 @@
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
- Py_ssize_t i, j, len;
+ Py_ssize_t i, j, len, stop_rev;
char *nothead = NULL;
PyObject *heads = NULL;
PyObject *filter = NULL;
PyObject *filteredrevs = Py_None;
-
- if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
+ PyObject *stop_rev_obj = Py_None;
+
+ if (!PyArg_ParseTuple(args, "|OO", &filteredrevs, &stop_rev_obj)) {
return NULL;
}
- if (self->headrevs && filteredrevs == self->filteredrevs)
+ len = index_length(self);
+ if (stop_rev_obj == Py_None) {
+ stop_rev = len;
+ } else {
+ stop_rev = PyLong_AsSsize_t(stop_rev_obj);
+ if (stop_rev == -1 && PyErr_Occurred() != NULL) {
+ return NULL;
+ }
+ }
+
+ if (self->headrevs && filteredrevs == self->filteredrevs &&
+ stop_rev == len)
return list_copy(self->headrevs);
- Py_DECREF(self->filteredrevs);
- self->filteredrevs = filteredrevs;
- Py_INCREF(filteredrevs);
+ if (stop_rev == len) {
+ Py_DECREF(self->filteredrevs);
+ self->filteredrevs = filteredrevs;
+ Py_INCREF(filteredrevs);
+ }
if (filteredrevs != Py_None) {
filter = PyObject_GetAttrString(filteredrevs, "__contains__");
@@ -1264,11 +1278,10 @@
}
}
- len = index_length(self);
heads = PyList_New(0);
if (heads == NULL)
goto bail;
- if (len == 0) {
+ if (stop_rev == 0) {
if (pylist_append_owned(heads, PyLong_FromLong(-1)) == -1) {
Py_XDECREF(nullid);
goto bail;
@@ -1276,13 +1289,13 @@
goto done;
}
- nothead = calloc(len, 1);
+ nothead = calloc(stop_rev, 1);
if (nothead == NULL) {
PyErr_NoMemory();
goto bail;
}
- for (i = len - 1; i >= 0; i--) {
+ for (i = stop_rev - 1; i >= 0; i--) {
int isfiltered;
int parents[2];
@@ -1304,7 +1317,7 @@
}
}
- if (index_get_parents(self, i, parents, (int)len - 1) < 0)
+ if (index_get_parents(self, i, parents, (int)stop_rev - 1) < 0)
goto bail;
for (j = 0; j < 2; j++) {
if (parents[j] >= 0)
@@ -1312,7 +1325,7 @@
}
}
- for (i = 0; i < len; i++) {
+ for (i = 0; i < stop_rev; i++) {
if (nothead[i])
continue;
if (pylist_append_owned(heads, PyLong_FromSsize_t(i)) == -1) {
@@ -1321,10 +1334,14 @@
}
done:
- self->headrevs = heads;
Py_XDECREF(filter);
free(nothead);
- return list_copy(self->headrevs);
+ if (stop_rev == len) {
+ self->headrevs = heads;
+ return list_copy(self->headrevs);
+ } else {
+ return heads;
+ }
bail:
Py_XDECREF(filter);
Py_XDECREF(heads);
@@ -3352,9 +3369,7 @@
{"replace_sidedata_info", (PyCFunction)index_replace_sidedata_info,
METH_VARARGS, "replace an existing index entry with a new value"},
{"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
- "get head revisions"}, /* Can do filtering since 3.2 */
- {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
- "get filtered head revisions"}, /* Can always do filtering */
+ "get head revisions"},
{"issnapshot", (PyCFunction)index_issnapshot, METH_O,
"True if the object is a snapshot"},
{"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
--- a/mercurial/cffi/bdiff.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cffi/bdiff.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,15 +5,23 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import struct
+import typing
from typing import (
List,
+ Optional,
Tuple,
)
from ..pure.bdiff import *
+
+from ..interfaces import (
+ modules as intmod,
+)
+
from . import _bdiff # pytype: disable=import-error
ffi = _bdiff.ffi
@@ -86,3 +94,11 @@
lib.free(b[0])
lib.bdiff_freehunks(l.next)
return b"".join(rl)
+
+
+# In order to adhere to the module protocol, these functions must be visible
+# to the type checker, even though this backend does not actually implement
+# them. Callers are responsible for checking that an implementation is
+# available before using them.
+if typing.TYPE_CHECKING:
+ xdiffblocks: Optional[intmod.BDiffBlocksFnc] = None
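
A minimal caller-side sketch of the availability check the comment above asks
for, mirroring how a blocks function can be selected at runtime (module names
as used elsewhere in Mercurial; a sketch, not the real dispatch)::

    from mercurial import policy

    bdiff = policy.importmod('bdiff')

    def blocks(text1: bytes, text2: bytes):
        # The protocol only guarantees the attribute is *visible* to the
        # type checker; at runtime it may be absent or None on this backend.
        fn = getattr(bdiff, 'xdiffblocks', None)
        if fn is not None:
            return fn(text1, text2)
        return bdiff.blocks(text1, text2)
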
--- a/mercurial/cffi/bdiffbuild.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cffi/bdiffbuild.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import cffi
import os
--- a/mercurial/cffi/mpatch.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cffi/mpatch.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from typing import List
--- a/mercurial/cffi/mpatchbuild.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cffi/mpatchbuild.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import cffi
import os
--- a/mercurial/cffi/osutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cffi/osutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import stat as statmod
--- a/mercurial/cffi/osutilbuild.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cffi/osutilbuild.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import cffi
ffi = cffi.FFI()
--- a/mercurial/changegroup.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/changegroup.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import struct
@@ -407,7 +408,7 @@
yield chunkheader(len(chunk))
pos = 0
while pos < len(chunk):
- next = pos + 2 ** 20
+ next = pos + 2**20
yield chunk[pos:next]
pos = next
yield closechunk()
@@ -611,7 +612,7 @@
# validate incoming csets have their manifests
for cset in range(clstart, clend):
mfnode = cl.changelogrevision(cset).manifest
- mfest = ml[mfnode].readdelta()
+ mfest = ml[mfnode].read_delta_new_entries()
# store file nodes we must see
for f, n in mfest.items():
needfiles.setdefault(f, set()).add(n)
@@ -697,7 +698,7 @@
repo.hook(
b'pretxnchangegroup',
throw=True,
- **pycompat.strkwargs(hookargs)
+ **pycompat.strkwargs(hookargs),
)
added = range(clstart, clend)
@@ -1830,7 +1831,8 @@
treemanifests to send.
"""
clnode = nodes[x]
- mdata = mfl.get(tree, x).readfast(shallow=True)
+ mctx = mfl.get(tree, x)
+ mdata = mctx.read_delta_parents(shallow=True, exact=False)
for p, n, fl in mdata.iterentries():
if fl == b't': # subdirectory manifest
subtree = tree + p + b'/'
--- a/mercurial/changelog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/changelog.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,9 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
+import typing
from .i18n import _
from .node import (
@@ -13,6 +16,11 @@
)
from .thirdparty import attr
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
encoding,
error,
--- a/mercurial/chgserver.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/chgserver.py Sat Oct 26 04:16:00 2024 +0200
@@ -39,6 +39,7 @@
skiphash = False
"""
+from __future__ import annotations
import inspect
import os
@@ -151,7 +152,7 @@
"""
modules = [m for n, m in extensions.extensions(ui)]
try:
- from . import __version__
+ from . import __version__ # pytype: disable=import-error
modules.append(__version__)
except ImportError:
--- a/mercurial/cmdutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/cmdutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,12 +5,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import copy as copymod
import errno
import functools
import os
import re
+import typing
from typing import (
Any,
@@ -33,6 +35,11 @@
)
from .thirdparty import attr
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
bookmarks,
bundle2,
@@ -1112,7 +1119,7 @@
ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
-def findrepo(p):
+def findrepo(p: bytes) -> Optional[bytes]:
while not os.path.isdir(os.path.join(p, b".hg")):
oldp, p = p, os.path.dirname(p)
if p == oldp:
@@ -3833,7 +3840,6 @@
original_headers = patch.parsepatch(diff)
try:
-
chunks, opts = recordfilter(
repo.ui, original_headers, match, operation=operation
)
--- a/mercurial/color.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/color.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
--- a/mercurial/commands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/commands.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import re
@@ -709,7 +710,8 @@
if dest == b'-':
if kind == b'files':
raise error.InputError(_(b'cannot archive plain files to stdout'))
- dest = cmdutil.makefileobj(ctx, dest)
+ realdest = dest
+ dest = lambda: cmdutil.makefileobj(ctx, realdest)
if not prefix:
prefix = os.path.basename(repo.root) + b'-%h'
@@ -3918,7 +3920,7 @@
branch=None,
tags=None,
bookmarks=None,
- **opts
+ **opts,
):
"""identify the working directory or specified revision
--- a/mercurial/commandserver.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/commandserver.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import gc
import os
--- a/mercurial/commit.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/commit.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from .node import (
@@ -214,15 +215,15 @@
elif narrow_action == mergestate.CHANGE_ADDED:
files.mark_added(f)
added.append(f)
- m[f] = m2[f]
+ fnode = m2[f]
flags = m2ctx.find(f)[1] or b''
- m.setflag(f, flags)
+ m.set(f, fnode, flags)
elif narrow_action == mergestate.CHANGE_MODIFIED:
files.mark_touched(f)
added.append(f)
- m[f] = m2[f]
+ fnode = m2[f]
flags = m2ctx.find(f)[1] or b''
- m.setflag(f, flags)
+ m.set(f, fnode, flags)
else:
msg = _(b"corrupted mergestate, unknown narrow action: %b")
hint = _(b"restart the merge")
@@ -234,7 +235,7 @@
removed.append(f)
else:
added.append(f)
- m[f], is_touched = _filecommit(
+ fnode, is_touched = _filecommit(
repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms
)
if is_touched:
@@ -244,7 +245,7 @@
files.mark_merged(f)
else:
files.mark_touched(f)
- m.setflag(f, fctx.flags())
+ m.set(f, fnode, fctx.flags())
except OSError:
repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
raise
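
The hunk above replaces the old two-step ``m[f] = node`` / ``m.setflag(f,
flags)`` dance with a single ``m.set(f, node, flags)`` call. A toy stand-in
showing the shape of the new call (illustrative only, not the real
``mercurial.manifest`` API)::

    class ManifestSketch:
        """Toy stand-in for a manifest object."""

        def __init__(self):
            self._nodes = {}
            self._flags = {}

        def set(self, path: bytes, node: bytes, flags: bytes) -> None:
            # One call records node and flags together, instead of a
            # __setitem__ followed by a separate setflag() on the same path.
            self._nodes[path] = node
            self._flags[path] = flags

    m = ManifestSketch()
    m.set(b'a.txt', b'\x12' * 20, b'x')   # 'x' marks an executable file
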
--- a/mercurial/config.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/config.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
import os
--- a/mercurial/configitems.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/configitems.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import functools
import re
--- a/mercurial/configitems.toml Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/configitems.toml Sat Oct 26 04:16:00 2024 +0200
@@ -65,7 +65,13 @@
section = "censor"
name = "policy"
default = "abort"
-experimental = true
+documentation="""Control how to react when accessing censored content.
+Accepted value: "abort", "ignore". Defaults to abort.
+
+A few informative commands such as ``hg grep`` will unconditionally ignore
+censored data and merely report that it was encountered.
+"""
+
[[items]]
section = "chgserver"
@@ -632,7 +638,7 @@
[[items]]
section = 'devel'
name = 'sync.status.pre-dirstate-write-file-timeout'
-default=2
+default=20
[[items]]
section = 'devel'
@@ -641,7 +647,7 @@
[[items]]
section = 'devel'
name = 'sync.dirstate.post-docket-read-file-timeout'
-default=2
+default=20
[[items]]
section = 'devel'
@@ -650,7 +656,7 @@
[[items]]
section = 'devel'
name = 'sync.dirstate.pre-read-file-timeout'
-default=2
+default=20
[[items]]
section = "devel"
@@ -987,10 +993,6 @@
[[items]]
section = "experimental"
-name = "mmapindexthreshold"
-
-[[items]]
-section = "experimental"
name = "narrow"
default = false
@@ -1528,6 +1530,24 @@
default = false
[[items]]
+section = "experimental"
+name = "relaxed-block-sync-merge"
+default = false
+documentation="""When using built-in simple merge tools, this config makes it so that changes
+touching adjacent file regions no longer conflict with each other.
+
+In particular, addition/modification/removal adjacent to modification/removal
+are all allowed with no conflict.
+
+Addition next to addition is still treated as a conflict because it presents
+a legitimate ambiguity.
+
+The change tweaks existing logic for aligning file changes, making it so
+that a 0-length spacing between regions is just as good as a 1-line spacing.
+(default: False)
+"""
+
+[[items]]
section = "merge-tools"
name = ".*"
generic = true
@@ -1815,6 +1835,20 @@
default = "stat"
[[items]]
+section = "profiling"
+name = "py-spy.exe"
+default = "py-spy"
+
+[[items]]
+section = "profiling"
+name = "py-spy.freq"
+default = 100
+
+[[items]]
+section = "profiling"
+name = "py-spy.format"
+
+[[items]]
section = "progress"
name = "assume-tty"
default = false
@@ -1947,6 +1981,17 @@
experimental = true
[[items]]
+section = "rust"
+name = "update-from-null"
+default = true
+experimental = true
+documentation = """Whether to use the Rust fast-path when an update from null
+is detected. This config item is here to help users disable the fastpath in
+case one of their extensions is interfering with the update code. If you are
+one of these people, please reach out.
+"""
+
+[[items]]
section = "server"
name = "bookmarks-pushkey-compat"
default = true
@@ -2147,7 +2192,7 @@
[[items]]
section = "storage"
name = "revbranchcache.mmap"
-default = false
+default = true
[[items]]
section = "storage"
@@ -2167,6 +2212,16 @@
[[items]]
section = "storage"
+name = "revlog.mmap.index"
+default-type = "dynamic"
+
+[[items]]
+section = "storage"
+name = "revlog.mmap.index:size-threshold"
+default = "1 MB"
+
+[[items]]
+section = "storage"
name = "revlog.optimize-delta-parent-choice"
default = true
alias = [["format", "aggressivemergedeltas"]]
--- a/mercurial/context.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/context.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import filecmp
import os
@@ -265,6 +266,9 @@
raise error.ManifestLookupError(
self._node or b'None', path, _(b'not found in manifest')
)
+ # Try to find the file in the manifest delta, which can be faster to
+ # read than a full manifest. If we fail to find the file, it might
+ # still exist in the full manifest, so let's look for it there.
if '_manifestdelta' in self.__dict__ or path in self.files():
if path in self._manifestdelta:
return (
@@ -538,7 +542,10 @@
@propertycache
def _manifestdelta(self):
- return self._manifestctx.readdelta()
+ base, delta = self._manifestctx.read_any_fast_delta()
+ if base is None:
+ self._manifest = delta
+ return delta
@propertycache
def _parents(self):
@@ -1057,7 +1064,12 @@
if path in ac[3]: # checking the 'files' field.
# The file has been touched, check if the content is
# similar to the one we search for.
- if fnode == mfl[ac[0]].readfast().get(path):
+ #
+ # If the content is similar to one of the parents, then it
+ # cannot be an adjusted linkrev
+ if fnode == (
+ mfl[ac[0]].read_delta_parents(exact=False).get(path)
+ ):
return a
# In theory, we should never get out of that loop without a result.
# But if manifest uses a buggy file revision (not children of the
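
A toy illustration of the caching idea in ``_manifestdelta`` above:
``read_any_fast_delta()`` returns ``(base, mapping)``, and a ``None`` base
means the "delta" is really a full manifest, so it can double as the
full-manifest cache (stand-in class, not the real manifest-context API)::

    class FakeManifestCtx:
        """Stand-in returning (None, full_manifest) when no cheap delta exists."""

        def __init__(self, full, base=None, delta=None):
            self._full, self._base, self._delta = full, base, delta

        def read_any_fast_delta(self):
            if self._delta is None:
                return (None, dict(self._full))   # fell back to a full read
            return (self._base, dict(self._delta))

    cache = {}
    mctx = FakeManifestCtx({b'a': b'n1', b'b': b'n2'})
    base, delta = mctx.read_any_fast_delta()
    if base is None:
        cache['_manifest'] = delta  # the full read doubles as the manifest cache
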
--- a/mercurial/copies.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/copies.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import os
@@ -915,11 +916,14 @@
self.movewithdir = {} if movewithdir is None else movewithdir
def __repr__(self):
- return '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>' % (
- self.copy,
- self.renamedelete,
- self.dirmove,
- self.movewithdir,
+ return (
+ '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>'
+ % (
+ self.copy,
+ self.renamedelete,
+ self.dirmove,
+ self.movewithdir,
+ )
)
--- a/mercurial/crecord.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/crecord.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
# This code is based on the Mark Edgington's crecord extension.
# (Itself based on Bryan O'Sullivan's record extension.)
+from __future__ import annotations
import os
import re
--- a/mercurial/dagop.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dagop.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,10 +5,21 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import heapq
+import typing
+from typing import (
+ List,
+)
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .node import nullrev
from . import (
error,
@@ -747,7 +758,7 @@
return child
-def annotate(base, parents, skiprevs=None, diffopts=None):
+def annotate(base, parents, skiprevs=None, diffopts=None) -> List[annotateline]:
"""Core algorithm for filectx.annotate()
`parents(fctx)` is a function returning a list of parent filectxs.
--- a/mercurial/dagparser.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dagparser.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
import string
--- a/mercurial/debugcommands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/debugcommands.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import binascii
import codecs
@@ -142,7 +143,7 @@
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
"""attempt to trigger an antivirus scanner to see if one is active"""
- with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
+ with repo.cachevfs.open(b'eicar-test-file.com', b'wb') as f:
f.write(
util.b85decode(
# This is a base85-armored version of the EICAR test file. See
@@ -153,7 +154,7 @@
)
# Give an AV engine time to scan the file.
time.sleep(2)
- util.unlink(repo.cachevfs.join('eicar-test-file.com'))
+ util.unlink(repo.cachevfs.join(b'eicar-test-file.com'))
@command(b'debugapplystreamclonebundle', [], b'FILE')
@@ -254,6 +255,10 @@
progress = ui.makeprogress(
_(b'building'), unit=_(b'revisions'), total=total
)
+ merge_relaxed_sync = ui.configbool(
+ b'experimental',
+ b'relaxed-block-sync-merge',
+ )
with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
at = -1
atbranch = b'default'
@@ -278,7 +283,12 @@
base, local, other = [
x[fn].data() for x in (pa, p1, p2)
]
- m3 = simplemerge.Merge3Text(base, local, other)
+ m3 = simplemerge.Merge3Text(
+ base,
+ local,
+ other,
+ relaxed_sync=merge_relaxed_sync,
+ )
ml = [
l.strip()
for l in simplemerge.render_minimized(m3)[0]
@@ -2237,6 +2247,13 @@
locks = []
try:
+ # Help the tests out on Windows by writing the correct PID when
+ # invoked by the test harness, before creating the lock.
+ pids = encoding.environ.get(b'DAEMON_PIDS')
+ if pids:
+ with open(pids, "ab") as fp:
+ fp.write(b'%d\n' % os.getpid())
+
if opts.get('set_wlock'):
try:
locks.append(repo.wlock(False))
@@ -4510,8 +4527,10 @@
# TODO consider not doing this because we skip
# ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
u = urlutil.url(path)
- if u.scheme != b'http':
- raise error.Abort(_(b'only http:// paths are currently supported'))
+ if u.scheme not in (b'http', b'https'):
+ raise error.Abort(
+ _(b'only http:// and https:// paths are currently supported')
+ )
url, authinfo = u.authinfo()
openerargs = {
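
A hedged sketch of exercising the new ``relaxed_sync`` flag directly, using
the same ``simplemerge`` entry points as the ``debugbuilddag`` hunk above (the
expected outcome is my reading of the config documentation, not a tested
claim)::

    from mercurial import simplemerge

    base  = b'a\nb\nc\n'
    local = b'a\nB\nc\n'   # modifies line 2
    other = b'a\nb\nC\n'   # modifies line 3, directly adjacent

    m3 = simplemerge.Merge3Text(base, local, other, relaxed_sync=True)
    lines, conflicts = simplemerge.render_minimized(m3)
    # With relaxed_sync=True these adjacent edits should merge cleanly;
    # with the default (False) a 0-length gap between the two changed
    # regions would be reported as a conflict.
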
--- a/mercurial/destutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/destutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from . import bookmarks, error, obsutil, scmutil, stack
--- a/mercurial/diffhelper.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/diffhelper.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
--- a/mercurial/diffutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/diffutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import typing
from typing import (
--- a/mercurial/dirstate.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dirstate.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import contextlib
@@ -12,6 +13,16 @@
import stat
import uuid
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+)
+
from .i18n import _
from hgdemandimport import tracing
@@ -36,7 +47,6 @@
from .interfaces import (
dirstate as intdirstate,
- util as interfaceutil,
)
parsers = policy.importmod('parsers')
@@ -134,9 +144,7 @@
CHANGE_TYPE_FILES = "files"
-@interfaceutil.implementer(intdirstate.idirstate)
-class dirstate:
-
+class dirstate(intdirstate.idirstate):
# used by largefile to avoid overwritting transaction callback
_tr_key_suffix = b''
@@ -398,7 +406,7 @@
raise error.ProgrammingError(msg)
@property
- def is_changing_any(self):
+ def is_changing_any(self) -> bool:
"""Returns true if the dirstate is in the middle of a set of changes.
This returns True for any kind of change.
@@ -406,7 +414,7 @@
return self._changing_level > 0
@property
- def is_changing_parents(self):
+ def is_changing_parents(self) -> bool:
"""Returns true if the dirstate is in the middle of a set of changes
that modify the dirstate parent.
"""
@@ -415,7 +423,7 @@
return self._change_type == CHANGE_TYPE_PARENTS
@property
- def is_changing_files(self):
+ def is_changing_files(self) -> bool:
"""Returns true if the dirstate is in the middle of a set of changes
that modify the files tracked or their sources.
"""
@@ -471,11 +479,11 @@
def _pl(self):
return self._map.parents()
- def hasdir(self, d):
+ def hasdir(self, d: bytes) -> bool:
return self._map.hastrackeddir(d)
@rootcache(b'.hgignore')
- def _ignore(self):
+ def _ignore(self) -> matchmod.basematcher:
files = self._ignorefiles()
if not files:
return matchmod.never()
@@ -488,11 +496,11 @@
return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
@propertycache
- def _checklink(self):
+ def _checklink(self) -> bool:
return util.checklink(self._root)
@propertycache
- def _checkexec(self):
+ def _checkexec(self) -> bool:
return bool(util.checkexec(self._root))
@propertycache
@@ -504,7 +512,9 @@
# it's safe because f is always a relative path
return self._rootdir + f
- def flagfunc(self, buildfallback):
+ def flagfunc(
+ self, buildfallback: intdirstate.FlagFuncFallbackT
+ ) -> intdirstate.FlagFuncReturnT:
"""build a callable that returns flags associated with a filename
The information is extracted from three possible layers:
@@ -516,7 +526,7 @@
# small hack to cache the result of buildfallback()
fallback_func = []
- def get_flags(x):
+ def get_flags(x: bytes) -> bytes:
entry = None
fallback_value = None
try:
@@ -567,7 +577,7 @@
return forcecwd
return encoding.getcwd()
- def getcwd(self):
+ def getcwd(self) -> bytes:
"""Return the path from which a canonical path is calculated.
This path should be used to resolve file patterns or to convert
@@ -587,7 +597,7 @@
# we're outside the repo. return an absolute path.
return cwd
- def pathto(self, f, cwd=None):
+ def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
if cwd is None:
cwd = self.getcwd()
path = util.pathto(self._root, cwd, f)
@@ -595,31 +605,31 @@
return util.pconvert(path)
return path
- def get_entry(self, path):
+ def get_entry(self, path: bytes) -> intdirstate.DirstateItemT:
"""return a DirstateItem for the associated path"""
entry = self._map.get(path)
if entry is None:
return DirstateItem()
return entry
- def __contains__(self, key):
+ def __contains__(self, key: Any) -> bool:
return key in self._map
- def __iter__(self):
+ def __iter__(self) -> Iterator[bytes]:
return iter(sorted(self._map))
- def items(self):
+ def items(self) -> Iterator[Tuple[bytes, intdirstate.DirstateItemT]]:
return self._map.items()
iteritems = items
- def parents(self):
+ def parents(self) -> List[bytes]:
return [self._validate(p) for p in self._pl]
- def p1(self):
+ def p1(self) -> bytes:
return self._validate(self._pl[0])
- def p2(self):
+ def p2(self) -> bytes:
return self._validate(self._pl[1])
@property
@@ -627,11 +637,11 @@
"""True if a merge is in progress"""
return self._pl[1] != self._nodeconstants.nullid
- def branch(self):
+ def branch(self) -> bytes:
return encoding.tolocal(self._branch)
@requires_changing_parents
- def setparents(self, p1, p2=None):
+ def setparents(self, p1: bytes, p2: Optional[bytes] = None):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, "merged" entries a
@@ -644,8 +654,8 @@
p2 = self._nodeconstants.nullid
if self._changing_level == 0:
raise ValueError(
- b"cannot set dirstate parent outside of "
- b"dirstate.changing_parents context manager"
+ "cannot set dirstate parent outside of "
+ "dirstate.changing_parents context manager"
)
self._dirty = True
@@ -657,7 +667,9 @@
fold_p2 = oldp2 != nullid and p2 == nullid
return self._map.setparents(p1, p2, fold_p2=fold_p2)
- def setbranch(self, branch, transaction):
+ def setbranch(
+ self, branch: bytes, transaction: Optional[intdirstate.TransactionT]
+ ) -> None:
self.__class__._branch.set(self, encoding.fromlocal(branch))
if transaction is not None:
self._setup_tr_abort(transaction)
@@ -685,7 +697,7 @@
def _write_branch(self, file_obj):
file_obj.write(self._branch + b'\n')
- def invalidate(self):
+ def invalidate(self) -> None:
"""Causes the next access to reread the dirstate.
This is different from localrepo.invalidatedirstate() because it always
@@ -705,7 +717,7 @@
self._origpl = None
@requires_changing_any
- def copy(self, source, dest):
+ def copy(self, source: Optional[bytes], dest: bytes) -> None:
"""Mark dest as a copy of source. Unmark dest if source is None."""
if source == dest:
return
@@ -716,10 +728,10 @@
else:
self._map.copymap.pop(dest, None)
- def copied(self, file):
+ def copied(self, file: bytes) -> Optional[bytes]:
return self._map.copymap.get(file, None)
- def copies(self):
+ def copies(self) -> Dict[bytes, bytes]:
return self._map.copymap
@requires_changing_files
@@ -789,7 +801,7 @@
a `with dirstate.changing_parents(repo):` context.
"""
if self.in_merge:
- msg = b'update_file_reference should not be called when merging'
+ msg = 'update_file_reference should not be called when merging'
raise error.ProgrammingError(msg)
entry = self._map.get(filename)
if entry is None:
@@ -880,7 +892,6 @@
possibly_dirty=False,
parentfiledata=None,
):
-
# note: I do not think we need to double check name clash here since we
# are in a update/merge case that should already have taken care of
# this. The test agrees
@@ -986,7 +997,9 @@
)
return folded
- def normalize(self, path, isknown=False, ignoremissing=False):
+ def normalize(
+ self, path: bytes, isknown: bool = False, ignoremissing: bool = False
+ ) -> bytes:
"""
normalize the case of a pathname when on a casefolding filesystem
@@ -1012,12 +1025,17 @@
# - its semantic is unclear
# - do we really needs it ?
@requires_changing_parents
- def clear(self):
+ def clear(self) -> None:
self._map.clear()
self._dirty = True
@requires_changing_parents
- def rebuild(self, parent, allfiles, changedfiles=None):
+ def rebuild(
+ self,
+ parent: bytes,
+ allfiles: Iterable[bytes], # TODO: more than iterable? (uses len())
+ changedfiles: Optional[Iterable[bytes]] = None,
+ ) -> None:
matcher = self._sparsematcher
if matcher is not None and not matcher.always():
# should not add non-matching files
@@ -1083,7 +1101,7 @@
on_abort,
)
- def write(self, tr):
+ def write(self, tr: Optional[intdirstate.TransactionT]) -> None:
if not self._dirty:
return
# make sure we don't request a write of invalidated content
@@ -1092,7 +1110,6 @@
write_key = self._use_tracked_hint and self._dirty_tracked_set
if tr:
-
self._setup_tr_abort(tr)
self._attached_to_a_transaction = True
@@ -1134,7 +1151,9 @@
self._opener.unlink(self._filename_th)
self._use_tracked_hint = False
- def addparentchangecallback(self, category, callback):
+ def addparentchangecallback(
+ self, category: bytes, callback: intdirstate.AddParentChangeCallbackT
+ ) -> None:
"""add a callback to be called when the wd parents are changed
Callback will be called with the following arguments:
@@ -1169,7 +1188,7 @@
return True
return False
- def _ignorefiles(self):
+ def _ignorefiles(self) -> List[bytes]:
files = []
if os.path.exists(self._join(b'.hgignore')):
files.append(self._join(b'.hgignore'))
@@ -1180,7 +1199,7 @@
files.append(os.path.join(self._rootdir, util.expandpath(path)))
return files
- def _ignorefileandline(self, f):
+ def _ignorefileandline(self, f: bytes) -> intdirstate.IgnoreFileAndLineT:
files = collections.deque(self._ignorefiles())
visited = set()
while files:
@@ -1286,7 +1305,7 @@
badfn(ff, badtype(kind))
if nf in dmap:
results[nf] = None
- except (OSError) as inst:
+ except OSError as inst:
# nf not found on disk - it is dirstate only
if nf in dmap: # does it exactly match a missing file?
results[nf] = None
@@ -1338,7 +1357,14 @@
return results, dirsfound, dirsnotfound
- def walk(self, match, subrepos, unknown, ignored, full=True):
+ def walk(
+ self,
+ match: matchmod.basematcher,
+ subrepos: Any,
+ unknown: bool,
+ ignored: bool,
+ full: bool = True,
+ ) -> intdirstate.WalkReturnT:
"""
Walk recursively through the directory tree, finding all files
matched by match.
@@ -1611,7 +1637,14 @@
)
return (lookup, status)
- def status(self, match, subrepos, ignored, clean, unknown):
+ def status(
+ self,
+ match: matchmod.basematcher,
+ subrepos: bool,
+ ignored: bool,
+ clean: bool,
+ unknown: bool,
+ ) -> intdirstate.StatusReturnT:
"""Determine the status of the working copy relative to the
dirstate and return a pair of (unsure, status), where status is of type
scmutil.status and:
@@ -1736,20 +1769,34 @@
ladd(fn)
else:
madd(fn)
- elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
- # There might be a change in the future if for example the
- # internal clock is off, but this is a case where the issues
- # the user would face would be a lot worse and there is
- # nothing we can really do.
- ladd(fn)
- elif listclean:
- cadd(fn)
+ else:
+ reliable = None
+ if mtime_boundary is not None:
+ reliable = timestamp.reliable_mtime_of(
+ st, mtime_boundary
+ )
+ elif t.mtime_likely_equal_to(timestamp.mtime_of(st)):
+ # We can't compute the current fs time, so we're in
+ # a readonly fs or a LFS context.
+ cadd(fn)
+ continue
+
+ if reliable is None or not t.mtime_likely_equal_to(
+ reliable
+ ):
+ # There might be a change in the future if for example
+ # the internal clock is off, but this is a case where
+ # the issues the user would face would be a lot worse
+ # and there is nothing we can really do.
+ ladd(fn)
+ elif listclean:
+ cadd(fn)
status = scmutil.status(
modified, added, removed, deleted, unknown, ignored, clean
)
return (lookup, status, mtime_boundary)
- def matches(self, match):
+ def matches(self, match: matchmod.basematcher) -> Iterable[bytes]:
"""
return files in the dirstate (in whatever state) filtered by match
"""
@@ -1782,7 +1829,9 @@
files.append(self._map.docket.data_filename())
return tuple(files)
- def verify(self, m1, m2, p1, narrow_matcher=None):
+ def verify(
+ self, m1, m2, p1: bytes, narrow_matcher: Optional[Any] = None
+ ) -> Iterator[bytes]:
"""
check the dirstate contents against the parent manifest and yield errors
"""
--- a/mercurial/dirstatemap.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dirstatemap.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
+import stat
+
+from typing import (
+ Optional,
+ TYPE_CHECKING,
+)
from .i18n import _
@@ -12,6 +20,7 @@
policy,
testing,
txnutil,
+ typelib,
util,
)
@@ -20,6 +29,11 @@
v2,
)
+if TYPE_CHECKING:
+ from . import (
+ ui as uimod,
+ )
+
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')
@@ -46,12 +60,31 @@
class, with and without Rust extensions enabled.
"""
+ _use_dirstate_v2: bool
+ _nodeconstants: typelib.NodeConstants
+ _ui: "uimod.ui"
+ _root: bytes
+ _filename: bytes
+ _nodelen: int
+ _dirtyparents: bool
+ _docket: Optional["docketmod.DirstateDocket"]
+ _write_mode: int
+ _pendingmode: Optional[bool]
+ identity: Optional[typelib.CacheStat]
+
# please pytype
_map = None
copymap = None
- def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
+ def __init__(
+ self,
+ ui: "uimod.ui",
+ opener,
+ root: bytes,
+ nodeconstants: typelib.NodeConstants,
+ use_dirstate_v2: bool,
+ ) -> None:
self._use_dirstate_v2 = use_dirstate_v2
self._nodeconstants = nodeconstants
self._ui = ui
@@ -76,16 +109,20 @@
# for consistent view between _pl() and _read() invocations
self._pendingmode = None
- def _set_identity(self):
+ def _set_identity(self) -> None:
self.identity = self._get_current_identity()
- def _get_current_identity(self):
+ def _get_current_identity(self) -> Optional[typelib.CacheStat]:
+ # TODO have a cleaner approach on httpstaticrepo side
+ path = self._opener.join(self._filename)
+ if path.startswith(b'https://') or path.startswith(b'http://'):
+ return util.uncacheable_cachestat()
try:
- return util.cachestat(self._opener.join(self._filename))
+ return util.cachestat(path)
except FileNotFoundError:
return None
- def may_need_refresh(self):
+ def may_need_refresh(self) -> bool:
if 'identity' not in vars(self):
# no existing identity, we need a refresh
return True
@@ -104,7 +141,7 @@
return True
return current_identity != self.identity
- def preload(self):
+ def preload(self) -> None:
"""Loads the underlying data, if it's not already loaded"""
self._map
@@ -135,7 +172,8 @@
self._pendingmode = mode
return fp
- def _readdirstatefile(self, size=-1):
+ def _readdirstatefile(self, size: int = -1) -> bytes:
+ testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
try:
with self._opendirstatefile() as fp:
return fp.read(size)
@@ -144,7 +182,8 @@
return b''
@property
- def docket(self):
+ def docket(self) -> "docketmod.DirstateDocket":
+ testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
if not self._docket:
if not self._use_dirstate_v2:
raise error.ProgrammingError(
@@ -331,7 +370,7 @@
`all` is unused when Rust is not enabled
"""
- for (filename, item) in self.items():
+ for filename, item in self.items():
yield (filename, item.state, item.mode, item.size, item.mtime)
def keys(self):
@@ -617,7 +656,8 @@
This should also drop associated copy information
- The fact we actually need to drop it is the responsability of the caller"""
+ The fact we actually need to drop it is the responsibility of the caller
+ """
self._map.pop(f, None)
self.copymap.pop(f, None)
@@ -625,7 +665,6 @@
if rustmod is not None:
class dirstatemap(_dirstatemapcommon):
-
### Core data storage and access
@propertycache
@@ -645,12 +684,7 @@
parents = self._v1_map(e)
else:
parents = self.docket.parents
- inode = (
- self.identity.stat.st_ino
- if self.identity is not None
- and self.identity.stat is not None
- else None
- )
+ identity = self._get_rust_identity()
testing.wait_on_cfg(
self._ui, b'dirstate.post-docket-read-file'
)
@@ -664,7 +698,7 @@
self.docket.data_size,
self.docket.tree_metadata,
self.docket.uuid,
- inode,
+ identity,
)
parents = self.docket.parents
else:
@@ -678,16 +712,31 @@
self.get = self._map.get
return self._map
- def _v1_map(self, from_v2_exception=None):
+ def _get_rust_identity(self):
self._set_identity()
- inode = (
- self.identity.stat.st_ino
- if self.identity is not None and self.identity.stat is not None
- else None
- )
+ identity = None
+ if self.identity is not None and self.identity.stat is not None:
+ stat_info = self.identity.stat
+ identity = rustmod.DirstateIdentity(
+ mode=stat_info.st_mode,
+ dev=stat_info.st_dev,
+ ino=stat_info.st_ino,
+ nlink=stat_info.st_nlink,
+ uid=stat_info.st_uid,
+ gid=stat_info.st_gid,
+ size=stat_info.st_size,
+ mtime=stat_info[stat.ST_MTIME],
+ mtime_nsec=0,
+ ctime=stat_info[stat.ST_CTIME],
+ ctime_nsec=0,
+ )
+ return identity
+
+ def _v1_map(self, from_v2_exception=None):
+ identity = self._get_rust_identity()
try:
self._map, parents = rustmod.DirstateMap.new_v1(
- self._readdirstatefile(), inode
+ self._readdirstatefile(), identity
)
except OSError as e:
if from_v2_exception is not None:
--- a/mercurial/dirstateutils/docket.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dirstateutils/docket.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import struct
--- a/mercurial/dirstateutils/timestamp.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dirstateutils/timestamp.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,10 +3,13 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import functools
import os
import stat
+import time
+from typing import Optional, Tuple
from .. import error
@@ -48,7 +51,7 @@
)
-def get_fs_now(vfs):
+def get_fs_now(vfs) -> Optional[timestamp]:
"""return a timestamp for "now" in the current vfs
This will raise an exception if no temporary files could be created.
@@ -61,14 +64,14 @@
vfs.unlink(tmpname)
-def zero():
+def zero() -> timestamp:
"""
Returns the `timestamp` at the Unix epoch.
"""
return tuple.__new__(timestamp, (0, 0))
-def mtime_of(stat_result):
+def mtime_of(stat_result: os.stat_result) -> timestamp:
"""
Takes an `os.stat_result`-like object and returns a `timestamp` object
for its modification time.
@@ -94,7 +97,17 @@
return timestamp((secs, subsec_nanos, False))
-def reliable_mtime_of(stat_result, present_mtime):
+def reliable_mtime_of(
+ stat_result: os.stat_result, present_mtime: timestamp
+) -> Optional[timestamp]:
+ """Wrapper for `make_mtime_reliable` for stat objects"""
+ file_mtime = mtime_of(stat_result)
+ return make_mtime_reliable(file_mtime, present_mtime)
+
+
+def make_mtime_reliable(
+ file_timestamp: timestamp, present_mtime: timestamp
+) -> Optional[timestamp]:
"""Same as `mtime_of`, but return `None` or a `Timestamp` with
`second_ambiguous` set if the date might be ambiguous.
@@ -103,9 +116,8 @@
Otherwise a concurrent modification might happens with the same mtime.
"""
- file_mtime = mtime_of(stat_result)
- file_second = file_mtime[0]
- file_ns = file_mtime[1]
+ file_second = file_timestamp[0]
+ file_ns = file_timestamp[1]
boundary_second = present_mtime[0]
boundary_ns = present_mtime[1]
# If the mtime of the ambiguous file is younger (or equal) to the starting
@@ -124,4 +136,45 @@
elif boundary_second < file_second < (3600 * 24 + boundary_second):
return None
else:
- return file_mtime
+ return file_timestamp
+
+
+FS_TICK_WAIT_TIMEOUT = 0.1 # 100 milliseconds
+
+
+def wait_until_fs_tick(vfs) -> Optional[Tuple[timestamp, bool]]:
+ """Wait until the next update from the filesystem time by writing in a loop
+ a new temporary file inside the working directory and checking if its time
+ differs from the first one observed.
+
+ Returns `None` if we are unable to get the filesystem time,
+ `(timestamp, True)` if we've timed out waiting for the filesystem clock
+ to tick, and `(timestamp, False)` if we've waited successfully.
+
+ On Linux, your average tick is going to be a "jiffy", or 1/HZ.
+ HZ is your kernel's tick rate (if it has one configured) and the value
+ is the one returned by `grep 'CONFIG_HZ=' /boot/config-$(uname -r)`,
+ again assuming a normal setup.
+
+ In my case (Alphare) at the time of writing, I get `CONFIG_HZ=250`,
+ which equates to 4ms.
+ This might change with a series that could make it to Linux 6.12:
+ https://lore.kernel.org/all/20241002-mgtime-v10-8-d1c4717f5284@kernel.org
+ """
+ start = time.monotonic()
+
+ try:
+ old_fs_time = get_fs_now(vfs)
+ new_fs_time = get_fs_now(vfs)
+
+ while (
+ new_fs_time[0] == old_fs_time[0]
+ and new_fs_time[1] == old_fs_time[1]
+ ):
+ if time.monotonic() - start > FS_TICK_WAIT_TIMEOUT:
+ return (old_fs_time, True)
+ new_fs_time = get_fs_now(vfs)
+ except OSError:
+ return None
+ else:
+ return (new_fs_time, False)
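
A short usage sketch for the helper above (``vfs`` being any writable
Mercurial vfs, e.g. ``repo.vfs``; the wrapper name is illustrative)::

    from mercurial.dirstateutils import timestamp

    def next_fs_time(vfs):
        res = timestamp.wait_until_fs_tick(vfs)
        if res is None:
            return None        # the filesystem clock could not be sampled
        ts, timed_out = res
        if timed_out:
            # The clock never ticked within FS_TICK_WAIT_TIMEOUT; callers
            # should treat mtimes equal to 'ts' as ambiguous.
            pass
        return ts
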
--- a/mercurial/dirstateutils/v2.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dirstateutils/v2.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,10 +5,18 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import struct
+import typing
from ..thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .. import error, policy
parsers = policy.importmod('parsers')
--- a/mercurial/discovery.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/discovery.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import functools
--- a/mercurial/dispatch.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/dispatch.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
import getopt
@@ -85,7 +86,7 @@
# store the parsed and canonical command
self.canonical_command = None
- def _runexithandlers(self):
+ def _runexithandlers(self) -> None:
exc = None
handlers = self.ui._exithandlers
try:
@@ -239,7 +240,7 @@
return status
-def _rundispatch(req):
+def _rundispatch(req) -> int:
with tracing.log('dispatch._rundispatch'):
if req.ferr:
ferr = req.ferr
@@ -300,7 +301,7 @@
req.ui.log(
b'uiblocked',
b'ui blocked ms\n',
- **pycompat.strkwargs(req.ui._blockedtimes)
+ **pycompat.strkwargs(req.ui._blockedtimes),
)
return_code = ret & 255
req.ui.log(
--- a/mercurial/encoding.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/encoding.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import locale
import os
@@ -25,11 +26,12 @@
pycompat,
)
+from .interfaces import modules as intmod
from .pure import charencode as charencodepure
_Tlocalstr = TypeVar('_Tlocalstr', bound='localstr')
-charencode = policy.importmod('charencode')
+charencode: intmod.CharEncoding = policy.importmod('charencode')
isasciistr = charencode.isasciistr
asciilower = charencode.asciilower
@@ -40,6 +42,7 @@
unichr = chr
+
# These unicode characters are ignored by HFS+ (Apple Technote 1150,
# "Unicode Subtleties"), so we need to ignore them in some places for
# sanity.
@@ -103,14 +106,15 @@
if pycompat.iswindows:
_encodingrewrites[b'cp65001'] = b'utf-8'
+encoding: bytes = b'' # help pytype avoid seeing None value
try:
- encoding = environ.get(b"HGENCODING")
+ encoding = environ.get(b"HGENCODING", b'')
if not encoding:
encoding = locale.getpreferredencoding().encode('ascii') or b'ascii'
encoding = _encodingrewrites.get(encoding, encoding)
except locale.Error:
encoding = b'ascii'
-encodingmode = environ.get(b"HGENCODINGMODE", b"strict")
+encodingmode: bytes = environ.get(b"HGENCODINGMODE", b"strict")
fallbackencoding = b'ISO-8859-1'
@@ -366,7 +370,6 @@
cwd = cwd[0:1].upper() + cwd[1:]
return cwd
-
else:
getcwd = os.getcwdb # re-exports
@@ -524,7 +527,7 @@
other = 0
-def jsonescape(s: Any, paranoid: Any = False) -> Any:
+def jsonescape(s: bytes, paranoid: bool = False) -> bytes:
"""returns a string suitable for JSON
JSON is problematic for us because it doesn't support non-Unicode
--- a/mercurial/error.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/error.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,6 +11,7 @@
imports.
"""
+from __future__ import annotations
import difflib
--- a/mercurial/exchange.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/exchange.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import weakref
@@ -449,7 +450,7 @@
newbranch,
bookmarks,
publish,
- **pycompat.strkwargs(opargs)
+ **pycompat.strkwargs(opargs),
)
if pushop.remote.local():
missing = (
@@ -703,8 +704,8 @@
repo = pushop.repo
# very naive computation, that can be quite expensive on big repo.
# However: evolution is currently slow on them anyway.
- nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
- pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
+ revs = repo.revs(b'::%ln', pushop.futureheads)
+ pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(revs=revs)
@pushdiscovery(b'bookmarks')
@@ -1278,18 +1279,18 @@
):
# push everything,
# use the fast path, no race possible on push
- cg = changegroup.makechangegroup(
- pushop.repo,
- outgoing,
- b'01',
- b'push',
- fastpath=True,
- bundlecaps=bundlecaps,
- )
+ fastpath = True
else:
- cg = changegroup.makechangegroup(
- pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
- )
+ fastpath = False
+
+ cg = changegroup.makechangegroup(
+ pushop.repo,
+ outgoing,
+ b'01',
+ b'push',
+ fastpath=fastpath,
+ bundlecaps=bundlecaps,
+ )
# apply changegroup to remote
# local repo finds heads on server, finds out what
@@ -1717,7 +1718,7 @@
includepats=includepats,
excludepats=excludepats,
depth=depth,
- **pycompat.strkwargs(opargs)
+ **pycompat.strkwargs(opargs),
)
peerlocal = pullop.remote.local()
@@ -2420,7 +2421,7 @@
common=None,
bundlecaps=None,
remote_sidedata=None,
- **kwargs
+ **kwargs,
):
"""Return chunks constituting a bundle's raw data.
@@ -2480,7 +2481,7 @@
bundlecaps=bundlecaps,
b2caps=b2caps,
remote_sidedata=remote_sidedata,
- **pycompat.strkwargs(kwargs)
+ **pycompat.strkwargs(kwargs),
)
info[b'prefercompressed'] = bundler.prefercompressed
@@ -2503,7 +2504,7 @@
heads=None,
common=None,
remote_sidedata=None,
- **kwargs
+ **kwargs,
):
"""add a changegroup part to the requested bundle"""
if not kwargs.get('cg', True) or not b2caps:
@@ -2603,10 +2604,15 @@
):
"""add an obsolescence markers part to the requested bundle"""
if kwargs.get('obsmarkers', False):
+ unfi_cl = repo.unfiltered().changelog
if heads is None:
- heads = repo.heads()
- subset = [c.node() for c in repo.set(b'::%ln', heads)]
- markers = repo.obsstore.relevantmarkers(subset)
+ headrevs = repo.changelog.headrevs()
+ else:
+ get_rev = unfi_cl.index.get_rev
+ headrevs = [get_rev(node) for node in heads]
+ headrevs = [rev for rev in headrevs if rev is not None]
+ revs = unfi_cl.ancestors(headrevs, inclusive=True)
+ markers = repo.obsstore.relevantmarkers(revs=revs)
markers = obsutil.sortedmarkers(markers)
bundle2.buildobsmarkerspart(bundler, markers)
@@ -2669,7 +2675,7 @@
b2caps=None,
heads=None,
common=None,
- **kwargs
+ **kwargs,
):
"""Transfer the .hgtags filenodes mapping.
@@ -2697,7 +2703,7 @@
b2caps=None,
heads=None,
common=None,
- **kwargs
+ **kwargs,
):
"""Transfer the rev-branch-cache mapping
@@ -2894,8 +2900,23 @@
entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
url = entries[0][b'URL']
+ digest = entries[0].get(b'DIGEST')
+ if digest:
+ algorithms = urlmod.digesthandler.digest_algorithms.keys()
+ preference = dict(zip(algorithms, range(len(algorithms))))
+ best_entry = None
+ best_preference = len(preference)
+ for digest_entry in digest.split(b','):
+ cur_algo, cur_digest = digest_entry.split(b':')
+ if cur_algo not in preference:
+ continue
+ if preference[cur_algo] < best_preference:
+ best_entry = digest_entry
+ best_preference = preference[cur_algo]
+ digest = best_entry
+
repo.ui.status(_(b'applying clone bundle from %s\n') % url)
- if trypullbundlefromurl(repo.ui, repo, url, remote):
+ if trypullbundlefromurl(repo.ui, repo, url, remote, digest):
repo.ui.status(_(b'finished applying clone bundle\n'))
# Bundle failed.
#
@@ -2924,14 +2945,14 @@
return util.chunkbuffer(peerclonebundle)
-def trypullbundlefromurl(ui, repo, url, peer):
+def trypullbundlefromurl(ui, repo, url, peer, digest):
"""Attempt to apply a bundle from a URL."""
with repo.lock(), repo.transaction(b'bundleurl') as tr:
try:
if url.startswith(bundlecaches.CLONEBUNDLESCHEME):
fh = inline_clone_bundle_open(ui, url, peer)
else:
- fh = urlmod.open(ui, url)
+ fh = urlmod.open(ui, url, digest=digest)
cg = readbundle(ui, fh, b'stream')
if isinstance(cg, streamclone.streamcloneapplier):
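
The clone-bundle change above picks the best entry from a ``DIGEST`` field of
the form ``algo1:hex1,algo2:hex2``. A self-contained sketch of that selection,
assuming the supported algorithms are listed in preference order (function
name is illustrative)::

    def pick_digest(digest_field, algorithms):
        # algorithms: iterable of algorithm names, most preferred first.
        preference = {algo: idx for idx, algo in enumerate(algorithms)}
        best_entry, best_pref = None, len(preference)
        for entry in digest_field.split(b','):
            algo, _hexdigest = entry.split(b':')
            if algo in preference and preference[algo] < best_pref:
                best_entry, best_pref = entry, preference[algo]
        return best_entry  # None if no advertised algorithm is supported

    assert pick_digest(
        b'sha512:aa,sha256:bb', [b'sha256', b'sha512']
    ) == b'sha256:bb'
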
--- a/mercurial/extensions.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/extensions.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import ast
import collections
@@ -290,7 +291,7 @@
with util.timedcm('load all extensions') as stats:
default_sub_options = ui.configsuboptions(b"extensions", b"*")[1]
- for (name, path) in result:
+ for name, path in result:
if path:
if path[0:1] == b'!':
if name not in _disabledextensions:
--- a/mercurial/exthelper.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/exthelper.py Sat Oct 26 04:16:00 2024 +0200
@@ -9,6 +9,7 @@
### Extension helper ###
#####################################################################
+from __future__ import annotations
from . import (
commands,
--- a/mercurial/fancyopts.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/fancyopts.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import abc
import functools
--- a/mercurial/filelog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/filelog.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,12 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
+from typing import (
+ Iterable,
+ Iterator,
+)
from .i18n import _
from .node import nullrev
@@ -14,7 +20,6 @@
)
from .interfaces import (
repository,
- util as interfaceutil,
)
from .utils import storageutil
from .revlogutils import (
@@ -23,8 +28,11 @@
)
-@interfaceutil.implementer(repository.ifilestorage)
-class filelog:
+class filelog: # (repository.ifilestorage)
+ _revlog: revlog.revlog
+ nullid: bytes
+ _fix_issue6528: bool
+
def __init__(self, opener, path, try_split=False):
self._revlog = revlog.revlog(
opener,
@@ -42,7 +50,7 @@
opts = opener.options
self._fix_issue6528 = opts.get(b'issue6528.fix-incoming', True)
- def get_revlog(self):
+ def get_revlog(self) -> revlog.revlog:
"""return an actual revlog instance if any
This exist because a lot of code leverage the fact the underlying
@@ -51,10 +59,10 @@
"""
return self._revlog
- def __len__(self):
+ def __len__(self) -> int:
return len(self._revlog)
- def __iter__(self):
+ def __iter__(self) -> Iterator[int]:
return self._revlog.__iter__()
def hasnode(self, node):
@@ -175,7 +183,6 @@
)
with self._revlog._writing(transaction):
-
if self._fix_issue6528:
deltas = rewrite.filter_delta_issue6528(self._revlog, deltas)
@@ -234,7 +241,7 @@
"""
return not storageutil.filedataequivalent(self, node, text)
- def verifyintegrity(self, state):
+ def verifyintegrity(self, state) -> Iterable[revlog.revlogproblem]:
return self._revlog.verifyintegrity(state)
def storageinfo(
--- a/mercurial/filemerge.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/filemerge.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import os
@@ -480,6 +481,8 @@
suppresses the markers."""
ui = repo.ui
+ relaxed_sync = ui.configbool(b'experimental', b'relaxed-block-sync-merge')
+
try:
_verifytext(local, ui)
_verifytext(base, ui)
@@ -488,7 +491,11 @@
return True, True, False
else:
merged_text, conflicts = simplemerge.simplemerge(
- local, base, other, mode=mode
+ local,
+ base,
+ other,
+ mode=mode,
+ relaxed_sync=relaxed_sync,
)
# fcd.flags() already has the merged flags (done in
# mergestate.resolve())
--- a/mercurial/fileset.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/fileset.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
--- a/mercurial/filesetlang.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/filesetlang.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from . import (
--- a/mercurial/formatter.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/formatter.py Sat Oct 26 04:16:00 2024 +0200
@@ -105,11 +105,13 @@
baz: foo, bar
"""
+from __future__ import annotations
import contextlib
import itertools
import os
import pickle
+import typing
from .i18n import _
from .node import (
@@ -118,6 +120,11 @@
)
from .thirdparty import attr
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
error,
pycompat,
@@ -176,7 +183,6 @@
class baseformatter:
-
# set to True if the formater output a strict format that does not support
# arbitrary output in the stream.
strict_format = False
@@ -421,7 +427,6 @@
class jsonformatter(baseformatter):
-
strict_format = True
def __init__(self, ui, out, topic, opts):
--- a/mercurial/graphmod.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/graphmod.py Sat Oct 26 04:16:00 2024 +0200
@@ -17,9 +17,18 @@
Data depends on type.
"""
+from __future__ import annotations
+
+import typing
from .node import nullrev
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
dagop,
smartset,
@@ -133,8 +142,7 @@
else:
getconf = lambda rev: {}
- for (cur, type, data, parents) in dag:
-
+ for cur, type, data, parents in dag:
# Compute seen and next
if cur not in seen:
seen.append(cur) # new head
@@ -244,7 +252,7 @@
def _fixlongrightedges(edges):
- for (i, (start, end)) in enumerate(edges):
+ for i, (start, end) in enumerate(edges):
if end > start:
edges[i] = (start, end + 1)
@@ -265,7 +273,7 @@
def _drawedges(echars, edges, nodeline, interline):
- for (start, end) in edges:
+ for start, end in edges:
if start == end + 1:
interline[2 * end + 1] = b"/"
elif start == end - 1:
@@ -381,7 +389,7 @@
this function can be monkey-patched by extensions to alter graph display
without needing to mimic all of the edge-fixup logic in ascii()
"""
- for (ln, logstr) in graph:
+ for ln, logstr in graph:
ui.write((ln + logstr).rstrip() + b"\n")
--- a/mercurial/grep.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/grep.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import difflib
--- a/mercurial/hbisect.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hbisect.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import contextlib
--- a/mercurial/help.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/help.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import itertools
import re
@@ -159,6 +160,15 @@
return rst
+def ext_help(ui: uimod.ui, ext) -> bytes:
+ doc = pycompat.getdoc(ext)
+ if doc is None:
+ return b""
+ assert doc is not None
+ doc = gettext(doc)
+ return sub_config_item_help(ui, doc)
+
+
def extshelp(ui: uimod.ui) -> bytes:
rst = loaddoc(b'extensions')(ui).splitlines(True)
rst.extend(
@@ -365,6 +375,7 @@
doc = gettext(fp.read())
for rewriter in helphooks.get(topic, []):
doc = rewriter(ui, topic, doc)
+ doc = sub_config_item_help(ui, doc)
return doc
return loader
@@ -695,6 +706,44 @@
return re.sub(br'( *)%s' % re.escape(marker), sub, doc)
+_CONFIG_DOC_RE = re.compile(b'(^ *)?:config-doc:`([^`]+)`', flags=re.MULTILINE)
+
+
+def sub_config_item_help(ui: uimod.ui, doc: bytes) -> bytes:
+ """replace :config-doc:`foo.bar` markup with the item documentation
+
+    This allows grouping a config item's declaration and its help so the
+    text does not have to be repeated in the help file and kept in sync.
+ """
+ pieces = []
+ last_match_end = 0
+ for match in _CONFIG_DOC_RE.finditer(doc):
+        # finditer is expected to yield results in order
+ start = match.start()
+ assert last_match_end <= match.start()
+ pieces.append(doc[last_match_end:start])
+ item_name = match.group(2)
+ section, key = item_name.split(b'.', 1)
+ section_items = ui._knownconfig.get(section)
+ if section_items is None:
+ item = None
+ else:
+ item = section_items.get(key)
+ if item is None or not item.documentation:
+ item_doc = b'<missing help text for `%s`>' % item_name
+ else:
+ item_doc = gettext(item.documentation)
+ item_doc = sub_config_item_help(ui, item_doc)
+ indent = match.group(1)
+        if indent:  # either None or an empty string is ignored
+ item_doc = indent + item_doc.replace(b'\n', b'\n' + indent)
+ pieces.append(item_doc)
+ last_match_end = match.end()
+ pieces.append(doc[last_match_end:])
+ return b''.join(pieces)
+
+
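As an aside, the substitution above can be illustrated with a self-contained
sketch (a plain dict stands in for ``ui._knownconfig``; gettext and recursion
are omitted, and all names and the sample help text are invented):

    import re

    _CONFIG_DOC_RE = re.compile(b'(^ *)?:config-doc:`([^`]+)`', flags=re.MULTILINE)

    # hypothetical stand-in for the registered config item documentation
    _KNOWN_DOCS = {b'censor.policy': b'control the behavior on censored content'}

    def sub_config_item_help_sketch(doc: bytes) -> bytes:
        pieces = []
        last = 0
        for m in _CONFIG_DOC_RE.finditer(doc):
            pieces.append(doc[last:m.start()])
            default = b'<missing help text for `%s`>' % m.group(2)
            item_doc = _KNOWN_DOCS.get(m.group(2), default)
            indent = m.group(1)
            if indent:
                # re-indent multi-line help to match the markup's indentation
                item_doc = indent + item_doc.replace(b'\n', b'\n' + indent)
            pieces.append(item_doc)
            last = m.end()
        pieces.append(doc[last:])
        return b''.join(pieces)

    # b'  :config-doc:`censor.policy`' becomes the registered help text,
    # re-indented by two spaces.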
def _getcategorizedhelpcmds(
ui: uimod.ui, cmdtable, name: bytes, select: Optional[_SelectFn] = None
) -> Tuple[Dict[bytes, List[bytes]], Dict[bytes, bytes], _SynonymTable]:
@@ -757,7 +806,7 @@
full: bool = True,
subtopic: Optional[bytes] = None,
fullname: Optional[bytes] = None,
- **opts
+ **opts,
) -> bytes:
"""
Generate the help for 'name' as unformatted restructured text. If
@@ -822,6 +871,7 @@
doc,
source,
)
+ doc = sub_config_item_help(ui, doc)
doc = doc.splitlines(True)
if ui.quiet or not full:
rst.append(doc[0])
@@ -1042,12 +1092,15 @@
def helpext(name: bytes, subtopic: Optional[bytes] = None) -> List[bytes]:
try:
mod = extensions.find(name)
- doc = gettext(pycompat.getdoc(mod)) or _(b'no help text available')
+ doc = ext_help(ui, mod)
+ if not doc:
+ doc = _(b'no help text available')
except KeyError:
mod = None
doc = extensions.disabled_help(name)
if not doc:
raise error.UnknownCommand(name)
+ doc = sub_config_item_help(ui, doc)
if b'\n' not in doc:
head, tail = doc, b""
@@ -1176,7 +1229,7 @@
keep: Optional[Iterable[bytes]] = None,
unknowncmd: bool = False,
full: bool = True,
- **opts
+ **opts,
) -> bytes:
"""get help for a given topic (as a dotted name) as rendered rst
@@ -1209,7 +1262,7 @@
subtopic=subtopic,
unknowncmd=unknowncmd,
full=full,
- **opts
+ **opts,
)
blocks, pruned = minirst.parse(text, keep=keep)
--- a/mercurial/helptext/config.txt Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/helptext/config.txt Sat Oct 26 04:16:00 2024 +0200
@@ -418,6 +418,12 @@
If no suitable authentication entry is found, the user is prompted
for credentials as usual if required by the remote.
+``censor``
+----------
+
+``policy``
+ :config-doc:`censor.policy`
+
``cmdserver``
-------------
@@ -2040,6 +2046,9 @@
Use a statistical profiler, statprof. This profiler is most
useful for profiling commands that run for longer than about 0.1
seconds.
+ ``py-spy``
+      Use the py-spy profiler. An external py-spy executable must be
+      available. (Make sure to set the `profiling.output` config option so
+      the result is written out.)
``format``
Profiling format. Specific to the ``ls`` instrumenting profiler.
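For instance, a minimal hgrc sketch selecting the py-spy profiler described
above (the output path is illustrative)::

    [profiling]
    type = py-spy
    output = /tmp/hg-profile.txt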
@@ -2344,6 +2353,17 @@
you do, consider talking with the mercurial developer community about your
repositories.
+``revlog.mmap.index``
+    Whether to use the operating system's "memory mapping" feature (when
+ possible) to access the revlog index. This improves performance
+ and reduces memory pressure.
+
+.. container:: verbose
+
+ ``revlog.mmap.index:size-threshold``
+
+        The index size above which the "memory mapping" feature is used.
+
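A minimal hgrc sketch enabling the memory-mapped index with an explicit
threshold (the values shown are illustrative, not the defaults)::

    [storage]
    revlog.mmap.index = yes
    revlog.mmap.index:size-threshold = 1 MB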
``revlog.optimize-delta-parent-choice``
When storing a merge revision, both parents will be equally considered as
a possible delta base. This results in better delta selection and improved
--- a/mercurial/helptext/hgweb.txt Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/helptext/hgweb.txt Sat Oct 26 04:16:00 2024 +0200
@@ -83,4 +83,4 @@
The following web commands and their URLs are available:
- .. webcommandsmarker
+.. webcommandsmarker
--- a/mercurial/hg.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hg.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,11 +6,13 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import posixpath
import shutil
import stat
+import typing
import weakref
from .i18n import _
@@ -57,6 +59,11 @@
urlutil,
)
+if typing.TYPE_CHECKING:
+ from typing import (
+ List,
+ Tuple,
+ )
release = lock.release
@@ -1600,7 +1607,7 @@
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
-foi = [
+foi: "List[Tuple[str, bytes]]" = [
('spath', b'00changelog.i'),
('spath', b'phaseroots'), # ! phase can change content at the same size
('spath', b'obsstore'),
--- a/mercurial/hgweb/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
--- a/mercurial/hgweb/common.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/common.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import base64
import errno
--- a/mercurial/hgweb/hgweb_mod.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/hgweb_mod.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import os
--- a/mercurial/hgweb/hgwebdir_mod.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/hgwebdir_mod.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import gc
import os
@@ -120,7 +121,6 @@
seenrepos = set()
seendirs = set()
for name, path in repos:
-
if not name.startswith(subdir):
continue
name = name[len(subdir) :]
--- a/mercurial/hgweb/request.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/request.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,10 +6,19 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
# import wsgiref.validate
+import typing
+
from ..thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .. import (
error,
pycompat,
--- a/mercurial/hgweb/server.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/server.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
import os
@@ -66,7 +67,6 @@
class _httprequesthandler(httpservermod.basehttprequesthandler):
-
url_scheme = b'http'
@staticmethod
@@ -358,7 +358,6 @@
class MercurialHTTPServer(_mixin, httpservermod.httpserver, object):
-
# SO_REUSEADDR has broken semantics on windows
if pycompat.iswindows:
allow_reuse_address = 0
@@ -396,7 +395,6 @@
def create_server(ui, app):
-
if ui.config(b'web', b'certificate'):
handler = _httprequesthandlerssl
else:
--- a/mercurial/hgweb/webcommands.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/webcommands.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import copy
import mimetypes
@@ -174,7 +175,7 @@
rename=webutil.renamelink(fctx),
permissions=fctx.manifest().flags(f),
ishead=int(ishead),
- **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
+ **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)),
)
@@ -601,7 +602,6 @@
def dirlist(context):
for d in sorted(dirs):
-
emptydirs = []
h = dirs[d]
while isinstance(h, dict) and len(h) == 1:
@@ -629,7 +629,7 @@
fentries=templateutil.mappinggenerator(filelist),
dentries=templateutil.mappinggenerator(dirlist),
archives=web.archivelist(hex(node)),
- **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
+ **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)),
)
@@ -874,7 +874,7 @@
symrev=webutil.symrevorshortnode(web.req, ctx),
rename=rename,
diff=diffs,
- **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
+ **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)),
)
@@ -955,7 +955,7 @@
rightrev=rightrev,
rightnode=hex(rightnode),
comparison=comparison,
- **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
+ **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)),
)
@@ -1062,7 +1062,7 @@
permissions=fctx.manifest().flags(f),
ishead=int(ishead),
diffopts=templateutil.hybriddict(diffopts),
- **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
+ **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)),
)
@@ -1225,7 +1225,7 @@
revcount=revcount,
morevars=morevars,
lessvars=lessvars,
- **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
+ **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)),
)
@@ -1284,35 +1284,46 @@
mimetype, artype, extension, encoding = webutil.archivespecs[type_]
- web.res.headers[b'Content-Type'] = mimetype
- web.res.headers[b'Content-Disposition'] = b'attachment; filename=%s%s' % (
- name,
- extension,
- )
-
- if encoding:
- web.res.headers[b'Content-Encoding'] = encoding
-
- web.res.setbodywillwrite()
- if list(web.res.sendresponse()):
- raise error.ProgrammingError(
- b'sendresponse() should not emit data if writing later'
- )
-
if web.req.method == b'HEAD':
return []
- bodyfh = web.res.getbodyfile()
+ def open_archive():
+ """Open the output "file" for the archiver.
+
+ This function starts the streaming response. Error reporting
+ after this point will result in short writes without proper
+ diagnostics to the client.
+ """
+ web.res.headers[b'Content-Type'] = mimetype
+ web.res.headers[
+ b'Content-Disposition'
+ ] = b'attachment; filename=%s%s' % (
+ name,
+ extension,
+ )
- archival.archive(
+ if encoding:
+ web.res.headers[b'Content-Encoding'] = encoding
+
+ web.res.setbodywillwrite()
+ if list(web.res.sendresponse()):
+ raise error.ProgrammingError(
+ b'sendresponse() should not emit data if writing later'
+ )
+
+ return web.res.getbodyfile()
+
+ total = archival.archive(
web.repo,
- bodyfh,
+ open_archive,
cnode,
artype,
prefix=name,
match=match,
subrepos=web.configbool(b"web", b"archivesubrepos"),
)
+ if total == 0:
+ raise ErrorResponse(HTTP_NOT_FOUND, b'no files found in changeset')
return []
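The reason for handing the archiver a callable rather than an already-opened
body file can be sketched in isolation (invented names, not the actual
archival API): nothing is streamed until there is something to write, so
failures can still produce a proper HTTP error response.

    def archive_sketch(open_output, members):
        """Write members through open_output(), deferring the response start.

        If no member exists, the response is never started and the caller
        can still turn the situation into a clean HTTP 404 error.
        """
        if not members:
            return 0
        out = open_output()  # headers are sent here, not earlier
        for data in members:
            out.write(data)
        return len(members)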
@@ -1427,7 +1438,7 @@
return tree
def jsdata(context):
- for (id, type, ctx, vtx, edges) in fulltree():
+ for id, type, ctx, vtx, edges in fulltree():
yield {
b'node': pycompat.bytestr(ctx),
b'graphnode': webutil.getgraphnode(web.repo, ctx),
--- a/mercurial/hgweb/webutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/webutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import copy
import difflib
@@ -600,7 +601,7 @@
diffsummary=lambda context, mapping: diffsummary(diffstatsgen),
diffstat=diffstats,
archives=web.archivelist(ctx.hex()),
- **pycompat.strkwargs(commonentry(web.repo, ctx))
+ **pycompat.strkwargs(commonentry(web.repo, ctx)),
)
--- a/mercurial/hgweb/wsgicgi.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/wsgicgi.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
# This was originally copied from the public domain code at
# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
+from __future__ import annotations
from .. import encoding, pycompat
--- a/mercurial/hgweb/wsgiheaders.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hgweb/wsgiheaders.py Sat Oct 26 04:16:00 2024 +0200
@@ -9,6 +9,7 @@
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
+from __future__ import annotations
import re
--- a/mercurial/hook.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/hook.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import errno
--- a/mercurial/httpconnection.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/httpconnection.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
@@ -25,6 +26,7 @@
urlerr = util.urlerr
urlreq = util.urlreq
+
# moved here from url.py to avoid a cycle
class httpsendfile:
"""This is a wrapper around the objects returned by python's "open".
--- a/mercurial/httppeer.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/httppeer.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
import io
@@ -245,8 +246,9 @@
Returns the response object.
"""
dbg = ui.debug
+ line = b'devel-peer-request: %s\n'
+
if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
- line = b'devel-peer-request: %s\n'
dbg(
line
% b'%s %s'
@@ -491,6 +493,9 @@
# boolean capability. They only support headerless/uncompressed
# bundles.
types = [b""]
+
+ type = b""
+
for x in types:
if x in bundle2.bundletypes:
type = x
@@ -520,10 +525,9 @@
os.unlink(tempname)
def _calltwowaystream(self, cmd, fp, **args):
- filename = None
+ # dump bundle to disk
+ fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
try:
- # dump bundle to disk
- fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
with os.fdopen(fd, "wb") as fh:
d = fp.read(4096)
while d:
@@ -534,8 +538,7 @@
headers = {'Content-Type': 'application/mercurial-0.1'}
return self._callstream(cmd, data=fp_, headers=headers, **args)
finally:
- if filename is not None:
- os.unlink(filename)
+ os.unlink(filename)
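The simplified cleanup follows a common pattern, sketched here with
illustrative names: create the temporary file before entering the ``try``
block, so ``finally`` can unlink it unconditionally instead of guarding on
``filename is not None``:

    import os
    import tempfile

    def write_bundle_sketch(payload: bytes) -> None:
        fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
        try:
            with os.fdopen(fd, "wb") as fh:
                fh.write(payload)  # any failure still reaches the unlink
        finally:
            os.unlink(filename)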
def _callcompressable(self, cmd, **args):
return self._callstream(cmd, _compressible=True, **args)
--- a/mercurial/i18n.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/i18n.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import gettext as gettextmod
import locale
@@ -12,6 +13,7 @@
import sys
from typing import (
+ Dict,
List,
)
@@ -61,7 +63,9 @@
_ugettext = t.gettext
-_msgcache = {} # encoding: {message: translation}
+_msgcache: Dict[
+ bytes, Dict[bytes, bytes]
+] = {} # encoding: {message: translation}
def gettext(message: bytes) -> bytes:
@@ -119,6 +123,5 @@
def _(message: bytes) -> bytes:
return message
-
else:
_ = gettext
--- a/mercurial/interfaces/dirstate.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/interfaces/dirstate.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,50 +1,133 @@
+from __future__ import annotations
+
import contextlib
+import os
+import typing
+
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Protocol,
+ Tuple,
+)
+
+if typing.TYPE_CHECKING:
+ # Almost all mercurial modules are only imported in the type checking phase
+ # to avoid circular imports
+ from .. import (
+ match as matchmod,
+ scmutil,
+ transaction as txnmod,
+ )
+
+ # TODO: finish adding type hints
+ AddParentChangeCallbackT = Callable[
+ ["idirstate", Tuple[Any, Any], Tuple[Any, Any]], Any
+ ]
+ """The callback type for dirstate.addparentchangecallback()."""
-from . import util as interfaceutil
+ # TODO: add a Protocol for dirstatemap.DirStateItem? (It is
+ # conditionalized with python or rust implementations. Also,
+ # git.dirstate needs to yield non-None from ``items()``.)
+ DirstateItemT = Any # dirstatemap.DirstateItem
+
+ IgnoreFileAndLineT = Tuple[Optional[bytes], int, bytes]
+ """The return type of dirstate._ignorefileandline(), which holds
+ ``(file, lineno, originalline)``.
+ """
+
+ FlagFuncFallbackT = Callable[[], "FlagFuncReturnT"]
+ """The type for the dirstate.flagfunc() fallback function."""
+
+ FlagFuncReturnT = Callable[[bytes], bytes]
+ """The return type of dirstate.flagfunc()."""
+
+ # TODO: verify and complete this- it came from a pytype *.pyi file
+ StatusReturnT = Tuple[Any, scmutil.status, Any]
+ """The return type of dirstate.status()."""
+
+ # TODO: probably doesn't belong here.
+ TransactionT = txnmod.transaction
+ """The type for a transaction used with dirstate.
+
+ This is meant to help callers avoid having to remember to delay the import
+ of the transaction module.
+ """
+
+ # TODO: The value can also be mercurial.osutil.stat
+ WalkReturnT = Dict[bytes, Optional[os.stat_result]]
+ """The return type of dirstate.walk().
+
+ The matched files are keyed in the dictionary, mapped to a stat-like object
+ if the file exists.
+ """
-class idirstate(interfaceutil.Interface):
- def __init__(
- opener,
- ui,
- root,
- validate,
- sparsematchfn,
- nodeconstants,
- use_dirstate_v2,
- use_tracked_hint=False,
- ):
- """Create a new dirstate object.
-
- opener is an open()-like callable that can be used to open the
- dirstate file; root is the root of the directory tracked by
- the dirstate.
- """
+class idirstate(Protocol):
+ # TODO: convert these constructor args to fields?
+ # def __init__(
+ # self,
+ # opener,
+ # ui,
+ # root,
+ # validate,
+ # sparsematchfn,
+ # nodeconstants,
+ # use_dirstate_v2,
+ # use_tracked_hint=False,
+ # ):
+ # """Create a new dirstate object.
+ #
+ # opener is an open()-like callable that can be used to open the
+ # dirstate file; root is the root of the directory tracked by
+ # the dirstate.
+ # """
# TODO: all these private methods and attributes should be made
# public or removed from the interface.
- _ignore = interfaceutil.Attribute("""Matcher for ignored files.""")
- is_changing_any = interfaceutil.Attribute(
+
+ # TODO: decorate with `@rootcache(b'.hgignore')` like dirstate class?
+ @property
+ def _ignore(self) -> matchmod.basematcher:
+ """Matcher for ignored files."""
+
+ @property
+ def is_changing_any(self) -> bool:
"""True if any changes in progress."""
- )
- is_changing_parents = interfaceutil.Attribute(
+
+ @property
+ def is_changing_parents(self) -> bool:
"""True if parents changes in progress."""
- )
- is_changing_files = interfaceutil.Attribute(
+
+ @property
+ def is_changing_files(self) -> bool:
"""True if file tracking changes in progress."""
- )
- def _ignorefiles():
+ def _ignorefiles(self) -> List[bytes]:
"""Return a list of files containing patterns to ignore."""
- def _ignorefileandline(f):
+ def _ignorefileandline(self, f: bytes) -> IgnoreFileAndLineT:
"""Given a file `f`, return the ignore file and line that ignores it."""
- _checklink = interfaceutil.Attribute("""Callable for checking symlinks.""")
- _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")
+ # TODO: decorate with `@util.propertycache` like dirstate class?
+ # (can't because circular import)
+ @property
+ def _checklink(self) -> bool:
+ """Callable for checking symlinks.""" # TODO: this comment looks stale
+
+ # TODO: decorate with `@util.propertycache` like dirstate class?
+ # (can't because circular import)
+ @property
+ def _checkexec(self) -> bool:
+ """Callable for checking exec bits.""" # TODO: this comment looks stale
@contextlib.contextmanager
- def changing_parents(repo):
+ def changing_parents(self, repo) -> Iterator: # TODO: typehint this
"""Context manager for handling dirstate parents.
If an exception occurs in the scope of the context manager,
@@ -53,7 +136,7 @@
"""
@contextlib.contextmanager
- def changing_files(repo):
+ def changing_files(self, repo) -> Iterator: # TODO: typehint this
"""Context manager for handling dirstate files.
If an exception occurs in the scope of the context manager,
@@ -61,10 +144,10 @@
released.
"""
- def hasdir(d):
+ def hasdir(self, d: bytes) -> bool:
pass
- def flagfunc(buildfallback):
+ def flagfunc(self, buildfallback: FlagFuncFallbackT) -> FlagFuncReturnT:
"""build a callable that returns flags associated with a filename
The information is extracted from three possible layers:
@@ -73,7 +156,7 @@
3. a more expensive mechanism inferring the flags from the parents.
"""
- def getcwd():
+ def getcwd(self) -> bytes:
"""Return the path from which a canonical path is calculated.
This path should be used to resolve file patterns or to convert
@@ -81,19 +164,19 @@
used to get real file paths. Use vfs functions instead.
"""
- def pathto(f, cwd=None):
+ def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
pass
- def get_entry(path):
+ def get_entry(self, path: bytes) -> DirstateItemT:
"""return a DirstateItem for the associated path"""
- def __contains__(key):
+ def __contains__(self, key: Any) -> bool:
"""Check if bytestring `key` is known to the dirstate."""
- def __iter__():
+ def __iter__(self) -> Iterator[bytes]:
"""Iterate the dirstate's contained filenames as bytestrings."""
- def items():
+ def items(self) -> Iterator[Tuple[bytes, DirstateItemT]]:
"""Iterate the dirstate's entries as (filename, DirstateItem.
As usual, filename is a bytestring.
@@ -101,19 +184,20 @@
iteritems = items
- def parents():
+ def parents(self) -> List[bytes]:
pass
- def p1():
+ def p1(self) -> bytes:
pass
- def p2():
+ def p2(self) -> bytes:
pass
- def branch():
+ def branch(self) -> bytes:
pass
- def setparents(p1, p2=None):
+ # TODO: typehint the return. It's a copies Map of some sort.
+ def setparents(self, p1: bytes, p2: Optional[bytes] = None):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, "merged" entries a
@@ -123,26 +207,30 @@
See localrepo.setparents()
"""
- def setbranch(branch, transaction):
+ def setbranch(
+ self, branch: bytes, transaction: Optional[TransactionT]
+ ) -> None:
pass
- def invalidate():
+ def invalidate(self) -> None:
"""Causes the next access to reread the dirstate.
This is different from localrepo.invalidatedirstate() because it always
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
check whether the dirstate has changed before rereading it."""
- def copy(source, dest):
+ def copy(self, source: Optional[bytes], dest: bytes) -> None:
"""Mark dest as a copy of source. Unmark dest if source is None."""
- def copied(file):
+ def copied(self, file: bytes) -> Optional[bytes]:
pass
- def copies():
+ def copies(self) -> Dict[bytes, bytes]:
pass
- def normalize(path, isknown=False, ignoremissing=False):
+ def normalize(
+ self, path: bytes, isknown: bool = False, ignoremissing: bool = False
+ ) -> bytes:
"""
normalize the case of a pathname when on a casefolding filesystem
@@ -160,16 +248,23 @@
- version provided via command arguments
"""
- def clear():
+ def clear(self) -> None:
pass
- def rebuild(parent, allfiles, changedfiles=None):
+ def rebuild(
+ self,
+ parent: bytes,
+ allfiles: Iterable[bytes], # TODO: more than iterable? (uses len())
+ changedfiles: Optional[Iterable[bytes]] = None,
+ ) -> None:
pass
- def write(tr):
+ def write(self, tr: Optional[TransactionT]) -> None:
pass
- def addparentchangecallback(category, callback):
+ def addparentchangecallback(
+ self, category: bytes, callback: AddParentChangeCallbackT
+ ) -> None:
"""add a callback to be called when the wd parents are changed
Callback will be called with the following arguments:
@@ -179,7 +274,14 @@
with a newer callback.
"""
- def walk(match, subrepos, unknown, ignored, full=True):
+ def walk(
+ self,
+ match: matchmod.basematcher,
+ subrepos: Any, # TODO: figure out what this is
+ unknown: bool,
+ ignored: bool,
+ full: bool = True,
+ ) -> WalkReturnT:
"""
Walk recursively through the directory tree, finding all files
matched by match.
@@ -191,7 +293,14 @@
"""
- def status(match, subrepos, ignored, clean, unknown):
+ def status(
+ self,
+ match: matchmod.basematcher,
+ subrepos: bool,
+ ignored: bool,
+ clean: bool,
+ unknown: bool,
+ ) -> StatusReturnT:
"""Determine the status of the working copy relative to the
dirstate and return a pair of (unsure, status), where status is of type
scmutil.status and:
@@ -208,12 +317,18 @@
dirstate was written
"""
- def matches(match):
+ # TODO: could return a list, except git.dirstate is a generator
+
+ def matches(self, match: matchmod.basematcher) -> Iterable[bytes]:
"""
return files in the dirstate (in whatever state) filtered by match
"""
- def verify(m1, m2, p1, narrow_matcher=None):
+ # TODO: finish adding typehints here, and to subclasses
+
+ def verify(
+ self, m1, m2, p1: bytes, narrow_matcher: Optional[Any] = None
+ ) -> Iterator[bytes]:
"""
check the dirstate contents against the parent manifest and yield errors
"""
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/interfaces/modules.py Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,85 @@
+# modules.py - protocol classes for dynamically loaded modules
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import annotations
+
+import typing
+
+from typing import (
+ Callable,
+ List,
+ Optional,
+ Protocol,
+ Tuple,
+)
+
+if typing.TYPE_CHECKING:
+ BDiffBlock = Tuple[int, int, int, int]
+ """An entry in the list returned by bdiff.{xdiff,}blocks()."""
+
+ BDiffBlocksFnc = Callable[[bytes, bytes], List[BDiffBlock]]
+ """The signature of `bdiff.blocks()` and `bdiff.xdiffblocks()`."""
+
+
+class Base85(Protocol):
+ """A Protocol class for the various base85 module implementations."""
+
+ def b85encode(self, text: bytes, pad: bool = False) -> bytes:
+ """encode text in base85 format"""
+
+ def b85decode(self, text: bytes) -> bytes:
+ """decode base85-encoded text"""
+
+
+class BDiff(Protocol):
+ """A Protocol class for the various bdiff module implementations."""
+
+ def splitnewlines(self, text: bytes) -> List[bytes]:
+ """like str.splitlines, but only split on newlines."""
+
+ def bdiff(self, a: bytes, b: bytes) -> bytes:
+ ...
+
+ def blocks(self, a: bytes, b: bytes) -> List[BDiffBlock]:
+ ...
+
+ def fixws(self, text: bytes, allws: bool) -> bytes:
+ ...
+
+ xdiffblocks: Optional[BDiffBlocksFnc]
+ """This method is currently only available in the ``cext`` module."""
+
+
+class CharEncoding(Protocol):
+ """A Protocol class for the various charencoding module implementations."""
+
+ def isasciistr(self, s: bytes) -> bool:
+ """Can the byte string be decoded with the ``ascii`` codec?"""
+
+ def asciilower(self, s: bytes) -> bytes:
+ """convert a string to lowercase if ASCII
+
+ Raises UnicodeDecodeError if non-ASCII characters are found."""
+
+ def asciiupper(self, s: bytes) -> bytes:
+ """convert a string to uppercase if ASCII
+
+ Raises UnicodeDecodeError if non-ASCII characters are found."""
+
+ def jsonescapeu8fast(self, u8chars: bytes, paranoid: bool) -> bytes:
+ """Convert a UTF-8 byte string to JSON-escaped form (fast path)
+
+ Raises ValueError if non-ASCII characters have to be escaped.
+ """
+
+
+class MPatch(Protocol):
+ """A protocol class for the various mpatch module implementations."""
+
+ def patches(self, a: bytes, bins: List[bytes]) -> bytes:
+ ...
+
+ def patchedsize(self, orig: int, delta: bytes) -> int:
+ ...
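A hedged usage sketch: type checkers generally accept modules as Protocol
implementations, so a dynamically selected module can be annotated with one
of these classes (``mercurial.pure.bdiff`` is used purely for illustration):

    from mercurial.interfaces import modules as intmod

    def load_bdiff() -> 'intmod.BDiff':
        # the real module policy picks a cext, pure, or rust variant
        from mercurial.pure import bdiff
        return bdiff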
--- a/mercurial/interfaces/repository.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/interfaces/repository.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from ..i18n import _
from .. import error
@@ -81,6 +82,7 @@
CACHE_BRANCHMAP_SERVED,
CACHE_BRANCHMAP_ALL,
CACHE_BRANCHMAP_DETECT_PURE_TOPO,
+ CACHE_REV_BRANCH,
CACHE_CHANGELOG_CACHE,
CACHE_FILE_NODE_TAGS,
CACHE_FULL_MANIFEST,
@@ -1021,6 +1023,12 @@
__bool__ = __nonzero__
+ def set(path, node, flags):
+ """Define the node value and flags for a path in the manifest.
+
+ Equivalent to __setitem__ followed by setflag, but can be more efficient.
+ """
+
def __setitem__(path, node):
"""Define the node value for a path in the manifest.
@@ -1169,12 +1177,70 @@
def readdelta(shallow=False):
"""Obtain the manifest data structure representing changes from parent.
- This manifest is compared to its 1st parent. A new manifest representing
- those differences is constructed.
+ This manifest is compared to its 1st parent. A new manifest
+ representing those differences is constructed.
+
+ If `shallow` is True, this will read the delta for this directory,
+ without recursively reading subdirectory manifests. Instead, any
+ subdirectory entry will be reported as it appears in the manifest, i.e.
+ the subdirectory will be reported among files and distinguished only by
+        its 't' flag. This only applies if the underlying manifest supports it.
The returned object conforms to the ``imanifestdict`` interface.
"""
+ def read_any_fast_delta(valid_bases=None, *, shallow=False):
+        """read some manifest information as fast as possible
+
+        This might return a "delta", a manifest object containing only files
+        changed compared to another revision. The `valid_bases` argument
+        controls the set of revisions that might be used as a base.
+
+ If no delta can be retrieved quickly, a full read of the manifest will
+ be performed instead.
+
+        The function returns a tuple with two elements: the first is the
+        delta base used (or None if a full read was done), the second is the
+        manifest information.
+
+ If `shallow` is True, this will read the delta for this directory,
+ without recursively reading subdirectory manifests. Instead, any
+ subdirectory entry will be reported as it appears in the manifest, i.e.
+ the subdirectory will be reported among files and distinguished only by
+        its 't' flag. This only applies if the underlying manifest supports it.
+
+ The returned object conforms to the ``imanifestdict`` interface.
+ """
+
+ def read_delta_parents(*, shallow=False, exact=True):
+ """return a diff from this revision against both parents.
+
+ If `exact` is False, this might return a superset of the diff, containing
+        files that are actually present as-is in one of the parents.
+
+ If `shallow` is True, this will read the delta for this directory,
+ without recursively reading subdirectory manifests. Instead, any
+ subdirectory entry will be reported as it appears in the manifest, i.e.
+ the subdirectory will be reported among files and distinguished only by
+        its 't' flag. This only applies if the underlying manifest supports it.
+
+ The returned object conforms to the ``imanifestdict`` interface."""
+
+ def read_delta_new_entries(*, shallow=False):
+ """Return a manifest containing just the entries that might be new to
+ the repository.
+
+        This is often equivalent to a diff against both parents, but without
+        guarantee. For performance reasons, it might contain more files in
+        some cases.
+
+ If `shallow` is True, this will read the delta for this directory,
+ without recursively reading subdirectory manifests. Instead, any
+ subdirectory entry will be reported as it appears in the manifest, i.e.
+ the subdirectory will be reported among files and distinguished only by
+        its 't' flag. This only applies if the underlying manifest supports it.
+
+ The returned object conforms to the ``imanifestdict`` interface."""
+
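A hypothetical call-site sketch for the new delta-reading methods (``mctx``
stands for any object implementing this interface; helper names are
invented):

    def possibly_new_files(mctx):
        """Names of entries that may be new to the repository."""
        m = mctx.read_delta_new_entries(shallow=False)
        return list(m.keys())

    def fast_delta_summary(mctx, valid_bases):
        base, m = mctx.read_any_fast_delta(valid_bases=valid_bases)
        if base is None:
            return b'full read: %d entries' % len(m)
        return b'delta against rev %d: %d entries' % (base, len(m))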
def readfast(shallow=False):
"""Calls either ``read()`` or ``readdelta()``.
@@ -1428,6 +1494,10 @@
"""nodeconstants used by the current repository."""
)
+ narrowed = interfaceutil.Attribute(
+        """True if the manifest is narrowed by a matcher"""
+ )
+
def __getitem__(node):
"""Obtain a manifest instance for a given binary node.
@@ -1463,7 +1533,7 @@
TODO formalize interface for returned object.
"""
- def clearcaches():
+ def clearcaches(clear_persisted_data: bool = False) -> None:
"""Clear caches associated with this collection."""
def rev(node):
--- a/mercurial/interfaces/util.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/interfaces/util.py Sat Oct 26 04:16:00 2024 +0200
@@ -9,6 +9,7 @@
# bookkeeping for declaring interfaces. So, we use stubs for various
# zope.interface primitives unless instructed otherwise.
+from __future__ import annotations
from .. import encoding
--- a/mercurial/keepalive.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/keepalive.py Sat Oct 26 04:16:00 2024 +0200
@@ -82,6 +82,7 @@
# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
+from __future__ import annotations
import collections
import hashlib
@@ -339,7 +340,7 @@
h.putrequest(
req.get_method(),
urllibcompat.getselector(req),
- **skipheaders
+ **skipheaders,
)
if 'content-type' not in headers:
h.putheader(
@@ -351,7 +352,7 @@
h.putrequest(
req.get_method(),
urllibcompat.getselector(req),
- **skipheaders
+ **skipheaders,
)
except socket.error as err:
raise urlerr.urlerror(err)
@@ -380,22 +381,9 @@
class HTTPResponse(httplib.HTTPResponse):
# we need to subclass HTTPResponse in order to
- # 1) add readline(), readlines(), and readinto() methods
- # 2) add close_connection() methods
- # 3) add info() and geturl() methods
-
- # in order to add readline(), read must be modified to deal with a
- # buffer. example: readline must read a buffer and then spit back
- # one line at a time. The only real alternative is to read one
- # BYTE at a time (ick). Once something has been read, it can't be
- # put back (ok, maybe it can, but that's even uglier than this),
- # so if you THEN do a normal read, you must first take stuff from
- # the buffer.
-
- # the read method wraps the original to accommodate buffering,
- # although read() never adds to the buffer.
- # Both readline and readlines have been stolen with almost no
- # modification from socket.py
+ # 1) add close_connection() method
+ # 2) add geturl() method
+ # 3) add accounting for read(), readlines() and readinto()
def __init__(self, sock, debuglevel=0, strict=0, method=None):
httplib.HTTPResponse.__init__(
@@ -411,9 +399,6 @@
self._url = None # (same)
self._connection = None # (same)
- _raw_read = httplib.HTTPResponse.read
- _raw_readinto = getattr(httplib.HTTPResponse, 'readinto', None)
-
# Python 2.7 has a single close() which closes the socket handle.
# This method was effectively renamed to _close_conn() in Python 3. But
# there is also a close(). _close_conn() is called by methods like
@@ -435,183 +420,34 @@
self._handler._remove_connection(self._host, self._connection, close=1)
self.close()
- def info(self):
- return self.headers
-
def geturl(self):
return self._url
def read(self, amt=None):
- # the _rbuf test is only in this first if for speed. It's not
- # logically necessary
- if self._rbuf and amt is not None:
- L = len(self._rbuf)
- if amt > L:
- amt -= L
- else:
- s = self._rbuf[:amt]
- self._rbuf = self._rbuf[amt:]
- return s
- # Careful! http.client.HTTPResponse.read() on Python 3 is
- # implemented using readinto(), which can duplicate self._rbuf
- # if it's not empty.
- s = self._rbuf
- self._rbuf = b''
- data = self._raw_read(amt)
-
+ data = super().read(amt)
self.receivedbytescount += len(data)
- try:
+ if self._connection is not None:
self._connection.receivedbytescount += len(data)
- except AttributeError:
- pass
- try:
+ if self._handler is not None:
self._handler.parent.receivedbytescount += len(data)
- except AttributeError:
- pass
-
- s += data
- return s
-
- # stolen from Python SVN #68532 to fix issue1088
- def _read_chunked(self, amt):
- chunk_left = self.chunk_left
- parts = []
-
- while True:
- if chunk_left is None:
- line = self.fp.readline()
- i = line.find(b';')
- if i >= 0:
- line = line[:i] # strip chunk-extensions
- try:
- chunk_left = int(line, 16)
- except ValueError:
- # close the connection as protocol synchronization is
- # probably lost
- self.close()
- raise httplib.IncompleteRead(b''.join(parts))
- if chunk_left == 0:
- break
- if amt is None:
- parts.append(self._safe_read(chunk_left))
- elif amt < chunk_left:
- parts.append(self._safe_read(amt))
- self.chunk_left = chunk_left - amt
- return b''.join(parts)
- elif amt == chunk_left:
- parts.append(self._safe_read(amt))
- self._safe_read(2) # toss the CRLF at the end of the chunk
- self.chunk_left = None
- return b''.join(parts)
- else:
- parts.append(self._safe_read(chunk_left))
- amt -= chunk_left
-
- # we read the whole chunk, get another
- self._safe_read(2) # toss the CRLF at the end of the chunk
- chunk_left = None
+ return data
- # read and discard trailer up to the CRLF terminator
- ### note: we shouldn't have any trailers!
- while True:
- line = self.fp.readline()
- if not line:
- # a vanishingly small number of sites EOF without
- # sending the trailer
- break
- if line == b'\r\n':
- break
-
- # we read everything; close the "file"
- self.close()
-
- return b''.join(parts)
-
- def readline(self):
- # Fast path for a line is already available in read buffer.
- i = self._rbuf.find(b'\n')
- if i >= 0:
- i += 1
- line = self._rbuf[:i]
- self._rbuf = self._rbuf[i:]
- return line
-
- # No newline in local buffer. Read until we find one.
- # readinto read via readinto will already return _rbuf
- if self._raw_readinto is None:
- chunks = [self._rbuf]
- else:
- chunks = []
- i = -1
- readsize = self._rbufsize
- while True:
- new = self._raw_read(readsize)
- if not new:
- break
-
- self.receivedbytescount += len(new)
- self._connection.receivedbytescount += len(new)
- try:
- self._handler.parent.receivedbytescount += len(new)
- except AttributeError:
- pass
-
- chunks.append(new)
- i = new.find(b'\n')
- if i >= 0:
- break
-
- # We either have exhausted the stream or have a newline in chunks[-1].
-
- # EOF
- if i == -1:
- self._rbuf = b''
- return b''.join(chunks)
-
- i += 1
- self._rbuf = chunks[-1][i:]
- chunks[-1] = chunks[-1][:i]
- return b''.join(chunks)
-
- def readlines(self, sizehint=0):
- total = 0
- list = []
- while True:
- line = self.readline()
- if not line:
- break
- list.append(line)
- total += len(line)
- if sizehint and total >= sizehint:
- break
- return list
+ def readline(self, limit: int = -1):
+ data = super().readline(limit=limit)
+ self.receivedbytescount += len(data)
+ if self._connection is not None:
+ self._connection.receivedbytescount += len(data)
+ if self._handler is not None:
+ self._handler.parent.receivedbytescount += len(data)
+ return data
def readinto(self, dest):
- if self._raw_readinto is None:
- res = self.read(len(dest))
- if not res:
- return 0
- dest[0 : len(res)] = res
- return len(res)
- total = len(dest)
- have = len(self._rbuf)
- if have >= total:
- dest[0:total] = self._rbuf[:total]
- self._rbuf = self._rbuf[total:]
- return total
- mv = memoryview(dest)
- got = self._raw_readinto(mv[have:total])
-
+ got = super().readinto(dest)
self.receivedbytescount += got
- self._connection.receivedbytescount += got
- try:
- self._handler.receivedbytescount += got
- except AttributeError:
- pass
-
- dest[0:have] = self._rbuf
- got += len(self._rbuf)
- self._rbuf = b''
+ if self._connection is not None:
+ self._connection.receivedbytescount += got
+ if self._handler is not None:
+ self._handler.parent.receivedbytescount += got
return got
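The accounting pattern adopted above can be shown in isolation (an
illustrative subclass; the real class also bumps counters on the connection
and the handler when present):

    import http.client

    class CountingHTTPResponse(http.client.HTTPResponse):
        """Defer to the stock implementation, then count received bytes."""

        receivedbytescount = 0

        def read(self, amt=None):
            data = super().read(amt)
            self.receivedbytescount += len(data)
            return data

        def readinto(self, dest):
            got = super().readinto(dest)
            self.receivedbytescount += got
            return got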
--- a/mercurial/linelog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/linelog.py Sat Oct 26 04:16:00 2024 +0200
@@ -19,10 +19,23 @@
in a new body of annotate information.
"""
+from __future__ import annotations
+
import abc
import struct
+import typing
+
+from typing import (
+ List,
+)
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import pycompat
_llentry = struct.Struct(b'>II')
@@ -45,7 +58,7 @@
@attr.s
class annotateresult:
rev = attr.ib()
- lines = attr.ib(type=bytearray)
+ lines = attr.ib(type=List[lineinfo])
_eof = attr.ib()
def __iter__(self):
@@ -53,7 +66,6 @@
class _llinstruction: # pytype: disable=ignored-metaclass
-
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
@@ -401,7 +413,7 @@
def annotate(self, rev):
pc = 1
- lines = []
+ lines: List[lineinfo] = []
executed = 0
# Sanity check: if instructions executed exceeds len(program), we
# hit an infinite loop in the linelog program somehow and we
--- a/mercurial/localrepo.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/localrepo.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import functools
import os
@@ -13,6 +14,7 @@
import re
import sys
import time
+import typing
import weakref
from concurrent import futures
@@ -75,9 +77,12 @@
wireprototypes,
)
+from .branching import (
+ rev_cache as rev_branch_cache,
+)
+
from .interfaces import (
repository,
- util as interfaceutil,
)
from .utils import (
@@ -254,8 +259,7 @@
legacycaps = moderncaps.union({b'changegroupsubset'})
-@interfaceutil.implementer(repository.ipeercommandexecutor)
-class localcommandexecutor:
+class localcommandexecutor: # (repository.ipeercommandexecutor)
def __init__(self, peer):
self._peer = peer
self._sent = False
@@ -300,8 +304,7 @@
self._closed = True
-@interfaceutil.implementer(repository.ipeercommands)
-class localpeer(repository.peer):
+class localpeer(repository.peer): # (repository.ipeercommands)
'''peer for a local repo; reflects only the most recent API'''
def __init__(self, repo, caps=None, path=None, remotehidden=False):
@@ -455,8 +458,7 @@
# End of peer interface.
-@interfaceutil.implementer(repository.ipeerlegacycommands)
-class locallegacypeer(localpeer):
+class locallegacypeer(localpeer): # (repository.ipeerlegacycommands)
"""peer extension which implements legacy methods too; used for tests with
restricted capabilities"""
@@ -523,20 +525,6 @@
return sharedvfs
-def _readrequires(vfs, allowmissing):
- """reads the require file present at root of this vfs
- and return a set of requirements
-
- If allowmissing is True, we suppress FileNotFoundError if raised"""
- # requires file contains a newline-delimited list of
- # features/capabilities the opener (us) must have in order to use
- # the repository. This file was introduced in Mercurial 0.9.2,
- # which means very old repositories may not have one. We assume
- # a missing file translates to no requirements.
- read = vfs.tryread if allowmissing else vfs.read
- return set(read(b'requires').splitlines())
-
-
def makelocalrepository(baseui, path: bytes, intents=None):
"""Create a local repository object.
@@ -598,7 +586,7 @@
raise error.RepoError(_(b'repository %s not found') % path)
- requirements = _readrequires(hgvfs, True)
+ requirements = scmutil.readrequires(hgvfs, True)
shared = (
requirementsmod.SHARED_REQUIREMENT in requirements
or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
@@ -626,7 +614,7 @@
if (
shared
and requirementsmod.SHARESAFE_REQUIREMENT
- not in _readrequires(sharedvfs, True)
+ not in scmutil.readrequires(sharedvfs, True)
):
mismatch_warn = ui.configbool(
b'share', b'safe-mismatch.source-not-safe.warn'
@@ -670,9 +658,9 @@
hint=hint,
)
else:
- requirements |= _readrequires(storevfs, False)
+ requirements |= scmutil.readrequires(storevfs, False)
elif shared:
- sourcerequires = _readrequires(sharedvfs, False)
+ sourcerequires = scmutil.readrequires(sharedvfs, False)
if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
mismatch_warn = ui.configbool(
@@ -1123,9 +1111,12 @@
if 0 <= chainspan:
delta_config.max_deltachain_span = chainspan
- mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
- if mmapindexthreshold is not None:
- data_config.mmap_index_threshold = mmapindexthreshold
+ has_populate = util.has_mmap_populate()
+ if ui.configbool(b'storage', b'revlog.mmap.index', has_populate):
+ data_config.mmap_index_threshold = ui.configbytes(
+ b'storage',
+ b'revlog.mmap.index:size-threshold',
+ )
withsparseread = ui.configbool(b'experimental', b'sparse-read')
srdensitythres = float(
@@ -1251,8 +1242,7 @@
return localrepository
-@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
-class revlogfilestorage:
+class revlogfilestorage: # (repository.ilocalrepositoryfilestorage)
"""File storage when using revlogs."""
def file(self, path):
@@ -1267,8 +1257,7 @@
return filelog.filelog(self.svfs, path, try_split=try_split)
-@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
-class revlognarrowfilestorage:
+class revlognarrowfilestorage: # (repository.ilocalrepositoryfilestorage)
"""File storage when using revlogs and narrow files."""
def file(self, path):
@@ -1305,9 +1294,16 @@
(repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
-
-@interfaceutil.implementer(repository.ilocalrepositorymain)
-class localrepository:
+_localrepo_base_classes = object
+
+if typing.TYPE_CHECKING:
+ _localrepo_base_classes = [
+ repository.ilocalrepositorymain,
+ repository.ilocalrepositoryfilestorage,
+ ]
+
+
+class localrepository(_localrepo_base_classes):
"""Main class for representing local repositories.
All local repositories are instances of this class.
@@ -2246,7 +2242,8 @@
@unfilteredmethod
def revbranchcache(self):
if not self._revbranchcache:
- self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
+ unfi = self.unfiltered()
+ self._revbranchcache = rev_branch_cache.revbranchcache(unfi)
return self._revbranchcache
def register_changeset(self, rev, changelogrevision):
--- a/mercurial/lock.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/lock.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import errno
@@ -110,7 +111,7 @@
raiseinterrupt(assertedsigs[0])
-def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
+def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs) -> "lock":
"""return an acquired lock or raise an a LockHeld exception
This function is responsible to issue warnings and or debug messages about
@@ -256,7 +257,7 @@
# wrapper around procutil.getpid() to make testing easier
return procutil.getpid()
- def lock(self):
+ def lock(self) -> int:
timeout = self.timeout
while True:
try:
@@ -272,7 +273,7 @@
errno.ETIMEDOUT, inst.filename, self.desc, inst.locker
)
- def _trylock(self):
+ def _trylock(self) -> None:
if self.held:
self.held += 1
return
--- a/mercurial/logcmdutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/logcmdutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,10 +5,12 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import itertools
import os
import posixpath
+import typing
from typing import (
Any,
@@ -24,6 +26,11 @@
from .thirdparty import attr
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
dagop,
diffutil,
@@ -569,6 +576,10 @@
functions that use changesest_templater.
"""
+ _tresources: formatter.templateresources
+ lastheader: Optional[bytes]
+ t: templater.templater
+
# Arguments before "buffered" used to be positional. Consider not
# adding/removing arguments before "buffered" to not break callers.
def __init__(
@@ -659,7 +670,7 @@
self.footer = self.t.render(self._parts[b'footer'], props)
-def templatespec(tmpl, mapfile):
+def templatespec(tmpl, mapfile) -> formatter.templatespec:
assert not (tmpl and mapfile)
if mapfile:
return formatter.mapfile_templatespec(b'changeset', mapfile)
@@ -667,7 +678,7 @@
return formatter.literal_templatespec(tmpl)
-def _lookuptemplate(ui, tmpl, style):
+def _lookuptemplate(ui, tmpl, style) -> formatter.templatespec:
"""Find the template matching the given template spec or style
See formatter.lookuptemplate() for details.
--- a/mercurial/logexchange.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/logexchange.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .node import hex
--- a/mercurial/loggingutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/loggingutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
--- a/mercurial/lsprof.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/lsprof.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import _lsprof
import sys
--- a/mercurial/lsprofcalltree.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/lsprofcalltree.py Sat Oct 26 04:16:00 2024 +0200
@@ -10,6 +10,7 @@
of the GNU General Public License, incorporated herein by reference.
"""
+from __future__ import annotations
from . import pycompat
--- a/mercurial/mail.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/mail.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import email
import email.charset
--- a/mercurial/manifest.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/manifest.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,12 +5,28 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import heapq
import itertools
import struct
import weakref
+from typing import (
+ ByteString,
+ Callable,
+ Collection,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+ cast,
+)
+
from .i18n import _
from .node import (
bin,
@@ -30,7 +46,6 @@
)
from .interfaces import (
repository,
- util as interfaceutil,
)
from .revlogutils import (
constants as revlog_constants,
@@ -43,7 +58,7 @@
FASTDELTA_TEXTDIFF_THRESHOLD = 1000
-def _parse(nodelen, data):
+def _parse(nodelen, data: bytes):
# This method does a little bit of excessive-looking
# precondition checking. This is so that the behavior of this
# class exactly matches its C counterpart to try and help
@@ -84,21 +99,23 @@
class lazymanifestiter:
- def __init__(self, lm):
+ def __init__(self, lm: '_LazyManifest') -> None:
self.pos = 0
self.lm = lm
- def __iter__(self):
+ def __iter__(self) -> 'lazymanifestiter':
return self
- def next(self):
+ def next(self) -> bytes:
try:
data, pos = self.lm._get(self.pos)
except IndexError:
raise StopIteration
if pos == -1:
+ assert isinstance(data, tuple)
self.pos += 1
return data[0]
+ assert isinstance(data, bytes)
self.pos += 1
zeropos = data.find(b'\x00', pos)
return data[pos:zeropos]
@@ -107,21 +124,23 @@
class lazymanifestiterentries:
- def __init__(self, lm):
+ def __init__(self, lm: '_LazyManifest') -> None:
self.lm = lm
self.pos = 0
- def __iter__(self):
+ def __iter__(self) -> 'lazymanifestiterentries':
return self
- def next(self):
+ def next(self) -> Tuple[bytes, bytes, bytes]:
try:
data, pos = self.lm._get(self.pos)
except IndexError:
raise StopIteration
if pos == -1:
+ assert isinstance(data, tuple)
self.pos += 1
return data
+ assert isinstance(data, bytes)
zeropos = data.find(b'\x00', pos)
nlpos = data.find(b'\n', pos)
if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
@@ -143,10 +162,10 @@
__next__ = next
-def unhexlify(data, extra, pos, length):
+def unhexlify(data: bytes, extra: int, pos, length: int):
s = bin(data[pos : pos + length])
if extra:
- s += chr(extra & 0xFF)
+ s += bytes([extra & 0xFF])
return s
@@ -157,7 +176,7 @@
_manifestflags = {b'', b'l', b't', b'x'}
-class _lazymanifest:
+class _LazyManifest:
"""A pure python manifest backed by a byte string. It is supplimented with
internal lists as it is modified, until it is compacted back to a pure byte
string.
@@ -177,12 +196,12 @@
def __init__(
self,
- nodelen,
- data,
+ nodelen: int,
+ data: bytes,
positions=None,
extrainfo=None,
extradata=None,
- hasremovals=False,
+ hasremovals: bool = False,
):
self._nodelen = nodelen
if positions is None:
@@ -198,7 +217,7 @@
self.data = data
self.hasremovals = hasremovals
- def findlines(self, data):
+ def findlines(self, data: bytes) -> List[int]:
if not data:
return []
pos = data.find(b"\n")
@@ -215,7 +234,9 @@
pos = data.find(b"\n", pos + 1)
return positions
- def _get(self, index):
+ def _get(
+ self, index: int
+ ) -> Tuple[Union[bytes, Tuple[bytes, bytes, bytes]], int]:
# get the position encoded in pos:
# positive number is an index in 'data'
# negative number is in extrapieces
@@ -224,12 +245,12 @@
return self.data, pos
return self.extradata[-pos - 1], -1
- def _getkey(self, pos):
+ def _getkey(self, pos) -> bytes:
if pos >= 0:
return self.data[pos : self.data.find(b'\x00', pos + 1)]
return self.extradata[-pos - 1][0]
- def bsearch(self, key):
+ def bsearch(self, key: bytes) -> int:
first = 0
last = len(self.positions) - 1
@@ -247,7 +268,7 @@
first = midpoint + 1
return -1
- def bsearch2(self, key):
+ def bsearch2(self, key: bytes) -> Tuple[int, bool]:
# same as the above, but will always return the position
# done for performance reasons
first = 0
@@ -267,10 +288,10 @@
first = midpoint + 1
return (first, False)
- def __contains__(self, key):
+ def __contains__(self, key: bytes) -> bool:
return self.bsearch(key) != -1
- def __getitem__(self, key):
+ def __getitem__(self, key: bytes) -> Tuple[bytes, bytes]:
if not isinstance(key, bytes):
raise TypeError(b"getitem: manifest keys must be a bytes.")
needle = self.bsearch(key)
@@ -278,7 +299,10 @@
raise KeyError
data, pos = self._get(needle)
if pos == -1:
+ assert isinstance(data, tuple)
return (data[1], data[2])
+
+ assert isinstance(data, bytes)
zeropos = data.find(b'\x00', pos)
nlpos = data.find(b'\n', zeropos)
assert 0 <= needle <= len(self.positions)
@@ -296,7 +320,7 @@
hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
return (hashval, flags)
- def __delitem__(self, key):
+ def __delitem__(self, key: bytes) -> None:
needle, found = self.bsearch2(key)
if not found:
raise KeyError
@@ -309,7 +333,7 @@
self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
self.hasremovals = True
- def __setitem__(self, key, value):
+ def __setitem__(self, key: bytes, value: Tuple[bytes, bytes]):
if not isinstance(key, bytes):
raise TypeError(b"setitem: manifest keys must be a byte string.")
if not isinstance(value, tuple) or len(value) != 2:
@@ -344,7 +368,7 @@
self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
)
- def copy(self):
+ def copy(self) -> '_LazyManifest':
# XXX call _compact like in C?
return _lazymanifest(
self._nodelen,
@@ -355,7 +379,7 @@
self.hasremovals,
)
- def _compact(self):
+ def _compact(self) -> None:
# hopefully not called TOO often
if len(self.extradata) == 0 and not self.hasremovals:
return
@@ -414,16 +438,23 @@
self.hasremovals = False
self.extradata = []
- def _pack(self, d):
+ def _pack(self, d: Tuple[bytes, bytes, bytes]) -> bytes:
n = d[1]
assert len(n) in (20, 32)
return d[0] + b'\x00' + hex(n) + d[2] + b'\n'
- def text(self):
+ def text(self) -> ByteString:
self._compact()
return self.data
- def diff(self, m2, clean=False):
+ def diff(
+ self, m2: '_LazyManifest', clean: bool = False
+ ) -> Dict[
+ bytes,
+ Optional[
+ Tuple[Tuple[Optional[bytes], bytes], Tuple[Optional[bytes], bytes]]
+ ],
+ ]:
'''Finds changes between the current manifest and m2.'''
# XXX think whether efficiency matters here
diff = {}
@@ -444,19 +475,19 @@
return diff
- def iterentries(self):
+ def iterentries(self) -> lazymanifestiterentries:
return lazymanifestiterentries(self)
- def iterkeys(self):
+ def iterkeys(self) -> lazymanifestiter:
return lazymanifestiter(self)
- def __iter__(self):
+ def __iter__(self) -> lazymanifestiter:
return lazymanifestiter(self)
- def __len__(self):
+ def __len__(self) -> int:
return len(self.positions)
- def filtercopy(self, filterfn):
+ def filtercopy(self, filterfn: Callable[[bytes], bool]) -> '_LazyManifest':
# XXX should be optimized
c = _lazymanifest(self._nodelen, b'')
for f, n, fl in self.iterentries():
@@ -468,52 +499,54 @@
try:
_lazymanifest = parsers.lazymanifest
except AttributeError:
- pass
-
-
-@interfaceutil.implementer(repository.imanifestdict)
-class manifestdict:
- def __init__(self, nodelen, data=b''):
+ _lazymanifest = _LazyManifest
+
+
+class manifestdict: # (repository.imanifestdict)
+ def __init__(self, nodelen: int, data: ByteString = b''):
self._nodelen = nodelen
self._lm = _lazymanifest(nodelen, data)
- def __getitem__(self, key):
+ def __getitem__(self, key: bytes) -> bytes:
return self._lm[key][0]
- def find(self, key):
+ def find(self, key: bytes) -> Tuple[bytes, bytes]:
return self._lm[key]
- def __len__(self):
+ def __len__(self) -> int:
return len(self._lm)
- def __nonzero__(self):
+ def __nonzero__(self) -> bool:
# nonzero is covered by the __len__ function, but implementing it here
# makes it easier for extensions to override.
return len(self._lm) != 0
__bool__ = __nonzero__
- def __setitem__(self, key, node):
+ def set(self, key: bytes, node: bytes, flags: bytes) -> None:
+ self._lm[key] = node, flags
+
+ def __setitem__(self, key: bytes, node: bytes) -> None:
self._lm[key] = node, self.flags(key)
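
A hedged usage sketch of the new `set()` beside `__setitem__` (the node ids are made up, and this assumes an environment where the patched `mercurial` package is importable):

from mercurial.manifest import manifestdict

m = manifestdict(20)
m.set(b'bin/tool', b'\x11' * 20, b'x')   # node and flags in one call
assert m.flags(b'bin/tool') == b'x'
m[b'bin/tool'] = b'\x22' * 20            # __setitem__ re-reads the stored flag
assert m.flags(b'bin/tool') == b'x'
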
- def __contains__(self, key):
+ def __contains__(self, key: bytes) -> bool:
if key is None:
return False
return key in self._lm
- def __delitem__(self, key):
+ def __delitem__(self, key: bytes) -> None:
del self._lm[key]
- def __iter__(self):
+ def __iter__(self) -> Iterator[bytes]:
return self._lm.__iter__()
- def iterkeys(self):
+ def iterkeys(self) -> Iterator[bytes]:
return self._lm.iterkeys()
- def keys(self):
+ def keys(self) -> List[bytes]:
return list(self.iterkeys())
- def filesnotin(self, m2, match=None):
+ def filesnotin(self, m2, match=None) -> Set[bytes]:
'''Set of files in this manifest that are not in the other'''
if match is not None:
match = matchmod.badmatch(match, lambda path, msg: None)
@@ -522,16 +555,16 @@
return {f for f in self if f not in m2}
@propertycache
- def _dirs(self):
+ def _dirs(self) -> pathutil.dirs:
return pathutil.dirs(self)
- def dirs(self):
+ def dirs(self) -> pathutil.dirs:
return self._dirs
- def hasdir(self, dir):
+ def hasdir(self, dir: bytes) -> bool:
return dir in self._dirs
- def _filesfastpath(self, match):
+ def _filesfastpath(self, match: matchmod.basematcher) -> bool:
"""Checks whether we can correctly and quickly iterate over matcher
files instead of over manifest files."""
files = match.files()
@@ -540,7 +573,7 @@
or (match.prefix() and all(fn in self for fn in files))
)
- def walk(self, match):
+ def walk(self, match: matchmod.basematcher) -> Iterator[bytes]:
"""Generates matching file names.
Equivalent to manifest.matches(match).iterkeys(), but without creating
@@ -577,7 +610,7 @@
if not self.hasdir(fn):
match.bad(fn, None)
- def _matches(self, match):
+ def _matches(self, match: matchmod.basematcher) -> 'manifestdict':
'''generate a new manifest filtered by the match argument'''
if match.always():
return self.copy()
@@ -594,7 +627,17 @@
m._lm = self._lm.filtercopy(match)
return m
- def diff(self, m2, match=None, clean=False):
+ def diff(
+ self,
+ m2: 'manifestdict',
+ match: Optional[matchmod.basematcher] = None,
+ clean: bool = False,
+ ) -> Dict[
+ bytes,
+ Optional[
+ Tuple[Tuple[Optional[bytes], bytes], Tuple[Optional[bytes], bytes]]
+ ],
+ ]:
"""Finds changes between the current manifest and m2.
Args:
@@ -615,42 +658,44 @@
return m1.diff(m2, clean=clean)
return self._lm.diff(m2._lm, clean)
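
The freshly spelled-out return type reads more easily with a concrete value. A hand-written illustration of what each changed path maps to, not real output:

# ((node-in-self, flag-in-self), (node-in-m2, flag-in-m2)); the side
# missing the file carries None for its node.
example_diff = {
    b'added.txt':   ((None, b''), (b'\xaa' * 20, b'')),
    b'removed.txt': ((b'\xbb' * 20, b''), (None, b'')),
    b'chmod-ed.sh': ((b'\xcc' * 20, b''), (b'\xcc' * 20, b'x')),
    b'clean.txt':   None,  # emitted only when clean=True
}
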
- def setflag(self, key, flag):
+ def setflag(self, key: bytes, flag: bytes) -> None:
if flag not in _manifestflags:
raise TypeError(b"Invalid manifest flag set.")
self._lm[key] = self[key], flag
- def get(self, key, default=None):
+ def get(self, key: bytes, default=None) -> Optional[bytes]:
try:
return self._lm[key][0]
except KeyError:
return default
- def flags(self, key):
+ def flags(self, key: bytes) -> bytes:
try:
return self._lm[key][1]
except KeyError:
return b''
- def copy(self):
+ def copy(self) -> 'manifestdict':
c = manifestdict(self._nodelen)
c._lm = self._lm.copy()
return c
- def items(self):
+ def items(self) -> Iterator[Tuple[bytes, bytes]]:
return (x[:2] for x in self._lm.iterentries())
- def iteritems(self):
+ def iteritems(self) -> Iterator[Tuple[bytes, bytes]]:
return (x[:2] for x in self._lm.iterentries())
- def iterentries(self):
+ def iterentries(self) -> Iterator[Tuple[bytes, bytes, bytes]]:
return self._lm.iterentries()
- def text(self):
+ def text(self) -> ByteString:
# most likely uses native version
return self._lm.text()
- def fastdelta(self, base, changes):
+ def fastdelta(
+ self, base: ByteString, changes: Iterable[Tuple[bytes, bool]]
+ ) -> Tuple[ByteString, ByteString]:
"""Given a base manifest text as a bytearray and a list of changes
relative to that text, compute a delta that can be used by revlog.
"""
@@ -686,13 +731,13 @@
dline.append(l)
else:
if dstart is not None:
- delta.append([dstart, dend, b"".join(dline)])
+ delta.append((dstart, dend, b"".join(dline)))
dstart = start
dend = end
dline = [l]
if dstart is not None:
- delta.append([dstart, dend, b"".join(dline)])
+ delta.append((dstart, dend, b"".join(dline)))
# apply the delta to the base, and get a delta for addrevision
deltatext, arraytext = _addlistdelta(base, delta)
else:
@@ -706,17 +751,17 @@
return arraytext, deltatext
-def _msearch(m, s, lo=0, hi=None):
+def _msearch(
+ m: ByteString, s: bytes, lo: int = 0, hi: Optional[int] = None
+) -> Tuple[int, int]:
"""return a tuple (start, end) that says where to find s within m.
If the string is found m[start:end] are the line containing
that string. If start == end the string was not found and
they indicate the proper sorted insertion point.
-
- m should be a buffer, a memoryview or a byte string.
- s is a byte string"""
-
- def advance(i, c):
+ """
+
+ def advance(i: int, c: bytes):
while i < lenm and m[i : i + 1] != c:
i += 1
return i
@@ -749,7 +794,7 @@
return (lo, lo)
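
A linear-scan restatement of the `_msearch` contract, for illustration only (the real function bisects over line starts, which is why the sorted manifest layout matters):

def msearch_demo(m: bytes, s: bytes):
    # (start, end) of the line beginning with s, or (pos, pos) at the
    # sorted insertion point when s is absent
    pos = 0
    for line in m.splitlines(keepends=True):
        if line.startswith(s):
            return (pos, pos + len(line))
        if line > s:
            return (pos, pos)
        pos += len(line)
    return (pos, pos)

text = b'bar\x00aa\nfoo\x00bb\n'
start, end = msearch_demo(text, b'foo')
assert text[start:end] == b'foo\x00bb\n'
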
-def _checkforbidden(l):
+def _checkforbidden(l: Iterable[bytes]) -> None:
"""Check filenames for illegal characters."""
for f in l:
if b'\n' in f or b'\r' in f:
@@ -761,7 +806,10 @@
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
-def _addlistdelta(addlist, x):
+def _addlistdelta(
+ addlist: ByteString,
+ x: Iterable[Tuple[int, int, bytes]],
+) -> Tuple[bytes, ByteString]:
# for large addlist arrays, building a new array is cheaper
# than repeatedly modifying the existing one
currentposition = 0
@@ -783,7 +831,7 @@
return deltatext, newaddlist
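
A simplified model of what the `(start, end, content)` tuples encode; the real `_addlistdelta` also emits a binary header per chunk so revlog can reuse the delta, which this sketch omits:

def apply_ranges(base: bytes, deltas) -> bytes:
    # each (start, end, content) replaces base[start:end] with content
    out = []
    pos = 0
    for start, end, content in deltas:
        out.append(base[pos:start])
        out.append(content)
        pos = end
    out.append(base[pos:])
    return b''.join(out)

assert apply_ranges(b'a\nb\nc\n', [(2, 4, b'B\n')]) == b'a\nB\nc\n'
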
-def _splittopdir(f):
+def _splittopdir(f: bytes) -> Tuple[bytes, bytes]:
if b'/' in f:
dir, subpath = f.split(b'/', 1)
return dir + b'/', subpath
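
The `else` branch is cut off by the hunk; a path without a slash comes back unchanged with an empty directory part. Restated as a tiny self-contained demo:

def splittopdir_demo(f: bytes):
    if b'/' in f:
        d, subpath = f.split(b'/', 1)
        return d + b'/', subpath
    return b'', f

assert splittopdir_demo(b'a/b/c.txt') == (b'a/', b'b/c.txt')
assert splittopdir_demo(b'top.txt') == (b'', b'top.txt')
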
@@ -794,9 +842,14 @@
_noop = lambda s: None
-@interfaceutil.implementer(repository.imanifestdict)
-class treemanifest:
- def __init__(self, nodeconstants, dir=b'', text=b''):
+class treemanifest: # (repository.imanifestdict)
+ _dir: bytes
+ _dirs: Dict[bytes, 'treemanifest']
+ _dirty: bool
+ _files: Dict[bytes, bytes]
+ _flags: Dict[bytes, bytes]
+
+ def __init__(self, nodeconstants, dir: bytes = b'', text: bytes = b''):
self._dir = dir
self.nodeconstants = nodeconstants
self._node = self.nodeconstants.nullid
@@ -805,7 +858,10 @@
self._copyfunc = _noop
self._dirty = False
self._dirs = {}
- self._lazydirs = {}
+ self._lazydirs: Dict[
+ bytes,
+ Tuple[bytes, Callable[[bytes, bytes], 'treemanifest'], bool],
+ ] = {}
# Using _lazymanifest here is a little slower than plain old dicts
self._files = {}
self._flags = {}
@@ -819,10 +875,10 @@
self.parse(text, readsubtree)
self._dirty = True # Mark flat manifest dirty after parsing
- def _subpath(self, path):
+ def _subpath(self, path: bytes) -> bytes:
return self._dir + path
- def _loadalllazy(self):
+ def _loadalllazy(self) -> None:
selfdirs = self._dirs
subpath = self._subpath
for d, (node, readsubtree, docopy) in self._lazydirs.items():
@@ -830,11 +886,11 @@
selfdirs[d] = readsubtree(subpath(d), node).copy()
else:
selfdirs[d] = readsubtree(subpath(d), node)
- self._lazydirs = {}
-
- def _loadlazy(self, d):
+ self._lazydirs.clear()
+
+ def _loadlazy(self, d: bytes) -> None:
v = self._lazydirs.get(d)
- if v:
+ if v is not None:
node, readsubtree, docopy = v
if docopy:
self._dirs[d] = readsubtree(self._subpath(d), node).copy()
@@ -842,19 +898,23 @@
self._dirs[d] = readsubtree(self._subpath(d), node)
del self._lazydirs[d]
- def _loadchildrensetlazy(self, visit):
+ def _loadchildrensetlazy(
+ self, visit: Union[Set[bytes], bytes]
+ ) -> Optional[Set[bytes]]:
if not visit:
return None
if visit == b'all' or visit == b'this':
self._loadalllazy()
return None
+ visit = cast(Set[bytes], visit)
+
loadlazy = self._loadlazy
for k in visit:
loadlazy(k + b'/')
return visit
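
The `_lazydirs` annotation added above pins down the shape these helpers share. A toy model (illustrative names) of how a `(node, factory, docopy)` entry is materialized on first touch:

class LazyTreeDemo:
    def __init__(self):
        self.dirs = {}      # loaded children
        self.lazydirs = {}  # name -> (node, factory, docopy)

    def loadlazy(self, d):
        v = self.lazydirs.get(d)
        if v is not None:  # mirrors the explicit check adopted above
            node, factory, docopy = v
            sub = factory(d, node)
            self.dirs[d] = sub.copy() if docopy else sub
            del self.lazydirs[d]
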
- def _loaddifflazy(self, t1, t2):
+ def _loaddifflazy(self, t1: 'treemanifest', t2: 'treemanifest'):
"""load items in t1 and t2 if they're needed for diffing.
The criteria currently is:
@@ -866,7 +926,7 @@
toloadlazy = []
for d, v1 in t1._lazydirs.items():
v2 = t2._lazydirs.get(d)
- if not v2 or v2[0] != v1[0]:
+ if v2 is None or v2[0] != v1[0]:
toloadlazy.append(d)
for d, v1 in t2._lazydirs.items():
if d not in t1._lazydirs:
@@ -876,7 +936,7 @@
t1._loadlazy(d)
t2._loadlazy(d)
- def __len__(self):
+ def __len__(self) -> int:
self._load()
size = len(self._files)
self._loadalllazy()
@@ -884,13 +944,13 @@
size += m.__len__()
return size
- def __nonzero__(self):
- # Faster than "__len() != 0" since it avoids loading sub-manifests
+ def __nonzero__(self) -> bool:
+ # Faster than "__len__() != 0" since it avoids loading sub-manifests
return not self._isempty()
__bool__ = __nonzero__
- def _isempty(self):
+ def _isempty(self) -> bool:
self._load() # for consistency; already loaded by all callers
# See if we can skip loading everything.
if self._files or (
@@ -901,7 +961,7 @@
return not self._dirs or all(m._isempty() for m in self._dirs.values())
@encoding.strmethod
- def __repr__(self):
+ def __repr__(self) -> bytes:
return (
b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
% (
@@ -913,23 +973,25 @@
)
)
- def dir(self):
+ def dir(self) -> bytes:
"""The directory that this tree manifest represents, including a
trailing '/'. Empty string for the repo root directory."""
return self._dir
- def node(self):
+ def node(self) -> bytes:
"""This node of this instance. nullid for unsaved instances. Should
be updated when the instance is read or written from a revlog.
"""
assert not self._dirty
return self._node
- def setnode(self, node):
+ def setnode(self, node: bytes) -> None:
self._node = node
self._dirty = False
- def iterentries(self):
+ def iterentries(
+ self,
+ ) -> Iterator[Tuple[bytes, Union[bytes, 'treemanifest'], bytes]]:
self._load()
self._loadalllazy()
for p, n in sorted(
@@ -941,7 +1003,7 @@
for x in n.iterentries():
yield x
- def items(self):
+ def items(self) -> Iterator[Tuple[bytes, Union[bytes, 'treemanifest']]]:
self._load()
self._loadalllazy()
for p, n in sorted(
@@ -955,7 +1017,7 @@
iteritems = items
- def iterkeys(self):
+ def iterkeys(self) -> Iterator[bytes]:
self._load()
self._loadalllazy()
for p in sorted(itertools.chain(self._dirs, self._files)):
@@ -965,13 +1027,13 @@
for f in self._dirs[p]:
yield f
- def keys(self):
+ def keys(self) -> List[bytes]:
return list(self.iterkeys())
- def __iter__(self):
+ def __iter__(self) -> Iterator[bytes]:
return self.iterkeys()
- def __contains__(self, f):
+ def __contains__(self, f: bytes) -> bool:
if f is None:
return False
self._load()
@@ -986,7 +1048,7 @@
else:
return f in self._files
- def get(self, f, default=None):
+ def get(self, f: bytes, default: Optional[bytes] = None) -> Optional[bytes]:
self._load()
dir, subpath = _splittopdir(f)
if dir:
@@ -998,7 +1060,7 @@
else:
return self._files.get(f, default)
- def __getitem__(self, f):
+ def __getitem__(self, f: bytes) -> bytes:
self._load()
dir, subpath = _splittopdir(f)
if dir:
@@ -1008,7 +1070,7 @@
else:
return self._files[f]
- def flags(self, f):
+ def flags(self, f: bytes) -> bytes:
self._load()
dir, subpath = _splittopdir(f)
if dir:
@@ -1022,7 +1084,7 @@
return b''
return self._flags.get(f, b'')
- def find(self, f):
+ def find(self, f: bytes) -> Tuple[bytes, bytes]:
self._load()
dir, subpath = _splittopdir(f)
if dir:
@@ -1032,7 +1094,7 @@
else:
return self._files[f], self._flags.get(f, b'')
- def __delitem__(self, f):
+ def __delitem__(self, f: bytes) -> None:
self._load()
dir, subpath = _splittopdir(f)
if dir:
@@ -1048,7 +1110,27 @@
del self._flags[f]
self._dirty = True
- def __setitem__(self, f, n):
+ def set(self, f: bytes, node: bytes, flags: bytes) -> None:
+ """Set both the node and the flags for path f."""
+ assert node is not None
+ if flags not in _manifestflags:
+ raise TypeError(b"Invalid manifest flag set.")
+ self._load()
+ dir, subpath = _splittopdir(f)
+ if dir:
+ self._loadlazy(dir)
+ if dir not in self._dirs:
+ self._dirs[dir] = treemanifest(
+ self.nodeconstants, self._subpath(dir)
+ )
+ self._dirs[dir].set(subpath, node, flags)
+ else:
+ assert len(node) in (20, 32)
+ self._files[f] = node
+ self._flags[f] = flags
+ self._dirty = True
+
+ def __setitem__(self, f: bytes, n: bytes) -> None:
assert n is not None
self._load()
dir, subpath = _splittopdir(f)
@@ -1067,7 +1149,7 @@
self._files[f] = n
self._dirty = True
- def _load(self):
+ def _load(self) -> None:
if self._loadfunc is not _noop:
lf, self._loadfunc = self._loadfunc, _noop
lf(self)
@@ -1075,7 +1157,7 @@
cf, self._copyfunc = self._copyfunc, _noop
cf(self)
- def setflag(self, f, flags):
+ def setflag(self, f: bytes, flags: bytes) -> None:
"""Set the flags (symlink, executable) for path f."""
if flags not in _manifestflags:
raise TypeError(b"Invalid manifest flag set.")
@@ -1092,7 +1174,7 @@
self._flags[f] = flags
self._dirty = True
- def copy(self):
+ def copy(self) -> 'treemanifest':
copy = treemanifest(self.nodeconstants, self._dir)
copy._node = self._node
copy._dirty = self._dirty
@@ -1117,7 +1199,9 @@
copy._copyfunc = self._copyfunc
return copy
- def filesnotin(self, m2, match=None):
+ def filesnotin(
+ self, m2: 'treemanifest', match: Optional[matchmod.basematcher] = None
+ ) -> Set[bytes]:
'''Set of files in this manifest that are not in the other'''
if match and not match.always():
m1 = self._matches(match)
@@ -1147,13 +1231,13 @@
return files
@propertycache
- def _alldirs(self):
+ def _alldirs(self) -> pathutil.dirs:
return pathutil.dirs(self)
- def dirs(self):
+ def dirs(self) -> pathutil.dirs:
return self._alldirs
- def hasdir(self, dir):
+ def hasdir(self, dir: bytes) -> bool:
self._load()
topdir, subdir = _splittopdir(dir)
if topdir:
@@ -1164,7 +1248,7 @@
dirslash = dir + b'/'
return dirslash in self._dirs or dirslash in self._lazydirs
- def walk(self, match):
+ def walk(self, match: matchmod.basematcher) -> Iterator[bytes]:
"""Generates matching file names.
It also reports nonexistent files by marking them bad with match.bad().
@@ -1190,7 +1274,7 @@
if not self.hasdir(fn):
match.bad(fn, None)
- def _walk(self, match):
+ def _walk(self, match: matchmod.basematcher) -> Iterator[bytes]:
'''Recursively generates matching file names for walk().'''
visit = match.visitchildrenset(self._dir[:-1])
if not visit:
@@ -1209,13 +1293,13 @@
for f in self._dirs[p]._walk(match):
yield f
- def _matches(self, match):
+ def _matches(self, match: matchmod.basematcher) -> 'treemanifest':
"""recursively generate a new manifest filtered by the match argument."""
if match.always():
return self.copy()
return self._matches_inner(match)
- def _matches_inner(self, match):
+ def _matches_inner(self, match: matchmod.basematcher) -> 'treemanifest':
if match.always():
return self.copy()
@@ -1256,10 +1340,22 @@
ret._dirty = True
return ret
- def fastdelta(self, base, changes):
+ def fastdelta(
+ self, base: ByteString, changes: Iterable[Tuple[bytes, bool]]
+ ) -> ByteString:
raise FastdeltaUnavailable()
- def diff(self, m2, match=None, clean=False):
+ def diff(
+ self,
+ m2: 'treemanifest',
+ match: Optional[matchmod.basematcher] = None,
+ clean: bool = False,
+ ) -> Dict[
+ bytes,
+ Optional[
+ Tuple[Tuple[Optional[bytes], bytes], Tuple[Optional[bytes], bytes]]
+ ],
+ ]:
"""Finds changes between the current manifest and m2.
Args:
@@ -1320,10 +1416,14 @@
_iterativediff(t1, t2, stackls)
return result
- def unmodifiedsince(self, m2):
+ def unmodifiedsince(self, m2: 'treemanifest') -> bool:
return not self._dirty and not m2._dirty and self._node == m2._node
- def parse(self, text, readsubtree):
+ def parse(
+ self,
+ text: bytes,
+ readsubtree: Callable[[bytes, bytes], 'treemanifest'],
+ ) -> None:
selflazy = self._lazydirs
for f, n, fl in _parse(self._nodelen, text):
if fl == b't':
@@ -1346,12 +1446,12 @@
if fl:
self._flags[f] = fl
- def text(self):
+ def text(self) -> ByteString:
"""Get the full data of this manifest as a bytestring."""
self._load()
return _text(self.iterentries())
- def dirtext(self):
+ def dirtext(self) -> ByteString:
"""Get the full data of this directory as a bytestring. Make sure that
any submanifests have been written first, so their nodeids are correct.
"""
@@ -1362,14 +1462,32 @@
files = [(f, self._files[f], flags(f)) for f in self._files]
return _text(sorted(dirs + files + lazydirs))
- def read(self, gettext, readsubtree):
+ def read(
+ self,
+ gettext: Callable[[], ByteString],
+ readsubtree: Callable[[bytes, bytes], 'treemanifest'],
+ ) -> None:
def _load_for_read(s):
s.parse(gettext(), readsubtree)
s._dirty = False
self._loadfunc = _load_for_read
- def writesubtrees(self, m1, m2, writesubtree, match):
+ def writesubtrees(
+ self,
+ m1: 'treemanifest',
+ m2: 'treemanifest',
+ writesubtree: Callable[
+ [
+ Callable[['treemanifest'], None],
+ bytes,
+ bytes,
+ matchmod.basematcher,
+ ],
+ None,
+ ],
+ match: matchmod.basematcher,
+ ) -> None:
self._load() # for consistency; should never have any effect here
m1._load()
m2._load()
@@ -1379,7 +1497,9 @@
ld = m._lazydirs.get(d)
if ld:
return ld[0]
- return m._dirs.get(d, emptytree)._node
+ tree = m._dirs.get(d, emptytree)
+ assert tree is not None # helps pytype
+ return tree._node
# let's skip investigating things that `match` says we do not need.
visit = match.visitchildrenset(self._dir[:-1])
@@ -1395,7 +1515,9 @@
subp1, subp2 = subp2, subp1
writesubtree(subm, subp1, subp2, match)
- def walksubtrees(self, matcher=None):
+ def walksubtrees(
+ self, matcher: Optional[matchmod.basematcher] = None
+ ) -> Iterator['treemanifest']:
"""Returns an iterator of the subtrees of this manifest, including this
manifest itself.
@@ -1548,8 +1670,7 @@
"""Exception raised when fastdelta isn't usable on a manifest."""
-@interfaceutil.implementer(repository.imanifeststorage)
-class manifestrevlog:
+class manifestrevlog: # (repository.imanifeststorage)
"""A revlog that stores manifest texts. This is responsible for caching the
full-text manifest contents.
"""
@@ -1578,10 +1699,12 @@
# stacks of commits, the number can go up, hence the config knob below.
cachesize = 4
optiontreemanifest = False
+ persistentnodemap = False
opts = getattr(opener, 'options', None)
if opts is not None:
cachesize = opts.get(b'manifestcachesize', cachesize)
optiontreemanifest = opts.get(b'treemanifest', False)
+ persistentnodemap = opts.get(b'persistent-nodemap', False)
self._treeondisk = optiontreemanifest or treemanifest
@@ -1610,7 +1733,7 @@
checkambig=not bool(tree),
mmaplargeindex=True,
upperboundcomp=MAXCOMPRESSION,
- persistentnodemap=opener.options.get(b'persistent-nodemap', False),
+ persistentnodemap=persistentnodemap,
)
self.index = self._revlog.index
@@ -1656,7 +1779,7 @@
def fulltextcache(self):
return self._fulltextcache
- def clearcaches(self, clear_persisted_data=False):
+ def clearcaches(self, clear_persisted_data: bool = False) -> None:
self._revlog.clearcaches()
self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
self._dirlogcache = {self.tree: self}
@@ -1682,8 +1805,8 @@
link,
p1,
p2,
- added,
- removed,
+ added: Iterable[bytes],
+ removed: Iterable[bytes],
readtree=None,
match=None,
):
@@ -1920,8 +2043,13 @@
self._revlog.opener = value
-@interfaceutil.implementer(repository.imanifestlog)
-class manifestlog:
+# TODO: drop this in favor of repository.imanifestrevisionstored?
+AnyManifestCtx = Union['manifestctx', 'treemanifestctx']
+# TODO: drop this in favor of repository.imanifestdict
+AnyManifestDict = Union[manifestdict, treemanifest]
+
+
+class manifestlog: # (repository.imanifestlog)
"""A collection class representing the collection of manifest snapshots
referenced by commits in the repository.
@@ -1958,7 +2086,13 @@
"""
return self.get(b'', node)
- def get(self, tree, node, verify=True):
+ @property
+ def narrowed(self):
+ return not (self._narrowmatch is None or self._narrowmatch.always())
+
+ def get(
+ self, tree: bytes, node: bytes, verify: bool = True
+ ) -> AnyManifestCtx:
"""Retrieves the manifest instance for the given node. Throws a
LookupError if not found.
@@ -2008,32 +2142,33 @@
def getstorage(self, tree):
return self._rootstore.dirlog(tree)
- def clearcaches(self, clear_persisted_data=False):
+ def clearcaches(self, clear_persisted_data: bool = False) -> None:
self._dirmancache.clear()
self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
- def rev(self, node):
+ def rev(self, node) -> int:
return self._rootstore.rev(node)
- def update_caches(self, transaction):
+ def update_caches(self, transaction) -> None:
return self._rootstore._revlog.update_caches(transaction=transaction)
-@interfaceutil.implementer(repository.imanifestrevisionwritable)
-class memmanifestctx:
+class memmanifestctx: # (repository.imanifestrevisionwritable)
+ _manifestdict: manifestdict
+
def __init__(self, manifestlog):
self._manifestlog = manifestlog
self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
- def _storage(self):
+ def _storage(self) -> manifestrevlog:
return self._manifestlog.getstorage(b'')
- def copy(self):
+ def copy(self) -> 'memmanifestctx':
memmf = memmanifestctx(self._manifestlog)
memmf._manifestdict = self.read().copy()
return memmf
- def read(self):
+ def read(self) -> 'manifestdict':
return self._manifestdict
def write(self, transaction, link, p1, p2, added, removed, match=None):
@@ -2049,12 +2184,13 @@
)
-@interfaceutil.implementer(repository.imanifestrevisionstored)
-class manifestctx:
+class manifestctx: # (repository.imanifestrevisionstored)
"""A class representing a single revision of a manifest, including its
contents, its parent revs, and its linkrev.
"""
+ _data: Optional[manifestdict]
+
def __init__(self, manifestlog, node):
self._manifestlog = manifestlog
self._data = None
@@ -2068,22 +2204,22 @@
# rev = store.rev(node)
# self.linkrev = store.linkrev(rev)
- def _storage(self):
+ def _storage(self) -> 'manifestrevlog':
return self._manifestlog.getstorage(b'')
- def node(self):
+ def node(self) -> bytes:
return self._node
- def copy(self):
+ def copy(self) -> memmanifestctx:
memmf = memmanifestctx(self._manifestlog)
memmf._manifestdict = self.read().copy()
return memmf
@propertycache
- def parents(self):
+ def parents(self) -> Tuple[bytes, bytes]:
return self._storage().parents(self._node)
- def read(self):
+ def read(self) -> 'manifestdict':
if self._data is None:
nc = self._manifestlog.nodeconstants
if self._node == nc.nullid:
@@ -2099,13 +2235,18 @@
self._data = manifestdict(nc.nodelen, text)
return self._data
- def readfast(self, shallow=False):
+ def readfast(self, shallow: bool = False) -> 'manifestdict':
"""Calls either readdelta or read, based on which would be less work.
readdelta is called if the delta is against the p1, and therefore can be
read quickly.
If `shallow` is True, nothing changes since this is a flat manifest.
"""
+ util.nouideprecwarn(
+ b'"readfast" is deprecated, use "read_any_fast_delta" or "read_delta_parents"',
+ b"6.9",
+ stacklevel=2,
+ )
store = self._storage()
r = store.rev(self._node)
deltaparent = store.deltaparent(r)
@@ -2113,38 +2254,113 @@
return self.readdelta()
return self.read()
- def readdelta(self, shallow=False):
+ def readdelta(self, shallow: bool = False) -> 'manifestdict':
"""Returns a manifest containing just the entries that are present
in this manifest, but not in its p1 manifest. This is efficient to read
if the revlog delta is already p1.
Changing the value of `shallow` has no effect on flat manifests.
"""
+ util.nouideprecwarn(
+ b'"readdelta" is deprecated, use "read_any_fast_delta" or "read_delta_new_entries"',
+ b"6.9",
+ stacklevel=2,
+ )
store = self._storage()
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
return manifestdict(store.nodeconstants.nodelen, d)
- def find(self, key):
+ def read_any_fast_delta(
+ self,
+ valid_bases: Optional[Collection[int]] = None,
+ *,
+ shallow: bool = False,
+ ) -> Tuple[Optional[int], manifestdict]:
+ """see `imanifestrevisionstored` documentation"""
+ store = self._storage()
+ r = store.rev(self._node)
+ deltaparent = store.deltaparent(r)
+ if valid_bases is None:
+ # make sure the next check is True
+ valid_bases = (deltaparent,)
+ if deltaparent != nullrev and deltaparent in valid_bases:
+ d = mdiff.patchtext(store.revdiff(deltaparent, r))
+ return (
+ deltaparent,
+ manifestdict(store.nodeconstants.nodelen, d),
+ )
+ return (None, self.read())
+
+ def read_delta_parents(
+ self,
+ *,
+ shallow: bool = False,
+ exact: bool = True,
+ ) -> manifestdict:
+ """see `interface.imanifestrevisionbase` documentation"""
+ store = self._storage()
+ r = store.rev(self._node)
+ deltaparent = store.deltaparent(r)
+ parents = [p for p in store.parentrevs(r) if p is not nullrev]
+ if not exact and deltaparent in parents:
+ d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
+ return manifestdict(store.nodeconstants.nodelen, d)
+ elif not exact or len(parents) == 0:
+ return self.read()
+ elif len(parents) == 1:
+ p = parents[0]
+ d = mdiff.patchtext(store.revdiff(p, r))
+ return manifestdict(store.nodeconstants.nodelen, d)
+ else:
+ p1, p2 = parents
+ d1 = mdiff.patchtext(store.revdiff(p1, r))
+ d2 = mdiff.patchtext(store.revdiff(p2, r))
+ d1 = manifestdict(store.nodeconstants.nodelen, d1)
+ d2 = manifestdict(store.nodeconstants.nodelen, d2)
+ md = manifestdict(store.nodeconstants.nodelen)
+ for f, new_node, new_flag in d1.iterentries():
+ if f not in d2:
+ continue
+ if new_node is not None:
+ md.set(f, new_node, new_flag)
+ return md
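+
+ # The two-parent branch above keeps an entry only when it shows up in
+ # the deltas against both parents, i.e. when the file is genuinely new
+ # or changed relative to each of them. A toy restatement with plain
+ # dicts, for illustration only:
+ #
+ # def delta_vs_both(d1, d2):
+ #     # d1/d2 map path -> node for entries differing from p1/p2
+ #     return {f: n for f, n in d1.items() if f in d2 and n is not None}
+ #
+ # d1 = {b'a': b'n1', b'b': b'n2'}  # differs from p1
+ # d2 = {b'b': b'n2', b'c': b'n3'}  # differs from p2
+ # assert delta_vs_both(d1, d2) == {b'b': b'n2'}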
+
+ def read_delta_new_entries(self, *, shallow=False) -> manifestdict:
+ """see `interface.imanifestrevisionbase` documentation"""
+ # If we are using narrow, returning a delta against an arbitrary
+ # changeset might return files outside the narrowspec. This can create
+ # issues when running validation server side with strict security, as a
+ # push from a low-privilege user might be seen as adding new revisions
+ # for files it cannot touch. So we are strict if narrow is involved.
+ if self._manifestlog.narrowed:
+ return self.read_delta_parents(shallow=shallow, exact=True)
+ store = self._storage()
+ r = store.rev(self._node)
+ d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
+ return manifestdict(store.nodeconstants.nodelen, d)
+
+ def find(self, key: bytes) -> Tuple[bytes, bytes]:
return self.read().find(key)
-@interfaceutil.implementer(repository.imanifestrevisionwritable)
-class memtreemanifestctx:
+class memtreemanifestctx: # (repository.imanifestrevisionwritable)
+ _treemanifest: treemanifest
+
def __init__(self, manifestlog, dir=b''):
self._manifestlog = manifestlog
self._dir = dir
self._treemanifest = treemanifest(manifestlog.nodeconstants)
- def _storage(self):
+ def _storage(self) -> manifestrevlog:
return self._manifestlog.getstorage(b'')
- def copy(self):
+ def copy(self) -> 'memtreemanifestctx':
memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
memmf._treemanifest = self._treemanifest.copy()
return memmf
- def read(self):
+ def read(self) -> 'treemanifest':
return self._treemanifest
def write(self, transaction, link, p1, p2, added, removed, match=None):
@@ -2164,8 +2380,9 @@
)
-@interfaceutil.implementer(repository.imanifestrevisionstored)
-class treemanifestctx:
+class treemanifestctx: # (repository.imanifestrevisionstored)
+ _data: Optional[treemanifest]
+
def __init__(self, manifestlog, dir, node):
self._manifestlog = manifestlog
self._dir = dir
@@ -2180,7 +2397,7 @@
# rev = store.rev(node)
# self.linkrev = store.linkrev(rev)
- def _storage(self):
+ def _storage(self) -> manifestrevlog:
narrowmatch = self._manifestlog._narrowmatch
if not narrowmatch.always():
if not narrowmatch.visitdir(self._dir[:-1]):
@@ -2189,7 +2406,7 @@
)
return self._manifestlog.getstorage(self._dir)
- def read(self):
+ def read(self) -> 'treemanifest':
if self._data is None:
store = self._storage()
if self._node == self._manifestlog.nodeconstants.nullid:
@@ -2222,29 +2439,25 @@
return self._data
- def node(self):
+ def node(self) -> bytes:
return self._node
- def copy(self):
+ def copy(self) -> 'memtreemanifestctx':
memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
memmf._treemanifest = self.read().copy()
return memmf
@propertycache
- def parents(self):
+ def parents(self) -> Tuple[bytes, bytes]:
return self._storage().parents(self._node)
- def readdelta(self, shallow=False):
- """Returns a manifest containing just the entries that are present
- in this manifest, but not in its p1 manifest. This is efficient to read
- if the revlog delta is already p1.
-
- If `shallow` is True, this will read the delta for this directory,
- without recursively reading subdirectory manifests. Instead, any
- subdirectory entry will be reported as it appears in the manifest, i.e.
- the subdirectory will be reported among files and distinguished only by
- its 't' flag.
- """
+ def readdelta(self, shallow: bool = False) -> AnyManifestDict:
+ """see `imanifestrevisionstored` documentation"""
+ util.nouideprecwarn(
+ b'"readdelta" is deprecated, use "read_any_fast_delta" or "read_delta_new_entries"',
+ b"6.9",
+ stacklevel=2,
+ )
store = self._storage()
if shallow:
r = store.rev(self._node)
@@ -2263,7 +2476,146 @@
md.setflag(f, fl1)
return md
- def readfast(self, shallow=False):
+ def read_any_fast_delta(
+ self,
+ valid_bases: Optional[Collection[int]] = None,
+ *,
+ shallow: bool = False,
+ ) -> Tuple[Optional[int], AnyManifestDict]:
+ """see `imanifestrevisionstored` documentation"""
+ store = self._storage()
+ r = store.rev(self._node)
+ deltaparent = store.deltaparent(r)
+
+ if valid_bases is None:
+ # make sure the next check is True
+ valid_bases = (deltaparent,)
+ can_use_delta = deltaparent != nullrev and deltaparent in valid_bases
+
+ if shallow:
+ if can_use_delta:
+ return (deltaparent, self._read_storage_delta_shallow())
+ else:
+ d = store.revision(self._node)
+ return (None, manifestdict(store.nodeconstants.nodelen, d))
+ else:
+ # note: this use of "slow_delta" here is cargo-culted from the previous
+ # implementation. I am not sure it makes sense, since the goal here is
+ # to be fast, so why are we computing a delta? On the other hand, tree
+ # manifest deltas are fairly "cheap" and allow skipping whole parts of
+ # the tree that a full read would access. So it might be a good idea.
+ #
+ # If we realize we don't need a delta here, we should simply use:
+ #
+ # return (None, self.read())
+ if can_use_delta:
+ return (None, self._read_storage_slow_delta(base=deltaparent))
+ else:
+ parents = [
+ p
+ for p in store.parentrevs(r)
+ if p is not nullrev and p in valid_bases
+ ]
+ if parents:
+ best_base = max(parents)
+ else:
+ best_base = max(valid_bases)
+ return (None, self._read_storage_slow_delta(base=best_base))
+
+ def _read_storage_delta_shallow(self) -> manifestdict:
+ store = self._storage()
+ r = store.rev(self._node)
+ d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
+ return manifestdict(store.nodeconstants.nodelen, d)
+
+ def _read_storage_slow_delta(self, base) -> 'treemanifest':
+ store = self._storage()
+ if base is None:
+ base = store.deltaparent(store.rev(self._node))
+ m0 = self._manifestlog.get(self._dir, store.node(base)).read()
+ m1 = self.read()
+ md = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
+ for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).items():
+ if n1:
+ md[f] = n1
+ if fl1:
+ md.setflag(f, fl1)
+ return md
+
+ def read_delta_parents(
+ self,
+ *,
+ shallow: bool = False,
+ exact: bool = True,
+ ) -> AnyManifestDict:
+ """see `interface.imanifestrevisionbase` documentation"""
+ store = self._storage()
+ r = store.rev(self._node)
+ parents = [p for p in store.parentrevs(r) if p is not nullrev]
+ if not exact:
+ return self.read_any_fast_delta(parents, shallow=shallow)[1]
+ elif len(parents) == 0:
+ if shallow:
+ d = store.revision(self._node)
+ return manifestdict(store.nodeconstants.nodelen, d)
+ else:
+ return self.read()
+ elif len(parents) == 1:
+ p = parents[0]
+ if shallow:
+ d = mdiff.patchtext(store.revdiff(p, r))
+ return manifestdict(store.nodeconstants.nodelen, d)
+ else:
+ return self._read_storage_slow_delta(base=p)
+ else:
+ p1, p2 = parents
+ if shallow:
+ d1 = mdiff.patchtext(store.revdiff(p1, r))
+ d2 = mdiff.patchtext(store.revdiff(p2, r))
+ d1 = manifestdict(store.nodeconstants.nodelen, d1)
+ d2 = manifestdict(store.nodeconstants.nodelen, d2)
+ md = manifestdict(store.nodeconstants.nodelen)
+ for f, new_node, new_flag in d1.iterentries():
+ if f not in d2:
+ continue
+ if new_node is not None:
+ md.set(f, new_node, new_flag)
+ return md
+ else:
+ m1 = self._manifestlog.get(self._dir, store.node(p1)).read()
+ m2 = self._manifestlog.get(self._dir, store.node(p2)).read()
+ mc = self.read()
+ d1 = m1.diff(mc)
+ d2 = m2.diff(mc)
+ md = treemanifest(
+ self._manifestlog.nodeconstants,
+ dir=self._dir,
+ )
+ for f, new_node, new_flag in d1.iterentries():
+ if f not in d2:
+ continue
+ if new_node is not None:
+ md.set(f, new_node, new_flag)
+ return md
+
+ def read_delta_new_entries(
+ self, *, shallow: bool = False
+ ) -> AnyManifestDict:
+ """see `interface.imanifestrevisionbase` documentation"""
+ # If we are using narrow, returning a delta against an arbitrary
+ # changeset might return files outside the narrowspec. This can create
+ # issues when running validation server side with strict security, as a
+ # push from a low-privilege user might be seen as adding new revisions
+ # for files it cannot touch. So we are strict if narrow is involved.
+ if self._manifestlog.narrowed:
+ return self.read_delta_parents(shallow=shallow, exact=True)
+ # delegate to another existing method for simplicity
+ store = self._storage()
+ r = store.rev(self._node)
+ bases = (store.deltaparent(r),)
+ return self.read_any_fast_delta(bases, shallow=shallow)[1]
+
+ def readfast(self, shallow=False) -> AnyManifestDict:
"""Calls either readdelta or read, based on which would be less work.
readdelta is called if the delta is against the p1, and therefore can be
read quickly.
@@ -2271,6 +2623,11 @@
If `shallow` is True, it only returns the entries from this manifest,
and not any submanifests.
"""
+ util.nouideprecwarn(
+ b'"readfast" is deprecated, use "read_any_fast_delta" or "read_delta_parents"',
+ b"6.9",
+ stacklevel=2,
+ )
store = self._storage()
r = store.rev(self._node)
deltaparent = store.deltaparent(r)
@@ -2284,7 +2641,7 @@
else:
return self.read()
- def find(self, key):
+ def find(self, key: bytes) -> Tuple[bytes, bytes]:
return self.read().find(key)
@@ -2300,6 +2657,9 @@
whose contents are unknown.
"""
+ _files: Dict[bytes, bytes]
+ _flags: Dict[bytes, bytes]
+
def __init__(self, nodeconstants, dir, node):
super(excludeddir, self).__init__(nodeconstants, dir)
self._node = node
--- a/mercurial/match.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/match.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,12 +5,23 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import bisect
import copy
import itertools
import os
import re
+import typing
+
+from typing import (
+ Any,
+ Callable,
+ List,
+ Tuple,
+ Union,
+ overload,
+)
from .i18n import _
from .pycompat import open
@@ -399,12 +410,12 @@
if badfn is not None:
self.bad = badfn
- def was_tampered_with_nonrec(self):
+ def was_tampered_with_nonrec(self) -> bool:
# [_was_tampered_with] is used to track if when extensions changed the matcher
# behavior (crazy stuff!), so we disable the rust fast path.
return self._was_tampered_with
- def was_tampered_with(self):
+ def was_tampered_with(self) -> bool:
return self.was_tampered_with_nonrec()
def __call__(self, fn):
@@ -894,7 +905,7 @@
self.bad = m1.bad
self.traversedir = m1.traversedir
- def was_tampered_with(self):
+ def was_tampered_with(self) -> bool:
return (
self.was_tampered_with_nonrec()
or self._m1.was_tampered_with()
@@ -984,7 +995,7 @@
self.bad = m1.bad
self.traversedir = m1.traversedir
- def was_tampered_with(self):
+ def was_tampered_with(self) -> bool:
return (
self.was_tampered_with_nonrec()
or self._m1.was_tampered_with()
@@ -1071,7 +1082,7 @@
sub/x.txt: No such file
"""
- def __init__(self, path, matcher):
+ def __init__(self, path: bytes, matcher: basematcher) -> None:
super(subdirmatcher, self).__init__()
self._path = path
self._matcher = matcher
@@ -1088,7 +1099,7 @@
if matcher.prefix():
self._always = any(f == path for f in matcher._files)
- def was_tampered_with(self):
+ def was_tampered_with(self) -> bool:
return (
self.was_tampered_with_nonrec() or self._matcher.was_tampered_with()
)
@@ -1227,7 +1238,7 @@
self.traversedir = m1.traversedir
self._matchers = matchers
- def was_tampered_with(self):
+ def was_tampered_with(self) -> bool:
return self.was_tampered_with_nonrec() or any(
map(lambda m: m.was_tampered_with(), self._matchers)
)
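
Each combining matcher annotated here follows the same composite shape. A minimal stand-in sketch of the pattern (not the real matcher API):

class CompositeDemo:
    def __init__(self, children):
        self._was_tampered_with = False
        self._children = children

    def was_tampered_with_nonrec(self) -> bool:
        return self._was_tampered_with

    def was_tampered_with(self) -> bool:
        # tampered if patched directly, or if any child was
        return self.was_tampered_with_nonrec() or any(
            c.was_tampered_with() for c in self._children
        )
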
@@ -1663,6 +1674,33 @@
_commentre = None
+if typing.TYPE_CHECKING:
+ from typing_extensions import (
+ Literal,
+ )
+
+ @overload
+ def readpatternfile(
+ filepath: bytes, warn: Callable[[bytes], Any], sourceinfo: Literal[True]
+ ) -> List[Tuple[bytes, int, bytes]]:
+ ...
+
+ @overload
+ def readpatternfile(
+ filepath: bytes,
+ warn: Callable[[bytes], Any],
+ sourceinfo: Literal[False],
+ ) -> List[bytes]:
+ ...
+
+ @overload
+ def readpatternfile(
+ filepath: bytes,
+ warn: Callable[[bytes], Any],
+ sourceinfo: bool = False,
+ ) -> List[Union[Tuple[bytes, int, bytes], bytes]]:
+ ...
+
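
A self-contained sketch of the typing device used above: `@overload` plus `Literal` lets the checker pick the return type from the value of the flag, while the runtime only ever sees the single untyped def. Toy names, and it assumes `typing_extensions` is available, as the patch itself does:

import typing
from typing import List, Tuple, overload

if typing.TYPE_CHECKING:
    from typing_extensions import Literal

    @overload
    def parse(path: bytes, detailed: Literal[True]) -> List[Tuple[bytes, int]]:
        ...

    @overload
    def parse(path: bytes, detailed: Literal[False]) -> List[bytes]:
        ...

def parse(path, detailed=False):
    # runtime behavior; the overloads exist only for the type checker
    return [(path, 1)] if detailed else [path]
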
def readpatternfile(filepath, warn, sourceinfo=False):
"""parse a pattern file, returning a list of
--- a/mercurial/mdiff.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/mdiff.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,11 +5,24 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
import struct
+import typing
import zlib
+from typing import (
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ cast,
+)
+
from .i18n import _
from . import (
diffhelper,
@@ -19,10 +32,14 @@
pycompat,
util,
)
+from .interfaces import (
+ modules as intmod,
+)
+
from .utils import dateutil
-bdiff = policy.importmod('bdiff')
-mpatch = policy.importmod('mpatch')
+bdiff: intmod.BDiff = policy.importmod('bdiff')
+mpatch: intmod.MPatch = policy.importmod('mpatch')
blocks = bdiff.blocks
fixws = bdiff.fixws
@@ -31,6 +48,21 @@
textdiff = bdiff.bdiff
splitnewlines = bdiff.splitnewlines
+if typing.TYPE_CHECKING:
+ HunkLines = List[bytes]
+ """Lines of a hunk: a header, followed by line additions and deletions."""
+
+ HunkRange = Tuple[int, int, int, int]
+ """HunkRange represents the range information of a hunk.
+
+ The tuple (s1, l1, s2, l2) forms the header '@@ -s1,l1 +s2,l2 @@'."""
+
+ Range = Tuple[int, int]
+ """A (lowerbound, upperbound) range tuple."""
+
+ TypedBlock = Tuple[intmod.BDiffBlock, bytes]
+ """A bdiff block with its type."""
+
# TODO: this looks like it could be an attrs, which might help pytype
class diffopts:
@@ -102,11 +134,11 @@
defaultopts = diffopts()
-def wsclean(opts, text, blank=True):
+def wsclean(opts: diffopts, text: bytes, blank: bool = True) -> bytes:
if opts.ignorews:
- text = bdiff.fixws(text, 1)
+ text = bdiff.fixws(text, True)
elif opts.ignorewsamount:
- text = bdiff.fixws(text, 0)
+ text = bdiff.fixws(text, False)
if blank and opts.ignoreblanklines:
text = re.sub(b'\n+', b'\n', text).strip(b'\n')
if opts.ignorewseol:
@@ -114,7 +146,13 @@
return text
-def splitblock(base1, lines1, base2, lines2, opts):
+def splitblock(
+ base1: int,
+ lines1: Iterable[bytes],
+ base2: int,
+ lines2: Iterable[bytes],
+ opts: diffopts,
+) -> Iterable[TypedBlock]:
# The input lines matches except for interwoven blank lines. We
# transform it into a sequence of matching blocks and blank blocks.
lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
@@ -135,12 +173,12 @@
while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
i1 += 1
i2 += 1
- yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
+ yield (base1 + s1, base1 + i1, base2 + s2, base2 + i2), btype
s1 = i1
s2 = i2
-def hunkinrange(hunk, linerange):
+def hunkinrange(hunk: Tuple[int, int], linerange: Range) -> bool:
"""Return True if `hunk` defined as (start, length) is in `linerange`
defined as (lowerbound, upperbound).
@@ -166,7 +204,9 @@
return lowerbound < start + length and start < upperbound
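
A worked example of the predicate: a hunk (start=10, length=5) spans lines 10 through 14, so it intersects the range (12, 20) but not (15, 20):

def hunkinrange_demo(hunk, linerange):
    start, length = hunk
    lowerbound, upperbound = linerange
    return lowerbound < start + length and start < upperbound

assert hunkinrange_demo((10, 5), (12, 20)) is True
assert hunkinrange_demo((10, 5), (15, 20)) is False
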
-def blocksinrange(blocks, rangeb):
+def blocksinrange(
+ blocks: Iterable[TypedBlock], rangeb: Range
+) -> Tuple[List[TypedBlock], Range]:
"""filter `blocks` like (a1, a2, b1, b2) from items outside line range
`rangeb` from ``(b1, b2)`` point of view.
@@ -206,14 +246,24 @@
return filteredblocks, (lba, uba)
-def chooseblocksfunc(opts=None):
- if opts is None or not opts.xdiff or not hasattr(bdiff, 'xdiffblocks'):
+def chooseblocksfunc(opts: Optional[diffopts] = None) -> intmod.BDiffBlocksFnc:
+ if (
+ opts is None
+ or not opts.xdiff
+ or not getattr(bdiff, 'xdiffblocks', None)
+ ):
return bdiff.blocks
else:
return bdiff.xdiffblocks
-def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
+def allblocks(
+ text1: bytes,
+ text2: bytes,
+ opts: Optional[diffopts] = None,
+ lines1: Optional[Sequence[bytes]] = None,
+ lines2: Optional[Sequence[bytes]] = None,
+) -> Iterable[TypedBlock]:
"""Return (block, type) tuples, where block is an mdiff.blocks
line entry. type is '=' for blocks matching exactly one another
(bdiff blocks), '!' for non-matching blocks and '~' for blocks
@@ -235,8 +285,8 @@
if i > 0:
s = diff[i - 1]
else:
- s = [0, 0, 0, 0]
- s = [s[1], s1[0], s[3], s1[2]]
+ s = (0, 0, 0, 0)
+ s = (s[1], s1[0], s[3], s1[2])
# bdiff sometimes gives huge matches past eof, this check eats them,
# and deals with the special first match case described above
@@ -255,7 +305,16 @@
yield s1, b'='
-def unidiff(a, ad, b, bd, fn1, fn2, binary, opts=defaultopts):
+def unidiff(
+ a: bytes,
+ ad: bytes,
+ b: bytes,
+ bd: bytes,
+ fn1: bytes,
+ fn2: bytes,
+ binary: bool,
+ opts: diffopts = defaultopts,
+) -> Tuple[List[bytes], Iterable[Tuple[Optional[HunkRange], HunkLines]]]:
"""Return a unified diff as a (headers, hunks) tuple.
If the diff is not null, `headers` is a list with unified diff header
@@ -266,7 +325,7 @@
Set binary=True if either a or b should be taken as a binary file.
"""
- def datetag(date, fn=None):
+ def datetag(date: bytes, fn: Optional[bytes] = None):
if not opts.git and not opts.nodates:
return b'\t%s' % date
if fn and b' ' in fn:
@@ -335,10 +394,16 @@
b"+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
]
- return headerlines, hunks
+ # The possible bool is consumed from the iterator above in the `next()`
+ # call.
+ return headerlines, cast(
+ "Iterable[Tuple[Optional[HunkRange], HunkLines]]", hunks
+ )
-def _unidiff(t1, t2, opts=defaultopts):
+def _unidiff(
+ t1: bytes, t2: bytes, opts: diffopts = defaultopts
+) -> Iterator[Union[bool, Tuple[HunkRange, HunkLines]]]:
"""Yield hunks of a headerless unified diff from t1 and t2 texts.
Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
@@ -366,7 +431,9 @@
lastfunc = [0, b'']
- def yieldhunk(hunk):
+ def yieldhunk(
+ hunk: Tuple[int, int, int, int, List[bytes]]
+ ) -> Iterable[Tuple[HunkRange, HunkLines]]:
(astart, a2, bstart, b2, delta) = hunk
aend = contextend(a2, len(l1))
alen = aend - astart
@@ -466,12 +533,11 @@
yield x
if prev:
# we've joined the previous hunk, record the new ending points.
- hunk[1] = a2
- hunk[3] = b2
+ hunk = (hunk[0], a2, hunk[2], b2, hunk[4])
delta = hunk[4]
else:
# create a new hunk
- hunk = [astart, a2, bstart, b2, delta]
+ hunk = (astart, a2, bstart, b2, delta)
delta[len(delta) :] = [b' ' + x for x in l1[astart:a1]]
delta[len(delta) :] = [b'-' + x for x in old]
@@ -487,7 +553,7 @@
yield False
-def b85diff(to, tn):
+def b85diff(to: Optional[bytes], tn: Optional[bytes]) -> bytes:
'''print base85-encoded binary diff'''
def fmtline(line):
@@ -524,7 +590,7 @@
return b''.join(ret)
-def patchtext(bin):
+def patchtext(bin: bytes) -> bytes:
pos = 0
t = []
while pos < len(bin):
@@ -543,13 +609,13 @@
# similar to difflib.SequenceMatcher.get_matching_blocks
-def get_matching_blocks(a, b):
+def get_matching_blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int]]:
return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
-def trivialdiffheader(length):
+def trivialdiffheader(length: int) -> bytes:
return struct.pack(b">lll", 0, 0, length) if length else b''
-def replacediffheader(oldlen, newlen):
+def replacediffheader(oldlen: int, newlen: int) -> bytes:
return struct.pack(b">lll", 0, oldlen, newlen)
--- a/mercurial/merge.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/merge.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,13 +5,23 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
+import os
import struct
+import typing
+from typing import Dict, Optional, Tuple
from .i18n import _
from .node import nullrev
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .utils import stringutil
from .dirstateutils import timestamp
from . import (
@@ -31,6 +41,8 @@
worker,
)
+rust_update_mod = policy.importrust("update")
+
_pack = struct.pack
_unpack = struct.unpack
@@ -138,6 +150,8 @@
dircache = dict()
dirstate = repo.dirstate
wvfs = repo.wvfs
+ # wouldn't it be easier to loop over unknown files (and dirs)?
+
if not force:
def collectconflicts(conflicts, config):
@@ -420,11 +434,11 @@
# Track the names of all deleted files.
for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
deletedfiles.add(f)
- for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
+ for f, args, msg in mresult.getactions((mergestatemod.ACTION_MERGE,)):
f1, f2, fa, move, anc = args
if move:
deletedfiles.add(f1)
- for (f, args, msg) in mresult.getactions(
+ for f, args, msg in mresult.getactions(
(mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
):
f2, flags = args
@@ -1826,6 +1840,12 @@
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
+# Let extensions turn off any Rust code in the update code if that interferes
+# with their patching.
+# This being `True` does not mean that you have Rust extensions installed or
+# that the Rust path will be taken for any given invocation.
+MAYBE_USE_RUST_UPDATE = True
+
def _update(
repo,
@@ -1999,6 +2019,61 @@
if not branchmerge and not wc.dirty(missing=True):
followcopies = False
+ update_from_null = False
+ update_from_null_fallback = False
+ if (
+ MAYBE_USE_RUST_UPDATE
+ and repo.ui.configbool(b"rust", b"update-from-null")
+ and rust_update_mod is not None
+ and p1.rev() == nullrev
+ and not branchmerge
+ # TODO it's probably not too hard to pass down the transaction and
+ # respect the write patterns from Rust. But since it doesn't affect
+ # a simple update from null, then it doesn't matter yet.
+ and repo.currenttransaction() is None
+ and matcher is None
+ and not wc.mergestate().active()
+ and b'.hgsubstate' not in p2
+ ):
+ working_dir_iter = os.scandir(repo.root)
+ maybe_hg_folder = next(working_dir_iter)
+ assert maybe_hg_folder is not None
+ if maybe_hg_folder.name == b".hg":
+ try:
+ next(working_dir_iter)
+ except StopIteration:
+ update_from_null = True
+
+ if update_from_null:
+ # Check the narrowspec and sparsespec here to display warnings
+ # more easily.
+ # TODO figure out a way of bubbling up warnings to Python
+ # while not polluting the Rust code (probably a channel)
+ repo.narrowmatch()
+ sparse.matcher(repo, [nullrev, p2.rev()])
+ repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
+ # note that we're in the middle of an update
+ repo.vfs.write(b'updatestate', p2.hex())
+ try:
+ updated_count = rust_update_mod.update_from_null(
+ repo.root, p2.rev()
+ )
+ except rust_update_mod.FallbackError:
+ update_from_null_fallback = True
+ else:
+ # We've changed the dirstate from Rust, so we need to tell Python
+ repo.dirstate.invalidate()
+ # This includes setting the parents, since they are not read
+ # again on invalidation
+ with repo.dirstate.changing_parents(repo):
+ repo.dirstate.setparents(fp2)
+ repo.dirstate.setbranch(p2.branch(), repo.currenttransaction())
+ sparse.prunetemporaryincludes(repo)
+ repo.hook(b'update', parent1=xp1, parent2=xp2, error=0)
+ # update completed, clear state
+ util.unlink(repo.vfs.join(b'updatestate'))
+ return updateresult(updated_count, 0, 0, 0)
+
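+ # The scandir dance above verifies that the working directory holds
+ # nothing but `.hg` before taking the Rust fast path. The same probe in
+ # isolation (hypothetical helper; like the real code, it relies on `.hg`
+ # being encountered first in a fresh checkout):
+ #
+ # def only_hg_dir(root: bytes) -> bool:
+ #     with os.scandir(root) as it:
+ #         first = next(it, None)
+ #         if first is None or first.name != b'.hg':
+ #             return False
+ #         return next(it, None) is None  # anything else disqualifies
+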
### calculate phase
mresult = calculateupdates(
repo,
@@ -2122,11 +2197,13 @@
# the dirstate.
always = matcher is None or matcher.always()
updatedirstate = updatedirstate and always and not wc.isinmemory()
- if updatedirstate:
+ # If we're in the fallback case, we've already done this
+ if updatedirstate and not update_from_null_fallback:
repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
# note that we're in the middle of an update
repo.vfs.write(b'updatestate', p2.hex())
+ # TODO don't run if Rust is available
_advertisefsmonitor(
repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
)
@@ -2156,83 +2233,21 @@
mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
)
with repo.dirstate.changing_parents(repo):
- ### Filter Filedata
- #
- # We gathered "cache" information for the clean file while
- # updating them: mtime, size and mode.
- #
- # At the time this comment is written, they are various issues
- # with how we gather the `mode` and `mtime` information (see
- # the comment in `batchget`).
- #
- # We are going to smooth one of this issue here : mtime ambiguity.
- #
- # i.e. even if the mtime gathered during `batchget` was
- # correct[1] a change happening right after it could change the
- # content while keeping the same mtime[2].
- #
- # When we reach the current code, the "on disk" part of the
- # update operation is finished. We still assume that no other
- # process raced that "on disk" part, but we want to at least
- # prevent later file change to alter the content of the file
- # right after the update operation. So quickly that the same
- # mtime is record for the operation.
- # To prevent such ambiguity to happens, we will only keep the
- # "file data" for files with mtime that are stricly in the past,
- # i.e. whose mtime is strictly lower than the current time.
- #
- # This protect us from race conditions from operation that could
- # run right after this one, especially other Mercurial
- # operation that could be waiting for the wlock to touch files
- # content and the dirstate.
- #
- # In an ideal world, we could only get reliable information in
- # `getfiledata` (from `getbatch`), however the current approach
- # have been a successful compromise since many years.
- #
- # At the time this comment is written, not using any "cache"
- # file data at all here would not be viable. As it would result is
- # a very large amount of work (equivalent to the previous `hg
- # update` during the next status after an update).
- #
- # [1] the current code cannot grantee that the `mtime` and
- # `mode` are correct, but the result is "okay in practice".
- # (see the comment in `batchget`). #
- #
- # [2] using nano-second precision can greatly help here because
- # it makes the "different write with same mtime" issue
- # virtually vanish. However, dirstate v1 cannot store such
- # precision and a bunch of python-runtime, operating-system and
- # filesystem does not provide use with such precision, so we
- # have to operate as if it wasn't available.
if getfiledata:
- ambiguous_mtime = {}
- now = timestamp.get_fs_now(repo.vfs)
- if now is None:
- # we can't write to the FS, so we won't actually update
- # the dirstate content anyway, no need to put cache
- # information.
- getfiledata = None
- else:
- now_sec = now[0]
- for f, m in getfiledata.items():
- if m is not None and m[2][0] >= now_sec:
- ambiguous_mtime[f] = (m[0], m[1], None)
- for f, m in ambiguous_mtime.items():
- getfiledata[f] = m
+ getfiledata = filter_ambiguous_files(repo, getfiledata)
repo.setparents(fp1, fp2)
mergestatemod.recordupdates(
repo, mresult.actionsdict, branchmerge, getfiledata
)
- # update completed, clear state
- util.unlink(repo.vfs.join(b'updatestate'))
-
if not branchmerge:
repo.dirstate.setbranch(
p2.branch(), repo.currenttransaction()
)
+ # update completed, clear state
+ util.unlink(repo.vfs.join(b'updatestate'))
+
# If we're updating to a location, clean up any stale temporary includes
# (ex: this happens during hg rebase --abort).
if not branchmerge:
@@ -2245,6 +2260,128 @@
return stats
+# filename -> (mode, size, timestamp)
+FileData = Dict[bytes, Optional[Tuple[int, int, Optional[timestamp.timestamp]]]]
+
+
+def filter_ambiguous_files(repo, file_data: FileData) -> Optional[FileData]:
+ """We've gathered "cache" information for the clean files while updating
+ them: their mtime, size and mode.
+
+ At the time this comment is written, there are various issues with how we
+ gather the `mode` and `mtime` information (see the comment in `batchget`).
+
+ We are going to smooth one of these issues here: mtime ambiguity.
+
+ i.e. even if the mtime gathered during `batchget` was correct[1], a change
+ happening right after it could change the content while keeping
+ the same mtime[2].
+
+ When we reach the current code, the "on disk" part of the update operation
+ is finished. We still assume that no other process raced that "on disk"
+ part, but we want to at least prevent later file changes to alter the
+ contents of the file right after the update operation so quickly that the
+ same mtime is recorded for the operation.
+
+ To prevent such ambiguities from happening, we will do (up to) two things:
+ - wait until the filesystem clock has ticked
+ - only keep the "file data" for files with mtimes that are strictly in
+ the past, i.e. whose mtime is strictly lower than the current time.
+
+ We only wait for the system clock to tick if using dirstate-v2, since v1
+ only has second-level granularity and waiting for a whole second is
+ too much of a penalty in the general case.
+
+ Although we're assuming that people running dirstate-v2 on Linux
+ don't have a second-granularity FS (with the exclusion of NFS), users
+ can be surprising, and at some point in the future, dirstate-v2 will become
+ the default. To that end, we limit the wait time to 100ms and fall back
+ to the filtering method in case of a timeout.
+
+ +------------+------+--------------+
+ | version | wait | filter level |
+ +------------+------+--------------+
+ | V1 | No | Second |
+ | V2 | Yes | Nanosecond |
+ | V2-slow-fs | No | Second |
+ +------------+------+--------------+
+
+ This protects us from race conditions from operations that could run right
+ after this one, especially other Mercurial operations that could be waiting
+ for the wlock to touch file contents and the dirstate.
+
+ In an ideal world, we could only get reliable information in `getfiledata`
+ (from `getbatch`), however this filtering approach has been a successful
+ compromise for many years. A patch series for the Linux kernel might
+ change this in 6.12 [3].
+
+ At the time this comment is written, not using any "cache" file data at all
+ here would not be viable, as it would result in a very large amount of work
+ (equivalent to the previous `hg update` during the next status after an
+ update).
+
+ [1] the current code cannot guarantee that the `mtime` and `mode`
+ are correct, but the result is "okay in practice".
+ (see the comment in `batchget`)
+
+ [2] using nano-second precision can greatly help here because it makes the
+ "different write with same mtime" issue virtually vanish. However,
+ dirstate v1 cannot store such precision and a bunch of python-runtime,
+ operating-system and filesystem parts do not provide us with such
+ precision, so we have to operate as if it wasn't available.
+
+ [3] https://lore.kernel.org/all/20241002-mgtime-v10-8-d1c4717f5284@kernel.org
+ """
+ ambiguous_mtime: FileData = {}
+ dirstate_v2 = repo.dirstate._use_dirstate_v2
+ fs_now_result = None
+ fast_enough_fs = True
+ if dirstate_v2:
+ fstype = util.getfstype(repo.vfs.base)
+ # Exclude NFS right off the bat
+ fast_enough_fs = fstype != b'nfs'
+ if fstype is not None and fast_enough_fs:
+ fs_now_result = timestamp.wait_until_fs_tick(repo.vfs)
+
+ if fs_now_result is None:
+ try:
+ now = timestamp.get_fs_now(repo.vfs)
+ fs_now_result = (now, False)
+ except OSError:
+ pass
+
+ if fs_now_result is None:
+ # we can't write to the FS, so we won't actually update
+ # the dirstate content anyway; no need to record cache
+ # information.
+ return None
+ else:
+ now, timed_out = fs_now_result
+ if timed_out:
+ fast_enough_fs = False
+ for f, m in file_data.items():
+ if m is not None:
+ reliable = timestamp.make_mtime_reliable(m[2], now)
+ if reliable is None or (
+ reliable[2] and (not dirstate_v2 or not fast_enough_fs)
+ ):
+ # Either it's not reliable, or it's second ambiguous
+ # and we're in dirstate-v1 or in a slow fs, so discard
+ # the mtime.
+ ambiguous_mtime[f] = (m[0], m[1], None)
+ elif reliable[2]:
+ # We need to remember that this time is "second ambiguous",
+ # otherwise the next status might miss a subsecond change
+ # if its "stat" doesn't provide nanoseconds.
+ #
+ # TODO make osutil.c understand nanoseconds when possible
+ # (see timestamp.py for the same note)
+ ambiguous_mtime[f] = (m[0], m[1], reliable)
+ for f, m in ambiguous_mtime.items():
+ file_data[f] = m
+ return file_data
+
+
def merge(ctx, labels=None, force=False, wc=None):
"""Merge another topological branch into the working copy.
--- a/mercurial/mergestate.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/mergestate.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import collections
import shutil
import struct
@@ -495,7 +497,6 @@
class mergestate(_mergestate_base):
-
statepathv1 = b'merge/state'
statepathv2 = b'merge/state2'
@@ -779,45 +780,46 @@
def recordupdates(repo, actions, branchmerge, getfiledata):
"""record merge actions to the dirstate"""
+ dirstate = repo.dirstate
+ update_file = dirstate.update_file
+
# remove (must come first)
for f, args, msg in actions.get(ACTION_REMOVE, []):
if branchmerge:
- repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
+ update_file(f, p1_tracked=True, wc_tracked=False)
else:
- repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
+ update_file(f, p1_tracked=False, wc_tracked=False)
# forget (must come first)
for f, args, msg in actions.get(ACTION_FORGET, []):
- repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
+ update_file(f, p1_tracked=False, wc_tracked=False)
# resolve path conflicts
for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
(f0, origf0) = args
- repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
- repo.dirstate.copy(origf0, f)
+ update_file(f, p1_tracked=False, wc_tracked=True)
+ dirstate.copy(origf0, f)
if f0 == origf0:
- repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
+ update_file(f0, p1_tracked=True, wc_tracked=False)
else:
- repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
+ update_file(f0, p1_tracked=False, wc_tracked=False)
# re-add
for f, args, msg in actions.get(ACTION_ADD, []):
- repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
+ update_file(f, p1_tracked=False, wc_tracked=True)
# re-add/mark as modified
for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
if branchmerge:
- repo.dirstate.update_file(
+ update_file(
f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
)
else:
- repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
+ update_file(f, p1_tracked=False, wc_tracked=True)
# exec change
for f, args, msg in actions.get(ACTION_EXEC, []):
- repo.dirstate.update_file(
- f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
- )
+ update_file(f, p1_tracked=True, wc_tracked=True, possibly_dirty=True)
# keep
for f, args, msg in actions.get(ACTION_KEEP, []):
@@ -835,9 +837,9 @@
for f, args, msg in actions.get(ACTION_GET, []):
if branchmerge:
# tracked in p1 can be True also but update_file should not care
- old_entry = repo.dirstate.get_entry(f)
+ old_entry = dirstate.get_entry(f)
p1_tracked = old_entry.any_tracked and not old_entry.added
- repo.dirstate.update_file(
+ update_file(
f,
p1_tracked=p1_tracked,
wc_tracked=True,
@@ -845,7 +847,7 @@
)
else:
parentfiledata = getfiledata[f] if getfiledata else None
- repo.dirstate.update_file(
+ update_file(
f,
p1_tracked=True,
wc_tracked=True,
@@ -859,7 +861,7 @@
# We've done a branch merge, mark this file as merged
# so that we properly record the merger later
p1_tracked = f1 == f
- repo.dirstate.update_file(
+ update_file(
f,
p1_tracked=p1_tracked,
wc_tracked=True,
@@ -867,13 +869,11 @@
)
if f1 != f2: # copy/rename
if move:
- repo.dirstate.update_file(
- f1, p1_tracked=True, wc_tracked=False
- )
+ update_file(f1, p1_tracked=True, wc_tracked=False)
if f1 != f:
- repo.dirstate.copy(f1, f)
+ dirstate.copy(f1, f)
else:
- repo.dirstate.copy(f2, f)
+ dirstate.copy(f2, f)
else:
# We've update-merged a locally modified file, so
# we set the dirstate to emulate a normal checkout
@@ -881,30 +881,28 @@
# merge will appear as a normal local file
# modification.
if f2 == f: # file not locally copied/moved
- repo.dirstate.update_file(
+ update_file(
f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
)
if move:
- repo.dirstate.update_file(
- f1, p1_tracked=False, wc_tracked=False
- )
+ update_file(f1, p1_tracked=False, wc_tracked=False)
# directory rename, move local
for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
f0, flag = args
if branchmerge:
- repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
- repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
- repo.dirstate.copy(f0, f)
+ update_file(f, p1_tracked=False, wc_tracked=True)
+ update_file(f0, p1_tracked=True, wc_tracked=False)
+ dirstate.copy(f0, f)
else:
- repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
- repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
+ update_file(f, p1_tracked=True, wc_tracked=True)
+ update_file(f0, p1_tracked=False, wc_tracked=False)
# directory rename, get
for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
f0, flag = args
if branchmerge:
- repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
- repo.dirstate.copy(f0, f)
+ update_file(f, p1_tracked=False, wc_tracked=True)
+ dirstate.copy(f0, f)
else:
- repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
+ update_file(f, p1_tracked=True, wc_tracked=True)
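
The hunk above binds `repo.dirstate.update_file` to a local name once instead
of re-resolving two attributes on every call. A small self-contained timing
sketch (with hypothetical stand-in classes) of why this classic CPython
micro-optimization pays off in hot loops:

```python
import timeit

class Dirstate:
    def update_file(self, f):
        pass

class Repo:
    dirstate = Dirstate()

repo = Repo()
files = [b'f%d' % i for i in range(1000)]

def with_attribute_lookup():
    for f in files:
        repo.dirstate.update_file(f)  # two attribute lookups per call

def with_local_alias():
    update_file = repo.dirstate.update_file  # looked up once
    for f in files:
        update_file(f)

print(timeit.timeit(with_attribute_lookup, number=500))
print(timeit.timeit(with_local_alias, number=500))  # measurably faster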
--- a/mercurial/mergeutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/mergeutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
--- a/mercurial/metadata.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/metadata.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import multiprocessing
import struct
@@ -433,14 +435,12 @@
# Iteration over d1 content will deal with all cases, but the one in the
# first column of the table.
for filename, d1 in diff_p1.items():
-
d2 = diff_p2.pop(filename, None)
if d2 is None:
# this deals with the first line of the table.
_process_other_unchanged(md, mas, filename, d1)
else:
-
if d1[0][0] is None and d2[0][0] is None:
# case 🄼 — both deleted the file.
md.mark_added(filename)
--- a/mercurial/minifileset.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/minifileset.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from . import (
--- a/mercurial/minirst.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/minirst.py Sat Oct 26 04:16:00 2024 +0200
@@ -18,6 +18,7 @@
when adding support for new constructs.
"""
+from __future__ import annotations
import re
--- a/mercurial/namespaces.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/namespaces.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from .i18n import _
from . import (
registrar,
--- a/mercurial/narrowspec.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/narrowspec.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import weakref
from .i18n import _
@@ -225,7 +227,6 @@
m = "changing narrow spec outside of a transaction"
raise error.ProgrammingError(m)
else:
-
reporef = weakref.ref(repo)
def clean_pending(tr):
--- a/mercurial/node.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/node.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import binascii
--- a/mercurial/obsolete.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/obsolete.py Sat Oct 26 04:16:00 2024 +0200
@@ -68,6 +68,8 @@
"""
+from __future__ import annotations
+
import binascii
import struct
import weakref
@@ -771,10 +773,11 @@
_addchildren(self.children, markers)
_checkinvalidmarkers(self.repo, markers)
- def relevantmarkers(self, nodes):
- """return a set of all obsolescence markers relevant to a set of nodes.
+ def relevantmarkers(self, nodes=None, revs=None):
+ """return a set of all obsolescence markers relevant to a set of
+ nodes or revisions.
- "relevant" to a set of nodes mean:
+ "relevant" to a set of nodes or revisions mean:
- marker that use this changeset as successor
- prune marker of direct children on this changeset
@@ -782,13 +785,33 @@
markers
It is a set so you cannot rely on order."""
+ if nodes is None:
+ nodes = set()
+ if revs is None:
+ revs = set()
- pendingnodes = set(nodes)
- seenmarkers = set()
- seennodes = set(pendingnodes)
+ tonode = self.repo.unfiltered().changelog.node
+ pendingnodes = set()
precursorsmarkers = self.predecessors
succsmarkers = self.successors
children = self.children
+ for node in nodes:
+ if (
+ node in precursorsmarkers
+ or node in succsmarkers
+ or node in children
+ ):
+ pendingnodes.add(node)
+ for rev in revs:
+ node = tonode(rev)
+ if (
+ node in precursorsmarkers
+ or node in succsmarkers
+ or node in children
+ ):
+ pendingnodes.add(node)
+ seenmarkers = set()
+ seennodes = pendingnodes.copy()
while pendingnodes:
direct = set()
for current in pendingnodes:
@@ -1038,7 +1061,6 @@
def makefoldid(relation, user):
-
folddigest = hashutil.sha1(user)
for p in relation[0] + relation[1]:
folddigest.update(b'%d' % p.rev())
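
The new seeding step in `relevantmarkers` only enqueues nodes that actually
appear in one of the three marker indexes, so nodes without any markers never
enter the traversal loop at all. A hedged sketch of that step in isolation,
with plain dicts standing in for the obsstore attributes:

```python
def seed_pending(nodes, revs, tonode, predecessors, successors, children):
    # Only nodes present in at least one marker index can contribute
    # relevant markers; everything else is skipped before the main loop.
    pending = set()
    for node in nodes:
        if node in predecessors or node in successors or node in children:
            pending.add(node)
    for rev in revs:
        node = tonode(rev)  # revision number -> node id
        if node in predecessors or node in successors or node in children:
            pending.add(node)
    return pending

# toy usage: node b'b' has a successor marker, node b'a' has none
print(seed_pending([b'a', b'b'], [], None, {}, {b'b': [...]}, {}))
# -> {b'b'}
```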
--- a/mercurial/obsutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/obsutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
@@ -108,7 +109,7 @@
elif exclusive:
rawmarkers = exclusivemarkers(repo, nodes)
else:
- rawmarkers = repo.obsstore.relevantmarkers(nodes)
+ rawmarkers = repo.obsstore.relevantmarkers(nodes=nodes)
for markerdata in rawmarkers:
yield marker(repo, markerdata)
@@ -947,7 +948,7 @@
}
-def _getfilteredreason(repo, changeid, ctx):
+def _getfilteredreason(repo, changeid, ctx) -> bytes:
"""return a human-friendly string on why a obsolete changeset is hidden"""
successors = successorssets(repo, ctx.node())
fate = _getobsfate(successors)
@@ -961,7 +962,6 @@
single_successor = short(successors[0][0])
return filteredmsgtable[b'superseded'] % (changeid, single_successor)
elif fate == b'superseded_split':
-
succs = []
for node_id in successors[0]:
succs.append(short(node_id))
@@ -975,6 +975,8 @@
args = (changeid, firstsuccessors, remainingnumber)
return filteredmsgtable[b'superseded_split_several'] % args
+ else:
+ raise error.ProgrammingError("unhandled fate: %r" % fate)
def divergentsets(repo, ctx):
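
Raising `ProgrammingError` in the final `else` makes the branch structure
exhaustive, which is what lets `_getfilteredreason` honestly carry its new
`-> bytes` annotation. The same pattern in miniature, with hypothetical names:

```python
def describe_fate(fate: bytes) -> bytes:
    # every path now returns bytes or raises, so a type checker can verify
    # the annotation, and an unexpected value fails loudly instead of
    # silently returning None
    if fate == b'pruned':
        return b'pruned'
    elif fate == b'superseded':
        return b'superseded'
    else:
        raise AssertionError('unhandled fate: %r' % fate)
```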
--- a/mercurial/parser.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/parser.py Sat Oct 26 04:16:00 2024 +0200
@@ -16,6 +16,7 @@
# an action is a tree node name, a tree label, and an optional match
# __call__(program) parses program into a labeled tree
+from __future__ import annotations
from .i18n import _
from . import (
--- a/mercurial/patch.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/patch.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import contextlib
@@ -229,7 +230,6 @@
def _extract(ui, fileobj, tmpname, tmpfp):
-
# attempt to detect the start of a patch
# (this heuristic is borrowed from quilt)
diffre = re.compile(
@@ -596,7 +596,7 @@
self.created = 0
self.maxsize = maxsize
if self.maxsize is None:
- self.maxsize = 4 * (2 ** 20)
+ self.maxsize = 4 * (2**20)
self.size = 0
self.data = {}
--- a/mercurial/pathutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pathutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import contextlib
import errno
import os
--- a/mercurial/phases.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/phases.py Sat Oct 26 04:16:00 2024 +0200
@@ -100,6 +100,7 @@
"""
+from __future__ import annotations
import heapq
import struct
@@ -116,6 +117,7 @@
Optional,
Set,
Tuple,
+ overload,
)
from .i18n import _
@@ -138,6 +140,9 @@
PhaseSets = Dict[int, Set[int]]
if typing.TYPE_CHECKING:
+ from typing_extensions import (
+ Literal, # py3.8+
+ )
from . import (
localrepo,
ui as uimod,
@@ -375,11 +380,31 @@
class phasecache:
+ if typing.TYPE_CHECKING:
+
+ @overload
+ def __init__(
+ self,
+ repo: Any,
+ phasedefaults: Any,
+ _load: Literal[False],
+ ) -> None:
+ pass
+
+ @overload
+ def __init__(
+ self,
+ repo: "localrepo.localrepository",
+ phasedefaults: Optional["Phasedefaults"],
+ _load: bool = True,
+ ) -> None:
+ pass
+
def __init__(
self,
- repo: "localrepo.localrepository",
- phasedefaults: Optional["Phasedefaults"],
- _load: bool = True,
+ repo,
+ phasedefaults,
+ _load=True,
):
if _load:
# Cheap trick to allow shallow-copy without copy module
@@ -387,7 +412,7 @@
self._phaseroots: Phaseroots = loaded[0]
self.dirty: bool = loaded[1]
self._loadedrevslen = 0
- self._phasesets: PhaseSets = None
+ self._phasesets: Optional[PhaseSets] = None
def hasnonpublicphases(self, repo: "localrepo.localrepository") -> bool:
"""detect if there are revisions with non-public phase"""
@@ -893,7 +918,6 @@
this_phase_set = self._phasesets[targetphase]
for r in range(start, end):
-
# gather information about the current_rev
r_phase = phase(repo, r)
p_phase = None # phase inherited from parents
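
The `@overload` stanza above gives type checkers two signatures — a strict one
for normal construction and a relaxed `_load=False` one for the shallow-copy
trick — while the single runtime `__init__` stays untyped. A compact sketch of
the same pattern with hypothetical names:

```python
import typing
from typing import Optional, overload

if typing.TYPE_CHECKING:
    from typing_extensions import Literal  # py3.8+

class Cache:
    if typing.TYPE_CHECKING:

        @overload
        def __init__(self, repo: object, defaults: object, _load: "Literal[False]") -> None:
            ...

        @overload
        def __init__(self, repo: str, defaults: Optional[dict], _load: bool = True) -> None:
            ...

    def __init__(self, repo, defaults, _load=True):
        # only the _load=True path fully initializes; shallow-copy callers
        # pass _load=False and fill the attributes themselves
        if _load:
            self.repo = repo
            self.defaults = defaults
```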
--- a/mercurial/policy.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/policy.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,9 +5,18 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import sys
+import typing
+
+if typing.TYPE_CHECKING:
+ from typing import (
+ Dict,
+ Optional,
+ Tuple,
+ )
# Rules for how modules can be loaded. Values are:
#
@@ -23,8 +32,8 @@
# By default, fall back to the pure modules so the in-place build can
# run without recompiling the C extensions. This will be overridden by
# __modulepolicy__ generated by setup.py.
-policy = b'allow'
-_packageprefs = {
+policy: bytes = b'allow'
+_packageprefs: "Dict[bytes, Tuple[Optional[str], Optional[str]]]" = {
# policy: (versioned package, pure package)
b'c': ('cext', None),
b'allow': ('cext', 'pure'),
@@ -37,9 +46,9 @@
}
try:
- from . import __modulepolicy__
+ from . import __modulepolicy__ # pytype: disable=import-error
- policy = __modulepolicy__.modulepolicy
+ policy: bytes = __modulepolicy__.modulepolicy
except ImportError:
pass
@@ -48,14 +57,14 @@
# The canonical way to do this is to test platform.python_implementation().
# But we don't import platform and don't bloat for it here.
if '__pypy__' in sys.builtin_module_names:
- policy = b'cffi'
+ policy: bytes = b'cffi'
# Environment variable can always force settings.
if os.environ.get('HGMODULEPOLICY'): # ignore None and Empty
- policy = os.environ['HGMODULEPOLICY'].encode('utf-8')
+ policy: bytes = os.environ['HGMODULEPOLICY'].encode('utf-8')
-def _importfrom(pkgname, modname):
+def _importfrom(pkgname: str, modname: str):
# from .<pkgname> import <modname> (where . is looked through this module)
fakelocals = {}
pkg = __import__(pkgname, globals(), fakelocals, [modname], level=1)
@@ -69,7 +78,7 @@
# keep in sync with "version" in C modules
-_cextversions = {
+_cextversions: "Dict[Tuple[str, str], int]" = {
('cext', 'base85'): 1,
('cext', 'bdiff'): 3,
('cext', 'mpatch'): 1,
@@ -78,7 +87,7 @@
}
# map import request to other package or module
-_modredirects = {
+_modredirects: "Dict[Tuple[str, str], Tuple[str, str]]" = {
('cext', 'charencode'): ('cext', 'parsers'),
('cffi', 'base85'): ('pure', 'base85'),
('cffi', 'charencode'): ('pure', 'charencode'),
@@ -86,7 +95,7 @@
}
-def _checkmod(pkgname, modname, mod):
+def _checkmod(pkgname: str, modname: str, mod) -> None:
expected = _cextversions.get((pkgname, modname))
actual = getattr(mod, 'version', None)
if actual != expected:
@@ -97,7 +106,7 @@
)
-def importmod(modname):
+def importmod(modname: str):
"""Import module according to policy and check API version"""
try:
verpkg, purepkg = _packageprefs[policy]
@@ -118,12 +127,12 @@
return _importfrom(pn, mn)
-def _isrustpermissive():
+def _isrustpermissive() -> bool:
"""Assuming the policy is a Rust one, tell if it's permissive."""
return policy.endswith(b'-allow')
-def importrust(modname, member=None, default=None):
+def importrust(modname: str, member: "Optional[str]" = None, default=None):
"""Import Rust module according to policy and availability.
If policy isn't a Rust one, this returns `default`.
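
For context, this is how the now-annotated policy helpers are consumed
elsewhere in this series (see the `scmutil.py` hunk below); the snippet
assumes a working Mercurial installation:

```python
from mercurial import policy

# returns the cext, pure, or cffi implementation depending on `policy`
parsers = policy.importmod('parsers')

# returns None unless a Rust policy (or permissive fallback) is active
rustrevlog = policy.importrust('revlog')

# the policy can also be forced from the environment before startup,
# e.g. HGMODULEPOLICY=c to require the C extensions
```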
--- a/mercurial/posix.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/posix.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
import fcntl
@@ -214,7 +215,7 @@
def copymode(
src: bytes,
dst: bytes,
- mode: Optional[bytes] = None,
+ mode: Optional[int] = None,
enforcewritable: bool = False,
) -> None:
"""Copy the file mode from the file at path src to dst.
@@ -387,20 +388,20 @@
return None # on posix platforms, every path is ok
-def getfsmountpoint(dirpath: bytes) -> Optional[bytes]:
+def getfsmountpoint(path: bytes) -> Optional[bytes]:
"""Get the filesystem mount point from a directory (best-effort)
Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
"""
- return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
+ return getattr(osutil, 'getfsmountpoint', lambda x: None)(path)
-def getfstype(dirpath: bytes) -> Optional[bytes]:
+def getfstype(path: bytes) -> Optional[bytes]:
"""Get the filesystem type name from a directory (best-effort)
Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
"""
- return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
+ return getattr(osutil, 'getfstype', lambda x: None)(path)
def get_password() -> bytes:
@@ -549,7 +550,7 @@
if pycompat.sysplatform == b'OpenVMS':
# OpenVMS's symlink emulation is broken on some OpenVMS versions.
- def checklink(path):
+ def checklink(path: bytes) -> bool:
return False
@@ -692,7 +693,7 @@
def lookupreg(
key: bytes,
- name: Optional[bytes] = None,
+ valname: Optional[bytes] = None,
scope: Optional[Union[int, Iterable[int]]] = None,
) -> Optional[bytes]:
return None
@@ -708,6 +709,8 @@
class cachestat:
+ stat: os.stat_result
+
def __init__(self, path: bytes) -> None:
self.stat = os.stat(path)
--- a/mercurial/profiling.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/profiling.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,8 +5,12 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
+import os
+import signal
+import subprocess
import sys
from .i18n import _
@@ -67,11 +71,14 @@
# what is going on.
other_tool_name = sys.monitoring.get_tool(sys.monitoring.PROFILER_ID)
if other_tool_name == "cProfile":
- msg = 'cannot recursively call `lsprof`'
+ msg = b'cannot recursively call `lsprof`'
raise error.Abort(msg) from None
else:
- m = 'failed to start "lsprofile"; another profiler already running: %s'
- raise error.Abort(_(m) % other_tool_name) from None
+ tool = b'<unknown>'
+ if other_tool_name:
+ tool = encoding.strtolocal(other_tool_name)
+ m = b'failed to start "lsprofile"; another profiler already running: %s'
+ raise error.Abort(_(m) % tool) from None
try:
yield
finally:
@@ -192,6 +199,50 @@
fp.flush()
+@contextlib.contextmanager
+def pyspy_profile(ui, fp):
+ exe = ui.config(b'profiling', b'py-spy.exe')
+
+ freq = ui.configint(b'profiling', b'py-spy.freq')
+
+ format = ui.config(b'profiling', b'py-spy.format')
+
+ fd = fp.fileno()
+
+ output_path = "/dev/fd/%d" % (fd)
+
+ my_pid = os.getpid()
+
+ cmd = [
+ exe,
+ "record",
+ "--pid",
+ str(my_pid),
+ "--native",
+ "--rate",
+ str(freq),
+ "--output",
+ output_path,
+ ]
+
+ if format:
+ cmd.extend(["--format", format])
+
+ proc = subprocess.Popen(
+ cmd,
+ pass_fds={fd},
+ stdout=subprocess.PIPE,
+ )
+
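+ # this read blocks until py-spy writes its first line of output
+ # (presumably signalling that it has started recording)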
+ _ = proc.stdout.readline()
+
+ try:
+ yield
+ finally:
+ os.kill(proc.pid, signal.SIGINT)
+ proc.communicate()
+
+
class profile:
"""Start profiling.
@@ -231,7 +282,7 @@
proffn = None
if profiler is None:
profiler = self._ui.config(b'profiling', b'type')
- if profiler not in (b'ls', b'stat', b'flame'):
+ if profiler not in (b'ls', b'stat', b'flame', b'py-spy'):
# try load profiler from extension with the same name
proffn = _loadprofiler(self._ui, profiler)
if proffn is None:
@@ -274,6 +325,8 @@
proffn = lsprofile
elif profiler == b'flame':
proffn = flameprofile
+ elif profiler == b'py-spy':
+ proffn = pyspy_profile
else:
proffn = statprofile
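
The py-spy integration hands the child process an already-open descriptor and
points it at `/dev/fd/<N>`, so the profile lands in the file object Mercurial
manages. A minimal, self-contained demo of that descriptor hand-off (a
POSIX-only sketch, not py-spy itself):

```python
import subprocess
import tempfile

with tempfile.TemporaryFile() as fp:
    fd = fp.fileno()
    # pass_fds keeps fd open across exec; the child addresses it by path
    subprocess.run(
        ["sh", "-c", "echo profiled-output > /dev/fd/%d" % fd],
        pass_fds={fd},
        check=True,
    )
    fp.seek(0)
    print(fp.read())  # b'profiled-output\n'
```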
--- a/mercurial/progress.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/progress.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import threading
import time
--- a/mercurial/pure/base85.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pure/base85.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import struct
@@ -23,7 +24,7 @@
_b85dec[c] = i
-def b85encode(text, pad=False):
+def b85encode(text: bytes, pad: bool = False) -> bytes:
"""encode text in base85 format"""
l = len(text)
r = l % 4
@@ -50,13 +51,15 @@
return out[:olen]
-def b85decode(text):
+def b85decode(text: bytes) -> bytes:
"""decode base85-encoded text"""
if not _b85dec:
_mkb85dec()
l = len(text)
out = []
+ acc = 0
+
for i in range(0, len(text), 5):
chunk = text[i : i + 5]
chunk = pycompat.bytestr(chunk)
--- a/mercurial/pure/bdiff.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pure/bdiff.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,16 +5,23 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import difflib
import re
import struct
+import typing
from typing import (
List,
+ Optional,
Tuple,
)
+from ..interfaces import (
+ modules as intmod,
+)
+
def splitnewlines(text: bytes) -> List[bytes]:
'''like str.splitlines, but only split on newlines.'''
@@ -106,3 +113,11 @@
text = re.sub(b'[ \t\r]+', b' ', text)
text = text.replace(b' \n', b'\n')
return text
+
+
+# In order to adhere to the module protocol, these functions must be visible
+# to the type checker, though they aren't actually implemented by this
+# pure-Python module. Callers are responsible for checking that the
+# implementation is available before using them.
+if typing.TYPE_CHECKING:
+ xdiffblocks: Optional[intmod.BDiffBlocksFnc] = None
--- a/mercurial/pure/charencode.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pure/charencode.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,13 +5,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import array
from .. import pycompat
-def isasciistr(s):
+def isasciistr(s: bytes) -> bool:
try:
s.decode('ascii')
return True
@@ -19,7 +20,7 @@
return False
-def asciilower(s):
+def asciilower(s: bytes) -> bytes:
"""convert a string to lowercase if ASCII
Raises UnicodeDecodeError if non-ASCII characters are found."""
@@ -27,7 +28,7 @@
return s.lower()
-def asciiupper(s):
+def asciiupper(s: bytes) -> bytes:
"""convert a string to uppercase if ASCII
Raises UnicodeDecodeError if non-ASCII characters are found."""
@@ -52,7 +53,7 @@
_jsonmap.extend(pycompat.bytechr(x) for x in range(128, 256))
-def jsonescapeu8fast(u8chars, paranoid):
+def jsonescapeu8fast(u8chars: bytes, paranoid: bool) -> bytes:
"""Convert a UTF-8 byte string to JSON-escaped form (fast path)
Raises ValueError if non-ASCII characters have to be escaped.
@@ -70,7 +71,7 @@
_utf8strict = r'surrogatepass'
-def jsonescapeu8fallback(u8chars, paranoid):
+def jsonescapeu8fallback(u8chars: bytes, paranoid: bool) -> bytes:
"""Convert a UTF-8 byte string to JSON-escaped form (slow path)
Escapes all non-ASCII characters no matter if paranoid is False.
--- a/mercurial/pure/mpatch.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pure/mpatch.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import io
import struct
@@ -106,7 +107,7 @@
try:
p1, p2, l = struct.unpack(b">lll", m.read(12))
except struct.error:
- raise mpatchError(b"patch cannot be decoded")
+ raise mpatchError("patch cannot be decoded")
_pull(new, frags, p1 - last) # what didn't change
_pull([], frags, p2 - p1) # what got deleted
new.append((l, pos + 12)) # what got added
@@ -137,7 +138,7 @@
outlen += length
if bin != binend:
- raise mpatchError(b"patch cannot be decoded")
+ raise mpatchError("patch cannot be decoded")
outlen += orig - last
return outlen
--- a/mercurial/pure/osutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pure/osutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import ctypes
import ctypes.util
--- a/mercurial/pure/parsers.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pure/parsers.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,10 +5,12 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import io
import stat
import struct
+import typing
import zlib
from ..node import (
@@ -16,8 +18,15 @@
sha1nodeconstants,
)
from ..thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .. import (
error,
+ pycompat,
revlogutils,
util,
)
@@ -228,7 +237,7 @@
parentfiledata=(mode, size, (mtime, 0, False)),
)
else:
- raise RuntimeError(b'unknown state: %s' % state)
+ raise RuntimeError('unknown state: %s' % pycompat.sysstr(state))
def set_possibly_dirty(self):
"""Mark a file as "possibly dirty"
@@ -644,7 +653,7 @@
def _check_index(self, i):
if not isinstance(i, int):
- raise TypeError(b"expecting int indexes")
+ raise TypeError("expecting int indexes")
if i < 0 or i >= len(self):
raise IndexError(i)
@@ -687,6 +696,24 @@
p = p[revlog_constants.INDEX_HEADER.size :]
return p
+ def headrevs(self, excluded_revs=None, stop_rev=None):
+ count = len(self)
+ if stop_rev is not None:
+ count = min(count, stop_rev)
+ if not count:
+ return [nullrev]
+ # we won't iterate over filtered revs, so nobody is a head at start
+ ishead = [0] * (count + 1)
+ revs = range(count)
+ if excluded_revs is not None:
+ revs = (r for r in revs if r not in excluded_revs)
+
+ for r in revs:
+ ishead[r] = 1 # I may be a head
+ e = self[r]
+ ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
+ return [r for r, val in enumerate(ishead) if val]
+
class IndexObject(BaseIndexObject):
def __init__(self, data):
@@ -704,7 +731,7 @@
def __delitem__(self, i):
if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
- raise ValueError(b"deleting slices only supports a:-1 with step 1")
+ raise ValueError("deleting slices only supports a:-1 with step 1")
i = i.start
self._check_index(i)
self._stripnodes(i)
@@ -783,12 +810,12 @@
count += 1
off += self.entry_size + s
if off != len(self._data):
- raise ValueError(b"corrupted data")
+ raise ValueError("corrupted data")
return count
def __delitem__(self, i):
if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
- raise ValueError(b"deleting slices only supports a:-1 with step 1")
+ raise ValueError("deleting slices only supports a:-1 with step 1")
i = i.start
self._check_index(i)
self._stripnodes(i)
@@ -841,7 +868,7 @@
raise KeyError
self._check_index(rev)
if rev < self._lgt:
- msg = b"cannot rewrite entries outside of this transaction"
+ msg = "cannot rewrite entries outside of this transaction"
raise KeyError(msg)
else:
entry = list(self[rev])
@@ -911,7 +938,6 @@
)
def _pack_entry(self, rev, entry):
-
base = entry[revlog_constants.ENTRY_DELTA_BASE]
link_rev = entry[revlog_constants.ENTRY_LINK_REV]
assert base == rev, (base, rev)
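
The `headrevs` method added to the pure-Python index above implements the
classic one-pass head computation: every revision starts as a candidate head
and each revision clears its parents' flags. The same algorithm in isolation,
over a plain parents list:

```python
nullrev = -1

def headrevs(parents):
    # parents: list of (p1, p2) revision numbers, nullrev for "no parent"
    count = len(parents)
    if not count:
        return [nullrev]
    ishead = [1] * count + [0]  # trailing slot absorbs nullrev parents
    for p1, p2 in parents:
        ishead[p1] = ishead[p2] = 0  # my parents are not heads
    return [r for r, flag in enumerate(ishead[:count]) if flag]

# linear history 0-1-2 plus a branch 3 off of 1: heads are 2 and 3
print(headrevs([(nullrev, nullrev), (0, nullrev), (1, nullrev), (1, nullrev)]))
# -> [2, 3]
```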
--- a/mercurial/pushkey.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pushkey.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from . import (
bookmarks,
--- a/mercurial/pvec.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pvec.py Sat Oct 26 04:16:00 2024 +0200
@@ -48,6 +48,7 @@
different branches
'''
+from __future__ import annotations
from .node import nullrev
from . import (
--- a/mercurial/pycompat.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/pycompat.py Sat Oct 26 04:16:00 2024 +0200
@@ -8,6 +8,7 @@
This contains aliases to hide python version-specific details from the core.
"""
+from __future__ import annotations
import builtins
import codecs
@@ -348,12 +349,46 @@
raise exc.with_traceback(tb)
+# Copied over from the 3.13 Python stdlib `inspect.cleandoc`, with a couple
+# of removals explained inline.
+# It differs slightly from the 3.8+ version, so we use a single copy to
+# rule out any potential for variation across Python versions.
+def cleandoc(doc):
+ """Clean up indentation from docstrings.
+
+ Any whitespace that can be uniformly removed from the second line
+ onwards is removed."""
+ lines = doc.expandtabs().split('\n')
+
+ # Find minimum indentation of any non-blank lines after first line.
+ margin = sys.maxsize
+ for line in lines[1:]:
+ content = len(line.lstrip(' '))
+ if content:
+ indent = len(line) - content
+ margin = min(margin, indent)
+ # Remove indentation.
+ if lines:
+ lines[0] = lines[0].lstrip(' ')
+ if margin < sys.maxsize:
+ for i in range(1, len(lines)):
+ lines[i] = lines[i][margin:]
+ # Here the upstream *Python* version does newline trimming, but it looks
+ # like the compiler (written in C) does not, so go with what the compiler
+ # does.
+ return '\n'.join(lines)
+
+
def getdoc(obj: object) -> Optional[bytes]:
"""Get docstring as bytes; may be None so gettext() won't confuse it
with _('')"""
doc = builtins.getattr(obj, '__doc__', None)
if doc is None:
return doc
+ if sys.version_info < (3, 13):
+ # Python 3.13+ "cleans up" the docstring at compile time; normalize
+ # this behavior for previous versions
+ doc = cleandoc(doc)
return sysbytes(doc)
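
The effect being normalized: before CPython 3.13, `__doc__` keeps the source
indentation, while 3.13 strips it at compile time. The stdlib's own
`inspect.cleandoc` shows the transformation (it additionally trims blank
lines, which the vendored copy deliberately does not):

```python
import inspect

def f():
    """First line.
        An indented body line."""

# On Python < 3.13, __doc__ still contains the 8-space margin:
print(repr(f.__doc__))
# -> 'First line.\n        An indented body line.'
print(repr(inspect.cleandoc(f.__doc__)))
# -> 'First line.\nAn indented body line.'
```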
--- a/mercurial/rcutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/rcutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
--- a/mercurial/registrar.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/registrar.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from typing import Any, List, Optional, Tuple
from . import (
--- a/mercurial/repair.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/repair.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from .node import (
@@ -241,6 +242,10 @@
del repo.obsstore
repo.invalidatevolatilesets()
+ # NOTE: eventually make a common entry point on localrepo to help
+ # other caches
+ repo.revbranchcache().invalidate(striprev)
+
if tmpbundlefile:
ui.note(_(b"adding branch\n"))
f = vfs.open(tmpbundlefile, b"rb")
--- a/mercurial/repocache.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/repocache.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import gc
--- a/mercurial/repoview.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/repoview.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import copy
import weakref
@@ -309,14 +310,11 @@
# no Rust fast path implemented yet, so just loop in Python
return [self.node(r) for r in self.headrevs()]
- def headrevs(self, revs=None):
+ def headrevs(self, revs=None, stop_rev=None):
if revs is None:
- try:
- return self.index.headrevsfiltered(self.filteredrevs)
- # AttributeError covers non-c-extension environments and
- # old c extensions without filter handling.
- except AttributeError:
- return self._headrevs()
+ return self.index.headrevs(self.filteredrevs, stop_rev)
+ # stop_rev is ignored from here on, so double-check we passed the right argument
+ assert stop_rev is None
revs = self._checknofilteredinrevs(revs)
return super(filteredchangelogmixin, self).headrevs(revs)
--- a/mercurial/requirements.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/requirements.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
# obsolete experimental requirements:
# - manifestv2: An experimental new manifest format that allowed
--- a/mercurial/revlog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlog.py Sat Oct 26 04:16:00 2024 +0200
@@ -12,6 +12,7 @@
and O(changes) merge between branches.
"""
+from __future__ import annotations
import binascii
import collections
@@ -20,9 +21,17 @@
import io
import os
import struct
+import typing
import weakref
import zlib
+from typing import (
+ Iterable,
+ Iterator,
+ Optional,
+ Tuple,
+)
+
# import stuff from node for others to import from revlog
from .node import (
bin,
@@ -69,6 +78,12 @@
REVIDX_RAWTEXT_CHANGING_FLAGS,
)
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
ancestor,
dagop,
@@ -79,10 +94,10 @@
revlogutils,
templatefilters,
util,
+ vfs as vfsmod,
)
from .interfaces import (
repository,
- util as interfaceutil,
)
from .revlogutils import (
deltas as deltautil,
@@ -173,9 +188,8 @@
)
-@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
-class revlogrevisiondelta:
+class revlogrevisiondelta: # (repository.irevisiondelta)
node = attr.ib()
p1node = attr.ib()
p2node = attr.ib()
@@ -189,12 +203,11 @@
linknode = attr.ib(default=None)
-@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
-class revlogproblem:
- warning = attr.ib(default=None)
- error = attr.ib(default=None)
- node = attr.ib(default=None)
+class revlogproblem: # (repository.iverifyproblem)
+ warning = attr.ib(default=None, type=Optional[bytes])
+ error = attr.ib(default=None, type=Optional[bytes])
+ node = attr.ib(default=None, type=Optional[bytes])
def parse_index_v1(data, inline):
@@ -221,7 +234,6 @@
index, cache = parsers.parse_index_devel_nodemap(data, inline)
return index, cache
-
else:
parse_index_v1_nodemap = None
@@ -352,9 +364,11 @@
boundaries are arbitrary and based on what we can delegate to Rust.
"""
+ opener: vfsmod.vfs
+
def __init__(
self,
- opener,
+ opener: vfsmod.vfs,
index,
index_file,
data_file,
@@ -558,7 +572,7 @@
c = self._get_decompressor(t)
return c.decompress
- def _get_decompressor(self, t):
+ def _get_decompressor(self, t: bytes):
try:
compressor = self._decompressors[t]
except KeyError:
@@ -574,7 +588,7 @@
)
return compressor
- def compress(self, data):
+ def compress(self, data: bytes) -> Tuple[bytes, bytes]:
"""Generate a possibly-compressed representation of data."""
if not data:
return b'', data
@@ -589,7 +603,7 @@
return b'', data
return b'u', data
- def decompress(self, data):
+ def decompress(self, data: bytes):
"""Decompress a revlog chunk.
The chunk is expected to begin with a header identifying the
@@ -905,9 +919,7 @@
"""Obtain decompressed chunks for the specified revisions.
Accepts an iterable of numeric revisions that are assumed to be in
- ascending order. Also accepts an optional already-open file handle
- to be used for reading. If used, the seek position of the file will
- not be preserved.
+ ascending order.
This function is similar to calling ``self._chunk()`` multiple times,
but is faster.
@@ -991,10 +1003,10 @@
chunks.sort()
return [x[1] for x in chunks]
- def raw_text(self, node, rev):
+ def raw_text(self, node, rev) -> bytes:
"""return the possibly unvalidated rawtext for a revision
- returns (rev, rawtext, validated)
+ returns rawtext
"""
# revision in the cache (could be useful to apply delta)
@@ -1035,7 +1047,7 @@
rawtext = mdiff.patches(basetext, bins)
del basetext # let us have a chance to free memory early
- return (rev, rawtext, False)
+ return rawtext
def sidedata(self, rev, sidedata_end):
"""Return the sidedata for a given revision number."""
@@ -1281,6 +1293,9 @@
"""
_flagserrorclass = error.RevlogError
+ _inner: "_InnerRevlog"
+
+ opener: vfsmod.vfs
@staticmethod
def is_inline_index(header_bytes):
@@ -1296,9 +1311,11 @@
features = FEATURES_BY_VERSION[_format_version]
return features[b'inline'](_format_flags)
+ _docket_file: Optional[bytes]
+
def __init__(
self,
- opener,
+ opener: vfsmod.vfs,
target,
radix,
postfix=None, # only exist for `tmpcensored` now
@@ -1794,7 +1811,7 @@
def __len__(self):
return len(self.index)
- def __iter__(self):
+ def __iter__(self) -> Iterator[int]:
return iter(range(len(self)))
def revs(self, start=0, stop=None):
@@ -1832,7 +1849,7 @@
else:
nodemaputil.setup_persistent_nodemap(transaction, self)
- def clearcaches(self):
+ def clearcaches(self, clear_persisted_data: bool = False) -> None:
"""Clear in-memory caches"""
self._chainbasecache.clear()
self._inner.clear_cache()
@@ -2346,12 +2363,10 @@
assert heads
return (orderedout, roots, heads)
- def headrevs(self, revs=None):
+ def headrevs(self, revs=None, stop_rev=None):
if revs is None:
- try:
- return self.index.headrevs()
- except AttributeError:
- return self._headrevs()
+ return self.index.headrevs(None, stop_rev)
+ assert stop_rev is None
if rustdagop is not None and self.index.rust_ext_compat:
return rustdagop.headrevs(self.index, revs)
return dagop.headrevs(revs, self._uncheckedparentrevs)
@@ -2365,19 +2380,6 @@
def computephases(self, roots):
return self.index.computephasesmapsets(roots)
- def _headrevs(self):
- count = len(self)
- if not count:
- return [nullrev]
- # we won't iter over filtered rev so nobody is a head at start
- ishead = [0] * (count + 1)
- index = self.index
- for r in self:
- ishead[r] = 1 # I may be an head
- e = index[r]
- ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
- return [r for r, val in enumerate(ishead) if val]
-
def _head_node_ids(self):
try:
return self.index.head_node_ids()
@@ -2729,7 +2731,8 @@
if rev is None:
rev = self.rev(node)
- return self._inner.raw_text(node, rev)
+ text = self._inner.raw_text(node, rev)
+ return (rev, text, False)
def _revisiondata(self, nodeorrev, raw=False):
# deal with <nodeorrev> argument type
@@ -2777,6 +2780,8 @@
def _sidedata(self, rev):
"""Return the sidedata for a given revision number."""
+ if self._sidedatafile is None:
+ return {}
sidedata_end = None
if self._docket is not None:
sidedata_end = self._docket.sidedata_end
@@ -3081,7 +3086,7 @@
sidedata=sidedata,
)
- def compress(self, data):
+ def compress(self, data: bytes) -> Tuple[bytes, bytes]:
return self._inner.compress(data)
def decompress(self, data):
@@ -3125,7 +3130,7 @@
raise error.RevlogError(
_(b"%s: attempt to add wdir revision") % self.display_id
)
- if self._inner._writinghandles is None:
+ if not self._inner.is_writing:
msg = b'adding revision outside `revlog._writing` context'
raise error.ProgrammingError(msg)
@@ -3870,7 +3875,7 @@
else:
rewrite.v2_censor(self, tr, censor_nodes, tombstone)
- def verifyintegrity(self, state):
+ def verifyintegrity(self, state) -> Iterable[revlogproblem]:
"""Verifies the integrity of the revlog.
Yields ``revlogproblem`` instances describing problems that are
--- a/mercurial/revlogutils/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,8 +5,17 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
+import typing
from ..thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from ..interfaces import repository
# See mercurial.revlogutils.constants for doc
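
This `TYPE_CHECKING` re-import shows up in several files in this series. At
runtime the vendored `thirdparty.attr` copy is used, but a static checker
resolves the second import to the real, annotated `attr` package, since both
expose the same `attr.s`/`attr.ib` API. Reduced to its essence (assumes
Mercurial is importable):

```python
import typing

from mercurial.thirdparty import attr  # what actually runs

if typing.TYPE_CHECKING:
    # noinspection PyPackageRequirements
    import attr  # what pytype/pyright analyze: the real, annotated package

@attr.s(slots=True)
class Problem:
    # the `type=` keyword feeds the checker without runtime cost
    warning = attr.ib(default=None, type=bytes)
    error = attr.ib(default=None, type=bytes)
```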
--- a/mercurial/revlogutils/concurrency_checker.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/concurrency_checker.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from ..i18n import _
from .. import error
--- a/mercurial/revlogutils/constants.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/constants.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
# GNU General Public License version 2 or any later version.
"""Helper class to compute deltas stored inside revlogs"""
+from __future__ import annotations
import struct
--- a/mercurial/revlogutils/debug.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/debug.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
import string
--- a/mercurial/revlogutils/deltas.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/deltas.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,10 +7,12 @@
# GNU General Public License version 2 or any later version.
"""Helper class to compute deltas stored inside revlogs"""
+from __future__ import annotations
import abc
import collections
import struct
+import typing
# import stuff from node for others to import from revlog
from ..node import nullrev
@@ -31,6 +33,11 @@
from ..thirdparty import attr
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .. import (
error,
mdiff,
@@ -428,7 +435,6 @@
# Cut the revs at collected indices
previdx = 0
for idx in selected:
-
chunk = _trimchunk(revlog, revs, previdx, idx)
if chunk:
yield chunk
--- a/mercurial/revlogutils/docket.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/docket.py Sat Oct 26 04:16:00 2024 +0200
@@ -15,6 +15,7 @@
#
# * a data file, containing variable width data for these revisions,
+from __future__ import annotations
import os
import random
--- a/mercurial/revlogutils/flagutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/flagutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from ..i18n import _
--- a/mercurial/revlogutils/nodemap.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/nodemap.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
import struct
@@ -553,7 +554,7 @@
Children blocks are always yield before their parent block.
"""
- for (__, item) in sorted(block.items()):
+ for __, item in sorted(block.items()):
if isinstance(item, dict):
for sub_block in _walk_trie(item):
yield sub_block
--- a/mercurial/revlogutils/randomaccessfile.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/randomaccessfile.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import contextlib
from ..i18n import _
@@ -171,11 +173,7 @@
def read_chunk(self, offset, length):
"""Read a chunk of bytes from the file.
- Accepts an absolute offset, length to read, and an optional existing
- file handle to read from.
-
- If an existing file handle is passed, it will be seeked and the
- original seek position will NOT be restored.
+ Accepts an absolute offset and a length to read.
Returns a str or buffer of raw byte data.
--- a/mercurial/revlogutils/revlogv0.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/revlogv0.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from ..node import sha1nodeconstants
from .constants import (
@@ -110,6 +111,24 @@
)
return INDEX_ENTRY_V0.pack(*e2)
+ def headrevs(self, excluded_revs=None, stop_rev=None):
+ count = len(self)
+ if stop_rev is not None:
+ count = min(count, stop_rev)
+ if not count:
+ return [node.nullrev]
+ # we won't iterate over filtered revs, so nobody is a head at start
+ ishead = [0] * (count + 1)
+ revs = range(count)
+ if excluded_revs is not None:
+ revs = (r for r in revs if r not in excluded_revs)
+
+ for r in revs:
+ ishead[r] = 1 # I may be a head
+ e = self[r]
+ ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
+ return [r for r, val in enumerate(ishead) if val]
+
def parse_index_v0(data, inline):
s = INDEX_ENTRY_V0.size
--- a/mercurial/revlogutils/rewrite.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/rewrite.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import binascii
import contextlib
import os
@@ -258,7 +260,6 @@
# this revision is empty, we can delta against nullrev
rewritten_entries[rev] = (nullrev, 0, 0, COMP_MODE_PLAIN)
else:
-
text = revlog.rawdata(rev)
info = revlogutils.revisioninfo(
node=entry[ENTRY_NODE_ID],
@@ -614,15 +615,20 @@
rl = fl._revlog
is_censored = lambda: rl.iscensored(filerev)
delta_base = lambda: rl.deltaparent(filerev)
- delta = lambda: rl._chunk(filerev)
+ delta = lambda: rl._inner._chunk(filerev)
full_text = lambda: rl.rawdata(filerev)
parent_revs = lambda: rl.parentrevs(filerev)
+ # This function is used by repair_issue6528, but not by
+ # filter_delta_issue6528. As such, we do not want to trust
+ # parent revisions of the delta base to decide whether
+ # the delta base has metadata.
return _is_revision_affected_fast_inner(
is_censored,
delta_base,
delta,
full_text,
parent_revs,
+ None, # don't trust the parent revisions
filerev,
metadata_cache,
)
@@ -634,6 +640,7 @@
delta,
full_text,
parent_revs,
+ deltabase_parentrevs,
filerev,
metadata_cache,
):
@@ -652,21 +659,36 @@
p1, p2 = parent_revs()
if p1 == nullrev or p2 != nullrev:
+ metadata_cache[filerev] = True
return False
delta_parent = delta_base()
parent_has_metadata = metadata_cache.get(delta_parent)
if parent_has_metadata is None:
- return _is_revision_affected_inner(
- full_text,
- parent_revs,
- filerev,
- metadata_cache,
- )
+ if deltabase_parentrevs is not None:
+ deltabase_parentrevs = deltabase_parentrevs()
+ if deltabase_parentrevs == (nullrev, nullrev):
+ # Need to check the content itself as there is no flag.
+ parent_has_metadata = None
+ elif deltabase_parentrevs[0] == nullrev:
+ # Second parent is !null, assume repository is correct
+ # and has flagged this file revision as having metadata.
+ parent_has_metadata = True
+ elif deltabase_parentrevs[1] == nullrev:
+ # First parent is !null, so assume it has no metadata.
+ parent_has_metadata = False
+ if parent_has_metadata is None:
+ return _is_revision_affected_inner(
+ full_text,
+ parent_revs,
+ filerev,
+ metadata_cache,
+ )
chunk = delta()
if not len(chunk):
# No diff for this revision
+ metadata_cache[filerev] = parent_has_metadata
return parent_has_metadata
header_length = 12
@@ -734,7 +756,7 @@
def filter_delta_issue6528(revlog, deltas_iter):
"""filter incomind deltas to repaire issue 6528 on the fly"""
- metadata_cache = {}
+ metadata_cache = {nullrev: False}
deltacomputer = deltas.deltacomputer(revlog)
@@ -763,9 +785,9 @@
p2_rev = revlog.rev(p2_node)
is_censored = lambda: bool(flags & REVIDX_ISCENSORED)
- delta_base = lambda: revlog.rev(delta_base)
delta_base = lambda: base_rev
parent_revs = lambda: (p1_rev, p2_rev)
+ deltabase_parentrevs = lambda: revlog.parentrevs(base_rev)
def full_text():
# note: being able to reuse the full text computation in the
@@ -791,6 +813,7 @@
lambda: delta,
full_text,
parent_revs,
+ deltabase_parentrevs,
rev,
metadata_cache,
)
@@ -845,7 +868,7 @@
# Set of filerevs (or hex filenodes if `to_report`) that need fixing
to_fix = set()
- metadata_cache = {}
+ metadata_cache = {nullrev: False}
for filerev in fl.revs():
affected = _is_revision_affected_fast(
repo, fl, filerev, metadata_cache
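
The `deltabase_parentrevs` shortcut relies on how filelogs flag copy metadata:
a revision carrying metadata stores null as p1 and the real parent as p2.
Pulled out as a hedged standalone rule (names hypothetical):

```python
nullrev = -1

def metadata_from_parents(p1, p2):
    """Return True/False when the parents alone decide whether a filelog
    revision carries copy metadata, or None when the content itself must
    be inspected (the fallback path above)."""
    if (p1, p2) == (nullrev, nullrev):
        return None   # no signal either way
    if p1 == nullrev:
        return True   # real parent moved to p2: flagged as metadata
    if p2 == nullrev:
        return False  # ordinary revision, no metadata
    return None       # both parents set (merge): no shortcut taken

print(metadata_from_parents(nullrev, 7))  # True
print(metadata_from_parents(3, nullrev))  # False
```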
--- a/mercurial/revlogutils/sidedata.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revlogutils/sidedata.py Sat Oct 26 04:16:00 2024 +0200
@@ -30,6 +30,7 @@
the concept.
"""
+from __future__ import annotations
import collections
import struct
--- a/mercurial/revset.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revset.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import binascii
import functools
--- a/mercurial/revsetlang.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/revsetlang.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import string
--- a/mercurial/rewriteutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/rewriteutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
--- a/mercurial/scmposix.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/scmposix.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import array
import errno
import fcntl
--- a/mercurial/scmutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/scmutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import binascii
import errno
@@ -13,8 +14,20 @@
import posixpath
import re
import subprocess
+import typing
import weakref
+from typing import (
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Set,
+ Tuple,
+)
+
from .i18n import _
from .node import (
bin,
@@ -24,6 +37,12 @@
wdirrev,
)
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
copies as copiesmod,
encoding,
@@ -39,6 +58,7 @@
revsetlang,
similar,
smartset,
+ typelib,
url,
util,
vfs,
@@ -55,6 +75,11 @@
else:
from . import scmposix as scmplatform
+if typing.TYPE_CHECKING:
+ from . import (
+ ui as uimod,
+ )
+
parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')
@@ -69,15 +94,15 @@
relevant to the working copy.
"""
- modified = attr.ib(default=attr.Factory(list))
- added = attr.ib(default=attr.Factory(list))
- removed = attr.ib(default=attr.Factory(list))
- deleted = attr.ib(default=attr.Factory(list))
- unknown = attr.ib(default=attr.Factory(list))
- ignored = attr.ib(default=attr.Factory(list))
- clean = attr.ib(default=attr.Factory(list))
-
- def __iter__(self):
+ modified = attr.ib(default=attr.Factory(list), type=List[bytes])
+ added = attr.ib(default=attr.Factory(list), type=List[bytes])
+ removed = attr.ib(default=attr.Factory(list), type=List[bytes])
+ deleted = attr.ib(default=attr.Factory(list), type=List[bytes])
+ unknown = attr.ib(default=attr.Factory(list), type=List[bytes])
+ ignored = attr.ib(default=attr.Factory(list), type=List[bytes])
+ clean = attr.ib(default=attr.Factory(list), type=List[bytes])
+
+ def __iter__(self) -> Iterator[List[bytes]]:
yield self.modified
yield self.added
yield self.removed
@@ -86,7 +111,7 @@
yield self.ignored
yield self.clean
- def __repr__(self):
+ def __repr__(self) -> str:
return (
r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
r'unknown=%s, ignored=%s, clean=%s>'
@@ -119,7 +144,7 @@
yield subpath, ctx2.nullsub(subpath, ctx1)
-def nochangesfound(ui, repo, excluded=None):
+def nochangesfound(ui: "uimod.ui", repo, excluded=None) -> None:
"""Report no changes for push/pull, excluded is None or a list of
nodes excluded from the push/pull.
"""
@@ -139,7 +164,7 @@
ui.status(_(b"no changes found\n"))
-def callcatch(ui, func):
+def callcatch(ui: "uimod.ui", func: Callable[[], int]) -> int:
"""call func() with global exception handling
return func() if no exception happens. otherwise do some error handling
@@ -261,7 +286,7 @@
return coarse_exit_code
-def checknewlabel(repo, lbl, kind):
+def checknewlabel(repo, lbl: bytes, kind) -> None:
# Do not use the "kind" parameter in ui output.
# It makes strings difficult to translate.
if lbl in [b'tip', b'.', b'null']:
@@ -287,7 +312,7 @@
)
-def checkfilename(f):
+def checkfilename(f: bytes) -> None:
'''Check that the filename f is an acceptable filename for a tracked file'''
if b'\r' in f or b'\n' in f:
raise error.InputError(
@@ -296,7 +321,7 @@
)
-def checkportable(ui, f):
+def checkportable(ui: "uimod.ui", f: bytes) -> None:
'''Check if filename f is portable and warn or abort depending on config'''
checkfilename(f)
abort, warn = checkportabilityalert(ui)
@@ -309,7 +334,7 @@
ui.warn(_(b"warning: %s\n") % msg)
-def checkportabilityalert(ui):
+def checkportabilityalert(ui: "uimod.ui") -> Tuple[bool, bool]:
"""check if the user's config requests nothing, a warning, or abort for
non-portable filenames"""
val = ui.config(b'ui', b'portablefilenames')
@@ -325,7 +350,7 @@
class casecollisionauditor:
- def __init__(self, ui, abort, dirstate):
+ def __init__(self, ui: "uimod.ui", abort: bool, dirstate) -> None:
self._ui = ui
self._abort = abort
allfiles = b'\0'.join(dirstate)
@@ -336,7 +361,7 @@
# same filename twice.
self._newfiles = set()
- def __call__(self, f):
+ def __call__(self, f: bytes) -> None:
if f in self._newfiles:
return
fl = encoding.lower(f)
@@ -349,7 +374,9 @@
self._newfiles.add(f)
-def combined_filtered_and_obsolete_hash(repo, maxrev, needobsolete=False):
+def combined_filtered_and_obsolete_hash(
+ repo, maxrev, needobsolete: bool = False
+):
"""build hash of filtered revisions in the current repoview.
Multiple caches perform up-to-date validation by checking that the
@@ -430,7 +457,7 @@
return (filtered_set, obs_set)
-def _hash_revs(revs):
+def _hash_revs(revs: Iterable[int]) -> bytes:
"""return a hash from a list of revision numbers"""
s = hashutil.sha1()
for rev in revs:
@@ -438,7 +465,12 @@
return s.digest()
-def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
+def walkrepos(
+ path,
+ followsym: bool = False,
+ seen_dirs: Optional[List[bytes]] = None,
+ recurse: bool = False,
+) -> Iterable[bytes]:
"""yield every hg repository under path, always recursively.
The recurse flag will only control recursion into repo working dirs"""
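
For orientation, a rough sketch of what a walkrepos-style scan does; the real implementation additionally tracks `seen_dirs` and honors `followsym` to stay safe around symlinks, and the helper name below is invented:

    import os
    from typing import Iterator

    def find_hg_repos(path: bytes, recurse: bool = False) -> Iterator[bytes]:
        for root, dirs, _files in os.walk(path):
            if b'.hg' in dirs:
                yield root
                if not recurse:
                    dirs[:] = []  # stop descending into the working dir
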
@@ -487,7 +519,7 @@
dirs[:] = newdirs
-def binnode(ctx):
+def binnode(ctx) -> bytes:
"""Return binary node id for a given basectx"""
node = ctx.node()
if node is None:
@@ -495,7 +527,7 @@
return node
-def intrev(ctx):
+def intrev(ctx) -> int:
"""Return integer for a given basectx that can be used in comparison or
arithmetic operation"""
rev = ctx.rev()
@@ -504,14 +536,14 @@
return rev
-def formatchangeid(ctx):
+def formatchangeid(ctx) -> bytes:
"""Format changectx as '{rev}:{node|formatnode}', which is the default
template provided by logcmdutil.changesettemplater"""
repo = ctx.repo()
return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
-def formatrevnode(ui, rev, node):
+def formatrevnode(ui: "uimod.ui", rev: int, node: bytes) -> bytes:
"""Format given revision and node depending on the current verbosity"""
if ui.debugflag:
hexfunc = hex
@@ -520,7 +552,7 @@
return b'%d:%s' % (rev, hexfunc(node))
-def resolvehexnodeidprefix(repo, prefix):
+def resolvehexnodeidprefix(repo, prefix: bytes):
if prefix.startswith(b'x'):
prefix = prefix[1:]
try:
@@ -552,7 +584,7 @@
return node
-def mayberevnum(repo, prefix):
+def mayberevnum(repo, prefix: bytes) -> bool:
"""Checks if the given prefix may be mistaken for a revision number"""
try:
i = int(prefix)
@@ -567,7 +599,7 @@
return False
-def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
+def shortesthexnodeidprefix(repo, node: bytes, minlength: int = 1, cache=None):
"""Find the shortest unambiguous prefix that matches hexnode.
If "cache" is not None, it must be a dictionary that can be used for
@@ -579,7 +611,7 @@
minlength = max(minlength, 1)
- def disambiguate(prefix):
+ def disambiguate(prefix: bytes):
"""Disambiguate against revnums."""
if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
if mayberevnum(repo, prefix):
@@ -644,7 +676,7 @@
raise error.RepoLookupError()
-def isrevsymbol(repo, symbol):
+def isrevsymbol(repo, symbol: bytes) -> bool:
"""Checks if a symbol exists in the repo.
See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
@@ -657,7 +689,7 @@
return False
-def revsymbol(repo, symbol):
+def revsymbol(repo, symbol: bytes):
"""Returns a context given a single revision symbol (as string).
This is similar to revsingle(), but accepts only a single revision symbol,
@@ -724,13 +756,12 @@
raise _filterederror(repo, symbol)
-def _filterederror(repo, changeid):
+def _filterederror(repo, changeid: bytes) -> error.FilteredRepoLookupError:
"""build an exception to be raised about a filtered changeid
This is extracted into a function to help extensions (e.g. evolve) to
experiment with various message variants."""
if repo.filtername.startswith(b'visible'):
-
# Check if the changeset is obsolete
unfilteredrepo = repo.unfiltered()
ctx = revsymbol(unfilteredrepo, changeid)
@@ -760,7 +791,7 @@
return repo[l.last()]
-def _pairspec(revspec):
+def _pairspec(revspec) -> bool:
tree = revsetlang.parse(revspec)
return tree and tree[0] in (
b'range',
@@ -825,7 +856,9 @@
return repo.anyrevs(allspecs, user=True, localalias=localalias)
-def increasingwindows(windowsize=8, sizelimit=512):
+def increasingwindows(
+ windowsize: int = 8, sizelimit: int = 512
+) -> Iterable[int]:
while True:
yield windowsize
if windowsize < sizelimit:
@@ -891,7 +924,11 @@
return parents
-def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
+def getuipathfn(
+ repo,
+ legacyrelativevalue: bool = False,
+ forcerelativevalue: Optional[bool] = None,
+) -> typelib.UiPathFn:
"""Return a function that produced paths for presenting to the user.
The returned function takes a repo-relative path and produces a path
@@ -931,12 +968,14 @@
return util.localpath
-def subdiruipathfn(subpath, uipathfn):
+def subdiruipathfn(
+ subpath: bytes, uipathfn: typelib.UiPathFn
+) -> typelib.UiPathFn:
'''Create a new uipathfn that treats the file as relative to subpath.'''
return lambda f: uipathfn(posixpath.join(subpath, f))
-def anypats(pats, opts):
+def anypats(pats, opts) -> bool:
"""Checks if any patterns, including --include and --exclude were given.
Some commands (e.g. addremove) use this condition for deciding whether to
@@ -945,7 +984,7 @@
return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
-def expandpats(pats):
+def expandpats(pats: Iterable[bytes]) -> List[bytes]:
"""Expand bare globs when running on windows.
On posix we assume it has already been done by sh."""
if not util.expandglobs:
@@ -966,7 +1005,12 @@
def matchandpats(
- ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
+ ctx,
+ pats=(),
+ opts=None,
+ globbed: bool = False,
+ default: bytes = b'relpath',
+ badfn=None,
):
"""Return a matcher and the patterns that were used.
The matcher will warn about bad matches, unless an alternate badfn callback
@@ -999,7 +1043,12 @@
def match(
- ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
+ ctx,
+ pats=(),
+ opts=None,
+ globbed: bool = False,
+ default: bytes = b'relpath',
+ badfn=None,
):
'''Return a matcher that will warn about bad matches.'''
return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
@@ -1010,12 +1059,12 @@
return matchmod.always()
-def matchfiles(repo, files, badfn=None):
+def matchfiles(repo, files, badfn=None) -> matchmod.exactmatcher:
'''Return a matcher that will efficiently match exactly these files.'''
return matchmod.exact(files, badfn=badfn)
-def parsefollowlinespattern(repo, rev, pat, msg):
+def parsefollowlinespattern(repo, rev, pat: bytes, msg: bytes) -> bytes:
"""Return a file name from `pat` pattern suitable for usage in followlines
logic.
"""
@@ -1030,7 +1079,7 @@
return files[0]
-def getorigvfs(ui, repo):
+def getorigvfs(ui: "uimod.ui", repo):
"""return a vfs suitable to save 'orig' file
return None if no special directory is configured"""
@@ -1040,7 +1089,7 @@
return vfs.vfs(repo.wvfs.join(origbackuppath))
-def backuppath(ui, repo, filepath):
+def backuppath(ui: "uimod.ui", repo, filepath: bytes) -> bytes:
"""customize where working copy backup files (.orig files) are created
Fetch user defined path from config file: [ui] origbackuppath = <path>
@@ -1083,7 +1132,7 @@
self._torev = repo.changelog.rev
self._revcontains = revcontainer.__contains__
- def __contains__(self, node):
+ def __contains__(self, node) -> bool:
return self._revcontains(self._torev(node))
@@ -1096,7 +1145,7 @@
fixphase=False,
targetphase=None,
backup=True,
-):
+) -> None:
"""do common cleanups when old nodes are replaced by new nodes
That includes writing obsmarkers or stripping nodes, and moving bookmarks.
@@ -1270,7 +1319,14 @@
)
-def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
+def addremove(
+ repo,
+ matcher,
+ prefix: bytes,
+ uipathfn: typelib.UiPathFn,
+ opts=None,
+ open_tr=None,
+) -> int:
if opts is None:
opts = {}
m = matcher
@@ -1303,7 +1359,7 @@
rejected = []
- def badfn(f, msg):
+ def badfn(f: bytes, msg: bytes) -> None:
if f in m.files():
m.bad(f, msg)
rejected.append(f)
@@ -1341,7 +1397,7 @@
return ret
-def marktouched(repo, files, similarity=0.0):
+def marktouched(repo, files, similarity: float = 0.0) -> int:
"""Assert that files have somehow been operated upon. files are relative to
the repo root."""
m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
@@ -1376,7 +1432,9 @@
return 0
-def _interestingfiles(repo, matcher):
+def _interestingfiles(
+ repo, matcher
+) -> Tuple[List[bytes], List[bytes], List[bytes], List[bytes], List[bytes]]:
"""Walk dirstate with matcher, looking for files that addremove would care
about.
@@ -1412,7 +1470,9 @@
return added, unknown, deleted, removed, forgotten
-def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
+def _findrenames(
+ repo, matcher, added, removed, similarity, uipathfn: typelib.UiPathFn
+) -> Dict[bytes, bytes]:
'''Find renames from removed files to added ones.'''
renames = {}
if similarity > 0:
@@ -1435,7 +1495,7 @@
return renames
-def _markchanges(repo, unknown, deleted, renames):
+def _markchanges(repo, unknown, deleted, renames) -> None:
"""Marks the files in unknown as added, the files in deleted as removed,
and the files in renames as copied."""
wctx = repo[None]
@@ -1518,7 +1578,15 @@
return copiesfn
-def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
+def dirstatecopy(
+ ui: "uimod.ui",
+ repo,
+ wctx,
+ src,
+ dst,
+ dryrun: bool = False,
+ cwd: Optional[bytes] = None,
+) -> None:
"""Update the dirstate to reflect the intent of copying src to dst. For
different reasons it might not end with dst being marked as copied from src.
"""
@@ -1543,7 +1611,7 @@
wctx.copy(origsrc, dst)
-def movedirstate(repo, newctx, match=None):
+def movedirstate(repo, newctx, match=None) -> None:
"""Move the dirstate to newctx and adjust it as necessary.
A matcher can be provided as an optimization. It is probably a bug to pass
@@ -1596,12 +1664,12 @@
return requirements, None
-def istreemanifest(repo):
+def istreemanifest(repo) -> bool:
"""returns whether the repository is using treemanifest or not"""
return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
-def writereporequirements(repo, requirements=None):
+def writereporequirements(repo, requirements=None, maywritestore=True) -> None:
"""writes requirements for the repo
Requirements are written to .hg/requires and .hg/store/requires based
@@ -1614,20 +1682,41 @@
if wcreq is not None:
writerequires(repo.vfs, wcreq)
if storereq is not None:
- writerequires(repo.svfs, storereq)
+ writerequires(repo.svfs, storereq, maywrite=maywritestore)
elif repo.ui.configbool(b'format', b'usestore'):
# only remove store requires if we are using store
- repo.svfs.tryunlink(b'requires')
-
-
-def writerequires(opener, requirements):
- with opener(b'requires', b'w', atomictemp=True) as fp:
- for r in sorted(requirements):
- fp.write(b"%s\n" % r)
+ if maywritestore:
+ repo.svfs.tryunlink(b'requires')
+
+
+def readrequires(vfs, allowmissing):
+ """reads the require file present at root of this vfs
+ and return a set of requirements
+
+ If allowmissing is True, we suppress FileNotFoundError if raised"""
+ # requires file contains a newline-delimited list of
+ # features/capabilities the opener (us) must have in order to use
+ # the repository. This file was introduced in Mercurial 0.9.2,
+ # which means very old repositories may not have one. We assume
+ # a missing file translates to no requirements.
+ read = vfs.tryread if allowmissing else vfs.read
+ return set(read(b'requires').splitlines())
+
+
+def writerequires(opener, requirements, maywrite=True) -> None:
+ on_disk = readrequires(opener, True)
+ if on_disk != set(requirements):
+ if not maywrite:
+ raise error.Abort(_(b"store requirements are not as expected"))
+ with opener(b'requires', b'w', atomictemp=True) as fp:
+ for r in sorted(requirements):
+ fp.write(b"%s\n" % r)
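
The new `readrequires`/`writerequires` pair treats the requires file as a newline-delimited set of feature names and, with `maywrite=False`, aborts instead of rewriting a store copy that has drifted. A plain-file sketch of the format, without Mercurial's vfs layer (paths and names illustrative):

    from typing import Set

    def read_requires(path: bytes, allow_missing: bool = True) -> Set[bytes]:
        try:
            with open(path, 'rb') as fp:
                return set(fp.read().splitlines())
        except FileNotFoundError:
            if allow_missing:
                return set()  # very old repos may not have the file
            raise

    def write_requires(path: bytes, requirements: Set[bytes]) -> None:
        with open(path, 'wb') as fp:
            for r in sorted(requirements):
                fp.write(b"%s\n" % r)
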
class filecachesubentry:
- def __init__(self, path, stat):
+ _cacheable: Optional[bool] = None
+
+ def __init__(self, path, stat: bool):
self.path = path
self.cachestat = None
self._cacheable = None
@@ -1641,18 +1730,18 @@
# None means we don't know yet
self._cacheable = None
- def refresh(self):
+ def refresh(self) -> None:
if self.cacheable():
self.cachestat = filecachesubentry.stat(self.path)
- def cacheable(self):
+ def cacheable(self) -> bool:
if self._cacheable is not None:
return self._cacheable
# we don't know yet, assume it is for now
return True
- def changed(self):
+ def changed(self) -> bool:
# no point in going further if we can't cache it
if not self.cacheable():
return True
@@ -1674,27 +1763,30 @@
return False
@staticmethod
- def stat(path):
+ def stat(path: bytes) -> Optional[typelib.CacheStat]:
+ # TODO: have a cleaner approach on the httpstaticrepo side
+ if path.startswith(b'https://') or path.startswith(b'http://'):
+ return util.uncacheable_cachestat()
try:
return util.cachestat(path)
except FileNotFoundError:
- pass
+ return None
class filecacheentry:
- def __init__(self, paths, stat=True):
+ def __init__(self, paths, stat: bool = True) -> None:
self._entries = []
for path in paths:
self._entries.append(filecachesubentry(path, stat))
- def changed(self):
+ def changed(self) -> bool:
'''true if any entry has changed'''
for entry in self._entries:
if entry.changed():
return True
return False
- def refresh(self):
+ def refresh(self) -> None:
for entry in self._entries:
entry.refresh()
@@ -1725,13 +1817,15 @@
remove the ``filecacheentry``.
"""
- def __init__(self, *paths):
+ paths: Tuple[bytes, ...]
+
+ def __init__(self, *paths: bytes) -> None:
self.paths = paths
def tracked_paths(self, obj):
return [self.join(obj, path) for path in self.paths]
- def join(self, obj, fname):
+ def join(self, obj, fname: bytes):
"""Used to compute the runtime path of a cached file.
Users should subclass filecache and provide their own version of this
@@ -1792,7 +1886,7 @@
obj.__dict__[self.sname] = value # update copy returned by obj.x
-def extdatasource(repo, source):
+def extdatasource(repo, source: bytes):
"""Gather a map of rev -> value dict from the specified source
A source spec is treated as a URL, with a special case shell: type
@@ -1861,7 +1955,21 @@
class progress:
- def __init__(self, ui, updatebar, topic, unit=b"", total=None):
+ ui: "uimod.ui"
+ pos: Optional[int] # None once complete
+ topic: bytes
+ unit: bytes
+ total: Optional[int]
+ debug: bool
+
+ def __init__(
+ self,
+ ui: "uimod.ui",
+ updatebar,
+ topic: bytes,
+ unit: bytes = b"",
+ total: Optional[int] = None,
+ ) -> None:
self.ui = ui
self.pos = 0
self.topic = topic
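
The class-level annotations added to `progress` declare each attribute's type once, including the `pos` completion sentinel. A tiny sketch of the same convention (names invented):

    from typing import Optional

    class counter:
        pos: Optional[int]  # None once complete
        total: Optional[int]

        def __init__(self, total: Optional[int] = None) -> None:
            self.pos = 0
            self.total = total

        def complete(self) -> None:
            self.pos = None  # matches the annotated sentinel
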
@@ -1876,7 +1984,9 @@
def __exit__(self, exc_type, exc_value, exc_tb):
self.complete()
- def update(self, pos, item=b"", total=None):
+ def update(
+ self, pos: int, item: bytes = b"", total: Optional[int] = None
+ ) -> None:
assert pos is not None
if total:
self.total = total
@@ -1885,16 +1995,18 @@
if self.debug:
self._printdebug(item)
- def increment(self, step=1, item=b"", total=None):
+ def increment(
+ self, step: int = 1, item: bytes = b"", total: Optional[int] = None
+ ) -> None:
self.update(self.pos + step, item, total)
- def complete(self):
+ def complete(self) -> None:
self.pos = None
self.unit = b""
self.total = None
self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
- def _printdebug(self, item):
+ def _printdebug(self, item: bytes) -> None:
unit = b''
if self.unit:
unit = b' ' + self.unit
@@ -1911,7 +2023,7 @@
self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
-def gdinitconfig(ui):
+def gdinitconfig(ui: "uimod.ui"):
"""helper function to know if a repo should be created as general delta"""
# experimental config: format.generaldelta
return ui.configbool(b'format', b'generaldelta') or ui.configbool(
@@ -1919,7 +2031,7 @@
)
-def gddeltaconfig(ui):
+def gddeltaconfig(ui: "uimod.ui"):
"""helper function to know if incoming deltas should be optimized
The `format.generaldelta` config is an old form of the config that also
@@ -1938,11 +2050,11 @@
firstlinekey = b'__firstline'
- def __init__(self, vfs, path, keys=None):
+ def __init__(self, vfs, path: bytes, keys=None) -> None:
self.vfs = vfs
self.path = path
- def read(self, firstlinenonkeyval=False):
+ def read(self, firstlinenonkeyval: bool = False):
"""Read the contents of a simple key-value file
'firstlinenonkeyval' indicates whether the first line of file should
@@ -1973,7 +2085,7 @@
raise error.CorruptedState(stringutil.forcebytestr(e))
return d
- def write(self, data, firstline=None):
+ def write(self, data, firstline: Optional[bytes] = None) -> None:
"""Write key=>value mapping to a file
data is a dict. Keys must be alphanumerical and start with a letter.
Values must not contain newline characters.
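
A sketch of the key-value format this docstring describes: alphanumeric keys starting with a letter, newline-free values, and an optional free-form first line. The helper below is an illustration, not the class's actual implementation:

    from typing import Dict, Optional

    def format_keyvalue(
        data: Dict[bytes, bytes], firstline: Optional[bytes] = None
    ) -> bytes:
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)
        for k, v in sorted(data.items()):
            assert k[:1].isalpha() and k.isalnum(), k
            assert b'\n' not in v, v
            lines.append(b'%s=%s\n' % (k, v))
        return b''.join(lines)
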
@@ -2002,7 +2114,7 @@
fp.write(b''.join(lines))
-_reportobsoletedsource = [
+_reportobsoletedsource: List[bytes] = [
b'debugobsolete',
b'pull',
b'push',
@@ -2010,13 +2122,13 @@
b'unbundle',
]
-_reportnewcssource = [
+_reportnewcssource: List[bytes] = [
b'pull',
b'unbundle',
]
-def prefetchfiles(repo, revmatches):
+def prefetchfiles(repo, revmatches) -> None:
"""Invokes the registered file prefetch functions, allowing extensions to
ensure the corresponding files are available locally, before the command
uses them.
@@ -2045,10 +2157,12 @@
fileprefetchhooks = util.hooks()
# A marker that tells the evolve extension to suppress its own reporting
-_reportstroubledchangesets = True
-
-
-def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
+_reportstroubledchangesets: bool = True
+
+
+def registersummarycallback(
+ repo, otr, txnname: bytes = b'', as_validator: bool = False
+) -> None:
"""register a callback to issue a summary after the transaction is closed
If as_validator is true, then the callbacks are registered as transaction
@@ -2219,7 +2333,7 @@
repo.ui.status(msg % len(published))
-def getinstabilitymessage(delta, instability):
+def getinstabilitymessage(delta: int, instability: bytes) -> Optional[bytes]:
"""function to return the message to show warning about new instabilities
exists as a separate function so that extension can wrap to show more
@@ -2228,14 +2342,14 @@
return _(b'%i new %s changesets\n') % (delta, instability)
-def nodesummaries(repo, nodes, maxnumnodes=4):
+def nodesummaries(repo, nodes, maxnumnodes: int = 4) -> bytes:
if len(nodes) <= maxnumnodes or repo.ui.verbose:
return b' '.join(short(h) for h in nodes)
first = b' '.join(short(h) for h in nodes[:maxnumnodes])
return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
-def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
+def enforcesinglehead(repo, tr, desc: bytes, accountclosed, filtername) -> None:
"""check that no named branch has multiple heads"""
if desc in (b'strip', b'repair'):
# skip the logic during strip
@@ -2260,7 +2374,7 @@
return sink
-def unhidehashlikerevs(repo, specs, hiddentype):
+def unhidehashlikerevs(repo, specs, hiddentype: bytes):
"""parse the user specs and unhide changesets whose hash or revision number
is passed.
@@ -2313,7 +2427,7 @@
return repo.filtered(b'visible-hidden', revs)
-def _getrevsfromsymbols(repo, symbols):
+def _getrevsfromsymbols(repo, symbols) -> Set[int]:
"""parse the list of symbols and returns a set of revision numbers of hidden
changesets present in symbols"""
revs = set()
@@ -2348,7 +2462,7 @@
return revs
-def bookmarkrevs(repo, mark):
+def bookmarkrevs(repo, mark: bytes):
"""Select revisions reachable by a given bookmark
If the bookmarked revision isn't a head, an empty set will be returned.
@@ -2356,7 +2470,7 @@
return repo.revs(format_bookmark_revspec(mark))
-def format_bookmark_revspec(mark):
+def format_bookmark_revspec(mark: bytes) -> bytes:
"""Build a revset expression to select revisions reachable by a given
bookmark"""
mark = b'literal:' + mark
@@ -2370,7 +2484,7 @@
)
-def ismember(ui, username, userlist):
+def ismember(ui: "uimod.ui", username: bytes, userlist: List[bytes]) -> bool:
"""Check if username is a member of userlist.
If userlist has a single '*' member, all users are considered members.
@@ -2380,22 +2494,24 @@
return userlist == [b'*'] or username in userlist
-RESOURCE_HIGH = 3
-RESOURCE_MEDIUM = 2
-RESOURCE_LOW = 1
-RESOURCE_DEFAULT = 0
-
-RESOURCE_MAPPING = {
+RESOURCE_HIGH: int = 3
+RESOURCE_MEDIUM: int = 2
+RESOURCE_LOW: int = 1
+RESOURCE_DEFAULT: int = 0
+
+RESOURCE_MAPPING: Dict[bytes, int] = {
b'default': RESOURCE_DEFAULT,
b'low': RESOURCE_LOW,
b'medium': RESOURCE_MEDIUM,
b'high': RESOURCE_HIGH,
}
-DEFAULT_RESOURCE = RESOURCE_MEDIUM
-
-
-def get_resource_profile(ui, dimension=None):
+DEFAULT_RESOURCE: int = RESOURCE_MEDIUM
+
+
+def get_resource_profile(
+ ui: "uimod.ui", dimension: Optional[bytes] = None
+) -> int:
"""return the resource profile for a dimension
If no dimension is specified, the generic value is returned"""
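
A sketch of how the annotated mapping resolves a config string to a level: unknown or b'default' values fall back to DEFAULT_RESOURCE. The lookup function is an illustration, not the exact body of get_resource_profile:

    RESOURCE_MAPPING = {b'default': 0, b'low': 1, b'medium': 2, b'high': 3}
    DEFAULT_RESOURCE = RESOURCE_MAPPING[b'medium']

    def resource_level(raw: bytes) -> int:
        level = RESOURCE_MAPPING.get(raw, RESOURCE_MAPPING[b'default'])
        return DEFAULT_RESOURCE if level == RESOURCE_MAPPING[b'default'] else level

    assert resource_level(b'high') == 3
    assert resource_level(b'bogus') == 2  # falls back to the medium default
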
--- a/mercurial/scmwindows.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/scmwindows.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import os
import winreg # pytype: disable=import-error
--- a/mercurial/server.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/server.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
--- a/mercurial/setdiscovery.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/setdiscovery.py Sat Oct 26 04:16:00 2024 +0200
@@ -40,6 +40,7 @@
classified with it (since all ancestors or descendants will be marked as well).
"""
+from __future__ import annotations
import collections
import random
@@ -190,7 +191,6 @@
return getparents
def _childrengetter(self):
-
if self._childrenmap is not None:
# During discovery, the `undecided` set keep shrinking.
# Therefore, the map computed for an iteration N will be
@@ -454,7 +454,6 @@
full = not initial_head_exchange
progress = ui.makeprogress(_(b'searching'), unit=_(b'queries'))
while not disco.iscomplete():
-
if full or disco.hasinfo():
if full:
ui.note(_(b"sampling from both directions\n"))
--- a/mercurial/shelve.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/shelve.py Sat Oct 26 04:16:00 2024 +0200
@@ -21,11 +21,23 @@
shelve".
"""
+from __future__ import annotations
+
import collections
import io
import itertools
import stat
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Sequence,
+ Tuple,
+)
+
from .i18n import _
from .node import (
bin,
@@ -37,6 +49,7 @@
bundle2,
changegroup,
cmdutil,
+ context as contextmod,
discovery,
error,
exchange,
@@ -69,16 +82,16 @@
class ShelfDir:
- def __init__(self, repo, for_backups=False):
+ def __init__(self, repo, for_backups: bool = False) -> None:
if for_backups:
self.vfs = vfsmod.vfs(repo.vfs.join(backupdir))
else:
self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
- def get(self, name):
+ def get(self, name: bytes) -> "Shelf":
return Shelf(self.vfs, name)
- def listshelves(self):
+ def listshelves(self) -> List[Tuple[float, bytes]]:
"""return all shelves in repo as list of (time, name)"""
try:
names = self.vfs.listdir()
@@ -99,14 +112,14 @@
return sorted(info, reverse=True)
-def _use_internal_phase(repo):
+def _use_internal_phase(repo) -> bool:
return (
phases.supportinternal(repo)
and repo.ui.config(b'shelve', b'store') == b'internal'
)
-def _target_phase(repo):
+def _target_phase(repo) -> int:
return phases.internal if _use_internal_phase(repo) else phases.secret
@@ -118,29 +131,29 @@
differences and lets you work with the shelf as a whole.
"""
- def __init__(self, vfs, name):
+ def __init__(self, vfs: vfsmod.vfs, name: bytes) -> None:
self.vfs = vfs
self.name = name
- def exists(self):
+ def exists(self) -> bool:
return self._exists(b'.shelve') or self._exists(b'.patch', b'.hg')
- def _exists(self, *exts):
+ def _exists(self, *exts: bytes) -> bool:
return all(self.vfs.exists(self.name + ext) for ext in exts)
- def mtime(self):
+ def mtime(self) -> float:
try:
return self._stat(b'.shelve')[stat.ST_MTIME]
except FileNotFoundError:
return self._stat(b'.patch')[stat.ST_MTIME]
- def _stat(self, ext):
+ def _stat(self, ext: bytes):
return self.vfs.stat(self.name + ext)
- def writeinfo(self, info):
+ def writeinfo(self, info) -> None:
scmutil.simplekeyvaluefile(self.vfs, self.name + b'.shelve').write(info)
- def hasinfo(self):
+ def hasinfo(self) -> bool:
return self.vfs.exists(self.name + b'.shelve')
def readinfo(self):
@@ -148,7 +161,7 @@
self.vfs, self.name + b'.shelve'
).read()
- def writebundle(self, repo, bases, node):
+ def writebundle(self, repo, bases, node) -> None:
cgversion = changegroup.safeversion(repo)
if cgversion == b'01':
btype = b'HG10BZ'
@@ -174,7 +187,7 @@
compression=compression,
)
- def applybundle(self, repo, tr):
+ def applybundle(self, repo, tr) -> contextmod.changectx:
filename = self.name + b'.hg'
fp = self.vfs(filename)
try:
@@ -197,10 +210,10 @@
finally:
fp.close()
- def open_patch(self, mode=b'rb'):
+ def open_patch(self, mode: bytes = b'rb'):
return self.vfs(self.name + b'.patch', mode)
- def patch_from_node(self, repo, node):
+ def patch_from_node(self, repo, node) -> io.BytesIO:
repo = repo.unfiltered()
match = _optimized_match(repo, node)
fp = io.BytesIO()
@@ -221,8 +234,8 @@
except (FileNotFoundError, error.RepoLookupError):
return self.open_patch()
- def _backupfilename(self, backupvfs, filename):
- def gennames(base):
+ def _backupfilename(self, backupvfs: vfsmod.vfs, filename: bytes) -> bytes:
+ def gennames(base: bytes):
yield base
base, ext = base.rsplit(b'.', 1)
for i in itertools.count(1):
@@ -232,7 +245,10 @@
if not backupvfs.exists(n):
return backupvfs.join(n)
- def movetobackup(self, backupvfs):
+ # Help pytype: gennames() yields infinitely, so this is unreachable
+ raise error.ProgrammingError("unreachable")
+
+ def movetobackup(self, backupvfs: vfsmod.vfs) -> None:
if not backupvfs.isdir():
backupvfs.makedir()
for suffix in shelvefileextensions:
@@ -243,7 +259,7 @@
self._backupfilename(backupvfs, filename),
)
- def delete(self):
+ def delete(self) -> None:
for ext in shelvefileextensions:
self.vfs.tryunlink(self.name + b'.' + ext)
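
The `_backupfilename` helper relies on an infinite candidate-name generator, which is why the trailing ProgrammingError is unreachable. A self-contained sketch of the pattern; the exact suffix format is elided in this hunk, so the scheme below is assumed:

    import itertools
    from typing import Iterator

    def gennames(base: bytes) -> Iterator[bytes]:
        yield base
        stem, ext = base.rsplit(b'.', 1)
        for i in itertools.count(1):
            yield b'%s-%d.%s' % (stem, i, ext)  # assumed suffix scheme

    # the first candidate that does not collide wins:
    taken = {b'default.patch', b'default-1.patch'}
    name = next(n for n in gennames(b'default.patch') if n not in taken)
    assert name == b'default-2.patch'
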
@@ -256,7 +272,7 @@
return patch.changedfiles(ui, repo, filename)
-def _optimized_match(repo, node):
+def _optimized_match(repo, node: bytes):
"""
Create a matcher so that prefetch doesn't attempt to fetch
the entire repository pointlessly, and as an optimisation
@@ -272,6 +288,7 @@
versions of a shelved state are possible and handles them appropriately.
"""
+ # Class-wide constants
_version = 2
_filename = b'shelvedstate'
_keep = b'keep'
@@ -280,8 +297,19 @@
_noactivebook = b':no-active-bookmark'
_interactive = b'interactive'
+ # Per-instance attrs
+ name: bytes
+ wctx: contextmod.workingctx
+ pendingctx: contextmod.changectx
+ parents: List[bytes]
+ nodestoremove: List[bytes]
+ branchtorestore: bytes
+ keep: bool
+ activebookmark: bytes
+ interactive: bool
+
@classmethod
- def _verifyandtransform(cls, d):
+ def _verifyandtransform(cls, d: Dict[bytes, Any]) -> None:
"""Some basic shelvestate syntactic verification and transformation"""
try:
d[b'originalwctx'] = bin(d[b'originalwctx'])
@@ -294,7 +322,7 @@
raise error.CorruptedState(stringutil.forcebytestr(err))
@classmethod
- def _getversion(cls, repo):
+ def _getversion(cls, repo) -> int:
"""Read version information from shelvestate file"""
fp = repo.vfs(cls._filename)
try:
@@ -306,7 +334,7 @@
return version
@classmethod
- def _readold(cls, repo):
+ def _readold(cls, repo) -> Dict[bytes, Any]:
"""Read the old position-based version of a shelvestate file"""
# Order is important, because the old shelvestate file uses it
# to determine values of fields (e.g. name is on the second line,
@@ -365,7 +393,7 @@
obj.activebookmark = d.get(b'activebook', b'')
obj.interactive = d.get(b'interactive') == cls._interactive
except (error.RepoLookupError, KeyError) as err:
- raise error.CorruptedState(pycompat.bytestr(err))
+ raise error.CorruptedState(stringutil.forcebytestr(err))
return obj
@@ -373,15 +401,15 @@
def save(
cls,
repo,
- name,
- originalwctx,
- pendingctx,
- nodestoremove,
- branchtorestore,
- keep=False,
- activebook=b'',
- interactive=False,
- ):
+ name: bytes,
+ originalwctx: contextmod.workingctx,
+ pendingctx: contextmod.changectx,
+ nodestoremove: List[bytes],
+ branchtorestore: bytes,
+ keep: bool = False,
+ activebook: bytes = b'',
+ interactive: bool = False,
+ ) -> None:
info = {
b"name": name,
b"originalwctx": hex(originalwctx.node()),
@@ -399,11 +427,11 @@
)
@classmethod
- def clear(cls, repo):
+ def clear(cls, repo) -> None:
repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
-def cleanupoldbackups(repo):
+def cleanupoldbackups(repo) -> None:
maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
backup_dir = ShelfDir(repo, for_backups=True)
hgfiles = backup_dir.listshelves()
@@ -418,19 +446,19 @@
backup_dir.get(name).delete()
-def _backupactivebookmark(repo):
+def _backupactivebookmark(repo) -> bytes:
activebookmark = repo._activebookmark
if activebookmark:
bookmarks.deactivate(repo)
return activebookmark
-def _restoreactivebookmark(repo, mark):
+def _restoreactivebookmark(repo, mark) -> None:
if mark:
bookmarks.activate(repo, mark)
-def _aborttransaction(repo, tr):
+def _aborttransaction(repo, tr) -> None:
"""Abort current transaction for shelve/unshelve, but keep dirstate"""
# disable the transaction invalidation of the dirstate, to preserve the
# current change in memory.
@@ -456,7 +484,7 @@
ds.setbranch(current_branch, None)
-def getshelvename(repo, parent, opts):
+def getshelvename(repo, parent, opts) -> bytes:
"""Decide on the name this shelve is going to have"""
def gennames():
@@ -496,7 +524,7 @@
return name
-def mutableancestors(ctx):
+def mutableancestors(ctx) -> Iterator[bytes]:
"""return all mutable ancestors for ctx (included)
Much faster than the revset ancestors(ctx) & draft()"""
@@ -514,7 +542,7 @@
visit.append(parent)
-def getcommitfunc(extra, interactive, editor=False):
+def getcommitfunc(extra, interactive: bool, editor: bool = False):
def commitfunc(ui, repo, message, match, opts):
hasmq = hasattr(repo, 'mq')
if hasmq:
@@ -550,7 +578,7 @@
return interactivecommitfunc if interactive else commitfunc
-def _nothingtoshelvemessaging(ui, repo, pats, opts):
+def _nothingtoshelvemessaging(ui, repo, pats, opts) -> None:
stat = repo.status(match=scmutil.match(repo[None], pats, opts))
if stat.deleted:
ui.status(
@@ -561,7 +589,7 @@
ui.status(_(b"nothing changed\n"))
-def _shelvecreatedcommit(repo, node, name, match):
+def _shelvecreatedcommit(repo, node: bytes, name: bytes, match) -> None:
info = {b'node': hex(node)}
shelf = ShelfDir(repo).get(name)
shelf.writeinfo(info)
@@ -573,14 +601,14 @@
)
-def _includeunknownfiles(repo, pats, opts, extra):
+def _includeunknownfiles(repo, pats, opts, extra) -> None:
s = repo.status(match=scmutil.match(repo[None], pats, opts), unknown=True)
if s.unknown:
extra[b'shelve_unknown'] = b'\0'.join(s.unknown)
repo[None].add(s.unknown)
-def _finishshelve(repo, tr):
+def _finishshelve(repo, tr) -> None:
if _use_internal_phase(repo):
tr.close()
else:
@@ -647,7 +675,7 @@
False,
cmdutil.recordfilter,
*pats,
- **pycompat.strkwargs(opts)
+ **pycompat.strkwargs(opts),
)
if not node:
_nothingtoshelvemessaging(ui, repo, pats, opts)
@@ -675,7 +703,7 @@
lockmod.release(tr, lock)
-def _isbareshelve(pats, opts):
+def _isbareshelve(pats, opts) -> bool:
return (
not pats
and not opts.get(b'interactive', False)
@@ -684,11 +712,11 @@
)
-def _iswctxonnewbranch(repo):
+def _iswctxonnewbranch(repo) -> bool:
return repo[None].branch() != repo[b'.'].branch()
-def cleanupcmd(ui, repo):
+def cleanupcmd(ui, repo) -> None:
"""subcommand that deletes all shelves"""
with repo.wlock():
@@ -699,7 +727,7 @@
cleanupoldbackups(repo)
-def deletecmd(ui, repo, pats):
+def deletecmd(ui, repo, pats) -> None:
"""subcommand that deletes a specific shelve"""
if not pats:
raise error.InputError(_(b'no shelved changes specified!'))
@@ -715,7 +743,7 @@
cleanupoldbackups(repo)
-def listcmd(ui, repo, pats, opts):
+def listcmd(ui, repo, pats: Iterable[bytes], opts) -> None:
"""subcommand that displays the list of shelves"""
pats = set(pats)
width = 80
@@ -762,7 +790,7 @@
ui.write(chunk, label=label)
-def patchcmds(ui, repo, pats, opts):
+def patchcmds(ui, repo, pats: Sequence[bytes], opts) -> None:
"""subcommand that displays shelves"""
shelf_dir = ShelfDir(repo)
if len(pats) == 0:
@@ -779,7 +807,7 @@
listcmd(ui, repo, pats, opts)
-def checkparents(repo, state):
+def checkparents(repo, state: shelvedstate) -> None:
"""check parent while resuming an unshelve"""
if state.parents != repo.dirstate.parents():
raise error.Abort(
@@ -787,11 +815,12 @@
)
-def _loadshelvedstate(ui, repo, opts):
+def _loadshelvedstate(ui, repo, opts) -> shelvedstate:
try:
state = shelvedstate.load(repo)
if opts.get(b'keep') is None:
opts[b'keep'] = state.keep
+ return state
except FileNotFoundError:
cmdutil.wrongtooltocontinue(repo, _(b'unshelve'))
except error.CorruptedState as err:
@@ -812,10 +841,13 @@
b'please update to some commit\n'
)
)
- return state
+ else:
+ raise error.ProgrammingError(
+ "a corrupted shelvedstate exists without --abort or --continue"
+ )
-def unshelveabort(ui, repo, state):
+def unshelveabort(ui, repo, state: shelvedstate) -> None:
"""subcommand that abort an in-progress unshelve"""
with repo.lock():
try:
@@ -834,14 +866,14 @@
ui.warn(_(b"unshelve of '%s' aborted\n") % state.name)
-def hgabortunshelve(ui, repo):
+def hgabortunshelve(ui, repo) -> None:
"""logic to abort unshelve using 'hg abort"""
with repo.wlock():
state = _loadshelvedstate(ui, repo, {b'abort': True})
return unshelveabort(ui, repo, state)
-def mergefiles(ui, repo, wctx, shelvectx):
+def mergefiles(ui, repo, wctx, shelvectx) -> None:
"""updates to wctx and merges the changes from shelvectx into the
dirstate."""
with ui.configoverride({(b'ui', b'quiet'): True}):
@@ -849,7 +881,7 @@
cmdutil.revert(ui, repo, shelvectx)
-def restorebranch(ui, repo, branchtorestore):
+def restorebranch(ui, repo, branchtorestore: bytes) -> None:
if branchtorestore and branchtorestore != repo.dirstate.branch():
repo.dirstate.setbranch(branchtorestore, repo.currenttransaction())
ui.status(
@@ -857,7 +889,7 @@
)
-def unshelvecleanup(ui, repo, name, opts):
+def unshelvecleanup(ui, repo, name: bytes, opts) -> None:
"""remove related files after an unshelve"""
if not opts.get(b'keep'):
backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
@@ -865,7 +897,7 @@
cleanupoldbackups(repo)
-def unshelvecontinue(ui, repo, state, opts):
+def unshelvecontinue(ui, repo, state: shelvedstate, opts) -> None:
"""subcommand to continue an in-progress unshelve"""
# We're finishing off a merge. First parent is our original
# parent, second is the temporary "fake" commit we're unshelving.
@@ -923,7 +955,7 @@
ui.status(_(b"unshelve of '%s' complete\n") % state.name)
-def hgcontinueunshelve(ui, repo):
+def hgcontinueunshelve(ui, repo) -> None:
"""logic to resume unshelve using 'hg continue'"""
with repo.wlock():
state = _loadshelvedstate(ui, repo, {b'continue': True})
@@ -955,7 +987,7 @@
return tmpwctx, addedbefore
-def _unshelverestorecommit(ui, repo, tr, basename):
+def _unshelverestorecommit(ui, repo, tr, basename: bytes):
"""Recreate commit in the repository during the unshelve"""
repo = repo.unfiltered()
node = None
@@ -976,7 +1008,9 @@
return repo, shelvectx
-def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts):
+def _createunshelvectx(
+ ui, repo, shelvectx, basename: bytes, interactive: bool, opts
+) -> Tuple[bytes, bool]:
"""Handles the creation of unshelve commit and updates the shelve if it
was partially unshelved.
@@ -1018,7 +1052,7 @@
False,
cmdutil.recordfilter,
*pats,
- **pycompat.strkwargs(opts)
+ **pycompat.strkwargs(opts),
)
snode = repo.commit(
text=shelvectx.description(),
@@ -1038,7 +1072,7 @@
opts,
tr,
oldtiprev,
- basename,
+ basename: bytes,
pctx,
tmpwctx,
shelvectx,
@@ -1109,7 +1143,7 @@
return shelvectx, ispartialunshelve
-def _forgetunknownfiles(repo, shelvectx, addedbefore):
+def _forgetunknownfiles(repo, shelvectx, addedbefore) -> None:
# Forget any files that were unknown before the shelve, unknown before
# unshelve started, but are now added.
shelveunknown = shelvectx.extra().get(b'shelve_unknown')
@@ -1121,7 +1155,7 @@
repo[None].forget(toforget)
-def _finishunshelve(repo, oldtiprev, tr, activebookmark):
+def _finishunshelve(repo, oldtiprev, tr, activebookmark) -> None:
_restoreactivebookmark(repo, activebookmark)
# We used to manually strip the commit to update in-memory structures and
# prevent some issues around hooks. This no longer seems to be the case, so
@@ -1129,7 +1163,7 @@
_aborttransaction(repo, tr)
-def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
+def _checkunshelveuntrackedproblems(ui, repo, shelvectx) -> None:
"""Check potential problems which may result from working
copy having untracked changes."""
wcdeleted = set(repo.status().deleted)
@@ -1141,7 +1175,7 @@
raise error.Abort(m, hint=hint)
-def unshelvecmd(ui, repo, *shelved, **opts):
+def unshelvecmd(ui, repo, *shelved, **opts) -> None:
opts = pycompat.byteskwargs(opts)
abortf = opts.get(b'abort')
continuef = opts.get(b'continue')
@@ -1178,6 +1212,11 @@
)
elif continuef:
return unshelvecontinue(ui, repo, state, opts)
+ else:
+ # Unreachable code, but it keeps type checkers from thinking that
+ # 'basename' may be used before initialization when checking
+ # ShelfDir below.
+ raise error.ProgrammingError("neither abort nor continue specified")
elif len(shelved) > 1:
raise error.InputError(_(b'can only unshelve one change at a time'))
elif not shelved:
@@ -1195,7 +1234,7 @@
return _dounshelve(ui, repo, basename, opts)
-def _dounshelve(ui, repo, basename, opts):
+def _dounshelve(ui, repo, basename: bytes, opts) -> None:
repo = repo.unfiltered()
lock = tr = None
try:
--- a/mercurial/similar.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/similar.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from . import (
@@ -118,14 +119,14 @@
# Find exact matches.
matchedfiles = set()
- for (a, b) in _findexactmatches(repo, addedfiles, removedfiles):
+ for a, b in _findexactmatches(repo, addedfiles, removedfiles):
matchedfiles.add(b)
yield (a.path(), b.path(), 1.0)
# If the user requested similar files to be matched, search for them also.
if threshold < 1.0:
addedfiles = [x for x in addedfiles if x not in matchedfiles]
- for (a, b, score) in _findsimilarmatches(
+ for a, b, score in _findsimilarmatches(
repo, addedfiles, removedfiles, threshold
):
yield (a.path(), b.path(), score)
--- a/mercurial/simplemerge.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/simplemerge.py Sat Oct 26 04:16:00 2024 +0200
@@ -16,6 +16,7 @@
# mbp: "you know that thing where cvs gives you conflict markers?"
# s: "i hate that."
+from __future__ import annotations
from .i18n import _
from . import (
@@ -48,6 +49,30 @@
return None
+def intersect_or_touch(ra, rb):
+ """Given two ranges return the range where they intersect or touch or None.
+
+ >>> intersect_or_touch((0, 10), (0, 6))
+ (0, 6)
+ >>> intersect_or_touch((0, 10), (5, 15))
+ (5, 10)
+ >>> intersect_or_touch((0, 10), (10, 15))
+ (10, 10)
+ >>> intersect_or_touch((0, 9), (10, 15))
+ >>> intersect_or_touch((0, 9), (7, 15))
+ (7, 9)
+ """
+ assert ra[0] <= ra[1]
+ assert rb[0] <= rb[1]
+
+ sa = max(ra[0], rb[0])
+ sb = min(ra[1], rb[1])
+ if sa <= sb:
+ return sa, sb
+ else:
+ return None
+
+
def compare_range(a, astart, aend, b, bstart, bend):
"""Compare a[astart:aend] == b[bstart:bend], without slicing."""
if (aend - astart) != (bend - bstart):
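
The heart of the relaxed_sync change is the strict-versus-touching contrast: the classic intersect() rejects ranges that merely share an endpoint, while intersect_or_touch() keeps them. A side-by-side sketch; the strict variant is reconstructed from its usual definition:

    def intersect(ra, rb):
        # strict: requires a non-empty overlap
        sa, sb = max(ra[0], rb[0]), min(ra[1], rb[1])
        return (sa, sb) if sa < sb else None

    def intersect_or_touch(ra, rb):
        # relaxed: an empty-but-touching range still counts
        sa, sb = max(ra[0], rb[0]), min(ra[1], rb[1])
        return (sa, sb) if sa <= sb else None

    assert intersect((0, 10), (10, 15)) is None
    assert intersect_or_touch((0, 10), (10, 15)) == (10, 10)
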
@@ -65,7 +90,16 @@
Given strings BASE, OTHER, THIS, tries to produce a combined text
incorporating the changes from both BASE->OTHER and BASE->THIS."""
- def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
+ def __init__(
+ self,
+ basetext,
+ atext,
+ btext,
+ base=None,
+ a=None,
+ b=None,
+ relaxed_sync=False,
+ ):
self.basetext = basetext
self.atext = atext
self.btext = btext
@@ -75,6 +109,7 @@
a = mdiff.splitnewlines(atext)
if b is None:
b = mdiff.splitnewlines(btext)
+ self.relaxed_sync = relaxed_sync
self.base = base
self.a = a
self.b = b
@@ -219,6 +254,11 @@
len_a = len(amatches)
len_b = len(bmatches)
+ if self.relaxed_sync:
+ intersect_fun = intersect_or_touch
+ else:
+ intersect_fun = intersect
+
sl = []
while ia < len_a and ib < len_b:
@@ -227,7 +267,7 @@
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
- i = intersect((abase, abase + alen), (bbase, bbase + blen))
+ i = intersect_fun((abase, abase + alen), (bbase, bbase + blen))
if i:
intbase = i[0]
intend = i[1]
@@ -257,13 +297,41 @@
# advance whichever one ends first in the base text
if (abase + alen) < (bbase + blen):
ia += 1
+ elif not self.relaxed_sync:
+ # if the blocks end at the same time we know they can't overlap
+ # any other block, so no need for the complicated checks below
+ ib += 1
+ elif (abase + alen) > (bbase + blen):
+ ib += 1
else:
- ib += 1
+ # If both end at the same time, either may be touching the
+ # follow-up matching block on the other side.
+ # Advance the one whose next block comes sooner.
+ if ia + 1 == len_a:
+ # if we run out of blocks on A side, we may as well advance B
+ # since there's nothing on A side for that to touch
+ ib += 1
+ elif ib + 1 == len_b:
+ ia += 1
+ elif amatches[ia + 1][0] > bmatches[ib + 1][0]:
+ ib += 1
+ elif amatches[ia + 1][0] < bmatches[ib + 1][0]:
+ ia += 1
+ else:
+ # symmetric situation: both sides added lines to the same place
+ # it's less surprising if we treat it as a conflict, so skip
+ # both without a preferred order
+ ia += 1
+ ib += 1
intbase = len(self.base)
abase = len(self.a)
bbase = len(self.b)
- sl.append((intbase, intbase, abase, abase, bbase, bbase))
+ sentinel_hunk = (intbase, intbase, abase, abase, bbase, bbase)
+ # we avoid a duplicate sentinel hunk at the end to make the
+ # test output cleaner
+ if not (sl and sl[len(sl) - 1] == sentinel_hunk):
+ sl.append(sentinel_hunk)
return sl
@@ -497,6 +565,7 @@
other,
mode=b'merge',
allow_binary=False,
+ relaxed_sync=False,
):
"""Performs the simplemerge algorithm.
@@ -508,7 +577,9 @@
_verifytext(base)
_verifytext(other)
- m3 = Merge3Text(base.text(), local.text(), other.text())
+ m3 = Merge3Text(
+ base.text(), local.text(), other.text(), relaxed_sync=relaxed_sync
+ )
conflicts = False
if mode == b'union':
lines = _resolve(m3, (1, 2))
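
A hypothetical caller's view of the new flag. This is a sketch only: it assumes a Mercurial checkout on sys.path, `find_sync_regions()` is assumed to be the method containing the loop shown above, and the texts are invented:

    from mercurial.simplemerge import Merge3Text

    base = b'a\nb\n'
    local = b'a\nlocal\nb\n'
    other = b'a\nb\nother\n'

    strict = Merge3Text(base, local, other)
    relaxed = Merge3Text(base, local, other, relaxed_sync=True)
    # relaxed sync treats unconflicted blocks that merely touch as still
    # synchronized, changing how adjacent insertions group into regions
    regions = relaxed.find_sync_regions()
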
--- a/mercurial/smartset.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/smartset.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from . import (
encoding,
--- a/mercurial/sparse.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/sparse.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
@@ -612,7 +613,7 @@
repo, includes, excludes, profiles, force=False, removing=False
):
"""Update the sparse config and working directory state."""
- with repo.lock():
+ with repo.wlock():
raw = repo.vfs.tryread(b'sparse')
oldincludes, oldexcludes, oldprofiles = parseconfig(
repo.ui, raw, b'sparse'
@@ -632,10 +633,10 @@
if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
- scmutil.writereporequirements(repo)
+ scmutil.writereporequirements(repo, maywritestore=False)
elif requirements.SPARSE_REQUIREMENT not in oldrequires:
repo.requirements.add(requirements.SPARSE_REQUIREMENT)
- scmutil.writereporequirements(repo)
+ scmutil.writereporequirements(repo, maywritestore=False)
try:
writeconfig(repo, includes, excludes, profiles)
@@ -644,7 +645,7 @@
if repo.requirements != oldrequires:
repo.requirements.clear()
repo.requirements |= oldrequires
- scmutil.writereporequirements(repo)
+ scmutil.writereporequirements(repo, maywritestore=False)
writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
raise
@@ -730,7 +731,7 @@
The new config is written out and a working directory refresh is performed.
"""
- with repo.wlock(), repo.lock(), repo.dirstate.changing_parents(repo):
+ with repo.wlock(), repo.dirstate.changing_parents(repo):
raw = repo.vfs.tryread(b'sparse')
oldinclude, oldexclude, oldprofiles = parseconfig(
repo.ui, raw, b'sparse'
--- a/mercurial/sshpeer.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/sshpeer.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
import uuid
--- a/mercurial/sslutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/sslutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import hashlib
import os
@@ -497,7 +498,6 @@
)
elif e.reason == 'CERTIFICATE_VERIFY_FAILED' and pycompat.iswindows:
-
ui.warn(
_(
b'(the full certificate chain may not be available '
--- a/mercurial/stabletailgraph/stabletailsort.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/stabletailgraph/stabletailsort.py Sat Oct 26 04:16:00 2024 +0200
@@ -19,6 +19,8 @@
optimised to operate on large production graphs.
"""
+from __future__ import annotations
+
import itertools
from ..node import nullrev
from .. import ancestor
--- a/mercurial/stack.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/stack.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
def getstack(repo, rev=None):
"""return a sorted smartrev of the stack containing either rev if it is
--- a/mercurial/state.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/state.py Sat Oct 26 04:16:00 2024 +0200
@@ -17,6 +17,7 @@
the data.
"""
+from __future__ import annotations
import contextlib
--- a/mercurial/statichttprepo.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/statichttprepo.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
@@ -51,11 +52,14 @@
def seek(self, pos):
self.pos = pos
- def read(self, bytes=None):
+ def read(self, n: int = -1):
req = urlreq.request(pycompat.strurl(self.url))
- end = b''
- if bytes:
- end = self.pos + bytes - 1
+ end = ''
+
+ if n == 0:
+ return b''
+ elif n > 0:
+ end = "%d" % (self.pos + n - 1)
if self.pos or end:
req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
@@ -75,12 +79,14 @@
if code == 200:
# HTTPRangeHandler does nothing if remote does not support
# Range headers and returns the full entity. Let's slice it.
- if bytes:
- data = data[self.pos : self.pos + bytes]
+ if n > 0 and (self.pos + n) < len(data):
+ data = data[self.pos : self.pos + n]
+ elif self.pos < len(data):
+ data = data[self.pos :]
else:
- data = data[self.pos :]
- elif bytes:
- data = data[:bytes]
+ data = b''
+ elif 0 < n < len(data):
+ data = data[:n]
self.pos += len(data)
return data
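
The rewritten read(n) maps Python file semantics onto HTTP Range requests: n == 0 short-circuits, n > 0 asks for an inclusive byte range, and n < 0 leaves the end empty, meaning "to EOF". A standalone sketch of the header construction using the stdlib (function name illustrative):

    import urllib.request

    def range_request(url: str, pos: int, n: int = -1) -> bytes:
        if n == 0:
            return b''
        end = ''
        if n > 0:
            end = '%d' % (pos + n - 1)  # Range end offsets are inclusive
        req = urllib.request.Request(url)
        if pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (pos, end))
        with urllib.request.urlopen(req) as resp:
            return resp.read()
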
@@ -138,6 +144,9 @@
f = b"/".join((self.base, urlreq.quote(path)))
return httprangereader(f, urlopener)
+ def _auditpath(self, path: bytes, mode: bytes) -> None:
+ raise NotImplementedError
+
def join(self, path, *insidef):
if path:
return pathutil.join(self.base, path, *insidef)
@@ -160,6 +169,8 @@
):
supported = localrepo.localrepository._basesupported
+ manifestlog: manifest.manifestlog
+
def __init__(self, ui, path):
self._url = path
self.ui = ui
@@ -186,9 +197,8 @@
# check if it is a non-empty old-style repository
try:
- fp = self.vfs(b"00changelog.i")
- fp.read(1)
- fp.close()
+ with self.vfs(b"00changelog.i") as fp:
+ fp.read(1)
except FileNotFoundError:
# we do not care about empty old-style repositories here
msg = _(b"'%s' does not appear to be an hg repository") % path
--- a/mercurial/statprof.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/statprof.py Sat Oct 26 04:16:00 2024 +0200
@@ -101,6 +101,7 @@
main thread's work patterns.
"""
# no-check-code
+from __future__ import annotations
import collections
import contextlib
@@ -113,6 +114,10 @@
import threading
import time
+from typing import (
+ List,
+)
+
from .pycompat import open
from . import (
encoding,
@@ -155,6 +160,8 @@
class ProfileState:
+ samples: List["Sample"]
+
def __init__(self, frequency=None):
self.reset(frequency)
self.track = b'cpu'
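
`samples: List["Sample"]` uses a string forward reference because Sample is defined later in statprof.py; with the `from __future__ import annotations` added above, the quotes become optional. A minimal sketch (names invented):

    from __future__ import annotations

    from typing import List

    class State:
        samples: List[Sample]  # resolved lazily thanks to the future import

    class Sample:
        pass
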
--- a/mercurial/store.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/store.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,15 +5,29 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import collections
import functools
import os
import re
import stat
-from typing import Generator, List
+import typing
+
+from typing import (
+ Generator,
+ List,
+ Optional,
+)
from .i18n import _
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .node import hex
from .revlogutils.constants import (
INDEX_HEADER,
@@ -37,10 +51,10 @@
parsers = policy.importmod('parsers')
# how many bytes should be read from fncache in one read
# This is done to prevent loading large fncache files into memory
-fncache_chunksize = 10 ** 6
+fncache_chunksize = 10**6
-def _match_tracked_entry(entry, matcher):
+def _match_tracked_entry(entry: "BaseStoreEntry", matcher):
"""parses a fncache entry and returns whether the entry is tracking a path
matched by matcher or not.
@@ -48,10 +62,16 @@
if matcher is None:
return True
+
+ # TODO: make this safe for other entry types. Currently, the various
+ # store.data_entry generators only yield RevlogStoreEntry, so the
+ # attributes do exist on `entry`.
+ # pytype: disable=attribute-error
if entry.is_filelog:
return matcher(entry.target_id)
elif entry.is_manifestlog:
return matcher.visitdir(entry.target_id.rstrip(b'/'))
+ # pytype: enable=attribute-error
raise error.ProgrammingError(b"cannot process entry %r" % entry)
@@ -457,7 +477,7 @@
def has_size(self):
return self._file_size is not None
- def get_stream(self, vfs, copies):
+ def get_stream(self, vfs, volatiles):
"""return data "stream" information for this file
(unencoded_file_path, content_iterator, content_size)
@@ -465,8 +485,8 @@
size = self.file_size(None)
def get_stream():
- actual_path = copies[vfs.join(self.unencoded_path)]
- with open(actual_path, 'rb') as fp:
+ path = vfs.join(self.unencoded_path)
+ with volatiles.open(path) as fp:
yield None # ready to stream
if size <= 65536:
yield fp.read(size)
@@ -493,7 +513,7 @@
self,
repo=None,
vfs=None,
- copies=None,
+ volatiles=None,
max_changeset=None,
preserve_file_count=False,
):
@@ -502,7 +522,7 @@
return [(unencoded_file_path, content_iterator, content_size), …]
"""
assert vfs is not None
- return [f.get_stream(vfs, copies) for f in self.files()]
+ return [f.get_stream(vfs, volatiles) for f in self.files()]
@attr.s(slots=True, init=False)
@@ -612,7 +632,7 @@
self,
repo=None,
vfs=None,
- copies=None,
+ volatiles=None,
max_changeset=None,
preserve_file_count=False,
):
@@ -628,13 +648,13 @@
return super().get_streams(
repo=repo,
vfs=vfs,
- copies=copies,
+ volatiles=volatiles,
max_changeset=max_changeset,
preserve_file_count=preserve_file_count,
)
elif not preserve_file_count:
stream = [
- f.get_stream(vfs, copies)
+ f.get_stream(vfs, volatiles)
for f in self.files()
if not f.unencoded_path.endswith((b'.i', b'.d'))
]
@@ -648,7 +668,7 @@
name_to_size[f.unencoded_path] = f.file_size(None)
stream = [
- f.get_stream(vfs, copies)
+ f.get_stream(vfs, volatiles)
for f in self.files()
if not f.unencoded_path.endswith(b'.i')
]
@@ -803,7 +823,7 @@
concurrencychecker=concurrencychecker,
)
- def manifestlog(self, repo, storenarrowmatch):
+ def manifestlog(self, repo, storenarrowmatch) -> manifest.manifestlog:
rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
@@ -1119,11 +1139,13 @@
self.fncache.add(path)
return self.vfs(encoded, mode, *args, **kw)
- def join(self, path):
+ def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
+ insidef = (self.encode(f) for f in insidef)
+
if path:
- return self.vfs.join(self.encode(path))
+ return self.vfs.join(self.encode(path), *insidef)
else:
- return self.vfs.join(path)
+ return self.vfs.join(path, *insidef)
def register_file(self, path):
"""generic hook point to lets fncache steer its stew"""
--- a/mercurial/streamclone.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/streamclone.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import os
@@ -547,6 +548,7 @@
_srcstore = b's' # store (svfs)
_srccache = b'c' # cache (cache)
+
# This is its own function so extensions can override it.
def _walkstreamfullstorefiles(repo):
"""list snapshot files from the store"""
@@ -564,55 +566,114 @@
return (src, name, ftype, copy(vfsmap[src].join(name)))
-class TempCopyManager:
- """Manage temporary backup of volatile file during stream clone
+class VolatileManager:
+ """Manage temporary backups of volatile files during stream clone.
- This should be used as a Python context, the copies will be discarded when
- exiting the context.
+ This class will keep open file handles for the volatile files, writing the
+ smaller ones to disk if the number of open file handles grows too large.
- A copy can be done by calling the object on the real path (encoded full
- path)
+ This should be used as a Python context; the file handles and copies will
+ be discarded when exiting the context.
- The backup path can be retrieved using the __getitem__ protocol, obj[path].
- On file without backup, it will return the unmodified path. (equivalent to
- `dict.get(x, x)`)
+ The preservation can be done by calling the object on the real path
+ (encoded full path).
+
+ Valid filehandles for any file should be retrieved by calling `open(path)`.
"""
+ # arbitrarily picked as "it seemed fine" and much higher than the current
+ # usage.
+ MAX_OPEN = 100
+
def __init__(self):
+ self._counter = 0
+ self._volatile_fps = None
self._copies = None
self._dst_dir = None
def __enter__(self):
- if self._copies is not None:
- msg = "Copies context already open"
- raise error.ProgrammingError(msg)
- self._copies = {}
- self._dst_dir = pycompat.mkdtemp(prefix=b'hg-clone-')
+ if self._counter == 0:
+ assert self._volatile_fps is None
+ self._volatile_fps = {}
+ self._counter += 1
return self
- def __call__(self, src):
- """create a backup of the file at src"""
- prefix = os.path.basename(src)
- fd, dst = pycompat.mkstemp(prefix=prefix, dir=self._dst_dir)
- os.close(fd)
- self._copies[src] = dst
- util.copyfiles(src, dst, hardlink=True)
- return dst
-
- def __getitem__(self, src):
- """return the path to a valid version of `src`
-
- If the file has no backup, the path of the file is returned
- unmodified."""
- return self._copies.get(src, src)
-
def __exit__(self, *args, **kwargs):
"""discard all backups"""
- for tmp in self._copies.values():
- util.tryunlink(tmp)
- util.tryrmdir(self._dst_dir)
- self._copies = None
- self._dst_dir = None
+ self._counter -= 1
+ if self._counter == 0:
+ for _size, fp in self._volatile_fps.values():
+ fp.close()
+ self._volatile_fps = None
+ if self._copies is not None:
+ for tmp in self._copies.values():
+ util.tryunlink(tmp)
+ util.tryrmdir(self._dst_dir)
+ self._copies = None
+ self._dst_dir = None
+ assert self._volatile_fps is None
+ assert self._copies is None
+ assert self._dst_dir is None
+
+ def _init_tmp_copies(self):
+ """prepare a temporary directory to save volatile files
+
+ This will be used as a backup if we have too many files open"""
+ assert 0 < self._counter
+ assert self._copies is None
+ assert self._dst_dir is None
+ self._copies = {}
+ self._dst_dir = pycompat.mkdtemp(prefix=b'hg-clone-')
+
+ def _flush_some_on_disk(self):
+ """move some of the open files to tempory files on disk"""
+ if self._copies is None:
+ self._init_tmp_copies()
+ flush_count = self.MAX_OPEN // 2
+ for src, (size, fp) in sorted(self._volatile_fps.items())[:flush_count]:
+ prefix = os.path.basename(src)
+ fd, dst = pycompat.mkstemp(prefix=prefix, dir=self._dst_dir)
+ self._copies[src] = dst
+ os.close(fd)
+ # we no longer hardlink, but on the other hand we rarely do this,
+ # and we only do it for the smallest files, not at all in the
+ # common case.
+ with open(dst, 'wb') as bck:
+ fp.seek(0)
+ bck.write(fp.read())
+ del self._volatile_fps[src]
+ fp.close()
+
+ def _keep_one(self, src):
+ """preserve an open file handle for a given path"""
+ # store the file quickly to ensure we close it if any error happens
+ _, fp = self._volatile_fps[src] = (None, open(src, 'rb'))
+ fp.seek(0, os.SEEK_END)
+ size = fp.tell()
+ self._volatile_fps[src] = (size, fp)
+
+ def __call__(self, src):
+ """preserve the volatile file at src"""
+ assert 0 < self._counter
+ if len(self._volatile_fps) >= (self.MAX_OPEN - 1):
+ self._flush_some_on_disk()
+ self._keep_one(src)
+
+ @contextlib.contextmanager
+ def open(self, src):
+ assert 0 < self._counter
+ entry = self._volatile_fps.get(src)
+ if entry is not None:
+ _size, fp = entry
+ fp.seek(0)
+ yield fp
+ else:
+ if self._copies is None:
+ actual_path = src
+ else:
+ actual_path = self._copies.get(src, src)
+ with open(actual_path, 'rb') as fp:
+ yield fp
def _makemap(repo):
@@ -657,12 +718,12 @@
_(b'bundle'), total=totalfilesize, unit=_(b'bytes')
)
progress.update(0)
- with TempCopyManager() as copy, progress:
- # create a copy of volatile files
+ with VolatileManager() as volatiles, progress:
+ # make sure we preserve volatile files
for k, vfs, e in entries:
for f in e.files():
if f.is_volatile:
- copy(vfs.join(f.unencoded_path))
+ volatiles(vfs.join(f.unencoded_path))
# the first yield releases the lock on the repository
yield file_count, totalfilesize
totalbytecount = 0
@@ -671,7 +732,7 @@
entry_streams = e.get_streams(
repo=repo,
vfs=vfs,
- copies=copy,
+ volatiles=volatiles,
max_changeset=max_linkrev,
preserve_file_count=True,
)
@@ -720,15 +781,15 @@
unit=_(b'entry'),
)
progress.update(0)
- with TempCopyManager() as copy, progress:
- # create a copy of volatile files
+ with VolatileManager() as volatiles, progress:
+ # make sure we preserve volatile files
for k, vfs, e in entries:
if e.maybe_volatile:
for f in e.files():
if f.is_volatile:
# record the expected size under lock
f.file_size(vfs)
- copy(vfs.join(f.unencoded_path))
+ volatiles(vfs.join(f.unencoded_path))
# the first yield releases the lock on the repository
yield None
@@ -738,7 +799,7 @@
entry_streams = e.get_streams(
repo=repo,
vfs=vfs,
- copies=copy,
+ volatiles=volatiles,
max_changeset=max_linkrev,
)
yield util.uvarintencode(len(entry_streams))
@@ -809,7 +870,6 @@
"""
with repo.lock():
-
repo.ui.debug(b'scanning\n')
entries = _entries_walk(
@@ -857,7 +917,6 @@
# considering the files to preserve, disabling the gc while we do so helps
# performance a lot.
with repo.lock(), util.nogc():
-
repo.ui.debug(b'scanning\n')
entries = _entries_walk(
@@ -990,7 +1049,6 @@
with repo.transaction(b'clone'):
ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
with nested(*ctxs):
-
for i in range(entrycount):
filecount = util.uvarintdecodestream(fp)
if filecount == 0:
@@ -1123,7 +1181,6 @@
with dest_repo.lock():
with src_repo.lock():
-
# bookmarks are not integrated into the streaming as they might use the
# `repo.vfs`, and there is too much sensitive data accessible
# through `repo.vfs` to expose it to streaming clone.
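
For illustration, a toy, self-contained rendition of the strategy the new
VolatileManager implements above; MAX_OPEN is shrunk to 2 and all names are
hypothetical, not the real API:

    import contextlib
    import os
    import tempfile

    class MiniVolatileManager:
        """Keep open handles to volatile files; spill to temp copies on overflow."""

        MAX_OPEN = 2  # the real class uses 100

        def __init__(self):
            self._fps = {}
            self._copies = {}
            self._dir = tempfile.mkdtemp(prefix='mini-clone-')

        def __call__(self, path):
            if len(self._fps) >= self.MAX_OPEN:
                # spill one open handle to an on-disk copy
                src, fp = self._fps.popitem()
                fp.seek(0)
                dst = os.path.join(self._dir, os.path.basename(src))
                with open(dst, 'wb') as bck:
                    bck.write(fp.read())
                fp.close()
                self._copies[src] = dst
            self._fps[path] = open(path, 'rb')

        @contextlib.contextmanager
        def open(self, path):
            fp = self._fps.get(path)
            if fp is not None:
                fp.seek(0)
                yield fp
            else:
                # served from the temp copy, or straight from disk if never kept
                with open(self._copies.get(path, path), 'rb') as fp:
                    yield fp
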
--- a/mercurial/strip.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/strip.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from .i18n import _
from . import (
bookmarks as bookmarksmod,
@@ -63,7 +65,6 @@
soft=False,
):
with repo.wlock(), repo.lock():
-
if update:
checklocalchanges(repo, force=force)
urev = _findupdatetarget(repo, revs)
--- a/mercurial/subrepo.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/subrepo.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import copy
import errno
@@ -363,21 +364,21 @@
"""handle the files command for this subrepo"""
return 1
- def archive(self, archiver, prefix, match=None, decode=True):
- if match is not None:
- files = [f for f in self.files() if match(f)]
- else:
- files = self.files()
+ def archive(self, opener, prefix, match: matchmod.basematcher, decode=True):
+ files = [f for f in self.files() if match(f)]
total = len(files)
relpath = subrelpath(self)
progress = self.ui.makeprogress(
_(b'archiving (%s)') % relpath, unit=_(b'files'), total=total
)
progress.update(0)
+ archiver = None
for name in files:
flags = self.fileflags(name)
mode = b'x' in flags and 0o755 or 0o644
symlink = b'l' in flags
+ if archiver is None:
+ archiver = opener()
archiver.addfile(
prefix + name, mode, symlink, self.filedata(name, decode)
)
@@ -652,22 +653,20 @@
)
@annotatesubrepoerror
- def archive(self, archiver, prefix, match=None, decode=True):
+ def archive(self, opener, prefix, match: matchmod.basematcher, decode=True):
self._get(self._state + (b'hg',))
- files = self.files()
- if match:
- files = [f for f in files if match(f)]
+ files = [f for f in self.files() if match(f)]
rev = self._state[1]
ctx = self._repo[rev]
scmutil.prefetchfiles(
self._repo, [(ctx.rev(), scmutil.matchfiles(self._repo, files))]
)
- total = abstractsubrepo.archive(self, archiver, prefix, match)
+ total = abstractsubrepo.archive(self, opener, prefix, match)
for subpath in ctx.substate:
s = subrepo(ctx, subpath, True)
submatch = matchmod.subdirmatcher(subpath, match)
subprefix = prefix + subpath + b'/'
- total += s.archive(archiver, subprefix, submatch, decode)
+ total += s.archive(opener, subprefix, submatch, decode)
return total
@annotatesubrepoerror
@@ -1227,16 +1226,12 @@
externals.append(path)
elif item == 'missing':
missing.append(path)
- if (
- item
- not in (
- '',
- 'normal',
- 'unversioned',
- 'external',
- )
- or props not in ('', 'none', 'normal')
- ):
+ if item not in (
+ '',
+ 'normal',
+ 'unversioned',
+ 'external',
+ ) or props not in ('', 'none', 'normal'):
changes.append(path)
for path in changes:
for ext in externals:
@@ -1915,7 +1910,7 @@
else:
self.wvfs.unlink(f)
- def archive(self, archiver, prefix, match=None, decode=True):
+ def archive(self, opener, prefix, match: matchmod.basematcher, decode=True):
total = 0
source, revision = self._state
if not revision:
@@ -1931,12 +1926,13 @@
progress = self.ui.makeprogress(
_(b'archiving (%s)') % relpath, unit=_(b'files')
)
+ archiver = None
progress.update(0)
for info in tar:
if info.isdir():
continue
bname = pycompat.fsencode(info.name)
- if match and not match(bname):
+ if not match(bname):
continue
if info.issym():
data = info.linkname
@@ -1947,6 +1943,8 @@
else:
self.ui.warn(_(b'skipping "%s" (unknown type)') % bname)
continue
+ if archiver is None:
+ archiver = opener()
archiver.addfile(prefix + bname, info.mode, info.issym(), data)
total += 1
progress.increment()
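
The archive() signature change above swaps a ready-made archiver for an
opener callable, so subrepos that match nothing never materialize an archive
at all. A self-contained sketch of that contract (ListArchiver is a stand-in
class, not part of Mercurial):

    from typing import Callable, Iterable, Tuple

    class ListArchiver:
        """Stand-in archiver collecting members in memory."""

        def __init__(self) -> None:
            self.members = []

        def addfile(self, name: bytes, data: bytes) -> None:
            self.members.append((name, data))

    def archive(opener: Callable[[], ListArchiver],
                files: Iterable[Tuple[bytes, bytes]]) -> int:
        archiver = None
        total = 0
        for name, data in files:
            if archiver is None:
                archiver = opener()  # created only once something matches
            archiver.addfile(name, data)
            total += 1
        return total

    assert archive(ListArchiver, []) == 0  # nothing matched, nothing created
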
--- a/mercurial/subrepoutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/subrepoutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import posixpath
--- a/mercurial/tagmerge.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/tagmerge.py Sat Oct 26 04:16:00 2024 +0200
@@ -71,6 +71,7 @@
# - put blocks whose nodes come all from p2 first
# - write the tag blocks in the sorted order
+from __future__ import annotations
from .i18n import _
from . import (
--- a/mercurial/tags.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/tags.py Sat Oct 26 04:16:00 2024 +0200
@@ -10,6 +10,7 @@
# Eventually, it could take care of updating (adding/removing/moving)
# tags too.
+from __future__ import annotations
import binascii
import io
@@ -601,7 +602,7 @@
# we keep them in UTF-8 throughout this module. If we converted
# them to local encoding on input, we would lose info writing them to
# the cache.
- for (name, (node, hist)) in sorted(cachetags.items()):
+ for name, (node, hist) in sorted(cachetags.items()):
for n in hist:
cachefile.write(b"%s %s\n" % (hex(n), name))
cachefile.write(b"%s %s\n" % (hex(node), name))
@@ -851,25 +852,45 @@
rev = ctx.rev()
fnode = None
cl = self._repo.changelog
+ ml = self._repo.manifestlog
+ mctx = ctx.manifestctx()
+ base_values = {}
p1rev, p2rev = cl._uncheckedparentrevs(rev)
- p1node = cl.node(p1rev)
- p1fnode = self.getfnode(p1node, computemissing=False)
+ m_p1_node, m_p2_node = mctx.parents
+ if p1rev != nullrev:
+ p1_node = cl.node(p1rev)
+ fnode = self.getfnode(p1_node, computemissing=False)
+ # when unknown, fnode is None or False
+ if fnode:
+ p1_manifest_rev = ml.rev(m_p1_node)
+ base_values[p1_manifest_rev] = fnode
if p2rev != nullrev:
- # There is some no-merge changeset where p1 is null and p2 is set
- # Processing them as merge is just slower, but still gives a good
- # result.
- p2node = cl.node(p2rev)
- p2fnode = self.getfnode(p2node, computemissing=False)
- if p1fnode != p2fnode:
- # we cannot rely on readfast because we don't know against what
- # parent the readfast delta is computed
- p1fnode = None
- if p1fnode:
- mctx = ctx.manifestctx()
- fnode = mctx.readfast().get(b'.hgtags')
+ p2_node = cl.node(p2rev)
+ fnode = self.getfnode(p2_node, computemissing=False)
+ # when unknown, fnode is None or False
+ if fnode:
+ p2_manifest_rev = ml.rev(m_p2_node)
+ base_values[p2_manifest_rev] = fnode
+ # XXX: Beware that using a delta to speed things up here is actually
+ # buggy, as it will fail to detect a `.hgtags` deletion. That buggy
+ # behavior has been cargo-culted from the previous version of this code
+ # as "in practice this seems fine", and not using a delta is just too
+ # slow.
+ #
+ # However, note that we only consider a delta from p1 or p2 because a
+ # `.hgtags` deletion in a child is far less likely than the file missing
+ # from one branch to another. As the delta chain construction keeps being
+ # optimized, it means we will not use deltas as often as we could.
+ if base_values:
+ base, m = mctx.read_any_fast_delta(base_values)
+ fnode = m.get(b'.hgtags')
if fnode is None:
- fnode = p1fnode
- if fnode is None:
+ if base is not None:
+ fnode = base_values[base]
+ else:
+ # No delta base and no .hgtags file on this revision.
+ fnode = self._repo.nullid
+ else:
# Populate missing entry.
try:
fnode = ctx.filenode(b'.hgtags')
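
A pure-data sketch of the fallback logic above, assuming read_any_fast_delta()
reports the delta base it settled on together with the (possibly shallow)
manifest content:

    NULLID = b'\0' * 20  # stand-in for repo.nullid

    def pick_fnode(base, delta, base_values):
        fnode = delta.get(b'.hgtags')
        if fnode is None:
            if base is not None:
                # .hgtags untouched by the delta: reuse the base's fnode
                return base_values[base]
            return NULLID  # no delta base and no .hgtags entry
        return fnode

    assert pick_fnode(1, {}, {1: b'f' * 20}) == b'f' * 20
    assert pick_fnode(None, {}, {}) == NULLID
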
--- a/mercurial/templatefilters.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/templatefilters.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import re
--- a/mercurial/templatefuncs.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/templatefuncs.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import binascii
import re
--- a/mercurial/templatekw.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/templatekw.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from .node import (
@@ -482,7 +483,7 @@
return showlatesttags(context, mapping, None)
-def showlatesttags(context, mapping, pattern):
+def showlatesttags(context, mapping, pattern) -> _hybrid:
"""helper method for the latesttag keyword and function"""
latesttags = getlatesttags(context, mapping, pattern)
--- a/mercurial/templater.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/templater.py Sat Oct 26 04:16:00 2024 +0200
@@ -65,10 +65,17 @@
operation.
"""
+from __future__ import annotations
import abc
import os
+from typing import (
+ BinaryIO,
+ Optional,
+ Tuple,
+)
+
from .i18n import _
from .pycompat import (
FileNotFoundError,
@@ -1121,7 +1128,9 @@
return path if os.path.isdir(path) else None
-def open_template(name, templatepath=None):
+def open_template(
+ name: bytes, templatepath: Optional[bytes] = None
+) -> Tuple[bytes, BinaryIO]:
"""returns a file-like object for the given template, and its full path
If the name is a relative path and we're in a frozen binary, the template
@@ -1156,7 +1165,9 @@
)
-def try_open_template(name, templatepath=None):
+def try_open_template(
+ name: bytes, templatepath: Optional[bytes] = None
+) -> Tuple[Optional[bytes], Optional[BinaryIO]]:
try:
return open_template(name, templatepath)
except (EnvironmentError, ImportError):
--- a/mercurial/templateutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/templateutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import abc
import types
--- a/mercurial/testing/__init__.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/testing/__init__.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import os
import time
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/testing/ps_util.py Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,50 @@
+# This Python code can be imported into tests in order to terminate a process
+# with signal.SIGKILL on POSIX, or a roughly equivalent procedure on Windows.
+import os
+import signal
+import subprocess
+import sys
+import tempfile
+
+from .. import (
+ encoding,
+ pycompat,
+)
+
+from ..utils import procutil
+
+
+def kill_nt(pid: int, exit_code: int):
+ fd, pidfile = tempfile.mkstemp(
+ prefix=b"sigkill-", dir=encoding.environ[b"HGTMP"], text=False
+ )
+ try:
+ os.write(fd, b'%d\n' % pid)
+ finally:
+ os.close(fd)
+
+ env = dict(encoding.environ)
+ env[b"DAEMON_EXITCODE"] = b"%d" % exit_code
+
+ # Simulate the message written to stderr for this process on non-Windows
+ # platforms, for test consistency.
+ print("Killed!", file=sys.stderr)
+
+ subprocess.run(
+ [
+ encoding.environ[b"PYTHON"],
+ b"%s/killdaemons.py"
+ % encoding.environ[b'RUNTESTDIR_FORWARD_SLASH'],
+ pidfile,
+ ],
+ env=procutil.tonativeenv(env),
+ )
+
+
+def kill(pid: int):
+ """Kill the process with the given PID with SIGKILL or equivalent."""
+ if pycompat.iswindows:
+ exit_code = 128 + 9
+ kill_nt(pid, exit_code)
+ else:
+ os.kill(pid, signal.SIGKILL)
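
A hypothetical driver for the new helper, exercising the POSIX path; it
assumes a test environment where mercurial.testing.ps_util is importable:

    import subprocess
    import sys
    import time

    from mercurial.testing import ps_util

    # Spawn a child that would otherwise run for minutes, then SIGKILL it.
    child = subprocess.Popen(
        [sys.executable, '-c', 'import time; time.sleep(600)']
    )
    time.sleep(0.1)  # give the child a moment to start
    ps_util.kill(child.pid)
    child.wait()
    assert child.returncode != 0  # -9 on POSIX
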
--- a/mercurial/testing/revlog.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/testing/revlog.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import unittest
# picked from test-parse-index2, copied rather than imported
--- a/mercurial/testing/storage.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/testing/storage.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import unittest
--- a/mercurial/transaction.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/transaction.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,6 +11,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import errno
import os
--- a/mercurial/treediscovery.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/treediscovery.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
--- a/mercurial/txnutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/txnutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from . import encoding
--- a/mercurial/typelib.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/typelib.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,8 +5,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import typing
+from typing import (
+ Callable,
+)
+
# Note: this is slightly different from pycompat.TYPE_CHECKING, as using
# pycompat causes the BinaryIO_Proxy type to be resolved to ``object`` when
# used as the base class during a pytype run.
@@ -21,8 +27,29 @@
if TYPE_CHECKING:
from typing import (
BinaryIO,
+ Union,
+ )
+
+ from . import (
+ node,
+ posix,
+ util,
+ windows,
)
BinaryIO_Proxy = BinaryIO
+ CacheStat = Union[
+ posix.cachestat,
+ windows.cachestat,
+ util.uncacheable_cachestat,
+ ]
+ NodeConstants = node.sha1nodeconstants
else:
+ from typing import Any
+
BinaryIO_Proxy = object
+ CacheStat = Any
+ NodeConstants = Any
+
+# scmutil.getuipathfn() related callback.
+UiPathFn = Callable[[bytes], bytes]
--- a/mercurial/ui.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/ui.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import collections
import contextlib
--- a/mercurial/unionrepo.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/unionrepo.py Sat Oct 26 04:16:00 2024 +0200
@@ -11,8 +11,10 @@
allowing operations like diff and log with revsets.
"""
+from __future__ import annotations
+
import contextlib
-
+import typing
from .i18n import _
@@ -37,7 +39,9 @@
class unionrevlog(revlog.revlog):
- def __init__(self, opener, radix, revlog2, linkmapper):
+ def __init__(self, opener: typing.Any, radix, revlog2, linkmapper):
+ # TODO: figure out real type of opener
+ #
# How it works:
# To retrieve a revision, we just need to know the node id so we can
# look it up in revlog2.
@@ -47,6 +51,10 @@
opener = vfsmod.readonlyvfs(opener)
target = getattr(revlog2, 'target', None)
if target is None:
+ # Help pytype: changelog and revlog are not possible here because
+ # they both have a 'target' attr.
+ assert not isinstance(revlog2, (changelog.changelog, revlog.revlog))
+
# a revlog wrapper, eg: the manifestlog that is not an actual revlog
target = revlog2._revlog.target
revlog.revlog.__init__(self, opener, target=target, radix=radix)
@@ -129,7 +137,7 @@
def _chunk(self, rev):
if rev <= self.repotiprev:
- return revlog.revlog._chunk(self, rev)
+ return super(unionrevlog, self)._inner._chunk(rev)
return self.revlog2._chunk(self.node(rev))
def revdiff(self, rev1, rev2):
@@ -204,6 +212,9 @@
class unionmanifest(unionrevlog, manifest.manifestrevlog):
+ repotiprev: int
+ revlog2: manifest.manifestrevlog
+
def __init__(self, nodeconstants, opener, opener2, linkmapper):
# XXX manifestrevlog is not actually a revlog, so mixing it with
# bundlerevlog is not a good idea.
@@ -215,6 +226,10 @@
class unionfilelog(filelog.filelog):
+ _revlog: unionrevlog
+ repotiprev: int
+ revlog2: revlog.revlog
+
def __init__(self, opener, path, opener2, linkmapper, repo):
filelog.filelog.__init__(self, opener, path)
filelog2 = filelog.filelog(opener2, path)
@@ -238,13 +253,20 @@
return False
-class unionrepository:
+_union_repo_baseclass = object
+
+if typing.TYPE_CHECKING:
+ _union_repo_baseclass = localrepo.localrepository
+
+
+class unionrepository(_union_repo_baseclass):
"""Represents the union of data in 2 repositories.
Instances are not usable if constructed directly. Use ``instance()``
or ``makeunionrepository()`` to create a usable instance.
"""
+ # noinspection PyMissingConstructor
def __init__(self, repo2, url):
self.repo2 = repo2
self._url = url
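
The _union_repo_baseclass indirection above hands pytype a localrepository
base class while keeping plain object at runtime. The same trick in isolation
(names hypothetical):

    import typing

    _base = object
    if typing.TYPE_CHECKING:
        class _RichBase:
            def caps(self) -> int:
                return 0

        _base = _RichBase  # type checkers resolve attributes via this

    class Combined(_base):  # at runtime this is just `class Combined(object)`
        pass
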
--- a/mercurial/upgrade.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/upgrade.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
from .i18n import _
from . import (
@@ -194,7 +195,6 @@
onlydefault.append(d)
if fromconfig or onlydefault:
-
if fromconfig:
ui.status(
_(
@@ -309,7 +309,7 @@
):
"""Upgrades a share to use share-safe mechanism"""
wlock = None
- store_requirements = localrepo._readrequires(storevfs, False)
+ store_requirements = scmutil.readrequires(storevfs, False)
original_crequirements = current_requirements.copy()
# after upgrade, store requires will be shared, so lets find
# the requirements which are not present in store and
@@ -326,7 +326,7 @@
wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
# some process might change the requirement in between, re-read
# and update current_requirements
- locked_requirements = localrepo._readrequires(hgvfs, True)
+ locked_requirements = scmutil.readrequires(hgvfs, True)
if locked_requirements != original_crequirements:
removed = current_requirements - locked_requirements
# update current_requirements in place because it's passed
@@ -372,7 +372,7 @@
):
"""Downgrades a share which use share-safe to not use it"""
wlock = None
- source_requirements = localrepo._readrequires(sharedvfs, True)
+ source_requirements = scmutil.readrequires(sharedvfs, True)
original_crequirements = current_requirements.copy()
# we cannot be 100% sure on which requirements were present in store when
# the source supported share-safe. However, we do know that working
@@ -387,7 +387,7 @@
wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
# some process might change the requirement in between, re-read
# and update current_requirements
- locked_requirements = localrepo._readrequires(hgvfs, True)
+ locked_requirements = scmutil.readrequires(hgvfs, True)
if locked_requirements != original_crequirements:
removed = current_requirements - locked_requirements
# update current_requirements in place because it's passed
--- a/mercurial/upgrade_utils/actions.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/upgrade_utils/actions.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import random
from typing import (
--- a/mercurial/upgrade_utils/auto_upgrade.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/upgrade_utils/auto_upgrade.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,6 +4,9 @@
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+
+from __future__ import annotations
+
from ..i18n import _
from .. import (
--- a/mercurial/upgrade_utils/engine.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/upgrade_utils/engine.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import stat
@@ -30,6 +31,14 @@
def get_sidedata_helpers(srcrepo, dstrepo):
use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+
+ if use_w and pycompat.isdarwin:
+ # Avoid a PicklingError on macOS in bundlerepository.
+ use_w = False
+ srcrepo.ui.debug(
+ b'ignoring experimental.worker.repository-upgrade=True on darwin'
+ )
+
sequential = pycompat.iswindows or not use_w
if not sequential:
srcrepo.register_sidedata_computer(
--- a/mercurial/url.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/url.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,11 +7,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import base64
+import hashlib
import socket
from .i18n import _
+from .node import hex
from . import (
encoding,
error,
@@ -231,36 +234,29 @@
return keepalive.HTTPHandler._start_transaction(self, h, req)
-class logginghttpconnection(keepalive.HTTPConnection):
- def __init__(self, createconn, *args, **kwargs):
- keepalive.HTTPConnection.__init__(self, *args, **kwargs)
- self._create_connection = createconn
-
+class logginghttphandler(httphandler):
+ """HTTP(S) handler that logs socket I/O."""
-class logginghttphandler(httphandler):
- """HTTP handler that logs socket I/O."""
-
- def __init__(self, logfh, name, observeropts, timeout=None):
- super(logginghttphandler, self).__init__(timeout=timeout)
+ def __init__(self, logfh, name, observeropts, *args, **kwargs):
+ super().__init__(*args, **kwargs)
self._logfh = logfh
self._logname = name
self._observeropts = observeropts
- # do_open() calls the passed class to instantiate an HTTPConnection. We
- # pass in a callable method that creates a custom HTTPConnection instance
- # whose callback to create the socket knows how to proxy the socket.
- def http_open(self, req):
- return self.do_open(self._makeconnection, req)
+ def do_open(self, http_class, *args, **kwargs):
+ _logfh = self._logfh
+ _logname = self._logname
+ _observeropts = self._observeropts
- def _makeconnection(self, *args, **kwargs):
- def createconnection(*args, **kwargs):
- sock = socket.create_connection(*args, **kwargs)
- return util.makeloggingsocket(
- self._logfh, sock, self._logname, **self._observeropts
- )
+ class logginghttpconnection(http_class):
+ def connect(self):
+ super().connect()
+ self.sock = util.makeloggingsocket(
+ _logfh, self.sock, _logname, **_observeropts
+ )
- return logginghttpconnection(createconnection, *args, **kwargs)
+ return super().do_open(logginghttpconnection, *args, **kwargs)
if has_https:
@@ -319,7 +315,7 @@
key_file=None,
cert_file=None,
*args,
- **kwargs
+ **kwargs,
):
keepalive.HTTPConnection.__init__(self, host, port, *args, **kwargs)
self.key_file = key_file
@@ -462,41 +458,123 @@
return None
-class cookiehandler(urlreq.basehandler):
- def __init__(self, ui):
- self.cookiejar = None
-
- cookiefile = ui.config(b'auth', b'cookiefile')
- if not cookiefile:
- return
-
- cookiefile = util.expandpath(cookiefile)
- try:
- cookiejar = util.cookielib.MozillaCookieJar(
- pycompat.fsdecode(cookiefile)
+def load_cookiejar(ui):
+ cookiefile = ui.config(b'auth', b'cookiefile')
+ if not cookiefile:
+ return
+ cookiefile = util.expandpath(cookiefile)
+ try:
+ cookiejar = util.cookielib.MozillaCookieJar(
+ pycompat.fsdecode(cookiefile)
+ )
+ cookiejar.load()
+ return cookiejar
+ except util.cookielib.LoadError as e:
+ ui.warn(
+ _(
+ b'(error loading cookie file %s: %s; continuing without '
+ b'cookies)\n'
)
- cookiejar.load()
- self.cookiejar = cookiejar
- except util.cookielib.LoadError as e:
- ui.warn(
- _(
- b'(error loading cookie file %s: %s; continuing without '
- b'cookies)\n'
- )
- % (cookiefile, stringutil.forcebytestr(e))
- )
+ % (cookiefile, stringutil.forcebytestr(e))
+ )
+
+
+class readlinehandler(urlreq.basehandler):
+ def http_response(self, request, response):
+ class readlineresponse(response.__class__):
+ def readlines(self, sizehint=0):
+ total = 0
+ list = []
+ while True:
+ line = self.readline()
+ if not line:
+ break
+ list.append(line)
+ total += len(line)
+ if sizehint and total >= sizehint:
+ break
+ return list
+
+ response.__class__ = readlineresponse
+ return response
+
+ https_response = http_response
+
+
+class digesthandler(urlreq.basehandler):
+ # exchange.py assumes the algorithms are listed in order of preference,
+ # earlier entries are prefered.
+ digest_algorithms = {
+ b'sha256': hashlib.sha256,
+ b'sha512': hashlib.sha512,
+ }
+
+ def __init__(self, digest):
+ if b':' not in digest:
+ raise error.Abort(_(b'invalid digest specification'))
+ algo, checksum = digest.split(b':')
+ if algo not in self.digest_algorithms:
+ raise error.Abort(_(b'unsupported digest algorithm: %s') % algo)
+ self._digest = checksum
+ self._hasher = self.digest_algorithms[algo]()
- def http_request(self, request):
- if self.cookiejar:
- self.cookiejar.add_cookie_header(request)
+ def http_response(self, request, response):
+ class digestresponse(response.__class__):
+ def _digest_input(self, data):
+ self._hasher.update(data)
+ self._digest_consumed += len(data)
+ if self._digest_finished:
+ digest = hex(self._hasher.digest())
+ if digest != self._digest:
+ raise error.SecurityError(
+ _(
+ b'file with digest %s expected, but %s found for %d bytes'
+ )
+ % (
+ pycompat.bytestr(self._digest),
+ pycompat.bytestr(digest),
+ self._digest_consumed,
+ )
+ )
- return request
+ def read(self, amt=None):
+ self._digest_recursion_level += 1
+ data = super().read(amt)
+ self._digest_recursion_level -= 1
+ if self._digest_recursion_level == 0:
+ self._digest_input(data)
+ return data
- def https_request(self, request):
- if self.cookiejar:
- self.cookiejar.add_cookie_header(request)
+ def readline(self):
+ self._digest_recursion_level += 1
+ data = super().readline()
+ self._digest_recursion_level -= 1
+ if self._digest_recursion_level == 0:
+ self._digest_input(data)
+ return data
- return request
+ def readinto(self, dest):
+ self._digest_recursion_level += 1
+ got = super().readinto(dest)
+ self._digest_recursion_level -= 1
+ if self._digest_recursion_level == 0:
+ self._digest_input(dest[:got])
+ return got
+
+ def _close_conn(self):
+ self._digest_finished = True
+ return super().close()
+
+ response.__class__ = digestresponse
+ response._digest = self._digest
+ response._digest_consumed = 0
+ response._hasher = self._hasher.copy()
+ # Python 3.8 / 3.9 recurses internally between read/readinto.
+ response._digest_recursion_level = 0
+ response._digest_finished = False
+ return response
+
+ https_response = http_response
handlerfuncs = []
@@ -510,6 +588,7 @@
loggingname=b's',
loggingopts=None,
sendaccept=True,
+ digest=None,
):
"""
construct an opener suitable for urllib2
@@ -535,16 +614,13 @@
loggingfh, loggingname, loggingopts or {}, timeout=timeout
)
)
- # We don't yet support HTTPS when logging I/O. If we attempt to open
- # an HTTPS URL, we'll likely fail due to unknown protocol.
-
else:
handlers.append(httphandler(timeout=timeout))
- if has_https:
- # pytype get confused about the conditional existence for httpshandler here.
- handlers.append(
- httpshandler(ui, timeout=timeout) # pytype: disable=name-error
- )
+ if has_https:
+ # pytype gets confused about the conditional existence of httpshandler here.
+ handlers.append(
+ httpshandler(ui, timeout=timeout) # pytype: disable=name-error
+ )
handlers.append(proxyhandler(ui))
@@ -563,7 +639,10 @@
(httpbasicauthhandler(passmgr), httpdigestauthhandler(passmgr))
)
handlers.extend([h(ui, passmgr) for h in handlerfuncs])
- handlers.append(cookiehandler(ui))
+ handlers.append(urlreq.httpcookieprocessor(cookiejar=load_cookiejar(ui)))
+ handlers.append(readlinehandler())
+ if digest:
+ handlers.append(digesthandler(digest))
opener = urlreq.buildopener(*handlers)
# keepalive.py's handlers will populate these attributes if they exist.
@@ -602,7 +681,7 @@
return opener
-def open(ui, url_, data=None, sendaccept=True):
+def open(ui, url_, data=None, sendaccept=True, digest=None):
u = urlutil.url(url_)
if u.scheme:
u.scheme = u.scheme.lower()
@@ -613,7 +692,7 @@
urlreq.pathname2url(pycompat.fsdecode(path))
)
authinfo = None
- return opener(ui, authinfo, sendaccept=sendaccept).open(
+ return opener(ui, authinfo, sendaccept=sendaccept, digest=digest).open(
pycompat.strurl(url_), data
)
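
The check the new digesthandler enforces, reduced to its essence; the payload
and digest specification are invented for the sketch:

    import hashlib

    payload = b'some bundle bytes'
    spec = b'sha256:' + hashlib.sha256(payload).hexdigest().encode('ascii')

    algo, _, checksum = spec.partition(b':')
    hasher = {b'sha256': hashlib.sha256, b'sha512': hashlib.sha512}[algo]()
    hasher.update(payload)  # the handler feeds every read() through this
    assert hasher.hexdigest().encode('ascii') == checksum
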
--- a/mercurial/urllibcompat.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/urllibcompat.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
import http.server
import urllib.error
import urllib.parse
@@ -68,6 +70,7 @@
b"FileHandler",
b"FTPHandler",
b"ftpwrapper",
+ b"HTTPCookieProcessor",
b"HTTPHandler",
b"HTTPSHandler",
b"install_opener",
@@ -109,6 +112,7 @@
),
)
+
# urllib.parse.quote() accepts both str and bytes, decodes bytes
# (if necessary), and returns str. This is wonky. We provide a custom
# implementation that only accepts bytes and emits bytes.
--- a/mercurial/util.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/util.py Sat Oct 26 04:16:00 2024 +0200
@@ -13,6 +13,7 @@
hide platform-specific details from the core.
"""
+from __future__ import annotations
import abc
import collections
@@ -32,19 +33,30 @@
import sys
import time
import traceback
+import typing
import warnings
from typing import (
Any,
+ BinaryIO,
+ Callable,
Iterable,
Iterator,
List,
Optional,
Tuple,
+ Type,
+ TypeVar,
)
from .node import hex
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .pycompat import (
open,
)
@@ -55,8 +67,12 @@
i18n,
policy,
pycompat,
+ typelib,
urllibcompat,
)
+from .interfaces import (
+ modules as intmod,
+)
from .utils import (
compression,
hashutil,
@@ -74,7 +90,7 @@
]
-base85 = policy.importmod('base85')
+base85: intmod.Base85 = policy.importmod('base85')
osutil = policy.importmod('osutil')
b85decode = base85.b85decode
@@ -148,6 +164,10 @@
username = platform.username
+if typing.TYPE_CHECKING:
+ _Tfilestat = TypeVar('_Tfilestat', bound='filestat')
+
+
def setumask(val: int) -> None:
'''updates the umask. used by chg server'''
if pycompat.iswindows:
@@ -323,8 +343,10 @@
def buffer(sliceable, offset=0, length=None):
if length is not None:
- return memoryview(sliceable)[offset : offset + length]
- return memoryview(sliceable)[offset:]
+ view = memoryview(sliceable)[offset : offset + length]
+ else:
+ view = memoryview(sliceable)[offset:]
+ return view.toreadonly()
_chunksize = 4096
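
The behavioral change in buffer() above, shown directly: the returned view
now rejects writes.

    data = bytearray(b'abcd')
    view = memoryview(data)[1:3].toreadonly()  # what buffer() now returns
    assert bytes(view) == b'bc'
    try:
        view[0] = 0
    except TypeError:
        pass  # read-only views raise on assignment
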
@@ -440,13 +462,24 @@
return data
-def mmapread(fp, size=None):
+def has_mmap_populate():
+ return hasattr(osutil, "background_mmap_populate") or hasattr(
+ mmap, 'MAP_POPULATE'
+ )
+
+
+def mmapread(fp, size=None, pre_populate=True):
"""Read a file content using mmap
- The responsability of checking the file system is mmap safe is the
- responsability of the caller.
+ The responsibility of checking the file system is mmap safe is the
+ responsibility of the caller (see `vfs.is_mmap_safe`).
In some case, a normal string might be returned.
+
+ If `pre_populate` is True (the default), the mmapped data will be
+ pre-populated in memory if the system supports this option; this slows down
+ the initial mmapping but avoids potentially crippling page faults on later
+ access. If this is not the desired behavior, set `pre_populate` to False.
"""
if size == 0:
# size of 0 to mmap.mmap() means "all data"
@@ -455,8 +488,24 @@
elif size is None:
size = 0
fd = getattr(fp, 'fileno', lambda: fp)()
+
+ if pycompat.iswindows:
+ _mmap = lambda fd, size: mmap.mmap(fd, size, access=mmap.ACCESS_READ)
+ else:
+ flags = mmap.MAP_PRIVATE
+ bg_populate = hasattr(osutil, "background_mmap_populate")
+
+ if pre_populate and not bg_populate:
+ flags |= getattr(mmap, 'MAP_POPULATE', 0)
+
+ def _mmap(fd, size) -> mmap.mmap:
+ m = mmap.mmap(fd, size, flags=flags, prot=mmap.PROT_READ)
+ if pre_populate and bg_populate:
+ osutil.background_mmap_populate(m)
+ return m
+
try:
- return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
+ return _mmap(fd, size)
except ValueError:
# Empty files cannot be mmapped, but mmapread should still work. Check
# if the file is empty, and if so, return an empty buffer.
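
The POSIX branch of the new mmapread() in miniature; MAP_POPULATE is
Linux-only (hence the getattr fallback), and the compiled
osutil.background_mmap_populate alternative is not modeled here:

    import mmap
    import tempfile

    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(b'x' * 4096)
        tmp.flush()
        flags = mmap.MAP_PRIVATE | getattr(mmap, 'MAP_POPULATE', 0)
        with mmap.mmap(tmp.fileno(), 0, flags=flags, prot=mmap.PROT_READ) as m:
            # pages are pre-faulted at map time when MAP_POPULATE is available
            assert m[:4] == b'xxxx'
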
@@ -465,6 +514,16 @@
raise
+class uncacheable_cachestat:
+ stat: Optional[os.stat_result]
+
+ def __init__(self) -> None:
+ self.stat = None
+
+ def cacheable(self) -> bool:
+ return False
+
+
class fileobjectproxy:
"""A proxy around file objects that tells a watcher when events occur.
@@ -1139,7 +1198,7 @@
def version():
"""Return version information if available."""
try:
- from . import __version__
+ from . import __version__ # pytype: disable=import-error
return __version__.version
except ImportError:
@@ -1315,7 +1374,7 @@
self[k] = f[k]
def insert(self, position, key, value):
- for (i, (k, v)) in enumerate(list(self.items())):
+ for i, (k, v) in enumerate(list(self.items())):
if i == position:
self[key] = value
if i >= position:
@@ -2148,7 +2207,7 @@
timer = time.time
-def makelock(info, pathname):
+def makelock(info: bytes, pathname: bytes) -> None:
"""Create a lock file atomically if possible
This may leave a stale lock file if symlink isn't supported and signal
@@ -2164,8 +2223,10 @@
flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
ld = os.open(pathname, flags)
- os.write(ld, info)
- os.close(ld)
+ try:
+ os.write(ld, info)
+ finally:
+ os.close(ld)
def readlock(pathname: bytes) -> bytes:
@@ -2404,7 +2465,12 @@
return path.split(pycompat.ossep)
-def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
+def mktempcopy(
+ name: bytes,
+ emptyok: bool = False,
+ createmode: Optional[int] = None,
+ enforcewritable: bool = False,
+) -> bytes:
"""Create a temporary file with the same contents from name
The permission bits are copied from the original file.
@@ -2455,11 +2521,11 @@
'exists()' examination on client side of this class.
"""
- def __init__(self, stat):
+ def __init__(self, stat: Optional[os.stat_result]) -> None:
self.stat = stat
@classmethod
- def frompath(cls, path):
+ def frompath(cls: Type[_Tfilestat], path: bytes) -> _Tfilestat:
try:
stat = os.stat(path)
except FileNotFoundError:
@@ -2467,13 +2533,13 @@
return cls(stat)
@classmethod
- def fromfp(cls, fp):
+ def fromfp(cls: Type[_Tfilestat], fp: BinaryIO) -> _Tfilestat:
stat = os.fstat(fp.fileno())
return cls(stat)
__hash__ = object.__hash__
- def __eq__(self, old):
+ def __eq__(self, old) -> bool:
try:
# if ambiguity between stat of new and old file is
# avoided, comparison of size, ctime and mtime is enough
@@ -2490,7 +2556,7 @@
except AttributeError:
return False
- def isambig(self, old):
+ def isambig(self, old: _Tfilestat) -> bool:
"""Examine whether new (= self) stat is ambiguous against old one
"S[N]" below means stat of a file at N-th change:
@@ -2525,7 +2591,7 @@
except AttributeError:
return False
- def avoidambig(self, path, old):
+ def avoidambig(self, path: bytes, old: _Tfilestat) -> bool:
"""Change file stat of specified path to avoid ambiguity
'old' should be previous filestat of 'path'.
@@ -2545,7 +2611,7 @@
return False
return True
- def __ne__(self, other):
+ def __ne__(self, other) -> bool:
return not self == other
@@ -2711,10 +2777,10 @@
def splitbig(chunks):
for chunk in chunks:
- if len(chunk) > 2 ** 20:
+ if len(chunk) > 2**20:
pos = 0
while pos < len(chunk):
- end = pos + 2 ** 18
+ end = pos + 2**18
yield chunk[pos:end]
pos = end
else:
@@ -2738,7 +2804,7 @@
while left > 0:
# refill the queue
if not queue:
- target = 2 ** 18
+ target = 2**18
for chunk in self.iter:
queue.append(chunk)
target -= len(chunk)
@@ -2894,20 +2960,20 @@
)
-class transformingwriter:
+class transformingwriter(typelib.BinaryIO_Proxy):
"""Writable file wrapper to transform data by function"""
- def __init__(self, fp, encode):
+ def __init__(self, fp: BinaryIO, encode: Callable[[bytes], bytes]) -> None:
self._fp = fp
self._encode = encode
- def close(self):
+ def close(self) -> None:
self._fp.close()
- def flush(self):
+ def flush(self) -> None:
self._fp.flush()
- def write(self, data):
+ def write(self, data: bytes) -> int:
return self._fp.write(self._encode(data))
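
A standalone check of the wrapper's contract, with tocrlf re-declared so the
snippet runs on its own:

    import io
    import re
    from typing import BinaryIO, Callable

    _eolre = re.compile(rb'\r*\n')

    def tocrlf(s: bytes) -> bytes:
        return _eolre.sub(b'\r\n', s)

    class TransformingWriter:
        """Write-through wrapper applying an encode function to each write."""

        def __init__(self, fp: BinaryIO, encode: Callable[[bytes], bytes]) -> None:
            self._fp = fp
            self._encode = encode

        def write(self, data: bytes) -> int:
            return self._fp.write(self._encode(data))

    buf = io.BytesIO()
    TransformingWriter(buf, tocrlf).write(b'one\ntwo\r\n')
    assert buf.getvalue() == b'one\r\ntwo\r\n'
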
@@ -2925,7 +2991,7 @@
return _eolre.sub(b'\r\n', s)
-def _crlfwriter(fp):
+def _crlfwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
return transformingwriter(fp, tocrlf)
@@ -2938,6 +3004,21 @@
fromnativeeol = pycompat.identity
nativeeolwriter = pycompat.identity
+if typing.TYPE_CHECKING:
+ # Replace the various overloads that come along with aliasing other methods
+ # with the narrow definition that we care about in the type checking phase
+ # only. This ensures that both Windows and POSIX see only the definition
+ # that is actually available.
+
+ def tonativeeol(s: bytes) -> bytes:
+ raise NotImplementedError
+
+ def fromnativeeol(s: bytes) -> bytes:
+ raise NotImplementedError
+
+ def nativeeolwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
+ raise NotImplementedError
+
# TODO delete since workaround variant for Python 2 no longer needed.
def iterfile(fp):
@@ -3068,12 +3149,12 @@
_sizeunits = (
- (b'm', 2 ** 20),
- (b'k', 2 ** 10),
- (b'g', 2 ** 30),
- (b'kb', 2 ** 10),
- (b'mb', 2 ** 20),
- (b'gb', 2 ** 30),
+ (b'm', 2**20),
+ (b'k', 2**10),
+ (b'g', 2**30),
+ (b'kb', 2**10),
+ (b'mb', 2**20),
+ (b'gb', 2**30),
(b'b', 1),
)
@@ -3307,6 +3388,7 @@
"""
if pycompat.sysplatform.startswith(b'win'):
# On Windows, use the GlobalMemoryStatusEx kernel function directly.
+ # noinspection PyPep8Naming
from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
from ctypes.wintypes import ( # pytype: disable=import-error
Structure,
--- a/mercurial/utils/cborutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/cborutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import struct
@@ -205,7 +206,6 @@
STREAM_ENCODERS = {
bytes: streamencodebytestring,
int: streamencodeint,
- int: streamencodeint,
list: streamencodearray,
tuple: streamencodearray,
dict: streamencodemap,
--- a/mercurial/utils/compression.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/compression.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import bz2
import collections
@@ -511,7 +512,7 @@
parts = []
pos = 0
while pos < insize:
- pos2 = pos + 2 ** 20
+ pos2 = pos + 2**20
parts.append(z.compress(data[pos:pos2]))
pos = pos2
parts.append(z.flush())
--- a/mercurial/utils/dateutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/dateutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import calendar
import datetime
--- a/mercurial/utils/hashutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/hashutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import hashlib
try:
--- a/mercurial/utils/memorytop.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/memorytop.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,8 @@
# memorytop in strategic places to show the current memory use by allocation
# site.
+from __future__ import annotations
+
import gc
import tracemalloc
--- a/mercurial/utils/procutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/procutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import errno
@@ -711,7 +712,6 @@
if stdin is not None:
stdin.close()
-
else:
def runbgcommand(
--- a/mercurial/utils/repoviewutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/repoviewutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
from .. import error
### Nearest subset relation
--- a/mercurial/utils/resourceutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/resourceutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,15 +7,24 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import sys
+import typing
from typing import Iterator
from .. import pycompat
+if typing.TYPE_CHECKING:
+ from typing import (
+ BinaryIO,
+ Iterator,
+ )
+
+
def mainfrozen():
"""return True if we are a frozen executable.
@@ -45,17 +54,16 @@
# leading "mercurial." off of the package name, so that these
# pseudo resources are found in their directory next to the
# executable.
- def _package_path(package):
+ def _package_path(package: bytes) -> bytes:
dirs = package.split(b".")
assert dirs[0] == b"mercurial"
return os.path.join(_rootpath, *dirs[1:])
-
else:
datapath = os.path.dirname(os.path.dirname(pycompat.fsencode(__file__)))
_rootpath = os.path.dirname(datapath)
- def _package_path(package):
+ def _package_path(package: bytes) -> bytes:
return os.path.join(_rootpath, *package.split(b"."))
@@ -78,11 +86,11 @@
# importlib.resources was not found (almost definitely because we're on a
# Python version before 3.7)
- def open_resource(package, name):
+ def open_resource(package: bytes, name: bytes) -> "BinaryIO":
path = os.path.join(_package_path(package), name)
return open(path, "rb")
- def is_resource(package, name):
+ def is_resource(package: bytes, name: bytes) -> bool:
path = os.path.join(_package_path(package), name)
try:
@@ -90,17 +98,16 @@
except (IOError, OSError):
return False
- def contents(package):
+ def contents(package: bytes) -> "Iterator[bytes]":
path = pycompat.fsdecode(_package_path(package))
for p in os.listdir(path):
yield pycompat.fsencode(p)
-
else:
from .. import encoding
- def open_resource(package, name):
+ def open_resource(package: bytes, name: bytes) -> "BinaryIO":
if hasattr(resources, 'files'):
return (
resources.files( # pytype: disable=module-attr
--- a/mercurial/utils/storageutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/storageutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import re
import struct
--- a/mercurial/utils/stringutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/stringutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,12 +7,14 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import ast
import codecs
import re as remod
import textwrap
import types
+import typing
from typing import (
Optional,
@@ -22,6 +24,11 @@
from ..i18n import _
from ..thirdparty import attr
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from .. import (
encoding,
error,
@@ -574,7 +581,6 @@
return mailmap
for line in mailmapcontent.splitlines():
-
# Don't bother checking the line if it is a comment or
# is an improperly formed author field
if line.lstrip().startswith(b'#'):
@@ -719,7 +725,7 @@
def escapestr(s: bytes) -> bytes:
# "bytes" is also a typing shortcut for bytes, bytearray, and memoryview
- if isinstance(s, memoryview):
+ if isinstance(s, (memoryview, bytearray)):
s = bytes(s)
# call underlying function of s.encode('string_escape') directly for
# Python 3 compatibility
@@ -801,7 +807,6 @@
chunks.reverse()
while chunks:
-
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
--- a/mercurial/utils/urlutil.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/utils/urlutil.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,11 +4,17 @@
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+
+from __future__ import annotations
+
import os
import re as remod
import socket
from typing import (
+ Callable,
+ Dict,
+ Tuple,
Union,
)
@@ -29,7 +35,7 @@
)
# keeps pyflakes happy
-assert [Union]
+assert [Callable, Dict, Tuple, Union]
urlreq = urllibcompat.urlreq
@@ -652,12 +658,12 @@
self[name] = new_paths
-_pathsuboptions = {}
+_pathsuboptions: "Dict[bytes, Tuple[str, Callable]]" = {}
# a dictionary of methods that can be used to format a sub-option value
path_suboptions_display = {}
-def pathsuboption(option, attr, display=pycompat.bytestr):
+def pathsuboption(option: bytes, attr: str, display=pycompat.bytestr):
"""Decorator used to declare a path sub-option.
Arguments are the sub-option name and the attribute it should set on
--- a/mercurial/verify.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/verify.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
@@ -31,7 +32,7 @@
return v.verify()
-def _normpath(f):
+def _normpath(f: bytes) -> bytes:
# under hg < 2.4, convert didn't sanitize paths properly, so a
# converted repo may contain repeated slashes
while b'//' in f:
@@ -360,7 +361,7 @@
self._err(lr, _(b"%s not in changesets") % short(n), label)
try:
- mfdelta = mfl.get(dir, n).readdelta(shallow=True)
+ mfdelta = mfl.get(dir, n).read_delta_new_entries(shallow=True)
for f, fn, fl in mfdelta.iterentries():
if not f:
self._err(lr, _(b"entry without name in manifest"))
--- a/mercurial/vfs.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/vfs.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,14 +5,30 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
+import abc
import contextlib
import os
import shutil
import stat
import threading
+import typing
from typing import (
+ Any,
+ BinaryIO,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ MutableMapping,
Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
)
from .i18n import _
@@ -24,8 +40,19 @@
util,
)
+if typing.TYPE_CHECKING:
+ from . import (
+ ui as uimod,
+ )
-def _avoidambig(path: bytes, oldstat):
+ _Tbackgroundfilecloser = TypeVar(
+ '_Tbackgroundfilecloser', bound='backgroundfilecloser'
+ )
+ _Tclosewrapbase = TypeVar('_Tclosewrapbase', bound='closewrapbase')
+ _OnErrorFn = Callable[[Exception], Optional[object]]
+
+
+def _avoidambig(path: bytes, oldstat: util.filestat) -> None:
"""Avoid file stat ambiguity forcibly
This function causes copying ``path`` file, if it is owned by
@@ -44,30 +71,29 @@
checkandavoid()
-class abstractvfs:
+class abstractvfs(abc.ABC):
"""Abstract base class; cannot be instantiated"""
# default directory separator for vfs
#
# Other vfs code always use `/` and this works fine because python file API
# abstract the use of `/` and make it work transparently. For consistency
- # vfs will always use `/` when joining. This avoid some confusion in
+ # vfs will always use `/` when joining. This avoids some confusion in
# encoded vfs (see issue6546)
- _dir_sep = b'/'
-
- def __init__(self, *args, **kwargs):
- '''Prevent instantiation; don't call this from subclasses.'''
- raise NotImplementedError('attempted instantiating ' + str(type(self)))
+ _dir_sep: bytes = b'/'
# TODO: type return, which is util.posixfile wrapped by a proxy
- def __call__(self, path: bytes, mode: bytes = b'rb', **kwargs):
- raise NotImplementedError
+ @abc.abstractmethod
+ def __call__(self, path: bytes, mode: bytes = b'rb', **kwargs) -> Any:
+ ...
- def _auditpath(self, path: bytes, mode: bytes):
- raise NotImplementedError
+ @abc.abstractmethod
+ def _auditpath(self, path: bytes, mode: bytes) -> None:
+ ...
+ @abc.abstractmethod
def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
- raise NotImplementedError
+ ...
def tryread(self, path: bytes) -> bytes:
'''gracefully return an empty string for missing files'''
@@ -77,7 +103,7 @@
pass
return b""
- def tryreadlines(self, path: bytes, mode: bytes = b'rb'):
+ def tryreadlines(self, path: bytes, mode: bytes = b'rb') -> List[bytes]:
'''gracefully return an empty array for missing files'''
try:
return self.readlines(path, mode=mode)
@@ -99,18 +125,22 @@
with self(path, b'rb') as fp:
return fp.read()
- def readlines(self, path: bytes, mode: bytes = b'rb'):
+ def readlines(self, path: bytes, mode: bytes = b'rb') -> List[bytes]:
with self(path, mode=mode) as fp:
return fp.readlines()
def write(
- self, path: bytes, data: bytes, backgroundclose=False, **kwargs
+ self, path: bytes, data: bytes, backgroundclose: bool = False, **kwargs
) -> int:
with self(path, b'wb', backgroundclose=backgroundclose, **kwargs) as fp:
return fp.write(data)
def writelines(
- self, path: bytes, data: bytes, mode: bytes = b'wb', notindexed=False
+ self,
+ path: bytes,
+ data: Iterable[bytes],
+ mode: bytes = b'wb',
+ notindexed: bool = False,
) -> None:
with self(path, mode=mode, notindexed=notindexed) as fp:
return fp.writelines(data)
@@ -137,7 +167,7 @@
def exists(self, path: Optional[bytes] = None) -> bool:
return os.path.exists(self.join(path))
- def fstat(self, fp):
+ def fstat(self, fp: BinaryIO) -> os.stat_result:
return util.fstat(fp)
def isdir(self, path: Optional[bytes] = None) -> bool:
@@ -177,7 +207,7 @@
to allow handling of strange encoding if needed."""
return self._join(*paths)
- def split(self, path: bytes):
+ def split(self, path: bytes) -> Tuple[bytes, bytes]:
"""split top-most element of a path (as os.path.split would do)
This exists to allow handling of strange encoding if needed."""
@@ -186,7 +216,7 @@
def lexists(self, path: Optional[bytes] = None) -> bool:
return os.path.lexists(self.join(path))
- def lstat(self, path: Optional[bytes] = None):
+ def lstat(self, path: Optional[bytes] = None) -> os.stat_result:
return os.lstat(self.join(path))
def is_mmap_safe(self, path: Optional[bytes] = None) -> bool:
@@ -203,7 +233,7 @@
then you get SIGBUS, which can be pretty disruptive: we get core dump
reports, and the process terminates without writing to the blackbox.
- Instead in this situation we prefer to read the file normally.
+ Instead, in this situation we prefer to read the file normally.
The risk of ESTALE in the middle of the read remains, but it's
smaller because we read sooner and the error should be reported
just as any other error.
@@ -218,21 +248,21 @@
fstype = util.getfstype(self.join(path))
return fstype is not None and fstype != b'nfs'
- def listdir(self, path: Optional[bytes] = None):
+ def listdir(self, path: Optional[bytes] = None) -> List[bytes]:
return os.listdir(self.join(path))
- def makedir(self, path: Optional[bytes] = None, notindexed=True):
+ def makedir(self, path: Optional[bytes] = None, notindexed=True) -> None:
return util.makedir(self.join(path), notindexed)
def makedirs(
self, path: Optional[bytes] = None, mode: Optional[int] = None
- ):
+ ) -> None:
return util.makedirs(self.join(path), mode)
- def makelock(self, info, path: bytes):
+ def makelock(self, info: bytes, path: bytes) -> None:
return util.makelock(info, self.join(path))
- def mkdir(self, path: Optional[bytes] = None):
+ def mkdir(self, path: Optional[bytes] = None) -> None:
return os.mkdir(self.join(path))
def mkstemp(
@@ -240,7 +270,7 @@
suffix: bytes = b'',
prefix: bytes = b'tmp',
dir: Optional[bytes] = None,
- ):
+ ) -> Tuple[int, bytes]:
fd, name = pycompat.mkstemp(
suffix=suffix, prefix=prefix, dir=self.join(dir)
)
@@ -250,13 +280,21 @@
else:
return fd, fname
- def readdir(self, path: Optional[bytes] = None, stat=None, skip=None):
+ # TODO: This doesn't match osutil.listdir(). stat=False in pure;
+ # non-optional bool in cext. 'skip' is bool if we trust cext, or bytes
+ # going by how pure uses it. Also, cext returns a custom stat structure.
+ # from cext.osutil.pyi:
+ #
+ # path: bytes, st: bool, skip: Optional[bool]
+ def readdir(
+ self, path: Optional[bytes] = None, stat=None, skip=None
+ ) -> Any:
return util.listdir(self.join(path), stat, skip)
def readlock(self, path: bytes) -> bytes:
return util.readlock(self.join(path))
- def rename(self, src: bytes, dst: bytes, checkambig=False):
+ def rename(self, src: bytes, dst: bytes, checkambig: bool = False) -> None:
"""Rename from src to dst
checkambig argument is used with util.filestat, and is useful
@@ -271,34 +309,37 @@
self._auditpath(dst, b'w')
srcpath = self.join(src)
dstpath = self.join(dst)
- oldstat = checkambig and util.filestat.frompath(dstpath)
+ oldstat = util.filestat.frompath(dstpath) if checkambig else None
+
+ util.rename(srcpath, dstpath)
+
if oldstat and oldstat.stat:
- ret = util.rename(srcpath, dstpath)
_avoidambig(dstpath, oldstat)
- return ret
- return util.rename(srcpath, dstpath)
def readlink(self, path: bytes) -> bytes:
return util.readlink(self.join(path))
- def removedirs(self, path: Optional[bytes] = None):
+ def removedirs(self, path: Optional[bytes] = None) -> None:
"""Remove a leaf directory and all empty intermediate ones"""
return util.removedirs(self.join(path))
- def rmdir(self, path: Optional[bytes] = None):
+ def rmdir(self, path: Optional[bytes] = None) -> None:
"""Remove an empty directory."""
return os.rmdir(self.join(path))
def rmtree(
- self, path: Optional[bytes] = None, ignore_errors=False, forcibly=False
- ):
+ self,
+ path: Optional[bytes] = None,
+ ignore_errors: bool = False,
+ forcibly: bool = False,
+ ) -> None:
"""Remove a directory tree recursively
If ``forcibly``, this tries to remove READ-ONLY files, too.
"""
if forcibly:
- def onexc(function, path, excinfo):
+ def onexc(function, path: bytes, excinfo):
if function is not os.remove:
raise
# read-only files cannot be unlinked under Windows
@@ -321,31 +362,39 @@
self.join(path), ignore_errors=ignore_errors, onerror=onexc
)
- def setflags(self, path: bytes, l: bool, x: bool):
+ def setflags(self, path: bytes, l: bool, x: bool) -> None:
return util.setflags(self.join(path), l, x)
- def stat(self, path: Optional[bytes] = None):
+ def stat(self, path: Optional[bytes] = None) -> os.stat_result:
return os.stat(self.join(path))
- def unlink(self, path: Optional[bytes] = None):
+ def unlink(self, path: Optional[bytes] = None) -> None:
return util.unlink(self.join(path))
- def tryunlink(self, path: Optional[bytes] = None):
+ def tryunlink(self, path: Optional[bytes] = None) -> bool:
"""Attempt to remove a file, ignoring missing file errors."""
return util.tryunlink(self.join(path))
def unlinkpath(
- self, path: Optional[bytes] = None, ignoremissing=False, rmdir=True
- ):
+ self,
+ path: Optional[bytes] = None,
+ ignoremissing: bool = False,
+ rmdir: bool = True,
+ ) -> None:
return util.unlinkpath(
self.join(path), ignoremissing=ignoremissing, rmdir=rmdir
)
- def utime(self, path: Optional[bytes] = None, t=None):
+ # TODO: could be Tuple[float, float] too.
+ def utime(
+ self, path: Optional[bytes] = None, t: Optional[Tuple[int, int]] = None
+ ) -> None:
return os.utime(self.join(path), t)
- def walk(self, path: Optional[bytes] = None, onerror=None):
- """Yield (dirpath, dirs, files) tuple for each directories under path
+ def walk(
+ self, path: Optional[bytes] = None, onerror: Optional[_OnErrorFn] = None
+ ) -> Iterator[Tuple[bytes, List[bytes], List[bytes]]]:
+ """Yield (dirpath, dirs, files) tuple for each directory under path
``dirpath`` is relative to the root of this vfs. This
uses ``os.sep`` as the path separator, even if you specify POSIX
@@ -361,7 +410,9 @@
yield (dirpath[prefixlen:], dirs, files)
@contextlib.contextmanager
- def backgroundclosing(self, ui, expectedcount=-1):
+ def backgroundclosing(
+ self, ui: uimod.ui, expectedcount: int = -1
+ ) -> Iterator[Optional[backgroundfilecloser]]:
"""Allow files to be closed asynchronously.
When this context manager is active, ``backgroundclose`` can be passed
@@ -371,10 +422,7 @@
# Sharing backgroundfilecloser between threads is complex, and using
# multiple instances puts us at risk of running out of file descriptors,
# so only allow use of backgroundfilecloser in the main thread.
- if not isinstance(
- threading.current_thread(),
- threading._MainThread, # pytype: disable=module-attr
- ):
+ if threading.current_thread() is not threading.main_thread():
yield
return
vfs = getattr(self, 'vfs', self)
@@ -394,7 +442,7 @@
None # pytype: disable=attribute-error
)
- def register_file(self, path):
+ def register_file(self, path: bytes) -> None:
"""generic hook point to lets fncache steer its stew"""
@@ -409,14 +457,21 @@
See pathutil.pathauditor() for details.
"""
+ audit: Union[pathutil.pathauditor, Callable[[bytes, Optional[bytes]], Any]]
+ base: bytes
+ createmode: Optional[int]
+ options: Dict[bytes, Any]
+ _audit: bool
+ _trustnlink: Optional[bool]
+
def __init__(
self,
base: bytes,
- audit=True,
- cacheaudited=False,
- expandpath=False,
- realpath=False,
- ):
+ audit: bool = True,
+ cacheaudited: bool = False,
+ expandpath: bool = False,
+ realpath: bool = False,
+ ) -> None:
if expandpath:
base = util.expandpath(base)
if realpath:
@@ -436,15 +491,15 @@
return util.checklink(self.base)
@util.propertycache
- def _chmod(self):
+ def _chmod(self) -> bool:
return util.checkexec(self.base)
- def _fixfilemode(self, name):
+ def _fixfilemode(self, name: bytes) -> None:
if self.createmode is None or not self._chmod:
return
os.chmod(name, self.createmode & 0o666)
- def _auditpath(self, path, mode) -> None:
+ def _auditpath(self, path: bytes, mode: bytes) -> None:
if self._audit:
if os.path.isabs(path) and path.startswith(self.base):
path = os.path.relpath(path, self.base)
@@ -454,7 +509,9 @@
self.audit(path, mode=mode)
def isfileorlink_checkdir(
- self, dircache, path: Optional[bytes] = None
+ self,
+ dircache: MutableMapping[bytes, bool],
+ path: bytes,
) -> bool:
"""return True if the path is a regular file or a symlink and
the directories along the path are "normal", that is
@@ -482,13 +539,13 @@
self,
path: bytes,
mode: bytes = b"rb",
- atomictemp=False,
- notindexed=False,
- backgroundclose=False,
- checkambig=False,
- auditpath=True,
- makeparentdirs=True,
- ):
+ atomictemp: bool = False,
+ notindexed: bool = False,
+ backgroundclose: bool = False,
+ checkambig: bool = False,
+ auditpath: bool = True,
+ makeparentdirs: bool = True,
+ ) -> Any: # TODO: should be BinaryIO if util.atomictempfile can be coerced
"""Open ``path`` file, which is relative to vfs root.
By default, parent directories are created as needed. Newly created
@@ -573,9 +630,9 @@
)
fp = checkambigatclosing(fp)
- if backgroundclose and isinstance(
- threading.current_thread(),
- threading._MainThread, # pytype: disable=module-attr
+ if (
+ backgroundclose
+ and threading.current_thread() is threading.main_thread()
):
if (
not self._backgroundfilecloser # pytype: disable=attribute-error
@@ -623,26 +680,26 @@
return self.base
-opener = vfs
+opener: Type[vfs] = vfs
-class proxyvfs(abstractvfs):
- def __init__(self, vfs: "vfs"):
+class proxyvfs(abstractvfs, abc.ABC):
+ def __init__(self, vfs: vfs) -> None:
self.vfs = vfs
@property
- def createmode(self):
+ def createmode(self) -> Optional[int]:
return self.vfs.createmode
- def _auditpath(self, path, mode):
+ def _auditpath(self, path: bytes, mode: bytes) -> None:
return self.vfs._auditpath(path, mode)
@property
- def options(self):
+ def options(self) -> Dict[bytes, Any]:
return self.vfs.options
@options.setter
- def options(self, value):
+ def options(self, value: Dict[bytes, Any]) -> None:
self.vfs.options = value
@property
@@ -653,11 +710,12 @@
class filtervfs(proxyvfs, abstractvfs):
'''Wrapper vfs for filtering filenames with a function.'''
- def __init__(self, vfs: "vfs", filter):
+ def __init__(self, vfs: vfs, filter: Callable[[bytes], bytes]) -> None:
proxyvfs.__init__(self, vfs)
self._filter = filter
- def __call__(self, path: bytes, *args, **kwargs):
+ # TODO: The return type should be BinaryIO
+ def __call__(self, path: bytes, *args, **kwargs) -> Any:
return self.vfs(self._filter(path), *args, **kwargs)
def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
@@ -667,16 +725,17 @@
return self.vfs.join(path)
-filteropener = filtervfs
+filteropener: Type[filtervfs] = filtervfs
class readonlyvfs(proxyvfs):
'''Wrapper vfs preventing any writing.'''
- def __init__(self, vfs: "vfs"):
+ def __init__(self, vfs: vfs) -> None:
proxyvfs.__init__(self, vfs)
- def __call__(self, path: bytes, mode: bytes = b'rb', *args, **kw):
+ # TODO: The return type should be BinaryIO
+ def __call__(self, path: bytes, mode: bytes = b'rb', *args, **kw) -> Any:
if mode not in (b'r', b'rb'):
raise error.Abort(_(b'this vfs is read only'))
return self.vfs(path, mode, *args, **kw)
@@ -685,56 +744,58 @@
return self.vfs.join(path, *insidef)
-class closewrapbase:
+class closewrapbase(abc.ABC):
"""Base class of wrapper, which hooks closing
- Do not instantiate outside of the vfs layer.
+ Do not instantiate outside the vfs layer.
"""
- def __init__(self, fh):
+ def __init__(self, fh) -> None:
object.__setattr__(self, '_origfh', fh)
- def __getattr__(self, attr):
+ def __getattr__(self, attr: str) -> Any:
return getattr(self._origfh, attr)
- def __setattr__(self, attr, value):
+ def __setattr__(self, attr: str, value: Any) -> None:
return setattr(self._origfh, attr, value)
- def __delattr__(self, attr):
+ def __delattr__(self, attr: str) -> None:
return delattr(self._origfh, attr)
- def __enter__(self):
+ def __enter__(self: _Tclosewrapbase) -> _Tclosewrapbase:
self._origfh.__enter__()
return self
- def __exit__(self, exc_type, exc_value, exc_tb):
- raise NotImplementedError('attempted instantiating ' + str(type(self)))
+ @abc.abstractmethod
+ def __exit__(self, exc_type, exc_value, exc_tb) -> None:
+ ...
- def close(self):
- raise NotImplementedError('attempted instantiating ' + str(type(self)))
+ @abc.abstractmethod
+ def close(self) -> None:
+ ...
class delayclosedfile(closewrapbase):
"""Proxy for a file object whose close is delayed.
- Do not instantiate outside of the vfs layer.
+ Do not instantiate outside the vfs layer.
"""
- def __init__(self, fh, closer):
+ def __init__(self, fh, closer) -> None:
super(delayclosedfile, self).__init__(fh)
object.__setattr__(self, '_closer', closer)
- def __exit__(self, exc_type, exc_value, exc_tb):
+ def __exit__(self, exc_type, exc_value, exc_tb) -> None:
self._closer.close(self._origfh)
- def close(self):
+ def close(self) -> None:
self._closer.close(self._origfh)
class backgroundfilecloser:
"""Coordinates background closing of file handles on multiple threads."""
- def __init__(self, ui, expectedcount=-1):
+ def __init__(self, ui: uimod.ui, expectedcount: int = -1) -> None:
self._running = False
self._entered = False
self._threads = []
@@ -772,11 +833,11 @@
self._threads.append(t)
t.start()
- def __enter__(self):
+ def __enter__(self: _Tbackgroundfilecloser) -> _Tbackgroundfilecloser:
self._entered = True
return self
- def __exit__(self, exc_type, exc_value, exc_tb):
+ def __exit__(self, exc_type, exc_value, exc_tb) -> None:
self._running = False
# Wait for threads to finish closing so open files don't linger for
@@ -784,7 +845,7 @@
for t in self._threads:
t.join()
- def _worker(self):
+ def _worker(self) -> None:
"""Main routine for worker thread."""
while True:
try:
@@ -800,7 +861,7 @@
if not self._running:
break
- def close(self, fh):
+ def close(self, fh) -> None:
"""Schedule a file for closing."""
if not self._entered:
raise error.Abort(
@@ -808,7 +869,7 @@
)
# If a background thread encountered an exception, raise now so we fail
- # fast. Otherwise we may potentially go on for minutes until the error
+ # fast. Otherwise, we may potentially go on for minutes until the error
# is acted on.
if self._threadexception:
e = self._threadexception
@@ -831,22 +892,22 @@
This proxy is useful only if the target file is guarded by any
lock (e.g. repo.lock or repo.wlock)
- Do not instantiate outside of the vfs layer.
+ Do not instantiate outside the vfs layer.
"""
- def __init__(self, fh):
+ def __init__(self, fh) -> None:
super(checkambigatclosing, self).__init__(fh)
object.__setattr__(self, '_oldstat', util.filestat.frompath(fh.name))
- def _checkambig(self):
+ def _checkambig(self) -> None:
oldstat = self._oldstat
if oldstat.stat:
_avoidambig(self._origfh.name, oldstat)
- def __exit__(self, exc_type, exc_value, exc_tb):
+ def __exit__(self, exc_type, exc_value, exc_tb) -> None:
self._origfh.__exit__(exc_type, exc_value, exc_tb)
self._checkambig()
- def close(self):
+ def close(self) -> None:
self._origfh.close()
self._checkambig()
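The `rename` refactor above is worth dwelling on: instead of chaining `checkambig and util.filestat.frompath(...)` and calling `util.rename` on two branches, the new code stats the destination once, renames unconditionally, then repairs timestamp ambiguity. A minimal Python sketch of that flow, under the assumption that `os.replace`/`os.lstat` stand in for Mercurial's `util.rename`/`util.filestat`, and with a naive mtime bump standing in for what `_avoidambig` is there to do:

```python
import os
from typing import Optional


def rename_checkambig(src: str, dst: str, checkambig: bool = False) -> None:
    oldstat: Optional[os.stat_result] = None
    if checkambig:
        try:
            oldstat = os.lstat(dst)  # snapshot of the file being replaced
        except FileNotFoundError:
            oldstat = None
    os.replace(src, dst)  # exactly one rename on every code path
    if oldstat is not None:
        newstat = os.lstat(dst)
        # A same-size replacement that also keeps ctime/mtime could fool
        # stat-based cache validation; nudge mtime to break the tie.
        if (newstat.st_mtime == oldstat.st_mtime
                and newstat.st_ctime == oldstat.st_ctime):
            os.utime(dst, (newstat.st_atime, newstat.st_mtime + 1))
```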
--- a/mercurial/win32.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/win32.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import ctypes
import ctypes.wintypes as wintypes
@@ -172,6 +173,7 @@
X509_ASN_ENCODING = 0x00000001
PKCS_7_ASN_ENCODING = 0x00010000
+
# These structs are only complete enough to achieve what we need.
class CERT_CHAIN_CONTEXT(ctypes.Structure):
_fields_ = (
@@ -368,7 +370,7 @@
# See https://bugs.python.org/issue28474
code = _kernel32.GetLastError()
if code > 0x7FFFFFFF:
- code -= 2 ** 32
+ code -= 2**32
err = ctypes.WinError(code=code) # pytype: disable=module-attr
raise OSError(
err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
@@ -459,10 +461,10 @@
return _getfileinfo(name).nNumberOfLinks
-def samefile(path1: bytes, path2: bytes) -> bool:
- '''Returns whether path1 and path2 refer to the same file or directory.'''
- res1 = _getfileinfo(path1)
- res2 = _getfileinfo(path2)
+def samefile(fpath1: bytes, fpath2: bytes) -> bool:
+ '''Returns whether fpath1 and fpath2 refer to the same file or directory.'''
+ res1 = _getfileinfo(fpath1)
+ res2 = _getfileinfo(fpath2)
return (
res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
and res1.nFileIndexHigh == res2.nFileIndexHigh
@@ -470,10 +472,10 @@
)
-def samedevice(path1: bytes, path2: bytes) -> bool:
- '''Returns whether path1 and path2 are on the same device.'''
- res1 = _getfileinfo(path1)
- res2 = _getfileinfo(path2)
+def samedevice(fpath1: bytes, fpath2: bytes) -> bool:
+ '''Returns whether fpath1 and fpath2 are on the same device.'''
+ res1 = _getfileinfo(fpath1)
+ res2 = _getfileinfo(fpath2)
return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
@@ -710,16 +712,16 @@
return pi.dwProcessId
-def unlink(f: bytes) -> None:
+def unlink(path: bytes) -> None:
'''try to implement POSIX's unlink semantics on Windows'''
- if os.path.isdir(f):
+ if os.path.isdir(path):
# use EPERM because it is the POSIX-prescribed value, even though
# unlink(2) on directories returns EISDIR on Linux
raise IOError(
errno.EPERM,
r"Unlinking directory not permitted: '%s'"
- % encoding.strfromlocal(f),
+ % encoding.strfromlocal(path),
)
# POSIX allows unlinking and renaming open files. Windows has serious
@@ -740,9 +742,9 @@
# implicit zombie filename blocking on a temporary name.
for tries in range(10):
- temp = b'%s-%08x' % (f, random.randint(0, 0xFFFFFFFF))
+ temp = b'%s-%08x' % (path, random.randint(0, 0xFFFFFFFF))
try:
- os.rename(f, temp)
+ os.rename(path, temp)
break
except FileExistsError:
pass
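For readers unfamiliar with the Windows behavior being worked around in `unlink()`: renaming the file to a random temporary name first releases the original name immediately, even while other processes hold the file open. A hedged, simplified sketch of that trick (the real code additionally copes with the temporary name failing to unlink; `FileExistsError` is Windows-specific here, since POSIX `rename` silently overwrites):

```python
import os
import random


def unlink_via_rename(path: bytes) -> None:
    temp = path
    for _ in range(10):
        candidate = b'%s-%08x' % (path, random.randint(0, 0xFFFFFFFF))
        try:
            os.rename(path, candidate)  # frees the original name at once
            temp = candidate
            break
        except FileExistsError:
            continue  # astronomically unlikely collision; pick a new name
    os.unlink(temp)  # the real code tolerates failure here (open handles)
```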
--- a/mercurial/windows.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/windows.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import errno
import getpass
@@ -298,7 +299,7 @@
def copymode(
src: bytes,
dst: bytes,
- mode: Optional[bytes] = None,
+ mode: Optional[int] = None,
enforcewritable: bool = False,
) -> None:
pass
@@ -621,10 +622,10 @@
return None
-def readlink(pathname: bytes) -> bytes:
- path = pycompat.fsdecode(pathname)
+def readlink(path: bytes) -> bytes:
+ path_str = pycompat.fsdecode(path)
try:
- link = os.readlink(path)
+ link = os.readlink(path_str)
except ValueError as e:
# On py2, os.readlink() raises an AttributeError since it is
# unsupported. On py3, reading a non-link raises a ValueError. Simply
--- a/mercurial/wireprotoframing.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/wireprotoframing.py Sat Oct 26 04:16:00 2024 +0200
@@ -9,12 +9,20 @@
# protocol. For details about the protocol, see
# `hg help internals.wireprotocol`.
+from __future__ import annotations
import collections
import struct
+import typing
from .i18n import _
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
encoding,
error,
@@ -546,7 +554,7 @@
"""
atomdicts = []
- for (formatting, args, labels) in atoms:
+ for formatting, args, labels in atoms:
# TODO look for localstr, other types here?
if not isinstance(formatting, bytes):
@@ -1198,7 +1206,6 @@
b'%s' % stringutil.forcebytestr(e),
errtype=b'server',
):
-
yield frame
break
@@ -1259,7 +1266,6 @@
for chunk in cborutil.streamencodebytestringfromiter(
o.chunks
):
-
for frame in emitter.send(chunk):
yield frame
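The `typing.TYPE_CHECKING` import added here (and mirrored in `wireprototypes.py` below) makes static analyzers resolve `attr` to the installed `attrs` distribution while the runtime keeps the vendored copy. A runnable sketch of the shadowing trick, using the stdlib `json` module as a stand-in for the vendored package:

```python
import typing

# Runtime binding: pretend this import is the vendored copy.
import json as vendored

if typing.TYPE_CHECKING:
    # Type checkers evaluate this branch instead, so annotations written
    # against 'vendored' resolve to the installed package's stubs.
    import json as vendored  # noqa: F811

print(vendored.dumps({'ok': True}))
```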
--- a/mercurial/wireprotoserver.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/wireprotoserver.py Sat Oct 26 04:16:00 2024 +0200
@@ -4,6 +4,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import contextlib
import struct
--- a/mercurial/wireprototypes.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/wireprototypes.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,9 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
+
+import typing
from .node import (
bin,
@@ -10,6 +13,12 @@
)
from .i18n import _
from .thirdparty import attr
+
+# Force pytype to use the non-vendored package
+if typing.TYPE_CHECKING:
+ # noinspection PyPackageRequirements
+ import attr
+
from . import (
error,
util,
--- a/mercurial/wireprotov1peer.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/wireprotov1peer.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import sys
import weakref
--- a/mercurial/wireprotov1server.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/wireprotov1server.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import binascii
import os
--- a/mercurial/worker.py Thu Jan 11 20:37:34 2024 +0100
+++ b/mercurial/worker.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from __future__ import annotations
import os
import pickle
--- a/pyproject.toml Thu Jan 11 20:37:34 2024 +0100
+++ b/pyproject.toml Sat Oct 26 04:16:00 2024 +0200
@@ -1,5 +1,42 @@
[build-system]
requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
+
+
+[project]
+name = "mercurial"
+authors = [
+ {name = "Olivia Mackall and many others", email = "mercurial@mercurial-scm.org"},
+]
+description="Fast scalable distributed SCM (revision control, version control) system"
+readme = "README.rst"
+requires-python = ">=3.8"
+license={text = "GNU GPLv2 or any later version"}
+classifiers=[
+ "Development Status :: 6 - Mature",
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "Intended Audience :: System Administrators",
+ "License :: OSI Approved :: GNU General Public License (GPL)",
+ "Natural Language :: Danish",
+ "Natural Language :: English",
+ "Natural Language :: German",
+ "Natural Language :: Italian",
+ "Natural Language :: Japanese",
+ "Natural Language :: Portuguese (Brazilian)",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: OS Independent",
+ "Operating System :: POSIX",
+ "Programming Language :: C",
+ "Programming Language :: Python",
+ "Topic :: Software Development :: Version Control",
+]
+dynamic = ["version"]
+
+[project.urls]
+home = "https://mercurial-scm.org/"
+download_url = "https://mercurial-scm.org/release/"
+
[tool.black]
line-length = 80
--- a/rust/.cargo/config Thu Jan 11 20:37:34 2024 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# Rust builds with a modern MSVC and uses a newer CRT.
-# Python 2.7 has a shared library dependency on an older CRT (msvcr90.dll).
-# We statically link the modern CRT to avoid multiple msvcr*.dll libraries
-# being loaded and Python possibly picking up symbols from the newer runtime
-# (which would be loaded first).
-[target.'cfg(target_os = "windows")']
-rustflags = ["-Ctarget-feature=+crt-static"]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/.cargo/config.toml Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,7 @@
+# Rust builds with a modern MSVC and uses a newer CRT.
+# Python 2.7 has a shared library dependency on an older CRT (msvcr90.dll).
+# We statically link the modern CRT to avoid multiple msvcr*.dll libraries
+# being loaded and Python possibly picking up symbols from the newer runtime
+# (which would be loaded first).
+[target.'cfg(target_os = "windows")']
+rustflags = ["-Ctarget-feature=+crt-static"]
--- a/rust/Cargo.lock Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/Cargo.lock Sat Oct 26 04:16:00 2024 +0200
@@ -180,7 +180,7 @@
"js-sys",
"num-traits",
"wasm-bindgen",
- "windows-targets 0.52.0",
+ "windows-targets 0.52.6",
]
[[package]]
@@ -231,6 +231,19 @@
]
[[package]]
+name = "console"
+version = "0.15.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb"
+dependencies = [
+ "encode_unicode",
+ "lazy_static",
+ "libc",
+ "unicode-width",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
name = "convert_case"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -435,16 +448,28 @@
"libc",
"option-ext",
"redox_users",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
+name = "dyn-clone"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
+
+[[package]]
name = "either"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
[[package]]
+name = "encode_unicode"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
+
+[[package]]
name = "env_logger"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -467,6 +492,18 @@
]
[[package]]
+name = "filetime"
+version = "0.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "libredox",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
name = "flate2"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -584,11 +621,14 @@
"clap",
"crossbeam-channel",
"derive_more",
+ "dyn-clone",
+ "filetime",
"flate2",
"format-bytes",
"hashbrown 0.13.1",
"home",
"im-rc",
+ "indicatif",
"itertools",
"lazy_static",
"libc",
@@ -610,6 +650,7 @@
"thread_local",
"toml",
"twox-hash",
+ "uuid",
"zstd",
]
@@ -691,6 +732,19 @@
]
[[package]]
+name = "indicatif"
+version = "0.17.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3"
+dependencies = [
+ "console",
+ "instant",
+ "number_prefix",
+ "portable-atomic",
+ "unicode-width",
+]
+
+[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -752,6 +806,7 @@
dependencies = [
"bitflags 2.6.0",
"libc",
+ "redox_syscall 0.5.3",
]
[[package]]
@@ -869,6 +924,12 @@
]
[[package]]
+name = "number_prefix"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
+
+[[package]]
name = "once_cell"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -917,6 +978,12 @@
checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
[[package]]
+name = "portable-atomic"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2"
+
+[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1123,6 +1190,15 @@
]
[[package]]
+name = "redox_syscall"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
+dependencies = [
+ "bitflags 2.6.0",
+]
+
+[[package]]
name = "redox_users"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1348,7 +1424,7 @@
"cfg-if",
"fastrand",
"libc",
- "redox_syscall",
+ "redox_syscall 0.2.16",
"remove_dir_all",
"winapi",
]
@@ -1455,6 +1531,15 @@
checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
[[package]]
+name = "uuid"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"
+dependencies = [
+ "getrandom 0.2.8",
+]
+
+[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1615,6 +1700,24 @@
]
[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
name = "windows-targets"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1631,17 +1734,18 @@
[[package]]
name = "windows-targets"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
- "windows_aarch64_gnullvm 0.52.0",
- "windows_aarch64_msvc 0.52.0",
- "windows_i686_gnu 0.52.0",
- "windows_i686_msvc 0.52.0",
- "windows_x86_64_gnu 0.52.0",
- "windows_x86_64_gnullvm 0.52.0",
- "windows_x86_64_msvc 0.52.0",
+ "windows_aarch64_gnullvm 0.52.6",
+ "windows_aarch64_msvc 0.52.6",
+ "windows_i686_gnu 0.52.6",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.6",
+ "windows_x86_64_gnu 0.52.6",
+ "windows_x86_64_gnullvm 0.52.6",
+ "windows_x86_64_msvc 0.52.6",
]
[[package]]
@@ -1652,9 +1756,9 @@
[[package]]
name = "windows_aarch64_gnullvm"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
@@ -1664,9 +1768,9 @@
[[package]]
name = "windows_aarch64_msvc"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
@@ -1676,9 +1780,15 @@
[[package]]
name = "windows_i686_gnu"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
@@ -1688,9 +1798,9 @@
[[package]]
name = "windows_i686_msvc"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
@@ -1700,9 +1810,9 @@
[[package]]
name = "windows_x86_64_gnu"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
@@ -1712,9 +1822,9 @@
[[package]]
name = "windows_x86_64_gnullvm"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
@@ -1724,9 +1834,9 @@
[[package]]
name = "windows_x86_64_msvc"
-version = "0.52.0"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "wyz"
--- a/rust/README.rst Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/README.rst Sat Oct 26 04:16:00 2024 +0200
@@ -83,7 +83,7 @@
------------------------------
The minimum supported Rust version (MSRV) is specified in the `Clippy`_
-configuration file at ``rust/clippy.toml``. It is set to be ``1.61.0`` as of
+configuration file at ``rust/clippy.toml``. It is set to be ``1.79.0`` as of
this writing, but keep in mind that the authoritative value is the one
from the configuration file.
--- a/rust/clippy.toml Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/clippy.toml Sat Oct 26 04:16:00 2024 +0200
@@ -1,1 +1,1 @@
-msrv = "1.61.0"
+msrv = "1.79.0"
--- a/rust/hg-core/Cargo.toml Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/Cargo.toml Sat Oct 26 04:16:00 2024 +0200
@@ -16,6 +16,7 @@
hashbrown = { version = "0.13.1", features = ["rayon"] }
home = "0.5.4"
im-rc = "15.1.0"
+indicatif = "0.17.8"
itertools = "0.10.5"
lazy_static = "1.4.0"
libc = "0.2.137"
@@ -41,6 +42,9 @@
once_cell = "1.16.0"
bitvec = "1.0.1"
chrono = "0.4.34"
+dyn-clone = "1.0.16"
+filetime = "0.2.23"
+uuid = { version = "1.10", features = ["v4"] }
# We don't use the `miniz-oxide` backend, to avoid changing rhg benchmarks,
# until we have a clearer view of which backend is the fastest.
--- a/rust/hg-core/src/config/config_items.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/config/config_items.rs Sat Oct 26 04:16:00 2024 +0200
@@ -267,6 +267,66 @@
}
}
+impl TryFrom<&DefaultConfigItem> for Option<i64> {
+ type Error = HgError;
+
+ fn try_from(value: &DefaultConfigItem) -> Result<Self, Self::Error> {
+ match &value.default {
+ Some(default) => {
+ let err = HgError::abort(
+ format!(
+ "programming error: wrong query on config item '{}.{}'",
+ value.section,
+ value.name
+ ),
+ exit_codes::ABORT,
+ Some(format!(
+ "asked for 'i64', type of default is '{}'",
+ default.type_str()
+ )),
+ );
+ match default {
+ DefaultConfigItemType::Primitive(
+ toml::Value::Integer(b),
+ ) => Ok(Some(*b)),
+ _ => Err(err),
+ }
+ }
+ None => Ok(None),
+ }
+ }
+}
+
+impl TryFrom<&DefaultConfigItem> for Option<f64> {
+ type Error = HgError;
+
+ fn try_from(value: &DefaultConfigItem) -> Result<Self, Self::Error> {
+ match &value.default {
+ Some(default) => {
+ let err = HgError::abort(
+ format!(
+ "programming error: wrong query on config item '{}.{}'",
+ value.section,
+ value.name
+ ),
+ exit_codes::ABORT,
+ Some(format!(
+ "asked for 'f64', type of default is '{}'",
+ default.type_str()
+ )),
+ );
+ match default {
+ DefaultConfigItemType::Primitive(toml::Value::Float(
+ b,
+ )) => Ok(Some(*b)),
+ _ => Err(err),
+ }
+ }
+ None => Ok(None),
+ }
+ }
+}
+
/// Allows abstracting over more complex default values than just primitives.
/// The former `configitems.py` contained some dynamic code that is encoded
/// in this enum.
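Both `TryFrom` impls encode the same contract: a query for one type against a default of another type is a programming error, reported with both type names. A rough Python analogue of the `i64` case (names are hypothetical; note the `bool` guard, since Python bools are ints):

```python
from typing import Optional, Union

Default = Union[int, float, bool, bytes, None]


def default_as_int(section: str, name: str,
                   default: Default) -> Optional[int]:
    if default is None:
        return None  # no default configured for this item
    if isinstance(default, bool) or not isinstance(default, int):
        raise TypeError(
            "programming error: wrong query on config item '%s.%s': "
            "asked for 'i64', type of default is '%s'"
            % (section, name, type(default).__name__))
    return default


assert default_as_int('ui', 'timeout', 600) == 600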
--- a/rust/hg-core/src/config/layer.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/config/layer.rs Sat Oct 26 04:16:00 2024 +0200
@@ -8,7 +8,7 @@
// GNU General Public License version 2 or any later version.
use crate::errors::HgError;
-use crate::exit_codes::CONFIG_PARSE_ERROR_ABORT;
+use crate::exit_codes::{CONFIG_ERROR_ABORT, CONFIG_PARSE_ERROR_ABORT};
use crate::utils::files::{get_bytes_from_path, get_path_from_bytes};
use format_bytes::{format_bytes, write_bytes, DisplayBytes};
use lazy_static::lazy_static;
@@ -324,9 +324,7 @@
ConfigOrigin::Tweakdefaults => {
write_bytes!(out, b"ui.tweakdefaults")
}
- ConfigOrigin::Defaults => {
- write_bytes!(out, b"configitems.toml")
- }
+ ConfigOrigin::Defaults => write_bytes!(out, b"configitems.toml"),
}
}
}
@@ -338,12 +336,49 @@
pub message: Vec<u8>,
}
+impl From<ConfigParseError> for HgError {
+ fn from(error: ConfigParseError) -> Self {
+ let ConfigParseError {
+ origin,
+ line,
+ message,
+ } = error;
+ let line_message = if let Some(line_number) = line {
+ format_bytes!(b":{}", line_number.to_string().into_bytes())
+ } else {
+ Vec::new()
+ };
+ HgError::Abort {
+ message: String::from_utf8_lossy(&format_bytes!(
+ b"config error at {}{}: {}",
+ origin,
+ line_message,
+ message
+ ))
+ .to_string(),
+ detailed_exit_code: CONFIG_ERROR_ABORT,
+ hint: None,
+ }
+ }
+}
+
#[derive(Debug, derive_more::From)]
pub enum ConfigError {
Parse(ConfigParseError),
Other(HgError),
}
+impl From<ConfigError> for HgError {
+ fn from(error: ConfigError) -> Self {
+ match error {
+ ConfigError::Parse(config_parse_error) => {
+ Self::from(config_parse_error)
+ }
+ ConfigError::Other(hg_error) => hg_error,
+ }
+ }
+}
+
fn make_regex(pattern: &'static str) -> Regex {
Regex::new(pattern).expect("expected a valid regex")
}
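The new `From<ConfigParseError> for HgError` impl mostly formats a location string, appending `:<line>` only when a line number is known. The equivalent formatting rule, sketched in Python for clarity (the function name is an invention for illustration):

```python
from typing import Optional


def config_error_message(origin: str, line: Optional[int],
                         message: str) -> str:
    location = origin if line is None else '%s:%d' % (origin, line)
    return 'config error at %s: %s' % (location, message)


assert (config_error_message('/etc/hgrc', 7, 'bad value')
        == 'config error at /etc/hgrc:7: bad value')
assert (config_error_message('<cli>', None, 'bad value')
        == 'config error at <cli>: bad value')
```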
--- a/rust/hg-core/src/config/mod.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/config/mod.rs Sat Oct 26 04:16:00 2024 +0200
@@ -531,6 +531,54 @@
)
}
+ /// Returns an `Err` if the first value found is not a valid signed
+ /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
+ pub fn get_i64(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Result<Option<i64>, HgError> {
+ self.get_parse(
+ section,
+ item,
+ "valid integer",
+ |value| str::from_utf8(value).ok()?.parse().ok(),
+ true,
+ )
+ }
+
+ /// Returns an `Err` if the first value found is not a valid unsigned
+ /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
+ pub fn get_u64(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Result<Option<u64>, HgError> {
+ self.get_parse(
+ section,
+ item,
+ "valid integer",
+ |value| str::from_utf8(value).ok()?.parse().ok(),
+ true,
+ )
+ }
+
+ /// Returns an `Err` if the first value found is not a valid float
+ /// representation. Otherwise, returns an `Ok(value)` if found, or `None`.
+ pub fn get_f64(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Result<Option<f64>, HgError> {
+ self.get_parse(
+ section,
+ item,
+ "valid float",
+ |value| str::from_utf8(value).ok()?.parse().ok(),
+ true,
+ )
+ }
+
/// Returns an `Err` if the first value found is not a valid file size
/// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
/// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
@@ -548,6 +596,22 @@
)
}
+ /// Same as [`Self::get_byte_size`], but doesn't fall back to the default
+ /// `configitem` if not defined in the user config.
+ pub fn get_byte_size_no_default(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Result<Option<u64>, HgError> {
+ self.get_parse(
+ section,
+ item,
+ "byte quantity",
+ values::parse_byte_size,
+ false,
+ )
+ }
+
/// Returns an `Err` if the first value found is not a valid boolean.
/// Otherwise, returns an `Ok(option)`, where `option` is the boolean if
/// found, or `None`.
@@ -744,6 +808,60 @@
pub fn tweakdefaults(&mut self) {
self.layers.insert(0, Config::tweakdefaults_layer());
}
+
+ /// Return the resource profile for a dimension (memory, cpu or disk).
+ ///
+ /// If no dimension is specified, the generic value is returned.
+ pub fn get_resource_profile(
+ &self,
+ dimension: Option<&str>,
+ ) -> ResourceProfile {
+ let mut value = self.resource_profile_from_item(b"usage", b"resource");
+ if let Some(dimension) = &dimension {
+ let sub_value = self.resource_profile_from_item(
+ b"usage",
+ format!("resources.{}", dimension).as_bytes(),
+ );
+ if sub_value != ResourceProfileValue::Default {
+ value = sub_value
+ }
+ }
+ ResourceProfile {
+ dimension: dimension.map(ToOwned::to_owned),
+ value,
+ }
+ }
+
+ fn resource_profile_from_item(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> ResourceProfileValue {
+ match self.get(section, item).unwrap_or(b"default") {
+ b"default" => ResourceProfileValue::Default,
+ b"low" => ResourceProfileValue::Low,
+ b"medium" => ResourceProfileValue::Medium,
+ b"high" => ResourceProfileValue::High,
+ _ => ResourceProfileValue::Default,
+ }
+ }
+}
+
+/// Corresponds to `usage.resources[.<dimension>]`.
+///
+/// See `hg help config.usage.resources`.
+#[derive(Debug, Eq, PartialEq, PartialOrd, Ord)]
+pub struct ResourceProfile {
+ pub dimension: Option<String>,
+ pub value: ResourceProfileValue,
+}
+
+#[derive(Debug, Eq, PartialEq, PartialOrd, Ord)]
+pub enum ResourceProfileValue {
+ Default,
+ Low,
+ Medium,
+ High,
}
#[cfg(test)]
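The override rule in `get_resource_profile` is simple but easy to misread: the dimension-specific item wins only when it is set to something other than `default`. A small Python model of that precedence (item names simplified; the Rust code reads `usage.resource` and `usage.resources.<dimension>`):

```python
from typing import Dict, Optional

KNOWN = {b'default', b'low', b'medium', b'high'}


def resource_profile(cfg: Dict[bytes, bytes],
                     dimension: Optional[str] = None) -> bytes:
    def lookup(item: bytes) -> bytes:
        value = cfg.get(item, b'default')
        return value if value in KNOWN else b'default'

    value = lookup(b'resources')
    if dimension is not None:
        sub = lookup(b'resources.%s' % dimension.encode())
        if sub != b'default':
            value = sub  # a non-default dimension value takes precedence
    return value


cfg = {b'resources': b'medium', b'resources.memory': b'high'}
assert resource_profile(cfg, 'memory') == b'high'
assert resource_profile(cfg, 'cpu') == b'medium'
```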
--- a/rust/hg-core/src/dirstate/entry.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/dirstate/entry.rs Sat Oct 26 04:16:00 2024 +0200
@@ -116,7 +116,12 @@
metadata: &fs::Metadata,
boundary: &Self,
) -> io::Result<Option<Self>> {
- let mut mtime = Self::for_mtime_of(metadata)?;
+ Ok(Self::for_mtime_of(metadata)?.for_reliable_mtime_of_self(boundary))
+ }
+
+ /// See [`Self::for_reliable_mtime_of`]
+ pub fn for_reliable_mtime_of_self(&self, boundary: &Self) -> Option<Self> {
+ let mut new = *self;
// If the mtime of the ambiguous file is younger than (or equal to) the
// starting point of the `status` walk, we cannot guarantee that
// another, racy, write will not happen right after with the same mtime
@@ -126,23 +131,23 @@
// mismatch between the current clock and previous file system
// operation. So mtimes more than one day in the future are considered
// fine.
- let reliable = if mtime.truncated_seconds == boundary.truncated_seconds
+ let reliable = if self.truncated_seconds == boundary.truncated_seconds
{
- mtime.second_ambiguous = true;
- mtime.nanoseconds != 0
+ new.second_ambiguous = true;
+ self.nanoseconds != 0
&& boundary.nanoseconds != 0
- && mtime.nanoseconds < boundary.nanoseconds
+ && self.nanoseconds < boundary.nanoseconds
} else {
// `truncated_seconds` is less than 2**31,
// so this does not overflow `u32`:
let one_day_later = boundary.truncated_seconds + 24 * 3600;
- mtime.truncated_seconds < boundary.truncated_seconds
- || mtime.truncated_seconds > one_day_later
+ self.truncated_seconds < boundary.truncated_seconds
+ || self.truncated_seconds > one_day_later
};
if reliable {
- Ok(Some(mtime))
+ Some(new)
} else {
- Ok(None)
+ None
}
}
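The reliability rule that `for_reliable_mtime_of_self` implements deserves a standalone statement: an mtime in the same truncated second as the status-walk boundary is trusted only when both timestamps carry nanoseconds and the file is strictly older, while anything more than a day in the future is attributed to clock mismatch and accepted. A Python paraphrase, assuming plain integers for the split timestamps:

```python
def is_reliable_mtime(mtime_s: int, mtime_ns: int,
                      boundary_s: int, boundary_ns: int) -> bool:
    if mtime_s == boundary_s:
        # Same truncated second as the walk start: ambiguous unless both
        # sides have sub-second precision and the file is strictly older.
        return mtime_ns != 0 and boundary_ns != 0 and mtime_ns < boundary_ns
    one_day_later = boundary_s + 24 * 3600
    # Strictly in the past is fine; far-future values indicate clock skew
    # rather than a racy write, so they are treated as reliable too.
    return mtime_s < boundary_s or mtime_s > one_day_later


assert is_reliable_mtime(99, 0, 100, 0)       # strictly older second
assert not is_reliable_mtime(100, 0, 100, 0)  # same second, no nanos
```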
--- a/rust/hg-core/src/dirstate/parsers.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/dirstate/parsers.rs Sat Oct 26 04:16:00 2024 +0200
@@ -23,8 +23,13 @@
pub fn parse_dirstate_parents(
contents: &[u8],
) -> Result<&DirstateParents, HgError> {
- let (parents, _rest) = DirstateParents::from_bytes(contents)
- .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
+ let contents_len = contents.len();
+ let (parents, _rest) =
+ DirstateParents::from_bytes(contents).map_err(|_| {
+ HgError::corrupted(format!(
+ "Too little data for dirstate: {contents_len} bytes.",
+ ))
+ })?;
Ok(parents)
}
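The only functional change here is the richer corruption message, but the parse itself deserves a line of context: a v1 dirstate begins with two 20-byte binary nodeids. A Python sketch that reports the observed length the same way:

```python
import struct
from typing import Tuple


def parse_dirstate_parents(contents: bytes) -> Tuple[bytes, bytes]:
    if len(contents) < 40:
        raise ValueError('Too little data for dirstate: %d bytes.'
                         % len(contents))
    p1, p2 = struct.unpack('20s20s', contents[:40])
    return p1, p2


p1, p2 = parse_dirstate_parents(b'\x00' * 40)
assert p1 == p2 == b'\x00' * 20
```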
--- a/rust/hg-core/src/dirstate_tree/dirstate_map.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,5 +1,7 @@
use bytes_cast::BytesCast;
use std::borrow::Cow;
+use std::fs::Metadata;
+use std::os::unix::fs::MetadataExt;
use std::path::PathBuf;
use super::on_disk;
@@ -45,6 +47,68 @@
ForceAppend,
}
+/// Used to detect out-of-process changes in the dirstate
+#[derive(Debug, Copy, Clone)]
+pub struct DirstateIdentity {
+ pub mode: u32,
+ pub dev: u64,
+ pub ino: u64,
+ pub nlink: u64,
+ pub uid: u32,
+ pub gid: u32,
+ pub size: u64,
+ pub mtime: i64,
+ pub mtime_nsec: i64,
+ pub ctime: i64,
+ pub ctime_nsec: i64,
+}
+
+impl From<Metadata> for DirstateIdentity {
+ fn from(value: Metadata) -> Self {
+ Self {
+ mode: value.mode(),
+ dev: value.dev(),
+ ino: value.ino(),
+ nlink: value.nlink(),
+ uid: value.uid(),
+ gid: value.gid(),
+ size: value.size(),
+ mtime: value.mtime(),
+ mtime_nsec: value.mtime_nsec(),
+ ctime: value.ctime(),
+ ctime_nsec: value.ctime_nsec(),
+ }
+ }
+}
+
+impl PartialEq for DirstateIdentity {
+ fn eq(&self, other: &Self) -> bool {
+ // Some platforms return 0 when they have no support for nanos.
+ // This shouldn't be a problem in practice because of how highly
+ // unlikely it is that we actually get exactly 0 nanos, and worst
+ // case scenario, we don't write out the dirstate in a non-wlocked
+ // situation like status.
+ let mtime_nanos_equal = (self.mtime_nsec == 0
+ || other.mtime_nsec == 0)
+ || self.mtime_nsec == other.mtime_nsec;
+ let ctime_nanos_equal = (self.ctime_nsec == 0
+ || other.ctime_nsec == 0)
+ || self.ctime_nsec == other.ctime_nsec;
+
+ self.mode == other.mode
+ && self.dev == other.dev
+ && self.ino == other.ino
+ && self.nlink == other.nlink
+ && self.uid == other.uid
+ && self.gid == other.gid
+ && self.size == other.size
+ && self.mtime == other.mtime
+ && mtime_nanos_equal
+ && self.ctime == other.ctime
+ && ctime_nanos_equal
+ }
+}
+
#[derive(Debug)]
pub struct DirstateMap<'on_disk> {
/// Contents of the `.hg/dirstate` file
@@ -82,12 +146,15 @@
/// check the file identity.
///
/// TODO On non-Unix systems, something like hashing is a possibility?
- pub(super) identity: Option<u64>,
+ pub(super) identity: Option<DirstateIdentity>,
pub(super) dirstate_version: DirstateVersion,
/// Controlled by config option `devel.dirstate.v2.data_update_mode`
pub(super) write_mode: DirstateMapWriteMode,
+
+ /// Controlled by config option `format.use-dirstate-tracked-hint`
+ pub(super) use_tracked_hint: bool,
}
/// Using a plain `HgPathBuf` of the full path from the repository root as a
@@ -427,19 +494,16 @@
pub(super) tracked_descendants_count: u32,
}
-#[derive(Debug)]
+#[derive(Debug, Default)]
pub(super) enum NodeData {
Entry(DirstateEntry),
- CachedDirectory { mtime: TruncatedTimestamp },
+ CachedDirectory {
+ mtime: TruncatedTimestamp,
+ },
+ #[default]
None,
}
-impl Default for NodeData {
- fn default() -> Self {
- NodeData::None
- }
-}
-
impl NodeData {
fn has_entry(&self) -> bool {
matches!(self, NodeData::Entry(_))
@@ -474,6 +538,7 @@
identity: None,
dirstate_version: DirstateVersion::V1,
write_mode: DirstateMapWriteMode::Auto,
+ use_tracked_hint: false,
}
}
@@ -483,7 +548,7 @@
data_size: usize,
metadata: &[u8],
uuid: Vec<u8>,
- identity: Option<u64>,
+ identity: Option<DirstateIdentity>,
) -> Result<Self, DirstateError> {
if let Some(data) = on_disk.get(..data_size) {
Ok(on_disk::read(data, metadata, uuid, identity)?)
@@ -495,9 +560,11 @@
#[logging_timer::time("trace")]
pub fn new_v1(
on_disk: &'on_disk [u8],
- identity: Option<u64>,
+ identity: Option<DirstateIdentity>,
) -> Result<(Self, Option<DirstateParents>), DirstateError> {
let mut map = Self::empty(on_disk);
+ map.identity = identity;
+
if map.on_disk.is_empty() {
return Ok((map, None));
}
@@ -537,7 +604,6 @@
},
)?;
let parents = Some(*parents);
- map.identity = identity;
Ok((map, parents))
}
@@ -962,6 +1028,30 @@
pub(crate) fn set_write_mode(&mut self, write_mode: DirstateMapWriteMode) {
self.write_mode = write_mode;
}
+
+ pub(crate) fn set_tracked_hint(&mut self, tracked_hint: bool) {
+ self.use_tracked_hint = tracked_hint;
+ }
+}
+
+/// Sets the parameters for resetting a dirstate entry
+pub struct DirstateEntryReset<'a> {
+ /// Which entry we are resetting
+ pub filename: &'a HgPath,
+ /// Whether the entry is tracked in the working copy
+ pub wc_tracked: bool,
+ /// Whether the entry is tracked in p1
+ pub p1_tracked: bool,
+ /// Whether the entry has merge information
+ pub p2_info: bool,
+ /// Whether the entry's mtime should be trusted
+ pub has_meaningful_mtime: bool,
+ /// Information from the parent file data (from the manifest)
+ pub parent_file_data_opt: Option<ParentFileData>,
+ /// Set this to `true` if you are *certain* that there is no old entry for
+ /// this filename. Yields better performance in cases where we do a lot
+ /// of additions to the dirstate.
+ pub from_empty: bool,
}
type DebugDirstateTuple<'a> = (&'a HgPath, (u8, i32, i32, i32));
@@ -1050,28 +1140,31 @@
pub fn reset_state(
&mut self,
- filename: &HgPath,
- wc_tracked: bool,
- p1_tracked: bool,
- p2_info: bool,
- has_meaningful_mtime: bool,
- parent_file_data_opt: Option<ParentFileData>,
+ reset: DirstateEntryReset,
) -> Result<(), DirstateError> {
- if !(p1_tracked || p2_info || wc_tracked) {
- self.drop_entry_and_copy_source(filename)?;
+ if !(reset.p1_tracked || reset.p2_info || reset.wc_tracked) {
+ self.drop_entry_and_copy_source(reset.filename)?;
return Ok(());
}
- self.copy_map_remove(filename)?;
- let old_entry_opt = self.get(filename)?;
+ if !reset.from_empty {
+ self.copy_map_remove(reset.filename)?;
+ }
+
+ let old_entry_opt = if reset.from_empty {
+ None
+ } else {
+ self.get(reset.filename)?
+ };
+
self.with_dmap_mut(|map| {
map.reset_state(
- filename,
+ reset.filename,
old_entry_opt,
- wc_tracked,
- p1_tracked,
- p2_info,
- has_meaningful_mtime,
- parent_file_data_opt,
+ reset.wc_tracked,
+ reset.p1_tracked,
+ reset.p2_info,
+ reset.has_meaningful_mtime,
+ reset.parent_file_data_opt,
)
})
}
@@ -1558,7 +1651,7 @@
/// Test the very simple case of a single tracked file
#[test]
fn test_tracked_descendants_simple() -> Result<(), DirstateError> {
- let mut map = OwningDirstateMap::new_empty(vec![]);
+ let mut map = OwningDirstateMap::new_empty(vec![], None);
assert_eq!(map.len(), 0);
map.set_tracked(p(b"some/nested/path"))?;
@@ -1578,7 +1671,7 @@
/// Test the simple case of all tracked, but multiple files
#[test]
fn test_tracked_descendants_multiple() -> Result<(), DirstateError> {
- let mut map = OwningDirstateMap::new_empty(vec![]);
+ let mut map = OwningDirstateMap::new_empty(vec![], None);
map.set_tracked(p(b"some/nested/path"))?;
map.set_tracked(p(b"some/nested/file"))?;
@@ -1640,44 +1733,84 @@
/// Check with a mix of tracked and non-tracked items
#[test]
fn test_tracked_descendants_different() -> Result<(), DirstateError> {
- let mut map = OwningDirstateMap::new_empty(vec![]);
+ let mut map = OwningDirstateMap::new_empty(vec![], None);
// A file that was just added
map.set_tracked(p(b"some/nested/path"))?;
// This has no information, the dirstate should ignore it
- map.reset_state(p(b"some/file"), false, false, false, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"some/file"),
+ wc_tracked: false,
+ p1_tracked: false,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
assert_does_not_exist(&map, b"some/file");
// A file that was removed
- map.reset_state(
- p(b"some/nested/file"),
- false,
- true,
- false,
- false,
- None,
- )?;
+ let reset = DirstateEntryReset {
+ filename: p(b"some/nested/file"),
+ wc_tracked: false,
+ p1_tracked: true,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
assert!(!map.get(p(b"some/nested/file"))?.unwrap().tracked());
// Only present in p2
- map.reset_state(p(b"some/file3"), false, false, true, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"some/file3"),
+ wc_tracked: false,
+ p1_tracked: false,
+ p2_info: true,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
assert!(!map.get(p(b"some/file3"))?.unwrap().tracked());
// A file that was merged
- map.reset_state(p(b"root_file"), true, true, true, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"root_file"),
+ wc_tracked: true,
+ p1_tracked: true,
+ p2_info: true,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
assert!(map.get(p(b"root_file"))?.unwrap().tracked());
// A file that is added, with info from p2
// XXX is that actually possible?
- map.reset_state(p(b"some/file2"), true, false, true, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"some/file2"),
+ wc_tracked: true,
+ p1_tracked: false,
+ p2_info: true,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
assert!(map.get(p(b"some/file2"))?.unwrap().tracked());
// A clean file
// One layer without any files to test deletion cascade
- map.reset_state(
- p(b"some/other/nested/path"),
- true,
- true,
- false,
- false,
- None,
- )?;
+ let reset = DirstateEntryReset {
+ filename: p(b"some/other/nested/path"),
+ wc_tracked: true,
+ p1_tracked: true,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
assert!(map.get(p(b"some/other/nested/path"))?.unwrap().tracked());
assert_eq!(map.len(), 6);
@@ -1737,16 +1870,52 @@
/// Check that copies counter is correctly updated
#[test]
fn test_copy_source() -> Result<(), DirstateError> {
- let mut map = OwningDirstateMap::new_empty(vec![]);
+ let mut map = OwningDirstateMap::new_empty(vec![], None);
// Clean file
- map.reset_state(p(b"files/clean"), true, true, false, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"files/clean"),
+ wc_tracked: true,
+ p1_tracked: true,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
// Merged file
- map.reset_state(p(b"files/from_p2"), true, true, true, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"files/from_p2"),
+ wc_tracked: true,
+ p1_tracked: true,
+ p2_info: true,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
// Removed file
- map.reset_state(p(b"removed"), false, true, false, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"removed"),
+ wc_tracked: false,
+ p1_tracked: true,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
// Added file
- map.reset_state(p(b"files/added"), true, false, false, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"files/added"),
+ wc_tracked: true,
+ p1_tracked: false,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
// Add copy
map.copy_map_insert(p(b"files/clean"), p(b"clean_copy_source"))?;
assert_eq!(map.copy_map_len(), 1);
@@ -1794,56 +1963,73 @@
#[test]
fn test_on_disk() -> Result<(), DirstateError> {
// First let's create some data to put "on disk"
- let mut map = OwningDirstateMap::new_empty(vec![]);
+ let mut map = OwningDirstateMap::new_empty(vec![], None);
// A file that was just added
map.set_tracked(p(b"some/nested/added"))?;
map.copy_map_insert(p(b"some/nested/added"), p(b"added_copy_source"))?;
// A file that was removed
- map.reset_state(
- p(b"some/nested/removed"),
- false,
- true,
- false,
- false,
- None,
- )?;
+ let reset = DirstateEntryReset {
+ filename: p(b"some/nested/removed"),
+ wc_tracked: false,
+ p1_tracked: true,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
// Only present in p2
- map.reset_state(
- p(b"other/p2_info_only"),
- false,
- false,
- true,
- false,
- None,
- )?;
+ let reset = DirstateEntryReset {
+ filename: p(b"other/p2_info_only"),
+ wc_tracked: false,
+ p1_tracked: false,
+ p2_info: true,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
map.copy_map_insert(
p(b"other/p2_info_only"),
p(b"other/p2_info_copy_source"),
)?;
// A file that was merged
- map.reset_state(p(b"merged"), true, true, true, false, None)?;
+ let reset = DirstateEntryReset {
+ filename: p(b"merged"),
+ wc_tracked: true,
+ p1_tracked: true,
+ p2_info: true,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
// A file that is added, with info from p2
// XXX is that actually possible?
- map.reset_state(
- p(b"other/added_with_p2"),
- true,
- false,
- true,
- false,
- None,
- )?;
+ let reset = DirstateEntryReset {
+ filename: p(b"other/added_with_p2"),
+ wc_tracked: true,
+ p1_tracked: false,
+ p2_info: true,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
// One layer without any files to test deletion cascade
// A clean file
- map.reset_state(
- p(b"some/other/nested/clean"),
- true,
- true,
- false,
- false,
- None,
- )?;
+ let reset = DirstateEntryReset {
+ filename: p(b"some/other/nested/clean"),
+ wc_tracked: true,
+ p1_tracked: true,
+ p2_info: false,
+ has_meaningful_mtime: false,
+ parent_file_data_opt: None,
+ from_empty: false,
+ };
+ map.reset_state(reset)?;
let (packed, metadata, _should_append, _old_data_size) =
map.pack_v2(DirstateMapWriteMode::ForceNewDataFile)?;
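The hand-written `PartialEq` on `DirstateIdentity` exists for the nanosecond caveat in its comment: filesystems without sub-second support report 0, so a 0 on either side must not count as a mismatch. A reduced Python model over three of the eleven fields, just to pin down the comparison rule:

```python
from dataclasses import dataclass


@dataclass
class Identity:
    ino: int
    mtime: int
    mtime_nsec: int

    def same_as(self, other: 'Identity') -> bool:
        # 0 nanoseconds may simply mean "no nanosecond support", so a 0
        # on either side makes the sub-second component compare as equal.
        nanos_equal = (self.mtime_nsec == 0 or other.mtime_nsec == 0
                       or self.mtime_nsec == other.mtime_nsec)
        return (self.ino == other.ino
                and self.mtime == other.mtime
                and nanos_equal)


a = Identity(ino=42, mtime=100, mtime_nsec=0)     # platform without nanos
b = Identity(ino=42, mtime=100, mtime_nsec=1234)
assert a.same_as(b)
```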
--- a/rust/hg-core/src/dirstate_tree/on_disk.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/dirstate_tree/on_disk.rs Sat Oct 26 04:16:00 2024 +0200
@@ -8,7 +8,9 @@
self, DirstateMap, DirstateMapWriteMode, NodeRef,
};
use crate::dirstate_tree::path_with_basename::WithBasename;
-use crate::errors::HgError;
+use crate::errors::{HgError, IoResultExt};
+use crate::repo::Repo;
+use crate::requirements::DIRSTATE_TRACKED_HINT_V1;
use crate::utils::hg_path::HgPath;
use crate::DirstateEntry;
use crate::DirstateError;
@@ -20,6 +22,9 @@
use rand::Rng;
use std::borrow::Cow;
use std::fmt::Write;
+use uuid::Uuid;
+
+use super::dirstate_map::DirstateIdentity;
/// Added at the start of `.hg/dirstate` when the "v2" format is used.
/// This a redundant sanity check more than an actual "magic number" since
@@ -288,10 +293,12 @@
on_disk: &'on_disk [u8],
metadata: &[u8],
uuid: Vec<u8>,
- identity: Option<u64>,
+ identity: Option<DirstateIdentity>,
) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
if on_disk.is_empty() {
let mut map = DirstateMap::empty(on_disk);
+ map.identity = identity;
+ map.old_uuid = Some(uuid);
map.dirstate_version = DirstateVersion::V2;
return Ok(map);
}
@@ -315,6 +322,7 @@
identity,
dirstate_version: DirstateVersion::V2,
write_mode: DirstateMapWriteMode::Auto,
+ use_tracked_hint: false,
};
Ok(dirstate_map)
}
@@ -332,9 +340,7 @@
) -> Result<usize, DirstateV2ParseError> {
let start = self.base_name_start.get();
if start < self.full_path.len.get() {
- let start = usize::try_from(start)
- // u32 -> usize, could only panic on a 16-bit CPU
- .expect("dirstate-v2 base_name_start out of bounds");
+ let start = usize::from(start);
Ok(start)
} else {
Err(DirstateV2ParseError::new("not enough bytes for base name"))
@@ -593,8 +599,8 @@
{
// Either `usize::MAX` would result in an "out of bounds" error since a single
// `&[u8]` cannot occupy the entire address space.
- let start = start.get().try_into().unwrap_or(std::usize::MAX);
- let len = len.try_into().unwrap_or(std::usize::MAX);
+ let start = start.get().try_into().unwrap_or(usize::MAX);
+ let len = len.try_into().unwrap_or(usize::MAX);
let bytes = match on_disk.get(start..) {
Some(bytes) => bytes,
None => {
@@ -913,3 +919,22 @@
}
}
}
+
+/// Write a new tracked key to disk.
+/// See `format.use-dirstate-tracked-hint` config help for more details.
+pub fn write_tracked_key(repo: &Repo) -> Result<(), HgError> {
+ // TODO move this to the dirstate itself once it grows a `dirty` flag and
+ // can reason about which context it needs to write this in.
+ // For now, only this fast-path needs to think about the tracked hint.
+ // Use [`crate::dirstate_tree::dirstate_map::DirstateMap::
+ // use_tracked_hint`] instead of looking at the requirements once
+ // refactored.
+ if !repo.requirements().contains(DIRSTATE_TRACKED_HINT_V1) {
+ return Ok(());
+ }
+ // TODO use `hg_vfs` once the `InnerRevlog` is in.
+ let path = repo
+ .working_directory_path()
+ .join(".hg/dirstate-tracked-hint");
+ std::fs::write(&path, Uuid::new_v4().as_bytes()).when_writing_file(&path)
+}
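`write_tracked_key` is small but its contract matters: whenever the tracked set may have changed, the hint file is rewritten with a fresh random UUID, so external tools can watch one file instead of parsing the dirstate. A Python sketch, omitting the requirements check that gates the real function:

```python
import os
import uuid


def write_tracked_key(repo_root: str) -> None:
    # A new UUID on every write: watchers only need to notice that the
    # content changed, not interpret it.
    path = os.path.join(repo_root, '.hg', 'dirstate-tracked-hint')
    with open(path, 'wb') as fp:
        fp.write(uuid.uuid4().bytes)
```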
--- a/rust/hg-core/src/dirstate_tree/owning.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/dirstate_tree/owning.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,6 +1,6 @@
use crate::{DirstateError, DirstateParents};
-use super::dirstate_map::DirstateMap;
+use super::dirstate_map::{DirstateIdentity, DirstateMap};
use self_cell::self_cell;
use std::ops::Deref;
@@ -15,18 +15,25 @@
);
impl OwningDirstateMap {
- pub fn new_empty<OnDisk>(on_disk: OnDisk) -> Self
+ pub fn new_empty<OnDisk>(
+ on_disk: OnDisk,
+ identity: Option<DirstateIdentity>,
+ ) -> Self
where
OnDisk: Deref<Target = [u8]> + Send + 'static,
{
let on_disk = Box::new(on_disk);
- OwningDirstateMap::new(on_disk, |bytes| DirstateMap::empty(bytes))
+ OwningDirstateMap::new(on_disk, |bytes| {
+ let mut empty = DirstateMap::empty(bytes);
+ empty.identity = identity;
+ empty
+ })
}
pub fn new_v1<OnDisk>(
on_disk: OnDisk,
- identity: Option<u64>,
+ identity: Option<DirstateIdentity>,
) -> Result<(Self, DirstateParents), DirstateError>
where
OnDisk: Deref<Target = [u8]> + Send + 'static,
@@ -50,7 +57,7 @@
data_size: usize,
metadata: &[u8],
uuid: Vec<u8>,
- identity: Option<u64>,
+ identity: Option<DirstateIdentity>,
) -> Result<Self, DirstateError>
where
OnDisk: Deref<Target = [u8]> + Send + 'static,
@@ -81,7 +88,7 @@
self.get_map().old_uuid.as_deref()
}
- pub fn old_identity(&self) -> Option<u64> {
+ pub fn old_identity(&self) -> Option<DirstateIdentity> {
self.get_map().identity
}
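// Sketch of what threading `DirstateIdentity` through `OwningDirstateMap`
// enables: comparing the identity captured at read time against a freshly
// stat-ed one to detect that `.hg/dirstate` was replaced by another process.
// Assumes `DirstateIdentity` compares by value (i.e. derives `PartialEq`).
use hg::dirstate_tree::dirstate_map::DirstateIdentity;
use hg::dirstate_tree::owning::OwningDirstateMap;

fn dirstate_changed_on_disk(
    map: &OwningDirstateMap,
    fresh: Option<DirstateIdentity>,
) -> bool {
    map.old_identity() != fresh
}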
--- a/rust/hg-core/src/dirstate_tree/status.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/dirstate_tree/status.rs Sat Oct 26 04:16:00 2024 +0200
@@ -9,6 +9,7 @@
use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::matchers::get_ignore_function;
use crate::matchers::{Matcher, VisitChildrenSet};
+use crate::utils::files::filesystem_now;
use crate::utils::files::get_bytes_from_os_string;
use crate::utils::files::get_bytes_from_path;
use crate::utils::files::get_path_from_bytes;
@@ -30,7 +31,6 @@
use std::path::Path;
use std::path::PathBuf;
use std::sync::Mutex;
-use std::time::SystemTime;
/// Returns the status of the working directory compared to its parent
/// changeset.
@@ -677,14 +677,14 @@
// The directory was modified too recently,
// don’t cache its `read_dir` results.
//
- // 1. A change to this directory (direct child was
- // added or removed) cause its mtime to be set
- // (possibly truncated) to `directory_mtime`
+ // 1. A change to this directory (direct child was added or
+ // removed) causes its mtime to be set (possibly truncated)
+ // to `directory_mtime`
// 2. This `status` algorithm calls `read_dir`
- // 3. An other change is made to the same directory is
- // made so that calling `read_dir` agin would give
- // different results, but soon enough after 1. that
- // the mtime stays the same
+ // 3. Another change is made to the same directory, so that
+ // calling `read_dir` again would give different
+ // results, but soon enough after 1. that the mtime stays
+ // the same
//
// On a system where the time resolution is poor, this
// scenario is not unlikely if all three steps are caused
@@ -1034,22 +1034,3 @@
}
}
}
-
-/// Return the `mtime` of a temporary file newly-created in the `.hg` directory
-/// of the give repository.
-///
-/// This is similar to `SystemTime::now()`, with the result truncated to the
-/// same time resolution as other files’ modification times. Using `.hg`
-/// instead of the system’s default temporary directory (such as `/tmp`) makes
-/// it more likely the temporary file is in the same disk partition as contents
-/// of the working directory, which can matter since different filesystems may
-/// store timestamps with different resolutions.
-///
-/// This may fail, typically if we lack write permissions. In that case we
-/// should continue the `status()` algoritm anyway and consider the current
-/// date/time to be unknown.
-fn filesystem_now(repo_root: &Path) -> Result<SystemTime, io::Error> {
- tempfile::tempfile_in(repo_root.join(".hg"))?
- .metadata()?
- .modified()
-}
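// `filesystem_now` moved to `crate::utils::files` unchanged; for reference,
// a standalone equivalent of the relocated helper (assumes the `tempfile`
// crate): create a file under `.hg` and read its mtime back, so the result
// is truncated to the same resolution as the working directory's timestamps.
use std::{io, path::Path, time::SystemTime};

fn fs_now(repo_root: &Path) -> Result<SystemTime, io::Error> {
    tempfile::tempfile_in(repo_root.join(".hg"))?
        .metadata()?
        .modified()
}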
--- a/rust/hg-core/src/errors.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/errors.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,5 +1,6 @@
use crate::config::ConfigValueParseError;
use crate::exit_codes;
+use crate::utils::hg_path::HgPathError;
use std::fmt;
/// Common error cases that can happen in many different APIs
@@ -49,6 +50,8 @@
/// A race condition has been detected. This *must* be handled locally
/// and not directly surface to the user.
RaceDetected(String),
+ /// An invalid path was found
+ Path(HgPathError),
}
/// Details about where an I/O error happened
@@ -117,6 +120,7 @@
HgError::RaceDetected(context) => {
write!(f, "encountered a race condition {context}")
}
+ HgError::Path(hg_path_error) => write!(f, "{}", hg_path_error),
}
}
}
--- a/rust/hg-core/src/lib.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/lib.rs Sat Oct 26 04:16:00 2024 +0200
@@ -35,7 +35,9 @@
pub mod lock;
pub mod logging;
pub mod operations;
+pub mod progress;
pub mod revset;
+pub mod update;
pub mod utils;
pub mod vfs;
--- a/rust/hg-core/src/lock.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/lock.rs Sat Oct 26 04:16:00 2024 +0200
@@ -2,7 +2,7 @@
use crate::errors::HgError;
use crate::errors::HgResultExt;
-use crate::vfs::Vfs;
+use crate::vfs::VfsImpl;
use std::io;
use std::io::ErrorKind;
@@ -21,7 +21,7 @@
/// The return value of `f` is dropped in that case. If all is successful, the
/// return value of `f` is forwarded.
pub fn try_with_lock_no_wait<R>(
- hg_vfs: Vfs,
+ hg_vfs: &VfsImpl,
lock_filename: &str,
f: impl FnOnce() -> R,
) -> Result<R, LockError> {
@@ -57,7 +57,7 @@
Err(LockError::AlreadyHeld)
}
-fn break_lock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), LockError> {
+fn break_lock(hg_vfs: &VfsImpl, lock_filename: &str) -> Result<(), LockError> {
try_with_lock_no_wait(hg_vfs, &format!("{}.break", lock_filename), || {
// Check again in case some other process broke and
// acquired the lock in the meantime
@@ -71,7 +71,7 @@
#[cfg(unix)]
fn make_lock(
- hg_vfs: Vfs,
+ hg_vfs: &VfsImpl,
lock_filename: &str,
data: &str,
) -> Result<(), HgError> {
@@ -82,7 +82,7 @@
}
fn read_lock(
- hg_vfs: Vfs,
+ hg_vfs: &VfsImpl,
lock_filename: &str,
) -> Result<Option<String>, HgError> {
let link_target =
@@ -98,7 +98,7 @@
}
}
-fn unlock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), HgError> {
+fn unlock(hg_vfs: &VfsImpl, lock_filename: &str) -> Result<(), HgError> {
hg_vfs.remove_file(lock_filename)
}
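// Call-site sketch after the `Vfs` -> `&VfsImpl` migration: the vfs is now
// an owned value, so callers borrow a temporary instead of copying a handle.
// Mirrors `Repo::try_with_wlock_no_wait`; the "lock" name (the store lock)
// is illustrative here.
use hg::lock::{try_with_lock_no_wait, LockError};
use hg::repo::Repo;

fn with_store_lock<R>(repo: &Repo, f: impl FnOnce() -> R) -> Result<R, LockError> {
    try_with_lock_no_wait(&repo.hg_vfs(), "lock", f)
}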
--- a/rust/hg-core/src/logging.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/logging.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,5 +1,5 @@
use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
-use crate::vfs::Vfs;
+use crate::vfs::VfsImpl;
use std::io::Write;
/// A utility to append to a log file with the given name, and optionally
@@ -9,14 +9,14 @@
/// "example.log.1" to "example.log.2" etc up to the given maximum number of
/// files.
pub struct LogFile<'a> {
- vfs: Vfs<'a>,
+ vfs: VfsImpl,
name: &'a str,
max_size: Option<u64>,
max_files: u32,
}
impl<'a> LogFile<'a> {
- pub fn new(vfs: Vfs<'a>, name: &'a str) -> Self {
+ pub fn new(vfs: VfsImpl, name: &'a str) -> Self {
Self {
vfs,
name,
@@ -87,8 +87,12 @@
#[test]
fn test_rotation() {
let temp = tempfile::tempdir().unwrap();
- let vfs = Vfs { base: temp.path() };
- let logger = LogFile::new(vfs, "log").max_size(Some(3)).max_files(2);
+ let vfs = VfsImpl {
+ base: temp.path().to_owned(),
+ };
+ let logger = LogFile::new(vfs.clone(), "log")
+ .max_size(Some(3))
+ .max_files(2);
logger.write(b"one\n").unwrap();
logger.write(b"two\n").unwrap();
logger.write(b"3\n").unwrap();
--- a/rust/hg-core/src/matchers.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/matchers.rs Sat Oct 26 04:16:00 2024 +0200
@@ -617,7 +617,11 @@
std::mem::swap(&mut m1, &mut m2);
}
m1.file_set().map(|m1_files| {
- m1_files.iter().filter(|f| m2.matches(f)).cloned().collect()
+ m1_files
+ .iter()
+ .filter(|&f| m2.matches(f))
+ .cloned()
+ .collect()
})
} else {
// without exact input file sets, we can't do an exact
@@ -710,7 +714,7 @@
};
if base_is_exact {
new.files = base_files.map(|files| {
- files.iter().filter(|f| new.matches(f)).cloned().collect()
+ files.iter().filter(|&f| new.matches(f)).cloned().collect()
});
}
new
--- a/rust/hg-core/src/narrow.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/narrow.rs Sat Oct 26 04:16:00 2024 +0200
@@ -24,7 +24,7 @@
/// as part of wire protocol commands. That means that changes to this
/// data structure influence the wire protocol and should not be taken
/// lightly - especially removals.
-const VALID_PREFIXES: [&str; 2] = ["path:", "rootfilesin:"];
+pub const VALID_PREFIXES: [&str; 2] = ["path:", "rootfilesin:"];
/// Return the matcher for the current narrow spec, and all configuration
/// warnings to display.
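// `VALID_PREFIXES` is now public; a small sketch of the external validation
// this enables for narrowspec patterns:
use hg::narrow::VALID_PREFIXES;

fn has_valid_narrow_prefix(pattern: &str) -> bool {
    VALID_PREFIXES.iter().any(|prefix| pattern.starts_with(prefix))
}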
--- a/rust/hg-core/src/operations/debugdata.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/operations/debugdata.rs Sat Oct 26 04:16:00 2024 +0200
@@ -5,31 +5,33 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+use crate::errors::HgError;
use crate::repo::Repo;
-use crate::revlog::{Revlog, RevlogError};
-
-/// Kind of data to debug
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-pub enum DebugDataKind {
- Changelog,
- Manifest,
-}
+use crate::revlog::Revlog;
+use crate::{exit_codes, RevlogError, RevlogType};
/// Dump the contents data of a revision.
pub fn debug_data(
repo: &Repo,
revset: &str,
- kind: DebugDataKind,
+ kind: RevlogType,
) -> Result<Vec<u8>, RevlogError> {
let index_file = match kind {
- DebugDataKind::Changelog => "00changelog.i",
- DebugDataKind::Manifest => "00manifest.i",
+ RevlogType::Changelog => "00changelog.i",
+ RevlogType::Manifestlog => "00manifest.i",
+ _ => {
+ return Err(RevlogError::Other(HgError::abort(
+ format!("invalid revlog type {}", kind),
+ exit_codes::ABORT,
+ None,
+ )))
+ }
};
let revlog = Revlog::open(
&repo.store_vfs(),
index_file,
None,
- repo.default_revlog_options(kind == DebugDataKind::Changelog)?,
+ repo.default_revlog_options(kind)?,
)?;
let rev =
crate::revset::resolve_rev_number_or_hex_prefix(revset, &revlog)?;
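// Call-site sketch now that `DebugDataKind` is folded into `RevlogType`;
// only `Changelog` and `Manifestlog` are accepted, anything else aborts.
// Assumes an already open repo.
use hg::operations::debug_data;
use hg::{repo::Repo, RevlogError, RevlogType};

fn dump_tip_manifest(repo: &Repo) -> Result<Vec<u8>, RevlogError> {
    debug_data(repo, "tip", RevlogType::Manifestlog)
}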
--- a/rust/hg-core/src/operations/list_tracked_files.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/operations/list_tracked_files.rs Sat Oct 26 04:16:00 2024 +0200
@@ -5,38 +5,78 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+use std::num::NonZeroU8;
+
use crate::errors::HgError;
-use crate::matchers::Matcher;
+use crate::matchers::{Matcher, VisitChildrenSet};
use crate::repo::Repo;
use crate::revlog::manifest::Manifest;
use crate::revlog::RevlogError;
use crate::utils::filter_map_results;
use crate::utils::hg_path::HgPath;
+use crate::{Node, UncheckedRevision};
+
+/// List files under Mercurial control at a given revset.
+pub fn list_revset_tracked_files(
+ repo: &Repo,
+ revset: &str,
+ narrow_matcher: Box<dyn Matcher + Sync>,
+) -> Result<FilesForRev, RevlogError> {
+ let rev = crate::revset::resolve_single(revset, repo)?;
+ list_rev_tracked_files(repo, rev.into(), narrow_matcher)
+}
/// List files under Mercurial control at a given revision.
pub fn list_rev_tracked_files(
repo: &Repo,
- revset: &str,
- narrow_matcher: Box<dyn Matcher>,
+ rev: UncheckedRevision,
+ narrow_matcher: Box<dyn Matcher + Sync>,
) -> Result<FilesForRev, RevlogError> {
- let rev = crate::revset::resolve_single(revset, repo)?;
+ // TODO move this to the repo itself
+ // This implies storing the narrow matcher in the repo, bubbling up the
+ // errors and warnings, so it's a bit of churn. In the meantime, the repo
+ // method will error out on narrowed manifests.
+ let manifest = match repo.manifest_for_rev(rev) {
+ Ok(manifest) => manifest,
+ Err(e) => match e {
+ RevlogError::InvalidRevision(_) => {
+ let outside_of_current_narrow_spec = narrow_matcher
+ .visit_children_set(HgPath::new(""))
+ == VisitChildrenSet::Empty;
+ if outside_of_current_narrow_spec {
+ // Fake a manifest for a manifest whose node is known, but
+ // which doesn't exist because it's empty after narrowing
+ Manifest::empty()
+ } else {
+ return Err(e);
+ }
+ }
+ _ => return Err(e),
+ },
+ };
Ok(FilesForRev {
- manifest: repo.manifest_for_rev(rev.into())?,
+ manifest,
narrow_matcher,
})
}
pub struct FilesForRev {
manifest: Manifest,
- narrow_matcher: Box<dyn Matcher>,
+ narrow_matcher: Box<dyn Matcher + Sync>,
}
+/// Like [`crate::revlog::manifest::ManifestEntry`], but with the `Node`
+/// already checked.
+pub type ExpandedManifestEntry<'a> = (&'a HgPath, Node, Option<NonZeroU8>);
+
impl FilesForRev {
- pub fn iter(&self) -> impl Iterator<Item = Result<&HgPath, HgError>> {
+ pub fn iter(
+ &self,
+ ) -> impl Iterator<Item = Result<ExpandedManifestEntry, HgError>> {
filter_map_results(self.manifest.iter(), |entry| {
let path = entry.path;
Ok(if self.narrow_matcher.matches(path) {
- Some(path)
+ Some((path, entry.node_id()?, entry.flags))
} else {
None
})
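// Sketch of consuming the richer `ExpandedManifestEntry` items now yielded
// by `FilesForRev::iter`; `AlwaysMatcher` stands in for a real narrow
// matcher and the repo is assumed open.
use hg::matchers::AlwaysMatcher;
use hg::operations::list_revset_tracked_files;
use hg::{repo::Repo, RevlogError};

fn print_tracked(repo: &Repo) -> Result<(), RevlogError> {
    let files = list_revset_tracked_files(repo, ".", Box::new(AlwaysMatcher))?;
    for entry in files.iter() {
        let (path, node, flags) = entry?;
        println!("{} {:?} {:?}", path, node, flags);
    }
    Ok(())
}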
--- a/rust/hg-core/src/operations/mod.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/operations/mod.rs Sat Oct 26 04:16:00 2024 +0200
@@ -7,6 +7,9 @@
mod list_tracked_files;
mod status_rev_rev;
pub use cat::{cat, CatOutput};
-pub use debugdata::{debug_data, DebugDataKind};
-pub use list_tracked_files::{list_rev_tracked_files, FilesForRev};
+pub use debugdata::debug_data;
+pub use list_tracked_files::{
+ list_rev_tracked_files, list_revset_tracked_files, ExpandedManifestEntry,
+ FilesForRev,
+};
pub use status_rev_rev::{status_rev_rev_no_copies, DiffStatus, StatusRevRev};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/progress.rs Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,92 @@
+//! Progress-bar related things
+
+use std::{
+ sync::atomic::{AtomicBool, Ordering},
+ time::Duration,
+};
+
+use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
+
+/// A generic determinate progress bar trait
+pub trait Progress: Send + Sync + 'static {
+ /// Set the current position and optionally the total
+ fn update(&self, pos: u64, total: Option<u64>);
+ /// Increment the current position and optionally the total
+ fn increment(&self, step: u64, total: Option<u64>);
+ /// Declare that progress is over and the progress bar should be deleted
+ fn complete(self);
+}
+
+const PROGRESS_DELAY: Duration = Duration::from_secs(1);
+
+/// A generic (determinate) progress bar. Stays hidden until [`PROGRESS_DELAY`]
+/// has elapsed, to avoid flashing a progress bar for very fast operations.
+pub struct HgProgressBar {
+ progress: ProgressBar,
+ has_been_shown: AtomicBool,
+}
+
+impl HgProgressBar {
+ // TODO pass config to check progress.disable/assume-tty/delay/etc.
+ /// Return a new progress bar with `topic` as the prefix.
+ /// The progress and total are both set to 0, and it is hidden until the
+ /// next call to `update` given that more than a second has elapsed.
+ pub fn new(topic: &str) -> Self {
+ let template =
+ format!("{} {{wide_bar}} {{pos}}/{{len}} {{eta}} ", topic);
+ let style = ProgressStyle::with_template(&template).unwrap();
+ let progress_bar = ProgressBar::new(0).with_style(style);
+ // Hide the progress bar and only show it if we've elapsed more
+ // than a second
+ progress_bar.set_draw_target(ProgressDrawTarget::hidden());
+ Self {
+ progress: progress_bar,
+ has_been_shown: false.into(),
+ }
+ }
+
+ /// Called whenever the progress changes to determine whether to start
+ /// showing the progress bar
+ fn maybe_show(&self) {
+ if self.progress.is_hidden()
+ && self.progress.elapsed() > PROGRESS_DELAY
+ {
+ // Catch a race condition whereby we check if it's hidden, then
+ // set the draw target from another thread, then do it again from
+ // this thread, which results in multiple progress bar lines being
+ // left drawn.
+ let has_been_shown =
+ self.has_been_shown.fetch_or(true, Ordering::Relaxed);
+ if !has_been_shown {
+ // Here we are certain that we're the only thread that has
+ // set `has_been_shown` and we can change the draw target
+ self.progress.set_draw_target(ProgressDrawTarget::stderr());
+ self.progress.tick();
+ }
+ }
+ }
+}
+
+impl Progress for HgProgressBar {
+ fn update(&self, pos: u64, total: Option<u64>) {
+ self.progress.update(|state| {
+ state.set_pos(pos);
+ if let Some(t) = total {
+ state.set_len(t)
+ }
+ });
+ self.maybe_show();
+ }
+
+ fn increment(&self, step: u64, total: Option<u64>) {
+ self.progress.inc(step);
+ if let Some(t) = total {
+ self.progress.set_length(t)
+ }
+ self.maybe_show();
+ }
+
+ fn complete(self) {
+ self.progress.finish_and_clear();
+ }
+}
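// Usage sketch for the new progress API; the topic and loop body are
// illustrative only. The bar stays hidden for `PROGRESS_DELAY`, so very
// short runs print nothing at all.
use hg::progress::{HgProgressBar, Progress};

fn process(items: &[u64]) {
    let bar = HgProgressBar::new("processing");
    let total = items.len() as u64;
    for (done, _item) in items.iter().enumerate() {
        // ... do the actual work for _item ...
        bar.update(done as u64 + 1, Some(total));
    }
    bar.complete();
}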
--- a/rust/hg-core/src/repo.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/repo.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,7 +1,9 @@
use crate::changelog::Changelog;
use crate::config::{Config, ConfigError, ConfigParseError};
use crate::dirstate::DirstateParents;
-use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
+use crate::dirstate_tree::dirstate_map::{
+ DirstateIdentity, DirstateMapWriteMode,
+};
use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
use crate::dirstate_tree::owning::OwningDirstateMap;
use crate::errors::HgResultExt;
@@ -9,8 +11,9 @@
use crate::lock::{try_with_lock_no_wait, LockError};
use crate::manifest::{Manifest, Manifestlog};
use crate::requirements::{
- CHANGELOGV2_REQUIREMENT, GENERALDELTA_REQUIREMENT, NODEMAP_REQUIREMENT,
- REVLOGV1_REQUIREMENT, REVLOGV2_REQUIREMENT,
+ CHANGELOGV2_REQUIREMENT, DIRSTATE_TRACKED_HINT_V1,
+ GENERALDELTA_REQUIREMENT, NODEMAP_REQUIREMENT, REVLOGV1_REQUIREMENT,
+ REVLOGV2_REQUIREMENT,
};
use crate::revlog::filelog::Filelog;
use crate::revlog::RevlogError;
@@ -18,9 +21,10 @@
use crate::utils::files::get_path_from_bytes;
use crate::utils::hg_path::HgPath;
use crate::utils::SliceExt;
-use crate::vfs::{is_dir, is_file, Vfs};
+use crate::vfs::{is_dir, is_file, VfsImpl};
use crate::{
- requirements, NodePrefix, RevlogVersionOptions, UncheckedRevision,
+ exit_codes, requirements, NodePrefix, RevlogDataConfig, RevlogDeltaConfig,
+ RevlogFeatureConfig, RevlogType, RevlogVersionOptions, UncheckedRevision,
};
use crate::{DirstateError, RevlogOpenOptions};
use std::cell::{Ref, RefCell, RefMut};
@@ -32,7 +36,8 @@
const V2_MAX_READ_ATTEMPTS: usize = 5;
-type DirstateMapIdentity = (Option<u64>, Option<Vec<u8>>, usize);
+/// Docket file identity, data file uuid and the data size
+type DirstateV2Identity = (Option<DirstateIdentity>, Option<Vec<u8>>, usize);
/// A repository on disk
pub struct Repo {
@@ -67,6 +72,32 @@
}
}
+impl From<RepoError> for HgError {
+ fn from(value: RepoError) -> Self {
+ match value {
+ RepoError::NotFound { at } => HgError::abort(
+ format!(
+ "abort: no repository found in '{}' (.hg not found)!",
+ at.display()
+ ),
+ exit_codes::ABORT,
+ None,
+ ),
+ RepoError::ConfigParseError(config_parse_error) => {
+ HgError::Abort {
+ message: String::from_utf8_lossy(
+ &config_parse_error.message,
+ )
+ .to_string(),
+ detailed_exit_code: exit_codes::CONFIG_PARSE_ERROR_ABORT,
+ hint: None,
+ }
+ }
+ RepoError::Other(hg_error) => hg_error,
+ }
+ }
+}
+
impl Repo {
/// tries to find nearest repository root in current working directory or
/// its ancestors
@@ -120,8 +151,10 @@
let mut repo_config_files =
vec![dot_hg.join("hgrc"), dot_hg.join("hgrc-not-shared")];
- let hg_vfs = Vfs { base: &dot_hg };
- let mut reqs = requirements::load_if_exists(hg_vfs)?;
+ let hg_vfs = VfsImpl {
+ base: dot_hg.to_owned(),
+ };
+ let mut reqs = requirements::load_if_exists(&hg_vfs)?;
let relative =
reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
let shared =
@@ -162,9 +195,10 @@
store_path = shared_path.join("store");
- let source_is_share_safe =
- requirements::load(Vfs { base: &shared_path })?
- .contains(requirements::SHARESAFE_REQUIREMENT);
+ let source_is_share_safe = requirements::load(VfsImpl {
+ base: shared_path.to_owned(),
+ })?
+ .contains(requirements::SHARESAFE_REQUIREMENT);
if share_safe != source_is_share_safe {
return Err(HgError::unsupported("share-safe mismatch").into());
@@ -175,7 +209,9 @@
}
}
if share_safe {
- reqs.extend(requirements::load(Vfs { base: &store_path })?);
+ reqs.extend(requirements::load(VfsImpl {
+ base: store_path.to_owned(),
+ })?);
}
let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
@@ -215,19 +251,23 @@
/// For accessing repository files (in `.hg`), except for the store
/// (`.hg/store`).
- pub fn hg_vfs(&self) -> Vfs<'_> {
- Vfs { base: &self.dot_hg }
+ pub fn hg_vfs(&self) -> VfsImpl {
+ VfsImpl {
+ base: self.dot_hg.to_owned(),
+ }
}
/// For accessing repository store files (in `.hg/store`)
- pub fn store_vfs(&self) -> Vfs<'_> {
- Vfs { base: &self.store }
+ pub fn store_vfs(&self) -> VfsImpl {
+ VfsImpl {
+ base: self.store.to_owned(),
+ }
}
/// For accessing the working copy
- pub fn working_directory_vfs(&self) -> Vfs<'_> {
- Vfs {
- base: &self.working_directory,
+ pub fn working_directory_vfs(&self) -> VfsImpl {
+ VfsImpl {
+ base: self.working_directory.to_owned(),
}
}
@@ -235,7 +275,7 @@
&self,
f: impl FnOnce() -> R,
) -> Result<R, LockError> {
- try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
+ try_with_lock_no_wait(&self.hg_vfs(), "wlock", f)
}
/// Whether this repo should use dirstate-v2.
@@ -274,13 +314,12 @@
.unwrap_or_default())
}
- fn dirstate_identity(&self) -> Result<Option<u64>, HgError> {
- use std::os::unix::fs::MetadataExt;
+ fn dirstate_identity(&self) -> Result<Option<DirstateIdentity>, HgError> {
Ok(self
.hg_vfs()
.symlink_metadata("dirstate")
.io_not_found_as_none()?
- .map(|meta| meta.ino()))
+ .map(DirstateIdentity::from))
}
pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
@@ -318,10 +357,10 @@
/// Returns the information read from the dirstate docket necessary to
/// check if the data file has been updated/deleted by another process
/// since we last read the dirstate.
- /// Namely, the inode, data file uuid and the data size.
+ /// Namely, the docket file identity, the data file uuid and the data size.
fn get_dirstate_data_file_integrity(
&self,
- ) -> Result<DirstateMapIdentity, HgError> {
+ ) -> Result<DirstateV2Identity, HgError> {
assert!(
self.use_dirstate_v2(),
"accessing dirstate data file ID without dirstate-v2"
@@ -332,7 +371,6 @@
let identity = self.dirstate_identity()?;
let dirstate = self.dirstate_file_contents()?;
if dirstate.is_empty() {
- self.dirstate_parents.set(DirstateParents::NULL);
Ok((identity, None, 0))
} else {
let docket_res =
@@ -415,11 +453,14 @@
debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
let identity = self.dirstate_identity()?;
let dirstate_file_contents = self.dirstate_file_contents()?;
+ let parents = self.dirstate_parents()?;
if dirstate_file_contents.is_empty() {
- self.dirstate_parents.set(DirstateParents::NULL);
- Ok(OwningDirstateMap::new_empty(Vec::new()))
+ self.dirstate_parents.set(parents);
+ Ok(OwningDirstateMap::new_empty(Vec::new(), identity))
} else {
- let (map, parents) =
+ // Ignore the dirstate on-disk parents; they may have been set
+ // in the repo before
+ let (map, _) =
OwningDirstateMap::new_v1(dirstate_file_contents, identity)?;
self.dirstate_parents.set(parents);
Ok(map)
@@ -433,8 +474,7 @@
let dirstate_file_contents = self.dirstate_file_contents()?;
let identity = self.dirstate_identity()?;
if dirstate_file_contents.is_empty() {
- self.dirstate_parents.set(DirstateParents::NULL);
- return Ok(OwningDirstateMap::new_empty(Vec::new()));
+ return Ok(OwningDirstateMap::new_empty(Vec::new(), identity));
}
let docket = crate::dirstate_tree::on_disk::read_docket(
&dirstate_file_contents,
@@ -510,7 +550,13 @@
_ => DirstateMapWriteMode::Auto,
};
- map.with_dmap_mut(|m| m.set_write_mode(write_mode));
+ let tracked_hint =
+ self.requirements().contains(DIRSTATE_TRACKED_HINT_V1);
+
+ map.with_dmap_mut(|m| {
+ m.set_write_mode(write_mode);
+ m.set_tracked_hint(tracked_hint);
+ });
Ok(map)
}
@@ -529,7 +575,10 @@
}
fn new_changelog(&self) -> Result<Changelog, HgError> {
- Changelog::open(&self.store_vfs(), self.default_revlog_options(true)?)
+ Changelog::open(
+ &self.store_vfs(),
+ self.default_revlog_options(RevlogType::Changelog)?,
+ )
}
pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
@@ -543,7 +592,7 @@
fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
Manifestlog::open(
&self.store_vfs(),
- self.default_revlog_options(false)?,
+ self.default_revlog_options(RevlogType::Manifestlog)?,
)
}
@@ -590,9 +639,12 @@
}
pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
- Filelog::open(self, path, self.default_revlog_options(false)?)
+ Filelog::open(
+ self,
+ path,
+ self.default_revlog_options(RevlogType::Filelog)?,
+ )
}
-
/// Write to disk any updates that were made through `dirstate_map_mut`.
///
/// The "wlock" must be held while calling this.
@@ -742,10 +794,11 @@
pub fn default_revlog_options(
&self,
- changelog: bool,
+ revlog_type: RevlogType,
) -> Result<RevlogOpenOptions, HgError> {
let requirements = self.requirements();
- let version = if changelog
+ let is_changelog = revlog_type == RevlogType::Changelog;
+ let version = if is_changelog
&& requirements.contains(CHANGELOGV2_REQUIREMENT)
{
let compute_rank = self
@@ -756,7 +809,8 @@
RevlogVersionOptions::V2
} else if requirements.contains(REVLOGV1_REQUIREMENT) {
RevlogVersionOptions::V1 {
- generaldelta: requirements.contains(GENERALDELTA_REQUIREMENT),
+ general_delta: requirements.contains(GENERALDELTA_REQUIREMENT),
+ inline: !is_changelog,
}
} else {
RevlogVersionOptions::V0
@@ -766,8 +820,45 @@
// We don't need to dance around the slow path like in the Python
// implementation since we know we have access to the fast code.
use_nodemap: requirements.contains(NODEMAP_REQUIREMENT),
+ delta_config: RevlogDeltaConfig::new(
+ self.config(),
+ self.requirements(),
+ revlog_type,
+ )?,
+ data_config: RevlogDataConfig::new(
+ self.config(),
+ self.requirements(),
+ )?,
+ feature_config: RevlogFeatureConfig::new(
+ self.config(),
+ requirements,
+ )?,
})
}
+
+ pub fn node(&self, rev: UncheckedRevision) -> Option<crate::Node> {
+ self.changelog()
+ .ok()
+ .and_then(|c| c.node_from_rev(rev).copied())
+ }
+
+ /// Change the current working directory parents cached in the repo.
+ ///
+ /// TODO
+ /// This does *not* do a lot of what is expected from a full `set_parents`:
+ /// - parents should probably be stored in the dirstate
+ /// - dirstate should have a "changing parents" context
+ /// - dirstate should return copies when outside of a merge context, to be
+ /// discarded within the repo context
+ /// See `setparents` in `context.py`.
+ pub fn manually_set_parents(
+ &self,
+ new_parents: DirstateParents,
+ ) -> Result<(), HgError> {
+ let mut parents = self.dirstate_parents.value.borrow_mut();
+ *parents = Some(new_parents);
+ Ok(())
+ }
}
/// Lazily-initialized component of `Repo` with interior mutability
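// The `changelog: bool` flag is gone: callers now say which revlog they are
// opening, which lets delta/data/feature configs differ per revlog type.
// Sketch, assuming an open repo:
use hg::{repo::Repo, RevlogType};

fn open_all_options(repo: &Repo) -> Result<(), hg::errors::HgError> {
    let _changelog = repo.default_revlog_options(RevlogType::Changelog)?;
    let _manifest = repo.default_revlog_options(RevlogType::Manifestlog)?;
    let _filelog = repo.default_revlog_options(RevlogType::Filelog)?;
    Ok(())
}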
--- a/rust/hg-core/src/requirements.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/requirements.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,7 +1,7 @@
use crate::errors::{HgError, HgResultExt};
use crate::repo::Repo;
use crate::utils::join_display;
-use crate::vfs::Vfs;
+use crate::vfs::VfsImpl;
use std::collections::HashSet;
fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
@@ -24,11 +24,13 @@
.collect()
}
-pub(crate) fn load(hg_vfs: Vfs) -> Result<HashSet<String>, HgError> {
+pub(crate) fn load(hg_vfs: VfsImpl) -> Result<HashSet<String>, HgError> {
parse(&hg_vfs.read("requires")?)
}
-pub(crate) fn load_if_exists(hg_vfs: Vfs) -> Result<HashSet<String>, HgError> {
+pub(crate) fn load_if_exists(
+ hg_vfs: &VfsImpl,
+) -> Result<HashSet<String>, HgError> {
if let Some(bytes) = hg_vfs.read("requires").io_not_found_as_none()? {
parse(&bytes)
} else {
--- a/rust/hg-core/src/revlog/changelog.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revlog/changelog.rs Sat Oct 26 04:16:00 2024 +0200
@@ -4,15 +4,16 @@
use std::fmt::{Debug, Formatter};
use std::{iter, str};
-use chrono::{DateTime, FixedOffset, NaiveDateTime};
+use chrono::{DateTime, FixedOffset, Utc};
use itertools::{Either, Itertools};
use crate::errors::HgError;
+use crate::revlog::Index;
use crate::revlog::Revision;
use crate::revlog::{Node, NodePrefix};
use crate::revlog::{Revlog, RevlogEntry, RevlogError};
use crate::utils::hg_path::HgPath;
-use crate::vfs::Vfs;
+use crate::vfs::VfsImpl;
use crate::{Graph, GraphError, RevlogOpenOptions, UncheckedRevision};
/// A specialized `Revlog` to work with changelog data format.
@@ -24,7 +25,7 @@
impl Changelog {
/// Open the `changelog` of a repository given by its root.
pub fn open(
- store_vfs: &Vfs,
+ store_vfs: &VfsImpl,
options: RevlogOpenOptions,
) -> Result<Self, HgError> {
let revlog = Revlog::open(store_vfs, "00changelog.i", None, options)?;
@@ -81,6 +82,10 @@
) -> Result<Revision, RevlogError> {
self.revlog.rev_from_node(node)
}
+
+ pub fn get_index(&self) -> &Index {
+ &self.revlog.index
+ }
}
impl Graph for Changelog {
@@ -334,7 +339,7 @@
HgError::corrupted(format!("failed to parse timestamp: {e}"))
})
.and_then(|secs| {
- NaiveDateTime::from_timestamp_opt(secs, 0).ok_or_else(|| {
+ DateTime::from_timestamp(secs, 0).ok_or_else(|| {
HgError::corrupted(format!(
"integer timestamp out of valid range: {secs}"
))
@@ -359,14 +364,17 @@
let timezone = FixedOffset::west_opt(timezone_secs)
.ok_or_else(|| HgError::corrupted("timezone offset out of bounds"))?;
- Ok(DateTime::from_naive_utc_and_offset(timestamp_utc, timezone))
+ Ok(DateTime::from_naive_utc_and_offset(
+ timestamp_utc.naive_utc(),
+ timezone,
+ ))
}
/// Attempt to parse the given string as floating-point timestamp, and
/// convert the result into a `chrono::NaiveDateTime`.
fn parse_float_timestamp(
timestamp_str: &str,
-) -> Result<NaiveDateTime, HgError> {
+) -> Result<DateTime<Utc>, HgError> {
let timestamp = timestamp_str.parse::<f64>().map_err(|e| {
HgError::corrupted(format!("failed to parse timestamp: {e}"))
})?;
@@ -394,7 +402,7 @@
// precision with present-day timestamps.)
let nsecs = (subsecs * 1_000_000_000.0) as u32;
- NaiveDateTime::from_timestamp_opt(secs, nsecs).ok_or_else(|| {
+ DateTime::from_timestamp(secs, nsecs).ok_or_else(|| {
HgError::corrupted(format!(
"float timestamp out of valid range: {timestamp}"
))
@@ -495,8 +503,11 @@
#[cfg(test)]
mod tests {
use super::*;
- use crate::vfs::Vfs;
- use crate::NULL_REVISION;
+ use crate::vfs::VfsImpl;
+ use crate::{
+ RevlogDataConfig, RevlogDeltaConfig, RevlogFeatureConfig,
+ NULL_REVISION,
+ };
use pretty_assertions::assert_eq;
#[test]
@@ -555,11 +566,23 @@
fn test_data_from_rev_null() -> Result<(), RevlogError> {
// an empty revlog will be enough for this case
let temp = tempfile::tempdir().unwrap();
- let vfs = Vfs { base: temp.path() };
+ let vfs = VfsImpl {
+ base: temp.path().to_owned(),
+ };
std::fs::write(temp.path().join("foo.i"), b"").unwrap();
- let revlog =
- Revlog::open(&vfs, "foo.i", None, RevlogOpenOptions::new())
- .unwrap();
+ std::fs::write(temp.path().join("foo.d"), b"").unwrap();
+ let revlog = Revlog::open(
+ &vfs,
+ "foo.i",
+ None,
+ RevlogOpenOptions::new(
+ false,
+ RevlogDataConfig::default(),
+ RevlogDeltaConfig::default(),
+ RevlogFeatureConfig::default(),
+ ),
+ )
+ .unwrap();
let changelog = Changelog { revlog };
assert_eq!(
@@ -617,7 +640,7 @@
#[test]
fn test_unescape_nul_followed_by_octal() {
// Escaped NUL chars followed by octal digits are decoded correctly.
- let expected = b"\012";
+ let expected = b"\x0012";
let escaped = br"\012";
let unescaped = unescape_extra(escaped);
assert_eq!(&expected[..], &unescaped[..]);
@@ -627,19 +650,19 @@
fn test_parse_float_timestamp() {
let test_cases = [
// Zero should map to the UNIX epoch.
- ("0.0", "1970-01-01 00:00:00"),
+ ("0.0", "1970-01-01 00:00:00 UTC"),
// Negative zero should be the same as positive zero.
- ("-0.0", "1970-01-01 00:00:00"),
+ ("-0.0", "1970-01-01 00:00:00 UTC"),
// Values without fractional components should work like integers.
// (Assuming the timestamp is within the limits of f64 precision.)
- ("1115154970.0", "2005-05-03 21:16:10"),
+ ("1115154970.0", "2005-05-03 21:16:10 UTC"),
// We expect some loss of precision in the fractional component
// when parsing arbitrary floating-point values.
- ("1115154970.123456789", "2005-05-03 21:16:10.123456716"),
+ ("1115154970.123456789", "2005-05-03 21:16:10.123456716 UTC"),
// But representable f64 values should parse losslessly.
- ("1115154970.123456716", "2005-05-03 21:16:10.123456716"),
+ ("1115154970.123456716", "2005-05-03 21:16:10.123456716 UTC"),
// Negative fractional components are subtracted from the epoch.
- ("-1.333", "1969-12-31 23:59:58.667"),
+ ("-1.333", "1969-12-31 23:59:58.667 UTC"),
];
for (input, expected) in test_cases {
@@ -713,7 +736,7 @@
for (extra, msg) in test_cases {
assert!(
- decode_extra(&extra).is_err(),
+ decode_extra(extra).is_err(),
"corrupt extra should have failed to parse: {}",
msg
);
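// The chrono migration in one place: newer chrono (0.4.31+) deprecates
// `NaiveDateTime::from_timestamp_opt` in favor of `DateTime::from_timestamp`,
// which returns a timezone-aware `DateTime<Utc>`; going back to a fixed
// offset makes the naive conversion explicit. Standalone sketch:
use chrono::{DateTime, FixedOffset, Utc};

fn to_fixed_offset(secs: i64, tz_west_secs: i32) -> Option<DateTime<FixedOffset>> {
    let utc: DateTime<Utc> = DateTime::from_timestamp(secs, 0)?;
    let tz = FixedOffset::west_opt(tz_west_secs)?;
    Some(DateTime::from_naive_utc_and_offset(utc.naive_utc(), tz))
}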
--- a/rust/hg-core/src/revlog/filelog.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revlog/filelog.rs Sat Oct 26 04:16:00 2024 +0200
@@ -29,7 +29,7 @@
impl Filelog {
pub fn open_vfs(
- store_vfs: &crate::vfs::Vfs<'_>,
+ store_vfs: &crate::vfs::VfsImpl,
file_path: &HgPath,
options: RevlogOpenOptions,
) -> Result<Self, HgError> {
--- a/rust/hg-core/src/revlog/index.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revlog/index.rs Sat Oct 26 04:16:00 2024 +0200
@@ -26,7 +26,7 @@
pub(super) header_bytes: [u8; INDEX_HEADER_SIZE],
}
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
pub struct IndexHeaderFlags {
flags: u16,
}
@@ -350,9 +350,6 @@
return Err(HgError::corrupted("unsupported revlog version"));
}
- // This is only correct because we know version is REVLOGV1.
- // In v2 we always use generaldelta, while in v0 we never use
- // generaldelta. Similar for [is_inline] (it's only used in v1).
let uses_generaldelta = header.format_flags().uses_generaldelta();
if header.format_flags().is_inline() {
@@ -424,7 +421,6 @@
assert!(self.is_inline());
{
// Wrap in a block to drop the read guard
- // TODO perf?
let mut offsets = self.offsets.write().unwrap();
if offsets.is_none() {
offsets.replace(inline_scan(&self.bytes.bytes).1);
@@ -452,6 +448,7 @@
///
/// The specified revision being of the checked type, it always exists
/// if it was validated by this index.
+ #[inline(always)]
pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
if rev == NULL_REVISION {
return None;
@@ -544,15 +541,82 @@
/// Return the head revisions of this index
pub fn head_revs(&self) -> Result<Vec<Revision>, GraphError> {
- self.head_revs_filtered(&HashSet::new(), false)
+ self.head_revs_advanced(&HashSet::new(), None, false)
.map(|h| h.unwrap())
}
+ /// Return the head revisions of this index
+ pub fn head_revs_advanced(
+ &self,
+ filtered_revs: &HashSet<Revision>,
+ stop_rev: Option<Revision>,
+ py_shortcut: bool,
+ ) -> Result<Option<Vec<Revision>>, GraphError> {
+ {
+ let guard = self
+ .head_revs
+ .read()
+ .expect("RwLock on Index.head_revs should not be poisoned");
+ let self_head_revs = &guard.0;
+ let self_filtered_revs = &guard.1;
+ if !self_head_revs.is_empty()
+ && filtered_revs == self_filtered_revs
+ && stop_rev.is_none()
+ {
+ if py_shortcut {
+ // Don't copy the revs since we've already cached them
+ // on the Python side.
+ return Ok(None);
+ } else {
+ return Ok(Some(self_head_revs.to_owned()));
+ }
+ }
+ }
+
+ let (as_vec, cachable) = if self.is_empty() {
+ (vec![NULL_REVISION], true)
+ } else {
+ let length: usize = match stop_rev {
+ Some(r) => r.0 as usize,
+ None => self.len(),
+ };
+ let cachable = self.len() == length;
+ let mut not_heads = bitvec![0; length];
+ dagops::retain_heads_fast(
+ self,
+ not_heads.as_mut_bitslice(),
+ filtered_revs,
+ )?;
+ (
+ not_heads
+ .into_iter()
+ .enumerate()
+ .filter_map(|(idx, is_not_head)| {
+ if is_not_head {
+ None
+ } else {
+ Some(Revision(idx as BaseRevision))
+ }
+ })
+ .collect(),
+ cachable,
+ )
+ };
+ if cachable {
+ *self
+ .head_revs
+ .write()
+ .expect("RwLock on Index.head_revs should not be poisoned") =
+ (as_vec.to_owned(), filtered_revs.to_owned());
+ }
+ Ok(Some(as_vec))
+ }
+
/// Python-specific shortcut to save on PyList creation
pub fn head_revs_shortcut(
&self,
) -> Result<Option<Vec<Revision>>, GraphError> {
- self.head_revs_filtered(&HashSet::new(), true)
+ self.head_revs_advanced(&HashSet::new(), None, true)
}
/// Return the heads removed and added by advancing from `begin` to `end`.
@@ -606,61 +670,6 @@
Ok((heads_removed, heads_added))
}
- /// Return the head revisions of this index
- pub fn head_revs_filtered(
- &self,
- filtered_revs: &HashSet<Revision>,
- py_shortcut: bool,
- ) -> Result<Option<Vec<Revision>>, GraphError> {
- {
- let guard = self
- .head_revs
- .read()
- .expect("RwLock on Index.head_revs should not be poisoned");
- let self_head_revs = &guard.0;
- let self_filtered_revs = &guard.1;
- if !self_head_revs.is_empty()
- && filtered_revs == self_filtered_revs
- {
- if py_shortcut {
- // Don't copy the revs since we've already cached them
- // on the Python side.
- return Ok(None);
- } else {
- return Ok(Some(self_head_revs.to_owned()));
- }
- }
- }
-
- let as_vec = if self.is_empty() {
- vec![NULL_REVISION]
- } else {
- let mut not_heads = bitvec![0; self.len()];
- dagops::retain_heads_fast(
- self,
- not_heads.as_mut_bitslice(),
- filtered_revs,
- )?;
- not_heads
- .into_iter()
- .enumerate()
- .filter_map(|(idx, is_not_head)| {
- if is_not_head {
- None
- } else {
- Some(Revision(idx as BaseRevision))
- }
- })
- .collect()
- };
- *self
- .head_revs
- .write()
- .expect("RwLock on Index.head_revs should not be poisoned") =
- (as_vec.to_owned(), filtered_revs.to_owned());
- Ok(Some(as_vec))
- }
-
/// Obtain the delta chain for a revision.
///
/// `stop_rev` specifies a revision to stop at. If not specified, we
@@ -820,7 +829,7 @@
}
let [mut p1, mut p2] = self
.parents(rev)
- .map_err(|_| RevlogError::InvalidRevision)?;
+ .map_err(|e| RevlogError::InvalidRevision(e.to_string()))?;
while let Some(p1_entry) = self.get_entry(p1) {
if p1_entry.compressed_len() != 0 || p1.0 == 0 {
break;
@@ -830,9 +839,9 @@
if parent_base.0 == p1.0 {
break;
}
- p1 = self
- .check_revision(parent_base)
- .ok_or(RevlogError::InvalidRevision)?;
+ p1 = self.check_revision(parent_base).ok_or(
+ RevlogError::InvalidRevision(parent_base.to_string()),
+ )?;
}
while let Some(p2_entry) = self.get_entry(p2) {
if p2_entry.compressed_len() != 0 || p2.0 == 0 {
@@ -843,16 +852,16 @@
if parent_base.0 == p2.0 {
break;
}
- p2 = self
- .check_revision(parent_base)
- .ok_or(RevlogError::InvalidRevision)?;
+ p2 = self.check_revision(parent_base).ok_or(
+ RevlogError::InvalidRevision(parent_base.to_string()),
+ )?;
}
if base == p1.0 || base == p2.0 {
return Ok(false);
}
rev = self
.check_revision(base.into())
- .ok_or(RevlogError::InvalidRevision)?;
+ .ok_or(RevlogError::InvalidRevision(base.to_string()))?;
}
Ok(rev == NULL_REVISION)
}
@@ -1387,6 +1396,7 @@
fn vec_of_empty(sets_size: usize, vec_len: usize) -> Vec<Self>;
/// The size of the bit mask in memory
+ #[allow(unused)]
fn size(&self) -> usize;
/// The number of elements that can be represented in the set.
@@ -1394,12 +1404,14 @@
/// Another way to put it is that it is the highest integer `C` such that
/// the set is guaranteed to always be a subset of the integer range
/// `[0, C)`
+ #[allow(unused)]
fn capacity(&self) -> usize;
/// Declare `n` to belong to the set
fn add(&mut self, n: usize);
/// Declare `n` not to belong to the set
+ #[allow(unused)]
fn discard(&mut self, n: usize);
/// Replace this bit set by its union with other
@@ -1749,6 +1761,9 @@
}
#[cfg(test)]
+pub use tests::IndexEntryBuilder;
+
+#[cfg(test)]
mod tests {
use super::*;
use crate::node::NULL_NODE;
@@ -2027,6 +2042,3 @@
assert_eq!(get_version(&bytes), 2)
}
}
-
-#[cfg(test)]
-pub use tests::IndexEntryBuilder;
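// `head_revs_filtered` is gone; both old behaviors are now arguments to
// `head_revs_advanced`. With a `stop_rev`, heads are computed among the
// revisions strictly below it and the result is not served from the cache.
// Sketch, assuming an `Index` at hand:
use std::collections::HashSet;

use hg::revlog::Index;
use hg::{GraphError, Revision};

fn heads_demo(index: &Index) -> Result<(), GraphError> {
    // All heads, materialized (py_shortcut = false always returns Some).
    let _all = index.head_revs_advanced(&HashSet::new(), None, false)?;
    // Heads among revisions below 10; bypasses the cached head list.
    let _below =
        index.head_revs_advanced(&HashSet::new(), Some(Revision(10)), false)?;
    Ok(())
}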
--- a/rust/hg-core/src/revlog/manifest.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revlog/manifest.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,9 +1,11 @@
+use std::num::NonZeroU8;
+
use crate::errors::HgError;
use crate::revlog::{Node, NodePrefix};
use crate::revlog::{Revlog, RevlogError};
use crate::utils::hg_path::HgPath;
use crate::utils::SliceExt;
-use crate::vfs::Vfs;
+use crate::vfs::VfsImpl;
use crate::{
Graph, GraphError, Revision, RevlogOpenOptions, UncheckedRevision,
};
@@ -23,7 +25,7 @@
impl Manifestlog {
/// Open the `manifest` of a repository given by its root.
pub fn open(
- store_vfs: &Vfs,
+ store_vfs: &VfsImpl,
options: RevlogOpenOptions,
) -> Result<Self, HgError> {
let revlog = Revlog::open(store_vfs, "00manifest.i", None, options)?;
@@ -85,6 +87,11 @@
}
impl Manifest {
+ /// Return a new empty manifest
+ pub fn empty() -> Self {
+ Self { bytes: vec![] }
+ }
+
pub fn iter(
&self,
) -> impl Iterator<Item = Result<ManifestEntry, HgError>> {
@@ -178,7 +185,7 @@
pub hex_node_id: &'manifest [u8],
/// `Some` values are b'x', b'l', or 't'
- pub flags: Option<u8>,
+ pub flags: Option<NonZeroU8>,
}
impl<'a> ManifestEntry<'a> {
@@ -198,7 +205,7 @@
Self {
path: HgPath::new(path),
hex_node_id,
- flags,
+ flags: flags.map(|f| f.try_into().expect("invalid flag")),
}
}
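// Flags are now `Option<NonZeroU8>`: since a flag byte of 0 never occurs,
// the niche makes the `None` case free (1 byte instead of 2). Reading one
// back is a plain comparison:
use std::num::NonZeroU8;

fn is_executable(flags: Option<NonZeroU8>) -> bool {
    flags.map_or(false, |f| f.get() == b'x')
}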
--- a/rust/hg-core/src/revlog/mod.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revlog/mod.rs Sat Oct 26 04:16:00 2024 +0200
@@ -17,6 +17,7 @@
pub mod patch;
use std::borrow::Cow;
+use std::collections::HashSet;
use std::io::Read;
use std::ops::Deref;
use std::path::Path;
@@ -31,8 +32,13 @@
use super::index::Index;
use super::index::INDEX_ENTRY_SIZE;
use super::nodemap::{NodeMap, NodeMapError};
+use crate::config::{Config, ResourceProfileValue};
use crate::errors::HgError;
-use crate::vfs::Vfs;
+use crate::exit_codes;
+use crate::requirements::{
+ GENERALDELTA_REQUIREMENT, NARROW_REQUIREMENT, SPARSEREVLOG_REQUIREMENT,
+};
+use crate::vfs::VfsImpl;
/// As noted in revlog.c, revision numbers are actually encoded in
/// 4 bytes, and are liberally converted to ints, whence the i32
@@ -133,6 +139,16 @@
ParentOutOfRange(Revision),
}
+impl std::fmt::Display for GraphError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ GraphError::ParentOutOfRange(revision) => {
+ write!(f, "parent out of range ({})", revision)
+ }
+ }
+ }
+}
+
impl<T: Graph> Graph for &T {
fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
(*self).parents(rev)
@@ -187,7 +203,8 @@
#[derive(Debug, derive_more::From, derive_more::Display)]
pub enum RevlogError {
- InvalidRevision,
+ #[display(fmt = "invalid revision identifier: {}", "_0")]
+ InvalidRevision(String),
/// Working directory is not supported
WDirUnsupported,
/// Found more than one entry whose ID match the requested prefix
@@ -217,6 +234,383 @@
}
}
+#[derive(derive_more::Display, Debug, Copy, Clone, PartialEq, Eq)]
+pub enum RevlogType {
+ Changelog,
+ Manifestlog,
+ Filelog,
+}
+
+impl TryFrom<usize> for RevlogType {
+ type Error = HgError;
+
+ fn try_from(value: usize) -> Result<Self, Self::Error> {
+ match value {
+ 1001 => Ok(Self::Changelog),
+ 1002 => Ok(Self::Manifestlog),
+ 1003 => Ok(Self::Filelog),
+ t => Err(HgError::abort(
+ format!("Unknown revlog type {}", t),
+ exit_codes::ABORT,
+ None,
+ )),
+ }
+ }
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum CompressionEngine {
+ Zlib {
+ /// Between 0 and 9 included
+ level: u32,
+ },
+ Zstd {
+ /// Between 0 and 22 included
+ level: u32,
+ /// Never used in practice for now
+ threads: u32,
+ },
+ /// No compression is performed
+ None,
+}
+impl CompressionEngine {
+ pub fn set_level(&mut self, new_level: usize) -> Result<(), HgError> {
+ match self {
+ CompressionEngine::Zlib { level } => {
+ if new_level > 9 {
+ return Err(HgError::abort(
+ format!(
+ "invalid compression zlib compression level {}",
+ new_level
+ ),
+ exit_codes::ABORT,
+ None,
+ ));
+ }
+ *level = new_level as u32;
+ }
+ CompressionEngine::Zstd { level, .. } => {
+ if new_level > 22 {
+ return Err(HgError::abort(
+ format!(
+ "invalid compression zstd compression level {}",
+ new_level
+ ),
+ exit_codes::ABORT,
+ None,
+ ));
+ }
+ *level = new_level as u32;
+ }
+ CompressionEngine::None => {}
+ }
+ Ok(())
+ }
+
+ pub fn zstd(
+ zstd_level: Option<u32>,
+ ) -> Result<CompressionEngine, HgError> {
+ let mut engine = CompressionEngine::Zstd {
+ level: 3,
+ threads: 0,
+ };
+ if let Some(level) = zstd_level {
+ engine.set_level(level as usize)?;
+ }
+ Ok(engine)
+ }
+}
+
+impl Default for CompressionEngine {
+ fn default() -> Self {
+ Self::Zlib { level: 6 }
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+/// Holds configuration values about how the revlog data is read
+pub struct RevlogDataConfig {
+ /// Should we try to open the "pending" version of the revlog
+ pub try_pending: bool,
+ /// Should we try to open the "split" version of the revlog
+ pub try_split: bool,
+ /// When True, `indexfile` should be opened with `checkambig=True` at
+ /// writing time, to avoid file stat ambiguity
+ pub check_ambig: bool,
+ /// If true, use mmap instead of reading to deal with large indexes
+ pub mmap_large_index: bool,
+ /// How much data is considered large
+ pub mmap_index_threshold: Option<u64>,
+ /// How much data to read and cache into the raw revlog data cache
+ pub chunk_cache_size: u64,
+ /// The size of the uncompressed cache compared to the largest revision
+ /// seen
+ pub uncompressed_cache_factor: Option<f64>,
+ /// The number of chunks cached
+ pub uncompressed_cache_count: Option<u64>,
+ /// Allow sparse reading of the revlog data
+ pub with_sparse_read: bool,
+ /// Minimal density of a sparse read chunk
+ pub sr_density_threshold: f64,
+ /// Minimal size of the data we skip when performing sparse reads
+ pub sr_min_gap_size: u64,
+ /// Whether deltas are encoded against arbitrary bases
+ pub general_delta: bool,
+}
+
+impl RevlogDataConfig {
+ pub fn new(
+ config: &Config,
+ requirements: &HashSet<String>,
+ ) -> Result<Self, HgError> {
+ let mut data_config = Self::default();
+ if let Some(chunk_cache_size) =
+ config.get_byte_size(b"format", b"chunkcachesize")?
+ {
+ data_config.chunk_cache_size = chunk_cache_size;
+ }
+
+ let memory_profile = config.get_resource_profile(Some("memory"));
+ if memory_profile.value >= ResourceProfileValue::Medium {
+ data_config.uncompressed_cache_count = Some(10_000);
+ data_config.uncompressed_cache_factor = Some(4.0);
+ if memory_profile.value >= ResourceProfileValue::High {
+ data_config.uncompressed_cache_factor = Some(10.0)
+ }
+ }
+
+ if let Some(mmap_index_threshold) = config
+ .get_byte_size(b"storage", b"revlog.mmap.index:size-threshold")?
+ {
+ data_config.mmap_index_threshold = Some(mmap_index_threshold);
+ }
+
+ let with_sparse_read =
+ config.get_bool(b"experimental", b"sparse-read")?;
+ if let Some(sr_density_threshold) = config
+ .get_f64(b"experimental", b"sparse-read.density-threshold")?
+ {
+ data_config.sr_density_threshold = sr_density_threshold;
+ }
+ data_config.with_sparse_read = with_sparse_read;
+ if let Some(sr_min_gap_size) = config
+ .get_byte_size(b"experimental", b"sparse-read.min-gap-size")?
+ {
+ data_config.sr_min_gap_size = sr_min_gap_size;
+ }
+
+ data_config.with_sparse_read =
+ requirements.contains(SPARSEREVLOG_REQUIREMENT);
+
+ Ok(data_config)
+ }
+}
+
+impl Default for RevlogDataConfig {
+ fn default() -> Self {
+ Self {
+ chunk_cache_size: 65536,
+ sr_density_threshold: 0.50,
+ sr_min_gap_size: 262144,
+ try_pending: Default::default(),
+ try_split: Default::default(),
+ check_ambig: Default::default(),
+ mmap_large_index: Default::default(),
+ mmap_index_threshold: Default::default(),
+ uncompressed_cache_factor: Default::default(),
+ uncompressed_cache_count: Default::default(),
+ with_sparse_read: Default::default(),
+ general_delta: Default::default(),
+ }
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+/// Holds configuration values about how new deltas are computed.
+///
+/// Some attributes are duplicated from [`RevlogDataConfig`] to help having
+/// each object self contained.
+pub struct RevlogDeltaConfig {
+ /// Whether deltas can be encoded against arbitrary bases
+ pub general_delta: bool,
+ /// Allow sparse writing of the revlog data
+ pub sparse_revlog: bool,
+ /// Maximum length of a delta chain
+ pub max_chain_len: Option<u64>,
+ /// Maximum distance between a delta chain's start and end
+ pub max_deltachain_span: Option<u64>,
+ /// The expected maximal gain from compression for the data content,
+ /// if an upper bound is known
+ pub upper_bound_comp: Option<f64>,
+ /// Should we try a delta against both parents
+ pub delta_both_parents: bool,
+ /// Test delta base candidate groups by chunks of this maximal size
+ pub candidate_group_chunk_size: u64,
+ /// Should we display debug information about delta computation
+ pub debug_delta: bool,
+ /// Trust incoming deltas by default
+ pub lazy_delta: bool,
+ /// Trust the base of incoming deltas by default
+ pub lazy_delta_base: bool,
+}
+impl RevlogDeltaConfig {
+ pub fn new(
+ config: &Config,
+ requirements: &HashSet<String>,
+ revlog_type: RevlogType,
+ ) -> Result<Self, HgError> {
+ let mut delta_config = Self {
+ delta_both_parents: config
+ .get_option_no_default(
+ b"storage",
+ b"revlog.optimize-delta-parent-choice",
+ )?
+ .unwrap_or(true),
+ candidate_group_chunk_size: config
+ .get_u64(
+ b"storage",
+ b"revlog.delta-parent-search.candidate-group-chunk-size",
+ )?
+ .unwrap_or_default(),
+ ..Default::default()
+ };
+
+ delta_config.debug_delta =
+ config.get_bool(b"debug", b"revlog.debug-delta")?;
+
+ delta_config.general_delta =
+ requirements.contains(GENERALDELTA_REQUIREMENT);
+
+ let lazy_delta =
+ config.get_bool(b"storage", b"revlog.reuse-external-delta")?;
+
+ if revlog_type == RevlogType::Manifestlog {
+ // upper bound of what we expect from compression
+ // (real life value seems to be 3)
+ delta_config.upper_bound_comp = Some(3.0)
+ }
+
+ let mut lazy_delta_base = false;
+ if lazy_delta {
+ lazy_delta_base = match config.get_option_no_default(
+ b"storage",
+ b"revlog.reuse-external-delta-parent",
+ )? {
+ Some(base) => base,
+ None => config.get_bool(b"format", b"generaldelta")?,
+ };
+ }
+ delta_config.lazy_delta = lazy_delta;
+ delta_config.lazy_delta_base = lazy_delta_base;
+
+ delta_config.max_deltachain_span =
+ match config.get_i64(b"experimental", b"maxdeltachainspan")? {
+ Some(span) => {
+ if span < 0 {
+ None
+ } else {
+ Some(span as u64)
+ }
+ }
+ None => None,
+ };
+
+ delta_config.sparse_revlog =
+ requirements.contains(SPARSEREVLOG_REQUIREMENT);
+
+ delta_config.max_chain_len =
+ config.get_byte_size_no_default(b"format", b"maxchainlen")?;
+
+ Ok(delta_config)
+ }
+}
+
+impl Default for RevlogDeltaConfig {
+ fn default() -> Self {
+ Self {
+ delta_both_parents: true,
+ lazy_delta: true,
+ general_delta: Default::default(),
+ sparse_revlog: Default::default(),
+ max_chain_len: Default::default(),
+ max_deltachain_span: Default::default(),
+ upper_bound_comp: Default::default(),
+ candidate_group_chunk_size: Default::default(),
+ debug_delta: Default::default(),
+ lazy_delta_base: Default::default(),
+ }
+ }
+}
+
+#[derive(Debug, Default, Clone, Copy, PartialEq)]
+/// Holds configuration values about the available revlog features
+pub struct RevlogFeatureConfig {
+ /// The compression engine and its options
+ pub compression_engine: CompressionEngine,
+ /// Can we use censor on this revlog
+ pub censorable: bool,
+ /// Does this revlog use the "side data" feature
+ pub has_side_data: bool,
+ /// Might remove this configuration once the rank computation has no
+ /// impact
+ pub compute_rank: bool,
+ /// Parent order is supposed to be semantically irrelevant, so we
+ /// normally re-sort parents to ensure that the first parent is non-null,
+ /// if there is a non-null parent at all.
+ /// filelog abuses the parent order as a flag to mark some instances of
+ /// meta-encoded files, so allow it to disable this behavior.
+ pub canonical_parent_order: bool,
+ /// Whether ellipsis commits can be used
+ pub enable_ellipsis: bool,
+}
+impl RevlogFeatureConfig {
+ pub fn new(
+ config: &Config,
+ requirements: &HashSet<String>,
+ ) -> Result<Self, HgError> {
+ let mut feature_config = Self::default();
+
+ let zlib_level = config.get_u32(b"storage", b"revlog.zlib.level")?;
+ let zstd_level = config.get_u32(b"storage", b"revlog.zstd.level")?;
+
+ feature_config.compression_engine = CompressionEngine::default();
+
+ for requirement in requirements {
+ if requirement.starts_with("revlog-compression-")
+ || requirement.starts_with("exp-compression-")
+ {
+ let split = &mut requirement.splitn(3, '-');
+ split.next();
+ split.next();
+ feature_config.compression_engine = match split.next().unwrap()
+ {
+ "zstd" => CompressionEngine::zstd(zstd_level)?,
+ e => {
+ return Err(HgError::UnsupportedFeature(format!(
+ "Unsupported compression engine '{e}'"
+ )))
+ }
+ };
+ }
+ }
+ if let Some(level) = zlib_level {
+ if matches!(
+ feature_config.compression_engine,
+ CompressionEngine::Zlib { .. }
+ ) {
+ feature_config
+ .compression_engine
+ .set_level(level as usize)?;
+ }
+ }
+
+ feature_config.enable_ellipsis =
+ requirements.contains(NARROW_REQUIREMENT);
+
+ Ok(feature_config)
+ }
+}
+
/// Read only implementation of revlog.
pub struct Revlog {
/// When index and data are not interleaved: bytes of the revlog index.
@@ -235,10 +629,10 @@
}
}
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq)]
pub enum RevlogVersionOptions {
V0,
- V1 { generaldelta: bool },
+ V1 { general_delta: bool, inline: bool },
V2,
ChangelogV2 { compute_rank: bool },
}
@@ -251,24 +645,65 @@
pub version: RevlogVersionOptions,
/// Whether the revlog uses a persistent nodemap.
pub use_nodemap: bool,
- // TODO other non-header/version options,
+ pub delta_config: RevlogDeltaConfig,
+ pub data_config: RevlogDataConfig,
+ pub feature_config: RevlogFeatureConfig,
+}
+
+#[cfg(test)]
+impl Default for RevlogOpenOptions {
+ fn default() -> Self {
+ Self {
+ version: RevlogVersionOptions::V1 {
+ general_delta: true,
+ inline: false,
+ },
+ use_nodemap: true,
+ data_config: Default::default(),
+ delta_config: Default::default(),
+ feature_config: Default::default(),
+ }
+ }
}
impl RevlogOpenOptions {
- pub fn new() -> Self {
+ pub fn new(
+ inline: bool,
+ data_config: RevlogDataConfig,
+ delta_config: RevlogDeltaConfig,
+ feature_config: RevlogFeatureConfig,
+ ) -> Self {
Self {
- version: RevlogVersionOptions::V1 { generaldelta: true },
+ version: RevlogVersionOptions::V1 {
+ general_delta: data_config.general_delta,
+ inline,
+ },
use_nodemap: false,
+ data_config,
+ delta_config,
+ feature_config,
}
}
- fn default_index_header(&self) -> index::IndexHeader {
+ pub fn index_header(&self) -> index::IndexHeader {
index::IndexHeader {
header_bytes: match self.version {
RevlogVersionOptions::V0 => [0, 0, 0, 0],
- RevlogVersionOptions::V1 { generaldelta } => {
- [0, if generaldelta { 3 } else { 1 }, 0, 1]
- }
+ RevlogVersionOptions::V1 {
+ general_delta,
+ inline,
+ } => [
+ 0,
+ if general_delta && inline {
+ 3
+ } else if general_delta {
+ 2
+ } else {
+ u8::from(inline)
+ },
+ 0,
+ 1,
+ ],
RevlogVersionOptions::V2 => 0xDEADu32.to_be_bytes(),
RevlogVersionOptions::ChangelogV2 { compute_rank: _ } => {
0xD34Du32.to_be_bytes()
@@ -278,19 +713,14 @@
}
}
-impl Default for RevlogOpenOptions {
- fn default() -> Self {
- Self::new()
- }
-}
-
impl Revlog {
/// Open a revlog index file.
///
/// It will also open the associated data file if index and data are not
/// interleaved.
pub fn open(
- store_vfs: &Vfs,
+ // TODO use the `Vfs` trait here once we create a function for mmap
+ store_vfs: &VfsImpl,
index_path: impl AsRef<Path>,
data_path: Option<&Path>,
options: RevlogOpenOptions,
@@ -299,7 +729,8 @@
}
fn open_gen(
- store_vfs: &Vfs,
+ // TODO use the `Vfs` trait here once we create a function for mmap
+ store_vfs: &VfsImpl,
index_path: impl AsRef<Path>,
data_path: Option<&Path>,
options: RevlogOpenOptions,
@@ -310,12 +741,12 @@
match store_vfs.mmap_open_opt(index_path)? {
None => Index::new(
Box::<Vec<_>>::default(),
- options.default_index_header(),
+ options.index_header(),
),
Some(index_mmap) => {
let index = Index::new(
Box::new(index_mmap),
- options.default_index_header(),
+ options.index_header(),
)?;
Ok(index)
}
@@ -329,6 +760,9 @@
let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
if index.is_inline() {
None
+ } else if index.is_empty() {
+ // No need to even try to open the data file then.
+ Some(Box::new(&[][..]))
} else {
let data_path = data_path.unwrap_or(&default_data_path);
let data_mmap = store_vfs.mmap_open(data_path)?;
@@ -386,7 +820,7 @@
if let Some(nodemap) = &self.nodemap {
nodemap
.find_bin(&self.index, node)?
- .ok_or(RevlogError::InvalidRevision)
+ .ok_or(RevlogError::InvalidRevision(format!("{:x}", node)))
} else {
self.rev_from_node_no_persistent_nodemap(node)
}
@@ -428,7 +862,8 @@
found_by_prefix = Some(rev)
}
}
- found_by_prefix.ok_or(RevlogError::InvalidRevision)
+ found_by_prefix
+ .ok_or(RevlogError::InvalidRevision(format!("{:x}", node)))
}
/// Returns whether the given revision exists in this revlog.
@@ -537,7 +972,7 @@
let index_entry = self
.index
.get_entry(rev)
- .ok_or(RevlogError::InvalidRevision)?;
+ .ok_or(RevlogError::InvalidRevision(rev.to_string()))?;
let offset = index_entry.offset();
let start = if self.index.is_inline() {
offset + ((rev.0 as usize + 1) * INDEX_ENTRY_SIZE)
@@ -745,7 +1180,7 @@
} else {
if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
return Err(HgError::unsupported(
- "ellipsis revisions are not supported by rhg",
+ "support for ellipsis nodes is missing",
)
.into());
}
@@ -877,10 +1312,13 @@
#[test]
fn test_empty() {
let temp = tempfile::tempdir().unwrap();
- let vfs = Vfs { base: temp.path() };
+ let vfs = VfsImpl {
+ base: temp.path().to_owned(),
+ };
std::fs::write(temp.path().join("foo.i"), b"").unwrap();
+ std::fs::write(temp.path().join("foo.d"), b"").unwrap();
let revlog =
- Revlog::open(&vfs, "foo.i", None, RevlogOpenOptions::new())
+ Revlog::open(&vfs, "foo.i", None, RevlogOpenOptions::default())
.unwrap();
assert!(revlog.is_empty());
assert_eq!(revlog.len(), 0);
@@ -898,7 +1336,9 @@
#[test]
fn test_inline() {
let temp = tempfile::tempdir().unwrap();
- let vfs = Vfs { base: temp.path() };
+ let vfs = VfsImpl {
+ base: temp.path().to_owned(),
+ };
let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
.unwrap();
let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
@@ -923,7 +1363,7 @@
.collect_vec();
std::fs::write(temp.path().join("foo.i"), contents).unwrap();
let revlog =
- Revlog::open(&vfs, "foo.i", None, RevlogOpenOptions::new())
+ Revlog::open(&vfs, "foo.i", None, RevlogOpenOptions::default())
.unwrap();
let entry0 = revlog.get_entry(0.into()).ok().unwrap();
@@ -965,7 +1405,9 @@
#[test]
fn test_nodemap() {
let temp = tempfile::tempdir().unwrap();
- let vfs = Vfs { base: temp.path() };
+ let vfs = VfsImpl {
+ base: temp.path().to_owned(),
+ };
// building a revlog with a forced Node starting with zeros
// This is a corruption, but it does not preclude using the nodemap
@@ -995,7 +1437,7 @@
&vfs,
"foo.i",
None,
- RevlogOpenOptions::new(),
+ RevlogOpenOptions::default(),
Some(idx.nt),
)
.unwrap();
--- a/rust/hg-core/src/revlog/node.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revlog/node.rs Sat Oct 26 04:16:00 2024 +0200
@@ -83,7 +83,7 @@
#[inline]
fn try_from(bytes: &'a [u8]) -> Result<Self, Self::Error> {
match Node::from_bytes(bytes) {
- Ok((node, rest)) if rest.is_empty() => Ok(node),
+ Ok((node, [])) => Ok(node),
_ => Err(()),
}
}
@@ -323,6 +323,9 @@
}
#[cfg(test)]
+pub use tests::hex_pad_right;
+
+#[cfg(test)]
mod tests {
use super::*;
@@ -428,6 +431,3 @@
assert_eq!(prefix.first_different_nybble(&node), None);
}
}
-
-#[cfg(test)]
-pub use tests::hex_pad_right;
--- a/rust/hg-core/src/revlog/nodemap_docket.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revlog/nodemap_docket.rs Sat Oct 26 04:16:00 2024 +0200
@@ -3,7 +3,7 @@
use memmap2::Mmap;
use std::path::{Path, PathBuf};
-use crate::vfs::Vfs;
+use crate::vfs::VfsImpl;
const ONDISK_VERSION: u8 = 1;
@@ -33,7 +33,7 @@
/// * The docket file points to a missing (likely deleted) data file (this
/// can happen in a rare race condition).
pub fn read_from_file(
- store_vfs: &Vfs,
+ store_vfs: &VfsImpl,
index_path: &Path,
) -> Result<Option<(Self, Mmap)>, HgError> {
let docket_path = index_path.with_extension("n");
--- a/rust/hg-core/src/revset.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/revset.rs Sat Oct 26 04:16:00 2024 +0200
@@ -28,9 +28,9 @@
}
match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
- Err(RevlogError::InvalidRevision) => {
+ Err(RevlogError::InvalidRevision(revision)) => {
// TODO: support for the rest of the language here.
- let msg = format!("cannot parse revset '{}'", input);
+ let msg = format!("cannot parse revset '{}'", revision);
Err(HgError::unsupported(msg).into())
}
result => result,
@@ -67,5 +67,5 @@
}
return revlog.rev_from_node(prefix);
}
- Err(RevlogError::InvalidRevision)
+ Err(RevlogError::InvalidRevision(input.to_string()))
}
--- a/rust/hg-core/src/sparse.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/sparse.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,14 +1,16 @@
-use std::{collections::HashSet, path::Path};
+use std::{collections::HashSet, fmt::Display, path::Path};
-use format_bytes::{write_bytes, DisplayBytes};
+use format_bytes::{format_bytes, write_bytes, DisplayBytes};
use crate::{
errors::HgError,
+ exit_codes::STATE_ERROR,
filepatterns::parse_pattern_file_contents,
matchers::{
AlwaysMatcher, DifferenceMatcher, IncludeMatcher, Matcher,
UnionMatcher,
},
+ narrow::VALID_PREFIXES,
operations::cat,
repo::Repo,
requirements::SPARSE_REQUIREMENT,
@@ -36,6 +38,15 @@
}
}
+impl Display for SparseConfigContext {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ SparseConfigContext::Sparse => write!(f, "sparse"),
+ SparseConfigContext::Narrow => write!(f, "narrow"),
+ }
+ }
+}
+
/// Possible warnings when reading sparse configuration
#[derive(Debug, derive_more::From)]
pub enum SparseWarning {
@@ -82,6 +93,59 @@
PatternError(PatternError),
}
+impl From<SparseConfigError> for HgError {
+ fn from(value: SparseConfigError) -> Self {
+ match value {
+ SparseConfigError::IncludesAfterExcludes { context } => {
+ HgError::Abort {
+ message: format!(
+ "{} config cannot have includes after excludes",
+ context,
+ ),
+ detailed_exit_code: STATE_ERROR,
+ hint: None,
+ }
+ }
+ SparseConfigError::EntryOutsideSection { context, line } => {
+ HgError::Abort {
+ message: format!(
+ "{} config entry outside of section: {}",
+ context,
+ String::from_utf8_lossy(&line)
+ ),
+ detailed_exit_code: STATE_ERROR,
+ hint: None,
+ }
+ }
+ SparseConfigError::IncludesInNarrow => HgError::Abort {
+ message: "including other spec files using '%include' is not \
+ supported in narrowspec"
+ .to_string(),
+ detailed_exit_code: STATE_ERROR,
+ hint: None,
+ },
+ SparseConfigError::InvalidNarrowPrefix(vec) => HgError::Abort {
+ message: String::from_utf8_lossy(&format_bytes!(
+ b"invalid prefix on narrow pattern: {}",
+ vec
+ ))
+ .to_string(),
+ detailed_exit_code: STATE_ERROR,
+ hint: Some(format!(
+ "narrow patterns must begin with one of the following: {}",
+ VALID_PREFIXES.join(", ")
+ )),
+ },
+ SparseConfigError::HgError(hg_error) => hg_error,
+ SparseConfigError::PatternError(pattern_error) => HgError::Abort {
+ message: pattern_error.to_string(),
+ detailed_exit_code: STATE_ERROR,
+ hint: None,
+ },
+ }
+ }
+}
+
/// Parse sparse config file content.
pub(crate) fn parse_config(
raw: &[u8],
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/update.rs Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,491 @@
+//! Tools for moving the repository to a given revision
+
+use std::{
+ fs::Permissions,
+ io::Write,
+ os::unix::fs::{MetadataExt, PermissionsExt},
+ path::Path,
+ time::Duration,
+};
+
+use crate::{
+ dirstate::{ParentFileData, TruncatedTimestamp},
+ dirstate_tree::{
+ dirstate_map::DirstateEntryReset, on_disk::write_tracked_key,
+ },
+ errors::{HgError, IoResultExt},
+ exit_codes,
+ filelog::Filelog,
+ narrow,
+ node::NULL_NODE,
+ operations::{list_rev_tracked_files, ExpandedManifestEntry},
+ progress::Progress,
+ repo::Repo,
+ sparse,
+ utils::{
+ files::{filesystem_now, get_path_from_bytes},
+ hg_path::{hg_path_to_path_buf, HgPath, HgPathError},
+ path_auditor::PathAuditor,
+ },
+ vfs::{is_on_nfs_mount, VfsImpl},
+ DirstateParents, RevlogError, RevlogOpenOptions, UncheckedRevision,
+};
+use crossbeam_channel::{Receiver, Sender};
+use rayon::prelude::*;
+
+fn write_dirstate(repo: &Repo) -> Result<(), HgError> {
+ repo.write_dirstate()
+ .map_err(|e| HgError::abort(e.to_string(), exit_codes::ABORT, None))?;
+ write_tracked_key(repo)
+}
+
+/// Update the current working copy of `repo` to the given revision `to`, from
+/// the null revision, then set and write out the dirstate to reflect that.
+///
+/// Do not call this outside of a Python context. This does *not* handle any
+/// of the checks, hooks, or lock taking needed to set up and get out of this
+/// update from the null revision.
+pub fn update_from_null(
+ repo: &Repo,
+ to: UncheckedRevision,
+ progress: &dyn Progress,
+) -> Result<usize, HgError> {
+ // Ignore the warnings, they've been displayed by Python already
+ // TODO non-Python clients: display narrow warnings
+ let (narrow_matcher, _) = narrow::matcher(repo)?;
+
+ let files_for_rev = list_rev_tracked_files(repo, to, narrow_matcher)
+ .map_err(handle_revlog_error)?;
+ repo.manually_set_parents(DirstateParents {
+ p1: repo.node(to).expect("update target should exist"),
+ p2: NULL_NODE,
+ })?;
+
+ // Filter the working copy according to the sparse spec
+ let tracked_files: Result<Vec<_>, _> = if !repo.has_sparse() {
+ files_for_rev.iter().collect()
+ } else {
+ // Ignore the warnings, they've been displayed by Python already
+ // TODO non-Python clients: display sparse warnings
+ let (sparse_matcher, _) = sparse::matcher(repo)?;
+ files_for_rev
+ .iter()
+ .filter(|f| {
+ match f {
+ Ok(f) => sparse_matcher.matches(f.0),
+ Err(_) => true, // Errors stop the update, include them
+ }
+ })
+ .collect()
+ };
+ let tracked_files = tracked_files?;
+
+ if tracked_files.is_empty() {
+ // Still write the dirstate because we might not be in the null
+ // revision.
+ // This can happen in narrow repos where all paths are excluded in
+ // this revision.
+ write_dirstate(repo)?;
+ return Ok(0);
+ }
+ let store_vfs = &repo.store_vfs();
+ let options = repo.default_revlog_options(crate::RevlogType::Filelog)?;
+ let (errors_sender, errors_receiver) = crossbeam_channel::unbounded();
+ let (files_sender, files_receiver) = crossbeam_channel::unbounded();
+ let working_directory_path = &repo.working_directory_path();
+
+ let files_count = tracked_files.len();
+ let chunks = chunk_tracked_files(tracked_files);
+ progress.update(0, Some(files_count as u64));
+
+ create_working_copy(
+ chunks,
+ working_directory_path,
+ store_vfs,
+ options,
+ files_sender,
+ errors_sender,
+ progress,
+ );
+
+ let errors: Vec<HgError> = errors_receiver.iter().collect();
+ if !errors.is_empty() {
+ log::debug!("{} errors during update (see trace logs)", errors.len());
+ for error in errors.iter() {
+ log::trace!("{}", error);
+ }
+ // Best we can do is raise the first error (in order of the channel)
+ return Err(errors.into_iter().next().expect("can never be empty"));
+ }
+
+ // TODO try to run this concurrently to update the dirstate while we're
+ // still writing out the working copy to see if that improves performance.
+ let total = update_dirstate(repo, files_receiver)?;
+
+ write_dirstate(repo)?;
+
+ Ok(total)
+}
+
+fn handle_revlog_error(e: RevlogError) -> HgError {
+ match e {
+ crate::RevlogError::Other(hg_error) => hg_error,
+ e => HgError::abort(
+ format!("revlog error: {}", e),
+ exit_codes::ABORT,
+ None,
+ ),
+ }
+}
+
+/// Preallocated size of Vec holding directory contents. This aims at
+/// preventing the need for re-allocating the Vec in most cases.
+///
+/// The value was picked arbitrarily, a little over the average number of
+/// files per directory observed in a few larger open-source repos.
+/// Most of the runtime is IO anyway, so this doesn't matter too much.
+const FILES_PER_DIRECTORY: usize = 16;
+
+/// Chunk files per directory prefix, so almost every directory is handled
+/// in a separate thread, which works around the FS inode mutex.
+/// Chunking less (roughly `files_count`/`threads` chunks) actually ends up
+/// being less performant: the hypothesis is that `rayon`'s work stealing is
+/// more efficient with tasks of varying lengths.
+#[logging_timer::time("trace")]
+fn chunk_tracked_files(
+ tracked_files: Vec<ExpandedManifestEntry>,
+) -> Vec<(&HgPath, Vec<ExpandedManifestEntry>)> {
+ let files_count = tracked_files.len();
+
+ let mut chunks = Vec::with_capacity(files_count / FILES_PER_DIRECTORY);
+
+ let mut current_chunk = Vec::with_capacity(FILES_PER_DIRECTORY);
+ let mut last_directory = tracked_files[0].0.parent();
+
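+ // Manifest entries are sorted by path, so files from the same
+ // directory are contiguous and a single linear pass can group them.
+ // `tracked_files` is never empty here: the caller returns early in
+ // that case.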
+ for file_info in tracked_files {
+ let current_directory = file_info.0.parent();
+ let different_directory = current_directory != last_directory;
+ if different_directory {
+ chunks.push((last_directory, current_chunk));
+ current_chunk = Vec::with_capacity(FILES_PER_DIRECTORY);
+ }
+ current_chunk.push(file_info);
+ last_directory = current_directory;
+ }
+ chunks.push((last_directory, current_chunk));
+ chunks
+}
+
+#[logging_timer::time("trace")]
+fn create_working_copy<'a: 'b, 'b>(
+ chunks: Vec<(&HgPath, Vec<ExpandedManifestEntry<'a>>)>,
+ working_directory_path: &Path,
+ store_vfs: &VfsImpl,
+ options: RevlogOpenOptions,
+ files_sender: Sender<(&'b HgPath, u32, usize, TruncatedTimestamp)>,
+ error_sender: Sender<HgError>,
+ progress: &dyn Progress,
+) {
+ let auditor = PathAuditor::new(working_directory_path);
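+ // A single `PathAuditor` is shared by all the worker threads below.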
+ chunks.into_par_iter().for_each(|(dir_path, chunk)| {
+ if let Err(e) = working_copy_worker(
+ dir_path,
+ chunk,
+ working_directory_path,
+ store_vfs,
+ options,
+ &files_sender,
+ progress,
+ &auditor,
+ ) {
+ error_sender
+ .send(e)
+ .expect("channel should not be disconnected")
+ }
+ });
+}
+
+/// Represents a work unit for a single thread, responsible for this set of
+/// files and restoring them to the working copy.
+#[allow(clippy::too_many_arguments)]
+fn working_copy_worker<'a: 'b, 'b>(
+ dir_path: &HgPath,
+ chunk: Vec<ExpandedManifestEntry<'a>>,
+ working_directory_path: &Path,
+ store_vfs: &VfsImpl,
+ options: RevlogOpenOptions,
+ files_sender: &Sender<(&'b HgPath, u32, usize, TruncatedTimestamp)>,
+ progress: &dyn Progress,
+ auditor: &PathAuditor,
+) -> Result<(), HgError> {
+ let dir_path =
+ hg_path_to_path_buf(dir_path).expect("invalid path in manifest");
+ let dir_path = working_directory_path.join(dir_path);
+ std::fs::create_dir_all(&dir_path).when_writing_file(&dir_path)?;
+
+ for (file, file_node, flags) in chunk {
+ auditor.audit_path(file)?;
+ let flags = flags.map(|f| f.into());
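+ // Manifest flags: b'l' marks a symlink, b'x' an executable file and
+ // b't' a treemanifest directory (not supported here).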
+ let path =
+ working_directory_path.join(get_path_from_bytes(file.as_bytes()));
+
+ // Treemanifest is not supported
+ assert!(flags != Some(b't'));
+
+ let filelog = Filelog::open_vfs(store_vfs, file, options)?;
+ let filelog_revision_data = &filelog
+ .data_for_node(file_node)
+ .map_err(handle_revlog_error)?;
+ let file_data = filelog_revision_data.file_data()?;
+
+ if flags == Some(b'l') {
+ let target = get_path_from_bytes(file_data);
+ if let Err(e) = std::os::unix::fs::symlink(target, &path) {
+ // If the path already exists either:
+ // - another process created this file while ignoring the
+ // lock => error
+ // - our check for the fast path is incorrect => error
+ // - this is a malicious repo/bundle and this is a symlink that
+ // tries to write things where it shouldn't be able to.
+ match e.kind() {
+ std::io::ErrorKind::AlreadyExists => {
+ let metadata = std::fs::symlink_metadata(&path)
+ .when_reading_file(&path)?;
+ if metadata.is_dir() {
+ return Err(HgError::Path(
+ HgPathError::TraversesSymbolicLink {
+ // Technically it should be one of the
+ // children, but good enough
+ path: file
+ .join(HgPath::new(b"*"))
+ .to_owned(),
+ symlink: file.to_owned(),
+ },
+ ));
+ }
+ return Err(e).when_writing_file(&path);
+ }
+ _ => return Err(e).when_writing_file(&path),
+ }
+ }
+ } else {
+ let mut f =
+ std::fs::File::create(&path).when_writing_file(&path)?;
+ f.write_all(file_data).when_writing_file(&path)?;
+ }
+ if flags == Some(b'x') {
+ std::fs::set_permissions(&path, Permissions::from_mode(0o755))
+ .when_writing_file(&path)?;
+ }
+ let metadata =
+ std::fs::symlink_metadata(&path).when_reading_file(&path)?;
+
+ let mode = metadata.mode();
+
+ files_sender
+ .send((
+ file,
+ mode,
+ file_data.len(),
+ TruncatedTimestamp::for_mtime_of(&metadata)
+ .when_reading_file(&path)?,
+ ))
+ .expect("channel should not be closed");
+ progress.increment(1, None);
+ }
+ Ok(())
+}
+
+#[logging_timer::time("trace")]
+fn update_dirstate(
+ repo: &Repo,
+ files_receiver: Receiver<(&HgPath, u32, usize, TruncatedTimestamp)>,
+) -> Result<usize, HgError> {
+ let mut dirstate = repo
+ .dirstate_map_mut()
+ .map_err(|e| HgError::abort(e.to_string(), exit_codes::ABORT, None))?;
+
+ // (see the comments in `filter_ambiguous_files` in `merge.py` for more)
+ // It turns out that (on Linux at least) the time resolution of most
+ // filesystems is based on the HZ kernel config. Their internal clocks do
+ // return nanoseconds if the hardware clock is precise enough, which should
+ // be the case on most recent computers, but they are only updated every
+ // few milliseconds at best (every "jiffy").
+ //
+ // We are still not concerned with fixing the race with other
+ // processes that might modify the working copy right after it was created
+ // within the same tick, because it is impossible to catch.
+ // However, we might as well not race with operations that could run right
+ // after this one, especially other Mercurial operations that could be
+ // waiting for the wlock to change file contents and the dirstate.
+ //
+ // Thus: wait until the filesystem clock has ticked to filter ambiguous
+ // entries and write the dirstate, but only for dirstate-v2, since v1 only
+ // has second-level granularity and waiting for a whole second is too much
+ // of a penalty in the general case.
+ // Although we're assuming that people running dirstate-v2 on Linux
+ // don't have a second-granularity FS (with the exclusion of NFS), users
+ // can be surprising, and at some point in the future dirstate-v2 will
+ // become the default. To that end, we limit the wait time to 100ms and
+ // fall back to the filter method in case of a timeout.
+ //
+ // +------------+------+--------------+
+ // | version | wait | filter level |
+ // +------------+------+--------------+
+ // | V1 | No | Second |
+ // | V2 | Yes | Nanosecond |
+ // | V2-slow-fs | No | Second |
+ // +------------+------+--------------+
+ let dirstate_v2 = repo.use_dirstate_v2();
+
+ // Let's ignore NFS right off the bat
+ let mut fast_enough_fs = !is_on_nfs_mount(repo.working_directory_path());
+ let fs_time_now = if dirstate_v2 && fast_enough_fs {
+ match wait_until_fs_tick(repo.working_directory_path()) {
+ None => None,
+ Some(Ok(time)) => Some(time),
+ Some(Err(time)) => {
+ fast_enough_fs = false;
+ Some(time)
+ }
+ }
+ } else {
+ filesystem_now(repo.working_directory_path())
+ .ok()
+ .map(TruncatedTimestamp::from)
+ };
+
+ let mut total = 0;
+ for (filename, mode, size, mtime) in files_receiver.into_iter() {
+ total += 1;
+ // When using dirstate-v2 on a filesystem with reasonable performance
+ // this is basically always true unless you get an mtime from the
+ // far future.
+ let has_meaningful_mtime = if let Some(fs_time) = fs_time_now {
+ mtime.for_reliable_mtime_of_self(&fs_time).is_some_and(|t| {
+ // Dirstate-v1 only has second-level information
+ !t.second_ambiguous || dirstate_v2 && fast_enough_fs
+ })
+ } else {
+ // We somehow failed to write to the filesystem, so don't store
+ // the cache information.
+ false
+ };
+ let reset = DirstateEntryReset {
+ filename,
+ wc_tracked: true,
+ p1_tracked: true,
+ p2_info: false,
+ has_meaningful_mtime,
+ parent_file_data_opt: Some(ParentFileData {
+ mode_size: Some((
+ mode,
+ size.try_into().expect("invalid file size in manifest"),
+ )),
+ mtime: Some(mtime),
+ }),
+ from_empty: true,
+ };
+ dirstate.reset_state(reset).map_err(|e| {
+ HgError::abort(e.to_string(), exit_codes::ABORT, None)
+ })?;
+ }
+
+ Ok(total)
+}
+
+/// Wait until the next update from the filesystem time by writing in a loop
+/// a new temporary file inside the working directory and checking if its time
+/// differs from the first one observed.
+///
+/// Returns `None` if we are unable to get the filesystem time,
+/// `Some(Err(timestamp))` if we've timed out waiting for the filesystem clock
+/// to tick, and `Some(Ok(timestamp))` if we've waited successfully.
+///
+/// On Linux, your average tick is going to be a "jiffy", or 1/HZ.
+/// HZ is your kernel's tick rate (if it has one configured) and the value
+/// is the one returned by `grep 'CONFIG_HZ=' /boot/config-$(uname -r)`,
+/// again assuming a normal setup.
+///
+/// In my case (Alphare) at the time of writing, I get `CONFIG_HZ=250`,
+/// which equates to 4ms.
+///
+/// This might change with a series that could make it to Linux 6.12:
+/// https://lore.kernel.org/all/20241002-mgtime-v10-8-d1c4717f5284@kernel.org
+fn wait_until_fs_tick(
+ working_directory_path: &Path,
+) -> Option<Result<TruncatedTimestamp, TruncatedTimestamp>> {
+ let start = std::time::Instant::now();
+ let old_fs_time = filesystem_now(working_directory_path).ok()?;
+ let mut fs_time = filesystem_now(working_directory_path).ok()?;
+
+ const FS_TICK_WAIT_TIMEOUT: Duration = Duration::from_millis(100);
+
+ while fs_time == old_fs_time {
+ if std::time::Instant::now() - start > FS_TICK_WAIT_TIMEOUT {
+ log::trace!(
+ "timed out waiting for the fs clock to tick after {:?}",
+ FS_TICK_WAIT_TIMEOUT
+ );
+ return Some(Err(TruncatedTimestamp::from(old_fs_time)));
+ }
+ fs_time = filesystem_now(working_directory_path).ok()?;
+ }
+ log::trace!(
+ "waited for {:?} before writing the dirstate",
+ fs_time.duration_since(old_fs_time)
+ );
+ Some(Ok(TruncatedTimestamp::from(fs_time)))
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use pretty_assertions::assert_eq;
+
+ #[test]
+ fn test_chunk_tracked_files() {
+ fn chunk(v: Vec<&'static str>) -> Vec<ExpandedManifestEntry> {
+ v.into_iter()
+ .map(|f| (HgPath::new(f.as_bytes()), NULL_NODE, None))
+ .collect()
+ }
+ let p = HgPath::new;
+
+ let files = chunk(vec!["a"]);
+ let expected = vec![(p(""), chunk(vec!["a"]))];
+ assert_eq!(chunk_tracked_files(files), expected);
+
+ let files = chunk(vec!["a", "b", "c"]);
+ let expected = vec![(p(""), chunk(vec!["a", "b", "c"]))];
+ assert_eq!(chunk_tracked_files(files), expected);
+
+ let files = chunk(vec![
+ "dir/a-new",
+ "dir/a/mut",
+ "dir/a/mut-mut",
+ "dir/albert",
+ "dir/b",
+ "dir/subdir/c",
+ "dir/subdir/d",
+ "file",
+ ]);
+ let expected = vec![
+ (p("dir"), chunk(vec!["dir/a-new"])),
+ (p("dir/a"), chunk(vec!["dir/a/mut", "dir/a/mut-mut"])),
+ (p("dir"), chunk(vec!["dir/albert", "dir/b"])),
+ (p("dir/subdir"), chunk(vec!["dir/subdir/c", "dir/subdir/d"])),
+ (p(""), chunk(vec!["file"])),
+ ];
+ assert_eq!(chunk_tracked_files(files), expected);
+
+ // Doesn't get split
+ let large_dir = vec![
+ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12",
+ "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23",
+ ];
+ let files = chunk(large_dir.clone());
+ let expected = vec![(p(""), chunk(large_dir))];
+ assert_eq!(chunk_tracked_files(files), expected);
+ }
+}
--- a/rust/hg-core/src/utils/files.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/utils/files.rs Sat Oct 26 04:16:00 2024 +0200
@@ -16,11 +16,15 @@
};
use lazy_static::lazy_static;
use same_file::is_same_file;
-use std::borrow::{Cow, ToOwned};
use std::ffi::{OsStr, OsString};
use std::iter::FusedIterator;
use std::ops::Deref;
use std::path::{Path, PathBuf};
+use std::{
+ borrow::{Cow, ToOwned},
+ io,
+ time::SystemTime,
+};
pub fn get_os_str_from_bytes(bytes: &[u8]) -> &OsStr {
let os_str;
@@ -306,6 +310,25 @@
}
}
+/// Return the `mtime` of a temporary file newly created in the `.hg` directory
+/// of the given repository.
+///
+/// This is similar to `SystemTime::now()`, with the result truncated to the
+/// same time resolution as other files’ modification times. Using `.hg`
+/// instead of the system’s default temporary directory (such as `/tmp`) makes
+/// it more likely the temporary file is in the same disk partition as contents
+/// of the working directory, which can matter since different filesystems may
+/// store timestamps with different resolutions.
+///
+/// This may fail, typically if we lack write permissions. In that case we
+/// should continue the `status()` algorithm anyway and consider the current
+/// date/time to be unknown.
+pub fn filesystem_now(repo_root: &Path) -> Result<SystemTime, io::Error> {
+ tempfile::tempfile_in(repo_root.join(".hg"))?
+ .metadata()?
+ .modified()
+}
+
#[cfg(test)]
mod tests {
use super::*;
--- a/rust/hg-core/src/utils/hg_path.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/utils/hg_path.rs Sat Oct 26 04:16:00 2024 +0200
@@ -5,6 +5,7 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+use crate::errors::HgError;
use crate::utils::SliceExt;
use std::borrow::Borrow;
use std::borrow::Cow;
@@ -48,6 +49,12 @@
},
}
+impl From<HgPathError> for HgError {
+ fn from(value: HgPathError) -> Self {
+ HgError::Path(value)
+ }
+}
+
impl fmt::Display for HgPathError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
@@ -76,41 +83,34 @@
bytes
),
HgPathError::EndsWithSlash(path) => {
- write!(f, "Audit failed for '{}': ends with a slash.", path)
+ write!(f, "path '{}': ends with a slash", path)
}
- HgPathError::ContainsIllegalComponent(path) => write!(
- f,
- "Audit failed for '{}': contains an illegal component.",
- path
- ),
- HgPathError::InsideDotHg(path) => write!(
- f,
- "Audit failed for '{}': is inside the '.hg' folder.",
- path
- ),
+ HgPathError::ContainsIllegalComponent(path) => {
+ write!(f, "path contains illegal component: {}", path)
+ }
+ HgPathError::InsideDotHg(path) => {
+ write!(f, "path '{}' is inside the '.hg' folder", path)
+ }
HgPathError::IsInsideNestedRepo {
path,
nested_repo: nested,
} => {
- write!(f,
- "Audit failed for '{}': is inside a nested repository '{}'.",
- path, nested
- )
+ write!(f, "path '{}' is inside nested repo '{}'", path, nested)
}
HgPathError::TraversesSymbolicLink { path, symlink } => write!(
f,
- "Audit failed for '{}': traverses symbolic link '{}'.",
+ "path '{}' traverses symbolic link '{}'",
path, symlink
),
HgPathError::NotFsCompliant(path) => write!(
f,
- "Audit failed for '{}': cannot be turned into a \
- filesystem path.",
+ "path '{}' cannot be turned into a \
+ filesystem path",
path
),
HgPathError::NotUnderRoot { path, root } => write!(
f,
- "Audit failed for '{}': not under root {}.",
+ "path '{}' not under root {}",
path.display(),
root.display()
),
--- a/rust/hg-core/src/utils/path_auditor.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/utils/path_auditor.rs Sat Oct 26 04:16:00 2024 +0200
@@ -180,7 +180,7 @@
#[cfg(test)]
mod tests {
use super::*;
- use std::fs::{create_dir, File};
+ use std::fs::{create_dir, create_dir_all, File};
use tempfile::tempdir;
#[test]
@@ -203,6 +203,16 @@
})
);
+ create_dir_all(base_dir_path.join("this/is/nested/.hg")).unwrap();
+ let path = HgPath::new(b"this/is/nested/repo");
+ assert_eq!(
+ auditor.audit_path(path),
+ Err(HgPathError::IsInsideNestedRepo {
+ path: path.to_owned(),
+ nested_repo: HgPathBuf::from_bytes(b"this/is/nested")
+ })
+ );
+
create_dir(base_dir_path.join("realdir")).unwrap();
File::create(base_dir_path.join("realdir/realfile")).unwrap();
// TODO make portable
--- a/rust/hg-core/src/vfs.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/src/vfs.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,17 +1,21 @@
use crate::errors::{HgError, IoErrorContext, IoResultExt};
+use crate::exit_codes;
+use dyn_clone::DynClone;
use memmap2::{Mmap, MmapOptions};
+use std::fs::File;
use std::io::{ErrorKind, Write};
+use std::os::unix::fs::MetadataExt;
use std::path::{Path, PathBuf};
/// Filesystem access abstraction for the contents of a given "base" directory
-#[derive(Clone, Copy)]
-pub struct Vfs<'a> {
- pub(crate) base: &'a Path,
+#[derive(Clone)]
+pub struct VfsImpl {
+ pub(crate) base: PathBuf,
}
struct FileNotFound(std::io::Error, PathBuf);
-impl Vfs<'_> {
+impl VfsImpl {
pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
self.base.join(relative_path)
}
@@ -71,7 +75,12 @@
}
Ok(file) => file,
};
- // TODO: what are the safety requirements here?
+ // Safety is "enforced" by locks and assuming other processes are
+ // well-behaved. If any misbehaving or malicious process does touch
+ // the index, it could lead to corruption. This is inherent
+ // to file-based `mmap`, though some platforms have some ways of
+ // mitigating.
+ // TODO linux: set the immutable flag with `chattr(1)`?
let mmap = unsafe { MmapOptions::new().map(&file) }
.when_reading_file(&path)?;
Ok(Ok(mmap))
@@ -134,8 +143,8 @@
relative_path: impl AsRef<Path>,
contents: &[u8],
) -> Result<(), HgError> {
- let mut tmp = tempfile::NamedTempFile::new_in(self.base)
- .when_writing_file(self.base)?;
+ let mut tmp = tempfile::NamedTempFile::new_in(&self.base)
+ .when_writing_file(&self.base)?;
tmp.write_all(contents)
.and_then(|()| tmp.flush())
.when_writing_file(tmp.path())?;
@@ -165,6 +174,174 @@
}
}
+/// Writable file object that atomically updates a file
+///
+/// All writes will go to a temporary copy of the original file. Call
+/// [`Self::close`] when you are done writing, and [`Self`] will rename
+/// the temporary copy to the original name, making the changes
+/// visible. If the object is destroyed without being closed, all your
+/// writes are discarded.
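+///
+/// A minimal usage sketch (the paths are illustrative):
+///
+/// ```ignore
+/// let fp = std::fs::File::create("target.tmp")?;
+/// let mut file = AtomicFile::new(
+///     fp,
+///     false,
+///     PathBuf::from("target.tmp"),
+///     PathBuf::from("target"),
+/// );
+/// file.write_all(b"new contents")?;
+/// file.close()?; // renames "target.tmp" over "target"
+/// ```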
+pub struct AtomicFile {
+ /// The temporary file to write to
+ fp: std::fs::File,
+ /// Path of the temp file
+ temp_path: PathBuf,
+ /// Used when stat'ing the file; only useful if the target file is
+ /// guarded by a lock (e.g. repo.lock or repo.wlock).
+ check_ambig: bool,
+ /// Path of the target file
+ target_name: PathBuf,
+ /// Whether the file is open or not
+ is_open: bool,
+}
+
+impl AtomicFile {
+ pub fn new(
+ fp: std::fs::File,
+ check_ambig: bool,
+ temp_name: PathBuf,
+ target_name: PathBuf,
+ ) -> Self {
+ Self {
+ fp,
+ check_ambig,
+ temp_path: temp_name,
+ target_name,
+ is_open: true,
+ }
+ }
+
+ /// Write `buf` to the temporary file
+ pub fn write_all(&mut self, buf: &[u8]) -> Result<(), std::io::Error> {
+ self.fp.write_all(buf)
+ }
+
+ fn target(&self) -> PathBuf {
+ self.temp_path
+ .parent()
+ .expect("should not be at the filesystem root")
+ .join(&self.target_name)
+ }
+
+ /// Close the temporary file and rename to the target
+ pub fn close(mut self) -> Result<(), std::io::Error> {
+ self.fp.flush()?;
+ let target = self.target();
+ if self.check_ambig {
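+ // If replacing the file leaves its ctime unchanged, stat-based
+ // consumers cannot tell it was rewritten; nudge the timestamps
+ // one second forward to make the change visible.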
+ if let Ok(stat) = std::fs::metadata(&target) {
+ std::fs::rename(&self.temp_path, &target)?;
+ let new_stat = std::fs::metadata(&target)?;
+ let ctime = new_stat.ctime();
+ let is_ambiguous = ctime == stat.ctime();
+ if is_ambiguous {
+ let advanced =
+ filetime::FileTime::from_unix_time(ctime + 1, 0);
+ filetime::set_file_times(target, advanced, advanced)?;
+ }
+ } else {
+ std::fs::rename(&self.temp_path, target)?;
+ }
+ } else {
+ std::fs::rename(&self.temp_path, target)?;
+ }
+ self.is_open = false;
+ Ok(())
+ }
+}
+
+impl Drop for AtomicFile {
+ fn drop(&mut self) {
+ if self.is_open {
+ // Discard the pending writes by removing the temporary file.
+ std::fs::remove_file(&self.temp_path).ok();
+ }
+ }
+}
+
+/// Abstracts over the VFS to allow for different implementations of the
+/// filesystem layer (like passing one from Python).
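+///
+/// The `DynClone` bound keeps `Box<dyn Vfs>` cloneable, which a plain
+/// trait object would not be.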
+pub trait Vfs: Sync + Send + DynClone {
+ fn open(&self, filename: &Path) -> Result<std::fs::File, HgError>;
+ fn open_read(&self, filename: &Path) -> Result<std::fs::File, HgError>;
+ fn open_check_ambig(
+ &self,
+ filename: &Path,
+ ) -> Result<std::fs::File, HgError>;
+ fn create(&self, filename: &Path) -> Result<std::fs::File, HgError>;
+ /// Must truncate the new file if it already exists
+ fn create_atomic(
+ &self,
+ filename: &Path,
+ check_ambig: bool,
+ ) -> Result<AtomicFile, HgError>;
+ fn file_size(&self, file: &File) -> Result<u64, HgError>;
+ fn exists(&self, filename: &Path) -> bool;
+ fn unlink(&self, filename: &Path) -> Result<(), HgError>;
+ fn rename(
+ &self,
+ from: &Path,
+ to: &Path,
+ check_ambig: bool,
+ ) -> Result<(), HgError>;
+ fn copy(&self, from: &Path, to: &Path) -> Result<(), HgError>;
+}
+
+/// These methods will need to be implemented once `rhg` (and other) non-Python
+/// users of `hg-core` start doing more on their own, like writing to files.
+impl Vfs for VfsImpl {
+ fn open(&self, _filename: &Path) -> Result<std::fs::File, HgError> {
+ todo!()
+ }
+ fn open_read(&self, filename: &Path) -> Result<std::fs::File, HgError> {
+ let path = self.base.join(filename);
+ std::fs::File::open(&path).when_reading_file(&path)
+ }
+ fn open_check_ambig(
+ &self,
+ _filename: &Path,
+ ) -> Result<std::fs::File, HgError> {
+ todo!()
+ }
+ fn create(&self, _filename: &Path) -> Result<std::fs::File, HgError> {
+ todo!()
+ }
+ fn create_atomic(
+ &self,
+ _filename: &Path,
+ _check_ambig: bool,
+ ) -> Result<AtomicFile, HgError> {
+ todo!()
+ }
+ fn file_size(&self, file: &File) -> Result<u64, HgError> {
+ Ok(file
+ .metadata()
+ .map_err(|e| {
+ HgError::abort(
+ format!("Could not get file metadata: {}", e),
+ exit_codes::ABORT,
+ None,
+ )
+ })?
+ .size())
+ }
+ fn exists(&self, _filename: &Path) -> bool {
+ todo!()
+ }
+ fn unlink(&self, _filename: &Path) -> Result<(), HgError> {
+ todo!()
+ }
+ fn rename(
+ &self,
+ _from: &Path,
+ _to: &Path,
+ _check_ambig: bool,
+ ) -> Result<(), HgError> {
+ todo!()
+ }
+ fn copy(&self, _from: &Path, _to: &Path) -> Result<(), HgError> {
+ todo!()
+ }
+}
+
pub(crate) fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
}
--- a/rust/hg-core/tests/test_missing_ancestors.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-core/tests/test_missing_ancestors.rs Sat Oct 26 04:16:00 2024 +0200
@@ -69,6 +69,7 @@
ancs
}
+#[allow(unused)] // Useful when debugging
#[derive(Clone, Debug)]
enum MissingAncestorsAction {
InitialBases(HashSet<Revision>),
--- a/rust/hg-cpython/src/ancestors.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-cpython/src/ancestors.rs Sat Oct 26 04:16:00 2024 +0200
@@ -19,8 +19,8 @@
//! `mercurial.ancestor.incrementalmissingancestors`.
//!
//! API differences:
-//! + it is instantiated with a C `parsers.index`
-//! instance instead of a parents function.
+//! + it is instantiated with a C `parsers.index` instance instead of a
+//! parents function.
//! + `MissingAncestors.bases` is a method returning a tuple instead of
//! a set-valued attribute. We could return a Python set easily if our
//! [PySet PR](https://github.com/dgrunwald/rust-cpython/pull/165)
--- a/rust/hg-cpython/src/dirstate.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-cpython/src/dirstate.rs Sat Oct 26 04:16:00 2024 +0200
@@ -16,12 +16,11 @@
mod status;
use self::item::DirstateItem;
use crate::{
- dirstate::{
- dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
- },
+ dirstate::{dirs_multiset::Dirs, status::status_wrapper},
exceptions,
};
use cpython::{PyBytes, PyDict, PyList, PyModule, PyObject, PyResult, Python};
+use dirstate_map::{DirstateIdentity, DirstateMap};
use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
/// Create the module, with `__package__` given from parent
@@ -42,6 +41,7 @@
m.add_class::<Dirs>(py)?;
m.add_class::<DirstateMap>(py)?;
m.add_class::<DirstateItem>(py)?;
+ m.add_class::<DirstateIdentity>(py)?;
m.add(py, "V2_FORMAT_MARKER", PyBytes::new(py, V2_FORMAT_MARKER))?;
m.add(
py,
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Sat Oct 26 04:16:00 2024 +0200
@@ -14,7 +14,12 @@
exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
};
-use hg::dirstate::{ParentFileData, TruncatedTimestamp};
+use hg::{
+ dirstate::{ParentFileData, TruncatedTimestamp},
+ dirstate_tree::dirstate_map::{
+ DirstateEntryReset, DirstateIdentity as CoreDirstateIdentity,
+ },
+};
use crate::{
dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
@@ -48,10 +53,13 @@
@staticmethod
def new_v1(
on_disk: PyBytes,
- identity: Option<u64>,
+ identity: Option<DirstateIdentity>,
) -> PyResult<PyObject> {
let on_disk = PyBytesDeref::new(py, on_disk);
- let (map, parents) = OwningDirstateMap::new_v1(on_disk, identity)
+ let (map, parents) = OwningDirstateMap::new_v1(
+ on_disk,
+ identity.map(|i| *i.inner(py))
+ )
.map_err(|e| dirstate_error(py, e))?;
let map = Self::create_instance(py, map)?;
let p1 = PyBytes::new(py, parents.p1.as_bytes());
@@ -67,7 +75,7 @@
data_size: usize,
tree_metadata: PyBytes,
uuid: PyBytes,
- identity: Option<u64>,
+ identity: Option<DirstateIdentity>,
) -> PyResult<PyObject> {
let dirstate_error = |e: DirstateError| {
PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
@@ -79,7 +87,7 @@
data_size,
tree_metadata.data(py),
uuid.to_owned(),
- identity,
+ identity.map(|i| *i.inner(py)),
).map_err(dirstate_error)?;
let map = Self::create_instance(py, map)?;
Ok(map.into_object())
@@ -88,7 +96,7 @@
/// Returns an empty DirstateMap. Only used for a new dirstate.
@staticmethod
def new_empty() -> PyResult<PyObject> {
- let map = OwningDirstateMap::new_empty(vec![]);
+ let map = OwningDirstateMap::new_empty(vec![], None);
let map = Self::create_instance(py, map)?;
Ok(map.into_object())
}
@@ -196,14 +204,16 @@
};
let bytes = f.extract::<PyBytes>(py)?;
let path = HgPath::new(bytes.data(py));
- let res = self.inner(py).borrow_mut().reset_state(
- path,
+ let reset = DirstateEntryReset {
+ filename: path,
wc_tracked,
p1_tracked,
p2_info,
has_meaningful_mtime,
- parent_file_data,
- );
+ parent_file_data_opt: parent_file_data,
+ from_empty: false
+ };
+ let res = self.inner(py).borrow_mut().reset_state(reset);
res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
Ok(PyNone)
}
@@ -539,6 +549,41 @@
Option<(PyBytes, PyObject)>
);
+py_class!(pub class DirstateIdentity |py| {
+ data inner: CoreDirstateIdentity;
+
+ def __new__(
+ _cls,
+ mode: u32,
+ dev: u64,
+ ino: u64,
+ nlink: u64,
+ uid: u32,
+ gid: u32,
+ size: u64,
+ mtime: i64,
+ mtime_nsec: i64,
+ ctime: i64,
+ ctime_nsec: i64) -> PyResult<DirstateIdentity> {
+ Self::create_instance(
+ py,
+ CoreDirstateIdentity {
+ mode,
+ dev,
+ ino,
+ nlink,
+ uid,
+ gid,
+ size,
+ mtime,
+ mtime_nsec,
+ ctime,
+ ctime_nsec
+ }
+ )
+ }
+});
+
fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
let bytes = obj.extract::<PyBytes>(py)?;
match bytes.data(py).try_into() {
--- a/rust/hg-cpython/src/lib.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-cpython/src/lib.rs Sat Oct 26 04:16:00 2024 +0200
@@ -45,6 +45,7 @@
pub mod exceptions;
mod pybytes_deref;
pub mod revlog;
+pub mod update;
pub mod utils;
/// Revision as exposed to/from the Python layer.
@@ -100,6 +101,7 @@
m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?;
m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?;
m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
+ m.add(py, "update", update::init_module(py, &dotted_name)?)?;
m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
Ok(())
});
--- a/rust/hg-cpython/src/revlog.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-cpython/src/revlog.rs Sat Oct 26 04:16:00 2024 +0200
@@ -27,7 +27,10 @@
revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
};
-use std::{cell::RefCell, collections::HashMap};
+use std::{
+ cell::RefCell,
+ collections::{HashMap, HashSet},
+};
use vcsgraph::graph::Graph as VCSGraph;
pub struct PySharedIndex {
@@ -304,9 +307,14 @@
}
/// get head revisions
- def headrevs(&self) -> PyResult<PyObject> {
- let rust_res = self.inner_headrevs(py)?;
- Ok(rust_res)
+ def headrevs(&self, *args, **_kw) -> PyResult<PyObject> {
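+ // Both arguments are optional positional arguments:
+ // (filtered_revs, stop_rev).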
+ let (filtered_revs, stop_rev) = match &args.len(py) {
+ 0 => Ok((py.None(), py.None())),
+ 1 => Ok((args.get_item(py, 0), py.None())),
+ 2 => Ok((args.get_item(py, 0), args.get_item(py, 1))),
+ _ => Err(PyErr::new::<cpython::exc::TypeError, _>(py, "too many arguments")),
+ }?;
+ self.inner_headrevs(py, &filtered_revs, &stop_rev)
}
/// get head nodeids
@@ -324,12 +332,6 @@
Ok(rust_res)
}
- /// get filtered head revisions
- def headrevsfiltered(&self, *args, **_kw) -> PyResult<PyObject> {
- let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
- Ok(rust_res)
- }
-
/// True if the object is a snapshot
def issnapshot(&self, *args, **_kw) -> PyResult<bool> {
let index = self.index(py).borrow();
@@ -819,21 +821,65 @@
Ok(PyList::new(py, &res).into_object())
}
- fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
+ fn inner_headrevs(
+ &self,
+ py: Python,
+ filtered_revs: &PyObject,
+ stop_rev: &PyObject,
+ ) -> PyResult<PyObject> {
let index = &*self.index(py).borrow();
- if let Some(new_heads) =
- index.head_revs_shortcut().map_err(|e| graph_error(py, e))?
- {
- self.cache_new_heads_py_list(&new_heads, py);
- }
+ let stop_rev = if stop_rev.is_none(py) {
+ None
+ } else {
+ let rev = stop_rev.extract::<i32>(py)?;
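+ // An out-of-range stop revision is treated as no stop at all.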
+ if 0 <= rev && rev < index.len() as BaseRevision {
+ Some(Revision(rev))
+ } else {
+ None
+ }
+ };
+ let from_core = match (filtered_revs.is_none(py), stop_rev.is_none()) {
+ (true, true) => index.head_revs_shortcut(),
+ (true, false) => {
+ index.head_revs_advanced(&HashSet::new(), stop_rev, false)
+ }
+ _ => {
+ let filtered_revs =
+ rev_pyiter_collect(py, filtered_revs, index)?;
+ index.head_revs_advanced(
+ &filtered_revs,
+ stop_rev,
+ stop_rev.is_none(),
+ )
+ }
+ };
- Ok(self
- .head_revs_py_list(py)
- .borrow()
- .as_ref()
- .expect("head revs should be cached")
- .clone_ref(py)
- .into_object())
+ if stop_rev.is_some() {
+ // We don't cache the result for now.
+ let new_heads = from_core
+ .map_err(|e| graph_error(py, e))?
+ .expect("this case should not be cached yet");
+
+ let as_vec: Vec<PyObject> = new_heads
+ .iter()
+ .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
+ .collect();
+ Ok(PyList::new(py, &as_vec).into_object())
+ } else {
+ if let Some(new_heads) =
+ from_core.map_err(|e| graph_error(py, e))?
+ {
+ self.cache_new_heads_py_list(&new_heads, py);
+ }
+
+ Ok(self
+ .head_revs_py_list(py)
+ .borrow()
+ .as_ref()
+ .expect("head revs should be cached")
+ .clone_ref(py)
+ .into_object())
+ }
}
fn check_revision(
@@ -868,30 +914,6 @@
Ok(res)
}
- fn inner_headrevsfiltered(
- &self,
- py: Python,
- filtered_revs: &PyObject,
- ) -> PyResult<PyObject> {
- let index = &*self.index(py).borrow();
- let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
-
- if let Some(new_heads) = index
- .head_revs_filtered(&filtered_revs, true)
- .map_err(|e| graph_error(py, e))?
- {
- self.cache_new_heads_py_list(&new_heads, py);
- }
-
- Ok(self
- .head_revs_py_list(py)
- .borrow()
- .as_ref()
- .expect("head revs should be cached")
- .clone_ref(py)
- .into_object())
- }
-
fn cache_new_heads_node_ids_py_list(
&self,
new_heads: &[Revision],
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/update.rs Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,53 @@
+// update.rs
+//
+// Copyright 2024 Mercurial developers
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Module for updating a repository.
+use cpython::{PyDict, PyModule, PyObject, PyResult, Python};
+use hg::{
+ progress::{HgProgressBar, Progress},
+ update::update_from_null,
+ BaseRevision,
+};
+
+use crate::{
+ exceptions::FallbackError,
+ utils::{hgerror_to_pyerr, repo_from_path},
+};
+
+pub fn update_from_null_fast_path(
+ py: Python,
+ repo_path: PyObject,
+ to: BaseRevision,
+) -> PyResult<usize> {
+ log::trace!("Using update from null fastpath");
+ let repo = repo_from_path(py, repo_path)?;
+ let progress: &dyn Progress = &HgProgressBar::new("updating");
+ hgerror_to_pyerr(py, update_from_null(&repo, to.into(), progress))
+}
+
+pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
+ let dotted_name = &format!("{}.update", package);
+ let m = PyModule::new(py, dotted_name)?;
+
+ m.add(py, "__package__", package)?;
+ m.add(py, "__doc__", "Rust module for updating a repository")?;
+ m.add(py, "FallbackError", py.get_type::<FallbackError>())?;
+ m.add(
+ py,
+ "update_from_null",
+ py_fn!(
+ py,
+ update_from_null_fast_path(repo_path: PyObject, to: BaseRevision,)
+ ),
+ )?;
+
+ let sys = PyModule::import(py, "sys")?;
+ let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
+ sys_modules.set_item(py, dotted_name, &m)?;
+
+ Ok(m)
+}
--- a/rust/hg-cpython/src/utils.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/hg-cpython/src/utils.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,6 +1,15 @@
use cpython::exc::ValueError;
-use cpython::{PyBytes, PyDict, PyErr, PyObject, PyResult, PyTuple, Python};
+use cpython::{
+ ObjectProtocol, PyBytes, PyDict, PyErr, PyObject, PyResult, PyTuple,
+ Python, ToPyObject,
+};
+use hg::config::Config;
+use hg::errors::HgError;
+use hg::repo::{Repo, RepoError};
use hg::revlog::Node;
+use hg::utils::files::get_path_from_bytes;
+
+use crate::exceptions::FallbackError;
#[allow(unused)]
pub fn print_python_trace(py: Python) -> PyResult<PyObject> {
@@ -14,6 +23,58 @@
traceback.call(py, "print_stack", PyTuple::new(py, &[]), Some(&kwargs))
}
+pub fn hgerror_to_pyerr<T>(
+ py: Python,
+ error: Result<T, HgError>,
+) -> PyResult<T> {
+ error.map_err(|e| match e {
+ HgError::IoError { .. } => {
+ PyErr::new::<cpython::exc::IOError, _>(py, e.to_string())
+ }
+ HgError::UnsupportedFeature(e) => {
+ let as_string = e.to_string();
+ log::trace!("Update from null fallback: {}", as_string);
+ PyErr::new::<FallbackError, _>(py, &as_string)
+ }
+ HgError::RaceDetected(_) => {
+ unreachable!("must not surface to the user")
+ }
+ HgError::Path(path_error) => {
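+ // Surface path errors as `mercurial.error.InputError` so the
+ // Python side handles them like its own input errors.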
+ let msg = PyBytes::new(py, path_error.to_string().as_bytes());
+ let cls = py
+ .import("mercurial.error")
+ .and_then(|m| m.get(py, "InputError"))
+ .unwrap();
+ PyErr::from_instance(
+ py,
+ cls.call(py, (msg,), None).ok().into_py_object(py),
+ )
+ }
+ e => PyErr::new::<cpython::exc::RuntimeError, _>(py, e.to_string()),
+ })
+}
+
+pub fn repo_error_to_pyerr<T>(
+ py: Python,
+ error: Result<T, RepoError>,
+) -> PyResult<T> {
+ hgerror_to_pyerr(py, error.map_err(HgError::from))
+}
+
+/// Get a repository from a given [`PyObject`] path, and bubble up any error
+/// that comes up.
+pub fn repo_from_path(py: Python, repo_path: PyObject) -> Result<Repo, PyErr> {
+ let config =
+ hgerror_to_pyerr(py, Config::load_non_repo().map_err(HgError::from))?;
+ let py_bytes = &repo_path.extract::<PyBytes>(py)?;
+ let repo_path = py_bytes.data(py);
+ let repo = repo_error_to_pyerr(
+ py,
+ Repo::find(&config, Some(get_path_from_bytes(repo_path).to_owned())),
+ )?;
+ Ok(repo)
+}
+
// Necessary evil for the time being, could maybe be moved to
// a TryFrom in Node itself
const NODE_BYTES_LENGTH: usize = 20;
--- a/rust/rhg/src/commands/debugdata.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/rhg/src/commands/debugdata.rs Sat Oct 26 04:16:00 2024 +0200
@@ -1,7 +1,8 @@
use crate::error::CommandError;
use clap::Arg;
use clap::ArgGroup;
-use hg::operations::{debug_data, DebugDataKind};
+use hg::operations::debug_data;
+use hg::RevlogType;
pub const HELP_TEXT: &str = "
Dump the contents of a data file revision
@@ -45,8 +46,8 @@
args.get_one::<bool>("changelog").unwrap(),
args.get_one::<bool>("manifest").unwrap(),
) {
- (true, false) => DebugDataKind::Changelog,
- (false, true) => DebugDataKind::Manifest,
+ (true, false) => RevlogType::Changelog,
+ (false, true) => RevlogType::Manifestlog,
(true, true) => {
unreachable!("Should not happen since options are exclusive")
}
--- a/rust/rhg/src/commands/files.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/rhg/src/commands/files.rs Sat Oct 26 04:16:00 2024 +0200
@@ -7,7 +7,7 @@
use hg::filepatterns::parse_pattern_args;
use hg::matchers::IntersectionMatcher;
use hg::narrow;
-use hg::operations::list_rev_tracked_files;
+use hg::operations::list_revset_tracked_files;
use hg::repo::Repo;
use hg::utils::files::get_bytes_from_os_str;
use hg::utils::filter_map_results;
@@ -88,9 +88,17 @@
};
if let Some(rev) = rev {
- let files = list_rev_tracked_files(repo, rev, matcher)
+ let files = list_revset_tracked_files(repo, rev, matcher)
.map_err(|e| (e, rev.as_ref()))?;
- display_files(invocation.ui, repo, relative_paths, files.iter())
+ display_files(
+ invocation.ui,
+ repo,
+ relative_paths,
+ files.iter().map::<Result<_, CommandError>, _>(|f| {
+ let (f, _, _) = f?;
+ Ok(f)
+ }),
+ )
} else {
// The dirstate always reflects the sparse narrowspec.
let dirstate = repo.dirstate_map()?;
--- a/rust/rhg/src/commands/status.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/rhg/src/commands/status.rs Sat Oct 26 04:16:00 2024 +0200
@@ -28,12 +28,12 @@
get_bytes_from_os_str, get_bytes_from_os_string, get_path_from_bytes,
};
use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
-use hg::PatternFileWarning;
use hg::Revision;
use hg::StatusError;
use hg::StatusOptions;
use hg::{self, narrow, sparse};
use hg::{DirstateStatus, RevlogOpenOptions};
+use hg::{PatternFileWarning, RevlogType};
use log::info;
use rayon::prelude::*;
use std::borrow::Cow;
@@ -383,7 +383,8 @@
})?;
let working_directory_vfs = repo.working_directory_vfs();
let store_vfs = repo.store_vfs();
- let revlog_open_options = repo.default_revlog_options(false)?;
+ let revlog_open_options =
+ repo.default_revlog_options(RevlogType::Manifestlog)?;
let res: Vec<_> = take(&mut ds_status.unsure)
.into_par_iter()
.map(|to_check| {
@@ -392,8 +393,8 @@
// + map_err + collect, so let's just inline some of the
// logic.
match unsure_is_modified(
- working_directory_vfs,
- store_vfs,
+ &working_directory_vfs,
+ &store_vfs,
check_exec,
&manifest,
&to_check.path,
@@ -747,8 +748,8 @@
/// This meant to be used for those that the dirstate cannot resolve, due
/// to time resolution limits.
fn unsure_is_modified(
- working_directory_vfs: hg::vfs::Vfs,
- store_vfs: hg::vfs::Vfs,
+ working_directory_vfs: &hg::vfs::VfsImpl,
+ store_vfs: &hg::vfs::VfsImpl,
check_exec: bool,
manifest: &Manifest,
hg_path: &HgPath,
@@ -775,17 +776,17 @@
let entry_flags = if check_exec {
entry.flags
- } else if entry.flags == Some(b'x') {
+ } else if entry.flags.map(|f| f.into()) == Some(b'x') {
None
} else {
entry.flags
};
- if entry_flags != fs_flags {
+ if entry_flags.map(|f| f.into()) != fs_flags {
return Ok(UnsureOutcome::Modified);
}
let filelog = hg::filelog::Filelog::open_vfs(
- &store_vfs,
+ store_vfs,
hg_path,
revlog_open_options,
)?;
--- a/rust/rhg/src/error.rs Thu Jan 11 20:37:34 2024 +0100
+++ b/rust/rhg/src/error.rs Sat Oct 26 04:16:00 2024 +0200
@@ -207,9 +207,9 @@
RevlogError::WDirUnsupported => CommandError::abort(
"abort: working directory revision cannot be specified",
),
- RevlogError::InvalidRevision => CommandError::abort(format!(
+ RevlogError::InvalidRevision(r) => CommandError::abort(format!(
"abort: invalid revision identifier: {}",
- rev
+ r
)),
RevlogError::AmbiguousPrefix => CommandError::abort(format!(
"abort: ambiguous revision identifier: {}",
--- a/setup.py Thu Jan 11 20:37:34 2024 +0100
+++ b/setup.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,11 +5,9 @@
# 'python setup.py --help' for more options
import os
-# Mercurial can't work on 3.6.0 or 3.6.1 due to a bug in % formatting
-# in bytestrings.
supportedpy = ','.join(
[
- '>=3.6.2',
+ '>=3.8.0',
]
)
@@ -127,7 +125,7 @@
from distutils.command.install_lib import install_lib
from distutils.command.install_scripts import install_scripts
from distutils import log
-from distutils.spawn import spawn, find_executable
+from distutils.spawn import spawn
from distutils import file_util
from distutils.errors import (
CCompilerError,
@@ -463,10 +461,15 @@
class hgbuildmo(build):
-
description = "build translations (.mo files)"
def run(self):
+ try:
+ from shutil import which as find_executable
+ except ImportError:
+ # Deprecated in py3.12
+ from distutils.spawn import find_executable
+
if not find_executable('msgfmt'):
self.warn(
"could not find msgfmt executable, no translations "
@@ -1056,7 +1059,6 @@
class hginstall(install):
-
user_options = install.user_options + [
(
'old-and-unmanageable',
@@ -1329,6 +1331,7 @@
'mercurial.admin',
'mercurial.cext',
'mercurial.cffi',
+ 'mercurial.branching',
'mercurial.defaultrc',
'mercurial.dirstateutils',
'mercurial.helptext',
@@ -1658,27 +1661,6 @@
pass
-if os.name == 'nt':
- # Allow compiler/linker flags to be added to Visual Studio builds. Passing
- # extra_link_args to distutils.extensions.Extension() doesn't have any
- # effect.
- try:
- # setuptools < 65.0
- from distutils import msvccompiler
- except ImportError:
- from distutils import _msvccompiler as msvccompiler
-
- msvccompilerclass = msvccompiler.MSVCCompiler
-
- class HackedMSVCCompiler(msvccompiler.MSVCCompiler):
- def initialize(self):
- msvccompilerclass.initialize(self)
- # "warning LNK4197: export 'func' specified multiple times"
- self.ldflags_shared.append('/ignore:4197')
- self.ldflags_shared_debug.append('/ignore:4197')
-
- msvccompiler.MSVCCompiler = HackedMSVCCompiler
-
packagedata = {
'mercurial': [
'configitems.toml',
@@ -1784,42 +1766,14 @@
setupversion = setupversion.split(r'+', 1)[0]
setup(
- name='mercurial',
version=setupversion,
- author='Olivia Mackall and many others',
- author_email='mercurial@mercurial-scm.org',
- url='https://mercurial-scm.org/',
- download_url='https://mercurial-scm.org/release/',
- description=(
- 'Fast scalable distributed SCM (revision control, version '
- 'control) system'
- ),
long_description=(
'Mercurial is a distributed SCM tool written in Python.'
' It is used by a number of large projects that require'
' fast, reliable distributed revision control, such as '
'Mozilla.'
),
- license='GNU GPLv2 or any later version',
- classifiers=[
- 'Development Status :: 6 - Mature',
- 'Environment :: Console',
- 'Intended Audience :: Developers',
- 'Intended Audience :: System Administrators',
- 'License :: OSI Approved :: GNU General Public License (GPL)',
- 'Natural Language :: Danish',
- 'Natural Language :: English',
- 'Natural Language :: German',
- 'Natural Language :: Italian',
- 'Natural Language :: Japanese',
- 'Natural Language :: Portuguese (Brazilian)',
- 'Operating System :: Microsoft :: Windows',
- 'Operating System :: OS Independent',
- 'Operating System :: POSIX',
- 'Programming Language :: C',
- 'Programming Language :: Python',
- 'Topic :: Software Development :: Version Control',
- ],
+ long_description_content_type='text/x-rst',
scripts=scripts,
packages=packages,
ext_modules=extmodules,
--- a/tests/common-pattern.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/common-pattern.py Sat Oct 26 04:16:00 2024 +0200
@@ -168,6 +168,12 @@
# FormatMessage(WSAEADDRNOTAVAIL)
br'The requested address is not valid in its context',
),
+ br'$ECONNREFUSED$': (
+ # strerror()
+ br'Connection refused',
+ # FormatMessage(WSAECONNREFUSED)
+ br'No connection could be made because the target machine actively refused it',
+ ),
}
for replace, msgs in _errors.items():
--- a/tests/dumbhttp.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/dumbhttp.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,7 +26,6 @@
class simplehttpserver(httpserver.httpserver):
address_family = socket.AF_INET6
-
else:
simplehttpserver = httpserver.httpserver
--- a/tests/dummysmtpd.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/dummysmtpd.py Sat Oct 26 04:16:00 2024 +0200
@@ -3,6 +3,7 @@
"""dummy SMTP server for use in tests"""
+import io
import optparse
import os
import socket
@@ -16,6 +17,13 @@
ui as uimod,
)
+if pycompat.iswindows:
+ sys.stdout = io.TextIOWrapper(
+ sys.stdout.buffer,
+ sys.stdout.encoding,
+ sys.stdout.errors,
+ newline="\n",
+ )
if os.environ.get('HGIPV6', '0') == '1':
family = socket.AF_INET6
@@ -31,8 +39,15 @@
def mocksmtpserversession(conn, addr):
conn.send(b'220 smtp.example.com ESMTP\r\n')
- line = conn.recv(1024)
+ try:
+ # Newer versions of OpenSSL raise on EOF
+ line = conn.recv(1024)
+ except ssl.SSLError:
+ log('no hello: EOF\n')
+ return
+
if not line.lower().startswith(b'ehlo '):
+ # Older versions of OpenSSL don't raise
log('no hello: %s\n' % line)
return
--- a/tests/fsmonitor-run-tests.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/fsmonitor-run-tests.py Sat Oct 26 04:16:00 2024 +0200
@@ -30,7 +30,6 @@
def _sys2bytes(p):
return p.encode('utf-8')
-
elif sys.version_info >= (3, 0, 0):
print(
'%s is only supported on Python 3.5+ and 2.7, not %s'
--- a/tests/generate-working-copy-states.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/generate-working-copy-states.py Sat Oct 26 04:16:00 2024 +0200
@@ -33,6 +33,7 @@
import os
import sys
+
# Generates pairs of (filename, contents), where 'contents' is a list
# describing the file's content at each revision (or in the working copy).
# At each revision, it is either None or the file's actual content. When not
--- a/tests/helpers-testrepo.sh Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/helpers-testrepo.sh Sat Oct 26 04:16:00 2024 +0200
@@ -58,9 +58,17 @@
# Use the system hg command if the bundled hg can't read the repository with
# no warning nor error.
if [ -n "`hg id -R "$TESTDIR/.." 2>&1 >/dev/null`" ]; then
- alias testrepohg=syshg
- alias testrepohgenv=syshgenv
+ testrepohg() {
+ syshg "$@"
+ }
+ testrepohgenv() {
+ syshgenv "$@"
+ }
else
- alias testrepohg="hg $extraoptions"
- alias testrepohgenv=:
+ testrepohg() {
+ hg $extraoptions "$@"
+ }
+ testrepohgenv() {
+ :
+ }
fi
--- a/tests/hghave.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/hghave.py Sat Oct 26 04:16:00 2024 +0200
@@ -609,14 +609,7 @@
@check("pyflakes", "Pyflakes python linter")
def has_pyflakes():
- try:
- import pyflakes
-
- pyflakes.__version__
- except ImportError:
- return False
- else:
- return True
+ return matchoutput("pyflakes --version", br"^\d+\.\d+\.\d+\b", True)
@check("pylint", "Pylint python linter")
@@ -624,12 +617,13 @@
return matchoutput("pylint --help", br"[Uu]sage:[ ]+pylint", True)
-@check("clang-format", "clang-format C code formatter (>= 11)")
+@check("clang-format", "clang-format C code formatter (11 <= … < 19)")
def has_clang_format():
m = matchoutput('clang-format --version', br'clang-format version (\d+)')
- # style changed somewhere between 10.x and 11.x
+ # style changed somewhere between 10.x and 11.x, and again after 19
if m:
- return int(m.group(1)) >= 11
+ major_version = int(m.group(1))
+ return 11 <= major_version < 19
# Assist Googler contributors, they have a centrally-maintained version of
# clang-format that is generally very fresh, but unlike most builds (both
# official and unofficial), it does *not* include a version number.
@@ -738,6 +732,13 @@
return False
+@check("gui", "whether a gui environment is available or not")
+def has_gui():
+ from mercurial.utils import procutil
+
+ return procutil.gui()
+
+
@check("test-repo", "running tests from repository")
def has_test_repo():
t = os.environ["TESTDIR"]
@@ -1117,14 +1118,14 @@
return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')
-@check('black', 'the black formatter for python (>= 20.8b1)')
+@check('black', 'the black formatter for python >=23.3.0')
def has_black():
blackcmd = 'black --version'
version_regex = b'black, (?:version )?([0-9a-b.]+)'
version = matchoutput(blackcmd, version_regex)
if not version:
return False
- return Version(_bytes2sys(version.group(1))) >= Version('20.8b1')
+ return Version(_bytes2sys(version.group(1))) >= Version('23.3.0')
@check('pytype', 'the pytype type checker')
@@ -1136,11 +1137,11 @@
return Version(_bytes2sys(version.group(0))) >= Version('2019.10.17')
-@check("rustfmt", "rustfmt tool at version nightly-2021-11-02")
+@check("rustfmt", "rustfmt tool at version nightly-2024-07-16")
def has_rustfmt():
# We use Nightly's rustfmt due to current unstable config options.
return matchoutput(
- '`rustup which --toolchain nightly-2021-11-02 rustfmt` --version',
+ '`rustup which --toolchain nightly-2024-07-16 rustfmt` --version',
b'rustfmt',
)
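
Several of these hghave checks now gate on a version window rather than a simple minimum. A rough standalone equivalent of the clang-format check (hypothetical helper; the real code funnels through matchoutput() and additionally accepts the version-less Googler build mentioned in the comment):

import re
import subprocess


def clang_format_supported(low=11, high=19):
    # Style output changed before 11 and again at 19, so only the
    # half-open window [low, high) is accepted.
    try:
        proc = subprocess.run(
            ['clang-format', '--version'],
            capture_output=True,
            text=True,
            check=True,
        )
    except (OSError, subprocess.CalledProcessError):
        return False
    m = re.search(r'clang-format version (\d+)', proc.stdout)
    return bool(m) and low <= int(m.group(1)) < high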
--- a/tests/killdaemons.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/killdaemons.py Sat Oct 26 04:16:00 2024 +0200
@@ -56,7 +56,12 @@
if r == WAIT_OBJECT_0:
pass # terminated, but process handle still available
elif r == WAIT_TIMEOUT:
- _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
+ # Allow the caller to optionally specify the exit code, to
+ # simulate killing with a signal.
+ exit_code = int(os.environ.get("DAEMON_EXITCODE", -1))
+ _check(
+ ctypes.windll.kernel32.TerminateProcess(handle, exit_code)
+ )
elif r == WAIT_FAILED:
_check(0) # err stored in GetLastError()
@@ -75,7 +80,6 @@
raise
_check(ctypes.windll.kernel32.CloseHandle(handle))
-
else:
def kill(pid, logfn, tryhard=True):
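
The new DAEMON_EXITCODE environment knob only changes the status handed to TerminateProcess, letting Windows tests fake a signal-style exit. A sketch of the lookup with its default (names taken from the hunk above):

import os

# -1 mimics the previous hard-coded TerminateProcess status; any integer
# set in DAEMON_EXITCODE wins, e.g. to simulate death by signal.
exit_code = int(os.environ.get("DAEMON_EXITCODE", -1))
print(exit_code)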
--- a/tests/mockblackbox.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/mockblackbox.py Sat Oct 26 04:16:00 2024 +0200
@@ -1,5 +1,6 @@
from mercurial.utils import procutil
+
# XXX: we should probably offer a devel option to do this in blackbox directly
def getuser():
return b'bob'
--- a/tests/run-tests.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/run-tests.py Sat Oct 26 04:16:00 2024 +0200
@@ -73,9 +73,9 @@
import xml.dom.minidom as minidom
-if sys.version_info < (3, 5, 0):
+if sys.version_info < (3, 8, 0):
print(
- '%s is only supported on Python 3.5+, not %s'
+ '%s is only supported on Python 3.8+, not %s'
% (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
)
sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
@@ -114,36 +114,73 @@
except ImportError:
pass
+progress_type = {}
+
if pygmentspresent:
+ _T_ERROR = token.string_to_tokentype("Token.Generic.Error")
+ _T_FAILED = token.string_to_tokentype("Token.Generic.Failed")
+ _T_FNAME = token.string_to_tokentype("Token.Generic.FName")
+ _T_IGNORED = token.string_to_tokentype("Token.Generic.Ignored")
+ _T_SKIPPED = token.string_to_tokentype("Token.Generic.Skipped")
+ _T_SNAME = token.string_to_tokentype("Token.Generic.SName")
+ _T_SKIPPED_DOT = token.string_to_tokentype("Token.Generic.SkippedDot")
+ _T_SUCCESS = token.string_to_tokentype("Token.Generic.Success")
+ _T_TIMEDOUT = token.string_to_tokentype("Token.Generic.TimedOut")
class TestRunnerStyle(style.Style):
default_style = ""
- skipped = token.string_to_tokentype("Token.Generic.Skipped")
- failed = token.string_to_tokentype("Token.Generic.Failed")
- skippedname = token.string_to_tokentype("Token.Generic.SName")
- failedname = token.string_to_tokentype("Token.Generic.FName")
styles = {
- skipped: '#e5e5e5',
- skippedname: '#00ffff',
- failed: '#7f0000',
- failedname: '#ff0000',
+ _T_ERROR: '#cd00cd',
+ _T_FAILED: '#7f0000',
+ _T_FNAME: '#ff0000',
+ _T_IGNORED: '#cdcd00',
+ _T_SKIPPED: '#e5e5e5',
+ _T_SNAME: '#00ffff',
+ _T_SKIPPED_DOT: '#00ffff',
+ _T_SUCCESS: '#00cd00',
+ _T_TIMEDOUT: '#ff00ff',
}
class TestRunnerLexer(lexer.RegexLexer):
testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
tokens = {
'root': [
- (r'^Skipped', token.Generic.Skipped, 'skipped'),
- (r'^Failed ', token.Generic.Failed, 'failed'),
- (r'^ERROR: ', token.Generic.Failed, 'failed'),
+ (r'^Skipped', _T_SKIPPED, 'skipped'),
+ (r'^Failed ', _T_FAILED, 'failed'),
+ (r'^ERROR: ', _T_FAILED, 'failed'),
],
'skipped': [
- (testpattern, token.Generic.SName),
- (r':.*', token.Generic.Skipped),
+ (testpattern, _T_SNAME),
+ (r':.*', _T_SKIPPED),
],
'failed': [
- (testpattern, token.Generic.FName),
- (r'(:| ).*', token.Generic.Failed),
+ (testpattern, _T_FNAME),
+ (r'(:| ).*', _T_FAILED),
+ ],
+ }
+
+ progress_type['.'] = _T_SUCCESS
+ progress_type['s'] = _T_SKIPPED_DOT
+ progress_type['i'] = _T_IGNORED
+ progress_type['!'] = _T_FAILED
+ progress_type['E'] = _T_ERROR
+ progress_type['t'] = _T_TIMEDOUT
+
+ class progressLexer(lexer.RegexLexer):
+ testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
+ tokens = {
+ 'root': [
+ (r'^Skipped', _T_SKIPPED, 'skipped'),
+ (r'^Failed ', _T_FAILED, 'failed'),
+ (r'^ERROR: ', _T_FAILED, 'failed'),
+ ],
+ 'skipped': [
+ (testpattern, _T_SNAME),
+ (r':.*', _T_SKIPPED),
+ ],
+ 'failed': [
+ (testpattern, _T_FNAME),
+ (r'(:| ).*', _T_FAILED),
],
}
@@ -337,7 +374,7 @@
default_defaults = {
'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
- 'timeout': ('HGTEST_TIMEOUT', 360),
+ 'timeout': ('HGTEST_TIMEOUT', 360 if not WINDOWS else 360 * 4),
'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
'port': ('HGTEST_PORT', 20059),
'shell': ('HGTEST_SHELL', 'sh'),
@@ -947,6 +984,20 @@
return pygments.highlight(msg, runnerlexer, runnerformatter)
+def highlight_progress(progress, color):
+ if not color:
+ return progress
+ assert pygmentspresent
+ token = progress_type.get(progress)
+ if token is None:
+ return progress
+ style = runnerformatter.style_string.get(str(token))
+ if style is None:
+ return progress
+ else:
+ return style[0] + progress + style[1]
+
+
def terminate(proc):
"""Terminate subprocess"""
vlog('# Terminating process %d' % proc.pid)
@@ -2247,6 +2298,7 @@
# sense to map it into skip some day.
self.ignored = []
+ self._dot_printed = 0
self.times = []
self._firststarttime = None
# Data stored for the benefit of generating xunit reports.
@@ -2266,6 +2318,15 @@
else: # 'always', for testing purposes
self.color = pygmentspresent
+ def _write_dot(self, progress):
+ """write an item of the "dot" progress"""
+ formatted = highlight_progress(progress, self.color)
+ self.stream.write(formatted)
+ self._dot_printed += 1
+ if not self._dot_printed % 75:
+ self.stream.write(f' [{self._dot_printed}]\n'.rjust(8))
+ self.stream.flush()
+
def onStart(self, test):
"""Can be overriden by custom TestResult"""
@@ -2280,24 +2341,33 @@
else:
with iolock:
if reason == "timed out":
- self.stream.write('t')
+ self._write_dot('t')
else:
if not self._options.nodiff:
self.stream.write('\n')
# Exclude the '\n' from highlighting to lex correctly
formatted = 'ERROR: %s output changed\n' % test
self.stream.write(highlightmsg(formatted, self.color))
- self.stream.write('!')
+ self._write_dot('!')
self.stream.flush()
def addSuccess(self, test):
with iolock:
- super(TestResult, self).addSuccess(test)
+ # bypass the TextTestResult method as we deal with the output ourselves
+ super(base_class, self).addSuccess(test)
+ if self.showAll:
+ self._write_status(test, "ok")
+ elif self.dots:
+ self._write_dot('.')
self.successes.append(test)
def addError(self, test, err):
- super(TestResult, self).addError(test, err)
+ super(base_class, self).addError(test, err)
+ if self.showAll:
+ self._write_status(test, "ERROR")
+ elif self.dots:
+ self._write_dot('E')
if self._options.first:
self.stop()
@@ -2308,8 +2378,7 @@
if self.showAll:
self.stream.writeln('skipped %s' % reason)
else:
- self.stream.write('s')
- self.stream.flush()
+ self._write_dot('s')
def addIgnore(self, test, reason):
self.ignored.append((test, reason))
@@ -2318,10 +2387,9 @@
self.stream.writeln('ignored %s' % reason)
else:
if reason not in ('not retesting', "doesn't match keyword"):
- self.stream.write('i')
+ self._write_dot('i')
else:
self.testsRun += 1
- self.stream.flush()
def addOutputMismatch(self, test, ret, got, expected):
"""Record a mismatch in test output for a particular test."""
@@ -2450,7 +2518,7 @@
loadtest=None,
showchannels=False,
*args,
- **kwargs
+ **kwargs,
):
"""Create a new instance that can run tests with a configuration.
@@ -3290,14 +3358,15 @@
# Setting PYTHONPATH with an activated venv causes the modules installed
# in it to be ignored. Therefore, include the related paths in sys.path
- # in PYTHONPATH.
- virtual_env = osenvironb.get(b"VIRTUAL_ENV")
- if virtual_env:
- virtual_env = os.path.join(virtual_env, b'')
- for p in sys.path:
- p = _sys2bytes(p)
- if p.startswith(virtual_env):
- pypath.append(p)
+ # in PYTHONPATH. If the executable is run directly without activation,
+ # any modules installed in it would also be ignored, so include them for
+ # the same reason.
+
+ for p in sys.path:
+ if p.startswith(sys.exec_prefix):
+ path = _sys2bytes(p)
+ if path not in pypath:
+ pypath.append(path)
# We have to augment PYTHONPATH, rather than simply replacing
# it, in case external libraries are only available via current
@@ -3612,7 +3681,7 @@
usechg=bool(self.options.with_chg or self.options.chg),
chgdebug=self.options.chg_debug,
useipv6=useipv6,
- **kwds
+ **kwds,
)
t.should_reload = True
return t
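
The run-tests.py changes centralize the per-test progress characters in `_write_dot()`, which colors each character and prints a running count every 75 items. A stripped-down sketch of that counter logic (hypothetical class; coloring and the iolock are omitted):

import sys


class DotProgress:
    def __init__(self, stream=sys.stdout, per_line=75):
        self.stream = stream
        self.per_line = per_line
        self.printed = 0

    def write(self, ch):
        self.stream.write(ch)
        self.printed += 1
        if self.printed % self.per_line == 0:
            # Right-aligned running count, then start a new line of dots.
            self.stream.write(f' [{self.printed}]\n'.rjust(8))
        self.stream.flush()


progress = DotProgress()
for outcome in '.' * 80:  # e.g. 80 successful tests
    progress.write(outcome)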
--- a/tests/seq.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/seq.py Sat Oct 26 04:16:00 2024 +0200
@@ -7,17 +7,15 @@
# seq START STOP [START, STOP] stepping by 1
# seq START STEP STOP [START, STOP] stepping by STEP
-import os
+import io
import sys
-try:
- import msvcrt
-
- msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
- msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
- msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
-except ImportError:
- pass
+sys.stdout = io.TextIOWrapper(
+ sys.stdout.buffer,
+ sys.stdout.encoding,
+ sys.stdout.errors,
+ newline="\n",
+)
start = 1
if len(sys.argv) > 2:
--- a/tests/test-acl.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-acl.t Sat Oct 26 04:16:00 2024 +0200
@@ -202,7 +202,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 5 parts total
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -280,7 +280,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 5 parts total
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -355,7 +355,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
abort: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
no rollback information available
0:6675d58eff77
@@ -879,7 +879,7 @@
bundle2-input-bundle: 7 parts total
transaction abort!
rollback completed
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
abort: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
no rollback information available
0:6675d58eff77
@@ -1048,7 +1048,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
abort: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
no rollback information available
0:6675d58eff77
@@ -1380,7 +1380,7 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 5 parts total
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
updating the branch cache
added 3 changesets with 3 changes to 3 files
bundle2-output-bundle: "HG20", 1 parts total
@@ -1464,7 +1464,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
no rollback information available
0:6675d58eff77
@@ -1632,7 +1632,7 @@
bundle2-input-bundle: 5 parts total
transaction abort!
rollback completed
- truncating cache/rbc-revs-v1 to 8
+ resetting content of cache/rbc-revs-v2
abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
no rollback information available
0:6675d58eff77
--- a/tests/test-alias.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-alias.t Sat Oct 26 04:16:00 2024 +0200
@@ -139,38 +139,37 @@
show changed files in the working directory
- Show status of files in the repository. If names are given, only files
- that match are shown. Files that are clean or ignored or the source of a
- copy/move operation, are not listed unless -c/--clean, -i/--ignored,
- -C/--copies or -A/--all are given. Unless options described with "show
- only ..." are given, the options -mardu are used.
+ Show status of files in the repository. If names are given, only files that
+ match are shown. Files that are clean or ignored or the source of a copy/move
+ operation, are not listed unless -c/--clean, -i/--ignored, -C/--copies or
+ -A/--all are given. Unless options described with "show only ..." are given,
+ the options -mardu are used.
- Option -q/--quiet hides untracked (unknown and ignored) files unless
- explicitly requested with -u/--unknown or -i/--ignored.
+ Option -q/--quiet hides untracked (unknown and ignored) files unless
+ explicitly requested with -u/--unknown or -i/--ignored.
- Note:
- 'hg status' may appear to disagree with diff if permissions have
- changed or a merge has occurred. The standard diff format does not
- report permission changes and diff only reports changes relative to one
- merge parent.
+ Note:
+ 'hg status' may appear to disagree with diff if permissions have changed or
+ a merge has occurred. The standard diff format does not report permission
+ changes and diff only reports changes relative to one merge parent.
- If one revision is given, it is used as the base revision. If two
- revisions are given, the differences between them are shown. The --change
- option can also be used as a shortcut to list the changed files of a
- revision from its first parent.
+ If one revision is given, it is used as the base revision. If two revisions
+ are given, the differences between them are shown. The --change option can
+ also be used as a shortcut to list the changed files of a revision from its
+ first parent.
- The codes used to show the status of files are:
+ The codes used to show the status of files are:
- M = modified
- A = added
- R = removed
- C = clean
- ! = missing (deleted by non-hg command, but still tracked)
- ? = not tracked
- I = ignored
- = origin of the previous file (with --copies)
+ M = modified
+ A = added
+ R = removed
+ C = clean
+ ! = missing (deleted by non-hg command, but still tracked)
+ ? = not tracked
+ I = ignored
+ = origin of the previous file (with --copies)
- Returns 0 on success.
+ Returns 0 on success.
defined by: * (glob)
*/* (glob) (?)
--- a/tests/test-ancestor.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-ancestor.py Sat Oct 26 04:16:00 2024 +0200
@@ -87,6 +87,7 @@
testcount = 10
inccount = 10
nerrs = [0]
+
# the default mu and sigma give us a nice distribution of mostly
# single-digit counts (including 0) with some higher ones
def lognormrandom(mu, sigma):
--- a/tests/test-audit-path.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-audit-path.t Sat Oct 26 04:16:00 2024 +0200
@@ -91,7 +91,8 @@
$ hg manifest -r0
.hg/test
$ hg update -Cr0
- abort: path contains illegal component: .hg/test
+ abort: path contains illegal component: .hg/test (no-rust !)
+ abort: path '.hg/test' is inside the '.hg' folder (rust !)
[10]
attack foo/.hg/test
@@ -107,6 +108,7 @@
$ hg manifest -r2
back
back/test
+
#if symlink
$ hg update -Cr2
abort: path 'back/test' traverses symbolic link 'back'
@@ -114,7 +116,7 @@
#else
('back' will be a file and cause some other system specific error)
$ hg update -Cr2
- abort: $TESTTMP/target/back/test: $ENOTDIR$
+ abort: $TESTTMP/repo/target/back/test: $ENOTDIR$
[255]
#endif
@@ -220,17 +222,30 @@
'a' and 'a/b' are taken as good paths. still applyupdates() should fail.
$ hg up -qC null
+#if rust
+ $ hg up 1
+ abort: path 'a/*' traverses symbolic link 'a'
+ [10]
+#endif
+
+#if no-rust
$ hg up 1
abort: path 'a/b' traverses symbolic link 'a'
[255]
+#endif
$ ls ../update-symlink-out
try branch update replacing directory with symlink, and its content: the
path 'a' is audited as a directory first, which should be audited again as
a symlink.
+#if rust
+ $ rm -rf a
+#else
$ rm -f a
+#endif
$ hg up -qC 2
+
$ hg up 1
abort: path 'a/b' traverses symbolic link 'a'
[255]
--- a/tests/test-bad-extension.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-bad-extension.t Sat Oct 26 04:16:00 2024 +0200
@@ -143,6 +143,7 @@
YYYY/MM/DD HH:MM:SS (PID)> > extension baddocext take a total of * to load (glob)
YYYY/MM/DD HH:MM:SS (PID)> > extension gpg take a total of * to load (glob)
YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
+ Given a topic, extension, or command name, print help for that topic.
#endif
confirm that there's no crash when an extension's documentation is bad
--- a/tests/test-batching.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-batching.py Sat Oct 26 04:16:00 2024 +0200
@@ -55,7 +55,6 @@
# usage of "thing" interface
def use(it):
-
# Direct call to base method shared between client and server.
bprint(it.hello())
@@ -106,6 +105,7 @@
# server side
+
# equivalent of wireproto's global functions
class server:
def __init__(self, local):
@@ -156,6 +156,7 @@
# local side
+
# equivalent of wireproto.encode/decodelist, that is, type-specific marshalling
# here we just transform the strings a bit to check we're properly en-/decoding
def mangle(s):
--- a/tests/test-bisect.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-bisect.t Sat Oct 26 04:16:00 2024 +0200
@@ -579,7 +579,7 @@
tip is obsolete
---------------------
- $ hg debugobsolete `hg id --debug -i -r tip`
+ $ hg debugobsolete `hg id -T "{node}" -r tip`
1 new obsolescence markers
obsoleted 1 changesets
$ hg bisect --reset
@@ -608,7 +608,7 @@
reverting a
$ hg commit -m 'msg 30 -- fixed'
created new head
- $ hg debugobsolete `hg id --debug -i -r 30` `hg id --debug -i -r .`
+ $ hg debugobsolete `hg id -T "{node}" -r 30` `hg id -T "{node}" -r .`
1 new obsolescence markers
obsoleted 1 changesets
$ hg bisect
--- a/tests/test-branches.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-branches.t Sat Oct 26 04:16:00 2024 +0200
@@ -790,14 +790,14 @@
$ rm -rf .hg/cache; hg head a -T '{rev}\n'
5
$ f --hexdump --size .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=92
+ .hg/cache/rbc-names-v2: size=92
0000: 64 65 66 61 75 6c 74 00 61 00 62 00 63 00 61 20 |default.a.b.c.a |
0010: 62 72 61 6e 63 68 20 6e 61 6d 65 20 6d 75 63 68 |branch name much|
0020: 20 6c 6f 6e 67 65 72 20 74 68 61 6e 20 74 68 65 | longer than the|
0030: 20 64 65 66 61 75 6c 74 20 6a 75 73 74 69 66 69 | default justifi|
0040: 63 61 74 69 6f 6e 20 75 73 65 64 20 62 79 20 62 |cation used by b|
0050: 72 61 6e 63 68 65 73 00 6d 00 6d 64 |ranches.m.md|
- .hg/cache/rbc-revs-v1: size=160
+ .hg/cache/rbc-revs-v2: size=160
0000: 19 70 9c 5a 00 00 00 00 dd 6b 44 0d 00 00 00 01 |.p.Z.....kD.....|
0010: 88 1f e2 b9 00 00 00 01 ac 22 03 33 00 00 00 02 |.........".3....|
0020: ae e3 9c d1 00 00 00 02 d8 cb c6 1d 00 00 00 01 |................|
@@ -811,107 +811,204 @@
no errors when revbranchcache is not writable
- $ echo >> .hg/cache/rbc-revs-v1
- $ mv .hg/cache/rbc-revs-v1 .hg/cache/rbc-revs-v1_
- $ mkdir .hg/cache/rbc-revs-v1
+ $ echo >> .hg/cache/rbc-revs-v2
+ $ mv .hg/cache/rbc-revs-v2 .hg/cache/rbc-revs-v2_
+ $ mkdir .hg/cache/rbc-revs-v2
$ rm -f .hg/cache/branch* && hg head a -T '{rev}\n'
5
- $ rmdir .hg/cache/rbc-revs-v1
- $ mv .hg/cache/rbc-revs-v1_ .hg/cache/rbc-revs-v1
+ $ rmdir .hg/cache/rbc-revs-v2
+ $ mv .hg/cache/rbc-revs-v2_ .hg/cache/rbc-revs-v2
no errors when wlock cannot be acquired
#if unix-permissions
- $ mv .hg/cache/rbc-revs-v1 .hg/cache/rbc-revs-v1_
+ $ mv .hg/cache/rbc-revs-v2 .hg/cache/rbc-revs-v2_
$ rm -f .hg/cache/branch*
$ chmod 555 .hg
$ hg head a -T '{rev}\n'
5
$ chmod 755 .hg
- $ mv .hg/cache/rbc-revs-v1_ .hg/cache/rbc-revs-v1
+ $ mv .hg/cache/rbc-revs-v2_ .hg/cache/rbc-revs-v2
#endif
-recovery from invalid cache revs file with trailing data
- $ echo >> .hg/cache/rbc-revs-v1
+dealing with a valid cache revs file except for extra trailing data
+-------------------------------------------------------------------
+
+When the trailing data are smaller than a record, they are practically
+invisible to the cache and ignored. No warning is issued about them.
+
+ $ echo '42' >> .hg/cache/rbc-revs-v2
$ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
5
- truncating cache/rbc-revs-v1 to 160
$ f --size .hg/cache/rbc-revs*
- .hg/cache/rbc-revs-v1: size=160
+ .hg/cache/rbc-revs-v2: size=164
+
+When the trailing data are larger than a record, they are seen as extra
+(probably invalid) data. We warn about them when writing.
+
+ $ echo 'abracadabra!' >> .hg/cache/rbc-revs-v2
+ $ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
+ 5
+ cache/rbc-revs-v2 contains 17 unknown trailing bytes
+ $ f --size .hg/cache/rbc-revs*
+ .hg/cache/rbc-revs-v2: size=177
recovery from invalid cache file with partial last record
- $ mv .hg/cache/rbc-revs-v1 .
- $ f -qDB 119 rbc-revs-v1 > .hg/cache/rbc-revs-v1
+---------------------------------------------------------
+ $ mv .hg/cache/rbc-revs-v2 .
+ $ f -qDB 119 rbc-revs-v2 > .hg/cache/rbc-revs-v2
$ f --size .hg/cache/rbc-revs*
- .hg/cache/rbc-revs-v1: size=119
+ .hg/cache/rbc-revs-v2: size=119
$ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
5
- truncating cache/rbc-revs-v1 to 112
+ resetting content of cache/rbc-revs-v2
$ f --size .hg/cache/rbc-revs*
- .hg/cache/rbc-revs-v1: size=160
+ .hg/cache/rbc-revs-v2: size=160
recovery from invalid cache file with missing record - no truncation
- $ mv .hg/cache/rbc-revs-v1 .
- $ f -qDB 112 rbc-revs-v1 > .hg/cache/rbc-revs-v1
+ $ mv .hg/cache/rbc-revs-v2 .
+ $ f -qDB 112 rbc-revs-v2 > .hg/cache/rbc-revs-v2
$ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
5
$ f --size .hg/cache/rbc-revs*
- .hg/cache/rbc-revs-v1: size=160
+ .hg/cache/rbc-revs-v2: size=160
recovery from invalid cache file with some bad records
- $ mv .hg/cache/rbc-revs-v1 .
- $ f -qDB 8 rbc-revs-v1 > .hg/cache/rbc-revs-v1
+ $ mv .hg/cache/rbc-revs-v2 .
+ $ f -qDB 8 rbc-revs-v2 > .hg/cache/rbc-revs-v2
$ f --size .hg/cache/rbc-revs*
- .hg/cache/rbc-revs-v1: size=8
- $ f -qDB 112 rbc-revs-v1 >> .hg/cache/rbc-revs-v1
+ .hg/cache/rbc-revs-v2: size=8
+ $ f -qDB 112 rbc-revs-v2 >> .hg/cache/rbc-revs-v2
$ f --size .hg/cache/rbc-revs*
- .hg/cache/rbc-revs-v1: size=120
+ .hg/cache/rbc-revs-v2: size=120
$ hg log -r 'branch(.)' -T '{rev} ' --debug
history modification detected - truncating revision branch cache to revision * (glob)
history modification detected - truncating revision branch cache to revision 1
- 3 4 8 9 10 11 12 13 truncating cache/rbc-revs-v1 to 8
+ 3 4 8 9 10 11 12 13 resetting content of cache/rbc-revs-v2
$ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
5
- truncating cache/rbc-revs-v1 to 104
+ resetting content of cache/rbc-revs-v2
$ f --size --hexdump --bytes=16 .hg/cache/rbc-revs*
+ .hg/cache/rbc-revs-v2: size=160
+ 0000: 19 70 9c 5a 00 00 00 00 dd 6b 44 0d 00 00 00 01 |.p.Z.....kD.....|
+
+Smoothly reuse "v1" format if no v2 exists
+------------------------------------------
+
+read-only operation with valid data
+(actively rewrites the data)
+
+ $ rm .hg/cache/rbc-names-v2
+ $ rm .hg/cache/rbc-revs-v2
+ $ rm .hg/cache/branch*
+ $ hg head a -T '{rev}\n' --debug
+ 5
+ $ mv .hg/cache/rbc-names-v2 .hg/cache/rbc-names-v1
+ $ mv .hg/cache/rbc-revs-v2 .hg/cache/rbc-revs-v1
+ $ rm .hg/cache/branch*
+ $ hg head a -T '{rev}\n' --debug
+ 5
+ $ f --size .hg/cache/rbc-*-*
+ .hg/cache/rbc-names-v1: size=92
+ .hg/cache/rbc-names-v2: size=92
.hg/cache/rbc-revs-v1: size=160
- 0000: 19 70 9c 5a 00 00 00 00 dd 6b 44 0d 00 00 00 01 |.p.Z.....kD.....|
+ .hg/cache/rbc-revs-v2: size=160
+
+
+Write operations write full v2 files
+
+ $ mv .hg/cache/rbc-names-v2 .hg/cache/rbc-names-v1
+ $ mv .hg/cache/rbc-revs-v2 .hg/cache/rbc-revs-v1
+ $ f --size .hg/cache/rbc-*
+ .hg/cache/rbc-names-v1: size=92
+ .hg/cache/rbc-revs-v1: size=160
+ $ hg branch not-here-for-long
+ marked working directory as branch not-here-for-long
+ $ hg ci -m not-long --debug
+ reusing manifest from p1 (no file change)
+ committing changelog
+ updating the branch cache
+ committed changeset * (glob)
+ $ f --size .hg/cache/rbc-*
+ .hg/cache/rbc-names-v1: size=92
+ .hg/cache/rbc-names-v2: size=110
+ .hg/cache/rbc-revs-v1: size=160
+ .hg/cache/rbc-revs-v2: size=168
+
+So does an explicit cache upgrade
+ $ mv .hg/cache/rbc-names-v2 .hg/cache/rbc-names-v1
+ $ mv .hg/cache/rbc-revs-v2 .hg/cache/rbc-revs-v1
+ $ f --size .hg/cache/rbc-*
+ .hg/cache/rbc-names-v1: size=110
+ .hg/cache/rbc-revs-v1: size=168
+ $ hg debugupdatecache
+ $ f --size .hg/cache/rbc-*
+ .hg/cache/rbc-names-v1: size=110
+ .hg/cache/rbc-names-v2: size=110
+ .hg/cache/rbc-revs-v1: size=168
+ .hg/cache/rbc-revs-v2: size=168
+
+With invalid v1 data, we rewrite it too (as v2)
+
+ $ cp .hg/cache/rbc-names-v2 .hg/cache/rbc-names-v1
+ $ mv .hg/cache/rbc-names-v2 .hg/cache/rbc-revs-v1
+ $ rm .hg/cache/rbc-revs-v2
+ $ rm .hg/cache/branch*
+ $
+ $ hg head a -T '{rev}\n' --debug
+ history modification detected - truncating revision branch cache to revision 0
+ 5
+ $ f --size .hg/cache/rbc-*-*
+ .hg/cache/rbc-names-v1: size=110
+ .hg/cache/rbc-names-v2: size=110
+ .hg/cache/rbc-revs-v1: size=110
+ .hg/cache/rbc-revs-v2: size=168
+
+cleanup
+
+ $ hg up -qr '.^'
+ $ hg rollback -qf
+ $ rm .hg/cache/*
+ $ hg debugupdatecache
+ $ f --size .hg/cache/rbc-*
+ .hg/cache/rbc-names-v2: size=92
+ .hg/cache/rbc-revs-v2: size=160
cache is updated when committing
$ hg branch i-will-regret-this
marked working directory as branch i-will-regret-this
$ hg ci -m regrets
$ f --size .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=111
- .hg/cache/rbc-revs-v1: size=168
+ .hg/cache/rbc-names-v2: size=111
+ .hg/cache/rbc-revs-v2: size=168
update after rollback - the cache will be correct but rbc-names will still
contain the branch name even though it is no longer used
$ hg up -qr '.^'
$ hg rollback -qf
$ f --size .hg/cache/rbc-names-*
- .hg/cache/rbc-names-v1: size=111
+ .hg/cache/rbc-names-v2: size=111
$ grep "i-will-regret-this" .hg/cache/rbc-names-* > /dev/null
$ f --size .hg/cache/rbc-revs-*
- .hg/cache/rbc-revs-v1: size=160
+ .hg/cache/rbc-revs-v2: size=168
cache is updated/truncated when stripping - it is thus very hard to get in a
situation where the cache is out of sync and the hash check detects it
$ hg --config extensions.strip= strip -r tip --nob
$ f --size .hg/cache/rbc-revs*
- .hg/cache/rbc-revs-v1: size=152
+ .hg/cache/rbc-revs-v2: size=152
cache is rebuilt when corruption is detected
- $ echo > .hg/cache/rbc-names-v1
+ $ echo > .hg/cache/rbc-names-v2
$ hg log -r '5:&branch(.)' -T '{rev} ' --debug
referenced branch names not found - rebuilding revision branch cache from scratch
- 8 9 10 11 12 13 truncating cache/rbc-revs-v1 to 40
+ 8 9 10 11 12 13 resetting content of rbc-names-v2
$ f --size .hg/cache/rbc-names-*
- .hg/cache/rbc-names-v1: size=84
+ .hg/cache/rbc-names-v2: size=84
$ grep "i-will-regret-this" .hg/cache/rbc-names-* > /dev/null
[1]
$ f --size .hg/cache/rbc-revs-*
- .hg/cache/rbc-revs-v1: size=152
+ .hg/cache/rbc-revs-v2: size=152
Test that cache files are created and grows correctly:
@@ -923,12 +1020,12 @@
#if v2
$ f --size .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=1
- .hg/cache/rbc-revs-v1: size=48
+ .hg/cache/rbc-names-v2: size=1
+ .hg/cache/rbc-revs-v2: size=48
#else
$ f --size .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=84
- .hg/cache/rbc-revs-v1: size=152
+ .hg/cache/rbc-names-v2: size=84
+ .hg/cache/rbc-revs-v2: size=152
#endif
$ cd ..
@@ -947,16 +1044,16 @@
#if v2
$ f --size --sha256 .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
- .hg/cache/rbc-revs-v1: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
+ .hg/cache/rbc-names-v2: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
+ .hg/cache/rbc-revs-v2: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
- $ : > .hg/cache/rbc-revs-v1
+ $ : > .hg/cache/rbc-revs-v2
No superfluous rebuilding of cache:
$ hg log -r "branch(null)&branch(branch)" --debug
$ f --size --sha256 .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
- .hg/cache/rbc-revs-v1: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
+ .hg/cache/rbc-names-v2: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
+ .hg/cache/rbc-revs-v2: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
#endif
$ cd ..
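
The trailing-data behaviour exercised earlier in this file follows from the rbc-revs cache being an array of fixed-width records. A hypothetical sketch of the rule, assuming the 8-byte record width implied by the sizes in the test:

RECORD_SIZE = 8  # assumed width of one revision record


def audit_trailing(cache_size, tracked_revs):
    expected = tracked_revs * RECORD_SIZE
    trailing = cache_size - expected
    if trailing >= RECORD_SIZE:
        # At least one whole unknown record: worth warning about.
        print('cache contains %d unknown trailing bytes' % trailing)
    # 0 < trailing < RECORD_SIZE is invisible to the cache and ignored.


audit_trailing(164, 20)  # 4 trailing bytes: silent
audit_trailing(177, 20)  # prints: cache contains 17 unknown trailing bytes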
--- a/tests/test-bundle-phase-internal.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-bundle-phase-internal.t Sat Oct 26 04:16:00 2024 +0200
@@ -250,7 +250,8 @@
(1 internal changesets selected)
[255]
$ hg debugbundle ../internal-01.hg
- abort: $ENOENT$: '../internal-01.hg'
+ abort: $ENOENT$: '../internal-01.hg' (no-windows !)
+ abort: ../internal-01.hg: $ENOENT$ (windows !)
[255]
try to bundle it with other, somewhat explicitly
@@ -263,7 +264,8 @@
(1 internal changesets selected)
[255]
$ hg debugbundle ../internal-02.hg
- abort: $ENOENT$: '../internal-02.hg'
+ abort: $ENOENT$: '../internal-02.hg' (no-windows !)
+ abort: ../internal-02.hg: $ENOENT$ (windows !)
[255]
bundle visible ancestors
--- a/tests/test-cache-abuse.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-cache-abuse.t Sat Oct 26 04:16:00 2024 +0200
@@ -82,13 +82,13 @@
$ damage branches branch2-base "rm .hg/cache/branch2-[vs]*"
$ damage branches branch2-served "rm .hg/cache/branch2-[bv]*"
$ damage branches branch2-visible
- $ damage "log -r branch(.)" rbc-names-v1
- $ damage "log -r branch(default)" rbc-names-v1
- $ damage "log -r branch(b2)" rbc-revs-v1
+ $ damage "log -r branch(.)" rbc-names-v2
+ $ damage "log -r branch(default)" rbc-names-v2
+ $ damage "log -r branch(b2)" rbc-revs-v2
We currently can't detect an rbc cache with unknown names:
- $ damage "log -qr branch(b2)" rbc-names-v1
+ $ damage "log -qr branch(b2)" rbc-names-v2
--- before * (glob)
+++ after * (glob)
@@ -1,8 +?,0 @@ (glob)
--- a/tests/test-cbor.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-cbor.py Sat Oct 26 04:16:00 2024 +0200
@@ -216,11 +216,11 @@
for size in lens:
if size < 24:
hlen = 1
- elif size < 2 ** 8:
+ elif size < 2**8:
hlen = 2
- elif size < 2 ** 16:
+ elif size < 2**16:
hlen = 3
- elif size < 2 ** 32:
+ elif size < 2**32:
hlen = 5
else:
assert False
@@ -487,7 +487,7 @@
)
def testdecodepartialushort(self):
- encoded = b''.join(cborutil.streamencode(2 ** 15))
+ encoded = b''.join(cborutil.streamencode(2**15))
self.assertEqual(
cborutil.decodeitem(encoded[0:1]),
@@ -499,7 +499,7 @@
)
self.assertEqual(
cborutil.decodeitem(encoded[0:5]),
- (True, 2 ** 15, 3, cborutil.SPECIAL_NONE),
+ (True, 2**15, 3, cborutil.SPECIAL_NONE),
)
def testdecodepartialshort(self):
@@ -519,7 +519,7 @@
)
def testdecodepartialulong(self):
- encoded = b''.join(cborutil.streamencode(2 ** 28))
+ encoded = b''.join(cborutil.streamencode(2**28))
self.assertEqual(
cborutil.decodeitem(encoded[0:1]),
@@ -539,7 +539,7 @@
)
self.assertEqual(
cborutil.decodeitem(encoded[0:5]),
- (True, 2 ** 28, 5, cborutil.SPECIAL_NONE),
+ (True, 2**28, 5, cborutil.SPECIAL_NONE),
)
def testdecodepartiallong(self):
@@ -567,7 +567,7 @@
)
def testdecodepartialulonglong(self):
- encoded = b''.join(cborutil.streamencode(2 ** 32))
+ encoded = b''.join(cborutil.streamencode(2**32))
self.assertEqual(
cborutil.decodeitem(encoded[0:1]),
@@ -603,7 +603,7 @@
)
self.assertEqual(
cborutil.decodeitem(encoded[0:9]),
- (True, 2 ** 32, 9, cborutil.SPECIAL_NONE),
+ (True, 2**32, 9, cborutil.SPECIAL_NONE),
)
with self.assertRaisesRegex(
--- a/tests/test-censor.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-censor.t Sat Oct 26 04:16:00 2024 +0200
@@ -35,26 +35,26 @@
$ echo 'Tainted file' > target
$ echo 'Passwords: hunter2' >> target
$ hg ci -m taint target
- $ C1=`hg id --debug -i`
+ $ C1=`hg id -r . -T "{node}"`
$ echo 'hunter3' >> target
$ echo 'Normal file v2' > bystander
$ hg ci -m moretaint target bystander
- $ C2=`hg id --debug -i`
+ $ C2=`hg id -r . -T "{node}"`
Add a new sanitized versions to correct our mistake. Name the first head H1,
the second head H2, and so on
$ echo 'Tainted file is now sanitized' > target
$ hg ci -m sanitized target
- $ H1=`hg id --debug -i`
+ $ H1=`hg id -r . -T "{node}"`
$ hg update -r $C2
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo 'Tainted file now super sanitized' > target
$ hg ci -m 'super sanitized' target
created new head
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
Verify target contents before censorship at each revision
@@ -239,7 +239,7 @@
$ C3=$H1
$ echo 'advanced head H1' > target
$ hg ci -m 'advance head H1' target
- $ H1=`hg id --debug -i`
+ $ H1=`hg id -r . -T "{node}"`
$ hg --config extensions.censor= censor -r $C3 target
checking for the censored content in 2 heads
checking for the censored content in the working directory
@@ -262,7 +262,7 @@
[255]
$ echo 'twiddling thumbs' > bystander
$ hg ci -m 'bystander commit'
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
$ hg --config extensions.censor= censor -r "$H2^" target
checking for the censored content in 2 heads
abort: cannot censor file in heads (efbe78065929)
@@ -273,7 +273,7 @@
$ echo 'seriously no passwords' > target
$ hg ci -m 'extend second head arbitrarily' target
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
$ hg update -r "$H2^"
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg --config extensions.censor= censor -r . target
@@ -290,7 +290,7 @@
$ C4=$H2
$ hg rm target
$ hg ci -m 'delete target so it may be censored'
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
$ hg --config extensions.censor= censor -r $C4 target
checking for the censored content in 2 heads
checking for the censored content in the working directory
@@ -301,7 +301,7 @@
$ echo 'fresh start' > target
$ hg add target
$ hg ci -m reincarnated target
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
$ hg cat -r $H2 target | head -n 10
fresh start
$ hg cat -r "$H2^" target | head -n 10
@@ -318,11 +318,11 @@
8 ??? yes file target (glob) (revlogv1 !)
$ $TESTDIR/seq.py 4000 | $TESTDIR/sha256line.py > target
$ hg ci -m 'add 100k passwords'
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
$ C5=$H2
$ hg revert -r "$H2^" target
$ hg ci -m 'cleaned 100k passwords'
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
$ hg debugrevlogstats | grep target
rev-count data-size inl type target
10 ?????? no file target (glob)
@@ -399,11 +399,11 @@
$ echo 'Passwords: hunter2hunter2' > target
$ hg ci -m 're-add password from clone' target
created new head
- $ H3=`hg id --debug -i`
+ $ H3=`hg id -r . -T "{node}"`
$ REV=$H3
$ echo 'Re-sanitized; nothing to see here' > target
$ hg ci -m 're-sanitized' target
- $ H2=`hg id --debug -i`
+ $ H2=`hg id -r . -T "{node}"`
$ CLEANREV=$H2
$ hg cat -r $REV target | head -n 10
Passwords: hunter2hunter2
@@ -556,17 +556,17 @@
$ echo root > target
$ hg add target
$ hg commit -m root
- $ B0=`hg id --debug -i`
+ $ B0=`hg id -r . -T "{node}"`
$ for x in `"$PYTHON" $TESTDIR/seq.py 0 50000`
> do
> echo "Password: hunter$x" >> target
> done
$ hg ci -m 'write a long file'
- $ B1=`hg id --debug -i`
+ $ B1=`hg id -r . -T "{node}"`
$ echo 'small change (should create a delta)' >> target
$ hg ci -m 'create a delta over the password'
(should show that the last revision is a delta, not a snapshot)
- $ B2=`hg id --debug -i`
+ $ B2=`hg id -r . -T "{node}"`
Make sure the last revision is a delta against the revision we will censor
--- a/tests/test-chainsaw-update.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-chainsaw-update.t Sat Oct 26 04:16:00 2024 +0200
@@ -105,8 +105,13 @@
operation would not break, because the hostnames registered in locks differ
from the current hostname (happens a lot with successive containers):
+#if symlink
$ ln -s invalid.host.test/effffffc:171814 .hg/store/lock
$ ln -s invalid.host.test/effffffc:171814 .hg/wlock
+#else
+ $ printf 'invalid.host.test/effffffc:171814' > .hg/store/lock
+ $ printf 'invalid.host.test/effffffc:171814' > .hg/wlock
+#endif
$ hg debuglock
lock: (.*?), process 171814, host invalid.host.test/effffffc \((\d+)s\) (re)
wlock: (.*?), process 171814, host invalid.host.test/effffffc \((\d+)s\) (re)
--- a/tests/test-check-format.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-check-format.t Sat Oct 26 04:16:00 2024 +0200
@@ -6,6 +6,8 @@
$ export USERPROFILE
#endif
+ $ . "$TESTDIR/helpers-testrepo.sh"
+
$ cd $RUNTESTDIR/..
- $ black --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
+ $ black --check --diff `testrepohg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
--- a/tests/test-check-help.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-check-help.t Sat Oct 26 04:16:00 2024 +0200
@@ -19,12 +19,11 @@
> stdout.write(b'%s\n' % s)
> EOF
- $ cd "$TESTDIR"/..
-
Check if ":hg:`help TOPIC`" is valid:
(use "xargs -n1 -t" to see which help commands are executed)
+ $ cd "$TESTDIR"/..
$ testrepohg files 'glob:{hgdemandimport,hgext,mercurial}/**/*.py' \
> | sed 's|\\|/|g' \
> | xargs "$PYTHON" "$TESTTMP/scanhelptopics.py" \
- > | xargs -n1 hg help --config extensions.phabricator= > /dev/null
+ > | xargs -n1 hg --cwd "$TESTTMP" help --config extensions.phabricator= > /dev/null
--- a/tests/test-check-interfaces.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-check-interfaces.py Sat Oct 26 04:16:00 2024 +0200
@@ -9,12 +9,6 @@
import subprocess
import sys
-# Only run if tests are run in a repo
-if subprocess.call(
- [sys.executable, '%s/hghave' % os.environ['TESTDIR'], 'test-repo']
-):
- sys.exit(80)
-
from mercurial.interfaces import (
dirstate as intdirstate,
repository,
@@ -41,7 +35,6 @@
)
testdir = os.path.dirname(__file__)
-rootdir = pycompat.fsencode(os.path.normpath(os.path.join(testdir, '..')))
sys.path[0:0] = [testdir]
import simplestorerepo
@@ -117,6 +110,14 @@
pass
+def init_test_repo():
+ testtmp_dir = os.path.normpath(os.environ['TESTTMP'])
+ test_repo_dir = os.path.join(testtmp_dir, "test-repo")
+ subprocess.run(["hg", "init", test_repo_dir])
+ subprocess.run(["hg", "--cwd", test_repo_dir, "debugbuilddag", "+3<3+1"])
+ return test_repo_dir
+
+
def main():
ui = uimod.ui()
# Needed so we can open a local repo with obsstore without a warning.
@@ -168,7 +169,8 @@
ziverify.verifyClass(
repository.ilocalrepositoryfilestorage, localrepo.revlogfilestorage
)
- repo = localrepo.makelocalrepository(ui, rootdir)
+ test_repo_dir = init_test_repo()
+ repo = localrepo.makelocalrepository(ui, pycompat.fsencode(test_repo_dir))
checkzobject(repo)
ziverify.verifyClass(
@@ -261,4 +263,7 @@
checkzobject(revlog.revlogproblem())
-main()
+# Skip checking until the interfaces are converted to protocols
+sys.exit(0)
+
+# main()
--- a/tests/test-check-interfaces.py.out Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-check-interfaces.py.out Sat Oct 26 04:16:00 2024 +0200
@@ -1,2 +0,0 @@
-public attribute not declared in interfaces: badpeer.badattribute
-public attribute not declared in interfaces: badpeer.badmethod
--- a/tests/test-check-py3-compat.t Thu Jan 11 20:37:34 2024 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-#require test-repo pure
-
- $ . "$TESTDIR/helpers-testrepo.sh"
- $ cd "$TESTDIR"/..
-
- $ testrepohg files 'set:(**.py) - grep(pygments)' \
- > -X hgext/fsmonitor/pywatchman \
- > -X mercurial/cffi \
- > -X mercurial/thirdparty \
- > | sed 's|\\|/|g' | xargs "$PYTHON" contrib/check-py3-compat.py \
- > | sed 's/[0-9][0-9]*)$/*)/'
- hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob) (?)
- mercurial/scmwindows.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
- mercurial/win32.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
- mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob) (no-windows !)
- mercurial/posix.py: error importing: <*Error> No module named 'fcntl' (error at posix.py:*) (glob) (windows !)
- mercurial/scmposix.py: error importing: <*Error> No module named 'fcntl' (error at scmposix.py:*) (glob) (windows !)
-
-#if pygments
- $ testrepohg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
- > | xargs "$PYTHON" contrib/check-py3-compat.py \
- > | sed 's/[0-9][0-9]*)$/*)/'
-#endif
--- a/tests/test-check-pyflakes.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-check-pyflakes.t Sat Oct 26 04:16:00 2024 +0200
@@ -8,7 +8,7 @@
$ cat > test.py <<EOF
> print(undefinedname)
> EOF
- $ "$PYTHON" -m pyflakes test.py 2>/dev/null | "$TESTDIR/filterpyflakes.py"
+ $ pyflakes test.py 2>/dev/null | "$TESTDIR/filterpyflakes.py"
test.py:1:* undefined name 'undefinedname' (glob)
$ cd "`dirname "$TESTDIR"`"
@@ -18,7 +18,7 @@
> -X contrib/python-zstandard \
> -X mercurial/thirdparty \
> 2>/dev/null \
- > | xargs "$PYTHON" -m pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
+ > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
contrib/perf.py:*:* undefined name 'xrange' (glob) (?)
mercurial/pycompat.py:*:* 'codecs' imported but unused (glob)
mercurial/pycompat.py:*:* 'concurrent.futures' imported but unused (glob)
--- a/tests/test-check-rust-format.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-check-rust-format.t Sat Oct 26 04:16:00 2024 +0200
@@ -5,7 +5,7 @@
$ cd "$TESTDIR"/..
Warning: Keep this in sync with hghave.py
- $ RUSTFMT=$(rustup which --toolchain nightly-2021-11-02 rustfmt)
+ $ RUSTFMT=$(rustup which --toolchain nightly-2024-07-16 rustfmt)
$ for f in `testrepohg files 'glob:**/*.rs'` ; do
- > $RUSTFMT --check --edition=2018 --unstable-features --color=never $f
+ > $RUSTFMT --check --edition=2021 --unstable-features --color=never $f
> done
--- a/tests/test-clone-stream-revlog-split.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-clone-stream-revlog-split.t Sat Oct 26 04:16:00 2024 +0200
@@ -68,7 +68,7 @@
$ ( \
> hg clone --debug --stream -U http://localhost:$HGPORT1 \
- > clone-while-split > client.log 2>&1; \
+ > clone-while-split --config worker.backgroundclose=0 > client.log 2>&1; \
> touch "$HG_TEST_STREAM_WALKED_FILE_3" \
> ) &
@@ -115,8 +115,8 @@
adding [s] 00changelog.d (189 bytes)
adding [s] 00changelog.i (192 bytes)
adding [c] branch2-served (94 bytes)
- adding [c] rbc-names-v1 (7 bytes)
- adding [c] rbc-revs-v1 (24 bytes)
+ adding [c] rbc-names-v2 (7 bytes)
+ adding [c] rbc-revs-v2 (24 bytes)
updating the branch cache
transferred 2.11 KB in * seconds (* */sec) (glob) (no-rust !)
transferred 2.29 KB in * seconds (* */sec) (glob) (rust !)
--- a/tests/test-clone-stream.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-clone-stream.t Sat Oct 26 04:16:00 2024 +0200
@@ -221,8 +221,8 @@
$ ls -1 clone1/.hg/cache
branch2-base
branch2-served
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-served
#endif
--- a/tests/test-clone.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-clone.t Sat Oct 26 04:16:00 2024 +0200
@@ -33,8 +33,8 @@
default 10:a7949464abda
$ ls .hg/cache
branch2-served
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
Default operation:
@@ -48,8 +48,8 @@
$ ls .hg/cache
branch2-base
branch2-served
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-served
@@ -114,8 +114,8 @@
$ ls .hg/cache
branch2-base
branch2-served
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-served
@@ -618,15 +618,10 @@
No remote source
-#if windows
$ hg clone http://$LOCALIP:3121/a b
- abort: error: * (glob)
+ abort: error: $ECONNREFUSED$
[100]
-#else
- $ hg clone http://$LOCALIP:3121/a b
- abort: error: *refused* (glob)
- [100]
-#endif
+
$ rm -rf b # work around bug with http clone
@@ -1240,7 +1235,7 @@
#if linuxormacos no-fsmonitor
$ hg clone a nofsmonitor
updating to bookmark @ on branch stable
- (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
+ (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (no-rust !)
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#else
$ hg clone a nofsmonitor
@@ -1289,7 +1284,7 @@
#if linuxormacos no-fsmonitor
$ hg up cf0fe1914066
- (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
+ (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (no-rust !)
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
#else
$ hg up cf0fe1914066
--- a/tests/test-clonebundles-autogen.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-clonebundles-autogen.t Sat Oct 26 04:16:00 2024 +0200
@@ -10,8 +10,8 @@
>
> [clone-bundles]
> auto-generate.on-change = yes
- > upload-command = cp "\$HGCB_BUNDLE_PATH" "$TESTTMP"/final-upload/
- > delete-command = rm -f "$TESTTMP/final-upload/\$HGCB_BASENAME"
+ > upload-command = sh -c 'cp "\$HGCB_BUNDLE_PATH" $TESTTMP_FORWARD_SLASH/final-upload/'
+ > delete-command = sh -c 'rm -f $TESTTMP_FORWARD_SLASH/final-upload/\$HGCB_BASENAME'
> url-template = file://$TESTTMP/final-upload/{basename}
>
> [devel]
@@ -58,6 +58,32 @@
$ hg -q commit -A -m 'add foo'
$ touch bar
$ hg -q commit -A -m 'add bar'
+
+Test that the HGCB_BUNDLE_BASENAME variable behaves as expected when unquoted.
+#if no-windows
+ $ hg clone ../server '../embed-"-name/server'
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cp ../server/.hg/hgrc '../embed-"-name/server/.hg/hgrc'
+
+ $ mv ../final-upload/ ../final-upload.bak/
+ $ mkdir ../final-upload/
+
+ $ hg push --config paths.default='../embed-"-name/server'
+ pushing to $TESTTMP/embed-"-name/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ 2 changesets found
+ added 2 changesets with 2 changes to 2 files
+ clone-bundles: starting bundle generation: bzip2-v2
+
+Restore the original upload directory for windows test consistency
+ $ rm -r ../final-upload/
+ $ mv ../final-upload.bak/ ../final-upload/
+#endif
+
$ hg push
pushing to $TESTTMP/server
searching for changes
--- a/tests/test-clonebundles.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-clonebundles.t Sat Oct 26 04:16:00 2024 +0200
@@ -78,7 +78,7 @@
$ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
$ hg clone http://localhost:$HGPORT server-not-runner
applying clone bundle from http://localhost:$HGPORT1/bundle.hg
- error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
+ error fetching bundle: (.*\$ECONNREFUSED\$|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
abort: error applying bundle
(if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
[255]
@@ -290,8 +290,10 @@
Hooks work with inline bundle
$ cp server/.hg/hgrc server/.hg/hgrc-beforeinlinehooks
- $ echo "[hooks]" >> server/.hg/hgrc
- $ echo "pretransmit-inline-clone-bundle=echo foo" >> server/.hg/hgrc
+ $ cat >> server/.hg/hgrc <<-EOF
+ > [hooks]
+ > pretransmit-inline-clone-bundle=sh -c 'printf "foo\n"'
+ > EOF
$ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook
applying clone bundle from peer-bundle-cache://full.hg
remote: foo
@@ -307,8 +309,10 @@
Hooks can make an inline bundle fail
$ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
- $ echo "[hooks]" >> server/.hg/hgrc
- $ echo "pretransmit-inline-clone-bundle=echo bar && false" >> server/.hg/hgrc
+ $ cat >> server/.hg/hgrc <<-EOF
+ > [hooks]
+ > pretransmit-inline-clone-bundle=sh -c 'printf "bar\n"' && false
+ > EOF
$ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook-fail
applying clone bundle from peer-bundle-cache://full.hg
remote: bar
@@ -743,6 +747,66 @@
(sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
$ killdaemons.py
+Testing a clone bundle with digest
+==================================
+
+ $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
+ $ cat http.pid >> $DAEMON_PIDS
+ $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
+ $ cat hg.pid >> $DAEMON_PIDS
+
+ $ digest=$("$PYTHON" -c "import hashlib; print (hashlib.sha256(open('gz-a.hg', 'rb').read()).hexdigest())")
+ $ cat > server/.hg/clonebundles.manifest << EOF
+ > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest}
+ > EOF
+ $ hg clone -U http://localhost:$HGPORT digest-valid
+ applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files
+ finished applying clone bundle
+ searching for changes
+ no changes found
+ 2 local changesets published
+ $ digest_bad=$("$PYTHON" -c "import hashlib; print (hashlib.sha256(open('gz-a.hg', 'rb').read()+b'.').hexdigest())")
+ $ cat > server/.hg/clonebundles.manifest << EOF
+ > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest_bad}
+ > EOF
+ $ hg clone -U http://localhost:$HGPORT digest-invalid
+ applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
+ abort: file with digest [0-9a-f]* expected, but [0-9a-f]* found for [0-9]* bytes (re)
+ [150]
+ $ cat > server/.hg/clonebundles.manifest << EOF
+ > http://localhost:$HGPORT1/bad-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:xx
+ > http://localhost:$HGPORT1/bad-b.hg BUNDLESPEC=gzip-v2 DIGEST=xxx:0000
+ > http://localhost:$HGPORT1/bad-c.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:0000
+ > http://localhost:$HGPORT1/bad-d.hg BUNDLESPEC=gzip-v2 DIGEST=xxx:00,xxx:01
+ > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha256:${digest_bad}
+ > EOF
+ $ hg clone --debug -U http://localhost:$HGPORT digest-malformed
+ using http://localhost:$HGPORT/
+ sending capabilities command
+ sending clonebundles_manifest command
+ filtering http://localhost:$HGPORT1/bad-a.hg due to a bad DIGEST attribute
+ filtering http://localhost:$HGPORT1/bad-b.hg due to lack of supported digest
+ filtering http://localhost:$HGPORT1/bad-c.hg due to a bad sha256 digest
+ filtering http://localhost:$HGPORT1/bad-d.hg due to conflicting xxx digests
+ applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
+ bundle2-input-bundle: 1 params with-transaction
+ bundle2-input-bundle: 0 parts total
+ \(sent [0-9]* HTTP requests and [0-9]* bytes; received [0-9]* bytes in responses\) (re)
+ abort: file with digest [0-9a-f]* expected, but [0-9a-f]* found for [0-9]* bytes (re)
+ [150]
+ $ cat > server/.hg/clonebundles.manifest << EOF
+ > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 DIGEST=sha512:00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,sha256:0000000000000000000000000000000000000000000000000000000000000000
+ > EOF
+ $ hg clone -U http://localhost:$HGPORT digest-preference
+ applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
+ abort: file with digest 0{64} expected, but [0-9a-f]+ found for [0-9]+ bytes (re)
+ [150]
+ $ killdaemons.py
+
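
The DIGEST attribute introduced here is a comma-separated list of `algo:hexdigest` pairs, and the valid case above computes it exactly the way a server operator would. A short sketch of producing one manifest line (hypothetical file and URL; mirrors the inline hashlib one-liner used by the test):

import hashlib


def manifest_line(url, bundlespec, bundle_path):
    with open(bundle_path, 'rb') as fh:
        digest = hashlib.sha256(fh.read()).hexdigest()
    return '%s BUNDLESPEC=%s DIGEST=sha256:%s' % (url, bundlespec, digest)


# e.g. manifest_line('http://localhost:8000/gz-a.hg', 'gzip-v2', 'gz-a.hg')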
Testing a clone bundle that involves revlog splitting (issue6811)
=================================================================
--- a/tests/test-config-env.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-config-env.py Sat Oct 26 04:16:00 2024 +0200
@@ -15,6 +15,7 @@
testtmp = encoding.environ[b'TESTTMP']
+
# prepare hgrc files
def join(name):
return os.path.join(testtmp, name)
@@ -26,6 +27,7 @@
with open(join(b'userrc'), 'wb') as f:
f.write(b'[ui]\neditor=e1')
+
# replace rcpath functions so they point to the files above
def systemrcpath():
return [join(b'sysrc')]
@@ -40,6 +42,7 @@
rcutil.systemrcpath = systemrcpath
rcutil.userrcpath = userrcpath
+
# utility to print configs
def printconfigs(env):
encoding.environ = env
--- a/tests/test-convert-bzr.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-convert-bzr.t Sat Oct 26 04:16:00 2024 +0200
@@ -222,6 +222,7 @@
$ cd repo-trunk
$ echo a > a
$ brz add -q a
+ $ sleep 1 # help with sorting
$ brz ci -qm adda
$ brz tag trunk-tag
Created tag trunk-tag.
@@ -237,6 +238,7 @@
Updated to revision 1.
Switched to branch*/repo/trunk/ (glob)
$ echo a >> a
+ $ sleep 1 # help with sorting
$ brz ci -qm changea
$ cd ..
$ hg convert --datesort repo repo-bzr
--- a/tests/test-convert-splicemap.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-convert-splicemap.t Sat Oct 26 04:16:00 2024 +0200
@@ -15,11 +15,11 @@
$ echo a >> a
$ hg ci -Am addb
adding b
- $ PARENTID1=`hg id --debug -i`
+ $ PARENTID1=`hg id -r . -T "{node}"`
$ echo c > c
$ hg ci -Am addc
adding c
- $ PARENTID2=`hg id --debug -i`
+ $ PARENTID2=`hg id -r . -T "{node}"`
$ cd ..
$ glog -R repo1
@ 2:e55c719b85b6 "addc" files: c
@@ -38,10 +38,10 @@
adding d
$ INVALIDID1=afd12345af
$ INVALIDID2=28173x36ddd1e67bf7098d541130558ef5534a86
- $ CHILDID1=`hg id --debug -i`
+ $ CHILDID1=`hg id -r . -T "{node}"`
$ echo d >> d
$ hg ci -Am changed
- $ CHILDID2=`hg id --debug -i`
+ $ CHILDID2=`hg id -r . -T "{node}"`
$ echo e > e
$ hg ci -Am adde
adding e
--- a/tests/test-convert.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-convert.t Sat Oct 26 04:16:00 2024 +0200
@@ -9,34 +9,34 @@
convert a foreign SCM repository to a Mercurial one.
- Accepted source formats [identifiers]:
+ Accepted source formats [identifiers]:
- - Mercurial [hg]
- - CVS [cvs]
- - Darcs [darcs]
- - git [git]
- - Subversion [svn]
- - Monotone [mtn]
- - GNU Arch [gnuarch]
- - Bazaar [bzr]
- - Perforce [p4]
+ - Mercurial [hg]
+ - CVS [cvs]
+ - Darcs [darcs]
+ - git [git]
+ - Subversion [svn]
+ - Monotone [mtn]
+ - GNU Arch [gnuarch]
+ - Bazaar [bzr]
+ - Perforce [p4]
- Accepted destination formats [identifiers]:
+ Accepted destination formats [identifiers]:
- - Mercurial [hg]
- - Subversion [svn] (history on branches is not preserved)
+ - Mercurial [hg]
+ - Subversion [svn] (history on branches is not preserved)
- If no revision is given, all revisions will be converted. Otherwise,
- convert will only import up to the named revision (given in a format
- understood by the source).
+ If no revision is given, all revisions will be converted. Otherwise, convert
+ will only import up to the named revision (given in a format understood by the
+ source).
- If no destination directory name is specified, it defaults to the basename
- of the source with "-hg" appended. If the destination repository doesn't
- exist, it will be created.
+ If no destination directory name is specified, it defaults to the basename of
+ the source with "-hg" appended. If the destination repository doesn't exist,
+ it will be created.
- By default, all sources except Mercurial will use --branchsort. Mercurial
- uses --sourcesort to preserve original revision numbers order. Sort modes
- have the following effects:
+ By default, all sources except Mercurial will use --branchsort. Mercurial uses
+ --sourcesort to preserve original revision numbers order. Sort modes have the
+ following effects:
--branchsort convert from parent to child revision when possible, which
means branches are usually converted one after the other.
@@ -49,361 +49,346 @@
--closesort try to move closed revisions as close as possible to parent
branches, only supported by Mercurial sources.
- If "REVMAP" isn't given, it will be put in a default location
- ("<dest>/.hg/shamap" by default). The "REVMAP" is a simple text file that
- maps each source commit ID to the destination ID for that revision, like
- so:
+ If "REVMAP" isn't given, it will be put in a default location
+ ("<dest>/.hg/shamap" by default). The "REVMAP" is a simple text file that maps
+ each source commit ID to the destination ID for that revision, like so:
- <source ID> <destination ID>
+ <source ID> <destination ID>
- If the file doesn't exist, it's automatically created. It's updated on
- each commit copied, so 'hg convert' can be interrupted and can be run
- repeatedly to copy new commits.
+ If the file doesn't exist, it's automatically created. It's updated on each
+ commit copied, so 'hg convert' can be interrupted and can be run repeatedly to
+ copy new commits.
- The authormap is a simple text file that maps each source commit author to
- a destination commit author. It is handy for source SCMs that use unix
- logins to identify authors (e.g.: CVS). One line per author mapping and
- the line format is:
+ The authormap is a simple text file that maps each source commit author to a
+ destination commit author. It is handy for source SCMs that use unix logins to
+ identify authors (e.g.: CVS). One line per author mapping and the line format
+ is:
- source author = destination author
+ source author = destination author
- Empty lines and lines starting with a "#" are ignored.
+ Empty lines and lines starting with a "#" are ignored.
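
As a sketch, a minimal authormap might look like this (the login and identity
below are hypothetical):

  $ cat > authormap.txt <<'EOF'
  > # unix login on the left, full destination identity on the right
  > jdoe = John Doe <jdoe@example.com>
  > EOF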
- The filemap is a file that allows filtering and remapping of files and
- directories. Each line can contain one of the following directives:
+ The filemap is a file that allows filtering and remapping of files and
+ directories. Each line can contain one of the following directives:
- include path/to/file-or-dir
+ include path/to/file-or-dir
- exclude path/to/file-or-dir
+ exclude path/to/file-or-dir
- rename path/to/source path/to/destination
+ rename path/to/source path/to/destination
- Comment lines start with "#". A specified path matches if it equals the
- full relative name of a file or one of its parent directories. The
- "include" or "exclude" directive with the longest matching path applies,
- so line order does not matter.
+ Comment lines start with "#". A specified path matches if it equals the full
+ relative name of a file or one of its parent directories. The "include" or
+ "exclude" directive with the longest matching path applies, so line order does
+ not matter.
- The "include" directive causes a file, or all files under a directory, to
- be included in the destination repository. The default if there are no
- "include" statements is to include everything. If there are any "include"
- statements, nothing else is included. The "exclude" directive causes files
- or directories to be omitted. The "rename" directive renames a file or
- directory if it is converted. To rename from a subdirectory into the root
- of the repository, use "." as the path to rename to.
+ The "include" directive causes a file, or all files under a directory, to be
+ included in the destination repository. The default if there are no "include"
+ statements is to include everything. If there are any "include" statements,
+ nothing else is included. The "exclude" directive causes files or directories
+ to be omitted. The "rename" directive renames a file or directory if it is
+ converted. To rename from a subdirectory into the root of the repository, use
+ "." as the path to rename to.
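
A small filemap sketch exercising all three directives (the paths are
hypothetical):

  $ cat > filemap.txt <<'EOF'
  > # keep src but drop its vendored code; hoist docs into the repository root
  > include src
  > exclude src/vendor
  > rename docs .
  > EOF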
- "--full" will make sure the converted changesets contain exactly the right
- files with the right content. It will make a full conversion of all files,
- not just the ones that have changed. Files that already are correct will
- not be changed. This can be used to apply filemap changes when converting
- incrementally. This is currently only supported for Mercurial and
- Subversion.
+ "--full" will make sure the converted changesets contain exactly the right
+ files with the right content. It will make a full conversion of all files, not
+ just the ones that have changed. Files that already are correct will not be
+ changed. This can be used to apply filemap changes when converting
+ incrementally. This is currently only supported for Mercurial and Subversion.
- The splicemap is a file that allows insertion of synthetic history,
- letting you specify the parents of a revision. This is useful if you want
- to e.g. give a Subversion merge two parents, or graft two disconnected
- series of history together. Each entry contains a key, followed by a
- space, followed by one or two comma-separated values:
+ The splicemap is a file that allows insertion of synthetic history, letting
+ you specify the parents of a revision. This is useful if you want to e.g. give
+ a Subversion merge two parents, or graft two disconnected series of history
+ together. Each entry contains a key, followed by a space, followed by one or
+ two comma-separated values:
- key parent1, parent2
+ key parent1, parent2
- The key is the revision ID in the source revision control system whose
- parents should be modified (same format as a key in .hg/shamap). The
- values are the revision IDs (in either the source or destination revision
- control system) that should be used as the new parents for that node. For
- example, if you have merged "release-1.0" into "trunk", then you should
- specify the revision on "trunk" as the first parent and the one on the
- "release-1.0" branch as the second.
+ The key is the revision ID in the source revision control system whose parents
+ should be modified (same format as a key in .hg/shamap). The values are the
+ revision IDs (in either the source or destination revision control system)
+ that should be used as the new parents for that node. For example, if you have
+ merged "release-1.0" into "trunk", then you should specify the revision on
+ "trunk" as the first parent and the one on the "release-1.0" branch as the
+ second.
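
A splicemap sketch for the merge scenario just described (the IDs are
placeholders, not real revisions):

  $ cat > splicemap.txt <<'EOF'
  > # give the merge revision two parents: trunk first, release-1.0 second
  > <merge ID> <trunk ID>, <release-1.0 ID>
  > EOF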
- The branchmap is a file that allows you to rename a branch when it is
- being brought in from whatever external repository. When used in
- conjunction with a splicemap, it allows for a powerful combination to help
- fix even the most badly mismanaged repositories and turn them into nicely
- structured Mercurial repositories. The branchmap contains lines of the
- form:
+ The branchmap is a file that allows you to rename a branch when it is being
+ brought in from whatever external repository. When used in conjunction with a
+ splicemap, it allows for a powerful combination to help fix even the most
+ badly mismanaged repositories and turn them into nicely structured Mercurial
+ repositories. The branchmap contains lines of the form:
- original_branch_name new_branch_name
+ original_branch_name new_branch_name
- where "original_branch_name" is the name of the branch in the source
- repository, and "new_branch_name" is the name of the branch is the
- destination repository. No whitespace is allowed in the new branch name.
- This can be used to (for instance) move code in one repository from
- "default" to a named branch.
+ where "original_branch_name" is the name of the branch in the source
+    repository, and "new_branch_name" is the name of the branch in the destination
+ repository. No whitespace is allowed in the new branch name. This can be used
+ to (for instance) move code in one repository from "default" to a named
+ branch.
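
A one-line branchmap sketch (the new branch name is made up):

  $ cat > branchmap.txt <<'EOF'
  > # move everything on "default" onto a named branch in the destination
  > default converted-code
  > EOF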
- Mercurial Source
- ################
+ Mercurial Source
+ ################
- The Mercurial source recognizes the following configuration options, which
- you can set on the command line with "--config":
+ The Mercurial source recognizes the following configuration options, which you
+ can set on the command line with "--config":
- convert.hg.ignoreerrors
- ignore integrity errors when reading. Use it to fix
- Mercurial repositories with missing revlogs, by converting
- from and to Mercurial. Default is False.
- convert.hg.saverev
- store original revision ID in changeset (forces target IDs
- to change). It takes a boolean argument and defaults to
- False.
- convert.hg.startrev
- specify the initial Mercurial revision. The default is 0.
- convert.hg.revs
- revset specifying the source revisions to convert.
+ convert.hg.ignoreerrors
+ ignore integrity errors when reading. Use it to fix Mercurial
+ repositories with missing revlogs, by converting from and to
+ Mercurial. Default is False.
+ convert.hg.saverev
+ store original revision ID in changeset (forces target IDs to
+ change). It takes a boolean argument and defaults to False.
+ convert.hg.startrev
+ specify the initial Mercurial revision. The default is 0.
+ convert.hg.revs
+ revset specifying the source revisions to convert.
- Bazaar Source
- #############
+ Bazaar Source
+ #############
- The following options can be used with "--config":
+ The following options can be used with "--config":
- convert.bzr.saverev
- whether to store the original Bazaar commit ID in the
- metadata of the destination commit. The default is True.
+ convert.bzr.saverev
+ whether to store the original Bazaar commit ID in the metadata
+ of the destination commit. The default is True.
- CVS Source
- ##########
+ CVS Source
+ ##########
- CVS source will use a sandbox (i.e. a checked-out copy) from CVS to
- indicate the starting point of what will be converted. Direct access to
- the repository files is not needed, unless of course the repository is
- ":local:". The conversion uses the top level directory in the sandbox to
- find the CVS repository, and then uses CVS rlog commands to find files to
- convert. This means that unless a filemap is given, all files under the
- starting directory will be converted, and that any directory
- reorganization in the CVS sandbox is ignored.
+ CVS source will use a sandbox (i.e. a checked-out copy) from CVS to indicate
+ the starting point of what will be converted. Direct access to the repository
+ files is not needed, unless of course the repository is ":local:". The
+ conversion uses the top level directory in the sandbox to find the CVS
+ repository, and then uses CVS rlog commands to find files to convert. This
+ means that unless a filemap is given, all files under the starting directory
+ will be converted, and that any directory reorganization in the CVS sandbox is
+ ignored.
- The following options can be used with "--config":
+ The following options can be used with "--config":
- convert.cvsps.cache
- Set to False to disable remote log caching, for testing and
- debugging purposes. Default is True.
- convert.cvsps.fuzz
- Specify the maximum time (in seconds) that is allowed
- between commits with identical user and log message in a
- single changeset. When very large files were checked in as
- part of a changeset then the default may not be long enough.
- The default is 60.
- convert.cvsps.logencoding
- Specify encoding name to be used for transcoding CVS log
- messages. Multiple encoding names can be specified as a list
- (see 'hg help config.Syntax'), but only the first acceptable
- encoding in the list is used per CVS log entries. This
- transcoding is executed before cvslog hook below.
- convert.cvsps.mergeto
- Specify a regular expression to which commit log messages
- are matched. If a match occurs, then the conversion process
- will insert a dummy revision merging the branch on which
- this log message occurs to the branch indicated in the
- regex. Default is "{{mergetobranch ([-\w]+)}}"
- convert.cvsps.mergefrom
- Specify a regular expression to which commit log messages
- are matched. If a match occurs, then the conversion process
- will add the most recent revision on the branch indicated in
- the regex as the second parent of the changeset. Default is
- "{{mergefrombranch ([-\w]+)}}"
- convert.localtimezone
- use local time (as determined by the TZ environment
- variable) for changeset date/times. The default is False
- (use UTC).
- hooks.cvslog Specify a Python function to be called at the end of
- gathering the CVS log. The function is passed a list with
- the log entries, and can modify the entries in-place, or add
- or delete them.
- hooks.cvschangesets
- Specify a Python function to be called after the changesets
- are calculated from the CVS log. The function is passed a
- list with the changeset entries, and can modify the
- changesets in-place, or add or delete them.
+ convert.cvsps.cache
+ Set to False to disable remote log caching, for testing and
+ debugging purposes. Default is True.
+ convert.cvsps.fuzz
+ Specify the maximum time (in seconds) that is allowed between
+ commits with identical user and log message in a single
+ changeset. When very large files were checked in as part of a
+ changeset then the default may not be long enough. The default
+ is 60.
+ convert.cvsps.logencoding
+ Specify encoding name to be used for transcoding CVS log
+ messages. Multiple encoding names can be specified as a list
+ (see 'hg help config.Syntax'), but only the first acceptable
+ encoding in the list is used per CVS log entries. This
+ transcoding is executed before cvslog hook below.
+ convert.cvsps.mergeto
+ Specify a regular expression to which commit log messages are
+ matched. If a match occurs, then the conversion process will
+ insert a dummy revision merging the branch on which this log
+ message occurs to the branch indicated in the regex. Default is
+ "{{mergetobranch ([-\w]+)}}"
+ convert.cvsps.mergefrom
+ Specify a regular expression to which commit log messages are
+ matched. If a match occurs, then the conversion process will add
+ the most recent revision on the branch indicated in the regex as
+ the second parent of the changeset. Default is
+ "{{mergefrombranch ([-\w]+)}}"
+ convert.localtimezone
+ use local time (as determined by the TZ environment variable)
+ for changeset date/times. The default is False (use UTC).
+ hooks.cvslog Specify a Python function to be called at the end of gathering
+ the CVS log. The function is passed a list with the log entries,
+ and can modify the entries in-place, or add or delete them.
+ hooks.cvschangesets
+ Specify a Python function to be called after the changesets are
+ calculated from the CVS log. The function is passed a list with
+ the changeset entries, and can modify the changesets in-place,
+ or add or delete them.
- An additional "debugcvsps" Mercurial command allows the builtin changeset
- merging code to be run without doing a conversion. Its parameters and
- output are similar to that of cvsps 2.1. Please see the command help for
- more details.
+ An additional "debugcvsps" Mercurial command allows the builtin changeset
+ merging code to be run without doing a conversion. Its parameters and output
+ are similar to that of cvsps 2.1. Please see the command help for more
+ details.
- Subversion Source
- #################
+ Subversion Source
+ #################
- Subversion source detects classical trunk/branches/tags layouts. By
- default, the supplied "svn://repo/path/" source URL is converted as a
- single branch. If "svn://repo/path/trunk" exists it replaces the default
- branch. If "svn://repo/path/branches" exists, its subdirectories are
- listed as possible branches. If "svn://repo/path/tags" exists, it is
- looked for tags referencing converted branches. Default "trunk",
- "branches" and "tags" values can be overridden with following options. Set
- them to paths relative to the source URL, or leave them blank to disable
- auto detection.
+ Subversion source detects classical trunk/branches/tags layouts. By default,
+ the supplied "svn://repo/path/" source URL is converted as a single branch. If
+ "svn://repo/path/trunk" exists it replaces the default branch. If
+ "svn://repo/path/branches" exists, its subdirectories are listed as possible
+    branches. If "svn://repo/path/tags" exists, it is searched for tags
+    referencing converted branches. Default "trunk", "branches" and "tags" values
+    can be overridden with the following options. Set them to paths relative to
+    the source URL, or leave them blank to disable auto detection.
- The following options can be set with "--config":
+ The following options can be set with "--config":
- convert.svn.branches
- specify the directory containing branches. The default is
- "branches".
- convert.svn.tags
- specify the directory containing tags. The default is
- "tags".
- convert.svn.trunk
- specify the name of the trunk branch. The default is
- "trunk".
- convert.localtimezone
- use local time (as determined by the TZ environment
- variable) for changeset date/times. The default is False
- (use UTC).
+ convert.svn.branches
+ specify the directory containing branches. The default is
+ "branches".
+ convert.svn.tags
+ specify the directory containing tags. The default is "tags".
+ convert.svn.trunk
+ specify the name of the trunk branch. The default is "trunk".
+ convert.localtimezone
+ use local time (as determined by the TZ environment variable)
+ for changeset date/times. The default is False (use UTC).
- Source history can be retrieved starting at a specific revision, instead
- of being integrally converted. Only single branch conversions are
- supported.
+ Source history can be retrieved starting at a specific revision, instead of
+ being integrally converted. Only single branch conversions are supported.
- convert.svn.startrev
- specify start Subversion revision number. The default is 0.
+ convert.svn.startrev
+ specify start Subversion revision number. The default is 0.
- Git Source
- ##########
+ Git Source
+ ##########
- The Git importer converts commits from all reachable branches (refs in
- refs/heads) and remotes (refs in refs/remotes) to Mercurial. Branches are
- converted to bookmarks with the same name, with the leading 'refs/heads'
- stripped. Git submodules are converted to Git subrepos in Mercurial.
+ The Git importer converts commits from all reachable branches (refs in
+ refs/heads) and remotes (refs in refs/remotes) to Mercurial. Branches are
+ converted to bookmarks with the same name, with the leading 'refs/heads'
+ stripped. Git submodules are converted to Git subrepos in Mercurial.
- The following options can be set with "--config":
+ The following options can be set with "--config":
- convert.git.similarity
- specify how similar files modified in a commit must be to be
- imported as renames or copies, as a percentage between "0"
- (disabled) and "100" (files must be identical). For example,
- "90" means that a delete/add pair will be imported as a
- rename if more than 90% of the file hasn't changed. The
- default is "50".
- convert.git.findcopiesharder
- while detecting copies, look at all files in the working
- copy instead of just changed ones. This is very expensive
- for large projects, and is only effective when
- "convert.git.similarity" is greater than 0. The default is
- False.
- convert.git.renamelimit
- perform rename and copy detection up to this many changed
- files in a commit. Increasing this will make rename and copy
- detection more accurate but will significantly slow down
- computation on large projects. The option is only relevant
- if "convert.git.similarity" is greater than 0. The default
- is "400".
- convert.git.committeractions
- list of actions to take when processing author and committer
- values.
+ convert.git.similarity
+ specify how similar files modified in a commit must be to be
+ imported as renames or copies, as a percentage between "0"
+ (disabled) and "100" (files must be identical). For example,
+ "90" means that a delete/add pair will be imported as a rename
+ if more than 90% of the file hasn't changed. The default is
+ "50".
+ convert.git.findcopiesharder
+ while detecting copies, look at all files in the working copy
+ instead of just changed ones. This is very expensive for large
+ projects, and is only effective when "convert.git.similarity" is
+ greater than 0. The default is False.
+ convert.git.renamelimit
+ perform rename and copy detection up to this many changed files
+ in a commit. Increasing this will make rename and copy detection
+ more accurate but will significantly slow down computation on
+ large projects. The option is only relevant if
+ "convert.git.similarity" is greater than 0. The default is
+ "400".
+ convert.git.committeractions
+ list of actions to take when processing author and committer
+ values.
- Git commits have separate author (who wrote the commit) and committer
- (who applied the commit) fields. Not all destinations support separate
- author and committer fields (including Mercurial). This config option
- controls what to do with these author and committer fields during
- conversion.
+ Git commits have separate author (who wrote the commit) and committer (who
+ applied the commit) fields. Not all destinations support separate author
+ and committer fields (including Mercurial). This config option controls
+ what to do with these author and committer fields during conversion.
+
+ A value of "messagedifferent" will append a "committer: ..." line to the
+ commit message if the Git committer is different from the author. The
+ prefix of that line can be specified using the syntax
+ "messagedifferent=<prefix>". e.g. "messagedifferent=git-committer:". When
+ a prefix is specified, a space will always be inserted between the prefix
+ and the value.
- A value of "messagedifferent" will append a "committer: ..." line to
- the commit message if the Git committer is different from the author.
- The prefix of that line can be specified using the syntax
- "messagedifferent=<prefix>". e.g. "messagedifferent=git-committer:".
- When a prefix is specified, a space will always be inserted between
- the prefix and the value.
+ "messagealways" behaves like "messagedifferent" except it will always
+ result in a "committer: ..." line being appended to the commit message.
+ This value is mutually exclusive with "messagedifferent".
- "messagealways" behaves like "messagedifferent" except it will always
- result in a "committer: ..." line being appended to the commit
- message. This value is mutually exclusive with "messagedifferent".
+ "dropcommitter" will remove references to the committer. Only references
+ to the author will remain. Actions that add references to the committer
+ will have no effect when this is set.
- "dropcommitter" will remove references to the committer. Only
- references to the author will remain. Actions that add references to
- the committer will have no effect when this is set.
+ "replaceauthor" will replace the value of the author field with the
+ committer. Other actions that add references to the committer will still
+ take effect when this is set.
- "replaceauthor" will replace the value of the author field with the
- committer. Other actions that add references to the committer will
- still take effect when this is set.
+ The default is "messagedifferent".
- The default is "messagedifferent".
-
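
Example invocations as a sketch, assuming a Git source in "git-src" (the
repository names and the prefix are hypothetical):

  $ hg convert --config convert.git.committeractions=messagealways git-src hg-dst
  $ hg convert \
  >   --config 'convert.git.committeractions=messagedifferent=git-committer:' \
  >   git-src hg-dst2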
- convert.git.extrakeys
- list of extra keys from commit metadata to copy to the
- destination. Some Git repositories store extra metadata in
- commits. By default, this non-default metadata will be lost
- during conversion. Setting this config option can retain
- that metadata. Some built-in keys such as "parent" and
- "branch" are not allowed to be copied.
- convert.git.remoteprefix
- remote refs are converted as bookmarks with
- "convert.git.remoteprefix" as a prefix followed by a /. The
- default is 'remote'.
- convert.git.saverev
- whether to store the original Git commit ID in the metadata
- of the destination commit. The default is True.
- convert.git.skipsubmodules
- does not convert root level .gitmodules files or files with
- 160000 mode indicating a submodule. Default is False.
+ convert.git.extrakeys
+ list of extra keys from commit metadata to copy to the
+ destination. Some Git repositories store extra metadata in
+ commits. By default, this non-default metadata will be lost
+ during conversion. Setting this config option can retain that
+ metadata. Some built-in keys such as "parent" and "branch" are
+ not allowed to be copied.
+ convert.git.remoteprefix
+ remote refs are converted as bookmarks with
+ "convert.git.remoteprefix" as a prefix followed by a /. The
+ default is 'remote'.
+ convert.git.saverev
+ whether to store the original Git commit ID in the metadata of
+ the destination commit. The default is True.
+ convert.git.skipsubmodules
+ does not convert root level .gitmodules files or files with
+ 160000 mode indicating a submodule. Default is False.
- Perforce Source
- ###############
+ Perforce Source
+ ###############
- The Perforce (P4) importer can be given a p4 depot path or a client
- specification as source. It will convert all files in the source to a flat
- Mercurial repository, ignoring labels, branches and integrations. Note
- that when a depot path is given you then usually should specify a target
- directory, because otherwise the target may be named "...-hg".
+ The Perforce (P4) importer can be given a p4 depot path or a client
+ specification as source. It will convert all files in the source to a flat
+    Mercurial repository, ignoring labels, branches and integrations. Note that
+    when a depot path is given, you should usually specify a target directory,
+    because otherwise the target may be named "...-hg".
- The following options can be set with "--config":
+ The following options can be set with "--config":
- convert.p4.encoding
- specify the encoding to use when decoding standard output of
- the Perforce command line tool. The default is default
- system encoding.
- convert.p4.startrev
- specify initial Perforce revision (a Perforce changelist
- number).
+ convert.p4.encoding
+ specify the encoding to use when decoding standard output of the
+ Perforce command line tool. The default is default system
+ encoding.
+ convert.p4.startrev
+ specify initial Perforce revision (a Perforce changelist
+ number).
- Mercurial Destination
- #####################
+ Mercurial Destination
+ #####################
- The Mercurial destination will recognize Mercurial subrepositories in the
- destination directory, and update the .hgsubstate file automatically if
- the destination subrepositories contain the <dest>/<sub>/.hg/shamap file.
- Converting a repository with subrepositories requires converting a single
- repository at a time, from the bottom up.
+ The Mercurial destination will recognize Mercurial subrepositories in the
+ destination directory, and update the .hgsubstate file automatically if the
+ destination subrepositories contain the <dest>/<sub>/.hg/shamap file.
+ Converting a repository with subrepositories requires converting a single
+ repository at a time, from the bottom up.
- The following options are supported:
+ The following options are supported:
- convert.hg.clonebranches
- dispatch source branches in separate clones. The default is
- False.
- convert.hg.tagsbranch
- branch name for tag revisions, defaults to "default".
- convert.hg.usebranchnames
- preserve branch names. The default is True.
- convert.hg.sourcename
- records the given string as a 'convert_source' extra value
- on each commit made in the target repository. The default is
- None.
- convert.hg.preserve-hash
- only works with mercurial sources. Make convert prevent
- performance improvement to the list of modified files in
- commits when such an improvement would cause the hash of a
- commit to change. The default is False.
+ convert.hg.clonebranches
+ dispatch source branches in separate clones. The default is
+ False.
+ convert.hg.tagsbranch
+ branch name for tag revisions, defaults to "default".
+ convert.hg.usebranchnames
+ preserve branch names. The default is True.
+ convert.hg.sourcename
+ records the given string as a 'convert_source' extra value on
+ each commit made in the target repository. The default is None.
+ convert.hg.preserve-hash
+                     only works with Mercurial sources. Makes convert forgo an
+                     optimization of the list of modified files in commits when
+                     that optimization would cause the hash of a commit to
+                     change. The default is False.
- All Destinations
- ################
+ All Destinations
+ ################
- All destination types accept the following options:
+ All destination types accept the following options:
- convert.skiptags
- does not convert tags from the source repo to the target
- repo. The default is False.
+ convert.skiptags
+ does not convert tags from the source repo to the target repo.
+ The default is False.
- Subversion Destination
- ######################
+ Subversion Destination
+ ######################
- Original commit dates are not preserved by default.
+ Original commit dates are not preserved by default.
- convert.svn.dangerous-set-commit-dates
- preserve original commit dates, forcefully setting
- "svn:date" revision properties. This option is DANGEROUS and
- may break some subversion functionality for the resulting
- repository (e.g. filtering revisions with date ranges in
- "svn log"), as original commit dates are not guaranteed to
- be monotonically increasing.
+ convert.svn.dangerous-set-commit-dates
+ preserve original commit dates, forcefully setting "svn:date"
+ revision properties. This option is DANGEROUS and may break some
+ subversion functionality for the resulting repository (e.g.
+ filtering revisions with date ranges in "svn log"), as original
+ commit dates are not guaranteed to be monotonically increasing.
- For commit dates setting to work destination repository must have "pre-
- revprop-change" hook configured to allow setting of "svn:date" revision
- properties. See Subversion documentation for more details.
+    For commit date setting to work, the destination repository must have the
+    "pre-revprop-change" hook configured to allow setting of "svn:date" revision
+    properties. See the Subversion documentation for more details.
options ([+] can be repeated):
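
Tying the pieces together, a sketch of a conversion using the map files from
the sketches above ("source-repo", "dest-repo" and "revmap.txt" are
placeholders):

  $ hg convert --authormap authormap.txt --filemap filemap.txt \
  >   --splicemap splicemap.txt --branchmap branchmap.txt \
  >   source-repo dest-repo revmap.txt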
--- a/tests/test-debugcommands.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-debugcommands.t Sat Oct 26 04:16:00 2024 +0200
@@ -523,29 +523,15 @@
* Test setting the lock
-waitlock <file> will wait for file to be created. If it isn't in a reasonable
-amount of time, displays error message and returns 1
- $ waitlock() {
- > start=`date +%s`
- > timeout=5
- > while [ \( ! -f $1 \) -a \( ! -L $1 \) ]; do
- > now=`date +%s`
- > if [ "`expr $now - $start`" -gt $timeout ]; then
- > echo "timeout: $1 was not created in $timeout seconds"
- > return 1
- > fi
- > sleep 0.1
- > done
- > }
$ dolock() {
> {
- > waitlock .hg/unlock
+ > "$RUNTESTDIR/testlib/wait-on-file" 5 .hg/unlock
> rm -f .hg/unlock
> echo y
> } | hg debuglocks "$@" > /dev/null
> }
$ dolock -s &
- $ waitlock .hg/store/lock
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 .hg/store/lock
$ hg debuglocks
lock: user *, process * (*s) (glob)
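
The shared script supersedes the per-test waitlock helper deleted above. A
rough sketch of an equivalent polling loop, mirroring the removed function
(the real tests/testlib script may differ):

  $ wait_on_file_sketch() {
  >   timeout=$1 file=$2
  >   start=`date +%s`
  >   while [ ! -f "$file" ] && [ ! -L "$file" ]; do
  >     now=`date +%s`
  >     if [ `expr $now - $start` -gt "$timeout" ]; then
  >       echo "timeout: $file was not created in $timeout seconds"
  >       return 1
  >     fi
  >     sleep 0.1
  >   done
  > }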
@@ -559,7 +545,7 @@
* Test setting the wlock
$ dolock -S &
- $ waitlock .hg/wlock
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 .hg/wlock
$ hg debuglocks
lock: free
@@ -573,7 +559,8 @@
* Test setting both locks
$ dolock -Ss &
- $ waitlock .hg/wlock && waitlock .hg/store/lock
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 .hg/wlock
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 .hg/store/lock
$ hg debuglocks
lock: user *, process * (*s) (glob)
@@ -600,7 +587,7 @@
* Test forcing the lock
$ dolock -s &
- $ waitlock .hg/store/lock
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 .hg/store/lock
$ hg debuglocks
lock: user *, process * (*s) (glob)
@@ -619,7 +606,7 @@
* Test forcing the wlock
$ dolock -S &
- $ waitlock .hg/wlock
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 .hg/wlock
$ hg debuglocks
lock: free
@@ -649,8 +636,8 @@
$ ls -r .hg/cache/*
.hg/cache/tags2-served
.hg/cache/tags2
- .hg/cache/rbc-revs-v1
- .hg/cache/rbc-names-v1
+ .hg/cache/rbc-revs-v2
+ .hg/cache/rbc-names-v2
.hg/cache/hgtagsfnodes1
.hg/cache/branch2-served
--- a/tests/test-dirstate-read-race.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-dirstate-read-race.t Sat Oct 26 04:16:00 2024 +0200
@@ -167,23 +167,7 @@
The status process should return a consistent result and not crash.
-#if dirstate-v1
- $ cat $TESTTMP/status-race-lock.out
- A dir/n
- A dir/o
- R dir/nested/m
- ? p
- ? q
-#else
-#if rhg pre-some-read dirstate-v2-append
- $ cat $TESTTMP/status-race-lock.out
- A dir/o
- R dir/nested/m
- ? dir/n
- ? p
- ? q
-#else
-#if rust no-rhg dirstate-v2-append
+#if rust dirstate-v2-append pre-some-read
$ cat $TESTTMP/status-race-lock.out
A dir/o
R dir/nested/m
@@ -198,8 +182,6 @@
? p
? q
#endif
-#endif
-#endif
$ cat $TESTTMP/status-race-lock.log
final cleanup
@@ -249,30 +231,19 @@
The status process should return a consistent result and not crash.
-#if no-rhg
+#if dirstate-v1
+ $ cat $TESTTMP/status-race-lock.out
+ ? dir/n
+ ? p
+ ? q
+#endif
+#if dirstate-v2
$ cat $TESTTMP/status-race-lock.out
A dir/o
R dir/nested/m
? dir/n
? p
? q
- $ cat $TESTTMP/status-race-lock.log
-#else
-#if pre-some-read dirstate-v2-append
- $ cat $TESTTMP/status-race-lock.out
- A dir/o
- R dir/nested/m
- ? dir/n
- ? p
- ? q
- $ cat $TESTTMP/status-race-lock.log
-#else
- $ cat $TESTTMP/status-race-lock.out
- ? dir/n
- ? p
- ? q
- $ cat $TESTTMP/status-race-lock.log
-#endif
#endif
final cleanup
@@ -323,21 +294,7 @@
The status process should return a consistent result and not crash.
-#if rhg dirstate-v2-append pre-some-read
- $ cat $TESTTMP/status-race-lock.out
- A dir/o
- R dir/nested/m
- ! dir/i
- ! dir/j
- ! dir/nested/h
- ! dir2/k
- ! dir2/l
- ! g
- ? dir/n
- ? p
- ? q
-#else
-#if rust no-rhg dirstate-v2-append
+#if rust dirstate-v2-append pre-some-read
$ cat $TESTTMP/status-race-lock.out
A dir/o
R dir/nested/m
@@ -357,7 +314,6 @@
? p
? q
#endif
-#endif
$ cat $TESTTMP/status-race-lock.log
final cleanup
--- a/tests/test-doctest.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-doctest.py Sat Oct 26 04:16:00 2024 +0200
@@ -71,13 +71,9 @@
if not os.path.isdir(os.path.join(cwd, ".hg")):
sys.exit(0)
-files_cmd = "hg files --print0 \"%s\"" % fileset
-
-if 'HGTEST_RESTOREENV' in os.environ:
- files_cmd = '. $HGTEST_RESTOREENV; ' + files_cmd
-
files = subprocess.check_output(
- files_cmd,
+ "HGRCPATH=/dev/null . helpers-testrepo.sh; testrepohg files --print0 \"%s\""
+ % fileset,
shell=True,
cwd=cwd,
).split(b'\0')
--- a/tests/test-encoding-textwrap.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-encoding-textwrap.t Sat Oct 26 04:16:00 2024 +0200
@@ -63,11 +63,11 @@
\x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
- \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
- \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
+ \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
+ \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
- \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf\x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf\x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
- \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
+ \x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf\x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf\x82\xa0\x82\xa2\x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf\x82\xa0\x82\xa2 (esc)
+ \x82\xa4\x82\xa6\x82\xa8\x82\xa9\x82\xab\x82\xad\x82\xaf (esc)
(some details hidden, use --verbose to show complete help)
@@ -78,11 +78,11 @@
\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
- \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
- \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
+ \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
+ \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
- \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
- \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
+ \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x82\xe3\x81\x84 (esc)
+ \xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91 (esc)
(some details hidden, use --verbose to show complete help)
@@ -94,11 +94,11 @@
\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
- \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
- \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
+ \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
+ \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
- \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
- \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
+ \xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4 (esc)
+ \xb5\xb6\xb7\xb8\xb9\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9 (esc)
(some details hidden, use --verbose to show complete help)
@@ -109,11 +109,11 @@
\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
- \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
- \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
+ \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
+ \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
- \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
- \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
+ \xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4 (esc)
+ \xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9\xef\xbd\xb1\xef\xbd\xb2\xef\xbd\xb3\xef\xbd\xb4\xef\xbd\xb5\xef\xbd\xb6\xef\xbd\xb7\xef\xbd\xb8\xef\xbd\xb9 (esc)
(some details hidden, use --verbose to show complete help)
@@ -130,11 +130,11 @@
\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
+ \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
+ \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
+ \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2 (esc)
+ \x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
(some details hidden, use --verbose to show complete help)
@@ -145,11 +145,11 @@
\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
+ \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
+ \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
+ \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4 (esc)
+ \xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
(some details hidden, use --verbose to show complete help)
@@ -160,11 +160,11 @@
\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
+ \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
+ \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
+ \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2 (esc)
+ \xf0\xee\xe9\xea\xe8 (esc)
(some details hidden, use --verbose to show complete help)
@@ -175,11 +175,11 @@
\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
+ \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
+ \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
+ \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82 (esc)
+ \xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
(some details hidden, use --verbose to show complete help)
@@ -193,14 +193,13 @@
\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
+ \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
+ \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
+ \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
- \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
+ \x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0 (esc)
+ \x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2\x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b\x83\xbf\x83\xc0\x83\xc1\x83\xc2 (esc)
+ \x83\xd2\x83\xc4\x83\xc5\x83\xc6\x81\x9b (esc)
(some details hidden, use --verbose to show complete help)
@@ -211,14 +210,13 @@
\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
+ \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
+ \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
+ \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
- \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
+ \xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2 (esc)
+ \xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4\xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b\xce\xb1\xce\xb2\xce\xb3\xce\xb4 (esc)
+ \xcf\x85\xce\xb6\xce\xb7\xce\xb8\xe2\x97\x8b (esc)
(some details hidden, use --verbose to show complete help)
@@ -230,13 +228,12 @@
\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
+ \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
+ \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
- \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8 (esc)
+ \xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0 (esc)
+ \xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2\xf0\xee\xe9\xea\xe8\xcd\xe0\xf1\xf2 (esc)
+ \xf0\xee\xe9\xea\xe8 (esc)
(some details hidden, use --verbose to show complete help)
@@ -248,13 +245,12 @@
\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
+ \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
+ \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
- \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
+ \xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0 (esc)
+ \xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82\xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8\xd0\x9d\xd0\xb0\xd1\x81\xd1\x82 (esc)
+ \xd1\x80\xd0\xbe\xd0\xb9\xd0\xba\xd0\xb8 (esc)
(some details hidden, use --verbose to show complete help)
--- a/tests/test-eol-update.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-eol-update.t Sat Oct 26 04:16:00 2024 +0200
@@ -272,6 +272,7 @@
eol: detected change in .hgeol
filtering .hgeol through isbinary
filtering a.txt through tolf
+ skip updating dirstate: identity mismatch (?)
$ cd ..
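
A note on the trailing markers in these .t expectations: the test runner gives
special meaning to annotations such as (esc) for escaped bytes, (re) for
regular expressions, (glob) for glob patterns, and (?) for lines that may be
absent, which is why the new dirstate message above is allowed to appear or
not depending on timing. A toy matcher for just the optional-line rule (a
sketch of the idea, with a hypothetical matches() helper, not run-tests.py's
actual code):

  def matches(expected, actual):
      # Lines ending in " (?)" may be missing from the actual output;
      # all other expected lines must appear, in order.
      i = 0
      for line in expected:
          optional = line.endswith(' (?)')
          want = line[:-4] if optional else line
          if i < len(actual) and actual[i] == want:
              i += 1
          elif not optional:
              return False
      return i == len(actual)

  assert matches(['filtering a.txt through tolf',
                  'skip updating dirstate: identity mismatch (?)'],
                 ['filtering a.txt through tolf'])
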
--- a/tests/test-extdiff.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-extdiff.t Sat Oct 26 04:16:00 2024 +0200
@@ -37,15 +37,15 @@
use external program to diff repository (or selected files)
- Show differences between revisions for the specified files, using the
- following program:
+ Show differences between revisions for the specified files, using the
+ following program:
- 'echo'
+ 'echo'
- When two revision arguments are given, then changes are shown between
- those revisions. If only one revision is specified then that revision is
- compared to the working directory, and, when no revisions are specified,
- the working directory files are compared to its parent.
+ When two revision arguments are given, then changes are shown between those
+ revisions. If only one revision is specified then that revision is compared to
+ the working directory, and, when no revisions are specified, the working
+ directory files are compared to its parent.
options ([+] can be repeated):
@@ -158,9 +158,18 @@
diffing */extdiff.*/a.46c0e4daeb72/b a.81906f2b98ac/b (glob) (no-windows !)
[1]
+#if no-gui
+Test gui tool error:
+
+ $ hg --config extdiff.gui.alabalaf=True alabalaf
+ abort: tool 'alabalaf' requires a GUI
+ (to override, use: --config diff-tools.alabalaf.gui=False)
+ [255]
+#endif
+
Test --per-file option for gui tool:
- $ hg --config extdiff.gui.alabalaf=True alabalaf -c 6 --per-file --debug
+ $ DISPLAY=fake hg --config extdiff.gui.alabalaf=True alabalaf -c 6 --per-file --debug
diffing */extdiff.*/a.46c0e4daeb72/* a.81906f2b98ac/* (glob)
diffing */extdiff.*/a.46c0e4daeb72/* a.81906f2b98ac/* (glob)
making snapshot of 2 files from rev 46c0e4daeb72
@@ -176,7 +185,7 @@
Test --per-file option for gui tool again:
- $ hg --config merge-tools.alabalaf.gui=True alabalaf -c 6 --per-file --debug
+ $ DISPLAY=fake hg --config merge-tools.alabalaf.gui=True alabalaf -c 6 --per-file --debug
diffing */extdiff.*/a.46c0e4daeb72/* a.81906f2b98ac/* (glob)
diffing */extdiff.*/a.46c0e4daeb72/* a.81906f2b98ac/* (glob)
making snapshot of 2 files from rev 46c0e4daeb72
@@ -523,8 +532,8 @@
[10]
$ LC_MESSAGES=ja_JP.UTF-8 hg --config hgext.extdiff= --config extdiff.cmd.td=$U help td \
- > | grep "^ '"
- '\xa5\xa5'
+ > | grep "^ '"
+ '\xa5\xa5'
$ cd $TESTTMP
--- a/tests/test-extension.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-extension.t Sat Oct 26 04:16:00 2024 +0200
@@ -755,30 +755,28 @@
use external program to diff repository (or selected files)
- Show differences between revisions for the specified files, using an
- external program. The default program used is diff, with default options
- "-Npru".
+ Show differences between revisions for the specified files, using an external
+ program. The default program used is diff, with default options "-Npru".
- To select a different program, use the -p/--program option. The program
- will be passed the names of two directories to compare, unless the --per-
- file option is specified (see below). To pass additional options to the
- program, use -o/--option. These will be passed before the names of the
- directories or files to compare.
+ To select a different program, use the -p/--program option. The program will
+ be passed the names of two directories to compare, unless the --per-file
+ option is specified (see below). To pass additional options to the program,
+ use -o/--option. These will be passed before the names of the directories or
+ files to compare.
- The --from, --to, and --change options work the same way they do for 'hg
- diff'.
+ The --from, --to, and --change options work the same way they do for 'hg
+ diff'.
- The --per-file option runs the external program repeatedly on each file to
- diff, instead of once on two directories. By default, this happens one by
- one, where the next file diff is open in the external program only once
- the previous external program (for the previous file diff) has exited. If
- the external program has a graphical interface, it can open all the file
- diffs at once instead of one by one. See 'hg help -e extdiff' for
- information about how to tell Mercurial that a given program has a
- graphical interface.
+ The --per-file option runs the external program repeatedly on each file to
+ diff, instead of once on two directories. By default, this happens one by one,
+ where the next file diff is open in the external program only once the
+ previous external program (for the previous file diff) has exited. If the
+ external program has a graphical interface, it can open all the file diffs at
+ once instead of one by one. See 'hg help -e extdiff' for information about how
+ to tell Mercurial that a given program has a graphical interface.
- The --confirm option will prompt the user before each invocation of the
- external program. It is ignored if --per-file isn't specified.
+ The --confirm option will prompt the user before each invocation of the
+ external program. It is ignored if --per-file isn't specified.
(use 'hg help -e extdiff' to show help for the extdiff extension)
@@ -1832,9 +1830,7 @@
> return orig(*args, **opts)
> def uisetup(ui):
> synopsis = b' GREPME [--foo] [-x]'
- > docstring = '''
- > GREPME make sure that this is in the help!
- > '''
+ > docstring = '\nGREPME make sure that this is in the help!'
> extensions.wrapcommand(commands.table, b'bookmarks', exbookmarks,
> synopsis, docstring)
> EOF
@@ -1844,7 +1840,7 @@
$ cd exthelp
$ hg help bookmarks | grep GREPME
hg bookmarks [OPTIONS]... [NAME]... GREPME [--foo] [-x]
- GREPME make sure that this is in the help!
+ GREPME make sure that this is in the help!
$ cd ..
Prohibit the use of unicode strings as the default value of options
--- a/tests/test-extensions-wrapfunction.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-extensions-wrapfunction.py Sat Oct 26 04:16:00 2024 +0200
@@ -66,6 +66,7 @@
print('context manager', dummy.getstack())
print('context manager', dummy.getstack())
+
# Wrap callable object which has no __name__
class callableobj:
def __call__(self):
--- a/tests/test-filecache.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-filecache.py Sat Oct 26 04:16:00 2024 +0200
@@ -48,7 +48,7 @@
def sjoin(self, p):
return p
- @localrepo.repofilecache('x', 'y')
+ @localrepo.repofilecache(b'x', b'y')
def cached(self):
print('creating')
return 'string from function'
@@ -72,15 +72,15 @@
repo.cached
# create empty file
- f = open('x', 'w')
+ f = open('x', 'wb')
f.close()
repo.invalidate()
print("* empty file x created")
# should recreate the object
repo.cached
- f = open('x', 'w')
- f.write('a')
+ f = open('x', 'wb')
+ f.write(b'a')
f.close()
repo.invalidate()
print("* file x changed size")
@@ -104,15 +104,15 @@
repo.cached
# create empty file y
- f = open('y', 'w')
+ f = open('y', 'wb')
f.close()
repo.invalidate()
print("* empty file y created")
# should recreate the object
repo.cached
- f = open('y', 'w')
- f.write('A')
+ f = open('y', 'wb')
+ f.write(b'A')
f.close()
repo.invalidate()
print("* file y changed size")
@@ -151,7 +151,7 @@
util.cachestat, 'cacheable', wrapcacheable
)
- for fn in ['x', 'y']:
+ for fn in [b'x', b'y']:
try:
os.remove(fn)
except OSError:
@@ -180,15 +180,15 @@
def setbeforeget(repo):
- os.remove('x')
- os.remove('y')
+ os.remove(b'x')
+ os.remove(b'y')
repo.__class__.cached.set(repo, 'string set externally')
repo.invalidate()
print("* neither file exists")
print(repo.cached)
repo.invalidate()
- f = open('x', 'w')
- f.write('a')
+ f = open('x', 'wb')
+ f.write(b'a')
f.close()
print("* file x created")
print(repo.cached)
@@ -199,8 +199,8 @@
print(repo.cached)
repo.invalidate()
- f = open('y', 'w')
- f.write('b')
+ f = open('y', 'wb')
+ f.write(b'b')
f.close()
print("* file y created")
print(repo.cached)
@@ -212,8 +212,8 @@
# try some times, because reproduction of ambiguity depends on
# "filesystem time"
for i in range(5):
- fp = open(filename, 'w')
- fp.write('FOO')
+ fp = open(filename, 'wb')
+ fp.write(b'FOO')
fp.close()
oldstat = os.stat(filename)
@@ -227,13 +227,13 @@
# st_mtime is advanced multiple times as expected
for i in range(repetition):
# explicit closing
- fp = vfsmod.checkambigatclosing(open(filename, 'a'))
- fp.write('FOO')
+ fp = vfsmod.checkambigatclosing(open(filename, 'ab'))
+ fp.write(b'FOO')
fp.close()
# implicit closing by "with" statement
- with vfsmod.checkambigatclosing(open(filename, 'a')) as fp:
- fp.write('BAR')
+ with vfsmod.checkambigatclosing(open(filename, 'ab')) as fp:
+ fp.write(b'BAR')
newstat = os.stat(filename)
if oldstat[stat.ST_CTIME] != newstat[stat.ST_CTIME]:
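
For context on the "ambiguity" these filecache tests keep retrying to
reproduce: two successive writes can leave a file with the same size and the
same whole-second mtime, in which case a stat-based cache cannot tell the
versions apart; checkambigatclosing exists to break that tie at close time. A
minimal sketch of the detection side (stat_is_ambiguous is a hypothetical
helper, not part of this patch):

  import os
  import stat

  def stat_is_ambiguous(old: os.stat_result, new: os.stat_result) -> bool:
      # A cache keyed on (size, mtime) would wrongly treat the file as
      # unchanged when both fields survive a rewrite intact.
      return (
          old[stat.ST_SIZE] == new[stat.ST_SIZE]
          and old[stat.ST_MTIME] == new[stat.ST_MTIME]
      )
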
--- a/tests/test-fix.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-fix.t Sat Oct 26 04:16:00 2024 +0200
@@ -84,27 +84,26 @@
rewrite file content in changesets or working directory
- Runs any configured tools to fix the content of files. (See 'hg help -e
- fix' for details about configuring tools.) Only affects files with
- changes, unless file arguments are provided. Only affects changed lines of
- files, unless the --whole flag is used. Some tools may always affect the
- whole file regardless of --whole.
+ Runs any configured tools to fix the content of files. (See 'hg help -e fix'
+ for details about configuring tools.) Only affects files with changes, unless
+ file arguments are provided. Only affects changed lines of files, unless the
+ --whole flag is used. Some tools may always affect the whole file regardless
+ of --whole.
- If --working-dir is used, files with uncommitted changes in the working
- copy will be fixed. Note that no backup are made.
+ If --working-dir is used, files with uncommitted changes in the working copy
+ will be fixed. Note that no backup are made.
- If revisions are specified with --source, those revisions and their
- descendants will be checked, and they may be replaced with new revisions
- that have fixed file content. By automatically including the descendants,
- no merging, rebasing, or evolution will be required. If an ancestor of the
- working copy is included, then the working copy itself will also be fixed,
- and the working copy will be updated to the fixed parent.
+ If revisions are specified with --source, those revisions and their
+ descendants will be checked, and they may be replaced with new revisions that
+ have fixed file content. By automatically including the descendants, no
+ merging, rebasing, or evolution will be required. If an ancestor of the
+ working copy is included, then the working copy itself will also be fixed, and
+ the working copy will be updated to the fixed parent.
- When determining what lines of each file to fix at each revision, the
- whole set of revisions being fixed is considered, so that fixes to earlier
- revisions are not forgotten in later ones. The --base flag can be used to
- override this default behavior, though it is not usually desirable to do
- so.
+ When determining what lines of each file to fix at each revision, the whole
+ set of revisions being fixed is considered, so that fixes to earlier revisions
+ are not forgotten in later ones. The --base flag can be used to override this
+ default behavior, though it is not usually desirable to do so.
(use 'hg help -e fix' to show help for the fix extension)
--- a/tests/test-fncache.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-fncache.t Sat Oct 26 04:16:00 2024 +0200
@@ -96,8 +96,8 @@
.hg/branch
.hg/cache
.hg/cache/branch2-served
- .hg/cache/rbc-names-v1
- .hg/cache/rbc-revs-v1
+ .hg/cache/rbc-names-v2
+ .hg/cache/rbc-revs-v2
.hg/data
.hg/data/tst.d.hg
.hg/data/tst.d.hg/foo.i
@@ -131,8 +131,8 @@
.hg/branch
.hg/cache
.hg/cache/branch2-served
- .hg/cache/rbc-names-v1
- .hg/cache/rbc-revs-v1
+ .hg/cache/rbc-names-v2
+ .hg/cache/rbc-revs-v2
.hg/dirstate
.hg/fsmonitor.state (fsmonitor !)
.hg/last-message.txt
@@ -304,7 +304,7 @@
$ cat > ../exceptionext.py <<EOF
> import os
- > import signal
+ > from mercurial.testing import ps_util
> from mercurial import (
> commands,
> error,
@@ -316,7 +316,7 @@
> def trwrapper(orig, self, *args, **kwargs):
> tr = orig(self, *args, **kwargs)
> def fail(tr):
- > os.kill(os.getpid(), signal.SIGKILL)
+ > ps_util.kill(os.getpid())
> # zzz prefix to ensure it sorted after store.write
> tr.addfinalize(b'zzz-forcefails', fail)
> return tr
--- a/tests/test-hardlinks.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-hardlinks.t Sat Oct 26 04:16:00 2024 +0200
@@ -264,8 +264,8 @@
[24] r4/.hg/branch (re)
2 r4/.hg/cache/branch2-base
2 r4/.hg/cache/branch2-served
- 2 r4/.hg/cache/rbc-names-v1
- 2 r4/.hg/cache/rbc-revs-v1
+ 2 r4/.hg/cache/rbc-names-v2
+ 2 r4/.hg/cache/rbc-revs-v2
2 r4/.hg/cache/tags2
2 r4/.hg/cache/tags2-served
2 r4/.hg/dirstate
@@ -317,8 +317,8 @@
1 r4/.hg/branch
2 r4/.hg/cache/branch2-base
2 r4/.hg/cache/branch2-served
- 2 r4/.hg/cache/rbc-names-v1
- 2 r4/.hg/cache/rbc-revs-v1
+ 2 r4/.hg/cache/rbc-names-v2
+ 2 r4/.hg/cache/rbc-revs-v2
2 r4/.hg/cache/tags2
2 r4/.hg/cache/tags2-served
1 r4/.hg/dirstate
--- a/tests/test-help.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-help.t Sat Oct 26 04:16:00 2024 +0200
@@ -467,15 +467,15 @@
add the specified files on the next commit
- Schedule files to be version controlled and added to the repository.
-
- The files will be added to the repository at the next commit. To undo an
- add before that, see 'hg forget'.
-
- If no names are given, add all files to the repository (except files
- matching ".hgignore").
-
- Returns 0 if all files are successfully added.
+ Schedule files to be version controlled and added to the repository.
+
+ The files will be added to the repository at the next commit. To undo an add
+ before that, see 'hg forget'.
+
+ If no names are given, add all files to the repository (except files matching
+ ".hgignore").
+
+ Returns 0 if all files are successfully added.
options ([+] can be repeated):
@@ -493,40 +493,40 @@
add the specified files on the next commit
- Schedule files to be version controlled and added to the repository.
-
- The files will be added to the repository at the next commit. To undo an
- add before that, see 'hg forget'.
-
- If no names are given, add all files to the repository (except files
- matching ".hgignore").
-
- Examples:
-
- - New (unknown) files are added automatically by 'hg add':
-
- $ ls
- foo.c
- $ hg status
- ? foo.c
- $ hg add
- adding foo.c
- $ hg status
- A foo.c
-
- - Specific files to be added can be specified:
-
- $ ls
- bar.c foo.c
- $ hg status
- ? bar.c
- ? foo.c
- $ hg add bar.c
- $ hg status
- A bar.c
- ? foo.c
-
- Returns 0 if all files are successfully added.
+ Schedule files to be version controlled and added to the repository.
+
+ The files will be added to the repository at the next commit. To undo an add
+ before that, see 'hg forget'.
+
+ If no names are given, add all files to the repository (except files matching
+ ".hgignore").
+
+ Examples:
+
+ - New (unknown) files are added automatically by 'hg add':
+
+ $ ls
+ foo.c
+ $ hg status
+ ? foo.c
+ $ hg add
+ adding foo.c
+ $ hg status
+ A foo.c
+
+ - Specific files to be added can be specified:
+
+ $ ls
+ bar.c foo.c
+ $ hg status
+ ? bar.c
+ ? foo.c
+ $ hg add bar.c
+ $ hg status
+ A bar.c
+ ? foo.c
+
+ Returns 0 if all files are successfully added.
options ([+] can be repeated):
@@ -568,10 +568,10 @@
print the root (top) of the current working
directory
- Print the root directory of the current
- repository.
-
- Returns 0 on success.
+ Print the root directory of the current
+ repository.
+
+ Returns 0 on success.
options:
@@ -630,20 +630,19 @@
verify the integrity of the repository
- Verify the integrity of the current repository.
-
- This will perform an extensive check of the repository's integrity,
- validating the hashes and checksums of each entry in the changelog,
- manifest, and tracked files, as well as the integrity of their crosslinks
- and indices.
-
- Please see https://mercurial-scm.org/wiki/RepositoryCorruption for more
- information about recovery from corruption of the repository.
-
- For an alternative UI with a lot more control over the verification
- process and better error reporting, try 'hg help admin::verify'.
-
- Returns 0 on success, 1 if errors are encountered.
+ Verify the integrity of the current repository.
+
+ This will perform an extensive check of the repository's integrity, validating
+ the hashes and checksums of each entry in the changelog, manifest, and tracked
+ files, as well as the integrity of their crosslinks and indices.
+
+ Please see https://mercurial-scm.org/wiki/RepositoryCorruption for more
+ information about recovery from corruption of the repository.
+
+ For an alternative UI with a lot more control over the verification process
+ and better error reporting, try 'hg help admin::verify'.
+
+ Returns 0 on success, 1 if errors are encountered.
options:
@@ -654,36 +653,36 @@
diff repository (or selected files)
- Show differences between revisions for the specified files.
-
- Differences between files are shown using the unified diff format.
-
- Note:
- 'hg diff' may generate unexpected results for merges, as it will
- default to comparing against the working directory's first parent
- changeset if no revisions are specified. To diff against the conflict
- regions, you can use '--config diff.merge=yes'.
-
- By default, the working directory files are compared to its first parent.
- To see the differences from another revision, use --from. To see the
- difference to another revision, use --to. For example, 'hg diff --from .^'
- will show the differences from the working copy's grandparent to the
- working copy, 'hg diff --to .' will show the diff from the working copy to
- its parent (i.e. the reverse of the default), and 'hg diff --from 1.0 --to
- 1.2' will show the diff between those two revisions.
-
- Alternatively you can specify -c/--change with a revision to see the
- changes in that changeset relative to its first parent (i.e. 'hg diff -c
- 42' is equivalent to 'hg diff --from 42^ --to 42')
-
- Without the -a/--text option, diff will avoid generating diffs of files it
- detects as binary. With -a, diff will generate a diff anyway, probably
- with undesirable results.
-
- Use the -g/--git option to generate diffs in the git extended diff format.
- For more information, read 'hg help diffs'.
-
- Returns 0 on success.
+ Show differences between revisions for the specified files.
+
+ Differences between files are shown using the unified diff format.
+
+ Note:
+ 'hg diff' may generate unexpected results for merges, as it will default to
+ comparing against the working directory's first parent changeset if no
+ revisions are specified. To diff against the conflict regions, you can use
+ '--config diff.merge=yes'.
+
+ By default, the working directory files are compared to its first parent. To
+ see the differences from another revision, use --from. To see the difference
+ to another revision, use --to. For example, 'hg diff --from .^' will show the
+ differences from the working copy's grandparent to the working copy, 'hg diff
+ --to .' will show the diff from the working copy to its parent (i.e. the
+ reverse of the default), and 'hg diff --from 1.0 --to 1.2' will show the diff
+ between those two revisions.
+
+ Alternatively you can specify -c/--change with a revision to see the changes
+ in that changeset relative to its first parent (i.e. 'hg diff -c 42' is
+ equivalent to 'hg diff --from 42^ --to 42')
+
+ Without the -a/--text option, diff will avoid generating diffs of files it
+ detects as binary. With -a, diff will generate a diff anyway, probably with
+ undesirable results.
+
+ Use the -g/--git option to generate diffs in the git extended diff format. For
+ more information, read 'hg help diffs'.
+
+ Returns 0 on success.
options ([+] can be repeated):
@@ -717,38 +716,37 @@
show changed files in the working directory
- Show status of files in the repository. If names are given, only files
- that match are shown. Files that are clean or ignored or the source of a
- copy/move operation, are not listed unless -c/--clean, -i/--ignored,
- -C/--copies or -A/--all are given. Unless options described with "show
- only ..." are given, the options -mardu are used.
-
- Option -q/--quiet hides untracked (unknown and ignored) files unless
- explicitly requested with -u/--unknown or -i/--ignored.
-
- Note:
- 'hg status' may appear to disagree with diff if permissions have
- changed or a merge has occurred. The standard diff format does not
- report permission changes and diff only reports changes relative to one
- merge parent.
-
- If one revision is given, it is used as the base revision. If two
- revisions are given, the differences between them are shown. The --change
- option can also be used as a shortcut to list the changed files of a
- revision from its first parent.
-
- The codes used to show the status of files are:
-
- M = modified
- A = added
- R = removed
- C = clean
- ! = missing (deleted by non-hg command, but still tracked)
- ? = not tracked
- I = ignored
- = origin of the previous file (with --copies)
-
- Returns 0 on success.
+ Show status of files in the repository. If names are given, only files that
+ match are shown. Files that are clean or ignored or the source of a copy/move
+ operation, are not listed unless -c/--clean, -i/--ignored, -C/--copies or
+ -A/--all are given. Unless options described with "show only ..." are given,
+ the options -mardu are used.
+
+ Option -q/--quiet hides untracked (unknown and ignored) files unless
+ explicitly requested with -u/--unknown or -i/--ignored.
+
+ Note:
+ 'hg status' may appear to disagree with diff if permissions have changed or
+ a merge has occurred. The standard diff format does not report permission
+ changes and diff only reports changes relative to one merge parent.
+
+ If one revision is given, it is used as the base revision. If two revisions
+ are given, the differences between them are shown. The --change option can
+ also be used as a shortcut to list the changed files of a revision from its
+ first parent.
+
+ The codes used to show the status of files are:
+
+ M = modified
+ A = added
+ R = removed
+ C = clean
+ ! = missing (deleted by non-hg command, but still tracked)
+ ? = not tracked
+ I = ignored
+ = origin of the previous file (with --copies)
+
+ Returns 0 on success.
options ([+] can be repeated):
@@ -907,13 +905,13 @@
summarize working directory state
- This generates a brief summary of the working directory state, including
- parents, branch, commit status, phase and available updates.
-
- With the --remote option, this will check the default paths for incoming
- and outgoing changes. This can be time-consuming.
-
- Returns 0 on success.
+ This generates a brief summary of the working directory state, including
+ parents, branch, commit status, phase and available updates.
+
+ With the --remote option, this will check the default paths for incoming and
+ outgoing changes. This can be time-consuming.
+
+ Returns 0 on success.
defined by: helpext
@@ -1605,7 +1603,7 @@
("profiling.type"[break]"ls"[break]"stat"[break])
$ hg help config.type | grep -E '^$'|wc -l
- \s*3 (re)
+ \s*4 (re)
$ hg help config.profiling.type.ls
"profiling.type.ls"
--- a/tests/test-hg-parseurl.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-hg-parseurl.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,7 +5,6 @@
class ParseRequestTests(unittest.TestCase):
def testparse(self):
-
self.assertEqual(
urlutil.parseurl(b'http://example.com/no/anchor'),
(b'http://example.com/no/anchor', (None, [])),
--- a/tests/test-hgrc.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-hgrc.t Sat Oct 26 04:16:00 2024 +0200
@@ -304,15 +304,6 @@
config error at $TESTTMP/.hg/hgrc:3: [broken
[255]
-XXX-PYOXIDIZER Pyoxidizer build have trouble with zeroconf for unclear reason,
-we accept the bad output for now as this is the last thing in the way of
-testing the pyoxidizer build.
-
-#if no-pyoxidizer-in-filesystem
$ HGRCSKIPREPO=1 hg paths --config extensions.zeroconf=
foo = $TESTTMP/bar
-#else
- $ HGRCSKIPREPO=1 hg paths --config extensions.zeroconf=
- abort: An invalid argument was supplied (known-bad-output !)
- [255]
-#endif
+
--- a/tests/test-hgweb-empty.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-hgweb-empty.t Sat Oct 26 04:16:00 2024 +0200
@@ -1,9 +1,15 @@
#require serve
-Some tests for hgweb in an empty repository
+Some tests for hgweb in an empty repository and empty archive
$ hg init test
$ cd test
+ $ cat << EOF >> .hg/hgrc
+ > [web]
+ > allow-archive = zip
+ > [ui]
+ > archivemeta = False
+ > EOF
$ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
$ cat hg.pid >> $DAEMON_PIDS
$ (get-with-headers.py localhost:$HGPORT 'shortlog')
@@ -44,6 +50,9 @@
</ul>
<ul>
+ <li>
+ <a href="/archive/tip.zip">zip</a>
+ </li>
</ul>
<ul>
<li><a href="/help">help</a></li>
@@ -155,6 +164,9 @@
</ul>
<ul>
+ <li>
+ <a href="/archive/tip.zip">zip</a>
+ </li>
</ul>
<ul>
<li><a href="/help">help</a></li>
@@ -264,6 +276,9 @@
</ul>
<ul>
+ <li>
+ <a href="/archive/tip.zip">zip</a>
+ </li>
</ul>
<ul>
<li><a href="/help">help</a></li>
@@ -369,6 +384,9 @@
</ul>
<ul>
+ <li>
+ <a href="/archive/tip.zip">zip</a>
+ </li>
</ul>
<ul>
<li><a href="/help">help</a></li>
@@ -428,4 +446,14 @@
</feed>
+Fetching an empty archive
+-------------------------
+
+Test that archiving without matching files is rejected as an error,
+not as an Internal Server Error.
+
+ $ get-with-headers.py --headeronly localhost:$HGPORT archive/null.zip
+ 403 Forbidden
+ [1]
+
$ cd ..
--- a/tests/test-http-bad-server.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-http-bad-server.t Sat Oct 26 04:16:00 2024 +0200
@@ -42,7 +42,7 @@
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
- abort: error: (\$ECONNRESET\$|\$EADDRNOTAVAIL\$) (re)
+ abort: error: (\$ECONNRESET\$|\$EADDRNOTAVAIL\$|\$ECONNREFUSED\$) (re)
[100]
(The server exits on its own, but there is a race between that and starting a new server.
--- a/tests/test-http-proxy.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-http-proxy.t Sat Oct 26 04:16:00 2024 +0200
@@ -81,7 +81,7 @@
misconfigured hosts)
$ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
- abort: error: (Connection refused|Protocol not supported|.* actively refused it|\$EADDRNOTAVAIL\$|No route to host) (re)
+ abort: error: (\$ECONNREFUSED\$|Protocol not supported|.* actively refused it|\$EADDRNOTAVAIL\$|No route to host) (re)
[100]
do not use the proxy if it is in the no list
@@ -99,7 +99,7 @@
proxy can't connect to server
$ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone http://localhost:$HGPORT2/ h
- abort: HTTP Error 404: Connection refused
+ abort: HTTP Error 404: $ECONNREFUSED$
[100]
$ cat proxy.log
@@ -118,5 +118,5 @@
* - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
$LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
- * - - [*] code 404, message Connection refused (glob)
+ * - - [*] code 404, message $ECONNREFUSED$ (glob)
$LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT2/?cmd=capabilities HTTP/1.1" 404 - (glob)
--- a/tests/test-https.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-https.t Sat Oct 26 04:16:00 2024 +0200
@@ -490,7 +490,7 @@
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull --insecure https://localhost:0/
pulling from https://localhost:0/
- abort: error: Tunnel connection failed: 404 (Connection refused|\$EADDRNOTAVAIL\$) (re)
+ abort: error: Tunnel connection failed: 404 (\$ECONNREFUSED\$|\$EADDRNOTAVAIL\$) (re)
[100]
--- a/tests/test-hybridencode.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-hybridencode.py Sat Oct 26 04:16:00 2024 +0200
@@ -5,7 +5,6 @@
class hybridencodetests(unittest.TestCase):
def hybridencode(self, input, want):
-
# Check the C implementation if it's in use
got = store._pathencode(input)
self.assertEqual(want, got)
--- a/tests/test-inherit-mode.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-inherit-mode.t Sat Oct 26 04:16:00 2024 +0200
@@ -71,8 +71,8 @@
00660 ./.hg/branch
00770 ./.hg/cache/
00660 ./.hg/cache/branch2-served
- 00660 ./.hg/cache/rbc-names-v1
- 00660 ./.hg/cache/rbc-revs-v1
+ 00660 ./.hg/cache/rbc-names-v2
+ 00660 ./.hg/cache/rbc-revs-v2
00660 ./.hg/dirstate
00660 ./.hg/fsmonitor.state (fsmonitor !)
00660 ./.hg/last-message.txt
@@ -136,8 +136,8 @@
00660 ../push/.hg/branch
00770 ../push/.hg/cache/
00660 ../push/.hg/cache/branch2-base
- 00660 ../push/.hg/cache/rbc-names-v1
- 00660 ../push/.hg/cache/rbc-revs-v1
+ 00660 ../push/.hg/cache/rbc-names-v2
+ 00660 ../push/.hg/cache/rbc-revs-v2
00660 ../push/.hg/requires
00770 ../push/.hg/store/
00660 ../push/.hg/store/00changelog-b870a51b.nd (rust !)
--- a/tests/test-install.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-install.t Sat Oct 26 04:16:00 2024 +0200
@@ -216,6 +216,9 @@
Failed building wheel for mercurial (?)
WARNING: You are using pip version *; however, version * is available. (glob) (?)
You should consider upgrading via the '$TESTTMP/installenv/bin/python* -m pip install --upgrade pip' command. (glob) (?)
+ (?)
+ [notice] A new release of pip is available: * -> * (glob) (?)
+ [notice] To update, run: python -m pip install --upgrade pip (?)
$ ./installenv/*/hg debuginstall || cat pip.log
checking encoding (ascii)...
checking Python executable (*) (glob)
--- a/tests/test-journal.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-journal.t Sat Oct 26 04:16:00 2024 +0200
@@ -92,7 +92,7 @@
$ hg journal babar
previous locations of 'babar':
000000000000 book -d babar
- cb9a9f314b8b book -f -r '.~1' babar
+ cb9a9f314b8b book -f -r ?.~1? babar (glob)
1e6c11564562 book -r . babar
Test that bookmarks and working copy tracking is not mixed
@@ -111,7 +111,7 @@
previous locations of the working copy and bookmarks:
1e6c11564562 baz book -r tip baz
000000000000 babar book -d babar
- cb9a9f314b8b babar book -f -r '.~1' babar
+ cb9a9f314b8b babar book -f -r ?.~1? babar (glob)
1e6c11564562 babar book -r . babar
1e6c11564562 bar up
1e6c11564562 . up
@@ -142,7 +142,7 @@
previous locations of 're:ba.':
1e6c11564562 baz book -r tip baz
000000000000 babar book -d babar
- cb9a9f314b8b babar book -f -r '.~1' babar
+ cb9a9f314b8b babar book -f -r ?.~1? babar (glob)
1e6c11564562 babar book -r . babar
1e6c11564562 bar up
cb9a9f314b8b bar book -f bar
@@ -154,7 +154,7 @@
previous locations of the working copy and bookmarks:
000000000000 -> 1e6c11564562 foobar baz 1970-01-01 00:00 +0000 book -r tip baz
cb9a9f314b8b -> 000000000000 foobar babar 1970-01-01 00:00 +0000 book -d babar
- 1e6c11564562 -> cb9a9f314b8b foobar babar 1970-01-01 00:00 +0000 book -f -r '.~1' babar
+ 1e6c11564562 -> cb9a9f314b8b foobar babar 1970-01-01 00:00 +0000 book -f -r ?.~1? babar (glob)
000000000000 -> 1e6c11564562 foobar babar 1970-01-01 00:00 +0000 book -r . babar
cb9a9f314b8b -> 1e6c11564562 foobar bar 1970-01-01 00:00 +0000 up
cb9a9f314b8b -> 1e6c11564562 foobar . 1970-01-01 00:00 +0000 up
--- a/tests/test-largefiles-cache.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-largefiles-cache.t Sat Oct 26 04:16:00 2024 +0200
@@ -142,6 +142,12 @@
$ cd ..
+#else
+
+This is deleted by `hg push` in the other block above.
+
+ $ rm src/.hg/largefiles/undo.backup.dirstate.bck
+
#endif
Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
--- a/tests/test-lfs-test-server.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-lfs-test-server.t Sat Oct 26 04:16:00 2024 +0200
@@ -858,6 +858,7 @@
(Restart the server in a different location so it no longer has the content)
$ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+ *Terminated: * (glob) (?)
#if hg-server
$ cat $TESTTMP/access.log $TESTTMP/errors.log
@@ -947,3 +948,5 @@
[50]
$ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+ *Terminated: * (glob) (?)
+
--- a/tests/test-linerange.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-linerange.py Sat Oct 26 04:16:00 2024 +0200
@@ -51,9 +51,9 @@
def setUp(self):
self.blocks = list(mdiff.allblocks(text1, text2))
assert self.blocks == [
- ([0, 3, 0, 2], b'!'),
+ ((0, 3, 0, 2), b'!'),
((3, 7, 2, 6), b'='),
- ([7, 12, 6, 12], b'!'),
+ ((7, 12, 6, 12), b'!'),
((12, 12, 12, 12), b'='),
], self.blocks
--- a/tests/test-lock.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-lock.py Sat Oct 26 04:16:00 2024 +0200
@@ -52,7 +52,7 @@
releasefn=self.releasefn,
acquirefn=self.acquirefn,
*args,
- **kwargs
+ **kwargs,
)
l.postrelease.append(self.postreleasefn)
return l
--- a/tests/test-merge-partial-tool.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-merge-partial-tool.t Sat Oct 26 04:16:00 2024 +0200
@@ -5,8 +5,8 @@
$ cat >> "$TESTTMP/head.sh" <<'EOF'
> #!/bin/sh
> for f in "$@"; do
- > head -5 $f > tmp
- > mv -f tmp $f
+ > head -5 "$f" > tmp
+ > mv -f tmp "$f"
> done
> EOF
$ chmod +x "$TESTTMP/head.sh"
@@ -14,8 +14,8 @@
$ cat >> "$TESTTMP/tail.sh" <<'EOF'
> #!/bin/sh
> for f in "$@"; do
- > tail -5 $f > tmp
- > mv -f tmp $f
+ > tail -5 "$f" > tmp
+ > mv -f tmp "$f"
> done
> EOF
$ chmod +x "$TESTTMP/tail.sh"
@@ -30,6 +30,27 @@
> tail.order=1
> EOF
+On Windows, running $TESTTMP/script.sh will open script.sh in an editor, if
+that's what the configured file association is. The code shell-quotes the
+*.executable value, so we can't set it to `sh $TESTTMP/script.sh`, because it
+tries to run that as the executable. As a workaround, generate a bat file that
+invokes `sh script.sh`, and passes the args along.
+
+#if windows
+ $ cat >> "$TESTTMP/head.bat" <<'EOF'
+ > @echo off
+ > sh "%TESTTMP%/head.sh" %*
+ > EOF
+
+ $ cat >> "$TESTTMP/tail.bat" <<'EOF'
+ > @echo off
+ > sh "%TESTTMP%/tail.sh" %*
+ > EOF
+
+ $ sed 's/head.sh/head.bat/g' "${HGRCPATH}" > "${HGRCPATH}.tmp"
+ $ sed 's/tail.sh/tail.bat/g' "${HGRCPATH}.tmp" > "${HGRCPATH}"
+#endif
+
$ make_commit() {
> echo "$@" | xargs -n1 > file
> hg add file 2> /dev/null
@@ -294,6 +315,23 @@
> [partial-merge-tools]
> log-args.executable=$TESTTMP/log-args.sh
> EOF
+
+On Windows, running $TESTTMP/script.sh will open script.sh in an editor, if
+that's what the configured file association is. The code shell-quotes the
+*.executable value, so we can't set it to `sh $TESTTMP/script.sh`, because it
+tries to run that as the executable. As a workaround, generate a bat file that
+invokes `sh script.sh`, and passes the args along.
+
+#if windows
+ $ cat >> "$TESTTMP/log-args.bat" <<'EOF'
+ > @echo off
+ > sh "%TESTTMP%/log-args.sh" %*
+ > EOF
+
+ $ sed 's/log-args.sh/log-args.bat/g' "$HGRCPATH" > "${HGRCPATH}.tmp"
+ $ mv "${HGRCPATH}.tmp" "${HGRCPATH}"
+#endif
+
$ hg up -C 2
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-merge-relaxed-block-sync.t Sat Oct 26 04:16:00 2024 +0200
@@ -0,0 +1,154 @@
+==============================================
+Test merge algorithm with "relaxed block sync"
+==============================================
+
+Setup
+=====
+
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > relaxed-block-sync-merge=yes
+ > [ui]
+ > merge=:merge3
+ > EOF
+ $ unset HGMERGE
+
+ $ hg init repo
+ $ cd repo
+
+ $ m=../scratch
+ $ mkdir "$m"
+
+# For the purpose of this test, we use a file [listing] that has one line
+# per file of the [scratch] directory.
+# This way, the patches can be represented as bash scripts.
+#
+# Adding a line is then just "touch", removing a line is "rm", and
+# modifying a line is "echo modified > file1".
+
+# make_change takes a "patch script", as described above, and
+# produces a file [listing] with the corresponding contents
+# after applying the patch to a fixed base state.
+ $ make_change() {
+ > cmd=$1
+ > rm -r ../scratch
+ > mkdir ../scratch
+ > (cat listing 2>/dev/null || true) | while IFS=' :' read k v; do echo "$v" > ../scratch/"$k"; done
+ >
+ > (
+ > cd ../scratch
+ > eval "$cmd" >&2
+ > for f in *; do val=$(cat "$f"); printf "$f: $val\n"; done) > listing
+ > }
+
+# mk_rev takes a [base] and a patch, and produces a child revision of [base]
+# corresponding to that patch.
+ $ mk_rev() {
+ > base=$1
+ > cmd=$2
+ > (hg update -C "$base" -q
+ > make_change "$cmd"
+ > (hg commit -qAm _ 2>&1) | grep -v 'commit already existed') >&2
+ > hg log -r . -T '{rev}'
+ > }
+
+ $ test() {
+ > cmd1=$1
+ > cmd2=$2
+ > r2=$(mk_rev 0 "$cmd2")
+ > r1=$(mk_rev 0 "$cmd1")
+ > # already at r1
+ > hg merge -q "$r2"
+ > cat listing
+ > }
+
+ $ rev0=$(mk_rev 'rev(-1)' 'echo val1 > key1; echo val2 > key2; echo val3 > key3; ')
+ $ cat listing
+ key1: val1
+ key2: val2
+ key3: val3
+
+Actual testing
+==============
+
+easy merge: no need for relaxed block sync:
+-------------------------------------------
+
+ $ test 'echo modified1 > key1' 'echo modified3 > key3'
+ key1: modified1
+ key2: val2
+ key3: modified3
+
+Add adjacent to modify:
+-----------------------
+
+ $ test 'echo modified > key3' 'echo val4 > key4'
+ key1: val1
+ key2: val2
+ key3: modified
+ key4: val4
+
+Modify adjacent to modify:
+--------------------------
+
+ $ test 'echo modified3 > key3' 'echo modified2 > key2'
+ key1: val1
+ key2: modified2
+ key3: modified3
+
+Remove adjacent to modify:
+--------------------------
+
+ $ test 'rm key2' 'echo modified > key1'
+ key1: modified
+ key3: val3
+
+Add adjacent to remove:
+-----------------------
+
+ $ test 'rm key2' 'touch key1a'
+ key1: val1
+ key1a:
+ key3: val3
+
+Remove adjacent to remove:
+--------------------------
+
+ $ test 'rm key2' 'rm key1'
+ key3: val3
+
+It even works if a modified line is sandwiched between additions above and below:
+
+ $ test 'echo val-changed-3 > key3' 'touch key2a; touch key4'
+ key1: val1
+ key2: val2
+ key2a:
+ key3: val-changed-3
+ key4:
+
+Add adjacent to add:
+--------------------
+
+Add adjacent to add is still disallowed because we don't know what order to add
+lines in:
+
+ $ test 'touch key1a' 'touch key1b'
+ warning: conflicts while merging listing! (edit, then use 'hg resolve --mark')
+ key1: val1
+ <<<<<<< working copy: 744662bcc33a - test: _
+ key1a:
+ ||||||| common ancestor: b1791e356cd4 - test: _
+ =======
+ key1b:
+ >>>>>>> merge rev: 06735b47f956 - test: _
+ key2: val2
+ key3: val3
+
+Add kinda-adjacent to add can still work if there's an
+adjacent line that helps resolve the order ambiguity:
+
+ $ test 'touch key1a; rm key2' 'touch key2a'
+ key1: val1
+ key1a:
+ key2a:
+ key3: val3
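
The expectations above boil down to a key-wise rule: a side's edit wins
wherever the other side kept the base value, and only edits to the same key
conflict. The following dict-based model is an approximation for this listing
format only (the real merge is line-based, which is why the add-adjacent-to-add
case still conflicts on line order even though the keys differ):

  def merge_listing(base, ours, theirs):
      # Toy three-way merge over {key: value} listings; deletion is
      # modeled as the key being absent from a side.
      merged = {}
      for key in set(base) | set(ours) | set(theirs):
          b, o, t = base.get(key), ours.get(key), theirs.get(key)
          if o == t:
              pick = o
          elif o == b:
              pick = t
          elif t == b:
              pick = o
          else:
              raise ValueError('conflict on %r' % key)
          if pick is not None:
              merged[key] = pick
      return merged

  # "Remove adjacent to modify" from above:
  assert merge_listing(
      {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'},
      {'key1': 'val1', 'key3': 'val3'},                      # rm key2
      {'key1': 'modified', 'key2': 'val2', 'key3': 'val3'},  # modify key1
  ) == {'key1': 'modified', 'key3': 'val3'}
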
--- a/tests/test-mq-qrefresh-interactive.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-mq-qrefresh-interactive.t Sat Oct 26 04:16:00 2024 +0200
@@ -12,22 +12,22 @@
update the current patch
- If any file patterns are provided, the refreshed patch will contain only
- the modifications that match those patterns; the remaining modifications
- will remain in the working directory.
+ If any file patterns are provided, the refreshed patch will contain only the
+ modifications that match those patterns; the remaining modifications will
+ remain in the working directory.
- If -s/--short is specified, files currently included in the patch will be
- refreshed just like matched files and remain in the patch.
+ If -s/--short is specified, files currently included in the patch will be
+ refreshed just like matched files and remain in the patch.
- If -e/--edit is specified, Mercurial will start your configured editor for
- you to enter a message. In case qrefresh fails, you will find a backup of
- your message in ".hg/last-message.txt".
+ If -e/--edit is specified, Mercurial will start your configured editor for you
+ to enter a message. In case qrefresh fails, you will find a backup of your
+ message in ".hg/last-message.txt".
- hg add/remove/copy/rename work as usual, though you might want to use git-
- style patches (-g/--git or [diff] git=1) to track copies and renames. See
- the diffs help topic for more information on the git diff format.
+ hg add/remove/copy/rename work as usual, though you might want to use git-
+ style patches (-g/--git or [diff] git=1) to track copies and renames. See the
+ diffs help topic for more information on the git diff format.
- Returns 0 on success.
+ Returns 0 on success.
options ([+] can be repeated):
@@ -54,22 +54,22 @@
update the current patch
- If any file patterns are provided, the refreshed patch will contain only
- the modifications that match those patterns; the remaining modifications
- will remain in the working directory.
+ If any file patterns are provided, the refreshed patch will contain only the
+ modifications that match those patterns; the remaining modifications will
+ remain in the working directory.
- If -s/--short is specified, files currently included in the patch will be
- refreshed just like matched files and remain in the patch.
+ If -s/--short is specified, files currently included in the patch will be
+ refreshed just like matched files and remain in the patch.
- If -e/--edit is specified, Mercurial will start your configured editor for
- you to enter a message. In case qrefresh fails, you will find a backup of
- your message in ".hg/last-message.txt".
+ If -e/--edit is specified, Mercurial will start your configured editor for you
+ to enter a message. In case qrefresh fails, you will find a backup of your
+ message in ".hg/last-message.txt".
- hg add/remove/copy/rename work as usual, though you might want to use git-
- style patches (-g/--git or [diff] git=1) to track copies and renames. See
- the diffs help topic for more information on the git diff format.
+ hg add/remove/copy/rename work as usual, though you might want to use git-
+ style patches (-g/--git or [diff] git=1) to track copies and renames. See the
+ diffs help topic for more information on the git diff format.
- Returns 0 on success.
+ Returns 0 on success.
options ([+] can be repeated):
--- a/tests/test-parseindex2.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-parseindex2.py Sat Oct 26 04:16:00 2024 +0200
@@ -26,6 +26,7 @@
parsers = policy.importmod('parsers')
+
# original python implementation
def gettype(q):
return int(q & 0xFFFF)
--- a/tests/test-patchbomb-tls.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-patchbomb-tls.t Sat Oct 26 04:16:00 2024 +0200
@@ -98,7 +98,7 @@
$ wait_log "no hello:"
$ cat ../log
connection from * (glob)
- no hello: b''
+ no hello: (b''|EOF) (re)
$ : > ../log
With global certificates:
--- a/tests/test-paths.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-paths.t Sat Oct 26 04:16:00 2024 +0200
@@ -140,21 +140,11 @@
zeroconf wraps ui.configitems(), which shouldn't crash at least:
-XXX-PYOXIDIZER Pyoxidizer build have trouble with zeroconf for unclear reason,
-we accept the bad output for now as this is the last thing in the way of
-testing the pyoxidizer build.
-
-#if no-pyoxidizer-in-filesystem
$ hg paths --config extensions.zeroconf=
dupe = $TESTTMP/b#tip
dupe:pushurl = https://example.com/dupe
expand = $TESTTMP/a/$SOMETHING/bar
insecure = http://foo:***@example.com/
-#else
- $ hg paths --config extensions.zeroconf=
- abort: An invalid argument was supplied (known-bad-output !)
- [255]
-#endif
$ cd ..
--- a/tests/test-phases.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-phases.t Sat Oct 26 04:16:00 2024 +0200
@@ -766,7 +766,7 @@
> EOF
(making a changeset hidden; H in that case)
- $ hg debugobsolete `hg id --debug -r 5`
+ $ hg debugobsolete `hg id -T "{node}" -r 5`
1 new obsolescence markers
obsoleted 1 changesets
@@ -1018,7 +1018,7 @@
The hidden commit is an orphan but doesn't show up without --hidden
And internal changesets are not considered for unstability.
- $ hg debugobsolete `hg id --debug -ir 0`
+ $ hg debugobsolete `hg id -T "{node}" -r 0`
1 new obsolescence markers
obsoleted 1 changesets
$ hg --hidden log -G -r '(0::) - 0'
--- a/tests/test-qrecord.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-qrecord.t Sat Oct 26 04:16:00 2024 +0200
@@ -34,30 +34,30 @@
interactively select changes to commit
- If a list of files is omitted, all changes reported by 'hg status' will be
- candidates for recording.
+ If a list of files is omitted, all changes reported by 'hg status' will be
+ candidates for recording.
- See 'hg help dates' for a list of formats valid for -d/--date.
+ See 'hg help dates' for a list of formats valid for -d/--date.
- If using the text interface (see 'hg help config'), you will be prompted
- for whether to record changes to each modified file, and for files with
- multiple changes, for each change to use. For each query, the following
- responses are possible:
+ If using the text interface (see 'hg help config'), you will be prompted for
+ whether to record changes to each modified file, and for files with multiple
+ changes, for each change to use. For each query, the following responses are
+ possible:
- y - record this change
- n - skip this change
- e - edit this change manually
+ y - record this change
+ n - skip this change
+ e - edit this change manually
- s - skip remaining changes to this file
- f - record remaining changes to this file
+ s - skip remaining changes to this file
+ f - record remaining changes to this file
- d - done, skip remaining changes and files
- a - record all changes to all remaining files
- q - quit, recording no changes
+ d - done, skip remaining changes and files
+ a - record all changes to all remaining files
+ q - quit, recording no changes
- ? - display help
+ ? - display help
- This command is not available when committing a merge.
+ This command is not available when committing a merge.
(use 'hg help -e record' to show help for the record extension)
@@ -91,7 +91,7 @@
interactively record a new patch
- See 'hg help qnew' & 'hg help record' for more information and usage.
+ See 'hg help qnew' & 'hg help record' for more information and usage.
(some details hidden, use --verbose to show complete help)
@@ -123,7 +123,7 @@
interactively record a new patch
- See 'hg help qnew' & 'hg help record' for more information and usage.
+ See 'hg help qnew' & 'hg help record' for more information and usage.
(some details hidden, use --verbose to show complete help)
@@ -137,7 +137,7 @@
interactively record a new patch
- See 'hg help qnew' & 'hg help record' for more information and usage.
+ See 'hg help qnew' & 'hg help record' for more information and usage.
options ([+] can be repeated):
--- a/tests/test-racy-mutations.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-racy-mutations.t Sat Oct 26 04:16:00 2024 +0200
@@ -59,7 +59,7 @@
> WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
> WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
> hg commit -qAm 'r1 (foo)' --edit foo \
- > --config ui.editor="sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh" \
+ > --config ui.editor="sh $TESTTMP/waitlock_editor.sh" \
> > .foo_commit_out 2>&1 ;\
> touch "${JOBS_FINISHED}"
> ) &
--- a/tests/test-rebase-conflicts.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-rebase-conflicts.t Sat Oct 26 04:16:00 2024 +0200
@@ -319,12 +319,14 @@
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 3 parts total
- truncating cache/rbc-revs-v1 to 72
+ resetting content of rbc-names-v2
added 2 changesets with 2 changes to 1 files
updating the branch cache
invalid branch cache (served): tip differs
+ history modification detected - truncating revision branch cache to revision 1
invalid branch cache (served.hidden): tip differs
rebase completed
+ resetting content of cache/rbc-revs-v2
Test minimization of merge conflicts
$ hg up -q null
--- a/tests/test-record.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-record.t Sat Oct 26 04:16:00 2024 +0200
@@ -17,30 +17,30 @@
interactively select changes to commit
- If a list of files is omitted, all changes reported by 'hg status' will be
- candidates for recording.
+ If a list of files is omitted, all changes reported by 'hg status' will be
+ candidates for recording.
- See 'hg help dates' for a list of formats valid for -d/--date.
+ See 'hg help dates' for a list of formats valid for -d/--date.
- If using the text interface (see 'hg help config'), you will be prompted
- for whether to record changes to each modified file, and for files with
- multiple changes, for each change to use. For each query, the following
- responses are possible:
+ If using the text interface (see 'hg help config'), you will be prompted for
+ whether to record changes to each modified file, and for files with multiple
+ changes, for each change to use. For each query, the following responses are
+ possible:
- y - record this change
- n - skip this change
- e - edit this change manually
+ y - record this change
+ n - skip this change
+ e - edit this change manually
- s - skip remaining changes to this file
- f - record remaining changes to this file
+ s - skip remaining changes to this file
+ f - record remaining changes to this file
- d - done, skip remaining changes and files
- a - record all changes to all remaining files
- q - quit, recording no changes
+ d - done, skip remaining changes and files
+ a - record all changes to all remaining files
+ q - quit, recording no changes
- ? - display help
+ ? - display help
- This command is not available when committing a merge.
+ This command is not available when committing a merge.
(use 'hg help -e record' to show help for the record extension)
--- a/tests/test-remote-hidden.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-remote-hidden.t Sat Oct 26 04:16:00 2024 +0200
@@ -85,8 +85,8 @@
branch2-served
branch2-served.hidden
branch2-visible
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-visible
--- a/tests/test-remotefilelog-datapack.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-remotefilelog-datapack.py Sat Oct 26 04:16:00 2024 +0200
@@ -186,7 +186,7 @@
content = b'put-something-here \n' * i
node = self.getHash(content)
meta = {
- constants.METAKEYFLAG: i ** 4,
+ constants.METAKEYFLAG: i**4,
constants.METAKEYSIZE: len(content),
b'Z': b'random_string',
b'_': b'\0' * i,
--- a/tests/test-remotefilelog-histpack.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-remotefilelog-histpack.py Sat Oct 26 04:16:00 2024 +0200
@@ -177,7 +177,7 @@
pack = self.createPack(revisions)
# Verify the pack contents
- for (filename, node) in allentries:
+ for filename, node in allentries:
ancestors = pack.getancestors(filename, node)
self.assertEqual(ancestorcounts[(filename, node)], len(ancestors))
for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.items():
--- a/tests/test-revlog-mmapindex.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-revlog-mmapindex.t Sat Oct 26 04:16:00 2024 +0200
@@ -36,7 +36,9 @@
> EOF
mmap index which is now more than 4k long
- $ hg log -l 5 -T '{rev}\n' --config experimental.mmapindexthreshold=4k
+ $ hg log -l 5 -T '{rev}\n' \
+ > --config storage.revlog.mmap.index=yes \
+ > --config storage.revlog.mmap.index:size-threshold=4k
mmapping $TESTTMP/a/.hg/store/00changelog.i (no-pure !)
mmapping $TESTTMP/a/.hg/store/00changelog-????????.nd (glob) (rust !)
100
@@ -46,7 +48,9 @@
96
do not mmap index which is still less than 32k
- $ hg log -l 5 -T '{rev}\n' --config experimental.mmapindexthreshold=32k
+ $ hg log -l 5 -T '{rev}\n' \
+ > --config storage.revlog.mmap.index=yes \
+ > --config storage.revlog.mmap.index:size-threshold=32k
mmapping $TESTTMP/a/.hg/store/00changelog-????????.nd (glob) (rust !)
100
99
--- a/tests/test-revlog-raw.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-revlog-raw.py Sat Oct 26 04:16:00 2024 +0200
@@ -246,7 +246,7 @@
That is to say, given any x, y where both x, and y are in range(2 ** n),
there is an x followed immediately by y in the generated sequence.
"""
- m = 2 ** n
+ m = 2**n
# Gray Code. See https://en.wikipedia.org/wiki/Gray_code
gray = lambda x: x ^ (x >> 1)
--- a/tests/test-rhg-sparse-narrow.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-rhg-sparse-narrow.t Sat Oct 26 04:16:00 2024 +0200
@@ -70,7 +70,7 @@
TODO: bad error message
$ $NO_FALLBACK rhg cat -r "$tip" hide
- abort: invalid revision identifier: 6d714a4a2998cbfd0620db44da58b749f6565d63
+ abort: invalid revision identifier: 1406e74118627694268417491f018a4a883152f0
[255]
$ "$real_hg" cat -r "$tip" hide
[1]
--- a/tests/test-run-tests.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-run-tests.t Sat Oct 26 04:16:00 2024 +0200
@@ -191,7 +191,7 @@
\x1b[38;5;34m+ foo\x1b[39m (esc) (no-pygments211 !)
\x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
- !
+ \x1b[38;5;88m!\x1b[39m (esc)
\x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
# Ran 1 tests, 0 skipped, 1 failed.
python hash seed: * (glob)
--- a/tests/test-server-view.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-server-view.t Sat Oct 26 04:16:00 2024 +0200
@@ -38,8 +38,8 @@
branch2-base%89c45d2fa07e
branch2-served
hgtagsfnodes1
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-served%89c45d2fa07e
--- a/tests/test-share.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-share.t Sat Oct 26 04:16:00 2024 +0200
@@ -52,8 +52,8 @@
manifestfulltextcache (reporevlogstore !)
$ ls -1 ../repo1/.hg/cache
branch2-served
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2-visible
Cloning a shared repo, on the other hand, should pick up the full cache dir.
@@ -64,8 +64,8 @@
$ ls -1 ../repo2-clone/.hg/cache
branch2-base
branch2-served
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-served
tags2-visible
--- a/tests/test-shelve.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-shelve.t Sat Oct 26 04:16:00 2024 +0200
@@ -45,34 +45,34 @@
save and set aside changes from the working directory
- Shelving takes files that "hg status" reports as not clean, saves the
- modifications to a bundle (a shelved change), and reverts the files so
- that their state in the working directory becomes clean.
+ Shelving takes files that "hg status" reports as not clean, saves the
+ modifications to a bundle (a shelved change), and reverts the files so that
+ their state in the working directory becomes clean.
- To restore these changes to the working directory, using "hg unshelve";
- this will work even if you switch to a different commit.
+ To restore these changes to the working directory, using "hg unshelve"; this
+ will work even if you switch to a different commit.
- When no files are specified, "hg shelve" saves all not-clean files. If
- specific files or directories are named, only changes to those files are
- shelved.
+ When no files are specified, "hg shelve" saves all not-clean files. If
+ specific files or directories are named, only changes to those files are
+ shelved.
- In bare shelve (when no files are specified, without interactive, include
- and exclude option), shelving remembers information if the working
- directory was on newly created branch, in other words working directory
- was on different branch than its first parent. In this situation
- unshelving restores branch information to the working directory.
+ In bare shelve (when no files are specified, without interactive, include and
+ exclude option), shelving remembers information if the working directory was
+ on newly created branch, in other words working directory was on different
+ branch than its first parent. In this situation unshelving restores branch
+ information to the working directory.
- Each shelved change has a name that makes it easier to find later. The
- name of a shelved change defaults to being based on the active bookmark,
- or if there is no active bookmark, the current named branch. To specify a
- different name, use "--name".
+ Each shelved change has a name that makes it easier to find later. The name of
+ a shelved change defaults to being based on the active bookmark, or if there
+ is no active bookmark, the current named branch. To specify a different name,
+ use "--name".
- To see a list of existing shelved changes, use the "--list" option. For
- each shelved change, this will print its name, age, and description; use "
- --patch" or "--stat" for more details.
+ To see a list of existing shelved changes, use the "--list" option. For each
+ shelved change, this will print its name, age, and description; use "--patch"
+ or "--stat" for more details.
- To delete specific shelved changes, use "--delete". To delete all shelved
- changes, use "--cleanup".
+ To delete specific shelved changes, use "--delete". To delete all shelved
+ changes, use "--cleanup".
options ([+] can be repeated):
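
The reflowed help text above describes a complete shelve round trip. Schematically, with command output omitted and the target revision hypothetical:

    $ hg shelve --name wip       # save not-clean files and make the tree clean
    $ hg update some-other-rev   # unshelving works even after switching commits
    $ hg unshelve                # restore the shelved change here
    $ hg shelve --list --stat    # name, age, description, and a diffstat
    $ hg shelve --cleanup        # delete all shelved changes
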
--- a/tests/test-show.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-show.t Sat Oct 26 04:16:00 2024 +0200
@@ -25,25 +25,25 @@
show various repository information
- A requested view of repository data is displayed.
+ A requested view of repository data is displayed.
- If no view is requested, the list of available views is shown and the
- command aborts.
+ If no view is requested, the list of available views is shown and the command
+ aborts.
- Note:
- There are no backwards compatibility guarantees for the output of this
- command. Output may change in any future Mercurial release.
+ Note:
+ There are no backwards compatibility guarantees for the output of this
+ command. Output may change in any future Mercurial release.
- Consumers wanting stable command output should specify a template via
- "-T/--template".
+ Consumers wanting stable command output should specify a template via
+ "-T/--template".
- List of available views:
+ List of available views:
- bookmarks bookmarks and their associated changeset
+ bookmarks bookmarks and their associated changeset
- stack current line of work
+ stack current line of work
- work changesets that aren't finished
+ work changesets that aren't finished
(use 'hg help -e show' to show help for the show extension)
--- a/tests/test-simple-update.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-simple-update.t Sat Oct 26 04:16:00 2024 +0200
@@ -96,11 +96,12 @@
#if no-windows
$ cat <<EOF > forceworker.py
- > from mercurial import extensions, worker
+ > from mercurial import extensions, merge, worker
> def nocost(orig, ui, costperop, nops, threadsafe=True):
> return worker._numworkers(ui) > 1
> def uisetup(ui):
> extensions.wrapfunction(worker, 'worthwhile', nocost)
+ > merge.MAYBE_USE_RUST_UPDATE = False
> EOF
$ hg init worker
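
Here and in test-symlink-placeholder.t below, the test extensions now set merge.MAYBE_USE_RUST_UPDATE = False so the Rust update fast path does not bypass the wrapped Python code. A minimal standalone sketch of the pattern, mirroring the calls visible in the hunk (illustrative, not a stable API):

    from mercurial import extensions, merge, worker

    def nocost(orig, ui, costperop, nops, threadsafe=True):
        # Claim parallel work is always worthwhile when workers exist,
        # regardless of the estimated cost.
        return worker._numworkers(ui) > 1

    def uisetup(ui):
        extensions.wrapfunction(worker, 'worthwhile', nocost)
        # Force the pure-Python update path so the wrapper is exercised.
        merge.MAYBE_USE_RUST_UPDATE = False
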
--- a/tests/test-simplemerge.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-simplemerge.py Sat Oct 26 04:16:00 2024 +0200
@@ -24,6 +24,8 @@
from mercurial.utils import stringutil
TestCase = unittest.TestCase
+
+
# bzr compatible interface, for the tests
class Merge3(simplemerge.Merge3Text):
"""3-way merge of texts.
--- a/tests/test-sparse.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-sparse.t Sat Oct 26 04:16:00 2024 +0200
@@ -34,7 +34,9 @@
$ mk 'foo' bar
$ mk 'foo-bar' x
$ mk 'unanchoredfoo-bar' x
+#if no-windows
$ mk 'foo*bar' x
+#endif
$ mk 'dir/foo-bar' x
$ hg status --config rhg.on-unsupported=abort
? dir/foo-bar/x
--- a/tests/test-split-legacy-inline-changelog.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-split-legacy-inline-changelog.t Sat Oct 26 04:16:00 2024 +0200
@@ -18,7 +18,8 @@
$ mkdir sanity-check
$ cd sanity-check
- $ tar xf $TESTDIR/bundles/inlined-changelog.tar
+ $ cat "$TESTDIR/bundles/inlined-changelog.tar" | tar xf -
+
$ cd inlined-changelog
$ hg root
$TESTTMP/sanity-check/inlined-changelog
@@ -55,7 +56,7 @@
$ mkdir simple-commit
$ cd simple-commit
- $ tar xf $TESTDIR/bundles/inlined-changelog.tar
+ $ cat "$TESTDIR/bundles/inlined-changelog.tar" | tar xf -
$ cd inlined-changelog
$ hg up --quiet
$ hg log -GT '[{rev}] {desc}\n'
@@ -85,7 +86,7 @@
$ mkdir pretxnclose-commit
$ cd pretxnclose-commit
- $ tar xf $TESTDIR/bundles/inlined-changelog.tar
+ $ cat "$TESTDIR/bundles/inlined-changelog.tar" | tar xf -
$ cat >> inlined-changelog/.hg/hgrc <<EOF
> [hooks]
> pretxnclose=hg log -r tip -T "pre-txn tip rev: {rev}\n"
@@ -121,7 +122,7 @@
$ mkdir simple-local-push
$ cd simple-local-push
- $ tar xf $TESTDIR/bundles/inlined-changelog.tar
+ $ cat "$TESTDIR/bundles/inlined-changelog.tar" | tar xf -
$ hg log -R inlined-changelog -T '[{rev}] {desc}\n'
[0] first commit
@@ -166,7 +167,7 @@
$ mkdir pretxnchangegroup-local-push
$ cd pretxnchangegroup-local-push
- $ tar xf $TESTDIR/bundles/inlined-changelog.tar
+ $ cat "$TESTDIR/bundles/inlined-changelog.tar" | tar xf -
$ cat >> inlined-changelog/.hg/hgrc <<EOF
> [hooks]
> pretxnchangegroup=hg log -r tip -T "pre-txn tip rev: {rev}\n"
@@ -214,7 +215,7 @@
$ mkdir simple-ssh-push
$ cd simple-ssh-push
- $ tar xf $TESTDIR/bundles/inlined-changelog.tar
+ $ cat "$TESTDIR/bundles/inlined-changelog.tar" | tar xf -
$ hg log -R inlined-changelog -T '[{rev}] {desc}\n'
[0] first commit
@@ -259,7 +260,7 @@
$ mkdir pretxnchangegroup-ssh-push
$ cd pretxnchangegroup-ssh-push
- $ tar xf $TESTDIR/bundles/inlined-changelog.tar
+ $ cat "$TESTDIR/bundles/inlined-changelog.tar" | tar xf -
$ cat >> inlined-changelog/.hg/hgrc <<EOF
> [hooks]
> pretxnchangegroup=hg log -r tip -T "pre-txn tip rev: {rev}\n"
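
Feeding the archive to tar on stdin instead of naming it as an argument is most likely a portability fix: with a Windows-style absolute $TESTDIR, GNU tar can parse the drive-letter colon as a host:file remote-archive specification. The equivalent stream-based extraction in Python, as a hedged sketch with a hypothetical path:

    import tarfile

    with open('/path/to/inlined-changelog.tar', 'rb') as fh:  # hypothetical path
        # mode='r|' reads the archive as a non-seekable stream, which is
        # what "cat archive | tar xf -" arranges in the test.
        with tarfile.open(fileobj=fh, mode='r|') as tf:
            tf.extractall('.')
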
--- a/tests/test-split.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-split.t Sat Oct 26 04:16:00 2024 +0200
@@ -65,7 +65,7 @@
Generate some content. The sed filter drops CR on Windows, which is dropped in
the a > b line.
- $ $TESTDIR/seq.py 1 5 | sed 's/\r$//' >> a
+ $ $TESTDIR/seq.py 1 5 >> a
$ hg ci -m a1 -A a -q
$ hg bookmark -i r1
$ sed 's/1/11/;s/3/33/;s/5/55/' a > b
--- a/tests/test-static-http.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-static-http.t Sat Oct 26 04:16:00 2024 +0200
@@ -268,6 +268,7 @@
/remote/.hg/cache/branch2-served
/remote/.hg/cache/hgtagsfnodes1
/remote/.hg/cache/rbc-names-v1
+ /remote/.hg/cache/rbc-names-v2
/remote/.hg/cache/tags2-served
/remote/.hg/dirstate
/remote/.hg/localtags
--- a/tests/test-status-eacces.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-status-eacces.t Sat Oct 26 04:16:00 2024 +0200
@@ -28,13 +28,15 @@
$ touch d1/y
$ chmod -r d1
$ hg status
- d1: $EACCES$
+ d1: $EACCES$ (unix-permissions !)
! d1/x (rhg !)
! d1/x (no-rhg rust !)
+ ? d1/y (no-unix-permissions !)
$ hg status
- d1: $EACCES$
+ d1: $EACCES$ (unix-permissions !)
! d1/x (rust !)
! d1/x (no-rust rhg !)
+ ? d1/y (no-unix-permissions !)
$ chmod +r d1
$ hg status
? d1/y
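
The added (unix-permissions !) and (no-unix-permissions !) annotations make each expected line conditional on whether the platform actually honors the chmod. What the test provokes, as a small Python sketch to be run as a non-root user:

    import os, stat, tempfile

    top = tempfile.mkdtemp()
    d1 = os.path.join(top, 'd1')
    os.mkdir(d1)
    os.chmod(d1, 0)                 # mimic "chmod -r d1"
    try:
        os.listdir(d1)              # raises where permissions are enforced
    except PermissionError:
        print('d1: Permission denied')
    finally:
        os.chmod(d1, stat.S_IRWXU)  # restore so cleanup can proceed
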
--- a/tests/test-stream-bundle-v2.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-stream-bundle-v2.t Sat Oct 26 04:16:00 2024 +0200
@@ -130,8 +130,8 @@
adding [s] 00changelog.d (275 bytes)
adding [s] 00changelog.i (320 bytes)
adding [c] branch2-served (94 bytes)
- adding [c] rbc-names-v1 (7 bytes)
- adding [c] rbc-revs-v1 (40 bytes)
+ adding [c] rbc-names-v2 (7 bytes)
+ adding [c] rbc-revs-v2 (40 bytes)
transferred 1.65 KB in * seconds (* */sec) (glob) (no-rust !)
bundle2-input-part: total payload size 1857 (no-rust !)
transferred 1.78 KB in * seconds (* */sec) (glob) (rust !)
@@ -152,19 +152,19 @@
bundle2-input-bundle: 2 parts total
checking for updated bookmarks
updating to branch default
- resolving manifests
- branchmerge: False, force: False, partial: False
- ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
- A: remote created -> g
- getting A
- B: remote created -> g
- getting B
- C: remote created -> g
- getting C
- D: remote created -> g
- getting D
- E: remote created -> g
- getting E
+ resolving manifests (no-rust !)
+ branchmerge: False, force: False, partial: False (no-rust !)
+ ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041 (no-rust !)
+ A: remote created -> g (no-rust !)
+ getting A (no-rust !)
+ B: remote created -> g (no-rust !)
+ getting B (no-rust !)
+ C: remote created -> g (no-rust !)
+ getting C (no-rust !)
+ D: remote created -> g (no-rust !)
+ getting D (no-rust !)
+ E: remote created -> g (no-rust !)
+ getting E (no-rust !)
5 files updated, 0 files merged, 0 files removed, 0 files unresolved
updating the branch cache
(sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
@@ -193,8 +193,8 @@
adding [s] 00changelog.d (275 bytes)
adding [s] 00changelog.i (320 bytes)
adding [c] branch2-served (94 bytes)
- adding [c] rbc-names-v1 (7 bytes)
- adding [c] rbc-revs-v1 (40 bytes)
+ adding [c] rbc-names-v2 (7 bytes)
+ adding [c] rbc-revs-v2 (40 bytes)
transferred 1.65 KB in * seconds (* */sec) (glob) (no-rust !)
bundle2-input-part: total payload size 1857 (no-rust !)
transferred 1.78 KB in * seconds (* */sec) (glob) (rust !)
@@ -215,19 +215,19 @@
bundle2-input-bundle: 2 parts total
checking for updated bookmarks
updating to branch default
- resolving manifests
- branchmerge: False, force: False, partial: False
- ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
- A: remote created -> g
- getting A
- B: remote created -> g
- getting B
- C: remote created -> g
- getting C
- D: remote created -> g
- getting D
- E: remote created -> g
- getting E
+ resolving manifests (no-rust !)
+ branchmerge: False, force: False, partial: False (no-rust !)
+ ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041 (no-rust !)
+ A: remote created -> g (no-rust !)
+ getting A (no-rust !)
+ B: remote created -> g (no-rust !)
+ getting B (no-rust !)
+ C: remote created -> g (no-rust !)
+ getting C (no-rust !)
+ D: remote created -> g (no-rust !)
+ getting D (no-rust !)
+ E: remote created -> g (no-rust !)
+ getting E (no-rust !)
5 files updated, 0 files merged, 0 files removed, 0 files unresolved
updating the branch cache
(sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
@@ -258,8 +258,8 @@
adding [s] 00changelog.d (275 bytes)
adding [s] 00changelog.i (320 bytes)
adding [c] branch2-served (94 bytes)
- adding [c] rbc-names-v1 (7 bytes)
- adding [c] rbc-revs-v1 (40 bytes)
+ adding [c] rbc-names-v2 (7 bytes)
+ adding [c] rbc-revs-v2 (40 bytes)
transferred 1.65 KB in * seconds (* */sec) (glob) (no-rust !)
bundle2-input-part: total payload size 1869 (no-rust !)
transferred 1.78 KB in * seconds (* */sec) (glob) (rust !)
@@ -280,19 +280,19 @@
bundle2-input-bundle: 2 parts total
checking for updated bookmarks
updating to branch default
- resolving manifests
- branchmerge: False, force: False, partial: False
- ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
- A: remote created -> g
- getting A
- B: remote created -> g
- getting B
- C: remote created -> g
- getting C
- D: remote created -> g
- getting D
- E: remote created -> g
- getting E
+ resolving manifests (no-rust !)
+ branchmerge: False, force: False, partial: False (no-rust !)
+ ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041 (no-rust !)
+ A: remote created -> g (no-rust !)
+ getting A (no-rust !)
+ B: remote created -> g (no-rust !)
+ getting B (no-rust !)
+ C: remote created -> g (no-rust !)
+ getting C (no-rust !)
+ D: remote created -> g (no-rust !)
+ getting D (no-rust !)
+ E: remote created -> g (no-rust !)
+ getting E (no-rust !)
5 files updated, 0 files merged, 0 files removed, 0 files unresolved
updating the branch cache
(sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
@@ -320,8 +320,8 @@
adding [s] 00changelog.d (275 bytes)
adding [s] 00changelog.i (320 bytes)
adding [c] branch2-served (94 bytes)
- adding [c] rbc-names-v1 (7 bytes)
- adding [c] rbc-revs-v1 (40 bytes)
+ adding [c] rbc-names-v2 (7 bytes)
+ adding [c] rbc-revs-v2 (40 bytes)
transferred 1.65 KB in * seconds (* */sec) (glob) (no-rust !)
bundle2-input-part: total payload size 1869 (no-rust !)
transferred 1.78 KB in * seconds (* */sec) (glob) (rust !)
@@ -342,19 +342,19 @@
bundle2-input-bundle: 2 parts total
checking for updated bookmarks
updating to branch default
- resolving manifests
- branchmerge: False, force: False, partial: False
- ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
- A: remote created -> g
- getting A
- B: remote created -> g
- getting B
- C: remote created -> g
- getting C
- D: remote created -> g
- getting D
- E: remote created -> g
- getting E
+ resolving manifests (no-rust !)
+ branchmerge: False, force: False, partial: False (no-rust !)
+ ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041 (no-rust !)
+ A: remote created -> g (no-rust !)
+ getting A (no-rust !)
+ B: remote created -> g (no-rust !)
+ getting B (no-rust !)
+ C: remote created -> g (no-rust !)
+ getting C (no-rust !)
+ D: remote created -> g (no-rust !)
+ getting D (no-rust !)
+ E: remote created -> g (no-rust !)
+ getting E (no-rust !)
5 files updated, 0 files merged, 0 files removed, 0 files unresolved
updating the branch cache
(sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
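
All four hunks in this file gate the verbose manifest-resolution lines behind (no-rust !), since the Rust update code does not emit them. In the .t format, a trailing feature annotation means the line is only required when that feature test passes; schematically, with a placeholder command and placeholder output:

      $ some-command
      a line required on every build
      a line required only on rust builds (rust !)
      a line required only on non-rust builds (no-rust !)
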
--- a/tests/test-strip.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-strip.t Sat Oct 26 04:16:00 2024 +0200
@@ -913,6 +913,7 @@
saved backup bundle to $TESTTMP/issue4736/.hg/strip-backup/6625a5168474-345bb43d-backup.hg
updating the branch cache
invalid branch cache (served): tip differs
+ resetting content of rbc-names-v2
$ hg log -G
o changeset: 2:5c51d8d6557d
| tag: tip
--- a/tests/test-subrepo-deep-nested-change.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-subrepo-deep-nested-change.t Sat Oct 26 04:16:00 2024 +0200
@@ -45,9 +45,9 @@
linking [=======================================> ] 8/9\r (no-eol) (esc) (rust !)
linking [============================================>] 9/9\r (no-eol) (esc) (rust !)
\r (no-eol) (esc)
- \r (no-eol) (esc)
- updating [===========================================>] 1/1\r (no-eol) (esc)
- \r (no-eol) (esc)
+ \r (no-eol) (esc) (no-rust !)
+ updating [===========================================>] 1/1\r (no-eol) (esc) (no-rust !)
+ \r (no-eol) (esc) (no-rust !)
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg add -R sub1
@@ -89,7 +89,6 @@
linking [==================================> ] 7/9\r (no-eol) (esc)
linking [=======================================> ] 8/9\r (no-eol) (esc)
linking [============================================>] 9/9\r (no-eol) (esc)
- updating [===========================================>] 1/1\r (no-eol) (esc)
\r (no-eol) (esc)
updating to branch default
cloning subrepo sub2 from $TESTTMP/sub2
--- a/tests/test-symlink-os-yes-fs-no.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-symlink-os-yes-fs-no.py Sat Oct 26 04:16:00 2024 +0200
@@ -30,6 +30,7 @@
time.sleep(1)
commands.status(u, repo)
+
# now disable symlink support -- this is what os.symlink would do on a
# non-symlink file system
def symlink_failure(src, dst):
--- a/tests/test-symlink-placeholder.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-symlink-placeholder.t Sat Oct 26 04:16:00 2024 +0200
@@ -3,12 +3,13 @@
Create extension that can disable symlink support:
$ cat > nolink.py <<EOF
- > from mercurial import extensions, util
+ > from mercurial import extensions, merge, util
> def setflags(orig, f, l, x):
> pass
> def checklink(orig, path):
> return False
> def extsetup(ui):
+ > merge.MAYBE_USE_RUST_UPDATE = False
> extensions.wrapfunction(util, 'setflags', setflags)
> extensions.wrapfunction(util, 'checklink', checklink)
> EOF
--- a/tests/test-tags.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-tags.t Sat Oct 26 04:16:00 2024 +0200
@@ -793,8 +793,8 @@
$ ls tagsclient/.hg/cache
branch2-base
hgtagsfnodes1
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-served
@@ -819,8 +819,8 @@
$ ls tagsclient/.hg/cache
branch2-base
hgtagsfnodes1
- rbc-names-v1
- rbc-revs-v1
+ rbc-names-v2
+ rbc-revs-v2
tags2
tags2-served
tags2-visible
--- a/tests/test-tools.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-tools.t Sat Oct 26 04:16:00 2024 +0200
@@ -51,12 +51,7 @@
foo: mode=644
#endif
-#if no-windows
$ "$PYTHON" $TESTDIR/seq.py 10 > bar
-#else
-Convert CRLF -> LF for consistency
- $ "$PYTHON" $TESTDIR/seq.py 10 | sed "s/$//" > bar
-#endif
#if unix-permissions symlink
$ chmod +x bar
--- a/tests/test-transaction-rollback-on-revlog-split.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-transaction-rollback-on-revlog-split.t Sat Oct 26 04:16:00 2024 +0200
@@ -8,29 +8,29 @@
$ cat > $TESTTMP/intercept_before_rename.py << EOF
> import os
- > import signal
> from mercurial import extensions, util
+ > from mercurial.testing import ps_util
>
> def extsetup(ui):
> def rename(orig, src, dest, *args, **kwargs):
> path = util.normpath(dest)
> if path.endswith(b'data/file.i'):
- > os.kill(os.getpid(), signal.SIGKILL)
+ > ps_util.kill(os.getpid())
> return orig(src, dest, *args, **kwargs)
> extensions.wrapfunction(util, 'rename', rename)
> EOF
$ cat > $TESTTMP/intercept_after_rename.py << EOF
> import os
- > import signal
> from mercurial import extensions, util
+ > from mercurial.testing import ps_util
>
> def extsetup(ui):
> def close(orig, *args, **kwargs):
> path = util.normpath(args[0]._atomictempfile__name)
> r = orig(*args, **kwargs)
> if path.endswith(b'/.hg/store/data/file.i'):
- > os.kill(os.getpid(), signal.SIGKILL)
+ > ps_util.kill(os.getpid())
> return r
> extensions.wrapfunction(util.atomictempfile, 'close', close)
> def extsetup(ui):
@@ -38,17 +38,17 @@
> path = util.normpath(dest)
> r = orig(src, dest, *args, **kwargs)
> if path.endswith(b'data/file.i'):
- > os.kill(os.getpid(), signal.SIGKILL)
+ > ps_util.kill(os.getpid())
> return r
> extensions.wrapfunction(util, 'rename', rename)
> EOF
$ cat > $TESTTMP/killme.py << EOF
> import os
- > import signal
+ > from mercurial.testing import ps_util
>
> def killme(ui, repo, hooktype, **kwargs):
- > os.kill(os.getpid(), signal.SIGKILL)
+ > ps_util.kill(os.getpid())
> EOF
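
os.kill(pid, signal.SIGKILL) is not portable to Windows, which is presumably why these helpers now go through mercurial.testing.ps_util. A hedged sketch of what such a kill helper can look like (the real ps_util implementation may differ):

    import os
    import signal
    import subprocess
    import sys

    def kill(pid):
        if sys.platform == 'win32':
            # There is no SIGKILL on Windows; force-terminate instead.
            subprocess.run(['taskkill', '/F', '/PID', str(pid)],
                           capture_output=True, check=False)
        else:
            os.kill(pid, signal.SIGKILL)
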
$ cat > $TESTTMP/reader_wait_split.py << EOF
@@ -58,11 +58,11 @@
> def _wait_post_load(orig, self, *args, **kwargs):
> wait = b'data/file' in self.radix
> if wait:
- > testing.wait_file(b"$TESTTMP/writer-revlog-split")
+ > testing.wait_file(b"$TESTTMP_FORWARD_SLASH/writer-revlog-split")
> r = orig(self, *args, **kwargs)
> if wait:
- > testing.write_file(b"$TESTTMP/reader-index-read")
- > testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
+ > testing.write_file(b"$TESTTMP_FORWARD_SLASH/reader-index-read")
+ > testing.wait_file(b"$TESTTMP_FORWARD_SLASH/writer-revlog-unsplit")
> return r
>
> def extsetup(ui):
@@ -441,7 +441,7 @@
$ cd troffset-computation-race
$ cat > .hg/hgrc <<EOF
> [hooks]
- > pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
+ > pretxnchangegroup=sh "$RUNTESTDIR/testlib/wait-on-file" 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
> pretxnclose = false
> EOF
@@ -485,8 +485,8 @@
$ cd troffset-computation-hooks
$ cat > .hg/hgrc <<EOF
> [hooks]
- > pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
- > pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
+ > pretxnclose.01-echo = hg cat -r "max(all())" file | "$PYTHON" "$RUNTESTDIR/f" --size
+ > pretxnclose.02-echo = sh "$RUNTESTDIR/testlib/wait-on-file" 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
> pretxnclose.03-abort = false
> EOF
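
Two portability themes run through this file: hook command lines now quote "$RUNTESTDIR"/"$TESTTMP" and invoke scripts through sh, and byte strings in heredocs use $TESTTMP_FORWARD_SLASH. The latter matters because a backslashed Windows path spliced into a Python string literal gets mangled by escape processing:

    win_path = r'C:\temp\test'             # what the OS hands out
    mangled = 'C:\temp\test'               # '\t' turned into TAB characters
    assert '\t' in mangled and mangled != win_path
    forward = win_path.replace('\\', '/')  # safe inside any string literal
    assert forward == 'C:/temp/test'
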
--- a/tests/test-transaction-wc-rollback-race.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-transaction-wc-rollback-race.t Sat Oct 26 04:16:00 2024 +0200
@@ -107,14 +107,15 @@
$ cat << EOF >> ../txn-close.sh
- > rm -f $TESTTMP/transaction-continue
- > $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/transaction-continue $TESTTMP/transaction-waiting
- > rm -f $TESTTMP/transaction-waiting
+ > rm -f "$TESTTMP/transaction-continue"
+ > "$RUNTESTDIR/testlib/wait-on-file" 5 "$TESTTMP/transaction-continue" "$TESTTMP/transaction-waiting"
+ > rm -f "$TESTTMP/transaction-waiting"
> exit 1
> EOF
+
$ cat << EOF >> .hg/hgrc
> [hooks]
- > pretxnclose.test = sh $TESTTMP/txn-close.sh
+ > pretxnclose.test = sh "$TESTTMP/txn-close.sh"
> EOF
Check that the overall logic is working: the transaction is holding the `lock`, but
@@ -129,7 +130,7 @@
$ echo y | hg --config ui.interactive=yes debuglock --set-lock
abort: lock is already held
[255]
- $ touch $TESTTMP/transaction-continue
+ $ touch "$TESTTMP/transaction-continue"
$ wait
$ hg phase --rev 0
0: draft
@@ -144,11 +145,11 @@
$ hg status
$ hg phase --public --rev 0 2> ../log.err &
- $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/transaction-waiting
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 "$TESTTMP/transaction-waiting"
$ hg forget default_a
$ hg status
R default_a
- $ touch $TESTTMP/transaction-continue
+ $ touch "$TESTTMP/transaction-continue"
$ wait
$ hg status
R default_a
@@ -160,12 +161,12 @@
$ hg branch
default
$ hg phase --public --rev 0 2> ../log.err &
- $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/transaction-waiting
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 "$TESTTMP/transaction-waiting"
$ hg branch celeste
marked working directory as branch celeste
$ hg branch
celeste
- $ touch $TESTTMP/transaction-continue
+ $ touch "$TESTTMP/transaction-continue"
$ wait
$ hg branch
celeste
@@ -177,12 +178,12 @@
$ hg branch
babar
$ hg phase --public --rev 0 2> ../log.err &
- $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/transaction-waiting
+ $ "$RUNTESTDIR/testlib/wait-on-file" 5 "$TESTTMP/transaction-waiting"
$ hg branch celeste
marked working directory as branch celeste
$ hg branch
celeste
- $ touch $TESTTMP/transaction-continue
+ $ touch "$TESTTMP/transaction-continue"
$ wait
$ hg branch
celeste
@@ -194,12 +195,12 @@
$ hg log --rev . -T '{desc}\n'
babar_m
$ hg phase --public --rev 0 2> ../log.err &
- $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/transaction-waiting
+ $ $RUNTESTDIR/testlib/wait-on-file 5 "$TESTTMP/transaction-waiting"
$ hg update "parents(.)" --quiet
$ hg log --rev . -T '{desc}\n'
babar_l
$ hg st
- $ touch $TESTTMP/transaction-continue
+ $ touch "$TESTTMP/transaction-continue"
$ wait
$ hg log --rev . -T '{desc}\n'
babar_l
@@ -234,13 +235,13 @@
$ hg log -r . -T '= {activebookmark} =\n'
= =
$ hg phase --public --rev 0 2> ../log.err &
- $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/transaction-waiting
+ $ $RUNTESTDIR/testlib/wait-on-file 5 "$TESTTMP/transaction-waiting"
$ hg up bar
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(activating bookmark bar)
$ hg log -r . -T '= {activebookmark} =\n'
= bar =
- $ touch $TESTTMP/transaction-continue
+ $ touch "$TESTTMP/transaction-continue"
$ wait
$ hg log -r . -T '= {activebookmark} =\n'
= bar =
@@ -251,13 +252,13 @@
$ hg log -r . -T '= {activebookmark} =\n'
= bar =
$ hg phase --public --rev 0 2> ../log.err &
- $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/transaction-waiting
+ $ $RUNTESTDIR/testlib/wait-on-file 5 "$TESTTMP/transaction-waiting"
$ hg up .
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark bar)
$ hg log -r . -T '= {activebookmark} =\n'
= =
- $ touch $TESTTMP/transaction-continue
+ $ touch "$TESTTMP/transaction-continue"
$ wait
$ hg log -r . -T '= {activebookmark} =\n'
= =
--- a/tests/test-trusted.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-trusted.py Sat Oct 26 04:16:00 2024 +0200
@@ -29,7 +29,7 @@
def bprint(*args, **kwargs):
print(
*[_maybesysstr(a) for a in args],
- **{k: _maybesysstr(v) for k, v in kwargs.items()}
+ **{k: _maybesysstr(v) for k, v in kwargs.items()},
)
# avoid awkward interleaving with ui object's output
sys.stdout.flush()
--- a/tests/test-ui-color.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-ui-color.py Sat Oct 26 04:16:00 2024 +0200
@@ -25,6 +25,7 @@
# we're not interested in the output, so write that to devnull
ui_.fout = open(os.devnull, 'wb')
+
# call some arbitrary command just so we go through
# color's wrapped _runcommand twice.
def runcmd():
--- a/tests/test-uncommit.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-uncommit.t Sat Oct 26 04:16:00 2024 +0200
@@ -22,13 +22,12 @@
uncommit part or all of a local changeset
- This command undoes the effect of a local commit, returning the affected
- files to their uncommitted state. This means that files modified or
- deleted in the changeset will be left unchanged, and so will remain
- modified in the working directory.
+ This command undoes the effect of a local commit, returning the affected files
+ to their uncommitted state. This means that files modified or deleted in the
+ changeset will be left unchanged, and so will remain modified in the working
+ directory.
- If no files are specified, the commit will be pruned, unless --keep is
- given.
+ If no files are specified, the commit will be pruned, unless --keep is given.
(use 'hg help -e uncommit' to show help for the uncommit extension)
--- a/tests/test-upgrade-repo.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-upgrade-repo.t Sat Oct 26 04:16:00 2024 +0200
@@ -2082,6 +2082,8 @@
tracked-hint: no
share-safe: yes
+#if unix-permissions
+
Attempting Auto-upgrade on a read-only repository
-------------------------------------------------
@@ -2095,11 +2097,12 @@
$ chmod -R u+w auto-upgrade
+#endif
+
Attempting Auto-upgrade on a locked repository
----------------------------------------------
$ hg -R auto-upgrade debuglock --set-lock --quiet &
- $ echo $! >> $DAEMON_PIDS
$ $RUNTESTDIR/testlib/wait-on-file 10 auto-upgrade/.hg/store/lock
$ hg status -R auto-upgrade \
> --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
--- a/tests/test-verify-repo-operations.py Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-verify-repo-operations.py Sat Oct 26 04:16:00 2024 +0200
@@ -615,8 +615,8 @@
settings(
timeout=-1,
stateful_step_count=1000,
- max_examples=10 ** 8,
- max_iterations=10 ** 8,
+ max_examples=10**8,
+ max_iterations=10**8,
database=writeonlydatabase(settings.default.database),
),
)
--- a/tests/test-wsgicgi.t Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/test-wsgicgi.t Sat Oct 26 04:16:00 2024 +0200
@@ -1,3 +1,5 @@
+#require no-msys
+
$ hg init test
$ cat >hgweb.cgi <<HGWEB
> #!$PYTHON
--- a/tests/testlib/wait-on-file Thu Jan 11 20:37:34 2024 +0100
+++ b/tests/testlib/wait-on-file Sat Oct 26 04:16:00 2024 +0200
@@ -11,14 +11,17 @@
timer="$1"
-# Scale the timeout to match the sleep steps below, i.e. 1/0.02.
-timer=$(( 50 * $timer ))
# If the test timeout has been extended, also scale the timer relative
# to the normal timing.
if [ "$HGTEST_TIMEOUT_DEFAULT" -lt "$HGTEST_TIMEOUT" ]; then
timer=$(( ( $timer * $HGTEST_TIMEOUT) / $HGTEST_TIMEOUT_DEFAULT ))
fi
+max_time=$timer
+
+# Scale the timeout to match the sleep steps below, i.e. 1/0.02.
+timer=$(( 50 * $timer ))
+
wait_on="$2"
create=""
if [ $# -eq 3 ]; then
@@ -34,6 +37,6 @@
sleep 0.02
done
if [ "$timer" -le 0 ]; then
- echo "file not created after $1 seconds: $wait_on" >&2
+ echo "file not created after $max_time seconds: $wait_on" >&2
exit 1
fi
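
The reordering fixes the reported wait budget: the timeout-extension scaling now happens before the value is remembered as max_time and converted into 0.02-second polling steps, so the failure message matches the time actually waited. A worked example with hypothetical timeout values:

    # Hypothetical: a 5-second budget, default timeout 180s, extended to 360s.
    timer = 5
    HGTEST_TIMEOUT_DEFAULT, HGTEST_TIMEOUT = 180, 360
    if HGTEST_TIMEOUT_DEFAULT < HGTEST_TIMEOUT:
        timer = (timer * HGTEST_TIMEOUT) // HGTEST_TIMEOUT_DEFAULT
    max_time = timer      # 10 -- the seconds the error message now reports
    timer = 50 * timer    # 500 polling iterations of 0.02 seconds each
    assert (max_time, timer) == (10, 500)
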