Mercurial > hg
changeset 48569:f13fb742e1d8
branching: merge with stable
author | Raphaël Gomès <rgomes@octobus.net> |
---|---|
date | Tue, 18 Jan 2022 10:27:13 +0100 |
parents | d1210d56008b (diff) 440972d2175d (current diff) |
children | fe4922564661 |
files | mercurial/hg.py rust/hg-core/src/dirstate_tree/on_disk.rs |
diffstat | 274 files changed, 4507 insertions(+), 17731 deletions(-) [+] |
line wrap: on
line diff
--- a/contrib/automation/hgautomation/cli.py Thu Dec 30 13:25:44 2021 +0100 +++ b/contrib/automation/hgautomation/cli.py Tue Jan 18 10:27:13 2022 +0100 @@ -158,7 +158,7 @@ windows.synchronize_hg(SOURCE_ROOT, revision, instance) - for py_version in ("2.7", "3.7", "3.8", "3.9"): + for py_version in ("2.7", "3.7", "3.8", "3.9", "3.10"): for arch in ("x86", "x64"): windows.purge_hg(winrm_client) windows.build_wheel( @@ -377,7 +377,7 @@ sp.add_argument( '--python-version', help='Python version to build for', - choices={'2.7', '3.7', '3.8', '3.9'}, + choices={'2.7', '3.7', '3.8', '3.9', '3.10'}, nargs='*', default=['3.8'], ) @@ -501,7 +501,7 @@ sp.add_argument( '--python-version', help='Python version to use', - choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9'}, + choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10'}, default='2.7', ) sp.add_argument(
--- a/contrib/automation/hgautomation/windows.py Thu Dec 30 13:25:44 2021 +0100 +++ b/contrib/automation/hgautomation/windows.py Tue Jan 18 10:27:13 2022 +0100 @@ -129,6 +129,8 @@ WHEEL_FILENAME_PYTHON38_X64 = 'mercurial-{version}-cp38-cp38-win_amd64.whl' WHEEL_FILENAME_PYTHON39_X86 = 'mercurial-{version}-cp39-cp39-win32.whl' WHEEL_FILENAME_PYTHON39_X64 = 'mercurial-{version}-cp39-cp39-win_amd64.whl' +WHEEL_FILENAME_PYTHON310_X86 = 'mercurial-{version}-cp310-cp310-win32.whl' +WHEEL_FILENAME_PYTHON310_X64 = 'mercurial-{version}-cp310-cp310-win_amd64.whl' EXE_FILENAME_PYTHON2_X86 = 'Mercurial-{version}-x86-python2.exe' EXE_FILENAME_PYTHON2_X64 = 'Mercurial-{version}-x64-python2.exe' @@ -480,6 +482,8 @@ dist_path / WHEEL_FILENAME_PYTHON38_X64.format(version=version), dist_path / WHEEL_FILENAME_PYTHON39_X86.format(version=version), dist_path / WHEEL_FILENAME_PYTHON39_X64.format(version=version), + dist_path / WHEEL_FILENAME_PYTHON310_X86.format(version=version), + dist_path / WHEEL_FILENAME_PYTHON310_X64.format(version=version), ) @@ -493,6 +497,8 @@ dist_path / WHEEL_FILENAME_PYTHON38_X64.format(version=version), dist_path / WHEEL_FILENAME_PYTHON39_X86.format(version=version), dist_path / WHEEL_FILENAME_PYTHON39_X64.format(version=version), + dist_path / WHEEL_FILENAME_PYTHON310_X86.format(version=version), + dist_path / WHEEL_FILENAME_PYTHON310_X64.format(version=version), dist_path / EXE_FILENAME_PYTHON2_X86.format(version=version), dist_path / EXE_FILENAME_PYTHON2_X64.format(version=version), dist_path / EXE_FILENAME_PYTHON3_X86.format(version=version),
--- a/contrib/install-windows-dependencies.ps1 Thu Dec 30 13:25:44 2021 +0100 +++ b/contrib/install-windows-dependencies.ps1 Tue Jan 18 10:27:13 2022 +0100 @@ -29,10 +29,15 @@ $PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe" $PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a" -$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5.exe" -$PYTHON39_x86_SHA256 = "505129081a839b699a6ab9064b441ad922ef03767b5dd4241fd0c2166baf64de" -$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5-amd64.exe" -$PYTHON39_x64_SHA256 = "84d5243088ba00c11e51905c704dbe041040dfff044f4e1ce5476844ee2e6eac" +$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.9/python-3.9.9.exe" +$PYTHON39_x86_SHA256 = "6646a5683adf14d35e8c53aab946895bc0f0b825f7acac3a62cc85ee7d0dc71a" +$PYTHON39_X64_URL = "https://www.python.org/ftp/python/3.9.9/python-3.9.9-amd64.exe" +$PYTHON39_x64_SHA256 = "137d59e5c0b01a8f1bdcba08344402ae658c81c6bf03b6602bd8b4e951ad0714" + +$PYTHON310_x86_URL = "https://www.python.org/ftp/python/3.10.0/python-3.10.0.exe" +$PYTHON310_x86_SHA256 = "ea896eeefb1db9e12fb89ec77a6e28c9fe52b4a162a34c85d9688be2ec2392e8" +$PYTHON310_X64_URL = "https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe" +$PYTHON310_x64_SHA256 = "cb580eb7dc55f9198e650f016645023e8b2224cf7d033857d12880b46c5c94ef" # PIP 19.2.3. 
$PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py" @@ -132,6 +137,8 @@ Secure-Download $PYTHON38_x64_URL ${prefix}\assets\python38-x64.exe $PYTHON38_x64_SHA256 Secure-Download $PYTHON39_x86_URL ${prefix}\assets\python39-x86.exe $PYTHON39_x86_SHA256 Secure-Download $PYTHON39_x64_URL ${prefix}\assets\python39-x64.exe $PYTHON39_x64_SHA256 + Secure-Download $PYTHON310_x86_URL ${prefix}\assets\python310-x86.exe $PYTHON310_x86_SHA256 + Secure-Download $PYTHON310_x64_URL ${prefix}\assets\python310-x64.exe $PYTHON310_x64_SHA256 Secure-Download $PIP_URL ${pip} $PIP_SHA256 Secure-Download $VS_BUILD_TOOLS_URL ${prefix}\assets\vs_buildtools.exe $VS_BUILD_TOOLS_SHA256 Secure-Download $INNO_SETUP_URL ${prefix}\assets\InnoSetup.exe $INNO_SETUP_SHA256 @@ -146,6 +153,8 @@ # Install-Python3 "Python 3.8 64-bit" ${prefix}\assets\python38-x64.exe ${prefix}\python38-x64 ${pip} Install-Python3 "Python 3.9 32-bit" ${prefix}\assets\python39-x86.exe ${prefix}\python39-x86 ${pip} Install-Python3 "Python 3.9 64-bit" ${prefix}\assets\python39-x64.exe ${prefix}\python39-x64 ${pip} + Install-Python3 "Python 3.10 32-bit" ${prefix}\assets\python310-x86.exe ${prefix}\python310-x86 ${pip} + Install-Python3 "Python 3.10 64-bit" ${prefix}\assets\python310-x64.exe ${prefix}\python310-x64 ${pip} Write-Output "installing Visual Studio 2017 Build Tools and SDKs" Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
--- a/contrib/packaging/requirements-windows-py3.txt Thu Dec 30 13:25:44 2021 +0100 +++ b/contrib/packaging/requirements-windows-py3.txt Tue Jan 18 10:27:13 2022 +0100 @@ -1,68 +1,84 @@ # -# This file is autogenerated by pip-compile +# This file is autogenerated by pip-compile with python 3.7 # To update, run: # # pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py3.txt contrib/packaging/requirements-windows.txt.in # atomicwrites==1.4.0 \ --hash=sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197 \ - --hash=sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a \ + --hash=sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a # via pytest attrs==21.2.0 \ --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ - --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb \ + --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb # via pytest cached-property==1.5.2 \ --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \ - --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \ + --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 # via pygit2 certifi==2021.5.30 \ --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ - --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ + --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 # via dulwich -cffi==1.14.4 \ - --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \ - --hash=sha256:00e28066507bfc3fe865a31f325c8391a1ac2916219340f87dfad602c3e48e5d \ - --hash=sha256:045d792900a75e8b1e1b0ab6787dd733a8190ffcf80e8c8ceb2fb10a29ff238a \ - --hash=sha256:0638c3ae1a0edfb77c6765d487fee624d2b1ee1bdfeffc1f0b58c64d149e7eec \ - 
--hash=sha256:105abaf8a6075dc96c1fe5ae7aae073f4696f2905fde6aeada4c9d2926752362 \ - --hash=sha256:155136b51fd733fa94e1c2ea5211dcd4c8879869008fc811648f16541bf99668 \ - --hash=sha256:1a465cbe98a7fd391d47dce4b8f7e5b921e6cd805ef421d04f5f66ba8f06086c \ - --hash=sha256:1d2c4994f515e5b485fd6d3a73d05526aa0fcf248eb135996b088d25dfa1865b \ - --hash=sha256:2c24d61263f511551f740d1a065eb0212db1dbbbbd241db758f5244281590c06 \ - --hash=sha256:51a8b381b16ddd370178a65360ebe15fbc1c71cf6f584613a7ea08bfad946698 \ - --hash=sha256:594234691ac0e9b770aee9fcdb8fa02c22e43e5c619456efd0d6c2bf276f3eb2 \ - --hash=sha256:5cf4be6c304ad0b6602f5c4e90e2f59b47653ac1ed9c662ed379fe48a8f26b0c \ - --hash=sha256:64081b3f8f6f3c3de6191ec89d7dc6c86a8a43911f7ecb422c60e90c70be41c7 \ - --hash=sha256:6bc25fc545a6b3d57b5f8618e59fc13d3a3a68431e8ca5fd4c13241cd70d0009 \ - --hash=sha256:798caa2a2384b1cbe8a2a139d80734c9db54f9cc155c99d7cc92441a23871c03 \ - --hash=sha256:7c6b1dece89874d9541fc974917b631406233ea0440d0bdfbb8e03bf39a49b3b \ - --hash=sha256:840793c68105fe031f34d6a086eaea153a0cd5c491cde82a74b420edd0a2b909 \ - --hash=sha256:8d6603078baf4e11edc4168a514c5ce5b3ba6e3e9c374298cb88437957960a53 \ - --hash=sha256:9cc46bc107224ff5b6d04369e7c595acb700c3613ad7bcf2e2012f62ece80c35 \ - --hash=sha256:9f7a31251289b2ab6d4012f6e83e58bc3b96bd151f5b5262467f4bb6b34a7c26 \ - --hash=sha256:9ffb888f19d54a4d4dfd4b3f29bc2c16aa4972f1c2ab9c4ab09b8ab8685b9c2b \ - --hash=sha256:a7711edca4dcef1a75257b50a2fbfe92a65187c47dab5a0f1b9b332c5919a3fb \ - --hash=sha256:af5c59122a011049aad5dd87424b8e65a80e4a6477419c0c1015f73fb5ea0293 \ - --hash=sha256:b18e0a9ef57d2b41f5c68beefa32317d286c3d6ac0484efd10d6e07491bb95dd \ - --hash=sha256:b4e248d1087abf9f4c10f3c398896c87ce82a9856494a7155823eb45a892395d \ - --hash=sha256:ba4e9e0ae13fc41c6b23299545e5ef73055213e466bd107953e4a013a5ddd7e3 \ - --hash=sha256:c6332685306b6417a91b1ff9fae889b3ba65c2292d64bd9245c093b1b284809d \ - --hash=sha256:d9efd8b7a3ef378dd61a1e77367f1924375befc2eba06168b6ebfa903a5e59ca \ - 
--hash=sha256:df5169c4396adc04f9b0a05f13c074df878b6052430e03f50e68adf3a57aa28d \ - --hash=sha256:ebb253464a5d0482b191274f1c8bf00e33f7e0b9c66405fbffc61ed2c839c775 \ - --hash=sha256:ec80dc47f54e6e9a78181ce05feb71a0353854cc26999db963695f950b5fb375 \ - --hash=sha256:f032b34669220030f905152045dfa27741ce1a6db3324a5bc0b96b6c7420c87b \ - --hash=sha256:f60567825f791c6f8a592f3c6e3bd93dd2934e3f9dac189308426bd76b00ef3b \ - --hash=sha256:f803eaa94c2fcda012c047e62bc7a51b0bdabda1cad7a92a522694ea2d76e49f \ +cffi==1.15.0 \ + --hash=sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3 \ + --hash=sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2 \ + --hash=sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636 \ + --hash=sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20 \ + --hash=sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728 \ + --hash=sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27 \ + --hash=sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66 \ + --hash=sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443 \ + --hash=sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0 \ + --hash=sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7 \ + --hash=sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39 \ + --hash=sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605 \ + --hash=sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a \ + --hash=sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37 \ + --hash=sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029 \ + --hash=sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139 \ + --hash=sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc \ + 
--hash=sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df \ + --hash=sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14 \ + --hash=sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880 \ + --hash=sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2 \ + --hash=sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a \ + --hash=sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e \ + --hash=sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474 \ + --hash=sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024 \ + --hash=sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8 \ + --hash=sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0 \ + --hash=sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e \ + --hash=sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a \ + --hash=sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e \ + --hash=sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032 \ + --hash=sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6 \ + --hash=sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e \ + --hash=sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b \ + --hash=sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e \ + --hash=sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954 \ + --hash=sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962 \ + --hash=sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c \ + --hash=sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4 \ + --hash=sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55 \ + --hash=sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962 \ + 
--hash=sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023 \ + --hash=sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c \ + --hash=sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6 \ + --hash=sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8 \ + --hash=sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382 \ + --hash=sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7 \ + --hash=sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc \ + --hash=sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997 \ + --hash=sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796 # via pygit2 colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ - --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 \ + --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 # via pytest docutils==0.16 \ --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ - --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \ + --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc # via -r contrib/packaging/requirements-windows.txt.in dulwich==0.20.6 ; python_version >= "3" \ --hash=sha256:1ccd55e38fa9f169290f93e027ab4508202f5bdd6ef534facac4edd3f6903f0d \ @@ -77,26 +93,29 @@ --hash=sha256:8f7a7f973be2beedfb10dd8d3eb6bdf9ec466c72ad555704897cbd6357fe5021 \ --hash=sha256:bea6e6caffc6c73bfd1647714c5715ab96ac49deb8beb8b67511529afa25685a \ --hash=sha256:e5871b86a079e9e290f52ab14559cea1b694a0b8ed2b9ebb898f6ced7f14a406 \ - --hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b \ + --hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b # via -r contrib/packaging/requirements-windows.txt.in fuzzywuzzy==0.18.0 \ - 
--hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \ + --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 # via -r contrib/packaging/requirements-windows.txt.in idna==3.2 \ --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ - --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 \ + --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 # via yarl importlib-metadata==3.1.0 \ --hash=sha256:590690d61efdd716ff82c39ca9a9d4209252adfe288a4b5721181050acbd4175 \ - --hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099 \ - # via keyring, pluggy, pytest + --hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099 + # via + # keyring + # pluggy + # pytest iniconfig==1.1.1 \ --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ - --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 \ + --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 # via pytest keyring==21.4.0 \ --hash=sha256:4e34ea2fdec90c1c43d6610b5a5fafa1b9097db1802948e90caf5763974b8f8d \ - --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 \ + --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 # via -r contrib/packaging/requirements-windows.txt.in multidict==5.1.0 \ --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ @@ -135,62 +154,68 @@ --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ - --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 \ + --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 # via yarl packaging==21.0 \ 
--hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \ + --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 # via pytest pluggy==0.13.1 \ --hash=sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0 \ - --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d \ + --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d # via pytest py==1.10.0 \ --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \ - --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a \ + --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a # via pytest -pycparser==2.20 \ - --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ - --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 \ +pycparser==2.21 \ + --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ + --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pygit2==1.4.0 ; python_version >= "3" \ - --hash=sha256:0d298098e286eeda000e49ca7e1b41f87300e10dd8b9d06b32b008bd61f50b83 \ - --hash=sha256:0ee135eb2cd8b07ce1374f3596cc5c3213472d6389bad6a4c5d87d8e267e93e9 \ - --hash=sha256:32eb863d6651d4890ced318505ea8dc229bd9637deaf29c898de1ab574d727a0 \ - --hash=sha256:37d6d7d6d7804c42a0fe23425c72e38093488525092fc5e51a05684e63503ce7 \ - --hash=sha256:41204b6f3406d9f53147710f3cc485d77181ba67f57c34d36b7c86de1c14a18c \ - --hash=sha256:818c91b582109d90580c5da74af783738838353f15eb12eeb734d80a974b05a3 \ - --hash=sha256:8306a302487dac67df7af6a064bb37e8a8eb4138958f9560ff49ff162e185dab \ - --hash=sha256:9c2f2d9ef59513007b66f6534b000792b614de3faf60313a0a68f6b8571aea85 \ - --hash=sha256:9c8d5881eb709e2e2e13000b507a131bd5fb91a879581030088d0ddffbcd19af \ - 
--hash=sha256:b422e417739def0a136a6355723dfe8a5ffc83db5098076f28a14f1d139779c1 \ - --hash=sha256:cbeb38ab1df9b5d8896548a11e63aae8a064763ab5f1eabe4475e6b8a78ee1c8 \ - --hash=sha256:cf00481ddf053e549a6edd0216bdc267b292d261eae02a67bb3737de920cbf88 \ - --hash=sha256:d0d889144e9487d926fecea947c3f39ce5f477e521d7d467d2e66907e4cd657d \ - --hash=sha256:ddb7a1f6d38063e8724abfa1cfdfb0f9b25014b8bca0546274b7a84b873a3888 \ - --hash=sha256:e9037a7d810750fe23c9f5641ef14a0af2525ff03e14752cd4f73e1870ecfcb0 \ - --hash=sha256:ec5c0365a9bdfcac1609d20868507b28685ec5ea7cc3a2c903c9b62ef2e0bbc0 \ - --hash=sha256:fdd8ba30cda277290e000322f505132f590cf89bd7d31829b45a3cb57447ec32 \ +pygit2==1.7.1 ; python_version >= "3" \ + --hash=sha256:2c9e95efb86c0b32cc07c26be3d179e851ca4a7899c47fef63c4203963144f5e \ + --hash=sha256:3ddacbf461652d3d4900382f821d9fbd5ae2dedecd7862b5245842419ad0ccba \ + --hash=sha256:4cb0414df6089d0072ebe93ff2f34730737172dd5f0e72289567d06a6caf09c0 \ + --hash=sha256:56e960dc74f4582bfa3ca17a1a9d542732fc93b5cf8f82574c235d06b2d61eae \ + --hash=sha256:6b17ab922c2a2d99b30ab9222472b07732bf7261d9f9655a4ea23b4c700049d8 \ + --hash=sha256:73a7b471f22cb59e8729016de1f447c472b3b2c1cc2b622194e5e3b48a7f5776 \ + --hash=sha256:761a8850e33822796c1c24d411d5cc2460c04e1a74b04ae8560efd3596bbd6bd \ + --hash=sha256:7c467e81158f5827b3bca6362e5cc9b92857eff9de65034d338c1f18524b09be \ + --hash=sha256:7c56e10592e62610a19bd3e2a633aafe3488c57b906c7c2fde0299937f0f0b2f \ + --hash=sha256:7cc2a8e29cc9598310a78cf58b70d9331277cf374802be8f97d97c4a9e5d8387 \ + --hash=sha256:812670f7994f31778e873a9eced29d2bbfa91674e8be0ab1e974c8a4bda9cbab \ + --hash=sha256:8cdb0b1d6c3d24b44f340fed143b16e64ba23fe2a449f1a5db87aaf9339a9dbe \ + --hash=sha256:91b77a305d8d18b649396e66e832d654cd593a3d29b5728f753f254a04533812 \ + --hash=sha256:a75bcde32238c77eb0cf7d9698a5aa899408d7ad999a5920a29a7c4b80fdeaa7 \ + --hash=sha256:b060240cf3038e7a0706bbfc5436dd03b8d5ac797ac1d512b613f4d04b974c80 \ + 
--hash=sha256:cdfa61c0428a8182e5a6a1161c017b824cd511574f080a40b10d6413774eb0ca \ + --hash=sha256:d7faa29558436decc2e78110f38d6677eb366b683ba5cdc2803d47195711165d \ + --hash=sha256:d831825ad9c3b3c28e6b3ef8a2401ad2d3fd4db5455427ff27175a7e254e2592 \ + --hash=sha256:df4c477bdfac85d32a1e3180282cd829a0980aa69be9bd0f7cbd4db1778ca72b \ + --hash=sha256:eced3529bafcaaac015d08dfaa743b3cbad37fcd5b13ae9d280b8b7f716ec5ce \ + --hash=sha256:fec17e2da668e6bb192d777417aad9c7ca924a166d0a0b9a81a11e00362b1bc7 # via -r contrib/packaging/requirements-windows.txt.in pygments==2.7.1 \ --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \ - --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \ + --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 # via -r contrib/packaging/requirements-windows.txt.in pyparsing==2.4.7 \ --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ - --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b \ + --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b # via packaging -pytest-vcr==1.0.2 \ - --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896 \ - # via -r contrib/packaging/requirements-windows.txt.in pytest==6.2.4 \ --hash=sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b \ - --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890 \ + --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890 # via pytest-vcr +pytest-vcr==1.0.2 \ + --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896 + # via -r contrib/packaging/requirements-windows.txt.in pywin32-ctypes==0.2.0 \ --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \ - --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \ - # via -r contrib/packaging/requirements-windows.txt.in, keyring 
+ --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 + # via + # -r contrib/packaging/requirements-windows.txt.in + # keyring pyyaml==5.4.1 \ --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ @@ -220,41 +245,43 @@ --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 \ + --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 # via vcrpy six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via vcrpy toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ - --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f \ + --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f # via pytest typing-extensions==3.10.0.0 \ --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \ --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \ - --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 \ + --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 # via yarl urllib3==1.25.11 \ --hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \ - --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e \ + --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e # via dulwich vcrpy==4.1.1 \ 
--hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \ - --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599 \ + --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599 # via pytest-vcr -windows-curses==2.2.0 \ - --hash=sha256:1452d771ec6f9b3fef037da2b169196a9a12be4e86a6c27dd579adac70c42028 \ - --hash=sha256:267544e4f60c09af6505e50a69d7f01d7f8a281cf4bd4fc7efc3b32b9a4ef64e \ - --hash=sha256:389228a3df556102e72450f599283094168aa82eee189f501ad9f131a0fc92e1 \ - --hash=sha256:84336fe470fa07288daec5c684dec74c0766fec6b3511ccedb4c494804acfbb7 \ - --hash=sha256:9aa6ff60be76f5de696dc6dbf7897e3b1e6abcf4c0f741e9a0ee22cd6ef382f8 \ - --hash=sha256:c4a8ce00e82635f06648cc40d99f470be4e3ffeb84f9f7ae9d6a4f68ec6361e7 \ - --hash=sha256:c5cd032bc7d0f03224ab55c925059d98e81795098d59bbd10f7d05c7ea9677ce \ - --hash=sha256:fc0be372fe6da3c39d7093154ce029115a927bf287f34b4c615e2b3f8c23dfaa \ +windows-curses==2.3.0 \ + --hash=sha256:170c0d941c2e0cdf864e7f0441c1bdf0709232bf4aa7ce7f54d90fc76a4c0504 \ + --hash=sha256:4d5fb991d1b90a41c2332f02241a1f84c8a1e6bc8f6e0d26f532d0da7a9f7b51 \ + --hash=sha256:7a35eda4cb120b9e1a5ae795f3bc06c55b92c9d391baba6be1903285a05f3551 \ + --hash=sha256:935be95cfdb9213f6f5d3d5bcd489960e3a8fbc9b574e7b2e8a3a3cc46efff49 \ + --hash=sha256:a3a63a0597729e10f923724c2cf972a23ea677b400d2387dee1d668cf7116177 \ + --hash=sha256:c860f596d28377e47f322b7382be4d3573fd76d1292234996bb7f72e0bc0ed0d \ + --hash=sha256:cc5fa913780d60f4a40824d374a4f8ca45b4e205546e83a2d85147315a57457e \ + --hash=sha256:d5cde8ec6d582aa77af791eca54f60858339fb3f391945f9cad11b1ab71062e3 \ + --hash=sha256:e913dc121446d92b33fe4f5bcca26d3a34e4ad19f2af160370d57c3d1e93b4e1 \ + --hash=sha256:fbc2131cec57e422c6660e6cdb3420aff5be5169b8e45bb7c471f884b0590a2b # via -r contrib/packaging/requirements-windows.txt.in wrapt==1.12.1 \ - --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \ + 
--hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 # via vcrpy yarl==1.6.3 \ --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ @@ -293,9 +320,9 @@ --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ - --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \ + --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 # via vcrpy zipp==3.4.0 \ --hash=sha256:102c24ef8f171fd729d46599845e95c7ab894a4cf45f5de11a44cc7444fb1108 \ - --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb \ + --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb # via importlib-metadata
--- a/contrib/packaging/requirements.txt Thu Dec 30 13:25:44 2021 +0100 +++ b/contrib/packaging/requirements.txt Tue Jan 18 10:27:13 2022 +0100 @@ -1,16 +1,16 @@ # -# This file is autogenerated by pip-compile +# This file is autogenerated by pip-compile with python 3.7 # To update, run: # # pip-compile --generate-hashes --output-file=contrib/packaging/requirements.txt contrib/packaging/requirements.txt.in # docutils==0.16 \ --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ - --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \ + --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc # via -r contrib/packaging/requirements.txt.in jinja2==2.11.2 \ --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \ - --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035 \ + --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035 # via -r contrib/packaging/requirements.txt.in markupsafe==1.1.1 \ --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \ @@ -45,5 +45,5 @@ --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \ --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \ --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \ - --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be \ + --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be # via jinja2
--- a/contrib/simplemerge Thu Dec 30 13:25:44 2021 +0100 +++ b/contrib/simplemerge Tue Jan 18 10:27:13 2022 +0100 @@ -80,7 +80,13 @@ sys.exit(0) if len(args) != 3: raise ParseError(_(b'wrong number of arguments').decode('utf8')) + if len(opts[b'label']) > 2: + opts[b'mode'] = b'merge3' local, base, other = args + overrides = opts[b'label'] + labels = [local, other, base] + labels[: len(overrides)] = overrides + opts[b'label'] = labels sys.exit( simplemerge.simplemerge( uimod.ui.load(),
--- a/hgext/commitextras.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/commitextras.py Tue Jan 18 10:27:13 2022 +0100 @@ -65,23 +65,23 @@ b"unable to parse '%s', should follow " b"KEY=VALUE format" ) - raise error.Abort(msg % raw) + raise error.InputError(msg % raw) k, v = raw.split(b'=', 1) if not k: msg = _(b"unable to parse '%s', keys can't be empty") - raise error.Abort(msg % raw) + raise error.InputError(msg % raw) if re.search(br'[^\w-]', k): msg = _( b"keys can only contain ascii letters, digits," b" '_' and '-'" ) - raise error.Abort(msg) + raise error.InputError(msg) if k in usedinternally: msg = _( b"key '%s' is used internally, can't be set " b"manually" ) - raise error.Abort(msg % k) + raise error.InputError(msg % k) inneropts['extra'][k] = v return super(repoextra, self).commit(*innerpats, **inneropts)
--- a/hgext/git/__init__.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/git/__init__.py Tue Jan 18 10:27:13 2022 +0100 @@ -51,6 +51,7 @@ class gitstore(object): # store.basicstore): def __init__(self, path, vfstype): self.vfs = vfstype(path) + self.opener = self.vfs self.path = self.vfs.base self.createmode = store._calcmode(self.vfs) # above lines should go away in favor of:
--- a/hgext/git/dirstate.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/git/dirstate.py Tue Jan 18 10:27:13 2022 +0100 @@ -257,7 +257,7 @@ if match(p): yield p - def set_clean(self, f, parentfiledata=None): + def set_clean(self, f, parentfiledata): """Mark a file normal and clean.""" # TODO: for now we just let libgit2 re-stat the file. We can # clearly do better.
--- a/hgext/histedit.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/histedit.py Tue Jan 18 10:27:13 2022 +0100 @@ -1324,6 +1324,10 @@ d: drop, e: edit, f: fold, m: mess, p: pick, r: roll pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort """ + if self.later_on_top: + help += b"Newer commits are shown above older commits.\n" + else: + help += b"Older commits are shown above newer commits.\n" return help.splitlines() def render_help(self, win):
--- a/hgext/keyword.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/keyword.py Tue Jan 18 10:27:13 2022 +0100 @@ -116,6 +116,7 @@ dateutil, stringutil, ) +from mercurial.dirstateutils import timestamp cmdtable = {} command = registrar.command(cmdtable) @@ -326,6 +327,7 @@ msg = _(b'overwriting %s expanding keywords\n') else: msg = _(b'overwriting %s shrinking keywords\n') + wctx = self.repo[None] for f in candidates: if self.restrict: data = self.repo.file(f).read(mf[f]) @@ -356,7 +358,12 @@ fp.write(data) fp.close() if kwcmd: - self.repo.dirstate.set_clean(f) + s = wctx[f].lstat() + mode = s.st_mode + size = s.st_size + mtime = timestamp.mtime_of(s) + cache_data = (mode, size, mtime) + self.repo.dirstate.set_clean(f, cache_data) elif self.postcommit: self.repo.dirstate.update_file_p1(f, p1_tracked=True)
--- a/hgext/largefiles/lfutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/largefiles/lfutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -32,6 +32,7 @@ vfs as vfsmod, ) from mercurial.utils import hashutil +from mercurial.dirstateutils import timestamp shortname = b'.hglf' shortnameslash = shortname + b'/' @@ -243,10 +244,11 @@ def lfdirstatestatus(lfdirstate, repo): pctx = repo[b'.'] match = matchmod.always() - unsure, s = lfdirstate.status( + unsure, s, mtime_boundary = lfdirstate.status( match, subrepos=[], ignored=False, clean=False, unknown=False ) modified, clean = s.modified, s.clean + wctx = repo[None] for lfile in unsure: try: fctx = pctx[standin(lfile)] @@ -256,7 +258,13 @@ modified.append(lfile) else: clean.append(lfile) - lfdirstate.set_clean(lfile) + st = wctx[lfile].lstat() + mode = st.st_mode + size = st.st_size + mtime = timestamp.reliable_mtime_of(st, mtime_boundary) + if mtime is not None: + cache_data = (mode, size, mtime) + lfdirstate.set_clean(lfile, cache_data) return s @@ -663,7 +671,7 @@ # large. lfdirstate = openlfdirstate(ui, repo) dirtymatch = matchmod.always() - unsure, s = lfdirstate.status( + unsure, s, mtime_boundary = lfdirstate.status( dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False ) modifiedfiles = unsure + s.modified + s.added + s.removed
--- a/hgext/largefiles/overrides.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/largefiles/overrides.py Tue Jan 18 10:27:13 2022 +0100 @@ -666,14 +666,12 @@ # Override filemerge to prompt the user about how they wish to merge # largefiles. This will handle identical edits without prompting the user. -@eh.wrapfunction(filemerge, b'_filemerge') +@eh.wrapfunction(filemerge, b'filemerge') def overridefilemerge( - origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None + origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None ): if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent(): - return origfn( - premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels - ) + return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels) ahash = lfutil.readasstandin(fca).lower() dhash = lfutil.readasstandin(fcd).lower() @@ -697,7 +695,7 @@ ) ): repo.wwrite(fcd.path(), fco.data(), fco.flags()) - return True, 0, False + return 0, False @eh.wrapfunction(copiesmod, b'pathcopies') @@ -1519,7 +1517,7 @@ return orig(repo, matcher, prefix, uipathfn, opts) # Get the list of missing largefiles so we can remove them lfdirstate = lfutil.openlfdirstate(repo.ui, repo) - unsure, s = lfdirstate.status( + unsure, s, mtime_boundary = lfdirstate.status( matchmod.always(), subrepos=[], ignored=False, @@ -1746,7 +1744,7 @@ # (*1) deprecated, but used internally (e.g: "rebase --collapse") lfdirstate = lfutil.openlfdirstate(repo.ui, repo) - unsure, s = lfdirstate.status( + unsure, s, mtime_boundary = lfdirstate.status( matchmod.always(), subrepos=[], ignored=False,
--- a/hgext/largefiles/reposetup.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/largefiles/reposetup.py Tue Jan 18 10:27:13 2022 +0100 @@ -22,6 +22,8 @@ util, ) +from mercurial.dirstateutils import timestamp + from . import ( lfcommands, lfutil, @@ -195,7 +197,7 @@ match._files = [f for f in match._files if sfindirstate(f)] # Don't waste time getting the ignored and unknown # files from lfdirstate - unsure, s = lfdirstate.status( + unsure, s, mtime_boundary = lfdirstate.status( match, subrepos=[], ignored=False, @@ -210,6 +212,7 @@ s.clean, ) if parentworking: + wctx = repo[None] for lfile in unsure: standin = lfutil.standin(lfile) if standin not in ctx1: @@ -222,7 +225,15 @@ else: if listclean: clean.append(lfile) - lfdirstate.set_clean(lfile) + s = wctx[lfile].lstat() + mode = s.st_mode + size = s.st_size + mtime = timestamp.reliable_mtime_of( + s, mtime_boundary + ) + if mtime is not None: + cache_data = (mode, size, mtime) + lfdirstate.set_clean(lfile, cache_data) else: tocheck = unsure + modified + added + clean modified, added, clean = [], [], []
--- a/hgext/narrow/narrowdirstate.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/narrow/narrowdirstate.py Tue Jan 18 10:27:13 2022 +0100 @@ -38,8 +38,8 @@ return super(narrowdirstate, self).normal(*args, **kwargs) @_editfunc - def set_tracked(self, *args): - return super(narrowdirstate, self).set_tracked(*args) + def set_tracked(self, *args, **kwargs): + return super(narrowdirstate, self).set_tracked(*args, **kwargs) @_editfunc def set_untracked(self, *args):
--- a/hgext/remotefilelog/README.md Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/remotefilelog/README.md Tue Jan 18 10:27:13 2022 +0100 @@ -88,7 +88,9 @@ 4. Tags are not supported in completely shallow repos. If you use tags in your repo you will have to specify `excludepattern=.hgtags` in your client configuration to ensure that file is downloaded. The include/excludepattern settings are experimental at the moment and have yet to be deployed in a production environment. -5. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast. +5. Similarly, subrepositories should not be used with completely shallow repos. Use `excludepattern=.hgsub*` in your client configuration to ensure that the files are downloaded. + +6. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast. Contributing ============
--- a/hgext/remotefilelog/__init__.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/remotefilelog/__init__.py Tue Jan 18 10:27:13 2022 +0100 @@ -520,7 +520,7 @@ # Prefetch files before status attempts to look at their size and contents -def checklookup(orig, self, files): +def checklookup(orig, self, files, mtime_boundary): repo = self._repo if isenabled(repo): prefetchfiles = [] @@ -530,7 +530,7 @@ prefetchfiles.append((f, hex(parent.filenode(f)))) # batch fetch the needed files from the server repo.fileservice.prefetch(prefetchfiles) - return orig(self, files) + return orig(self, files, mtime_boundary) # Prefetch the logic that compares added and removed files for renames
--- a/hgext/remotefilelog/remotefilelog.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/remotefilelog/remotefilelog.py Tue Jan 18 10:27:13 2022 +0100 @@ -18,7 +18,6 @@ mdiff, pycompat, revlog, - util, ) from mercurial.utils import storageutil from mercurial.revlogutils import flagutil @@ -245,11 +244,11 @@ __bool__ = __nonzero__ def __len__(self): - if self.filename == b'.hgtags': - # The length of .hgtags is used to fast path tag checking. - # remotefilelog doesn't support .hgtags since the entire .hgtags - # history is needed. Use the excludepattern setting to make - # .hgtags a normal filelog. + if self.filename in (b'.hgtags', b'.hgsub', b'.hgsubstate'): + # Global tag and subrepository support require access to the + # file history for various performance sensitive operations. + # excludepattern should be used for repositories depending on + # those features to fallback to regular filelog. return 0 raise RuntimeError(b"len not supported") @@ -360,17 +359,6 @@ ) return rev - def _processflags(self, text, flags, operation, raw=False): - """deprecated entry point to access flag processors""" - msg = b'_processflag(...) use the specialized variant' - util.nouideprecwarn(msg, b'5.2', stacklevel=2) - if raw: - return text, flagutil.processflagsraw(self, text, flags) - elif operation == b'read': - return flagutil.processflagsread(self, text, flags) - else: # write operation - return flagutil.processflagswrite(self, text, flags) - def revision(self, node, raw=False): """returns the revlog contents at this node. this includes the meta data traditionally included in file revlogs.
--- a/hgext/win32text.py Thu Dec 30 13:25:44 2021 +0100 +++ b/hgext/win32text.py Tue Jan 18 10:27:13 2022 +0100 @@ -47,6 +47,8 @@ from mercurial.i18n import _ from mercurial.node import short from mercurial import ( + cmdutil, + extensions, pycompat, registrar, ) @@ -215,6 +217,23 @@ repo.adddatafilter(name, fn) +def wrap_revert(orig, repo, ctx, names, uipathfn, actions, *args, **kwargs): + # reset dirstate cache for file we touch + ds = repo.dirstate + with ds.parentchange(): + for filename in actions[b'revert'][0]: + entry = ds.get_entry(filename) + if entry is not None: + if entry.p1_tracked: + ds.update_file( + filename, + entry.tracked, + p1_tracked=True, + p2_info=entry.p2_info, + ) + return orig(repo, ctx, names, uipathfn, actions, *args, **kwargs) + + def extsetup(ui): # deprecated config: win32text.warn if ui.configbool(b'win32text', b'warn'): @@ -224,3 +243,4 @@ b"https://mercurial-scm.org/wiki/Win32TextExtension\n" ) ) + extensions.wrapfunction(cmdutil, '_performrevert', wrap_revert)
--- a/mercurial/bundle2.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/bundle2.py Tue Jan 18 10:27:13 2022 +0100 @@ -2419,7 +2419,7 @@ op.records.add(b'bookmarks', record) else: raise error.ProgrammingError( - b'unkown bookmark mode: %s' % bookmarksmode + b'unknown bookmark mode: %s' % bookmarksmode )
--- a/mercurial/cext/parsers.c Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/cext/parsers.c Tue Jan 18 10:27:13 2022 +0100 @@ -61,11 +61,13 @@ int p2_info; int has_meaningful_data; int has_meaningful_mtime; + int mtime_second_ambiguous; int mode; int size; int mtime_s; int mtime_ns; PyObject *parentfiledata; + PyObject *mtime; PyObject *fallback_exec; PyObject *fallback_symlink; static char *keywords_name[] = { @@ -78,6 +80,7 @@ p2_info = 0; has_meaningful_mtime = 1; has_meaningful_data = 1; + mtime_second_ambiguous = 0; parentfiledata = Py_None; fallback_exec = Py_None; fallback_symlink = Py_None; @@ -118,10 +121,18 @@ } if (parentfiledata != Py_None) { - if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size, - &mtime_s, &mtime_ns)) { + if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size, + &mtime)) { return NULL; } + if (mtime != Py_None) { + if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns, + &mtime_second_ambiguous)) { + return NULL; + } + } else { + has_meaningful_mtime = 0; + } } else { has_meaningful_data = 0; has_meaningful_mtime = 0; @@ -130,6 +141,9 @@ t->flags |= dirstate_flag_has_meaningful_data; t->mode = mode; t->size = size; + if (mtime_second_ambiguous) { + t->flags |= dirstate_flag_mtime_second_ambiguous; + } } else { t->mode = 0; t->size = 0; @@ -255,7 +269,8 @@ } else if (!(self->flags & dirstate_flag_has_mtime) || !(self->flags & dirstate_flag_p1_tracked) || !(self->flags & dirstate_flag_wc_tracked) || - (self->flags & dirstate_flag_p2_info)) { + (self->flags & dirstate_flag_p2_info) || + (self->flags & dirstate_flag_mtime_second_ambiguous)) { return ambiguous_time; } else { return self->mtime_s; @@ -311,33 +326,30 @@ return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); }; -static PyObject *dirstate_item_need_delay(dirstateItemObject *self, - PyObject *now) -{ - int now_s; - int now_ns; - if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) { - return NULL; - } - if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == 
now_s) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } -}; - static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self, PyObject *other) { int other_s; int other_ns; - if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) { + int other_second_ambiguous; + if (!PyArg_ParseTuple(other, "iii", &other_s, &other_ns, + &other_second_ambiguous)) { return NULL; } - if ((self->flags & dirstate_flag_has_mtime) && - self->mtime_s == other_s && - (self->mtime_ns == other_ns || self->mtime_ns == 0 || - other_ns == 0)) { + if (!(self->flags & dirstate_flag_has_mtime)) { + Py_RETURN_FALSE; + } + if (self->mtime_s != other_s) { + Py_RETURN_FALSE; + } + if (self->mtime_ns == 0 || other_ns == 0) { + if (self->flags & dirstate_flag_mtime_second_ambiguous) { + Py_RETURN_FALSE; + } else { + Py_RETURN_TRUE; + } + } + if (self->mtime_ns == other_ns) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -438,14 +450,6 @@ dirstate_flag_has_meaningful_data | dirstate_flag_has_mtime); } - if (t->flags & dirstate_flag_mtime_second_ambiguous) { - /* The current code is not able to do the more subtle comparison - * that the MTIME_SECOND_AMBIGUOUS requires. 
So we ignore the - * mtime */ - t->flags &= ~(dirstate_flag_mtime_second_ambiguous | - dirstate_flag_has_meaningful_data | - dirstate_flag_has_mtime); - } t->mode = 0; if (t->flags & dirstate_flag_has_meaningful_data) { if (t->flags & dirstate_flag_mode_exec_perm) { @@ -474,14 +478,28 @@ static PyObject *dirstate_item_set_clean(dirstateItemObject *self, PyObject *args) { - int size, mode, mtime_s, mtime_ns; - if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s, - &mtime_ns)) { + int size, mode, mtime_s, mtime_ns, mtime_second_ambiguous; + PyObject *mtime; + mtime_s = 0; + mtime_ns = 0; + mtime_second_ambiguous = 0; + if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) { return NULL; } + if (mtime != Py_None) { + if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns, + &mtime_second_ambiguous)) { + return NULL; + } + } else { + self->flags &= ~dirstate_flag_has_mtime; + } self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | dirstate_flag_has_meaningful_data | dirstate_flag_has_mtime; + if (mtime_second_ambiguous) { + self->flags |= dirstate_flag_mtime_second_ambiguous; + } self->mode = mode; self->size = size; self->mtime_s = mtime_s; @@ -530,8 +548,6 @@ "return a \"size\" suitable for v1 serialization"}, {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS, "return a \"mtime\" suitable for v1 serialization"}, - {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, - "True if the stored mtime would be ambiguous with the current time"}, {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to, METH_O, "True if the stored mtime is likely equal to the given mtime"}, {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, @@ -904,12 +920,9 @@ Py_ssize_t nbytes, pos, l; PyObject *k, *v = NULL, *pn; char *p, *s; - int now_s; - int now_ns; - if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type, - &map, &PyDict_Type, ©map, &PyTuple_Type, &pl, - &now_s, &now_ns)) { + if 
(!PyArg_ParseTuple(args, "O!O!O!:pack_dirstate", &PyDict_Type, &map, + &PyDict_Type, ©map, &PyTuple_Type, &pl)) { return NULL; } @@ -978,21 +991,6 @@ mode = dirstate_item_c_v1_mode(tuple); size = dirstate_item_c_v1_size(tuple); mtime = dirstate_item_c_v1_mtime(tuple); - if (state == 'n' && tuple->mtime_s == now_s) { - /* See pure/parsers.py:pack_dirstate for why we do - * this. */ - mtime = -1; - mtime_unset = (PyObject *)dirstate_item_from_v1_data( - state, mode, size, mtime); - if (!mtime_unset) { - goto bail; - } - if (PyDict_SetItem(map, k, mtime_unset) == -1) { - goto bail; - } - Py_DECREF(mtime_unset); - mtime_unset = NULL; - } *p++ = state; putbe32((uint32_t)mode, p); putbe32((uint32_t)size, p + 4);
--- a/mercurial/cext/revlog.c Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/cext/revlog.c Tue Jan 18 10:27:13 2022 +0100 @@ -120,9 +120,11 @@ static int index_find_node(indexObject *self, const char *node); #if LONG_MAX == 0x7fffffffL -static const char *const tuple_format = PY23("Kiiiiiis#KiBB", "Kiiiiiiy#KiBB"); +static const char *const tuple_format = + PY23("Kiiiiiis#KiBBi", "Kiiiiiiy#KiBBi"); #else -static const char *const tuple_format = PY23("kiiiiiis#kiBB", "kiiiiiiy#kiBB"); +static const char *const tuple_format = + PY23("kiiiiiis#kiBBi", "kiiiiiiy#kiBBi"); #endif /* A RevlogNG v1 index entry is 64 bytes long. */ @@ -135,6 +137,7 @@ static const long format_v2 = 2; /* Internal only, could be any number */ static const char comp_mode_inline = 2; +static const char rank_unknown = -1; static void raise_revlog_error(void) { @@ -352,7 +355,7 @@ return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2, c_node_id, self->nodelen, sidedata_offset, sidedata_comp_len, - data_comp_mode, sidedata_comp_mode); + data_comp_mode, sidedata_comp_mode, rank_unknown); } /* * Pack header information in binary @@ -453,7 +456,7 @@ { uint64_t offset_flags, sidedata_offset; int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2, - sidedata_comp_len; + sidedata_comp_len, rank; char data_comp_mode, sidedata_comp_mode; Py_ssize_t c_node_id_len; const char *c_node_id; @@ -464,8 +467,8 @@ &uncomp_len, &base_rev, &link_rev, &parent_1, &parent_2, &c_node_id, &c_node_id_len, &sidedata_offset, &sidedata_comp_len, - &data_comp_mode, &sidedata_comp_mode)) { - PyErr_SetString(PyExc_TypeError, "11-tuple required"); + &data_comp_mode, &sidedata_comp_mode, &rank)) { + PyErr_SetString(PyExc_TypeError, "12-tuple required"); return NULL; } @@ -2797,9 +2800,10 @@ self->entry_size = v1_entry_size; } - self->nullentry = Py_BuildValue( - PY23("iiiiiiis#iiBB", "iiiiiiiy#iiBB"), 0, 0, 0, -1, -1, -1, -1, - nullid, self->nodelen, 0, 0, 
comp_mode_inline, comp_mode_inline); + self->nullentry = + Py_BuildValue(PY23("iiiiiiis#iiBBi", "iiiiiiiy#iiBBi"), 0, 0, 0, -1, + -1, -1, -1, nullid, self->nodelen, 0, 0, + comp_mode_inline, comp_mode_inline, rank_unknown); if (!self->nullentry) return -1;
--- a/mercurial/changegroup.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/changegroup.py Tue Jan 18 10:27:13 2022 +0100 @@ -350,10 +350,11 @@ def ondupchangelog(cl, rev): if rev < clstart: - duprevs.append(rev) + duprevs.append(rev) # pytype: disable=attribute-error def onchangelog(cl, rev): ctx = cl.changelogrevision(rev) + assert efilesset is not None # help pytype efilesset.update(ctx.files) repo.register_changeset(rev, ctx)
--- a/mercurial/chgserver.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/chgserver.py Tue Jan 18 10:27:13 2022 +0100 @@ -643,6 +643,13 @@ def __init__(self, ui): self.ui = ui + + # TODO: use PEP 526 syntax (`_hashstate: hashstate` at the class level) + # when 3.5 support is dropped. + self._hashstate = None # type: hashstate + self._baseaddress = None # type: bytes + self._realaddress = None # type: bytes + self._idletimeout = ui.configint(b'chgserver', b'idletimeout') self._lastactive = time.time()
--- a/mercurial/cmdutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/cmdutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -522,8 +522,10 @@ # 1. filter patch, since we are intending to apply subset of it try: chunks, newopts = filterfn(ui, original_headers, match) - except error.PatchError as err: + except error.PatchParseError as err: raise error.InputError(_(b'error parsing patch: %s') % err) + except error.PatchApplicationError as err: + raise error.StateError(_(b'error applying patch: %s') % err) opts.update(newopts) # We need to keep a backup of files that have been newly added and @@ -608,8 +610,10 @@ ui.debug(b'applying patch\n') ui.debug(fp.getvalue()) patch.internalpatch(ui, repo, fp, 1, eolmode=None) - except error.PatchError as err: + except error.PatchParseError as err: raise error.InputError(pycompat.bytestr(err)) + except error.PatchApplicationError as err: + raise error.StateError(pycompat.bytestr(err)) del fp # 4. We prepared working directory according to filtered @@ -2020,9 +2024,16 @@ eolmode=None, similarity=sim / 100.0, ) - except error.PatchError as e: + except error.PatchParseError as e: + raise error.InputError( + pycompat.bytestr(e), + hint=_( + b'check that whitespace in the patch has not been mangled' + ), + ) + except error.PatchApplicationError as e: if not partial: - raise error.Abort(pycompat.bytestr(e)) + raise error.StateError(pycompat.bytestr(e)) if partial: rejects = True @@ -2079,8 +2090,15 @@ files, eolmode=None, ) - except error.PatchError as e: - raise error.Abort(stringutil.forcebytestr(e)) + except error.PatchParseError as e: + raise error.InputError( + stringutil.forcebytestr(e), + hint=_( + b'check that whitespace in the patch has not been mangled' + ), + ) + except error.PatchApplicationError as e: + raise error.StateError(stringutil.forcebytestr(e)) if opts.get(b'exact'): editor = None else: @@ -3628,15 +3646,14 @@ prntstatusmsg(b'drop', f) repo.dirstate.set_untracked(f) - normal = None - if node == parent: - # We're 
reverting to our parent. If possible, we'd like status
-        # to report the file as clean. We have to use normallookup for
-        # merges to avoid losing information about merged/dirty files.
-        if p2 != repo.nullid:
-            normal = repo.dirstate.set_tracked
-        else:
-            normal = repo.dirstate.set_clean
+    # We are reverting to our parent. If possible, we would like `hg status`
+    # to report the file as clean. We have to be less aggressive for
+    # merges to avoid losing information about copies introduced by the merge.
+    # This might come with bugs?
+    reset_copy = p2 == repo.nullid
+
+    def normal(filename):
+        return repo.dirstate.set_tracked(filename, reset_copy=reset_copy)

     newlyaddedandmodifiedfiles = set()
     if interactive:
@@ -3674,8 +3691,10 @@
             if operation == b'discard':
                 chunks = patch.reversehunks(chunks)
-        except error.PatchError as err:
-            raise error.Abort(_(b'error parsing patch: %s') % err)
+        except error.PatchParseError as err:
+            raise error.InputError(_(b'error parsing patch: %s') % err)
+        except error.PatchApplicationError as err:
+            raise error.StateError(_(b'error applying patch: %s') % err)

     # FIXME: when doing an interactive revert of a copy, there's no way of
     # performing a partial revert of the added file, the only option is
@@ -3710,8 +3729,10 @@
     if dopatch:
         try:
             patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
-        except error.PatchError as err:
-            raise error.Abort(pycompat.bytestr(err))
+        except error.PatchParseError as err:
+            raise error.InputError(pycompat.bytestr(err))
+        except error.PatchApplicationError as err:
+            raise error.StateError(pycompat.bytestr(err))
         del fp
     else:
         for f in actions[b'revert'][0]:
@@ -3727,9 +3748,6 @@
             checkout(f)
             repo.dirstate.set_tracked(f)
-    normal = repo.dirstate.set_tracked
-    if node == parent and p2 == repo.nullid:
-        normal = repo.dirstate.set_clean
     for f in actions[b'undelete'][0]:
         if interactive:
             choice = repo.ui.promptchoice(
--- a/mercurial/commands.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/commands.py Tue Jan 18 10:27:13 2022 +0100 @@ -6130,7 +6130,6 @@ ret = 0 didwork = False - tocomplete = [] hasconflictmarkers = [] if mark: markcheck = ui.config(b'commands', b'resolve.mark-check') @@ -6183,24 +6182,20 @@ # preresolve file overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} with ui.configoverride(overrides, b'resolve'): - complete, r = ms.preresolve(f, wctx) - if not complete: - tocomplete.append(f) - elif r: + r = ms.resolve(f, wctx) + if r: ret = 1 finally: ms.commit() - # replace filemerge's .orig file with our resolve file, but only - # for merges that are complete - if complete: - try: - util.rename( - a + b".resolve", scmutil.backuppath(ui, repo, f) - ) - except OSError as inst: - if inst.errno != errno.ENOENT: - raise + # replace filemerge's .orig file with our resolve file + try: + util.rename( + a + b".resolve", scmutil.backuppath(ui, repo, f) + ) + except OSError as inst: + if inst.errno != errno.ENOENT: + raise if hasconflictmarkers: ui.warn( @@ -6218,25 +6213,6 @@ hint=_(b'use --all to mark anyway'), ) - for f in tocomplete: - try: - # resolve file - overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} - with ui.configoverride(overrides, b'resolve'): - r = ms.resolve(f, wctx) - if r: - ret = 1 - finally: - ms.commit() - - # replace filemerge's .orig file with our resolve file - a = repo.wjoin(f) - try: - util.rename(a + b".resolve", scmutil.backuppath(ui, repo, f)) - except OSError as inst: - if inst.errno != errno.ENOENT: - raise - ms.commit() branchmerge = repo.dirstate.p2() != repo.nullid # resolve is not doing a parent change here, however, `record updates` @@ -6897,9 +6873,9 @@ cmdutil.check_at_most_one_arg(opts, 'rev', 'change') opts = pycompat.byteskwargs(opts) - revs = opts.get(b'rev') - change = opts.get(b'change') - terse = opts.get(b'terse') + revs = opts.get(b'rev', []) + change = opts.get(b'change', b'') + terse = opts.get(b'terse', 
_NOTTERSE) if terse is _NOTTERSE: if revs: terse = b'' @@ -7832,9 +7808,9 @@ raise error.InputError(_(b"you can't specify a revision and a date")) updatecheck = None - if check: + if check or merge is not None and not merge: updatecheck = b'abort' - elif merge: + elif merge or check is not None and not check: updatecheck = b'none' with repo.wlock():
--- a/mercurial/configitems.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/configitems.py Tue Jan 18 10:27:13 2022 +0100 @@ -1102,16 +1102,6 @@ ) coreconfigitem( b'experimental', - b'httppeer.advertise-v2', - default=False, -) -coreconfigitem( - b'experimental', - b'httppeer.v2-encoder-order', - default=None, -) -coreconfigitem( - b'experimental', b'httppostargs', default=False, ) @@ -1211,11 +1201,6 @@ ) coreconfigitem( b'experimental', - b'sshserver.support-v2', - default=False, -) -coreconfigitem( - b'experimental', b'sparse-read', default=False, ) @@ -1241,26 +1226,6 @@ ) coreconfigitem( b'experimental', - b'sshpeer.advertise-v2', - default=False, -) -coreconfigitem( - b'experimental', - b'web.apiserver', - default=False, -) -coreconfigitem( - b'experimental', - b'web.api.http-v2', - default=False, -) -coreconfigitem( - b'experimental', - b'web.api.debugreflect', - default=False, -) -coreconfigitem( - b'experimental', b'web.full-garbage-collection-rate', default=1, # still forcing a full collection on each request ) @@ -1281,11 +1246,17 @@ ) coreconfigitem( b'extensions', - b'.*', + b'[^:]*', default=None, generic=True, ) coreconfigitem( + b'extensions', + b'[^:]*:required', + default=False, + generic=True, +) +coreconfigitem( b'extdata', b'.*', default=None, @@ -1351,10 +1322,10 @@ ) # Experimental TODOs: # -# * Same as for evlogv2 (but for the reduction of the number of files) +# * Same as for revlogv2 (but for the reduction of the number of files) +# * Actually computing the rank of changesets # * Improvement to investigate # - storing .hgtags fnode -# - storing `rank` of changesets # - storing branch related identifier coreconfigitem(
--- a/mercurial/context.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/context.py Tue Jan 18 10:27:13 2022 +0100 @@ -46,6 +46,9 @@ dateutil, stringutil, ) +from .dirstateutils import ( + timestamp, +) propertycache = util.propertycache @@ -1793,13 +1796,14 @@ sane.append(f) return sane - def _checklookup(self, files): + def _checklookup(self, files, mtime_boundary): # check for any possibly clean files if not files: - return [], [], [] + return [], [], [], [] modified = [] deleted = [] + clean = [] fixup = [] pctx = self._parents[0] # do a full compare of any files that might have changed @@ -1813,8 +1817,18 @@ or pctx[f].cmp(self[f]) ): modified.append(f) + elif mtime_boundary is None: + clean.append(f) else: - fixup.append(f) + s = self[f].lstat() + mode = s.st_mode + size = s.st_size + file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary) + if file_mtime is not None: + cache_info = (mode, size, file_mtime) + fixup.append((f, cache_info)) + else: + clean.append(f) except (IOError, OSError): # A file become inaccessible in between? Mark it as deleted, # matching dirstate behavior (issue5584). @@ -1824,7 +1838,7 @@ # it's in the dirstate. 
deleted.append(f) - return modified, deleted, fixup + return modified, deleted, clean, fixup def _poststatusfixup(self, status, fixup): """update dirstate for files that are actually clean""" @@ -1842,13 +1856,13 @@ if dirstate.identity() == oldid: if fixup: if dirstate.pendingparentchange(): - normal = lambda f: dirstate.update_file( + normal = lambda f, pfd: dirstate.update_file( f, p1_tracked=True, wc_tracked=True ) else: normal = dirstate.set_clean - for f in fixup: - normal(f) + for f, pdf in fixup: + normal(f, pdf) # write changes out explicitly, because nesting # wlock at runtime may prevent 'wlock.release()' # after this block from doing so for subsequent @@ -1878,19 +1892,23 @@ subrepos = [] if b'.hgsub' in self: subrepos = sorted(self.substate) - cmp, s = self._repo.dirstate.status( + cmp, s, mtime_boundary = self._repo.dirstate.status( match, subrepos, ignored=ignored, clean=clean, unknown=unknown ) # check for any possibly clean files fixup = [] if cmp: - modified2, deleted2, fixup = self._checklookup(cmp) + modified2, deleted2, clean_set, fixup = self._checklookup( + cmp, mtime_boundary + ) s.modified.extend(modified2) s.deleted.extend(deleted2) + if clean_set and clean: + s.clean.extend(clean_set) if fixup and clean: - s.clean.extend(fixup) + s.clean.extend((f for f, _ in fixup)) self._poststatusfixup(s, fixup)
--- a/mercurial/copies.py	Thu Dec 30 13:25:44 2021 +0100
+++ b/mercurial/copies.py	Tue Jan 18 10:27:13 2022 +0100
@@ -246,7 +246,6 @@
         return {}

     repo = a.repo().unfiltered()
-    children = {}
     cl = repo.changelog
     isancestor = cl.isancestorrev
@@ -290,7 +289,7 @@
         # no common revision to track copies from
         return {}
     if has_graph_roots:
-        # this deal with the special case mentionned in the [1] footnotes. We
+        # this deals with the special case mentioned in the [1] footnotes. We
         # must filter out revisions that leads to non-common graphroots.
         roots = list(roots)
         m = min(roots)
@@ -301,11 +300,11 @@

     if repo.filecopiesmode == b'changeset-sidedata':
         # When using side-data, we will process the edges "from" the children.
-        # We iterate over the childre, gathering previous collected data for
+        # We iterate over the children, gathering previous collected data for
         # the parents. Do know when the parents data is no longer necessary, we
         # keep a counter of how many children each revision has.
         #
-        # An interresting property of `children_count` is that it only contains
+        # An interesting property of `children_count` is that it only contains
         # revision that will be relevant for a edge of the graph. So if a
         # children has parent not in `children_count`, that edges should not be
         # processed.
@@ -449,7 +448,11 @@

     # filter out internal details and return a {dest: source mapping}
     final_copies = {}
-    for dest, (tt, source) in all_copies[targetrev].items():
+
+    targetrev_items = all_copies[targetrev]
+    assert targetrev_items is not None  # help pytype
+
+    for dest, (tt, source) in targetrev_items.items():
         if source is not None:
             final_copies[dest] = source
     if not alwaysmatch:
--- a/mercurial/debugcommands.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/debugcommands.py Tue Jan 18 10:27:13 2022 +0100 @@ -91,7 +91,6 @@ vfs as vfsmod, wireprotoframing, wireprotoserver, - wireprotov2peer, ) from .interfaces import repository from .utils import ( @@ -273,7 +272,10 @@ x[fn].data() for x in (pa, p1, p2) ] m3 = simplemerge.Merge3Text(base, local, other) - ml = [l.strip() for l in m3.merge_lines()] + ml = [ + l.strip() + for l in simplemerge.render_minimized(m3)[0] + ] ml.append(b"") elif at > 0: ml = p1[fn].data().split(b"\n") @@ -4352,8 +4354,8 @@ ``--peer`` can be used to bypass the handshake protocol and construct a peer instance using the specified class type. Valid values are ``raw``, - ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending - raw data payloads and don't support higher-level command actions. + ``ssh1``. ``raw`` instances only allow sending raw data payloads and + don't support higher-level command actions. ``--noreadstderr`` can be used to disable automatic reading from stderr of the peer (for SSH connections only). 
Disabling automatic reading of @@ -4528,13 +4530,11 @@ if opts[b'peer'] and opts[b'peer'] not in ( b'raw', - b'http2', b'ssh1', - b'ssh2', ): raise error.Abort( _(b'invalid value for --peer'), - hint=_(b'valid values are "raw", "ssh1", and "ssh2"'), + hint=_(b'valid values are "raw" and "ssh1"'), ) if path and opts[b'localssh']: @@ -4602,18 +4602,6 @@ None, autoreadstderr=autoreadstderr, ) - elif opts[b'peer'] == b'ssh2': - ui.write(_(b'creating ssh peer for wire protocol version 2\n')) - peer = sshpeer.sshv2peer( - ui, - url, - proc, - stdin, - stdout, - stderr, - None, - autoreadstderr=autoreadstderr, - ) elif opts[b'peer'] == b'raw': ui.write(_(b'using raw connection to peer\n')) peer = None @@ -4666,34 +4654,7 @@ opener = urlmod.opener(ui, authinfo, **openerargs) - if opts[b'peer'] == b'http2': - ui.write(_(b'creating http peer for wire protocol version 2\n')) - # We go through makepeer() because we need an API descriptor for - # the peer instance to be useful. - maybe_silent = ( - ui.silent() - if opts[b'nologhandshake'] - else util.nullcontextmanager() - ) - with maybe_silent, ui.configoverride( - {(b'experimental', b'httppeer.advertise-v2'): True} - ): - peer = httppeer.makepeer(ui, path, opener=opener) - - if not isinstance(peer, httppeer.httpv2peer): - raise error.Abort( - _( - b'could not instantiate HTTP peer for ' - b'wire protocol version 2' - ), - hint=_( - b'the server may not have the feature ' - b'enabled or is not allowing this ' - b'client version' - ), - ) - - elif opts[b'peer'] == b'raw': + if opts[b'peer'] == b'raw': ui.write(_(b'using raw connection to peer\n')) peer = None elif opts[b'peer']: @@ -4774,17 +4735,10 @@ with peer.commandexecutor() as e: res = e.callcommand(command, args).result() - if isinstance(res, wireprotov2peer.commandresponse): - val = res.objects() - ui.status( - _(b'response: %s\n') - % stringutil.pprint(val, bprefix=True, indent=2) - ) - else: - ui.status( - _(b'response: %s\n') - % stringutil.pprint(res, bprefix=True, 
indent=2) - ) + ui.status( + _(b'response: %s\n') + % stringutil.pprint(res, bprefix=True, indent=2) + ) elif action == b'batchbegin': if batchedcommands is not None:
--- a/mercurial/dirstate.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/dirstate.py Tue Jan 18 10:27:13 2022 +0100 @@ -66,16 +66,6 @@ return obj._join(fname) -def _getfsnow(vfs): - '''Get "now" timestamp on filesystem''' - tmpfd, tmpname = vfs.mkstemp() - try: - return timestamp.mtime_of(os.fstat(tmpfd)) - finally: - os.close(tmpfd) - vfs.unlink(tmpname) - - def requires_parents_change(func): def wrap(self, *args, **kwargs): if not self.pendingparentchange(): @@ -126,7 +116,6 @@ # UNC path pointing to root share (issue4557) self._rootdir = pathutil.normasprefix(root) self._dirty = False - self._lastnormaltime = timestamp.zero() self._ui = ui self._filecache = {} self._parentwriters = 0 @@ -332,27 +321,6 @@ return util.pconvert(path) return path - def __getitem__(self, key): - """Return the current state of key (a filename) in the dirstate. - - States are: - n normal - m needs merging - r marked for removal - a marked for addition - ? not tracked - - XXX The "state" is a bit obscure to be in the "public" API. we should - consider migrating all user of this to going through the dirstate entry - instead. - """ - msg = b"don't use dirstate[file], use dirstate.get_entry(file)" - util.nouideprecwarn(msg, b'6.1', stacklevel=2) - entry = self._map.get(key) - if entry is not None: - return entry.state - return b'?' - def get_entry(self, path): """return a DirstateItem for the associated path""" entry = self._map.get(path) @@ -440,7 +408,6 @@ for a in ("_map", "_branch", "_ignore"): if a in self.__dict__: delattr(self, a) - self._lastnormaltime = timestamp.zero() self._dirty = False self._parentwriters = 0 self._origpl = None @@ -462,19 +429,24 @@ return self._map.copymap @requires_no_parents_change - def set_tracked(self, filename): + def set_tracked(self, filename, reset_copy=False): """a "public" method for generic code to mark a file as tracked This function is to be called outside of "update/merge" case. For example by a command like `hg add X`. 
+ if reset_copy is set, any existing copy information will be dropped. + return True the file was previously untracked, False otherwise. """ self._dirty = True entry = self._map.get(filename) if entry is None or not entry.tracked: self._check_new_tracked_filename(filename) - return self._map.set_tracked(filename) + pre_tracked = self._map.set_tracked(filename) + if reset_copy: + self._map.copymap.pop(filename, None) + return pre_tracked @requires_no_parents_change def set_untracked(self, filename): @@ -491,21 +463,13 @@ return ret @requires_no_parents_change - def set_clean(self, filename, parentfiledata=None): + def set_clean(self, filename, parentfiledata): """record that the current state of the file on disk is known to be clean""" self._dirty = True - if parentfiledata: - (mode, size, mtime) = parentfiledata - else: - (mode, size, mtime) = self._get_filedata(filename) if not self._map[filename].tracked: self._check_new_tracked_filename(filename) + (mode, size, mtime) = parentfiledata self._map.set_clean(filename, mode, size, mtime) - if mtime > self._lastnormaltime: - # Remember the most recent modification timeslot for status(), - # to make sure we won't miss future size-preserving file content - # modifications that happen within the same timeslot. - self._lastnormaltime = mtime @requires_no_parents_change def set_possibly_dirty(self, filename): @@ -544,10 +508,6 @@ if entry is not None and entry.added: return # avoid dropping copy information (maybe?) - parentfiledata = None - if wc_tracked and p1_tracked: - parentfiledata = self._get_filedata(filename) - self._map.reset_state( filename, wc_tracked, @@ -555,16 +515,7 @@ # the underlying reference might have changed, we will have to # check it. 
has_meaningful_mtime=False, - parentfiledata=parentfiledata, ) - if ( - parentfiledata is not None - and parentfiledata[2] > self._lastnormaltime - ): - # Remember the most recent modification timeslot for status(), - # to make sure we won't miss future size-preserving file content - # modifications that happen within the same timeslot. - self._lastnormaltime = parentfiledata[2] @requires_parents_change def update_file( @@ -594,13 +545,6 @@ self._dirty = True - need_parent_file_data = ( - not possibly_dirty and not p2_info and wc_tracked and p1_tracked - ) - - if need_parent_file_data and parentfiledata is None: - parentfiledata = self._get_filedata(filename) - self._map.reset_state( filename, wc_tracked, @@ -609,14 +553,6 @@ has_meaningful_mtime=not possibly_dirty, parentfiledata=parentfiledata, ) - if ( - parentfiledata is not None - and parentfiledata[2] > self._lastnormaltime - ): - # Remember the most recent modification timeslot for status(), - # to make sure we won't miss future size-preserving file content - # modifications that happen within the same timeslot. 
- self._lastnormaltime = parentfiledata[2] def _check_new_tracked_filename(self, filename): scmutil.checkfilename(filename) @@ -634,14 +570,6 @@ msg %= (pycompat.bytestr(d), pycompat.bytestr(filename)) raise error.Abort(msg) - def _get_filedata(self, filename): - """returns""" - s = os.lstat(self._join(filename)) - mode = s.st_mode - size = s.st_size - mtime = timestamp.mtime_of(s) - return (mode, size, mtime) - def _discoverpath(self, path, normed, ignoremissing, exists, storemap): if exists is None: exists = os.path.lexists(os.path.join(self._root, path)) @@ -720,7 +648,6 @@ def clear(self): self._map.clear() - self._lastnormaltime = timestamp.zero() self._dirty = True def rebuild(self, parent, allfiles, changedfiles=None): @@ -728,9 +655,7 @@ # Rebuild entire dirstate to_lookup = allfiles to_drop = [] - lastnormaltime = self._lastnormaltime self.clear() - self._lastnormaltime = lastnormaltime elif len(changedfiles) < 10: # Avoid turning allfiles into a set, which can be expensive if it's # large. @@ -779,20 +704,11 @@ filename = self._filename if tr: - # 'dirstate.write()' is not only for writing in-memory - # changes out, but also for dropping ambiguous timestamp. - # delayed writing re-raise "ambiguous timestamp issue". 
- # See also the wiki page below for detail: - # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan - - # record when mtime start to be ambiguous - now = _getfsnow(self._opener) - # delay writing in-memory changes out tr.addfilegenerator( b'dirstate', (self._filename,), - lambda f: self._writedirstate(tr, f, now=now), + lambda f: self._writedirstate(tr, f), location=b'plain', ) return @@ -811,7 +727,7 @@ """ self._plchangecallbacks[category] = callback - def _writedirstate(self, tr, st, now=None): + def _writedirstate(self, tr, st): # notify callbacks about parents change if self._origpl is not None and self._origpl != self._pl: for c, callback in sorted( @@ -820,32 +736,7 @@ callback(self, self._origpl, self._pl) self._origpl = None - if now is None: - # use the modification time of the newly created temporary file as the - # filesystem's notion of 'now' - now = timestamp.mtime_of(util.fstat(st)) - - # enough 'delaywrite' prevents 'pack_dirstate' from dropping - # timestamp of each entries in dirstate, because of 'now > mtime' - delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite') - if delaywrite > 0: - # do we have any files to delay for? 
- for f, e in pycompat.iteritems(self._map): - if e.need_delay(now): - import time # to avoid useless import - - # rather than sleep n seconds, sleep until the next - # multiple of n seconds - clock = time.time() - start = int(clock) - (int(clock) % delaywrite) - end = start + delaywrite - time.sleep(end - clock) - # trust our estimate that the end is near now - now = timestamp.timestamp((end, 0)) - break - - self._map.write(tr, st, now) - self._lastnormaltime = timestamp.zero() + self._map.write(tr, st) self._dirty = False def _dirignore(self, f): @@ -1243,7 +1134,6 @@ self._rootdir, self._ignorefiles(), self._checkexec, - self._lastnormaltime, bool(list_clean), bool(list_ignored), bool(list_unknown), @@ -1335,11 +1225,20 @@ # Some matchers have yet to be implemented use_rust = False + # Get the time from the filesystem so we can disambiguate files that + # appear modified in the present or future. + try: + mtime_boundary = timestamp.get_fs_now(self._opener) + except OSError: + # In largefiles or readonly context + mtime_boundary = None + if use_rust: try: - return self._rust_status( + res = self._rust_status( match, listclean, listignored, listunknown ) + return res + (mtime_boundary,) except rustmod.FallbackError: pass @@ -1361,7 +1260,6 @@ checkexec = self._checkexec checklink = self._checklink copymap = self._map.copymap - lastnormaltime = self._lastnormaltime # We need to do full walks when either # - we're listing all clean files, or @@ -1417,19 +1315,17 @@ else: madd(fn) elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)): - ladd(fn) - elif timestamp.mtime_of(st) == lastnormaltime: - # fn may have just been marked as normal and it may have - # changed in the same second without changing its size. - # This can happen if we quickly do multiple commits. - # Force lookup, so we don't miss such a racy file change. 
+ # There might be a change in the future if for example the + # internal clock is off, but this is a case where the issues + # the user would face would be a lot worse and there is + # nothing we can really do. ladd(fn) elif listclean: cadd(fn) status = scmutil.status( modified, added, removed, deleted, unknown, ignored, clean ) - return (lookup, status) + return (lookup, status, mtime_boundary) def matches(self, match): """
--- a/mercurial/dirstatemap.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/dirstatemap.py Tue Jan 18 10:27:13 2022 +0100 @@ -444,13 +444,13 @@ self.__getitem__ = self._map.__getitem__ self.get = self._map.get - def write(self, tr, st, now): + def write(self, tr, st): if self._use_dirstate_v2: - packed, meta = v2.pack_dirstate(self._map, self.copymap, now) + packed, meta = v2.pack_dirstate(self._map, self.copymap) self.write_v2_no_append(tr, st, meta, packed) else: packed = parsers.pack_dirstate( - self._map, self.copymap, self.parents(), now + self._map, self.copymap, self.parents() ) st.write(packed) st.close() @@ -655,10 +655,10 @@ self._map return self.identity - def write(self, tr, st, now): + def write(self, tr, st): if not self._use_dirstate_v2: p1, p2 = self.parents() - packed = self._map.write_v1(p1, p2, now) + packed = self._map.write_v1(p1, p2) st.write(packed) st.close() self._dirtyparents = False @@ -666,7 +666,7 @@ # We can only append to an existing data file if there is one can_append = self.docket.uuid is not None - packed, meta, append = self._map.write_v2(now, can_append) + packed, meta, append = self._map.write_v2(can_append) if append: docket = self.docket data_filename = docket.data_filename()
--- a/mercurial/dirstateutils/timestamp.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/dirstateutils/timestamp.py Tue Jan 18 10:27:13 2022 +0100 @@ -6,8 +6,11 @@ from __future__ import absolute_import import functools +import os import stat +from .. import error + rangemask = 0x7FFFFFFF @@ -18,40 +21,45 @@ A Unix timestamp with optional nanoseconds precision, modulo 2**31 seconds. - A 2-tuple containing: + A 3-tuple containing: `truncated_seconds`: seconds since the Unix epoch, truncated to its lower 31 bits `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`. When this is zero, the sub-second precision is considered unknown. + + `second_ambiguous`: whether this timestamp is still "reliable" + (see `reliable_mtime_of`) if we drop its sub-second component. """ def __new__(cls, value): - truncated_seconds, subsec_nanos = value - value = (truncated_seconds & rangemask, subsec_nanos) + truncated_seconds, subsec_nanos, second_ambiguous = value + value = (truncated_seconds & rangemask, subsec_nanos, second_ambiguous) return super(timestamp, cls).__new__(cls, value) def __eq__(self, other): - self_secs, self_subsec_nanos = self - other_secs, other_subsec_nanos = other - return self_secs == other_secs and ( - self_subsec_nanos == other_subsec_nanos - or self_subsec_nanos == 0 - or other_subsec_nanos == 0 + raise error.ProgrammingError( + 'timestamp should never be compared directly' ) def __gt__(self, other): - self_secs, self_subsec_nanos = self - other_secs, other_subsec_nanos = other - if self_secs > other_secs: - return True - if self_secs < other_secs: - return False - if self_subsec_nanos == 0 or other_subsec_nanos == 0: - # they are considered equal, so not "greater than" - return False - return self_subsec_nanos > other_subsec_nanos + raise error.ProgrammingError( + 'timestamp should never be compared directly' + ) + + +def get_fs_now(vfs): + """return a timestamp for "now" in the current vfs + + This will raise an exception if no temporary 
files could be created. + """ + tmpfd, tmpname = vfs.mkstemp() + try: + return mtime_of(os.fstat(tmpfd)) + finally: + os.close(tmpfd) + vfs.unlink(tmpname) def zero(): @@ -84,4 +92,37 @@ secs = nanos // billion subsec_nanos = nanos % billion - return timestamp((secs, subsec_nanos)) + return timestamp((secs, subsec_nanos, False)) + + +def reliable_mtime_of(stat_result, present_mtime): + """Same as `mtime_of`, but return `None` or a `Timestamp` with + `second_ambiguous` set if the date might be ambiguous. + + A modification time is reliable if it is older than "present_time" (or + sufficiently in the future). + + Otherwise a concurrent modification might happens with the same mtime. + """ + file_mtime = mtime_of(stat_result) + file_second = file_mtime[0] + file_ns = file_mtime[1] + boundary_second = present_mtime[0] + boundary_ns = present_mtime[1] + # If the mtime of the ambiguous file is younger (or equal) to the starting + # point of the `status` walk, we cannot garantee that another, racy, write + # will not happen right after with the same mtime and we cannot cache the + # information. + # + # However if the mtime is far away in the future, this is likely some + # mismatch between the current clock and previous file system operation. So + # mtime more than one days in the future are considered fine. + if boundary_second == file_second: + if file_ns and boundary_ns: + if file_ns < boundary_ns: + return timestamp((file_second, file_ns, True)) + return None + elif boundary_second < file_second < (3600 * 24 + boundary_second): + return None + else: + return file_mtime
--- a/mercurial/dirstateutils/v2.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/dirstateutils/v2.py Tue Jan 18 10:27:13 2022 +0100 @@ -174,12 +174,10 @@ ) -def pack_dirstate(map, copy_map, now): +def pack_dirstate(map, copy_map): """ Pack `map` and `copy_map` into the dirstate v2 binary format and return the bytearray. - `now` is a timestamp of the current filesystem time used to detect race - conditions in writing the dirstate to disk, see inline comment. The on-disk format expects a tree-like structure where the leaves are written first (and sorted per-directory), going up levels until the root @@ -284,17 +282,6 @@ stack.append(current_node) for index, (path, entry) in enumerate(sorted_map, 1): - if entry.need_delay(now): - # The file was last modified "simultaneously" with the current - # write to dirstate (i.e. within the same second for file- - # systems with a granularity of 1 sec). This commonly happens - # for at least a couple of files on 'update'. - # The user could change the file without changing its size - # within the same second. Invalidate the file's mtime in - # dirstate, forcing future 'status' calls to compare the - # contents of the file if the size is the same. This prevents - # mistakenly treating such files as clean. - entry.set_possibly_dirty() nodes_with_entry_count += 1 if path in copy_map: nodes_with_copy_source_count += 1
--- a/mercurial/discovery.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/discovery.py Tue Jan 18 10:27:13 2022 +0100 @@ -141,17 +141,6 @@ self._computecommonmissing() return self._missing - @property - def missingheads(self): - util.nouideprecwarn( - b'outgoing.missingheads never contained what the name suggests and ' - b'was renamed to outgoing.ancestorsof. check your code for ' - b'correctness.', - b'5.5', - stacklevel=2, - ) - return self.ancestorsof - def findcommonoutgoing( repo, other, onlyheads=None, force=False, commoninc=None, portable=False
--- a/mercurial/error.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/error.py Tue Jan 18 10:27:13 2022 +0100 @@ -388,6 +388,14 @@ __bytes__ = _tobytes +class PatchParseError(PatchError): + __bytes__ = _tobytes + + +class PatchApplicationError(PatchError): + __bytes__ = _tobytes + + def getsimilar(symbols, value): # type: (Iterable[bytes], bytes) -> List[bytes] sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
--- a/mercurial/exchange.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/exchange.py Tue Jan 18 10:27:13 2022 +0100 @@ -22,7 +22,6 @@ changegroup, discovery, error, - exchangev2, lock as lockmod, logexchange, narrowspec, @@ -522,8 +521,16 @@ def _checksubrepostate(pushop): """Ensure all outgoing referenced subrepo revisions are present locally""" + + repo = pushop.repo + + # If the repository does not use subrepos, skip the expensive + # manifest checks. + if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')): + return + for n in pushop.outgoing.missing: - ctx = pushop.repo[n] + ctx = repo[n] if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files(): for subpath in sorted(ctx.substate): @@ -1666,21 +1673,17 @@ ): add_confirm_callback(repo, pullop) - # Use the modern wire protocol, if available. - if remote.capable(b'command-changesetdata'): - exchangev2.pull(pullop) - else: - # This should ideally be in _pullbundle2(). However, it needs to run - # before discovery to avoid extra work. - _maybeapplyclonebundle(pullop) - streamclone.maybeperformlegacystreamclone(pullop) - _pulldiscovery(pullop) - if pullop.canusebundle2: - _fullpullbundle2(repo, pullop) - _pullchangeset(pullop) - _pullphase(pullop) - _pullbookmarks(pullop) - _pullobsolete(pullop) + # This should ideally be in _pullbundle2(). However, it needs to run + # before discovery to avoid extra work. + _maybeapplyclonebundle(pullop) + streamclone.maybeperformlegacystreamclone(pullop) + _pulldiscovery(pullop) + if pullop.canusebundle2: + _fullpullbundle2(repo, pullop) + _pullchangeset(pullop) + _pullphase(pullop) + _pullbookmarks(pullop) + _pullobsolete(pullop) # storing remotenames if repo.ui.configbool(b'experimental', b'remotenames'):
--- a/mercurial/exchangev2.py Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,804 +0,0 @@ -# exchangev2.py - repository exchange for wire protocol version 2 -# -# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -import collections -import weakref - -from .i18n import _ -from .node import short -from . import ( - bookmarks, - error, - mdiff, - narrowspec, - phases, - pycompat, - requirements as requirementsmod, - setdiscovery, -) -from .interfaces import repository - - -def pull(pullop): - """Pull using wire protocol version 2.""" - repo = pullop.repo - remote = pullop.remote - - usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop) - - # If this is a clone and it was requested to perform a "stream clone", - # we obtain the raw files data from the remote then fall back to an - # incremental pull. This is somewhat hacky and is not nearly robust enough - # for long-term usage. - if usingrawchangelogandmanifest: - with repo.transaction(b'clone'): - _fetchrawstorefiles(repo, remote) - repo.invalidate(clearfilecache=True) - - tr = pullop.trmanager.transaction() - - # We don't use the repo's narrow matcher here because the patterns passed - # to exchange.pull() could be different. - narrowmatcher = narrowspec.match( - repo.root, - # Empty maps to nevermatcher. So always - # set includes if missing. - pullop.includepats or {b'path:.'}, - pullop.excludepats, - ) - - if pullop.includepats or pullop.excludepats: - pathfilter = {} - if pullop.includepats: - pathfilter[b'include'] = sorted(pullop.includepats) - if pullop.excludepats: - pathfilter[b'exclude'] = sorted(pullop.excludepats) - else: - pathfilter = None - - # Figure out what needs to be fetched. 
- common, fetch, remoteheads = _pullchangesetdiscovery( - repo, remote, pullop.heads, abortwhenunrelated=pullop.force - ) - - # And fetch the data. - pullheads = pullop.heads or remoteheads - csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads) - - # New revisions are written to the changelog. But all other updates - # are deferred. Do those now. - - # Ensure all new changesets are draft by default. If the repo is - # publishing, the phase will be adjusted by the loop below. - if csetres[b'added']: - phases.registernew( - repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']] - ) - - # And adjust the phase of all changesets accordingly. - for phasenumber, phase in phases.phasenames.items(): - if phase == b'secret' or not csetres[b'nodesbyphase'][phase]: - continue - - phases.advanceboundary( - repo, - tr, - phasenumber, - csetres[b'nodesbyphase'][phase], - ) - - # Write bookmark updates. - bookmarks.updatefromremote( - repo.ui, - repo, - csetres[b'bookmarks'], - remote.url(), - pullop.gettransaction, - explicit=pullop.explicitbookmarks, - ) - - manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes']) - - # We don't properly support shallow changeset and manifest yet. So we apply - # depth limiting locally. - if pullop.depth: - relevantcsetnodes = set() - clnode = repo.changelog.node - - for rev in repo.revs( - b'ancestors(%ln, %s)', pullheads, pullop.depth - 1 - ): - relevantcsetnodes.add(clnode(rev)) - - csetrelevantfilter = lambda n: n in relevantcsetnodes - - else: - csetrelevantfilter = lambda n: True - - # If obtaining the raw store files, we need to scan the full repo to - # derive all the changesets, manifests, and linkrevs. 
- if usingrawchangelogandmanifest: - csetsforfiles = [] - mnodesforfiles = [] - manifestlinkrevs = {} - - for rev in repo: - ctx = repo[rev] - node = ctx.node() - - if not csetrelevantfilter(node): - continue - - mnode = ctx.manifestnode() - - csetsforfiles.append(node) - mnodesforfiles.append(mnode) - manifestlinkrevs[mnode] = rev - - else: - csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)] - mnodesforfiles = manres[b'added'] - manifestlinkrevs = manres[b'linkrevs'] - - # Find all file nodes referenced by added manifests and fetch those - # revisions. - fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles) - _fetchfilesfromcsets( - repo, - tr, - remote, - pathfilter, - fnodes, - csetsforfiles, - manifestlinkrevs, - shallow=bool(pullop.depth), - ) - - -def _checkuserawstorefiledata(pullop): - """Check whether we should use rawstorefiledata command to retrieve data.""" - - repo = pullop.repo - remote = pullop.remote - - # Command to obtain raw store data isn't available. - if b'rawstorefiledata' not in remote.apidescriptor[b'commands']: - return False - - # Only honor if user requested stream clone operation. - if not pullop.streamclonerequested: - return False - - # Only works on empty repos. - if len(repo): - return False - - # TODO This is super hacky. There needs to be a storage API for this. We - # also need to check for compatibility with the remote. - if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements: - return False - - return True - - -def _fetchrawstorefiles(repo, remote): - with remote.commandexecutor() as e: - objs = e.callcommand( - b'rawstorefiledata', - { - b'files': [b'changelog', b'manifestlog'], - }, - ).result() - - # First object is a summary of files data that follows. - overall = next(objs) - - progress = repo.ui.makeprogress( - _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes') - ) - with progress: - progress.update(0) - - # Next are pairs of file metadata, data. 
- while True: - try: - filemeta = next(objs) - except StopIteration: - break - - for k in (b'location', b'path', b'size'): - if k not in filemeta: - raise error.Abort( - _(b'remote file data missing key: %s') % k - ) - - if filemeta[b'location'] == b'store': - vfs = repo.svfs - else: - raise error.Abort( - _(b'invalid location for raw file data: %s') - % filemeta[b'location'] - ) - - bytesremaining = filemeta[b'size'] - - with vfs.open(filemeta[b'path'], b'wb') as fh: - while True: - try: - chunk = next(objs) - except StopIteration: - break - - bytesremaining -= len(chunk) - - if bytesremaining < 0: - raise error.Abort( - _( - b'received invalid number of bytes for file ' - b'data; expected %d, got extra' - ) - % filemeta[b'size'] - ) - - progress.increment(step=len(chunk)) - fh.write(chunk) - - try: - if chunk.islast: - break - except AttributeError: - raise error.Abort( - _( - b'did not receive indefinite length bytestring ' - b'for file data' - ) - ) - - if bytesremaining: - raise error.Abort( - _( - b'received invalid number of bytes for' - b'file data; expected %d got %d' - ) - % ( - filemeta[b'size'], - filemeta[b'size'] - bytesremaining, - ) - ) - - -def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True): - """Determine which changesets need to be pulled.""" - - if heads: - knownnode = repo.changelog.hasnode - if all(knownnode(head) for head in heads): - return heads, False, heads - - # TODO wire protocol version 2 is capable of more efficient discovery - # than setdiscovery. Consider implementing something better. - common, fetch, remoteheads = setdiscovery.findcommonheads( - repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated - ) - - common = set(common) - remoteheads = set(remoteheads) - - # If a remote head is filtered locally, put it back in the common set. - # See the comment in exchange._pulldiscoverychangegroup() for more. 
- - if fetch and remoteheads: - has_node = repo.unfiltered().changelog.index.has_node - - common |= {head for head in remoteheads if has_node(head)} - - if set(remoteheads).issubset(common): - fetch = [] - - common.discard(repo.nullid) - - return common, fetch, remoteheads - - -def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads): - # TODO consider adding a step here where we obtain the DAG shape first - # (or ask the server to slice changesets into chunks for us) so that - # we can perform multiple fetches in batches. This will facilitate - # resuming interrupted clones, higher server-side cache hit rates due - # to smaller segments, etc. - with remote.commandexecutor() as e: - objs = e.callcommand( - b'changesetdata', - { - b'revisions': [ - { - b'type': b'changesetdagrange', - b'roots': sorted(common), - b'heads': sorted(remoteheads), - } - ], - b'fields': {b'bookmarks', b'parents', b'phase', b'revision'}, - }, - ).result() - - # The context manager waits on all response data when exiting. So - # we need to remain in the context manager in order to stream data. - return _processchangesetdata(repo, tr, objs) - - -def _processchangesetdata(repo, tr, objs): - repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)) - - urepo = repo.unfiltered() - cl = urepo.changelog - - cl.delayupdate(tr) - - # The first emitted object is a header describing the data that - # follows. - meta = next(objs) - - progress = repo.ui.makeprogress( - _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems') - ) - - manifestnodes = {} - added = [] - - def linkrev(node): - repo.ui.debug(b'add changeset %s\n' % short(node)) - # Linkrev for changelog is always self. 
- return len(cl) - - def ondupchangeset(cl, rev): - added.append(cl.node(rev)) - - def onchangeset(cl, rev): - progress.increment() - - revision = cl.changelogrevision(rev) - added.append(cl.node(rev)) - - # We need to preserve the mapping of changelog revision to node - # so we can set the linkrev accordingly when manifests are added. - manifestnodes[rev] = revision.manifest - - repo.register_changeset(rev, revision) - - nodesbyphase = {phase: set() for phase in phases.phasenames.values()} - remotebookmarks = {} - - # addgroup() expects a 7-tuple describing revisions. This normalizes - # the wire data to that format. - # - # This loop also aggregates non-revision metadata, such as phase - # data. - def iterrevisions(): - for cset in objs: - node = cset[b'node'] - - if b'phase' in cset: - nodesbyphase[cset[b'phase']].add(node) - - for mark in cset.get(b'bookmarks', []): - remotebookmarks[mark] = node - - # TODO add mechanism for extensions to examine records so they - # can siphon off custom data fields. - - extrafields = {} - - for field, size in cset.get(b'fieldsfollowing', []): - extrafields[field] = next(objs) - - # Some entries might only be metadata only updates. - if b'revision' not in extrafields: - continue - - data = extrafields[b'revision'] - - yield ( - node, - cset[b'parents'][0], - cset[b'parents'][1], - # Linknode is always itself for changesets. - cset[b'node'], - # We always send full revisions. So delta base is not set. - repo.nullid, - mdiff.trivialdiffheader(len(data)) + data, - # Flags not yet supported. 
- 0, - # Sidedata not yet supported - {}, - ) - - cl.addgroup( - iterrevisions(), - linkrev, - weakref.proxy(tr), - alwayscache=True, - addrevisioncb=onchangeset, - duplicaterevisioncb=ondupchangeset, - ) - - progress.complete() - - return { - b'added': added, - b'nodesbyphase': nodesbyphase, - b'bookmarks': remotebookmarks, - b'manifestnodes': manifestnodes, - } - - -def _fetchmanifests(repo, tr, remote, manifestnodes): - rootmanifest = repo.manifestlog.getstorage(b'') - - # Some manifests can be shared between changesets. Filter out revisions - # we already know about. - fetchnodes = [] - linkrevs = {} - seen = set() - - for clrev, node in sorted(pycompat.iteritems(manifestnodes)): - if node in seen: - continue - - try: - rootmanifest.rev(node) - except error.LookupError: - fetchnodes.append(node) - linkrevs[node] = clrev - - seen.add(node) - - # TODO handle tree manifests - - # addgroup() expects 7-tuple describing revisions. This normalizes - # the wire data to that format. - def iterrevisions(objs, progress): - for manifest in objs: - node = manifest[b'node'] - - extrafields = {} - - for field, size in manifest.get(b'fieldsfollowing', []): - extrafields[field] = next(objs) - - if b'delta' in extrafields: - basenode = manifest[b'deltabasenode'] - delta = extrafields[b'delta'] - elif b'revision' in extrafields: - basenode = repo.nullid - revision = extrafields[b'revision'] - delta = mdiff.trivialdiffheader(len(revision)) + revision - else: - continue - - yield ( - node, - manifest[b'parents'][0], - manifest[b'parents'][1], - # The value passed in is passed to the lookup function passed - # to addgroup(). We already have a map of manifest node to - # changelog revision number. So we just pass in the - # manifest node here and use linkrevs.__getitem__ as the - # resolution function. - node, - basenode, - delta, - # Flags not yet supported. - 0, - # Sidedata not yet supported. 
- {}, - ) - - progress.increment() - - progress = repo.ui.makeprogress( - _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes) - ) - - commandmeta = remote.apidescriptor[b'commands'][b'manifestdata'] - batchsize = commandmeta.get(b'recommendedbatchsize', 10000) - # TODO make size configurable on client? - - # We send commands 1 at a time to the remote. This is not the most - # efficient because we incur a round trip at the end of each batch. - # However, the existing frame-based reactor keeps consuming server - # data in the background. And this results in response data buffering - # in memory. This can consume gigabytes of memory. - # TODO send multiple commands in a request once background buffering - # issues are resolved. - - added = [] - - for i in pycompat.xrange(0, len(fetchnodes), batchsize): - batch = [node for node in fetchnodes[i : i + batchsize]] - if not batch: - continue - - with remote.commandexecutor() as e: - objs = e.callcommand( - b'manifestdata', - { - b'tree': b'', - b'nodes': batch, - b'fields': {b'parents', b'revision'}, - b'haveparents': True, - }, - ).result() - - # Chomp off header object. - next(objs) - - def onchangeset(cl, rev): - added.append(cl.node(rev)) - - rootmanifest.addgroup( - iterrevisions(objs, progress), - linkrevs.__getitem__, - weakref.proxy(tr), - addrevisioncb=onchangeset, - duplicaterevisioncb=onchangeset, - ) - - progress.complete() - - return { - b'added': added, - b'linkrevs': linkrevs, - } - - -def _derivefilesfrommanifests(repo, matcher, manifestnodes): - """Determine what file nodes are relevant given a set of manifest nodes. - - Returns a dict mapping file paths to dicts of file node to first manifest - node. 
- """ - ml = repo.manifestlog - fnodes = collections.defaultdict(dict) - - progress = repo.ui.makeprogress( - _(b'scanning manifests'), total=len(manifestnodes) - ) - - with progress: - for manifestnode in manifestnodes: - m = ml.get(b'', manifestnode) - - # TODO this will pull in unwanted nodes because it takes the storage - # delta into consideration. What we really want is something that - # takes the delta between the manifest's parents. And ideally we - # would ignore file nodes that are known locally. For now, ignore - # both these limitations. This will result in incremental fetches - # requesting data we already have. So this is far from ideal. - md = m.readfast() - - for path, fnode in md.items(): - if matcher(path): - fnodes[path].setdefault(fnode, manifestnode) - - progress.increment() - - return fnodes - - -def _fetchfiles(repo, tr, remote, fnodes, linkrevs): - """Fetch file data from explicit file revisions.""" - - def iterrevisions(objs, progress): - for filerevision in objs: - node = filerevision[b'node'] - - extrafields = {} - - for field, size in filerevision.get(b'fieldsfollowing', []): - extrafields[field] = next(objs) - - if b'delta' in extrafields: - basenode = filerevision[b'deltabasenode'] - delta = extrafields[b'delta'] - elif b'revision' in extrafields: - basenode = repo.nullid - revision = extrafields[b'revision'] - delta = mdiff.trivialdiffheader(len(revision)) + revision - else: - continue - - yield ( - node, - filerevision[b'parents'][0], - filerevision[b'parents'][1], - node, - basenode, - delta, - # Flags not yet supported. - 0, - # Sidedata not yet supported. 
- {}, - ) - - progress.increment() - - progress = repo.ui.makeprogress( - _(b'files'), - unit=_(b'chunks'), - total=sum(len(v) for v in pycompat.itervalues(fnodes)), - ) - - # TODO make batch size configurable - batchsize = 10000 - fnodeslist = [x for x in sorted(fnodes.items())] - - for i in pycompat.xrange(0, len(fnodeslist), batchsize): - batch = [x for x in fnodeslist[i : i + batchsize]] - if not batch: - continue - - with remote.commandexecutor() as e: - fs = [] - locallinkrevs = {} - - for path, nodes in batch: - fs.append( - ( - path, - e.callcommand( - b'filedata', - { - b'path': path, - b'nodes': sorted(nodes), - b'fields': {b'parents', b'revision'}, - b'haveparents': True, - }, - ), - ) - ) - - locallinkrevs[path] = { - node: linkrevs[manifestnode] - for node, manifestnode in pycompat.iteritems(nodes) - } - - for path, f in fs: - objs = f.result() - - # Chomp off header objects. - next(objs) - - store = repo.file(path) - store.addgroup( - iterrevisions(objs, progress), - locallinkrevs[path].__getitem__, - weakref.proxy(tr), - ) - - -def _fetchfilesfromcsets( - repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False -): - """Fetch file data from explicit changeset revisions.""" - - def iterrevisions(objs, remaining, progress): - while remaining: - filerevision = next(objs) - - node = filerevision[b'node'] - - extrafields = {} - - for field, size in filerevision.get(b'fieldsfollowing', []): - extrafields[field] = next(objs) - - if b'delta' in extrafields: - basenode = filerevision[b'deltabasenode'] - delta = extrafields[b'delta'] - elif b'revision' in extrafields: - basenode = repo.nullid - revision = extrafields[b'revision'] - delta = mdiff.trivialdiffheader(len(revision)) + revision - else: - continue - - if b'linknode' in filerevision: - linknode = filerevision[b'linknode'] - else: - linknode = node - - yield ( - node, - filerevision[b'parents'][0], - filerevision[b'parents'][1], - linknode, - basenode, - delta, - # Flags not yet 
supported. - 0, - # Sidedata not yet supported. - {}, - ) - - progress.increment() - remaining -= 1 - - progress = repo.ui.makeprogress( - _(b'files'), - unit=_(b'chunks'), - total=sum(len(v) for v in pycompat.itervalues(fnodes)), - ) - - commandmeta = remote.apidescriptor[b'commands'][b'filesdata'] - batchsize = commandmeta.get(b'recommendedbatchsize', 50000) - - shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features - fields = {b'parents', b'revision'} - clrev = repo.changelog.rev - - # There are no guarantees that we'll have ancestor revisions if - # a) this repo has shallow file storage b) shallow data fetching is enabled. - # Force remote to not delta against possibly unknown revisions when these - # conditions hold. - haveparents = not (shallowfiles or shallow) - - # Similarly, we may not have calculated linkrevs for all incoming file - # revisions. Ask the remote to do work for us in this case. - if not haveparents: - fields.add(b'linknode') - - for i in pycompat.xrange(0, len(csets), batchsize): - batch = [x for x in csets[i : i + batchsize]] - if not batch: - continue - - with remote.commandexecutor() as e: - args = { - b'revisions': [ - { - b'type': b'changesetexplicit', - b'nodes': batch, - } - ], - b'fields': fields, - b'haveparents': haveparents, - } - - if pathfilter: - args[b'pathfilter'] = pathfilter - - objs = e.callcommand(b'filesdata', args).result() - - # First object is an overall header. - overall = next(objs) - - # We have overall['totalpaths'] segments. 
- for i in pycompat.xrange(overall[b'totalpaths']): - header = next(objs) - - path = header[b'path'] - store = repo.file(path) - - linkrevs = { - fnode: manlinkrevs[mnode] - for fnode, mnode in pycompat.iteritems(fnodes[path]) - } - - def getlinkrev(node): - if node in linkrevs: - return linkrevs[node] - else: - return clrev(node) - - store.addgroup( - iterrevisions(objs, header[b'totalitems'], progress), - getlinkrev, - weakref.proxy(tr), - maybemissingparents=shallow, - )
--- a/mercurial/extensions.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/extensions.py Tue Jan 18 10:27:13 2022 +0100 @@ -282,6 +282,7 @@ result = ui.configitems(b"extensions") if whitelist is not None: result = [(k, v) for (k, v) in result if k in whitelist] + result = [(k, v) for (k, v) in result if b':' not in k] newindex = len(_order) ui.log( b'extension', @@ -290,6 +291,8 @@ ) ui.log(b'extension', b'- processing %d entries\n', len(result)) with util.timedcm('load all extensions') as stats: + default_sub_options = ui.configsuboptions(b"extensions", b"*")[1] + for (name, path) in result: if path: if path[0:1] == b'!': @@ -306,18 +309,32 @@ except Exception as inst: msg = stringutil.forcebytestr(inst) if path: - ui.warn( - _(b"*** failed to import extension %s from %s: %s\n") - % (name, path, msg) + error_msg = _( + b'failed to import extension "%s" from %s: %s' ) + error_msg %= (name, path, msg) else: - ui.warn( - _(b"*** failed to import extension %s: %s\n") - % (name, msg) - ) - if isinstance(inst, error.Hint) and inst.hint: - ui.warn(_(b"*** (%s)\n") % inst.hint) - ui.traceback() + error_msg = _(b'failed to import extension "%s": %s') + error_msg %= (name, msg) + + options = default_sub_options.copy() + ext_options = ui.configsuboptions(b"extensions", name)[1] + options.update(ext_options) + if stringutil.parsebool(options.get(b"required", b'no')): + hint = None + if isinstance(inst, error.Hint) and inst.hint: + hint = inst.hint + if hint is None: + hint = _( + b"loading of this extension was required, " + b"see `hg help config.extensions` for details" + ) + raise error.Abort(error_msg, hint=hint) + else: + ui.warn((b"*** %s\n") % error_msg) + if isinstance(inst, error.Hint) and inst.hint: + ui.warn(_(b"*** (%s)\n") % inst.hint) + ui.traceback() ui.log( b'extension',
--- a/mercurial/filelog.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/filelog.py Tue Jan 18 10:27:13 2022 +0100 @@ -97,8 +97,8 @@ def iscensored(self, rev): return self._revlog.iscensored(rev) - def revision(self, node, _df=None, raw=False): - return self._revlog.revision(node, _df=_df, raw=raw) + def revision(self, node, _df=None): + return self._revlog.revision(node, _df=_df) def rawdata(self, node, _df=None): return self._revlog.rawdata(node, _df=_df)
--- a/mercurial/filemerge.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/filemerge.py Tue Jan 18 10:27:13 2022 +0100 @@ -293,9 +293,9 @@ return None # unknown -def _matcheol(file, back): +def _matcheol(file, backup): """Convert EOL markers in a file to match origfile""" - tostyle = _eoltype(back.data()) # No repo.wread filters? + tostyle = _eoltype(backup.data()) # No repo.wread filters? if tostyle: data = util.readfile(file) style = _eoltype(data) @@ -306,7 +306,7 @@ @internaltool(b'prompt', nomerge) -def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None): +def _iprompt(repo, mynode, fcd, fco, fca, toolconf, labels=None): """Asks the user which of the local `p1()` or the other `p2()` version to keep as the merged version.""" ui = repo.ui @@ -347,24 +347,24 @@ choice = [b'local', b'other', b'unresolved'][index] if choice == b'other': - return _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels) + return _iother(repo, mynode, fcd, fco, fca, toolconf, labels) elif choice == b'local': - return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels) + return _ilocal(repo, mynode, fcd, fco, fca, toolconf, labels) elif choice == b'unresolved': - return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels) + return _ifail(repo, mynode, fcd, fco, fca, toolconf, labels) except error.ResponseExpected: ui.write(b"\n") - return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels) + return _ifail(repo, mynode, fcd, fco, fca, toolconf, labels) @internaltool(b'local', nomerge) -def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None): +def _ilocal(repo, mynode, fcd, fco, fca, toolconf, labels=None): """Uses the local `p1()` version of files as the merged version.""" return 0, fcd.isabsent() @internaltool(b'other', nomerge) -def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None): +def _iother(repo, mynode, fcd, fco, fca, toolconf, labels=None): """Uses the other `p2()` version of files as the merged 
version.""" if fco.isabsent(): # local changed, remote deleted -- 'deleted' picked @@ -377,7 +377,7 @@ @internaltool(b'fail', nomerge) -def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None): +def _ifail(repo, mynode, fcd, fco, fca, toolconf, labels=None): """ Rather than attempting to merge files that were modified on both branches, it marks them as unresolved. The resolve command must be @@ -399,11 +399,10 @@ return filectx -def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None): +def _premerge(repo, fcd, fco, fca, toolconf, backup, labels=None): tool, toolpath, binary, symlink, scriptfn = toolconf if symlink or fcd.isabsent() or fco.isabsent(): return 1 - unused, unused, unused, back = files ui = repo.ui @@ -422,14 +421,15 @@ ) if premerge: + if not labels: + labels = _defaultconflictlabels + if len(labels) < 3: + labels.append(b'base') mode = b'merge' - if premerge in {b'keep-merge3', b'keep-mergediff'}: - if not labels: - labels = _defaultconflictlabels - if len(labels) < 3: - labels.append(b'base') - if premerge == b'keep-mergediff': - mode = b'mergediff' + if premerge == b'keep-mergediff': + mode = b'mergediff' + elif premerge == b'keep-merge3': + mode = b'merge3' r = simplemerge.simplemerge( ui, fcd, fca, fco, quiet=True, label=labels, mode=mode ) @@ -438,11 +438,11 @@ return 0 if premerge not in validkeep: # restore from backup and try again - _restorebackup(fcd, back) + _restorebackup(fcd, backup) return 1 # continue merging -def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf): +def _mergecheck(repo, mynode, fcd, fco, fca, toolconf): tool, toolpath, binary, symlink, scriptfn = toolconf uipathfn = scmutil.getuipathfn(repo) if symlink: @@ -463,7 +463,7 @@ return True -def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode): +def _merge(repo, mynode, fcd, fco, fca, toolconf, backup, labels, mode): """ Uses the internal non-interactive simple merge algorithm for merging files. 
It will fail if there are any conflicts and leave markers in @@ -484,13 +484,13 @@ ), precheck=_mergecheck, ) -def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): +def _iunion(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Uses the internal non-interactive simple merge algorithm for merging files. It will use both left and right sides for conflict regions. No markers are inserted.""" return _merge( - repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'union' + repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'union' ) @@ -503,14 +503,14 @@ ), precheck=_mergecheck, ) -def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): +def _imerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Uses the internal non-interactive simple merge algorithm for merging files. It will fail if there are any conflicts and leave markers in the partially merged file. Markers will have two sections, one for each side of merge.""" return _merge( - repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'merge' + repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'merge' ) @@ -523,7 +523,7 @@ ), precheck=_mergecheck, ) -def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): +def _imerge3(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Uses the internal non-interactive simple merge algorithm for merging files. 
It will fail if there are any conflicts and leave markers in @@ -533,7 +533,9 @@ labels = _defaultconflictlabels if len(labels) < 3: labels.append(b'base') - return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels) + return _merge( + repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'merge3' + ) @internaltool( @@ -564,9 +566,7 @@ ), precheck=_mergecheck, ) -def _imerge_diff( - repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None -): +def _imerge_diff(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Uses the internal non-interactive simple merge algorithm for merging files. It will fail if there are any conflicts and leave markers in @@ -578,48 +578,28 @@ if len(labels) < 3: labels.append(b'base') return _merge( - repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'mergediff' + repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'mergediff' ) -def _imergeauto( - repo, - mynode, - orig, - fcd, - fco, - fca, - toolconf, - files, - labels=None, - localorother=None, -): - """ - Generic driver for _imergelocal and _imergeother - """ - assert localorother is not None - r = simplemerge.simplemerge( - repo.ui, fcd, fca, fco, label=labels, localorother=localorother - ) - return True, r - - @internaltool(b'merge-local', mergeonly, precheck=_mergecheck) -def _imergelocal(*args, **kwargs): +def _imergelocal(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Like :merge, but resolve all conflicts non-interactively in favor of the local `p1()` changes.""" - success, status = _imergeauto(localorother=b'local', *args, **kwargs) - return success, status, False + return _merge( + repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'local' + ) @internaltool(b'merge-other', mergeonly, precheck=_mergecheck) -def _imergeother(*args, **kwargs): +def _imergeother(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Like :merge, but resolve all conflicts non-interactively in favor of the other 
`p2()` changes.""" - success, status = _imergeauto(localorother=b'other', *args, **kwargs) - return success, status, False + return _merge( + repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'other' + ) @internaltool( @@ -631,7 +611,7 @@ b"tool of your choice)\n" ), ) -def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): +def _itagmerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Uses the internal tag merge algorithm (experimental). """ @@ -640,7 +620,7 @@ @internaltool(b'dump', fullmerge, binary=True, symlink=True) -def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): +def _idump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Creates three versions of the files to merge, containing the contents of local, other and base. These files can then be used to @@ -669,16 +649,14 @@ @internaltool(b'forcedump', mergeonly, binary=True, symlink=True) -def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): +def _forcedump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): """ Creates three versions of the files as same as :dump, but omits premerge. """ - return _idump( - repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=labels - ) + return _idump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=labels) -def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): +def _xmergeimm(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None): # In-memory merge simply raises an exception on all external merge tools, # for now. 
# @@ -746,7 +724,7 @@ ui.status(t.renderdefault(props)) -def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels): +def _xmerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels): tool, toolpath, binary, symlink, scriptfn = toolconf uipathfn = scmutil.getuipathfn(repo) if fcd.isabsent() or fco.isabsent(): @@ -755,12 +733,11 @@ % (tool, uipathfn(fcd.path())) ) return False, 1, None - unused, unused, unused, back = files localpath = _workingpath(repo, fcd) args = _toolstr(repo.ui, tool, b"args") with _maketempfiles( - repo, fco, fca, repo.wvfs.join(back.path()), b"$output" in args + repo, fco, fca, repo.wvfs.join(backup.path()), b"$output" in args ) as temppaths: basepath, otherpath, localoutputpath = temppaths outpath = b"" @@ -846,7 +823,7 @@ return True, r, False -def _formatconflictmarker(ctx, template, label, pad): +def _formatlabel(ctx, template, label, pad): """Applies the given template to the ctx, prefixed by the label. Pad is the minimum width of the label prefix, so that multiple markers @@ -893,11 +870,11 @@ pad = max(len(l) for l in labels) newlabels = [ - _formatconflictmarker(cd, tmpl, labels[0], pad), - _formatconflictmarker(co, tmpl, labels[1], pad), + _formatlabel(cd, tmpl, labels[0], pad), + _formatlabel(co, tmpl, labels[1], pad), ] if len(labels) > 2: - newlabels.append(_formatconflictmarker(ca, tmpl, labels[2], pad)) + newlabels.append(_formatlabel(ca, tmpl, labels[2], pad)) return newlabels @@ -918,13 +895,13 @@ } -def _restorebackup(fcd, back): +def _restorebackup(fcd, backup): # TODO: Add a workingfilectx.write(otherfilectx) path so we can use # util.copy here instead. - fcd.write(back.data(), fcd.flags()) + fcd.write(backup.data(), fcd.flags()) -def _makebackup(repo, ui, wctx, fcd, premerge): +def _makebackup(repo, ui, wctx, fcd): """Makes and returns a filectx-like object for ``fcd``'s backup file. 
In addition to preserving the user's pre-existing modifications to `fcd` @@ -932,8 +909,8 @@ merge changed anything, and determine what line endings the new file should have. - Backups only need to be written once (right before the premerge) since their - content doesn't change afterwards. + Backups only need to be written once since their content doesn't change + afterwards. """ if fcd.isabsent(): return None @@ -941,32 +918,30 @@ # merge -> filemerge). (I suspect the fileset import is the weakest link) from . import context - back = scmutil.backuppath(ui, repo, fcd.path()) - inworkingdir = back.startswith(repo.wvfs.base) and not back.startswith( + backup = scmutil.backuppath(ui, repo, fcd.path()) + inworkingdir = backup.startswith(repo.wvfs.base) and not backup.startswith( repo.vfs.base ) if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir: # If the backup file is to be in the working directory, and we're # merging in-memory, we must redirect the backup to the memory context # so we don't disturb the working directory. - relpath = back[len(repo.wvfs.base) + 1 :] - if premerge: - wctx[relpath].write(fcd.data(), fcd.flags()) + relpath = backup[len(repo.wvfs.base) + 1 :] + wctx[relpath].write(fcd.data(), fcd.flags()) return wctx[relpath] else: - if premerge: - # Otherwise, write to wherever path the user specified the backups - # should go. We still need to switch based on whether the source is - # in-memory so we can use the fast path of ``util.copy`` if both are - # on disk. - if isinstance(fcd, context.overlayworkingfilectx): - util.writefile(back, fcd.data()) - else: - a = _workingpath(repo, fcd) - util.copyfile(a, back) + # Otherwise, write to wherever path the user specified the backups + # should go. We still need to switch based on whether the source is + # in-memory so we can use the fast path of ``util.copy`` if both are + # on disk. 
+ if isinstance(fcd, context.overlayworkingfilectx): + util.writefile(backup, fcd.data()) + else: + a = _workingpath(repo, fcd) + util.copyfile(a, backup) # A arbitraryfilectx is returned, so we can run the same functions on # the backup context regardless of where it lives. - return context.arbitraryfilectx(back, repo=repo) + return context.arbitraryfilectx(backup, repo=repo) @contextlib.contextmanager @@ -995,7 +970,7 @@ def tempfromcontext(prefix, ctx): f, name = maketempfrompath(prefix, ctx.path()) - data = repo.wwritedata(ctx.path(), ctx.data()) + data = ctx.decodeddata() f.write(data) f.close() return name @@ -1027,10 +1002,9 @@ util.unlink(d) -def _filemerge(premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None): +def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None): """perform a 3-way merge in the working directory - premerge = whether this is a premerge mynode = parent node before merge orig = original local filename before merge fco = other file context @@ -1041,7 +1015,7 @@ a boolean indicating whether the file was deleted from disk.""" if not fco.cmp(fcd): # files identical? 
- return True, None, False + return None, False ui = repo.ui fd = fcd.path() @@ -1099,31 +1073,28 @@ toolconf = tool, toolpath, binary, symlink, scriptfn if mergetype == nomerge: - r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels) - return True, r, deleted + return func(repo, mynode, fcd, fco, fca, toolconf, labels) - if premerge: - if orig != fco.path(): - ui.status( - _(b"merging %s and %s to %s\n") - % (uipathfn(orig), uipathfn(fco.path()), fduipath) - ) - else: - ui.status(_(b"merging %s\n") % fduipath) + if orig != fco.path(): + ui.status( + _(b"merging %s and %s to %s\n") + % (uipathfn(orig), uipathfn(fco.path()), fduipath) + ) + else: + ui.status(_(b"merging %s\n") % fduipath) ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca)) - if precheck and not precheck(repo, mynode, orig, fcd, fco, fca, toolconf): + if precheck and not precheck(repo, mynode, fcd, fco, fca, toolconf): if onfailure: if wctx.isinmemory(): raise error.InMemoryMergeConflictsError( b'in-memory merge does not support merge conflicts' ) ui.warn(onfailure % fduipath) - return True, 1, False + return 1, False - back = _makebackup(repo, ui, wctx, fcd, premerge) - files = (None, None, None, back) + backup = _makebackup(repo, ui, wctx, fcd) r = 1 try: internalmarkerstyle = ui.config(b'ui', b'mergemarkers') @@ -1140,7 +1111,7 @@ repo, fcd, fco, fca, labels, tool=tool ) - if premerge and mergetype == fullmerge: + if mergetype == fullmerge: # conflict markers generated by premerge will use 'detailed' # settings if either ui.mergemarkers or the tool's mergemarkers # setting is 'detailed'. 
This way tools can have basic labels in @@ -1158,25 +1129,25 @@ ) r = _premerge( - repo, fcd, fco, fca, toolconf, files, labels=premergelabels + repo, fcd, fco, fca, toolconf, backup, labels=premergelabels ) - # complete if premerge successful (r is 0) - return not r, r, False + # we're done if premerge was successful (r is 0) + if not r: + return r, False needcheck, r, deleted = func( repo, mynode, - orig, fcd, fco, fca, toolconf, - files, + backup, labels=formattedlabels, ) if needcheck: - r = _check(repo, r, ui, tool, fcd, files) + r = _check(repo, r, ui, tool, fcd, backup) if r: if onfailure: @@ -1189,10 +1160,10 @@ ui.warn(onfailure % fduipath) _onfilemergefailure(ui) - return True, r, deleted + return r, deleted finally: - if not r and back is not None: - back.remove() + if not r and backup is not None: + backup.remove() def _haltmerge(): @@ -1225,10 +1196,9 @@ ) -def _check(repo, r, ui, tool, fcd, files): +def _check(repo, r, ui, tool, fcd, backup): fd = fcd.path() uipathfn = scmutil.getuipathfn(repo) - unused, unused, unused, back = files if not r and ( _toolbool(ui, tool, b"checkconflicts") @@ -1255,7 +1225,7 @@ or b'changed' in _toollist(ui, tool, b"check") ) ): - if back is not None and not fcd.cmp(back): + if backup is not None and not fcd.cmp(backup): if ui.promptchoice( _( b" output file %s appears unchanged\n" @@ -1267,8 +1237,8 @@ ): r = 1 - if back is not None and _toolbool(ui, tool, b"fixeol"): - _matcheol(_workingpath(repo, fcd), back) + if backup is not None and _toolbool(ui, tool, b"fixeol"): + _matcheol(_workingpath(repo, fcd), backup) return r @@ -1277,18 +1247,6 @@ return repo.wjoin(ctx.path()) -def premerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None): - return _filemerge( - True, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels - ) - - -def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None): - return _filemerge( - False, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels - ) - - def loadinternalmerge(ui, 
extname, registrarobj): """Load internal merge tool from specified registrarobj""" for name, func in pycompat.iteritems(registrarobj._table):
--- a/mercurial/helptext/config.txt Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/helptext/config.txt Tue Jan 18 10:27:13 2022 +0100 @@ -513,13 +513,18 @@ ``update.check`` Determines what level of checking :hg:`update` will perform before moving to a destination revision. Valid values are ``abort``, ``none``, - ``linear``, and ``noconflict``. ``abort`` always fails if the working - directory has uncommitted changes. ``none`` performs no checking, and may - result in a merge with uncommitted changes. ``linear`` allows any update - as long as it follows a straight line in the revision history, and may - trigger a merge with uncommitted changes. ``noconflict`` will allow any - update which would not trigger a merge with uncommitted changes, if any - are present. + ``linear``, and ``noconflict``. + + - ``abort`` always fails if the working directory has uncommitted changes. + + - ``none`` performs no checking, and may result in a merge with uncommitted changes. + + - ``linear`` allows any update as long as it follows a straight line in the + revision history, and may trigger a merge with uncommitted changes. + + - ``noconflict`` will allow any update which would not trigger a merge with + uncommitted changes, if any are present. + (default: ``linear``) ``update.requiredest`` @@ -850,6 +855,24 @@ # (this extension will get loaded from the file specified) myfeature = ~/.hgext/myfeature.py +If an extension fails to load, a warning will be issued, and Mercurial will +proceed. To enforce that an extension must be loaded, one can set the `required` +suboption in the config:: + + [extensions] + myfeature = ~/.hgext/myfeature.py + myfeature:required = yes + +To debug extension loading issue, one can add `--traceback` to their mercurial +invocation. + +A default setting can we set using the special `*` extension key:: + + [extensions] + *:required = yes + myfeature = ~/.hgext/myfeature.py + rebase= + ``format`` ----------
--- a/mercurial/helptext/internals/wireprotocol.txt Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/helptext/internals/wireprotocol.txt Tue Jan 18 10:27:13 2022 +0100 @@ -332,95 +332,6 @@ after responses. In other words, the length of the response contains the trailing ``\n``. -Clients supporting version 2 of the SSH transport send a line beginning -with ``upgrade`` before the ``hello`` and ``between`` commands. The line -(which isn't a well-formed command line because it doesn't consist of a -single command name) serves to both communicate the client's intent to -switch to transport version 2 (transports are version 1 by default) as -well as to advertise the client's transport-level capabilities so the -server may satisfy that request immediately. - -The upgrade line has the form: - - upgrade <token> <transport capabilities> - -That is the literal string ``upgrade`` followed by a space, followed by -a randomly generated string, followed by a space, followed by a string -denoting the client's transport capabilities. - -The token can be anything. However, a random UUID is recommended. (Use -of version 4 UUIDs is recommended because version 1 UUIDs can leak the -client's MAC address.) - -The transport capabilities string is a URL/percent encoded string -containing key-value pairs defining the client's transport-level -capabilities. The following capabilities are defined: - -proto - A comma-delimited list of transport protocol versions the client - supports. e.g. ``ssh-v2``. - -If the server does not recognize the ``upgrade`` line, it should issue -an empty response and continue processing the ``hello`` and ``between`` -commands. 
Here is an example handshake between a version 2 aware client -and a non version 2 aware server: - - c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2 - c: hello\n - c: between\n - c: pairs 81\n - c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - s: 0\n - s: 324\n - s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n - s: 1\n - s: \n - -(The initial ``0\n`` line from the server indicates an empty response to -the unknown ``upgrade ..`` command/line.) - -If the server recognizes the ``upgrade`` line and is willing to satisfy that -upgrade request, it replies to with a payload of the following form: - - upgraded <token> <transport name>\n - -This line is the literal string ``upgraded``, a space, the token that was -specified by the client in its ``upgrade ...`` request line, a space, and the -name of the transport protocol that was chosen by the server. The transport -name MUST match one of the names the client specified in the ``proto`` field -of its ``upgrade ...`` request line. - -If a server issues an ``upgraded`` response, it MUST also read and ignore -the lines associated with the ``hello`` and ``between`` command requests -that were issued by the server. It is assumed that the negotiated transport -will respond with equivalent requested information following the transport -handshake. - -All data following the ``\n`` terminating the ``upgraded`` line is the -domain of the negotiated transport. It is common for the data immediately -following to contain additional metadata about the state of the transport and -the server. However, this isn't strictly speaking part of the transport -handshake and isn't covered by this section. 
- -Here is an example handshake between a version 2 aware client and a version -2 aware server: - - c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2 - c: hello\n - c: between\n - c: pairs 81\n - c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n - s: <additional transport specific data> - -The client-issued token that is echoed in the response provides a more -resilient mechanism for differentiating *banner* output from Mercurial -output. In version 1, properly formatted banner output could get confused -for Mercurial server output. By submitting a randomly generated token -that is then present in the response, the client can look for that token -in response lines and have reasonable certainty that the line did not -originate from a *banner* message. - SSH Version 1 Transport ----------------------- @@ -488,31 +399,6 @@ should issue a ``protocaps`` command after the initial handshake to annonunce its own capabilities. The client capabilities are persistent. -SSH Version 2 Transport ------------------------ - -**Experimental and under development** - -Version 2 of the SSH transport behaves identically to version 1 of the SSH -transport with the exception of handshake semantics. See above for how -version 2 of the SSH transport is negotiated. - -Immediately following the ``upgraded`` line signaling a switch to version -2 of the SSH protocol, the server automatically sends additional details -about the capabilities of the remote server. This has the form: - - <integer length of value>\n - capabilities: ...\n - -e.g. - - s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n - s: 240\n - s: capabilities: known getbundle batch ...\n - -Following capabilities advertisement, the peers communicate using version -1 of the SSH transport. - Capabilities ============
--- a/mercurial/helptext/patterns.txt Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/helptext/patterns.txt Tue Jan 18 10:27:13 2022 +0100 @@ -1,8 +1,10 @@ Mercurial accepts several notations for identifying one or more files at a time. -By default, Mercurial treats filenames as shell-style extended glob -patterns. +By default, Mercurial treats filenames verbatim without pattern +matching, relative to the current working directory. Note that your +system shell might perform pattern matching of its own before passing +filenames into Mercurial. Alternate pattern notations must be specified explicitly.
--- a/mercurial/hg.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/hg.py Tue Jan 18 10:27:13 2022 +0100 @@ -132,13 +132,6 @@ return revs, revs[0] -def parseurl(path, branches=None): - '''parse url#branch, returning (url, (branch, branches))''' - msg = b'parseurl(...) moved to mercurial.utils.urlutil' - util.nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.parseurl(path, branches=branches) - - schemes = { b'bundle': bundlerepo, b'union': unionrepo,
--- a/mercurial/hgweb/hgweb_mod.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/hgweb/hgweb_mod.py Tue Jan 18 10:27:13 2022 +0100 @@ -366,17 +366,6 @@ # replace it. res.headers[b'Content-Security-Policy'] = rctx.csp - # /api/* is reserved for various API implementations. Dispatch - # accordingly. But URL paths can conflict with subrepos and virtual - # repos in hgwebdir. So until we have a workaround for this, only - # expose the URLs if the feature is enabled. - apienabled = rctx.repo.ui.configbool(b'experimental', b'web.apiserver') - if apienabled and req.dispatchparts and req.dispatchparts[0] == b'api': - wireprotoserver.handlewsgiapirequest( - rctx, req, res, self.check_perm - ) - return res.sendresponse() - handled = wireprotoserver.handlewsgirequest( rctx, req, res, self.check_perm )
--- a/mercurial/hgweb/webcommands.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/hgweb/webcommands.py Tue Jan 18 10:27:13 2022 +0100 @@ -519,6 +519,7 @@ def decodepath(path): + # type: (bytes) -> bytes """Hook for mapping a path in the repository to a path in the working copy. @@ -616,7 +617,9 @@ yield { b"parity": next(parity), b"path": path, + # pytype: disable=wrong-arg-types b"emptydirs": b"/".join(emptydirs), + # pytype: enable=wrong-arg-types b"basename": d, }
--- a/mercurial/httppeer.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/httppeer.py Tue Jan 18 10:27:13 2022 +0100 @@ -13,7 +13,6 @@ import os import socket import struct -import weakref from .i18n import _ from .pycompat import getattr @@ -25,21 +24,9 @@ statichttprepo, url as urlmod, util, - wireprotoframing, - wireprototypes, wireprotov1peer, - wireprotov2peer, - wireprotov2server, ) -from .interfaces import ( - repository, - util as interfaceutil, -) -from .utils import ( - cborutil, - stringutil, - urlutil, -) +from .utils import urlutil httplib = util.httplib urlerr = util.urlerr @@ -331,9 +318,7 @@ self.respurl = respurl -def parsev1commandresponse( - ui, baseurl, requrl, qs, resp, compressible, allowcbor=False -): +def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible): # record the url we got redirected to redirected = False respurl = pycompat.bytesurl(resp.geturl()) @@ -376,17 +361,6 @@ try: subtype = proto.split(b'-', 1)[1] - # Unless we end up supporting CBOR in the legacy wire protocol, - # this should ONLY be encountered for the initial capabilities - # request during handshake. 
- if subtype == b'cbor': - if allowcbor: - return respurl, proto, resp - else: - raise error.RepoError( - _(b'unexpected CBOR response from server') - ) - version_info = tuple([int(n) for n in subtype.split(b'.')]) except ValueError: raise error.RepoError( @@ -564,85 +538,6 @@ raise exception -def sendv2request( - ui, opener, requestbuilder, apiurl, permission, requests, redirect -): - wireprotoframing.populatestreamencoders() - - uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order') - - if uiencoders: - encoders = [] - - for encoder in uiencoders: - if encoder not in wireprotoframing.STREAM_ENCODERS: - ui.warn( - _( - b'wire protocol version 2 encoder referenced in ' - b'config (%s) is not known; ignoring\n' - ) - % encoder - ) - else: - encoders.append(encoder) - - else: - encoders = wireprotoframing.STREAM_ENCODERS_ORDER - - reactor = wireprotoframing.clientreactor( - ui, - hasmultiplesend=False, - buffersends=True, - clientcontentencoders=encoders, - ) - - handler = wireprotov2peer.clienthandler( - ui, reactor, opener=opener, requestbuilder=requestbuilder - ) - - url = b'%s/%s' % (apiurl, permission) - - if len(requests) > 1: - url += b'/multirequest' - else: - url += b'/%s' % requests[0][0] - - ui.debug(b'sending %d commands\n' % len(requests)) - for command, args, f in requests: - ui.debug( - b'sending command %s: %s\n' - % (command, stringutil.pprint(args, indent=2)) - ) - assert not list( - handler.callcommand(command, args, f, redirect=redirect) - ) - - # TODO stream this. 
- body = b''.join(map(bytes, handler.flushcommands())) - - # TODO modify user-agent to reflect v2 - headers = { - 'Accept': wireprotov2server.FRAMINGTYPE, - 'Content-Type': wireprotov2server.FRAMINGTYPE, - } - - req = requestbuilder(pycompat.strurl(url), body, headers) - req.add_unredirected_header('Content-Length', '%d' % len(body)) - - try: - res = opener.open(req) - except urlerr.httperror as e: - if e.code == 401: - raise error.Abort(_(b'authorization failed')) - - raise - except httplib.HTTPException as e: - ui.traceback() - raise IOError(None, e) - - return handler, res - - class queuedcommandfuture(pycompat.futures.Future): """Wraps result() on command futures to trigger submission on call.""" @@ -657,302 +552,6 @@ return self.result(timeout) -@interfaceutil.implementer(repository.ipeercommandexecutor) -class httpv2executor(object): - def __init__( - self, ui, opener, requestbuilder, apiurl, descriptor, redirect - ): - self._ui = ui - self._opener = opener - self._requestbuilder = requestbuilder - self._apiurl = apiurl - self._descriptor = descriptor - self._redirect = redirect - self._sent = False - self._closed = False - self._neededpermissions = set() - self._calls = [] - self._futures = weakref.WeakSet() - self._responseexecutor = None - self._responsef = None - - def __enter__(self): - return self - - def __exit__(self, exctype, excvalue, exctb): - self.close() - - def callcommand(self, command, args): - if self._sent: - raise error.ProgrammingError( - b'callcommand() cannot be used after commands are sent' - ) - - if self._closed: - raise error.ProgrammingError( - b'callcommand() cannot be used after close()' - ) - - # The service advertises which commands are available. So if we attempt - # to call an unknown command or pass an unknown argument, we can screen - # for this. 
- if command not in self._descriptor[b'commands']: - raise error.ProgrammingError( - b'wire protocol command %s is not available' % command - ) - - cmdinfo = self._descriptor[b'commands'][command] - unknownargs = set(args.keys()) - set(cmdinfo.get(b'args', {})) - - if unknownargs: - raise error.ProgrammingError( - b'wire protocol command %s does not accept argument: %s' - % (command, b', '.join(sorted(unknownargs))) - ) - - self._neededpermissions |= set(cmdinfo[b'permissions']) - - # TODO we /could/ also validate types here, since the API descriptor - # includes types... - - f = pycompat.futures.Future() - - # Monkeypatch it so result() triggers sendcommands(), otherwise result() - # could deadlock. - f.__class__ = queuedcommandfuture - f._peerexecutor = self - - self._futures.add(f) - self._calls.append((command, args, f)) - - return f - - def sendcommands(self): - if self._sent: - return - - if not self._calls: - return - - self._sent = True - - # Unhack any future types so caller sees a clean type and so we - # break reference cycle. - for f in self._futures: - if isinstance(f, queuedcommandfuture): - f.__class__ = pycompat.futures.Future - f._peerexecutor = None - - # Mark the future as running and filter out cancelled futures. - calls = [ - (command, args, f) - for command, args, f in self._calls - if f.set_running_or_notify_cancel() - ] - - # Clear out references, prevent improper object usage. 
- self._calls = None - - if not calls: - return - - permissions = set(self._neededpermissions) - - if b'push' in permissions and b'pull' in permissions: - permissions.remove(b'pull') - - if len(permissions) > 1: - raise error.RepoError( - _(b'cannot make request requiring multiple permissions: %s') - % _(b', ').join(sorted(permissions)) - ) - - permission = { - b'push': b'rw', - b'pull': b'ro', - }[permissions.pop()] - - handler, resp = sendv2request( - self._ui, - self._opener, - self._requestbuilder, - self._apiurl, - permission, - calls, - self._redirect, - ) - - # TODO we probably want to validate the HTTP code, media type, etc. - - self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1) - self._responsef = self._responseexecutor.submit( - self._handleresponse, handler, resp - ) - - def close(self): - if self._closed: - return - - self.sendcommands() - - self._closed = True - - if not self._responsef: - return - - # TODO ^C here may not result in immediate program termination. - - try: - self._responsef.result() - finally: - self._responseexecutor.shutdown(wait=True) - self._responsef = None - self._responseexecutor = None - - # If any of our futures are still in progress, mark them as - # errored, otherwise a result() could wait indefinitely. - for f in self._futures: - if not f.done(): - f.set_exception( - error.ResponseError(_(b'unfulfilled command response')) - ) - - self._futures = None - - def _handleresponse(self, handler, resp): - # Called in a thread to read the response. 
- - while handler.readdata(resp): - pass - - -@interfaceutil.implementer(repository.ipeerv2) -class httpv2peer(object): - - limitedarguments = False - - def __init__( - self, ui, repourl, apipath, opener, requestbuilder, apidescriptor - ): - self.ui = ui - self.apidescriptor = apidescriptor - - if repourl.endswith(b'/'): - repourl = repourl[:-1] - - self._url = repourl - self._apipath = apipath - self._apiurl = b'%s/%s' % (repourl, apipath) - self._opener = opener - self._requestbuilder = requestbuilder - - self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor) - - # Start of ipeerconnection. - - def url(self): - return self._url - - def local(self): - return None - - def peer(self): - return self - - def canpush(self): - # TODO change once implemented. - return False - - def close(self): - self.ui.note( - _( - b'(sent %d HTTP requests and %d bytes; ' - b'received %d bytes in responses)\n' - ) - % ( - self._opener.requestscount, - self._opener.sentbytescount, - self._opener.receivedbytescount, - ) - ) - - # End of ipeerconnection. - - # Start of ipeercapabilities. - - def capable(self, name): - # The capabilities used internally historically map to capabilities - # advertised from the "capabilities" wire protocol command. However, - # version 2 of that command works differently. - - # Maps to commands that are available. - if name in ( - b'branchmap', - b'getbundle', - b'known', - b'lookup', - b'pushkey', - ): - return True - - # Other concepts. - if name in (b'bundle2',): - return True - - # Alias command-* to presence of command of that name. - if name.startswith(b'command-'): - return name[len(b'command-') :] in self.apidescriptor[b'commands'] - - return False - - def requirecap(self, name, purpose): - if self.capable(name): - return - - raise error.CapabilityError( - _( - b'cannot %s; client or remote repository does not support the ' - b'\'%s\' capability' - ) - % (purpose, name) - ) - - # End of ipeercapabilities. 
- - def _call(self, name, **args): - with self.commandexecutor() as e: - return e.callcommand(name, args).result() - - def commandexecutor(self): - return httpv2executor( - self.ui, - self._opener, - self._requestbuilder, - self._apiurl, - self.apidescriptor, - self._redirect, - ) - - -# Registry of API service names to metadata about peers that handle it. -# -# The following keys are meaningful: -# -# init -# Callable receiving (ui, repourl, servicepath, opener, requestbuilder, -# apidescriptor) to create a peer. -# -# priority -# Integer priority for the service. If we could choose from multiple -# services, we choose the one with the highest priority. -API_PEERS = { - wireprototypes.HTTP_WIREPROTO_V2: { - b'init': httpv2peer, - b'priority': 50, - }, -} - - def performhandshake(ui, url, opener, requestbuilder): # The handshake is a request to the capabilities command. @@ -963,28 +562,6 @@ args = {} - # The client advertises support for newer protocols by adding an - # X-HgUpgrade-* header with a list of supported APIs and an - # X-HgProto-* header advertising which serializing formats it supports. - # We only support the HTTP version 2 transport and CBOR responses for - # now. - advertisev2 = ui.configbool(b'experimental', b'httppeer.advertise-v2') - - if advertisev2: - args[b'headers'] = { - 'X-HgProto-1': 'cbor', - } - - args[b'headers'].update( - encodevalueinheaders( - b' '.join(sorted(API_PEERS)), - b'X-HgUpgrade', - # We don't know the header limit this early. - # So make it small. - 1024, - ) - ) - req, requrl, qs = makev1commandrequest( ui, requestbuilder, caps, capable, url, b'capabilities', args ) @@ -1004,7 +581,7 @@ # redirect that drops the query string to "just work." 
try: respurl, ct, resp = parsev1commandresponse( - ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2 + ui, url, requrl, qs, resp, compressible=False ) except RedirectedRepoError as e: req, requrl, qs = makev1commandrequest( @@ -1012,7 +589,7 @@ ) resp = sendrequest(ui, opener, req) respurl, ct, resp = parsev1commandresponse( - ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2 + ui, url, requrl, qs, resp, compressible=False ) try: @@ -1023,29 +600,7 @@ if not ct.startswith(b'application/mercurial-'): raise error.ProgrammingError(b'unexpected content-type: %s' % ct) - if advertisev2: - if ct == b'application/mercurial-cbor': - try: - info = cborutil.decodeall(rawdata)[0] - except cborutil.CBORDecodeError: - raise error.Abort( - _(b'error decoding CBOR from remote server'), - hint=_( - b'try again and consider contacting ' - b'the server operator' - ), - ) - - # We got a legacy response. That's fine. - elif ct in (b'application/mercurial-0.1', b'application/mercurial-0.2'): - info = {b'v1capabilities': set(rawdata.split())} - - else: - raise error.RepoError( - _(b'unexpected response type from server: %s') % ct - ) - else: - info = {b'v1capabilities': set(rawdata.split())} + info = {b'v1capabilities': set(rawdata.split())} return respurl, info @@ -1073,29 +628,6 @@ respurl, info = performhandshake(ui, url, opener, requestbuilder) - # Given the intersection of APIs that both we and the server support, - # sort by their advertised priority and pick the first one. - # - # TODO consider making this request-based and interface driven. For - # example, the caller could say "I want a peer that does X." It's quite - # possible that not all peers would do that. Since we know the service - # capabilities, we could filter out services not meeting the - # requirements. Possibly by consulting the interfaces defined by the - # peer type. 
- apipeerchoices = set(info.get(b'apis', {}).keys()) & set(API_PEERS.keys()) - - preferredchoices = sorted( - apipeerchoices, key=lambda x: API_PEERS[x][b'priority'], reverse=True - ) - - for service in preferredchoices: - apipath = b'%s/%s' % (info[b'apibase'].rstrip(b'/'), service) - - return API_PEERS[service][b'init']( - ui, respurl, apipath, opener, requestbuilder, info[b'apis'][service] - ) - - # Failed to construct an API peer. Fall back to legacy. return httppeer( ui, path, respurl, opener, requestbuilder, info[b'v1capabilities'] )
--- a/mercurial/interfaces/dirstate.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/interfaces/dirstate.py Tue Jan 18 10:27:13 2022 +0100 @@ -66,17 +66,6 @@ def pathto(f, cwd=None): pass - def __getitem__(key): - """Return the current state of key (a filename) in the dirstate. - - States are: - n normal - m needs merging - r marked for removal - a marked for addition - ? not tracked - """ - def __contains__(key): """Check if bytestring `key` is known to the dirstate."""
--- a/mercurial/interfaces/repository.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/interfaces/repository.py Tue Jan 18 10:27:13 2022 +0100 @@ -1278,7 +1278,7 @@ def linkrev(rev): """Obtain the changeset revision number a revision is linked to.""" - def revision(node, _df=None, raw=False): + def revision(node, _df=None): """Obtain fulltext data for a node.""" def rawdata(node, _df=None):
--- a/mercurial/localrepo.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/localrepo.py Tue Jan 18 10:27:13 2022 +0100 @@ -1,4 +1,5 @@ # localrepo.py - read/write repository class for mercurial +# coding: utf-8 # # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> # @@ -3566,16 +3567,6 @@ Extensions can wrap this function to specify custom requirements for new repositories. """ - # If the repo is being created from a shared repository, we copy - # its requirements. - if b'sharedrepo' in createopts: - requirements = set(createopts[b'sharedrepo'].requirements) - if createopts.get(b'sharedrelative'): - requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT) - else: - requirements.add(requirementsmod.SHARED_REQUIREMENT) - - return requirements if b'backend' not in createopts: raise error.ProgrammingError( @@ -3671,6 +3662,36 @@ if ui.configbool(b'format', b'use-share-safe'): requirements.add(requirementsmod.SHARESAFE_REQUIREMENT) + # if we are creating a share-repo¹ we have to handle requirement + # differently. + # + # [1] (i.e. reusing the store from another repository, just having a + # working copy) + if b'sharedrepo' in createopts: + source_requirements = set(createopts[b'sharedrepo'].requirements) + + if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements: + # share to an old school repository, we have to copy the + # requirements and hope for the best. + requirements = source_requirements + else: + # We have control on the working copy only, so "copy" the non + # working copy part over, ignoring previous logic. + to_drop = set() + for req in requirements: + if req in requirementsmod.WORKING_DIR_REQUIREMENTS: + continue + if req in source_requirements: + continue + to_drop.add(req) + requirements -= to_drop + requirements |= source_requirements + + if createopts.get(b'sharedrelative'): + requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT) + else: + requirements.add(requirementsmod.SHARED_REQUIREMENT) + return requirements
--- a/mercurial/logcmdutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/logcmdutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -62,9 +62,9 @@ try: limit = int(limit) except ValueError: - raise error.Abort(_(b'limit must be a positive integer')) + raise error.InputError(_(b'limit must be a positive integer')) if limit <= 0: - raise error.Abort(_(b'limit must be positive')) + raise error.InputError(_(b'limit must be positive')) else: limit = None return limit @@ -831,7 +831,7 @@ # take the slow path. found = slowpath = True if not found: - raise error.Abort( + raise error.StateError( _( b'cannot follow file not in any of the specified ' b'revisions: "%s"' @@ -847,7 +847,7 @@ slowpath = True continue else: - raise error.Abort( + raise error.StateError( _( b'cannot follow file not in parent ' b'revision: "%s"' @@ -858,7 +858,7 @@ if not filelog: # A file exists in wdir but not in history, which means # the file isn't committed yet. - raise error.Abort( + raise error.StateError( _(b'cannot follow nonexistent file: "%s"') % f ) else: @@ -1108,11 +1108,13 @@ try: pat, linerange = pat.rsplit(b',', 1) except ValueError: - raise error.Abort(_(b'malformatted line-range pattern %s') % pat) + raise error.InputError( + _(b'malformatted line-range pattern %s') % pat + ) try: fromline, toline = map(int, linerange.split(b':')) except ValueError: - raise error.Abort(_(b"invalid line range for %s") % pat) + raise error.InputError(_(b"invalid line range for %s") % pat) msg = _(b"line range pattern '%s' must match exactly one file") % pat fname = scmutil.parsefollowlinespattern(repo, None, pat, msg) linerangebyfname.append( @@ -1136,7 +1138,7 @@ linerangesbyrev = {} for fname, (fromline, toline) in _parselinerangeopt(repo, opts): if fname not in wctx: - raise error.Abort( + raise error.StateError( _(b'cannot follow file not in parent revision: "%s"') % fname ) fctx = wctx.filectx(fname) @@ -1271,7 +1273,7 @@ def checkunsupportedgraphflags(pats, opts): for op in [b"newest_first"]: if op 
in opts and opts[op]: - raise error.Abort( + raise error.InputError( _(b"-G/--graph option is incompatible with --%s") % op.replace(b"_", b"-") )
--- a/mercurial/manifest.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/manifest.py Tue Jan 18 10:27:13 2022 +0100 @@ -1819,8 +1819,8 @@ def checksize(self): return self._revlog.checksize() - def revision(self, node, _df=None, raw=False): - return self._revlog.revision(node, _df=_df, raw=raw) + def revision(self, node, _df=None): + return self._revlog.revision(node, _df=_df) def rawdata(self, node, _df=None): return self._revlog.rawdata(node, _df=_df)
--- a/mercurial/mdiff.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/mdiff.py Tue Jan 18 10:27:13 2022 +0100 @@ -84,7 +84,7 @@ try: self.context = int(self.context) except ValueError: - raise error.Abort( + raise error.InputError( _(b'diff context lines count must be an integer, not %r') % pycompat.bytestr(self.context) )
--- a/mercurial/merge.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/merge.py Tue Jan 18 10:27:13 2022 +0100 @@ -542,7 +542,7 @@ hint=_(b'merging in the other direction may work'), ) else: - raise error.Abort( + raise error.StateError( _(b'conflict in file \'%s\' is outside narrow clone') % f ) @@ -1404,6 +1404,34 @@ atomictemp=atomictemp, ) if wantfiledata: + # XXX note that there is a race window between the time we + # write the clean data into the file and we stats it. So another + # writing process meddling with the file content right after we + # wrote it could cause bad stat data to be gathered. + # + # They are 2 data we gather here + # - the mode: + # That we actually just wrote, we should not need to read + # it from disk, (except not all mode might have survived + # the disk round-trip, which is another issue: we should + # not depends on this) + # - the mtime, + # On system that support nanosecond precision, the mtime + # could be accurate enough to tell the two writes appart. + # However gathering it in a racy way make the mtime we + # gather "unreliable". + # + # (note: we get the size from the data we write, which is sane) + # + # So in theory the data returned here are fully racy, but in + # practice "it works mostly fine". + # + # Do not be surprised if you end up reading this while looking + # for the causes of some buggy status. Feel free to improve + # this in the future, but we cannot simply stop gathering + # information. Otherwise `hg status` call made after a large `hg + # update` runs would have to redo a similar amount of work to + # restore and compare all files content. 
s = wfctx.lstat() mode = s.st_mode mtime = timestamp.mtime_of(s) @@ -1690,10 +1718,8 @@ ) try: - # premerge - tocomplete = [] for f, args, msg in mergeactions: - repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg)) + repo.ui.debug(b" %s: %s -> m\n" % (f, msg)) ms.addcommitinfo(f, {b'merged': b'yes'}) progress.increment(item=f) if f == b'.hgsubstate': # subrepo states need updating @@ -1702,16 +1728,6 @@ ) continue wctx[f].audit() - complete, r = ms.preresolve(f, wctx) - if not complete: - numupdates += 1 - tocomplete.append((f, args, msg)) - - # merge - for f, args, msg in tocomplete: - repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg)) - ms.addcommitinfo(f, {b'merged': b'yes'}) - progress.increment(item=f, total=numupdates) ms.resolve(f, wctx) except error.InterventionRequired: @@ -2144,6 +2160,71 @@ mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0 ) with repo.dirstate.parentchange(): + ### Filter Filedata + # + # We gathered "cache" information for the clean file while + # updating them: mtime, size and mode. + # + # At the time this comment is written, they are various issues + # with how we gather the `mode` and `mtime` information (see + # the comment in `batchget`). + # + # We are going to smooth one of this issue here : mtime ambiguity. + # + # i.e. even if the mtime gathered during `batchget` was + # correct[1] a change happening right after it could change the + # content while keeping the same mtime[2]. + # + # When we reach the current code, the "on disk" part of the + # update operation is finished. We still assume that no other + # process raced that "on disk" part, but we want to at least + # prevent later file change to alter the content of the file + # right after the update operation. So quickly that the same + # mtime is record for the operation. + # To prevent such ambiguity to happens, we will only keep the + # "file data" for files with mtime that are stricly in the past, + # i.e. 
whose mtime is strictly lower than the current time. + # + # This protect us from race conditions from operation that could + # run right after this one, especially other Mercurial + # operation that could be waiting for the wlock to touch files + # content and the dirstate. + # + # In an ideal world, we could only get reliable information in + # `getfiledata` (from `getbatch`), however the current approach + # have been a successful compromise since many years. + # + # At the time this comment is written, not using any "cache" + # file data at all here would not be viable. As it would result is + # a very large amount of work (equivalent to the previous `hg + # update` during the next status after an update). + # + # [1] the current code cannot grantee that the `mtime` and + # `mode` are correct, but the result is "okay in practice". + # (see the comment in `batchget`). # + # + # [2] using nano-second precision can greatly help here because + # it makes the "different write with same mtime" issue + # virtually vanish. However, dirstate v1 cannot store such + # precision and a bunch of python-runtime, operating-system and + # filesystem does not provide use with such precision, so we + # have to operate as if it wasn't available. + if getfiledata: + ambiguous_mtime = {} + now = timestamp.get_fs_now(repo.vfs) + if now is None: + # we can't write to the FS, so we won't actually update + # the dirstate content anyway, no need to put cache + # information. 
+ getfiledata = None + else: + now_sec = now[0] + for f, m in pycompat.iteritems(getfiledata): + if m is not None and m[2][0] >= now_sec: + ambiguous_mtime[f] = (m[0], m[1], None) + for f, m in pycompat.iteritems(ambiguous_mtime): + getfiledata[f] = m + repo.setparents(fp1, fp2) mergestatemod.recordupdates( repo, mresult.actionsdict, branchmerge, getfiledata @@ -2386,13 +2467,13 @@ if confirm: nb_ignored = len(status.ignored) - nb_unkown = len(status.unknown) - if nb_unkown and nb_ignored: - msg = _(b"permanently delete %d unkown and %d ignored files?") - msg %= (nb_unkown, nb_ignored) - elif nb_unkown: - msg = _(b"permanently delete %d unkown files?") - msg %= nb_unkown + nb_unknown = len(status.unknown) + if nb_unknown and nb_ignored: + msg = _(b"permanently delete %d unknown and %d ignored files?") + msg %= (nb_unknown, nb_ignored) + elif nb_unknown: + msg = _(b"permanently delete %d unknown files?") + msg %= nb_unknown elif nb_ignored: msg = _(b"permanently delete %d ignored files?") msg %= nb_ignored
--- a/mercurial/mergestate.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/mergestate.py Tue Jan 18 10:27:13 2022 +0100 @@ -313,16 +313,15 @@ """return extras stored with the mergestate for the given filename""" return self._stateextras[filename] - def _resolve(self, preresolve, dfile, wctx): - """rerun merge process for file path `dfile`. - Returns whether the merge was completed and the return value of merge - obtained from filemerge._filemerge(). - """ + def resolve(self, dfile, wctx): + """run merge process for dfile + + Returns the exit code of the merge.""" if self[dfile] in ( MERGE_RECORD_RESOLVED, LEGACY_RECORD_DRIVER_RESOLVED, ): - return True, 0 + return 0 stateentry = self._state[dfile] state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry octx = self._repo[self._other] @@ -341,43 +340,30 @@ fla = fca.flags() if b'x' in flags + flo + fla and b'l' not in flags + flo + fla: if fca.rev() == nullrev and flags != flo: - if preresolve: - self._repo.ui.warn( - _( - b'warning: cannot merge flags for %s ' - b'without common ancestor - keeping local flags\n' - ) - % afile + self._repo.ui.warn( + _( + b'warning: cannot merge flags for %s ' + b'without common ancestor - keeping local flags\n' ) + % afile + ) elif flags == fla: flags = flo - if preresolve: - # restore local - if localkey != self._repo.nodeconstants.nullhex: - self._restore_backup(wctx[dfile], localkey, flags) - else: - wctx[dfile].remove(ignoremissing=True) - complete, merge_ret, deleted = filemerge.premerge( - self._repo, - wctx, - self._local, - lfile, - fcd, - fco, - fca, - labels=self._labels, - ) + # restore local + if localkey != self._repo.nodeconstants.nullhex: + self._restore_backup(wctx[dfile], localkey, flags) else: - complete, merge_ret, deleted = filemerge.filemerge( - self._repo, - wctx, - self._local, - lfile, - fcd, - fco, - fca, - labels=self._labels, - ) + wctx[dfile].remove(ignoremissing=True) + merge_ret, deleted = filemerge.filemerge( + self._repo, + wctx, + 
self._local, + lfile, + fcd, + fco, + fca, + labels=self._labels, + ) if merge_ret is None: # If return value of merge is None, then there are no real conflict del self._state[dfile] @@ -385,40 +371,27 @@ elif not merge_ret: self.mark(dfile, MERGE_RECORD_RESOLVED) - if complete: - action = None - if deleted: - if fcd.isabsent(): - # dc: local picked. Need to drop if present, which may - # happen on re-resolves. - action = ACTION_FORGET - else: - # cd: remote picked (or otherwise deleted) - action = ACTION_REMOVE + action = None + if deleted: + if fcd.isabsent(): + # dc: local picked. Need to drop if present, which may + # happen on re-resolves. + action = ACTION_FORGET else: - if fcd.isabsent(): # dc: remote picked - action = ACTION_GET - elif fco.isabsent(): # cd: local picked - if dfile in self.localctx: - action = ACTION_ADD_MODIFIED - else: - action = ACTION_ADD - # else: regular merges (no action necessary) - self._results[dfile] = merge_ret, action - - return complete, merge_ret + # cd: remote picked (or otherwise deleted) + action = ACTION_REMOVE + else: + if fcd.isabsent(): # dc: remote picked + action = ACTION_GET + elif fco.isabsent(): # cd: local picked + if dfile in self.localctx: + action = ACTION_ADD_MODIFIED + else: + action = ACTION_ADD + # else: regular merges (no action necessary) + self._results[dfile] = merge_ret, action - def preresolve(self, dfile, wctx): - """run premerge process for dfile - - Returns whether the merge is complete, and the exit code.""" - return self._resolve(True, dfile, wctx) - - def resolve(self, dfile, wctx): - """run merge process (assuming premerge was run) for dfile - - Returns the exit code of the merge.""" - return self._resolve(False, dfile, wctx)[1] + return merge_ret def counts(self): """return counts for updated, merged and removed files in this
--- a/mercurial/narrowspec.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/narrowspec.py Tue Jan 18 10:27:13 2022 +0100 @@ -323,7 +323,7 @@ removedmatch = matchmod.differencematcher(oldmatch, newmatch) ds = repo.dirstate - lookup, status = ds.status( + lookup, status, _mtime_boundary = ds.status( removedmatch, subrepos=[], ignored=True, clean=True, unknown=True ) trackeddirty = status.modified + status.added
--- a/mercurial/obsutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/obsutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -218,7 +218,7 @@ or - # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally) + # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally) # # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
--- a/mercurial/patch.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/patch.py Tue Jan 18 10:27:13 2022 +0100 @@ -55,6 +55,8 @@ ) PatchError = error.PatchError +PatchParseError = error.PatchParseError +PatchApplicationError = error.PatchApplicationError # public functions @@ -107,7 +109,9 @@ def mimesplit(stream, cur): def msgfp(m): fp = stringio() + # pytype: disable=wrong-arg-types g = mail.Generator(fp, mangle_from_=False) + # pytype: enable=wrong-arg-types g.flatten(m) fp.seek(0) return fp @@ -553,7 +557,9 @@ if not self.repo.dirstate.get_entry(fname).any_tracked and self.exists( fname ): - raise PatchError(_(b'cannot patch %s: file is not tracked') % fname) + raise PatchApplicationError( + _(b'cannot patch %s: file is not tracked') % fname + ) def setfile(self, fname, data, mode, copysource): self._checkknown(fname) @@ -637,7 +643,9 @@ def _checkknown(self, fname): if fname not in self.ctx: - raise PatchError(_(b'cannot patch %s: file is not tracked') % fname) + raise PatchApplicationError( + _(b'cannot patch %s: file is not tracked') % fname + ) def getfile(self, fname): try: @@ -793,7 +801,7 @@ def apply(self, h): if not h.complete(): - raise PatchError( + raise PatchParseError( _(b"bad hunk #%d %s (%d %d %d %d)") % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb) ) @@ -1388,7 +1396,7 @@ def read_unified_hunk(self, lr): m = unidesc.match(self.desc) if not m: - raise PatchError(_(b"bad hunk #%d") % self.number) + raise PatchParseError(_(b"bad hunk #%d") % self.number) self.starta, self.lena, self.startb, self.lenb = m.groups() if self.lena is None: self.lena = 1 @@ -1405,7 +1413,7 @@ lr, self.hunk, self.lena, self.lenb, self.a, self.b ) except error.ParseError as e: - raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e)) + raise PatchParseError(_(b"bad hunk #%d: %s") % (self.number, e)) # if we hit eof before finishing out the hunk, the last line will # be zero length. Lets try to fix it up. 
while len(self.hunk[-1]) == 0: @@ -1420,7 +1428,7 @@ self.desc = lr.readline() m = contextdesc.match(self.desc) if not m: - raise PatchError(_(b"bad hunk #%d") % self.number) + raise PatchParseError(_(b"bad hunk #%d") % self.number) self.starta, aend = m.groups() self.starta = int(self.starta) if aend is None: @@ -1440,7 +1448,7 @@ elif l.startswith(b' '): u = b' ' + s else: - raise PatchError( + raise PatchParseError( _(b"bad hunk #%d old text line %d") % (self.number, x) ) self.a.append(u) @@ -1454,7 +1462,7 @@ l = lr.readline() m = contextdesc.match(l) if not m: - raise PatchError(_(b"bad hunk #%d") % self.number) + raise PatchParseError(_(b"bad hunk #%d") % self.number) self.startb, bend = m.groups() self.startb = int(self.startb) if bend is None: @@ -1487,7 +1495,7 @@ lr.push(l) break else: - raise PatchError( + raise PatchParseError( _(b"bad hunk #%d old text line %d") % (self.number, x) ) self.b.append(s) @@ -1601,7 +1609,7 @@ while True: line = getline(lr, self.hunk) if not line: - raise PatchError( + raise PatchParseError( _(b'could not extract "%s" binary data') % self._fname ) if line.startswith(b'literal '): @@ -1622,14 +1630,14 @@ try: dec.append(util.b85decode(line[1:])[:l]) except ValueError as e: - raise PatchError( + raise PatchParseError( _(b'could not decode "%s" binary patch: %s') % (self._fname, stringutil.forcebytestr(e)) ) line = getline(lr, self.hunk) text = zlib.decompress(b''.join(dec)) if len(text) != size: - raise PatchError( + raise PatchParseError( _(b'"%s" length is %d bytes, should be %d') % (self._fname, len(text), size) ) @@ -1847,7 +1855,7 @@ try: p.transitions[state][newstate](p, data) except KeyError: - raise PatchError( + raise PatchParseError( b'unhandled transition: %s -> %s' % (state, newstate) ) state = newstate @@ -1874,7 +1882,7 @@ ('a//b/', 'd/e/c') >>> pathtransform(b'a/b/c', 3, b'') Traceback (most recent call last): - PatchError: unable to strip away 1 of 3 dirs from a/b/c + PatchApplicationError: unable to strip away 
1 of 3 dirs from a/b/c """ pathlen = len(path) i = 0 @@ -1884,7 +1892,7 @@ while count > 0: i = path.find(b'/', i) if i == -1: - raise PatchError( + raise PatchApplicationError( _(b"unable to strip away %d of %d dirs from %s") % (count, strip, path) ) @@ -1947,7 +1955,7 @@ elif not nulla: fname = afile else: - raise PatchError(_(b"undefined source and destination files")) + raise PatchParseError(_(b"undefined source and destination files")) gp = patchmeta(fname) if create: @@ -2097,7 +2105,7 @@ gp.copy(), ) if not gitpatches: - raise PatchError( + raise PatchParseError( _(b'failed to synchronize metadata for "%s"') % afile[2:] ) newfile = True @@ -2193,7 +2201,7 @@ out += binchunk[i:offset_end] i += cmd else: - raise PatchError(_(b'unexpected delta opcode 0')) + raise PatchApplicationError(_(b'unexpected delta opcode 0')) return out @@ -2270,7 +2278,7 @@ data, mode = store.getfile(gp.oldpath)[:2] if data is None: # This means that the old path does not exist - raise PatchError( + raise PatchApplicationError( _(b"source file '%s' does not exist") % gp.oldpath ) if gp.mode: @@ -2283,7 +2291,7 @@ if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists( gp.path ): - raise PatchError( + raise PatchApplicationError( _( b"cannot create %s: destination " b"already exists" @@ -2365,7 +2373,7 @@ scmutil.marktouched(repo, files, similarity) code = fp.close() if code: - raise PatchError( + raise PatchApplicationError( _(b"patch command failed: %s") % procutil.explainexit(code) ) return fuzz @@ -2397,7 +2405,7 @@ files.update(backend.close()) store.close() if ret < 0: - raise PatchError(_(b'patch failed to apply')) + raise PatchApplicationError(_(b'patch failed to apply')) return ret > 0
--- a/mercurial/pathutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/pathutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -79,20 +79,24 @@ return # AIX ignores "/" at end of path, others raise EISDIR. if util.endswithsep(path): - raise error.Abort(_(b"path ends in directory separator: %s") % path) + raise error.InputError( + _(b"path ends in directory separator: %s") % path + ) parts = util.splitpath(path) if ( os.path.splitdrive(path)[0] or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'') or pycompat.ospardir in parts ): - raise error.Abort(_(b"path contains illegal component: %s") % path) + raise error.InputError( + _(b"path contains illegal component: %s") % path + ) # Windows shortname aliases for p in parts: if b"~" in p: first, last = p.split(b"~", 1) if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]: - raise error.Abort( + raise error.InputError( _(b"path contains illegal component: %s") % path ) if b'.hg' in _lowerclean(path): @@ -101,7 +105,7 @@ if p in lparts[1:]: pos = lparts.index(p) base = os.path.join(*parts[:pos]) - raise error.Abort( + raise error.InputError( _(b"path '%s' is inside nested repo %r") % (path, pycompat.bytestr(base)) )
--- a/mercurial/pure/parsers.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/pure/parsers.py Tue Jan 18 10:27:13 2022 +0100 @@ -104,6 +104,7 @@ _mtime_ns = attr.ib() _fallback_exec = attr.ib() _fallback_symlink = attr.ib() + _mtime_second_ambiguous = attr.ib() def __init__( self, @@ -127,24 +128,27 @@ self._size = None self._mtime_s = None self._mtime_ns = None + self._mtime_second_ambiguous = False if parentfiledata is None: has_meaningful_mtime = False has_meaningful_data = False + elif parentfiledata[2] is None: + has_meaningful_mtime = False if has_meaningful_data: self._mode = parentfiledata[0] self._size = parentfiledata[1] if has_meaningful_mtime: - self._mtime_s, self._mtime_ns = parentfiledata[2] + ( + self._mtime_s, + self._mtime_ns, + self._mtime_second_ambiguous, + ) = parentfiledata[2] @classmethod def from_v2_data(cls, flags, size, mtime_s, mtime_ns): """Build a new DirstateItem object from V2 data""" has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE) has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME) - if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS: - # The current code is not able to do the more subtle comparison that the - # MTIME_SECOND_AMBIGUOUS requires. 
So we ignore the mtime - has_meaningful_mtime = False mode = None if flags & +DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED: @@ -171,13 +175,15 @@ mode |= stat.S_IFLNK else: mode |= stat.S_IFREG + + second_ambiguous = flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS return cls( wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED), p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED), p2_info=bool(flags & DIRSTATE_V2_P2_INFO), has_meaningful_data=has_mode_size, has_meaningful_mtime=has_meaningful_mtime, - parentfiledata=(mode, size, (mtime_s, mtime_ns)), + parentfiledata=(mode, size, (mtime_s, mtime_ns, second_ambiguous)), fallback_exec=fallback_exec, fallback_symlink=fallback_symlink, ) @@ -214,13 +220,13 @@ wc_tracked=True, p1_tracked=True, has_meaningful_mtime=False, - parentfiledata=(mode, size, (42, 0)), + parentfiledata=(mode, size, (42, 0, False)), ) else: return cls( wc_tracked=True, p1_tracked=True, - parentfiledata=(mode, size, (mtime, 0)), + parentfiledata=(mode, size, (mtime, 0, False)), ) else: raise RuntimeError(b'unknown state: %s' % state) @@ -246,7 +252,7 @@ self._p1_tracked = True self._mode = mode self._size = size - self._mtime_s, self._mtime_ns = mtime + self._mtime_s, self._mtime_ns, self._mtime_second_ambiguous = mtime def set_tracked(self): """mark a file as tracked in the working copy @@ -301,10 +307,22 @@ if self_sec is None: return False self_ns = self._mtime_ns - other_sec, other_ns = other_mtime - return self_sec == other_sec and ( - self_ns == other_ns or self_ns == 0 or other_ns == 0 - ) + other_sec, other_ns, second_ambiguous = other_mtime + if self_sec != other_sec: + # seconds are different theses mtime are definitly not equal + return False + elif other_ns == 0 or self_ns == 0: + # at least one side as no nano-seconds information + + if self._mtime_second_ambiguous: + # We cannot trust the mtime in this case + return False + else: + # the "seconds" value was reliable on its own. We are good to go. 
+ return True + else: + # We have nano second information, let us use them ! + return self_ns == other_ns @property def state(self): @@ -463,6 +481,8 @@ flags |= DIRSTATE_V2_MODE_IS_SYMLINK if self._mtime_s is not None: flags |= DIRSTATE_V2_HAS_MTIME + if self._mtime_second_ambiguous: + flags |= DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS if self._fallback_exec is not None: flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC @@ -531,13 +551,11 @@ return AMBIGUOUS_TIME elif not self._p1_tracked: return AMBIGUOUS_TIME + elif self._mtime_second_ambiguous: + return AMBIGUOUS_TIME else: return self._mtime_s - def need_delay(self, now): - """True if the stored mtime would be ambiguous with the current time""" - return self.v1_state() == b'n' and self._mtime_s == now[0] - def gettype(q): return int(q & 0xFFFF) @@ -566,18 +584,13 @@ 0, revlog_constants.COMP_MODE_INLINE, revlog_constants.COMP_MODE_INLINE, + revlog_constants.RANK_UNKNOWN, ) @util.propertycache def entry_size(self): return self.index_format.size - @property - def nodemap(self): - msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]" - util.nouideprecwarn(msg, b'5.3', stacklevel=2) - return self._nodemap - @util.propertycache def _nodemap(self): nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev}) @@ -629,7 +642,7 @@ if not isinstance(i, int): raise TypeError(b"expecting int indexes") if i < 0 or i >= len(self): - raise IndexError + raise IndexError(i) def __getitem__(self, i): if i == -1: @@ -653,6 +666,7 @@ 0, revlog_constants.COMP_MODE_INLINE, revlog_constants.COMP_MODE_INLINE, + revlog_constants.RANK_UNKNOWN, ) return r @@ -835,7 +849,7 @@ entry = data[:10] data_comp = data[10] & 3 sidedata_comp = (data[10] & (3 << 2)) >> 2 - return entry + (data_comp, sidedata_comp) + return entry + (data_comp, sidedata_comp, revlog_constants.RANK_UNKNOWN) def _pack_entry(self, rev, entry): data = entry[:10] @@ -860,20 +874,53 @@ class IndexChangelogV2(IndexObject2): index_format = 
revlog_constants.INDEX_ENTRY_CL_V2 + null_item = ( + IndexObject2.null_item[: revlog_constants.ENTRY_RANK] + + (0,) # rank of null is 0 + + IndexObject2.null_item[revlog_constants.ENTRY_RANK :] + ) + def _unpack_entry(self, rev, data, r=True): items = self.index_format.unpack(data) - entry = items[:3] + (rev, rev) + items[3:8] - data_comp = items[8] & 3 - sidedata_comp = (items[8] >> 2) & 3 - return entry + (data_comp, sidedata_comp) + return ( + items[revlog_constants.INDEX_ENTRY_V2_IDX_OFFSET], + items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH], + items[revlog_constants.INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH], + rev, + rev, + items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_1], + items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_2], + items[revlog_constants.INDEX_ENTRY_V2_IDX_NODEID], + items[revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET], + items[ + revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH + ], + items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] & 3, + (items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] >> 2) + & 3, + items[revlog_constants.INDEX_ENTRY_V2_IDX_RANK], + ) def _pack_entry(self, rev, entry): - assert entry[3] == rev, entry[3] - assert entry[4] == rev, entry[4] - data = entry[:3] + entry[5:10] - data_comp = entry[10] & 3 - sidedata_comp = (entry[11] & 3) << 2 - data += (data_comp | sidedata_comp,) + + base = entry[revlog_constants.ENTRY_DELTA_BASE] + link_rev = entry[revlog_constants.ENTRY_LINK_REV] + assert base == rev, (base, rev) + assert link_rev == rev, (link_rev, rev) + data = ( + entry[revlog_constants.ENTRY_DATA_OFFSET], + entry[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH], + entry[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH], + entry[revlog_constants.ENTRY_PARENT_1], + entry[revlog_constants.ENTRY_PARENT_2], + entry[revlog_constants.ENTRY_NODE_ID], + entry[revlog_constants.ENTRY_SIDEDATA_OFFSET], + entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSED_LENGTH], + 
entry[revlog_constants.ENTRY_DATA_COMPRESSION_MODE] & 3 + | (entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSION_MODE] & 3) + << 2, + entry[revlog_constants.ENTRY_RANK], + ) return self.index_format.pack(*data) @@ -903,23 +950,11 @@ return parents -def pack_dirstate(dmap, copymap, pl, now): +def pack_dirstate(dmap, copymap, pl): cs = stringio() write = cs.write write(b"".join(pl)) for f, e in pycompat.iteritems(dmap): - if e.need_delay(now): - # The file was last modified "simultaneously" with the current - # write to dirstate (i.e. within the same second for file- - # systems with a granularity of 1 sec). This commonly happens - # for at least a couple of files on 'update'. - # The user could change the file without changing its size - # within the same second. Invalidate the file's mtime in - # dirstate, forcing future 'status' calls to compare the - # contents of the file if the size is the same. This prevents - # mistakenly treating such files as clean. - e.set_possibly_dirty() - if f in copymap: f = b"%s\0%s" % (f, copymap[f]) e = _pack(
--- a/mercurial/revlog.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/revlog.py Tue Jan 18 10:27:13 2022 +0100 @@ -741,21 +741,6 @@ """iterate over all rev in this revlog (from start to stop)""" return storageutil.iterrevs(len(self), start=start, stop=stop) - @property - def nodemap(self): - msg = ( - b"revlog.nodemap is deprecated, " - b"use revlog.index.[has_node|rev|get_rev]" - ) - util.nouideprecwarn(msg, b'5.3', stacklevel=2) - return self.index.nodemap - - @property - def _nodecache(self): - msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap" - util.nouideprecwarn(msg, b'5.3', stacklevel=2) - return self.index.nodemap - def hasnode(self, node): try: self.rev(node) @@ -870,7 +855,7 @@ if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0: return self.rawsize(rev) - return len(self.revision(rev, raw=False)) + return len(self.revision(rev)) def chainbase(self, rev): base = self._chainbasecache.get(rev) @@ -1776,33 +1761,13 @@ return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2)) - def _processflags(self, text, flags, operation, raw=False): - """deprecated entry point to access flag processors""" - msg = b'_processflag(...) use the specialized variant' - util.nouideprecwarn(msg, b'5.2', stacklevel=2) - if raw: - return text, flagutil.processflagsraw(self, text, flags) - elif operation == b'read': - return flagutil.processflagsread(self, text, flags) - else: # write operation - return flagutil.processflagswrite(self, text, flags) - - def revision(self, nodeorrev, _df=None, raw=False): + def revision(self, nodeorrev, _df=None): """return an uncompressed revision of a given node or revision number. _df - an existing file handle to read from. (internal-only) - raw - an optional argument specifying if the revision data is to be - treated as raw data when applying flag transforms. 'raw' should be set - to True when generating changegroups or in debug commands. 
""" - if raw: - msg = ( - b'revlog.revision(..., raw=True) is deprecated, ' - b'use revlog.rawdata(...)' - ) - util.nouideprecwarn(msg, b'5.2', stacklevel=2) - return self._revisiondata(nodeorrev, _df, raw=raw) + return self._revisiondata(nodeorrev, _df) def sidedata(self, nodeorrev, _df=None): """a map of extra data related to the changeset but not part of the hash
--- a/mercurial/revlogutils/__init__.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/revlogutils/__init__.py Tue Jan 18 10:27:13 2022 +0100 @@ -12,6 +12,7 @@ # See mercurial.revlogutils.constants for doc COMP_MODE_INLINE = 2 +RANK_UNKNOWN = -1 def offset_type(offset, type): @@ -34,6 +35,7 @@ sidedata_offset=0, sidedata_compressed_length=0, sidedata_compression_mode=COMP_MODE_INLINE, + rank=RANK_UNKNOWN, ): """Build one entry from symbolic name @@ -56,6 +58,7 @@ sidedata_compressed_length, data_compression_mode, sidedata_compression_mode, + rank, )
--- a/mercurial/revlogutils/constants.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/revlogutils/constants.py Tue Jan 18 10:27:13 2022 +0100 @@ -103,6 +103,17 @@ # (see "COMP_MODE_*" constants for details) ENTRY_SIDEDATA_COMPRESSION_MODE = 11 +# [12] Revision rank: +# The number of revision under this one. +# +# Formally this is defined as : rank(X) = len(ancestors(X) + X) +# +# If rank == -1; then we do not have this information available. +# Only `null` has a rank of 0. +ENTRY_RANK = 12 + +RANK_UNKNOWN = -1 + ### main revlog header # We cannot rely on Struct.format is inconsistent for python <=3.6 versus above @@ -181,9 +192,20 @@ # 8 bytes: sidedata offset # 4 bytes: sidedata compressed length # 1 bytes: compression mode (2 lower bit are data_compression_mode) -# 27 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page) -INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiB27x") -assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_V2.size +# 4 bytes: changeset rank (i.e. `len(::REV)`) +# 23 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page) +INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiBi23x") +assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_CL_V2.size +INDEX_ENTRY_V2_IDX_OFFSET = 0 +INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH = 1 +INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH = 2 +INDEX_ENTRY_V2_IDX_PARENT_1 = 3 +INDEX_ENTRY_V2_IDX_PARENT_2 = 4 +INDEX_ENTRY_V2_IDX_NODEID = 5 +INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET = 6 +INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH = 7 +INDEX_ENTRY_V2_IDX_COMPRESSION_MODE = 8 +INDEX_ENTRY_V2_IDX_RANK = 9 # revlog index flags
--- a/mercurial/revlogutils/deltas.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/revlogutils/deltas.py Tue Jan 18 10:27:13 2022 +0100 @@ -526,7 +526,7 @@ else: # deltabase is rawtext before changed by flag processors, which is # equivalent to non-raw text - basetext = revlog.revision(baserev, _df=fh, raw=False) + basetext = revlog.revision(baserev, _df=fh) fulltext = mdiff.patch(basetext, delta) try:
--- a/mercurial/revlogutils/flagutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/revlogutils/flagutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -32,6 +32,7 @@ REVIDX_FLAGS_ORDER REVIDX_RAWTEXT_CHANGING_FLAGS +# Keep this in sync with REVIDX_KNOWN_FLAGS in rust/hg-core/src/revlog/revlog.rs REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER) # Store flag processors (cf. 'addflagprocessor()' to register)
--- a/mercurial/revlogutils/revlogv0.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/revlogutils/revlogv0.py Tue Jan 18 10:27:13 2022 +0100 @@ -47,12 +47,6 @@ node_id=sha1nodeconstants.nullid, ) - @property - def nodemap(self): - msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]" - util.nouideprecwarn(msg, b'5.3', stacklevel=2) - return self._nodemap - @util.propertycache def _nodemap(self): nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
--- a/mercurial/scmutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/scmutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -180,6 +180,8 @@ ) ) except error.RepoError as inst: + if isinstance(inst, error.RepoLookupError): + detailed_exit_code = 10 ui.error(_(b"abort: %s\n") % inst) if inst.hint: ui.error(_(b"(%s)\n") % inst.hint) @@ -341,7 +343,7 @@ if fl in self._loweredfiles and f not in self._dirstate: msg = _(b'possible case-folding collision for %s') % f if self._abort: - raise error.Abort(msg) + raise error.StateError(msg) self._ui.warn(_(b"warning: %s\n") % msg) self._loweredfiles.add(fl) self._newfiles.add(f) @@ -2195,6 +2197,9 @@ returns a repo object with the required changesets unhidden """ + if not specs: + return repo + if not repo.filtername or not repo.ui.configbool( b'experimental', b'directaccess' ):
--- a/mercurial/simplemerge.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/simplemerge.py Tue Jan 18 10:27:13 2022 +0100 @@ -19,20 +19,14 @@ from __future__ import absolute_import from .i18n import _ -from .node import nullrev from . import ( error, mdiff, pycompat, - util, ) from .utils import stringutil -class CantReprocessAndShowBase(Exception): - pass - - def intersect(ra, rb): """Given two ranges return the range where they intersect or None. @@ -89,72 +83,6 @@ self.a = a self.b = b - def merge_lines( - self, - name_a=None, - name_b=None, - name_base=None, - start_marker=b'<<<<<<<', - mid_marker=b'=======', - end_marker=b'>>>>>>>', - base_marker=None, - localorother=None, - minimize=False, - ): - """Return merge in cvs-like form.""" - self.conflicts = False - newline = b'\n' - if len(self.a) > 0: - if self.a[0].endswith(b'\r\n'): - newline = b'\r\n' - elif self.a[0].endswith(b'\r'): - newline = b'\r' - if name_a and start_marker: - start_marker = start_marker + b' ' + name_a - if name_b and end_marker: - end_marker = end_marker + b' ' + name_b - if name_base and base_marker: - base_marker = base_marker + b' ' + name_base - merge_regions = self.merge_regions() - if minimize: - merge_regions = self.minimize(merge_regions) - for t in merge_regions: - what = t[0] - if what == b'unchanged': - for i in range(t[1], t[2]): - yield self.base[i] - elif what == b'a' or what == b'same': - for i in range(t[1], t[2]): - yield self.a[i] - elif what == b'b': - for i in range(t[1], t[2]): - yield self.b[i] - elif what == b'conflict': - if localorother == b'local': - for i in range(t[3], t[4]): - yield self.a[i] - elif localorother == b'other': - for i in range(t[5], t[6]): - yield self.b[i] - else: - self.conflicts = True - if start_marker is not None: - yield start_marker + newline - for i in range(t[3], t[4]): - yield self.a[i] - if base_marker is not None: - yield base_marker + newline - for i in range(t[1], t[2]): - yield self.base[i] - if mid_marker is not None: - 
yield mid_marker + newline - for i in range(t[5], t[6]): - yield self.b[i] - if end_marker is not None: - yield end_marker + newline - else: - raise ValueError(what) - def merge_groups(self): """Yield sequence of line groups. Each one is a tuple: @@ -170,7 +98,7 @@ 'b', lines Lines taken from b - 'conflict', base_lines, a_lines, b_lines + 'conflict', (base_lines, a_lines, b_lines) Lines from base were changed to either a or b and conflict. """ for t in self.merge_regions(): @@ -184,9 +112,11 @@ elif what == b'conflict': yield ( what, - self.base[t[1] : t[2]], - self.a[t[3] : t[4]], - self.b[t[5] : t[6]], + ( + self.base[t[1] : t[2]], + self.a[t[3] : t[4]], + self.b[t[5] : t[6]], + ), ) else: raise ValueError(what) @@ -280,67 +210,6 @@ ia = aend ib = bend - def minimize(self, merge_regions): - """Trim conflict regions of lines where A and B sides match. - - Lines where both A and B have made the same changes at the beginning - or the end of each merge region are eliminated from the conflict - region and are instead considered the same. - """ - for region in merge_regions: - if region[0] != b"conflict": - yield region - continue - # pytype thinks this tuple contains only 3 things, but - # that's clearly not true because this code successfully - # executes. It might be wise to rework merge_regions to be - # some kind of attrs type. 
- ( - issue, - z1, - z2, - a1, - a2, - b1, - b2, - ) = region # pytype: disable=bad-unpacking - alen = a2 - a1 - blen = b2 - b1 - - # find matches at the front - ii = 0 - while ( - ii < alen and ii < blen and self.a[a1 + ii] == self.b[b1 + ii] - ): - ii += 1 - startmatches = ii - - # find matches at the end - ii = 0 - while ( - ii < alen - and ii < blen - and self.a[a2 - ii - 1] == self.b[b2 - ii - 1] - ): - ii += 1 - endmatches = ii - - if startmatches > 0: - yield b'same', a1, a1 + startmatches - - yield ( - b'conflict', - z1, - z2, - a1 + startmatches, - a2 - endmatches, - b1 + startmatches, - b2 - endmatches, - ) - - if endmatches > 0: - yield b'same', a2 - endmatches, a2 - def find_sync_regions(self): """Return a list of sync regions, where both descendants match the base. @@ -415,27 +284,117 @@ return text -def _picklabels(defaults, overrides): +def _picklabels(overrides): if len(overrides) > 3: raise error.Abort(_(b"can only specify three labels.")) - result = defaults[:] + result = [None, None, None] for i, override in enumerate(overrides): result[i] = override return result -def is_not_null(ctx): - if not util.safehasattr(ctx, "node"): - return False - return ctx.rev() != nullrev +def _detect_newline(m3): + if len(m3.a) > 0: + if m3.a[0].endswith(b'\r\n'): + return b'\r\n' + elif m3.a[0].endswith(b'\r'): + return b'\r' + return b'\n' + + +def _minimize(a_lines, b_lines): + """Trim conflict regions of lines where A and B sides match. + + Lines where both A and B have made the same changes at the beginning + or the end of each merge region are eliminated from the conflict + region and are instead considered the same. 
+ """ + alen = len(a_lines) + blen = len(b_lines) + + # find matches at the front + ii = 0 + while ii < alen and ii < blen and a_lines[ii] == b_lines[ii]: + ii += 1 + startmatches = ii + + # find matches at the end + ii = 0 + while ii < alen and ii < blen and a_lines[-ii - 1] == b_lines[-ii - 1]: + ii += 1 + endmatches = ii + + lines_before = a_lines[:startmatches] + new_a_lines = a_lines[startmatches : alen - endmatches] + new_b_lines = b_lines[startmatches : blen - endmatches] + lines_after = a_lines[alen - endmatches :] + return lines_before, new_a_lines, new_b_lines, lines_after -def _mergediff(m3, name_a, name_b, name_base): +def render_minimized( + m3, + name_a=None, + name_b=None, + start_marker=b'<<<<<<<', + mid_marker=b'=======', + end_marker=b'>>>>>>>', +): + """Return merge in cvs-like form.""" + newline = _detect_newline(m3) + conflicts = False + if name_a: + start_marker = start_marker + b' ' + name_a + if name_b: + end_marker = end_marker + b' ' + name_b + merge_groups = m3.merge_groups() + lines = [] + for what, group_lines in merge_groups: + if what == b'conflict': + conflicts = True + base_lines, a_lines, b_lines = group_lines + minimized = _minimize(a_lines, b_lines) + lines_before, a_lines, b_lines, lines_after = minimized + lines.extend(lines_before) + lines.append(start_marker + newline) + lines.extend(a_lines) + lines.append(mid_marker + newline) + lines.extend(b_lines) + lines.append(end_marker + newline) + lines.extend(lines_after) + else: + lines.extend(group_lines) + return lines, conflicts + + +def render_merge3(m3, name_a, name_b, name_base): + """Render conflicts as 3-way conflict markers.""" + newline = _detect_newline(m3) + conflicts = False + lines = [] + for what, group_lines in m3.merge_groups(): + if what == b'conflict': + base_lines, a_lines, b_lines = group_lines + conflicts = True + lines.append(b'<<<<<<< ' + name_a + newline) + lines.extend(a_lines) + lines.append(b'||||||| ' + name_base + newline) + lines.extend(base_lines) + 
lines.append(b'=======' + newline) + lines.extend(b_lines) + lines.append(b'>>>>>>> ' + name_b + newline) + else: + lines.extend(group_lines) + return lines, conflicts + + +def render_mergediff(m3, name_a, name_b, name_base): + """Render conflicts as conflict markers with one snapshot and one diff.""" + newline = _detect_newline(m3) lines = [] conflicts = False - for group in m3.merge_groups(): - if group[0] == b'conflict': - base_lines, a_lines, b_lines = group[1:] + for what, group_lines in m3.merge_groups(): + if what == b'conflict': + base_lines, a_lines, b_lines = group_lines base_text = b''.join(base_lines) b_blocks = list( mdiff.allblocks( @@ -472,26 +431,37 @@ for line in lines2[block[2] : block[3]]: yield b'+' + line - lines.append(b"<<<<<<<\n") + lines.append(b"<<<<<<<" + newline) if matching_lines(a_blocks) < matching_lines(b_blocks): - lines.append(b"======= %s\n" % name_a) + lines.append(b"======= " + name_a + newline) lines.extend(a_lines) - lines.append(b"------- %s\n" % name_base) - lines.append(b"+++++++ %s\n" % name_b) + lines.append(b"------- " + name_base + newline) + lines.append(b"+++++++ " + name_b + newline) lines.extend(diff_lines(b_blocks, base_lines, b_lines)) else: - lines.append(b"------- %s\n" % name_base) - lines.append(b"+++++++ %s\n" % name_a) + lines.append(b"------- " + name_base + newline) + lines.append(b"+++++++ " + name_a + newline) lines.extend(diff_lines(a_blocks, base_lines, a_lines)) - lines.append(b"======= %s\n" % name_b) + lines.append(b"======= " + name_b + newline) lines.extend(b_lines) - lines.append(b">>>>>>>\n") + lines.append(b">>>>>>>" + newline) conflicts = True else: - lines.extend(group[1]) + lines.extend(group_lines) return lines, conflicts +def _resolve(m3, sides): + lines = [] + for what, group_lines in m3.merge_groups(): + if what == b'conflict': + for side in sides: + lines.extend(group_lines[side]) + else: + lines.extend(group_lines) + return lines + + def simplemerge(ui, localctx, basectx, otherctx, 
**opts): """Performs the simplemerge algorithm. @@ -508,59 +478,37 @@ # repository usually sees) might be more useful. return _verifytext(ctx.decodeddata(), ctx.path(), ui, opts) - mode = opts.get('mode', b'merge') - name_a, name_b, name_base = None, None, None - if mode != b'union': - name_a, name_b, name_base = _picklabels( - [localctx.path(), otherctx.path(), None], opts.get('label', []) - ) - try: localtext = readctx(localctx) basetext = readctx(basectx) othertext = readctx(otherctx) except error.Abort: - return 1 + return True m3 = Merge3Text(basetext, localtext, othertext) - extrakwargs = { - b"localorother": opts.get("localorother", None), - b'minimize': True, - } + conflicts = False + mode = opts.get('mode', b'merge') if mode == b'union': - extrakwargs[b'start_marker'] = None - extrakwargs[b'mid_marker'] = None - extrakwargs[b'end_marker'] = None - elif name_base is not None: - extrakwargs[b'base_marker'] = b'|||||||' - extrakwargs[b'name_base'] = name_base - extrakwargs[b'minimize'] = False - - if mode == b'mergediff': - lines, conflicts = _mergediff(m3, name_a, name_b, name_base) + lines = _resolve(m3, (1, 2)) + elif mode == b'local': + lines = _resolve(m3, (1,)) + elif mode == b'other': + lines = _resolve(m3, (2,)) else: - lines = list( - m3.merge_lines( - name_a=name_a, name_b=name_b, **pycompat.strkwargs(extrakwargs) - ) - ) - conflicts = m3.conflicts - - # merge flags if necessary - flags = localctx.flags() - localflags = set(pycompat.iterbytestr(flags)) - otherflags = set(pycompat.iterbytestr(otherctx.flags())) - if is_not_null(basectx) and localflags != otherflags: - baseflags = set(pycompat.iterbytestr(basectx.flags())) - commonflags = localflags & otherflags - addedflags = (localflags ^ otherflags) - baseflags - flags = b''.join(sorted(commonflags | addedflags)) + name_a, name_b, name_base = _picklabels(opts.get('label', [])) + if mode == b'mergediff': + lines, conflicts = render_mergediff(m3, name_a, name_b, name_base) + elif mode == b'merge3': + 
lines, conflicts = render_merge3(m3, name_a, name_b, name_base) + else: + lines, conflicts = render_minimized(m3, name_a, name_b) mergedtext = b''.join(lines) if opts.get('print'): ui.fout.write(mergedtext) else: - localctx.write(mergedtext, flags) + # localctx.flags() already has the merged flags (done in + # mergestate.resolve()) + localctx.write(mergedtext, localctx.flags()) - if conflicts and not mode == b'union': - return 1 + return conflicts
--- a/mercurial/sshpeer.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/sshpeer.py Tue Jan 18 10:27:13 2022 +0100 @@ -16,7 +16,6 @@ error, pycompat, util, - wireprotoserver, wireprototypes, wireprotov1peer, wireprotov1server, @@ -288,10 +287,6 @@ # Generate a random token to help identify responses to version 2 # upgrade request. token = pycompat.sysbytes(str(uuid.uuid4())) - upgradecaps = [ - (b'proto', wireprotoserver.SSHV2), - ] - upgradecaps = util.urlreq.urlencode(upgradecaps) try: pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40) @@ -302,11 +297,6 @@ pairsarg, ] - # Request upgrade to version 2 if configured. - if ui.configbool(b'experimental', b'sshpeer.advertise-v2'): - ui.debug(b'sending upgrade request: %s %s\n' % (token, upgradecaps)) - handshake.insert(0, b'upgrade %s %s\n' % (token, upgradecaps)) - if requestlog: ui.debug(b'devel-peer-request: hello+between\n') ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg)) @@ -365,24 +355,6 @@ if l.startswith(b'capabilities:'): caps.update(l[:-1].split(b':')[1].split()) break - elif protoname == wireprotoserver.SSHV2: - # We see a line with number of bytes to follow and then a value - # looking like ``capabilities: *``. - line = stdout.readline() - try: - valuelen = int(line) - except ValueError: - badresponse() - - capsline = stdout.read(valuelen) - if not capsline.startswith(b'capabilities: '): - badresponse() - - ui.debug(b'remote: %s\n' % capsline) - - caps.update(capsline.split(b':')[1].split()) - # Trailing newline. - stdout.read(1) # Error if we couldn't find capabilities, this means: # @@ -601,14 +573,6 @@ self._readerr() -class sshv2peer(sshv1peer): - """A peer that speakers version 2 of the transport protocol.""" - - # Currently version 2 is identical to version 1 post handshake. - # And handshake is performed before the peer is instantiated. So - # we need no custom code. 
- - def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True): """Make a peer instance from existing pipes. @@ -640,17 +604,6 @@ caps, autoreadstderr=autoreadstderr, ) - elif protoname == wireprototypes.SSHV2: - return sshv2peer( - ui, - path, - proc, - stdin, - stdout, - stderr, - caps, - autoreadstderr=autoreadstderr, - ) else: _cleanuppipes(ui, stdout, stdin, stderr, warn=None) raise error.RepoError(
--- a/mercurial/sslutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/sslutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -139,12 +139,18 @@ alg, fingerprint = fingerprint.split(b':', 1) fingerprint = fingerprint.replace(b':', b'').lower() + # pytype: disable=attribute-error + # `s` is heterogeneous, but this entry is always a list of tuples s[b'certfingerprints'].append((alg, fingerprint)) + # pytype: enable=attribute-error # Fingerprints from [hostfingerprints] are always SHA-1. for fingerprint in ui.configlist(b'hostfingerprints', bhostname): fingerprint = fingerprint.replace(b':', b'').lower() + # pytype: disable=attribute-error + # `s` is heterogeneous, but this entry is always a list of tuples s[b'certfingerprints'].append((b'sha1', fingerprint)) + # pytype: enable=attribute-error s[b'legacyfingerprint'] = True # If a host cert fingerprint is defined, it is the only thing that
--- a/mercurial/statprof.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/statprof.py Tue Jan 18 10:27:13 2022 +0100 @@ -494,9 +494,9 @@ data = state if fp is None: - import sys + from .utils import procutil - fp = sys.stdout + fp = procutil.stdout if len(data.samples) == 0: fp.write(b'No samples recorded.\n') return @@ -516,7 +516,7 @@ elif format == DisplayFormats.Chrome: write_to_chrome(data, fp, **kwargs) else: - raise Exception(b"Invalid display format") + raise Exception("Invalid display format") if format not in (DisplayFormats.Json, DisplayFormats.Chrome): fp.write(b'---\n') @@ -625,7 +625,7 @@ def display_about_method(data, fp, function=None, **kwargs): if function is None: - raise Exception(b"Invalid function") + raise Exception("Invalid function") filename = None if b':' in function: @@ -1080,7 +1080,7 @@ printusage() return 0 else: - assert False, b"unhandled option %s" % o + assert False, "unhandled option %s" % o if not path: print('must specify --file to load')
--- a/mercurial/unionrepo.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/unionrepo.py Tue Jan 18 10:27:13 2022 +0100 @@ -71,6 +71,7 @@ _sds, _dcm, _sdcm, + rank, ) = rev flags = _start & 0xFFFF @@ -107,6 +108,7 @@ 0, # sidedata size revlog_constants.COMP_MODE_INLINE, revlog_constants.COMP_MODE_INLINE, + rank, ) self.index.append(e) self.bundlerevs.add(n)
--- a/mercurial/upgrade.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/upgrade.py Tue Jan 18 10:27:13 2022 +0100 @@ -42,27 +42,16 @@ ): """Upgrade a repository in place.""" if optimize is None: - optimize = {} + optimize = set() repo = repo.unfiltered() - revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) - specentries = ( - (upgrade_engine.UPGRADE_CHANGELOG, changelog), - (upgrade_engine.UPGRADE_MANIFEST, manifest), - (upgrade_engine.UPGRADE_FILELOGS, filelogs), - ) - specified = [(y, x) for (y, x) in specentries if x is not None] - if specified: - # we have some limitation on revlogs to be recloned - if any(x for y, x in specified): - revlogs = set() - for upgrade, enabled in specified: - if enabled: - revlogs.add(upgrade) - else: - # none are enabled - for upgrade, __ in specified: - revlogs.discard(upgrade) + specified_revlogs = {} + if changelog is not None: + specified_revlogs[upgrade_engine.UPGRADE_CHANGELOG] = changelog + if manifest is not None: + specified_revlogs[upgrade_engine.UPGRADE_MANIFEST] = manifest + if filelogs is not None: + specified_revlogs[upgrade_engine.UPGRADE_FILELOGS] = filelogs # Ensure the repository can be upgraded. 
upgrade_actions.check_source_requirements(repo) @@ -96,20 +85,76 @@ ) removed_actions = upgrade_actions.find_format_downgrades(repo) - removedreqs = repo.requirements - newreqs - addedreqs = newreqs - repo.requirements + # check if we need to touch revlog and if so, which ones + + touched_revlogs = set() + overwrite_msg = _(b'warning: ignoring %14s, as upgrade is changing: %s\n') + select_msg = _(b'note: selecting %s for processing to change: %s\n') + msg_issued = 0 + + FL = upgrade_engine.UPGRADE_FILELOGS + MN = upgrade_engine.UPGRADE_MANIFEST + CL = upgrade_engine.UPGRADE_CHANGELOG + + if optimizations: + if any(specified_revlogs.values()): + # we have some limitation on revlogs to be recloned + for rl, enabled in specified_revlogs.items(): + if enabled: + touched_revlogs.add(rl) + else: + touched_revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) + for rl, enabled in specified_revlogs.items(): + if not enabled: + touched_revlogs.discard(rl) + + for action in sorted(up_actions + removed_actions, key=lambda a: a.name): + # optimisation does not "requires anything, they just needs it. 
+ if action.type != upgrade_actions.FORMAT_VARIANT: + continue - if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS: - incompatible = upgrade_actions.RECLONES_REQUIREMENTS & ( - removedreqs | addedreqs - ) - if incompatible: - msg = _( - b'ignoring revlogs selection flags, format requirements ' - b'change: %s\n' - ) - ui.warn(msg % b', '.join(sorted(incompatible))) - revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS + if action.touches_filelogs and FL not in touched_revlogs: + if FL in specified_revlogs: + if not specified_revlogs[FL]: + msg = overwrite_msg % (b'--no-filelogs', action.name) + ui.warn(msg) + msg_issued = 2 + else: + msg = select_msg % (b'all-filelogs', action.name) + ui.status(msg) + if not ui.quiet: + msg_issued = 1 + touched_revlogs.add(FL) + + if action.touches_manifests and MN not in touched_revlogs: + if MN in specified_revlogs: + if not specified_revlogs[MN]: + msg = overwrite_msg % (b'--no-manifest', action.name) + ui.warn(msg) + msg_issued = 2 + else: + msg = select_msg % (b'all-manifestlogs', action.name) + ui.status(msg) + if not ui.quiet: + msg_issued = 1 + touched_revlogs.add(MN) + + if action.touches_changelog and CL not in touched_revlogs: + if CL in specified_revlogs: + if not specified_revlogs[CL]: + msg = overwrite_msg % (b'--no-changelog', action.name) + ui.warn(msg) + msg_issued = True + else: + msg = select_msg % (b'changelog', action.name) + ui.status(msg) + if not ui.quiet: + msg_issued = 1 + touched_revlogs.add(CL) + if msg_issued >= 2: + ui.warn((b"\n")) + elif msg_issued >= 1: + ui.status((b"\n")) upgrade_op = upgrade_actions.UpgradeOperation( ui, @@ -117,7 +162,7 @@ repo.requirements, up_actions, removed_actions, - revlogs, + touched_revlogs, backup, )
--- a/mercurial/util.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/util.py Tue Jan 18 10:27:13 2022 +0100 @@ -57,7 +57,6 @@ hashutil, procutil, stringutil, - urlutil, ) if pycompat.TYPE_CHECKING: @@ -2991,54 +2990,6 @@ return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) -def getport(*args, **kwargs): - msg = b'getport(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.getport(*args, **kwargs) - - -def url(*args, **kwargs): - msg = b'url(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.url(*args, **kwargs) - - -def hasscheme(*args, **kwargs): - msg = b'hasscheme(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.hasscheme(*args, **kwargs) - - -def hasdriveletter(*args, **kwargs): - msg = b'hasdriveletter(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.hasdriveletter(*args, **kwargs) - - -def urllocalpath(*args, **kwargs): - msg = b'urllocalpath(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.urllocalpath(*args, **kwargs) - - -def checksafessh(*args, **kwargs): - msg = b'checksafessh(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.checksafessh(*args, **kwargs) - - -def hidepassword(*args, **kwargs): - msg = b'hidepassword(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.hidepassword(*args, **kwargs) - - -def removeauth(*args, **kwargs): - msg = b'removeauth(...) moved to mercurial.utils.urlutil' - nouideprecwarn(msg, b'6.0', stacklevel=2) - return urlutil.removeauth(*args, **kwargs) - - timecount = unitcountfn( (1, 1e3, _(b'%.0f s')), (100, 1, _(b'%.1f s')),
--- a/mercurial/utils/procutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/utils/procutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -75,7 +75,9 @@ return res +# pytype: disable=attribute-error io.BufferedIOBase.register(LineBufferedWrapper) +# pytype: enable=attribute-error def make_line_buffered(stream): @@ -114,7 +116,9 @@ return total_written +# pytype: disable=attribute-error io.IOBase.register(WriteAllWrapper) +# pytype: enable=attribute-error def _make_write_all(stream): @@ -738,6 +742,8 @@ start_new_session = False ensurestart = True + stdin = None + try: if stdin_bytes is None: stdin = subprocess.DEVNULL @@ -766,7 +772,8 @@ record_wait(255) raise finally: - if stdin_bytes is not None: + if stdin_bytes is not None and stdin is not None: + assert not isinstance(stdin, int) stdin.close() if not ensurestart: # Even though we're not waiting on the child process, @@ -847,6 +854,8 @@ return returncode = 255 + stdin = None + try: if record_wait is None: # Start a new session @@ -889,7 +898,8 @@ finally: # mission accomplished, this child needs to exit and not # continue the hg process here. - stdin.close() + if stdin is not None: + stdin.close() if record_wait is None: os._exit(returncode)
--- a/mercurial/utils/storageutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/utils/storageutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -112,6 +112,13 @@ 2-tuple of the source filename and node. """ if store.parents(node)[0] != sha1nodeconstants.nullid: + # When creating a copy or move we set filelog parents to null, + # because contents are probably unrelated and making a delta + # would not be useful. + # Conversely, if filelog p1 is non-null we know + # there is no copy metadata. + # In the presence of merges, this reasoning becomes invalid + # if we reorder parents. See tests/test-issue6528.t. return False meta = parsemeta(store.revision(node))[0]
--- a/mercurial/utils/stringutil.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/utils/stringutil.py Tue Jan 18 10:27:13 2022 +0100 @@ -264,7 +264,11 @@ q1 = rs.find(b'<', p1 + 1) if q1 < 0: q1 = len(rs) + # pytype: disable=wrong-arg-count + # TODO: figure out why pytype doesn't recognize the optional start + # arg elif q1 > p1 + 1 and rs.startswith(b'=', q1 - 1): + # pytype: enable=wrong-arg-count # backtrack for ' field=<' q0 = rs.rfind(b' ', p1 + 1, q1 - 1) if q0 < 0: @@ -692,11 +696,11 @@ s = bytes(s) # call underlying function of s.encode('string_escape') directly for # Python 3 compatibility - return codecs.escape_encode(s)[0] + return codecs.escape_encode(s)[0] # pytype: disable=module-attr def unescapestr(s): - return codecs.escape_decode(s)[0] + return codecs.escape_decode(s)[0] # pytype: disable=module-attr def forcebytestr(obj):
--- a/mercurial/wireprotoserver.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/wireprotoserver.py Tue Jan 18 10:27:13 2022 +0100 @@ -18,11 +18,9 @@ util, wireprototypes, wireprotov1server, - wireprotov2server, ) from .interfaces import util as interfaceutil from .utils import ( - cborutil, compression, stringutil, ) @@ -39,7 +37,6 @@ HGERRTYPE = b'application/hg-error' SSHV1 = wireprototypes.SSHV1 -SSHV2 = wireprototypes.SSHV2 def decodevaluefromheaders(req, headerprefix): @@ -244,97 +241,6 @@ return True -def _availableapis(repo): - apis = set() - - # Registered APIs are made available via config options of the name of - # the protocol. - for k, v in API_HANDLERS.items(): - section, option = v[b'config'] - if repo.ui.configbool(section, option): - apis.add(k) - - return apis - - -def handlewsgiapirequest(rctx, req, res, checkperm): - """Handle requests to /api/*.""" - assert req.dispatchparts[0] == b'api' - - repo = rctx.repo - - # This whole URL space is experimental for now. But we want to - # reserve the URL space. So, 404 all URLs if the feature isn't enabled. - if not repo.ui.configbool(b'experimental', b'web.apiserver'): - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'Experimental API server endpoint not enabled')) - return - - # The URL space is /api/<protocol>/*. The structure of URLs under varies - # by <protocol>. - - availableapis = _availableapis(repo) - - # Requests to /api/ list available APIs. 
- if req.dispatchparts == [b'api']: - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - lines = [ - _( - b'APIs can be accessed at /api/<name>, where <name> can be ' - b'one of the following:\n' - ) - ] - if availableapis: - lines.extend(sorted(availableapis)) - else: - lines.append(_(b'(no available APIs)\n')) - res.setbodybytes(b'\n'.join(lines)) - return - - proto = req.dispatchparts[1] - - if proto not in API_HANDLERS: - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes( - _(b'Unknown API: %s\nKnown APIs: %s') - % (proto, b', '.join(sorted(availableapis))) - ) - return - - if proto not in availableapis: - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'API %s not enabled\n') % proto) - return - - API_HANDLERS[proto][b'handler']( - rctx, req, res, checkperm, req.dispatchparts[2:] - ) - - -# Maps API name to metadata so custom API can be registered. -# Keys are: -# -# config -# Config option that controls whether service is enabled. -# handler -# Callable receiving (rctx, req, res, checkperm, urlparts) that is called -# when a request to this API is received. -# apidescriptor -# Callable receiving (req, repo) that is called to obtain an API -# descriptor for this service. The response must be serializable to CBOR. -API_HANDLERS = { - wireprotov2server.HTTP_WIREPROTO_V2: { - b'config': (b'experimental', b'web.api.http-v2'), - b'handler': wireprotov2server.handlehttpv2request, - b'apidescriptor': wireprotov2server.httpv2apidescriptor, - }, -} - - def _httpresponsetype(ui, proto, prefer_uncompressed): """Determine the appropriate response type and compression settings. @@ -371,55 +277,6 @@ return HGTYPE, util.compengines[b'zlib'], opts -def processcapabilitieshandshake(repo, req, res, proto): - """Called during a ?cmd=capabilities request. 
- - If the client is advertising support for a newer protocol, we send - a CBOR response with information about available services. If no - advertised services are available, we don't handle the request. - """ - # Fall back to old behavior unless the API server is enabled. - if not repo.ui.configbool(b'experimental', b'web.apiserver'): - return False - - clientapis = decodevaluefromheaders(req, b'X-HgUpgrade') - protocaps = decodevaluefromheaders(req, b'X-HgProto') - if not clientapis or not protocaps: - return False - - # We currently only support CBOR responses. - protocaps = set(protocaps.split(b' ')) - if b'cbor' not in protocaps: - return False - - descriptors = {} - - for api in sorted(set(clientapis.split()) & _availableapis(repo)): - handler = API_HANDLERS[api] - - descriptorfn = handler.get(b'apidescriptor') - if not descriptorfn: - continue - - descriptors[api] = descriptorfn(req, repo) - - v1caps = wireprotov1server.dispatch(repo, proto, b'capabilities') - assert isinstance(v1caps, wireprototypes.bytesresponse) - - m = { - # TODO allow this to be configurable. - b'apibase': b'api/', - b'apis': descriptors, - b'v1capabilities': v1caps.data, - } - - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'application/mercurial-cbor' - res.setbodybytes(b''.join(cborutil.streamencode(m))) - - return True - - def _callhttp(repo, req, res, proto, cmd): # Avoid cycle involving hg module. from .hgweb import common as hgwebcommon @@ -461,13 +318,6 @@ proto.checkperm(wireprotov1server.commands[cmd].permission) - # Possibly handle a modern client wanting to switch protocols. 
- if cmd == b'capabilities' and processcapabilitieshandshake( - repo, req, res, proto - ): - - return - rsp = wireprotov1server.dispatch(repo, proto, cmd) if isinstance(rsp, bytes): @@ -596,17 +446,6 @@ pass -class sshv2protocolhandler(sshv1protocolhandler): - """Protocol handler for version 2 of the SSH protocol.""" - - @property - def name(self): - return wireprototypes.SSHV2 - - def addcapabilities(self, repo, caps): - return caps - - def _runsshserver(ui, repo, fin, fout, ev): # This function operates like a state machine of sorts. The following # states are defined: @@ -616,19 +455,6 @@ # new lines. These commands are processed in this state, one command # after the other. # - # protov2-serving - # Server is in protocol version 2 serving mode. - # - # upgrade-initial - # The server is going to process an upgrade request. - # - # upgrade-v2-filter-legacy-handshake - # The protocol is being upgraded to version 2. The server is expecting - # the legacy handshake from version 1. - # - # upgrade-v2-finish - # The upgrade to version 2 of the protocol is imminent. - # # shutdown # The server is shutting down, possibly in reaction to a client event. # @@ -637,32 +463,9 @@ # protov1-serving -> shutdown # When server receives an empty request or encounters another # error. - # - # protov1-serving -> upgrade-initial - # An upgrade request line was seen. - # - # upgrade-initial -> upgrade-v2-filter-legacy-handshake - # Upgrade to version 2 in progress. Server is expecting to - # process a legacy handshake. - # - # upgrade-v2-filter-legacy-handshake -> shutdown - # Client did not fulfill upgrade handshake requirements. - # - # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish - # Client fulfilled version 2 upgrade requirements. Finishing that - # upgrade. - # - # upgrade-v2-finish -> protov2-serving - # Protocol upgrade to version 2 complete. Server can now speak protocol - # version 2. 
- # - # protov2-serving -> protov1-serving - # Ths happens by default since protocol version 2 is the same as - # version 1 except for the handshake. state = b'protov1-serving' proto = sshv1protocolhandler(ui, fin, fout) - protoswitched = False while not ev.is_set(): if state == b'protov1-serving': @@ -674,21 +477,6 @@ state = b'shutdown' continue - # It looks like a protocol upgrade request. Transition state to - # handle it. - if request.startswith(b'upgrade '): - if protoswitched: - _sshv1respondooberror( - fout, - ui.ferr, - b'cannot upgrade protocols multiple times', - ) - state = b'shutdown' - continue - - state = b'upgrade-initial' - continue - available = wireprotov1server.commands.commandavailable( request, proto ) @@ -724,108 +512,6 @@ b'wire protocol command: %s' % rsp ) - # For now, protocol version 2 serving just goes back to version 1. - elif state == b'protov2-serving': - state = b'protov1-serving' - continue - - elif state == b'upgrade-initial': - # We should never transition into this state if we've switched - # protocols. - assert not protoswitched - assert proto.name == wireprototypes.SSHV1 - - # Expected: upgrade <token> <capabilities> - # If we get something else, the request is malformed. It could be - # from a future client that has altered the upgrade line content. - # We treat this as an unknown command. - try: - token, caps = request.split(b' ')[1:] - except ValueError: - _sshv1respondbytes(fout, b'') - state = b'protov1-serving' - continue - - # Send empty response if we don't support upgrading protocols. - if not ui.configbool(b'experimental', b'sshserver.support-v2'): - _sshv1respondbytes(fout, b'') - state = b'protov1-serving' - continue - - try: - caps = urlreq.parseqs(caps) - except ValueError: - _sshv1respondbytes(fout, b'') - state = b'protov1-serving' - continue - - # We don't see an upgrade request to protocol version 2. Ignore - # the upgrade request. 
- wantedprotos = caps.get(b'proto', [b''])[0] - if SSHV2 not in wantedprotos: - _sshv1respondbytes(fout, b'') - state = b'protov1-serving' - continue - - # It looks like we can honor this upgrade request to protocol 2. - # Filter the rest of the handshake protocol request lines. - state = b'upgrade-v2-filter-legacy-handshake' - continue - - elif state == b'upgrade-v2-filter-legacy-handshake': - # Client should have sent legacy handshake after an ``upgrade`` - # request. Expected lines: - # - # hello - # between - # pairs 81 - # 0000...-0000... - - ok = True - for line in (b'hello', b'between', b'pairs 81'): - request = fin.readline()[:-1] - - if request != line: - _sshv1respondooberror( - fout, - ui.ferr, - b'malformed handshake protocol: missing %s' % line, - ) - ok = False - state = b'shutdown' - break - - if not ok: - continue - - request = fin.read(81) - if request != b'%s-%s' % (b'0' * 40, b'0' * 40): - _sshv1respondooberror( - fout, - ui.ferr, - b'malformed handshake protocol: ' - b'missing between argument value', - ) - state = b'shutdown' - continue - - state = b'upgrade-v2-finish' - continue - - elif state == b'upgrade-v2-finish': - # Send the upgrade response. - fout.write(b'upgraded %s %s\n' % (token, SSHV2)) - servercaps = wireprotov1server.capabilities(repo, proto) - rsp = b'capabilities: %s' % servercaps.data - fout.write(b'%d\n%s\n' % (len(rsp), rsp)) - fout.flush() - - proto = sshv2protocolhandler(ui, fin, fout) - protoswitched = True - - state = b'protov2-serving' - continue - elif state == b'shutdown': break
--- a/mercurial/wireprototypes.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/wireprototypes.py Tue Jan 18 10:27:13 2022 +0100 @@ -21,10 +21,6 @@ # Names of the SSH protocol implementations. SSHV1 = b'ssh-v1' -# These are advertised over the wire. Increment the counters at the end -# to reflect BC breakages. -SSHV2 = b'exp-ssh-v2-0003' -HTTP_WIREPROTO_V2 = b'exp-http-v2-0003' NARROWCAP = b'exp-narrow-1' ELLIPSESCAP1 = b'exp-ellipses-1' @@ -37,19 +33,10 @@ b'transport': b'ssh', b'version': 1, }, - SSHV2: { - b'transport': b'ssh', - # TODO mark as version 2 once all commands are implemented. - b'version': 1, - }, b'http-v1': { b'transport': b'http', b'version': 1, }, - HTTP_WIREPROTO_V2: { - b'transport': b'http', - b'version': 2, - }, }
--- a/mercurial/wireprotov1server.py Thu Dec 30 13:25:44 2021 +0100 +++ b/mercurial/wireprotov1server.py Tue Jan 18 10:27:13 2022 +0100 @@ -147,12 +147,6 @@ k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1 } - # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to - # SSHv2. - # TODO undo this hack when SSH is using the unified frame protocol. - if name == b'batch': - transports.add(wireprototypes.SSHV2) - if permission not in (b'push', b'pull'): raise error.ProgrammingError( b'invalid wire protocol permission; '
--- a/mercurial/wireprotov2peer.py Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,576 +0,0 @@ -# wireprotov2peer.py - client side code for wire protocol version 2 -# -# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -import threading - -from .i18n import _ -from . import ( - encoding, - error, - pycompat, - sslutil, - url as urlmod, - util, - wireprotoframing, - wireprototypes, -) -from .utils import cborutil - - -def formatrichmessage(atoms): - """Format an encoded message from the framing protocol.""" - - chunks = [] - - for atom in atoms: - msg = _(atom[b'msg']) - - if b'args' in atom: - msg = msg % tuple(atom[b'args']) - - chunks.append(msg) - - return b''.join(chunks) - - -SUPPORTED_REDIRECT_PROTOCOLS = { - b'http', - b'https', -} - -SUPPORTED_CONTENT_HASHES = { - b'sha1', - b'sha256', -} - - -def redirecttargetsupported(ui, target): - """Determine whether a redirect target entry is supported. - - ``target`` should come from the capabilities data structure emitted by - the server. 
- """ - if target.get(b'protocol') not in SUPPORTED_REDIRECT_PROTOCOLS: - ui.note( - _(b'(remote redirect target %s uses unsupported protocol: %s)\n') - % (target[b'name'], target.get(b'protocol', b'')) - ) - return False - - if target.get(b'snirequired') and not sslutil.hassni: - ui.note( - _(b'(redirect target %s requires SNI, which is unsupported)\n') - % target[b'name'] - ) - return False - - if b'tlsversions' in target: - tlsversions = set(target[b'tlsversions']) - supported = set() - - for v in sslutil.supportedprotocols: - assert v.startswith(b'tls') - supported.add(v[3:]) - - if not tlsversions & supported: - ui.note( - _( - b'(remote redirect target %s requires unsupported TLS ' - b'versions: %s)\n' - ) - % (target[b'name'], b', '.join(sorted(tlsversions))) - ) - return False - - ui.note(_(b'(remote redirect target %s is compatible)\n') % target[b'name']) - - return True - - -def supportedredirects(ui, apidescriptor): - """Resolve the "redirect" command request key given an API descriptor. - - Given an API descriptor returned by the server, returns a data structure - that can be used in hte "redirect" field of command requests to advertise - support for compatible redirect targets. - - Returns None if no redirect targets are remotely advertised or if none are - supported. - """ - if not apidescriptor or b'redirect' not in apidescriptor: - return None - - targets = [ - t[b'name'] - for t in apidescriptor[b'redirect'][b'targets'] - if redirecttargetsupported(ui, t) - ] - - hashes = [ - h - for h in apidescriptor[b'redirect'][b'hashes'] - if h in SUPPORTED_CONTENT_HASHES - ] - - return { - b'targets': targets, - b'hashes': hashes, - } - - -class commandresponse(object): - """Represents the response to a command request. - - Instances track the state of the command and hold its results. - - An external entity is required to update the state of the object when - events occur. 
- """ - - def __init__(self, requestid, command, fromredirect=False): - self.requestid = requestid - self.command = command - self.fromredirect = fromredirect - - # Whether all remote input related to this command has been - # received. - self._inputcomplete = False - - # We have a lock that is acquired when important object state is - # mutated. This is to prevent race conditions between 1 thread - # sending us new data and another consuming it. - self._lock = threading.RLock() - - # An event is set when state of the object changes. This event - # is waited on by the generator emitting objects. - self._serviceable = threading.Event() - - self._pendingevents = [] - self._pendingerror = None - self._decoder = cborutil.bufferingdecoder() - self._seeninitial = False - self._redirect = None - - def _oninputcomplete(self): - with self._lock: - self._inputcomplete = True - self._serviceable.set() - - def _onresponsedata(self, data): - available, readcount, wanted = self._decoder.decode(data) - - if not available: - return - - with self._lock: - for o in self._decoder.getavailable(): - if not self._seeninitial and not self.fromredirect: - self._handleinitial(o) - continue - - # We should never see an object after a content redirect, - # as the spec says the main status object containing the - # content redirect is the only object in the stream. Fail - # if we see a misbehaving server. 
- if self._redirect: - raise error.Abort( - _( - b'received unexpected response data ' - b'after content redirect; the remote is ' - b'buggy' - ) - ) - - self._pendingevents.append(o) - - self._serviceable.set() - - def _onerror(self, e): - self._pendingerror = e - - with self._lock: - self._serviceable.set() - - def _handleinitial(self, o): - self._seeninitial = True - if o[b'status'] == b'ok': - return - - elif o[b'status'] == b'redirect': - l = o[b'location'] - self._redirect = wireprototypes.alternatelocationresponse( - url=l[b'url'], - mediatype=l[b'mediatype'], - size=l.get(b'size'), - fullhashes=l.get(b'fullhashes'), - fullhashseed=l.get(b'fullhashseed'), - serverdercerts=l.get(b'serverdercerts'), - servercadercerts=l.get(b'servercadercerts'), - ) - return - - atoms = [{b'msg': o[b'error'][b'message']}] - if b'args' in o[b'error']: - atoms[0][b'args'] = o[b'error'][b'args'] - - raise error.RepoError(formatrichmessage(atoms)) - - def objects(self): - """Obtained decoded objects from this response. - - This is a generator of data structures that were decoded from the - command response. - - Obtaining the next member of the generator may block due to waiting - on external data to become available. - - If the server encountered an error in the middle of serving the data - or if another error occurred, an exception may be raised when - advancing the generator. - """ - while True: - # TODO this can infinite loop if self._inputcomplete is never - # set. We likely want to tie the lifetime of this object/state - # to that of the background thread receiving frames and updating - # our state. - self._serviceable.wait(1.0) - - if self._pendingerror: - raise self._pendingerror - - with self._lock: - self._serviceable.clear() - - # Make copies because objects could be mutated during - # iteration. 
- stop = self._inputcomplete - pending = list(self._pendingevents) - self._pendingevents[:] = [] - - for o in pending: - yield o - - if stop: - break - - -class clienthandler(object): - """Object to handle higher-level client activities. - - The ``clientreactor`` is used to hold low-level state about the frame-based - protocol, such as which requests and streams are active. This type is used - for higher-level operations, such as reading frames from a socket, exposing - and managing a higher-level primitive for representing command responses, - etc. This class is what peers should probably use to bridge wire activity - with the higher-level peer API. - """ - - def __init__( - self, ui, clientreactor, opener=None, requestbuilder=util.urlreq.request - ): - self._ui = ui - self._reactor = clientreactor - self._requests = {} - self._futures = {} - self._responses = {} - self._redirects = [] - self._frameseof = False - self._opener = opener or urlmod.opener(ui) - self._requestbuilder = requestbuilder - - def callcommand(self, command, args, f, redirect=None): - """Register a request to call a command. - - Returns an iterable of frames that should be sent over the wire. - """ - request, action, meta = self._reactor.callcommand( - command, args, redirect=redirect - ) - - if action != b'noop': - raise error.ProgrammingError(b'%s not yet supported' % action) - - rid = request.requestid - self._requests[rid] = request - self._futures[rid] = f - # TODO we need some kind of lifetime on response instances otherwise - # objects() may deadlock. - self._responses[rid] = commandresponse(rid, command) - - return iter(()) - - def flushcommands(self): - """Flush all queued commands. - - Returns an iterable of frames that should be sent over the wire. 
- """ - action, meta = self._reactor.flushcommands() - - if action != b'sendframes': - raise error.ProgrammingError(b'%s not yet supported' % action) - - return meta[b'framegen'] - - def readdata(self, framefh): - """Attempt to read data and do work. - - Returns None if no data was read. Presumably this means we're - done with all read I/O. - """ - if not self._frameseof: - frame = wireprotoframing.readframe(framefh) - if frame is None: - # TODO tell reactor? - self._frameseof = True - else: - self._ui.debug(b'received %r\n' % frame) - self._processframe(frame) - - # Also try to read the first redirect. - if self._redirects: - if not self._processredirect(*self._redirects[0]): - self._redirects.pop(0) - - if self._frameseof and not self._redirects: - return None - - return True - - def _processframe(self, frame): - """Process a single read frame.""" - - action, meta = self._reactor.onframerecv(frame) - - if action == b'error': - e = error.RepoError(meta[b'message']) - - if frame.requestid in self._responses: - self._responses[frame.requestid]._oninputcomplete() - - if frame.requestid in self._futures: - self._futures[frame.requestid].set_exception(e) - del self._futures[frame.requestid] - else: - raise e - - return - elif action == b'noop': - return - elif action == b'responsedata': - # Handled below. - pass - else: - raise error.ProgrammingError(b'action not handled: %s' % action) - - if frame.requestid not in self._requests: - raise error.ProgrammingError( - b'received frame for unknown request; this is either a bug in ' - b'the clientreactor not screening for this or this instance was ' - b'never told about this request: %r' % frame - ) - - response = self._responses[frame.requestid] - - if action == b'responsedata': - # Any failures processing this frame should bubble up to the - # future tracking the request. 
- try: - self._processresponsedata(frame, meta, response) - except BaseException as e: - # If an exception occurs before the future is resolved, - # fail the future. Otherwise, we stuff the exception on - # the response object so it can be raised during objects() - # iteration. If nothing is consuming objects(), we could - # silently swallow this exception. That's a risk we'll have to - # take. - if frame.requestid in self._futures: - self._futures[frame.requestid].set_exception(e) - del self._futures[frame.requestid] - response._oninputcomplete() - else: - response._onerror(e) - else: - raise error.ProgrammingError( - b'unhandled action from clientreactor: %s' % action - ) - - def _processresponsedata(self, frame, meta, response): - # This can raise. The caller can handle it. - response._onresponsedata(meta[b'data']) - - # We need to be careful about resolving futures prematurely. If a - # response is a redirect response, resolving the future before the - # redirect is processed would result in the consumer seeing an - # empty stream of objects, since they'd be consuming our - # response.objects() instead of the redirect's response.objects(). - # - # Our strategy is to not resolve/finish the request until either - # EOS occurs or until the initial response object is fully received. - - # Always react to eos. - if meta[b'eos']: - response._oninputcomplete() - del self._requests[frame.requestid] - - # Not EOS but we haven't decoded the initial response object yet. - # Return and wait for more data. - elif not response._seeninitial: - return - - # The specification says no objects should follow the initial/redirect - # object. So it should be safe to handle the redirect object if one is - # decoded, without having to wait for EOS. - if response._redirect: - self._followredirect(frame.requestid, response._redirect) - return - - # If the command has a decoder, we wait until all input has been - # received before resolving the future. 
Otherwise we resolve the - # future immediately. - if frame.requestid not in self._futures: - return - - if response.command not in COMMAND_DECODERS: - self._futures[frame.requestid].set_result(response.objects()) - del self._futures[frame.requestid] - elif response._inputcomplete: - decoded = COMMAND_DECODERS[response.command](response.objects()) - self._futures[frame.requestid].set_result(decoded) - del self._futures[frame.requestid] - - def _followredirect(self, requestid, redirect): - """Called to initiate redirect following for a request.""" - self._ui.note(_(b'(following redirect to %s)\n') % redirect.url) - - # TODO handle framed responses. - if redirect.mediatype != b'application/mercurial-cbor': - raise error.Abort( - _(b'cannot handle redirects for the %s media type') - % redirect.mediatype - ) - - if redirect.fullhashes: - self._ui.warn( - _( - b'(support for validating hashes on content ' - b'redirects not supported)\n' - ) - ) - - if redirect.serverdercerts or redirect.servercadercerts: - self._ui.warn( - _( - b'(support for pinning server certificates on ' - b'content redirects not supported)\n' - ) - ) - - headers = { - 'Accept': redirect.mediatype, - } - - req = self._requestbuilder(pycompat.strurl(redirect.url), None, headers) - - try: - res = self._opener.open(req) - except util.urlerr.httperror as e: - if e.code == 401: - raise error.Abort(_(b'authorization failed')) - raise - except util.httplib.HTTPException as e: - self._ui.debug(b'http error requesting %s\n' % req.get_full_url()) - self._ui.traceback() - raise IOError(None, e) - - urlmod.wrapresponse(res) - - # The existing response object is associated with frame data. Rather - # than try to normalize its state, just create a new object. 
- oldresponse = self._responses[requestid] - self._responses[requestid] = commandresponse( - requestid, oldresponse.command, fromredirect=True - ) - - self._redirects.append((requestid, res)) - - def _processredirect(self, rid, res): - """Called to continue processing a response from a redirect. - - Returns a bool indicating if the redirect is still serviceable. - """ - response = self._responses[rid] - - try: - data = res.read(32768) - response._onresponsedata(data) - - # We're at end of stream. - if not data: - response._oninputcomplete() - - if rid not in self._futures: - return bool(data) - - if response.command not in COMMAND_DECODERS: - self._futures[rid].set_result(response.objects()) - del self._futures[rid] - elif response._inputcomplete: - decoded = COMMAND_DECODERS[response.command](response.objects()) - self._futures[rid].set_result(decoded) - del self._futures[rid] - - return bool(data) - - except BaseException as e: - self._futures[rid].set_exception(e) - del self._futures[rid] - response._oninputcomplete() - return False - - -def decodebranchmap(objs): - # Response should be a single CBOR map of branch name to array of nodes. - bm = next(objs) - - return {encoding.tolocal(k): v for k, v in bm.items()} - - -def decodeheads(objs): - # Array of node bytestrings. - return next(objs) - - -def decodeknown(objs): - # Bytestring where each byte is a 0 or 1. - raw = next(objs) - - return [True if raw[i : i + 1] == b'1' else False for i in range(len(raw))] - - -def decodelistkeys(objs): - # Map with bytestring keys and values. - return next(objs) - - -def decodelookup(objs): - return next(objs) - - -def decodepushkey(objs): - return next(objs) - - -COMMAND_DECODERS = { - b'branchmap': decodebranchmap, - b'heads': decodeheads, - b'known': decodeknown, - b'listkeys': decodelistkeys, - b'lookup': decodelookup, - b'pushkey': decodepushkey, -}
--- a/mercurial/wireprotov2server.py Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1613 +0,0 @@ -# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> -# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -import collections -import contextlib - -from .i18n import _ -from .node import hex -from . import ( - discovery, - encoding, - error, - match as matchmod, - narrowspec, - pycompat, - streamclone, - templatefilters, - util, - wireprotoframing, - wireprototypes, -) -from .interfaces import util as interfaceutil -from .utils import ( - cborutil, - hashutil, - stringutil, -) - -FRAMINGTYPE = b'application/mercurial-exp-framing-0006' - -HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2 - -COMMANDS = wireprototypes.commanddict() - -# Value inserted into cache key computation function. Change the value to -# force new cache keys for every command request. This should be done when -# there is a change to how caching works, etc. -GLOBAL_CACHE_VERSION = 1 - - -def handlehttpv2request(rctx, req, res, checkperm, urlparts): - from .hgweb import common as hgwebcommon - - # URL space looks like: <permissions>/<command>, where <permission> can - # be ``ro`` or ``rw`` to signal read-only or read-write, respectively. - - # Root URL does nothing meaningful... yet. 
- if not urlparts: - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'HTTP version 2 API handler')) - return - - if len(urlparts) == 1: - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes( - _(b'do not know how to process %s\n') % req.dispatchpath - ) - return - - permission, command = urlparts[0:2] - - if permission not in (b'ro', b'rw'): - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'unknown permission: %s') % permission) - return - - if req.method != b'POST': - res.status = b'405 Method Not Allowed' - res.headers[b'Allow'] = b'POST' - res.setbodybytes(_(b'commands require POST requests')) - return - - # At some point we'll want to use our own API instead of recycling the - # behavior of version 1 of the wire protocol... - # TODO return reasonable responses - not responses that overload the - # HTTP status line message for error reporting. - try: - checkperm(rctx, req, b'pull' if permission == b'ro' else b'push') - except hgwebcommon.ErrorResponse as e: - res.status = hgwebcommon.statusmessage( - e.code, stringutil.forcebytestr(e) - ) - for k, v in e.headers: - res.headers[k] = v - res.setbodybytes(b'permission denied') - return - - # We have a special endpoint to reflect the request back at the client. - if command == b'debugreflect': - _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res) - return - - # Extra commands that we handle that aren't really wire protocol - # commands. Think extra hard before making this hackery available to - # extension. 
- extracommands = {b'multirequest'} - - if command not in COMMANDS and command not in extracommands: - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'unknown wire protocol command: %s\n') % command) - return - - repo = rctx.repo - ui = repo.ui - - proto = httpv2protocolhandler(req, ui) - - if ( - not COMMANDS.commandavailable(command, proto) - and command not in extracommands - ): - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'invalid wire protocol command: %s') % command) - return - - # TODO consider cases where proxies may add additional Accept headers. - if req.headers.get(b'Accept') != FRAMINGTYPE: - res.status = b'406 Not Acceptable' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes( - _(b'client MUST specify Accept header with value: %s\n') - % FRAMINGTYPE - ) - return - - if req.headers.get(b'Content-Type') != FRAMINGTYPE: - res.status = b'415 Unsupported Media Type' - # TODO we should send a response with appropriate media type, - # since client does Accept it. - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes( - _(b'client MUST send Content-Type header with value: %s\n') - % FRAMINGTYPE - ) - return - - _processhttpv2request(ui, repo, req, res, permission, command, proto) - - -def _processhttpv2reflectrequest(ui, repo, req, res): - """Reads unified frame protocol request and dumps out state to client. - - This special endpoint can be used to help debug the wire protocol. - - Instead of routing the request through the normal dispatch mechanism, - we instead read all frames, decode them, and feed them into our state - tracker. We then dump the log of all that activity back out to the - client. - """ - # Reflection APIs have a history of being abused, accidentally disclosing - # sensitive data, etc. So we have a config knob. 
- if not ui.configbool(b'experimental', b'web.api.debugreflect'): - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'debugreflect service not available')) - return - - # We assume we have a unified framing protocol request body. - - reactor = wireprotoframing.serverreactor(ui) - states = [] - - while True: - frame = wireprotoframing.readframe(req.bodyfh) - - if not frame: - states.append(b'received: <no frame>') - break - - states.append( - b'received: %d %d %d %s' - % (frame.typeid, frame.flags, frame.requestid, frame.payload) - ) - - action, meta = reactor.onframerecv(frame) - states.append(templatefilters.json((action, meta))) - - action, meta = reactor.oninputeof() - meta[b'action'] = action - states.append(templatefilters.json(meta)) - - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(b'\n'.join(states)) - - -def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto): - """Post-validation handler for HTTPv2 requests. - - Called when the HTTP request contains unified frame-based protocol - frames for evaluation. - """ - # TODO Some HTTP clients are full duplex and can receive data before - # the entire request is transmitted. Figure out a way to indicate support - # for that so we can opt into full duplex mode. - reactor = wireprotoframing.serverreactor(ui, deferoutput=True) - seencommand = False - - outstream = None - - while True: - frame = wireprotoframing.readframe(req.bodyfh) - if not frame: - break - - action, meta = reactor.onframerecv(frame) - - if action == b'wantframe': - # Need more data before we can do anything. - continue - elif action == b'runcommand': - # Defer creating output stream because we need to wait for - # protocol settings frames so proper encoding can be applied. 
- if not outstream: - outstream = reactor.makeoutputstream() - - sentoutput = _httpv2runcommand( - ui, - repo, - req, - res, - authedperm, - reqcommand, - reactor, - outstream, - meta, - issubsequent=seencommand, - ) - - if sentoutput: - return - - seencommand = True - - elif action == b'error': - # TODO define proper error mechanism. - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(meta[b'message'] + b'\n') - return - else: - raise error.ProgrammingError( - b'unhandled action from frame processor: %s' % action - ) - - action, meta = reactor.oninputeof() - if action == b'sendframes': - # We assume we haven't started sending the response yet. If we're - # wrong, the response type will raise an exception. - res.status = b'200 OK' - res.headers[b'Content-Type'] = FRAMINGTYPE - res.setbodygen(meta[b'framegen']) - elif action == b'noop': - pass - else: - raise error.ProgrammingError( - b'unhandled action from frame processor: %s' % action - ) - - -def _httpv2runcommand( - ui, - repo, - req, - res, - authedperm, - reqcommand, - reactor, - outstream, - command, - issubsequent, -): - """Dispatch a wire protocol command made from HTTPv2 requests. - - The authenticated permission (``authedperm``) along with the original - command from the URL (``reqcommand``) are passed in. - """ - # We already validated that the session has permissions to perform the - # actions in ``authedperm``. In the unified frame protocol, the canonical - # command to run is expressed in a frame. However, the URL also requested - # to run a specific command. We need to be careful that the command we - # run doesn't have permissions requirements greater than what was granted - # by ``authedperm``. - # - # Our rule for this is we only allow one command per HTTP request and - # that command must match the command in the URL. However, we make - # an exception for the ``multirequest`` URL. This URL is allowed to - # execute multiple commands. 
We double check permissions of each command - # as it is invoked to ensure there is no privilege escalation. - # TODO consider allowing multiple commands to regular command URLs - # iff each command is the same. - - proto = httpv2protocolhandler(req, ui, args=command[b'args']) - - if reqcommand == b'multirequest': - if not COMMANDS.commandavailable(command[b'command'], proto): - # TODO proper error mechanism - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes( - _(b'wire protocol command not available: %s') - % command[b'command'] - ) - return True - - # TODO don't use assert here, since it may be elided by -O. - assert authedperm in (b'ro', b'rw') - wirecommand = COMMANDS[command[b'command']] - assert wirecommand.permission in (b'push', b'pull') - - if authedperm == b'ro' and wirecommand.permission != b'pull': - # TODO proper error mechanism - res.status = b'403 Forbidden' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes( - _(b'insufficient permissions to execute command: %s') - % command[b'command'] - ) - return True - - # TODO should we also call checkperm() here? Maybe not if we're going - # to overhaul that API. The granted scope from the URL check should - # be good enough. - - else: - # Don't allow multiple commands outside of ``multirequest`` URL. 
- if issubsequent: - # TODO proper error mechanism - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes( - _(b'multiple commands cannot be issued to this URL') - ) - return True - - if reqcommand != command[b'command']: - # TODO define proper error mechanism - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(_(b'command in frame must match command in URL')) - return True - - res.status = b'200 OK' - res.headers[b'Content-Type'] = FRAMINGTYPE - - try: - objs = dispatch(repo, proto, command[b'command'], command[b'redirect']) - - action, meta = reactor.oncommandresponsereadyobjects( - outstream, command[b'requestid'], objs - ) - - except error.WireprotoCommandError as e: - action, meta = reactor.oncommanderror( - outstream, command[b'requestid'], e.message, e.messageargs - ) - - except Exception as e: - action, meta = reactor.onservererror( - outstream, - command[b'requestid'], - _(b'exception when invoking command: %s') - % stringutil.forcebytestr(e), - ) - - if action == b'sendframes': - res.setbodygen(meta[b'framegen']) - return True - elif action == b'noop': - return False - else: - raise error.ProgrammingError( - b'unhandled event from reactor: %s' % action - ) - - -def getdispatchrepo(repo, proto, command): - viewconfig = repo.ui.config(b'server', b'view') - return repo.filtered(viewconfig) - - -def dispatch(repo, proto, command, redirect): - """Run a wire protocol command. - - Returns an iterable of objects that will be sent to the client. - """ - repo = getdispatchrepo(repo, proto, command) - - entry = COMMANDS[command] - func = entry.func - spec = entry.args - - args = proto.getargs(spec) - - # There is some duplicate boilerplate code here for calling the command and - # emitting objects. It is either that or a lot of indented code that looks - # like a pyramid (since there are a lot of code paths that result in not - # using the cacher). 
- callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args)) - - # Request is not cacheable. Don't bother instantiating a cacher. - if not entry.cachekeyfn: - for o in callcommand(): - yield o - return - - if redirect: - redirecttargets = redirect[b'targets'] - redirecthashes = redirect[b'hashes'] - else: - redirecttargets = [] - redirecthashes = [] - - cacher = makeresponsecacher( - repo, - proto, - command, - args, - cborutil.streamencode, - redirecttargets=redirecttargets, - redirecthashes=redirecthashes, - ) - - # But we have no cacher. Do default handling. - if not cacher: - for o in callcommand(): - yield o - return - - with cacher: - cachekey = entry.cachekeyfn( - repo, proto, cacher, **pycompat.strkwargs(args) - ) - - # No cache key or the cacher doesn't like it. Do default handling. - if cachekey is None or not cacher.setcachekey(cachekey): - for o in callcommand(): - yield o - return - - # Serve it from the cache, if possible. - cached = cacher.lookup() - - if cached: - for o in cached[b'objs']: - yield o - return - - # Else call the command and feed its output into the cacher, allowing - # the cacher to buffer/mutate objects as it desires. - for o in callcommand(): - for o in cacher.onobject(o): - yield o - - for o in cacher.onfinished(): - yield o - - -@interfaceutil.implementer(wireprototypes.baseprotocolhandler) -class httpv2protocolhandler(object): - def __init__(self, req, ui, args=None): - self._req = req - self._ui = ui - self._args = args - - @property - def name(self): - return HTTP_WIREPROTO_V2 - - def getargs(self, args): - # First look for args that were passed but aren't registered on this - # command. - extra = set(self._args) - set(args) - if extra: - raise error.WireprotoCommandError( - b'unsupported argument to command: %s' - % b', '.join(sorted(extra)) - ) - - # And look for required arguments that are missing. 
- missing = {a for a in args if args[a][b'required']} - set(self._args) - - if missing: - raise error.WireprotoCommandError( - b'missing required arguments: %s' % b', '.join(sorted(missing)) - ) - - # Now derive the arguments to pass to the command, taking into - # account the arguments specified by the client. - data = {} - for k, meta in sorted(args.items()): - # This argument wasn't passed by the client. - if k not in self._args: - data[k] = meta[b'default']() - continue - - v = self._args[k] - - # Sets may be expressed as lists. Silently normalize. - if meta[b'type'] == b'set' and isinstance(v, list): - v = set(v) - - # TODO consider more/stronger type validation. - - data[k] = v - - return data - - def getprotocaps(self): - # Protocol capabilities are currently not implemented for HTTP V2. - return set() - - def getpayload(self): - raise NotImplementedError - - @contextlib.contextmanager - def mayberedirectstdio(self): - raise NotImplementedError - - def client(self): - raise NotImplementedError - - def addcapabilities(self, repo, caps): - return caps - - def checkperm(self, perm): - raise NotImplementedError - - -def httpv2apidescriptor(req, repo): - proto = httpv2protocolhandler(req, repo.ui) - - return _capabilitiesv2(repo, proto) - - -def _capabilitiesv2(repo, proto): - """Obtain the set of capabilities for version 2 transports. - - These capabilities are distinct from the capabilities for version 1 - transports. - """ - caps = { - b'commands': {}, - b'framingmediatypes': [FRAMINGTYPE], - b'pathfilterprefixes': set(narrowspec.VALID_PREFIXES), - } - - for command, entry in COMMANDS.items(): - args = {} - - for arg, meta in entry.args.items(): - args[arg] = { - # TODO should this be a normalized type using CBOR's - # terminology? 
- b'type': meta[b'type'], - b'required': meta[b'required'], - } - - if not meta[b'required']: - args[arg][b'default'] = meta[b'default']() - - if meta[b'validvalues']: - args[arg][b'validvalues'] = meta[b'validvalues'] - - # TODO this type of check should be defined in a per-command callback. - if ( - command == b'rawstorefiledata' - and not streamclone.allowservergeneration(repo) - ): - continue - - caps[b'commands'][command] = { - b'args': args, - b'permissions': [entry.permission], - } - - if entry.extracapabilitiesfn: - extracaps = entry.extracapabilitiesfn(repo, proto) - caps[b'commands'][command].update(extracaps) - - caps[b'rawrepoformats'] = sorted(repo.requirements & repo.supportedformats) - - targets = getadvertisedredirecttargets(repo, proto) - if targets: - caps[b'redirect'] = { - b'targets': [], - b'hashes': [b'sha256', b'sha1'], - } - - for target in targets: - entry = { - b'name': target[b'name'], - b'protocol': target[b'protocol'], - b'uris': target[b'uris'], - } - - for key in (b'snirequired', b'tlsversions'): - if key in target: - entry[key] = target[key] - - caps[b'redirect'][b'targets'].append(entry) - - return proto.addcapabilities(repo, caps) - - -def getadvertisedredirecttargets(repo, proto): - """Obtain a list of content redirect targets. - - Returns a list containing potential redirect targets that will be - advertised in capabilities data. Each dict MUST have the following - keys: - - name - The name of this redirect target. This is the identifier clients use - to refer to a target. It is transferred as part of every command - request. - - protocol - Network protocol used by this target. Typically this is the string - in front of the ``://`` in a URL. e.g. ``https``. - - uris - List of representative URIs for this target. Clients can use the - URIs to test parsing for compatibility or for ordering preference - for which target to use. 
- - The following optional keys are recognized: - - snirequired - Bool indicating if Server Name Indication (SNI) is required to - connect to this target. - - tlsversions - List of bytes indicating which TLS versions are supported by this - target. - - By default, clients reflect the target order advertised by servers - and servers will use the first client-advertised target when picking - a redirect target. So targets should be advertised in the order the - server prefers they be used. - """ - return [] - - -def wireprotocommand( - name, - args=None, - permission=b'push', - cachekeyfn=None, - extracapabilitiesfn=None, -): - """Decorator to declare a wire protocol command. - - ``name`` is the name of the wire protocol command being provided. - - ``args`` is a dict defining arguments accepted by the command. Keys are - the argument name. Values are dicts with the following keys: - - ``type`` - The argument data type. Must be one of the following string - literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``, - or ``bool``. - - ``default`` - A callable returning the default value for this argument. If not - specified, ``None`` will be the default value. - - ``example`` - An example value for this argument. - - ``validvalues`` - Set of recognized values for this argument. - - ``permission`` defines the permission type needed to run this command. - Can be ``push`` or ``pull``. These roughly map to read-write and read-only, - respectively. Default is to assume command requires ``push`` permissions - because otherwise commands not declaring their permissions could modify - a repository that is supposed to be read-only. - - ``cachekeyfn`` defines an optional callable that can derive the - cache key for this request. - - ``extracapabilitiesfn`` defines an optional callable that defines extra - command capabilities/parameters that are advertised next to the command - in the capabilities data structure describing the server. 
The callable - receives as arguments the repository and protocol objects. It returns - a dict of extra fields to add to the command descriptor. - - Wire protocol commands are generators of objects to be serialized and - sent to the client. - - If a command raises an uncaught exception, this will be translated into - a command error. - - All commands can opt in to being cacheable by defining a function - (``cachekeyfn``) that is called to derive a cache key. This function - receives the same arguments as the command itself plus a ``cacher`` - argument containing the active cacher for the request and returns a bytes - containing the key in a cache the response to this command may be cached - under. - """ - transports = { - k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 2 - } - - if permission not in (b'push', b'pull'): - raise error.ProgrammingError( - b'invalid wire protocol permission; ' - b'got %s; expected "push" or "pull"' % permission - ) - - if args is None: - args = {} - - if not isinstance(args, dict): - raise error.ProgrammingError( - b'arguments for version 2 commands must be declared as dicts' - ) - - for arg, meta in args.items(): - if arg == b'*': - raise error.ProgrammingError( - b'* argument name not allowed on version 2 commands' - ) - - if not isinstance(meta, dict): - raise error.ProgrammingError( - b'arguments for version 2 commands ' - b'must declare metadata as a dict' - ) - - if b'type' not in meta: - raise error.ProgrammingError( - b'%s argument for command %s does not ' - b'declare type field' % (arg, name) - ) - - if meta[b'type'] not in ( - b'bytes', - b'int', - b'list', - b'dict', - b'set', - b'bool', - ): - raise error.ProgrammingError( - b'%s argument for command %s has ' - b'illegal type: %s' % (arg, name, meta[b'type']) - ) - - if b'example' not in meta: - raise error.ProgrammingError( - b'%s argument for command %s does not ' - b'declare example field' % (arg, name) - ) - - meta[b'required'] = b'default' not in 
meta - - meta.setdefault(b'default', lambda: None) - meta.setdefault(b'validvalues', None) - - def register(func): - if name in COMMANDS: - raise error.ProgrammingError( - b'%s command already registered for version 2' % name - ) - - COMMANDS[name] = wireprototypes.commandentry( - func, - args=args, - transports=transports, - permission=permission, - cachekeyfn=cachekeyfn, - extracapabilitiesfn=extracapabilitiesfn, - ) - - return func - - return register - - -def makecommandcachekeyfn(command, localversion=None, allargs=False): - """Construct a cache key derivation function with common features. - - By default, the cache key is a hash of: - - * The command name. - * A global cache version number. - * A local cache version number (passed via ``localversion``). - * All the arguments passed to the command. - * The media type used. - * Wire protocol version string. - * The repository path. - """ - if not allargs: - raise error.ProgrammingError( - b'only allargs=True is currently supported' - ) - - if localversion is None: - raise error.ProgrammingError(b'must set localversion argument value') - - def cachekeyfn(repo, proto, cacher, **args): - spec = COMMANDS[command] - - # Commands that mutate the repo can not be cached. - if spec.permission == b'push': - return None - - # TODO config option to disable caching. - - # Our key derivation strategy is to construct a data structure - # holding everything that could influence cacheability and to hash - # the CBOR representation of that. Using CBOR seems like it might - # be overkill. However, simpler hashing mechanisms are prone to - # duplicate input issues. e.g. if you just concatenate two values, - # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides - # "padding" between values and prevents these problems. - - # Seed the hash with various data. - state = { - # To invalidate all cache keys. - b'globalversion': GLOBAL_CACHE_VERSION, - # More granular cache key invalidation. 
- b'localversion': localversion, - # Cache keys are segmented by command. - b'command': command, - # Throw in the media type and API version strings so changes - # to exchange semantics invalid cache. - b'mediatype': FRAMINGTYPE, - b'version': HTTP_WIREPROTO_V2, - # So same requests for different repos don't share cache keys. - b'repo': repo.root, - } - - # The arguments passed to us will have already been normalized. - # Default values will be set, etc. This is important because it - # means that it doesn't matter if clients send an explicit argument - # or rely on the default value: it will all normalize to the same - # set of arguments on the server and therefore the same cache key. - # - # Arguments by their very nature must support being encoded to CBOR. - # And the CBOR encoder is deterministic. So we hash the arguments - # by feeding the CBOR of their representation into the hasher. - if allargs: - state[b'args'] = pycompat.byteskwargs(args) - - cacher.adjustcachekeystate(state) - - hasher = hashutil.sha1() - for chunk in cborutil.streamencode(state): - hasher.update(chunk) - - return pycompat.sysbytes(hasher.hexdigest()) - - return cachekeyfn - - -def makeresponsecacher( - repo, proto, command, args, objencoderfn, redirecttargets, redirecthashes -): - """Construct a cacher for a cacheable command. - - Returns an ``iwireprotocolcommandcacher`` instance. - - Extensions can monkeypatch this function to provide custom caching - backends. 
- """ - return None - - -def resolvenodes(repo, revisions): - """Resolve nodes from a revisions specifier data structure.""" - cl = repo.changelog - clhasnode = cl.hasnode - - seen = set() - nodes = [] - - if not isinstance(revisions, list): - raise error.WireprotoCommandError( - b'revisions must be defined as an array' - ) - - for spec in revisions: - if b'type' not in spec: - raise error.WireprotoCommandError( - b'type key not present in revision specifier' - ) - - typ = spec[b'type'] - - if typ == b'changesetexplicit': - if b'nodes' not in spec: - raise error.WireprotoCommandError( - b'nodes key not present in changesetexplicit revision ' - b'specifier' - ) - - for node in spec[b'nodes']: - if node not in seen: - nodes.append(node) - seen.add(node) - - elif typ == b'changesetexplicitdepth': - for key in (b'nodes', b'depth'): - if key not in spec: - raise error.WireprotoCommandError( - b'%s key not present in changesetexplicitdepth revision ' - b'specifier', - (key,), - ) - - for rev in repo.revs( - b'ancestors(%ln, %s)', spec[b'nodes'], spec[b'depth'] - 1 - ): - node = cl.node(rev) - - if node not in seen: - nodes.append(node) - seen.add(node) - - elif typ == b'changesetdagrange': - for key in (b'roots', b'heads'): - if key not in spec: - raise error.WireprotoCommandError( - b'%s key not present in changesetdagrange revision ' - b'specifier', - (key,), - ) - - if not spec[b'heads']: - raise error.WireprotoCommandError( - b'heads key in changesetdagrange cannot be empty' - ) - - if spec[b'roots']: - common = [n for n in spec[b'roots'] if clhasnode(n)] - else: - common = [repo.nullid] - - for n in discovery.outgoing(repo, common, spec[b'heads']).missing: - if n not in seen: - nodes.append(n) - seen.add(n) - - else: - raise error.WireprotoCommandError( - b'unknown revision specifier type: %s', (typ,) - ) - - return nodes - - -@wireprotocommand(b'branchmap', permission=b'pull') -def branchmapv2(repo, proto): - yield { - encoding.fromlocal(k): v - for k, v in 
pycompat.iteritems(repo.branchmap()) - } - - -@wireprotocommand(b'capabilities', permission=b'pull') -def capabilitiesv2(repo, proto): - yield _capabilitiesv2(repo, proto) - - -@wireprotocommand( - b'changesetdata', - args={ - b'revisions': { - b'type': b'list', - b'example': [ - { - b'type': b'changesetexplicit', - b'nodes': [b'abcdef...'], - } - ], - }, - b'fields': { - b'type': b'set', - b'default': set, - b'example': {b'parents', b'revision'}, - b'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'}, - }, - }, - permission=b'pull', -) -def changesetdata(repo, proto, revisions, fields): - # TODO look for unknown fields and abort when they can't be serviced. - # This could probably be validated by dispatcher using validvalues. - - cl = repo.changelog - outgoing = resolvenodes(repo, revisions) - publishing = repo.publishing() - - if outgoing: - repo.hook(b'preoutgoing', throw=True, source=b'serve') - - yield { - b'totalitems': len(outgoing), - } - - # The phases of nodes already transferred to the client may have changed - # since the client last requested data. We send phase-only records - # for these revisions, if requested. - # TODO actually do this. We'll probably want to emit phase heads - # in the ancestry set of the outgoing revisions. This will ensure - # that phase updates within that set are seen. - if b'phase' in fields: - pass - - nodebookmarks = {} - for mark, node in repo._bookmarks.items(): - nodebookmarks.setdefault(node, set()).add(mark) - - # It is already topologically sorted by revision number. 
- for node in outgoing: - d = { - b'node': node, - } - - if b'parents' in fields: - d[b'parents'] = cl.parents(node) - - if b'phase' in fields: - if publishing: - d[b'phase'] = b'public' - else: - ctx = repo[node] - d[b'phase'] = ctx.phasestr() - - if b'bookmarks' in fields and node in nodebookmarks: - d[b'bookmarks'] = sorted(nodebookmarks[node]) - del nodebookmarks[node] - - followingmeta = [] - followingdata = [] - - if b'revision' in fields: - revisiondata = cl.revision(node) - followingmeta.append((b'revision', len(revisiondata))) - followingdata.append(revisiondata) - - # TODO make it possible for extensions to wrap a function or register - # a handler to service custom fields. - - if followingmeta: - d[b'fieldsfollowing'] = followingmeta - - yield d - - for extra in followingdata: - yield extra - - # If requested, send bookmarks from nodes that didn't have revision - # data sent so receiver is aware of any bookmark updates. - if b'bookmarks' in fields: - for node, marks in sorted(pycompat.iteritems(nodebookmarks)): - yield { - b'node': node, - b'bookmarks': sorted(marks), - } - - -class FileAccessError(Exception): - """Represents an error accessing a specific file.""" - - def __init__(self, path, msg, args): - self.path = path - self.msg = msg - self.args = args - - -def getfilestore(repo, proto, path): - """Obtain a file storage object for use with wire protocol. - - Exists as a standalone function so extensions can monkeypatch to add - access control. - """ - # This seems to work even if the file doesn't exist. So catch - # "empty" files and return an error. 
- fl = repo.file(path) - - if not len(fl): - raise FileAccessError(path, b'unknown file: %s', (path,)) - - return fl - - -def emitfilerevisions(repo, path, revisions, linknodes, fields): - for revision in revisions: - d = { - b'node': revision.node, - } - - if b'parents' in fields: - d[b'parents'] = [revision.p1node, revision.p2node] - - if b'linknode' in fields: - d[b'linknode'] = linknodes[revision.node] - - followingmeta = [] - followingdata = [] - - if b'revision' in fields: - if revision.revision is not None: - followingmeta.append((b'revision', len(revision.revision))) - followingdata.append(revision.revision) - else: - d[b'deltabasenode'] = revision.basenode - followingmeta.append((b'delta', len(revision.delta))) - followingdata.append(revision.delta) - - if followingmeta: - d[b'fieldsfollowing'] = followingmeta - - yield d - - for extra in followingdata: - yield extra - - -def makefilematcher(repo, pathfilter): - """Construct a matcher from a path filter dict.""" - - # Validate values. - if pathfilter: - for key in (b'include', b'exclude'): - for pattern in pathfilter.get(key, []): - if not pattern.startswith((b'path:', b'rootfilesin:')): - raise error.WireprotoCommandError( - b'%s pattern must begin with `path:` or `rootfilesin:`; ' - b'got %s', - (key, pattern), - ) - - if pathfilter: - matcher = matchmod.match( - repo.root, - b'', - include=pathfilter.get(b'include', []), - exclude=pathfilter.get(b'exclude', []), - ) - else: - matcher = matchmod.match(repo.root, b'') - - # Requested patterns could include files not in the local store. So - # filter those out. 
- return repo.narrowmatch(matcher) - - -@wireprotocommand( - b'filedata', - args={ - b'haveparents': { - b'type': b'bool', - b'default': lambda: False, - b'example': True, - }, - b'nodes': { - b'type': b'list', - b'example': [b'0123456...'], - }, - b'fields': { - b'type': b'set', - b'default': set, - b'example': {b'parents', b'revision'}, - b'validvalues': {b'parents', b'revision', b'linknode'}, - }, - b'path': { - b'type': b'bytes', - b'example': b'foo.txt', - }, - }, - permission=b'pull', - # TODO censoring a file revision won't invalidate the cache. - # Figure out a way to take censoring into account when deriving - # the cache key. - cachekeyfn=makecommandcachekeyfn(b'filedata', 1, allargs=True), -) -def filedata(repo, proto, haveparents, nodes, fields, path): - # TODO this API allows access to file revisions that are attached to - # secret changesets. filesdata does not have this problem. Maybe this - # API should be deleted? - - try: - # Extensions may wish to access the protocol handler. - store = getfilestore(repo, proto, path) - except FileAccessError as e: - raise error.WireprotoCommandError(e.msg, e.args) - - clnode = repo.changelog.node - linknodes = {} - - # Validate requested nodes. - for node in nodes: - try: - store.rev(node) - except error.LookupError: - raise error.WireprotoCommandError( - b'unknown file node: %s', (hex(node),) - ) - - # TODO by creating the filectx against a specific file revision - # instead of changeset, linkrev() is always used. This is wrong for - # cases where linkrev() may refer to a hidden changeset. But since this - # API doesn't know anything about changesets, we're not sure how to - # disambiguate the linknode. Perhaps we should delete this API? 
- fctx = repo.filectx(path, fileid=node) - linknodes[node] = clnode(fctx.introrev()) - - revisions = store.emitrevisions( - nodes, - revisiondata=b'revision' in fields, - assumehaveparentrevisions=haveparents, - ) - - yield { - b'totalitems': len(nodes), - } - - for o in emitfilerevisions(repo, path, revisions, linknodes, fields): - yield o - - -def filesdatacapabilities(repo, proto): - batchsize = repo.ui.configint( - b'experimental', b'server.filesdata.recommended-batch-size' - ) - return { - b'recommendedbatchsize': batchsize, - } - - -@wireprotocommand( - b'filesdata', - args={ - b'haveparents': { - b'type': b'bool', - b'default': lambda: False, - b'example': True, - }, - b'fields': { - b'type': b'set', - b'default': set, - b'example': {b'parents', b'revision'}, - b'validvalues': { - b'firstchangeset', - b'linknode', - b'parents', - b'revision', - }, - }, - b'pathfilter': { - b'type': b'dict', - b'default': lambda: None, - b'example': {b'include': [b'path:tests']}, - }, - b'revisions': { - b'type': b'list', - b'example': [ - { - b'type': b'changesetexplicit', - b'nodes': [b'abcdef...'], - } - ], - }, - }, - permission=b'pull', - # TODO censoring a file revision won't invalidate the cache. - # Figure out a way to take censoring into account when deriving - # the cache key. - cachekeyfn=makecommandcachekeyfn(b'filesdata', 1, allargs=True), - extracapabilitiesfn=filesdatacapabilities, -) -def filesdata(repo, proto, haveparents, fields, pathfilter, revisions): - # TODO This should operate on a repo that exposes obsolete changesets. There - # is a race between a client making a push that obsoletes a changeset and - # another client fetching files data for that changeset. If a client has a - # changeset, it should probably be allowed to access files data for that - # changeset. 
- - outgoing = resolvenodes(repo, revisions) - filematcher = makefilematcher(repo, pathfilter) - - # path -> {fnode: linknode} - fnodes = collections.defaultdict(dict) - - # We collect the set of relevant file revisions by iterating the changeset - # revisions and either walking the set of files recorded in the changeset - # or by walking the manifest at that revision. There is probably room for a - # storage-level API to request this data, as it can be expensive to compute - # and would benefit from caching or alternate storage from what revlogs - # provide. - for node in outgoing: - ctx = repo[node] - mctx = ctx.manifestctx() - md = mctx.read() - - if haveparents: - checkpaths = ctx.files() - else: - checkpaths = md.keys() - - for path in checkpaths: - fnode = md[path] - - if path in fnodes and fnode in fnodes[path]: - continue - - if not filematcher(path): - continue - - fnodes[path].setdefault(fnode, node) - - yield { - b'totalpaths': len(fnodes), - b'totalitems': sum(len(v) for v in fnodes.values()), - } - - for path, filenodes in sorted(fnodes.items()): - try: - store = getfilestore(repo, proto, path) - except FileAccessError as e: - raise error.WireprotoCommandError(e.msg, e.args) - - yield { - b'path': path, - b'totalitems': len(filenodes), - } - - revisions = store.emitrevisions( - filenodes.keys(), - revisiondata=b'revision' in fields, - assumehaveparentrevisions=haveparents, - ) - - for o in emitfilerevisions(repo, path, revisions, filenodes, fields): - yield o - - -@wireprotocommand( - b'heads', - args={ - b'publiconly': { - b'type': b'bool', - b'default': lambda: False, - b'example': False, - }, - }, - permission=b'pull', -) -def headsv2(repo, proto, publiconly): - if publiconly: - repo = repo.filtered(b'immutable') - - yield repo.heads() - - -@wireprotocommand( - b'known', - args={ - b'nodes': { - b'type': b'list', - b'default': list, - b'example': [b'deadbeef'], - }, - }, - permission=b'pull', -) -def knownv2(repo, proto, nodes): - result = 
b''.join(b'1' if n else b'0' for n in repo.known(nodes)) - yield result - - -@wireprotocommand( - b'listkeys', - args={ - b'namespace': { - b'type': b'bytes', - b'example': b'ns', - }, - }, - permission=b'pull', -) -def listkeysv2(repo, proto, namespace): - keys = repo.listkeys(encoding.tolocal(namespace)) - keys = { - encoding.fromlocal(k): encoding.fromlocal(v) - for k, v in pycompat.iteritems(keys) - } - - yield keys - - -@wireprotocommand( - b'lookup', - args={ - b'key': { - b'type': b'bytes', - b'example': b'foo', - }, - }, - permission=b'pull', -) -def lookupv2(repo, proto, key): - key = encoding.tolocal(key) - - # TODO handle exception. - node = repo.lookup(key) - - yield node - - -def manifestdatacapabilities(repo, proto): - batchsize = repo.ui.configint( - b'experimental', b'server.manifestdata.recommended-batch-size' - ) - - return { - b'recommendedbatchsize': batchsize, - } - - -@wireprotocommand( - b'manifestdata', - args={ - b'nodes': { - b'type': b'list', - b'example': [b'0123456...'], - }, - b'haveparents': { - b'type': b'bool', - b'default': lambda: False, - b'example': True, - }, - b'fields': { - b'type': b'set', - b'default': set, - b'example': {b'parents', b'revision'}, - b'validvalues': {b'parents', b'revision'}, - }, - b'tree': { - b'type': b'bytes', - b'example': b'', - }, - }, - permission=b'pull', - cachekeyfn=makecommandcachekeyfn(b'manifestdata', 1, allargs=True), - extracapabilitiesfn=manifestdatacapabilities, -) -def manifestdata(repo, proto, haveparents, nodes, fields, tree): - store = repo.manifestlog.getstorage(tree) - - # Validate the node is known and abort on unknown revisions. 
- for node in nodes: - try: - store.rev(node) - except error.LookupError: - raise error.WireprotoCommandError(b'unknown node: %s', (node,)) - - revisions = store.emitrevisions( - nodes, - revisiondata=b'revision' in fields, - assumehaveparentrevisions=haveparents, - ) - - yield { - b'totalitems': len(nodes), - } - - for revision in revisions: - d = { - b'node': revision.node, - } - - if b'parents' in fields: - d[b'parents'] = [revision.p1node, revision.p2node] - - followingmeta = [] - followingdata = [] - - if b'revision' in fields: - if revision.revision is not None: - followingmeta.append((b'revision', len(revision.revision))) - followingdata.append(revision.revision) - else: - d[b'deltabasenode'] = revision.basenode - followingmeta.append((b'delta', len(revision.delta))) - followingdata.append(revision.delta) - - if followingmeta: - d[b'fieldsfollowing'] = followingmeta - - yield d - - for extra in followingdata: - yield extra - - -@wireprotocommand( - b'pushkey', - args={ - b'namespace': { - b'type': b'bytes', - b'example': b'ns', - }, - b'key': { - b'type': b'bytes', - b'example': b'key', - }, - b'old': { - b'type': b'bytes', - b'example': b'old', - }, - b'new': { - b'type': b'bytes', - b'example': b'new', - }, - }, - permission=b'push', -) -def pushkeyv2(repo, proto, namespace, key, old, new): - # TODO handle ui output redirection - yield repo.pushkey( - encoding.tolocal(namespace), - encoding.tolocal(key), - encoding.tolocal(old), - encoding.tolocal(new), - ) - - -@wireprotocommand( - b'rawstorefiledata', - args={ - b'files': { - b'type': b'list', - b'example': [b'changelog', b'manifestlog'], - }, - b'pathfilter': { - b'type': b'list', - b'default': lambda: None, - b'example': {b'include': [b'path:tests']}, - }, - }, - permission=b'pull', -) -def rawstorefiledata(repo, proto, files, pathfilter): - if not streamclone.allowservergeneration(repo): - raise error.WireprotoCommandError(b'stream clone is disabled') - - # TODO support dynamically advertising what 
store files "sets" are - # available. For now, we support changelog, manifestlog, and files. - files = set(files) - allowedfiles = {b'changelog', b'manifestlog'} - - unsupported = files - allowedfiles - if unsupported: - raise error.WireprotoCommandError( - b'unknown file type: %s', (b', '.join(sorted(unsupported)),) - ) - - with repo.lock(): - topfiles = list(repo.store.topfiles()) - - sendfiles = [] - totalsize = 0 - - # TODO this is a bunch of storage layer interface abstractions because - # it assumes revlogs. - for rl_type, name, size in topfiles: - # XXX use the `rl_type` for that - if b'changelog' in files and name.startswith(b'00changelog'): - pass - elif b'manifestlog' in files and name.startswith(b'00manifest'): - pass - else: - continue - - sendfiles.append((b'store', name, size)) - totalsize += size - - yield { - b'filecount': len(sendfiles), - b'totalsize': totalsize, - } - - for location, name, size in sendfiles: - yield { - b'location': location, - b'path': name, - b'size': size, - } - - # We have to use a closure for this to ensure the context manager is - # closed only after sending the final chunk. - def getfiledata(): - with repo.svfs(name, b'rb', auditpath=False) as fh: - for chunk in util.filechunkiter(fh, limit=size): - yield chunk - - yield wireprototypes.indefinitebytestringresponse(getfiledata())
--- a/relnotes/next Thu Dec 30 13:25:44 2021 +0100 +++ b/relnotes/next Tue Jan 18 10:27:13 2022 +0100 @@ -11,9 +11,12 @@ == Bug Fixes == +The `--no-check` and `--no-merge` flags now properly override the behavior from `commands.update.check`. == Backwards Compatibility Changes == +The remotefilelog extension now requires an appropriate excludepattern +for subrepositories. == Internal API Changes ==
--- a/rust/Cargo.lock Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/Cargo.lock Tue Jan 18 10:27:13 2022 +0100 @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "adler" version = "0.2.3" @@ -314,21 +316,19 @@ [[package]] name = "format-bytes" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c4e89040c7fd7b4e6ba2820ac705a45def8a0c098ec78d170ae88f1ef1d5762" +checksum = "48942366ef93975da38e175ac9e10068c6fc08ca9e85930d4f098f4d5b14c2fd" dependencies = [ "format-bytes-macros", - "proc-macro-hack", ] [[package]] name = "format-bytes-macros" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05089e341a0460449e2210c3bf7b61597860b07f0deae58da38dbed0a4c6b6d" +checksum = "203aadebefcc73d12038296c228eabf830f99cba991b0032adf20e9fa6ce7e4f" dependencies = [ - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -356,6 +356,17 @@ ] [[package]] +name = "getrandom" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", +] + +[[package]] name = "glob" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -371,6 +382,12 @@ ] [[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] name = "hg-core" version = "0.1.0" dependencies = [ @@ -391,7 +408,7 @@ "memmap2", "micro-timer", "pretty_assertions", - "rand", + "rand 0.8.4", "rand_distr", "rand_pcg", "rayon", @@ -415,6 +432,7 @@ "libc", "log", "stable_deref_trait", + "vcsgraph", ] [[package]] @@ -442,7 +460,7 @@ checksum = 
"3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f" dependencies = [ "bitmaps", - "rand_core", + "rand_core 0.5.1", "rand_xoshiro", "sized-chunks", "typenum", @@ -480,6 +498,12 @@ checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" [[package]] +name = "libm" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" + +[[package]] name = "libz-sys" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -579,6 +603,7 @@ checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -637,12 +662,6 @@ ] [[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] name = "proc-macro2" version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -692,11 +711,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.3", + "rand_hc 0.3.1", ] [[package]] @@ -706,7 +737,17 @@ checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.3", ] [[package]] @@ -715,16 +756,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom 0.2.4", ] [[package]] name = "rand_distr" -version = "0.2.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" +checksum = "964d548f8e7d12e102ef183a0de7e98180c9f8729f555897a857b96e48122d2f" dependencies = [ - "rand", + "num-traits", + "rand 0.8.4", ] [[package]] @@ -733,16 +784,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +dependencies = [ + "rand_core 0.6.3", ] [[package]] name = "rand_pcg" -version = "0.2.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core", + "rand_core 0.6.3", ] [[package]] @@ -751,7 +811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004" dependencies = [ - "rand_core", + "rand_core 0.5.1", ] [[package]] @@ -905,7 +965,7 @@ dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi", @@ -956,7 +1016,7 @@ checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" dependencies = [ "cfg-if 0.1.10", - "rand", + "rand 0.7.3", "static_assertions", ] @@ -995,6 +1055,17 @@ checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] +name = "vcsgraph" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cb68c231e2575f7503a7c19213875f9d4ec2e84e963a56ce3de4b6bee351ef7" +dependencies = [ + "hex", + "rand 0.7.3", + "sha-1", +] + +[[package]] name = "vec_map" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index"
--- a/rust/hg-core/Cargo.toml Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/Cargo.toml Tue Jan 18 10:27:13 2022 +0100 @@ -18,9 +18,9 @@ itertools = "0.9" lazy_static = "1.4.0" libc = "0.2" -rand = "0.7.3" -rand_pcg = "0.2.1" -rand_distr = "0.2.2" +rand = "0.8.4" +rand_pcg = "0.3.1" +rand_distr = "0.4.2" rayon = "1.3.0" regex = "1.3.9" sha-1 = "0.9.6" @@ -33,7 +33,7 @@ log = "0.4.8" memmap2 = {version = "0.4", features = ["stable_deref_trait"]} zstd = "0.5.3" -format-bytes = "0.2.2" +format-bytes = "0.3.0" # We don't use the `miniz-oxide` backend to not change rhg benchmarks and until # we have a clearer view of which backend is the fastest.
--- a/rust/hg-core/src/ancestors.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/ancestors.rs Tue Jan 18 10:27:13 2022 +0100 @@ -26,15 +26,6 @@ stoprev: Revision, } -/// Lazy ancestors set, backed by AncestorsIterator -pub struct LazyAncestors<G: Graph + Clone> { - graph: G, - containsiter: AncestorsIterator<G>, - initrevs: Vec<Revision>, - stoprev: Revision, - inclusive: bool, -} - pub struct MissingAncestors<G: Graph> { graph: G, bases: HashSet<Revision>, @@ -165,49 +156,6 @@ } } -impl<G: Graph + Clone> LazyAncestors<G> { - pub fn new( - graph: G, - initrevs: impl IntoIterator<Item = Revision>, - stoprev: Revision, - inclusive: bool, - ) -> Result<Self, GraphError> { - let v: Vec<Revision> = initrevs.into_iter().collect(); - Ok(LazyAncestors { - graph: graph.clone(), - containsiter: AncestorsIterator::new( - graph, - v.iter().cloned(), - stoprev, - inclusive, - )?, - initrevs: v, - stoprev, - inclusive, - }) - } - - pub fn contains(&mut self, rev: Revision) -> Result<bool, GraphError> { - self.containsiter.contains(rev) - } - - pub fn is_empty(&self) -> bool { - self.containsiter.is_empty() - } - - pub fn iter(&self) -> AncestorsIterator<G> { - // the arguments being the same as for self.containsiter, we know - // for sure that AncestorsIterator constructor can't fail - AncestorsIterator::new( - self.graph.clone(), - self.initrevs.iter().cloned(), - self.stoprev, - self.inclusive, - ) - .unwrap() - } -} - impl<G: Graph> MissingAncestors<G> { pub fn new(graph: G, bases: impl IntoIterator<Item = Revision>) -> Self { let mut created = MissingAncestors { @@ -550,39 +498,6 @@ } #[test] - fn test_lazy_iter_contains() { - let mut lazy = - LazyAncestors::new(SampleGraph, vec![11, 13], 0, false).unwrap(); - - let revs: Vec<Revision> = lazy.iter().map(|r| r.unwrap()).collect(); - // compare with iterator tests on the same initial revisions - assert_eq!(revs, vec![8, 7, 4, 3, 2, 1, 0]); - - // contains() results are correct, unaffected by the fact that - // we 
consumed entirely an iterator out of lazy - assert_eq!(lazy.contains(2), Ok(true)); - assert_eq!(lazy.contains(9), Ok(false)); - } - - #[test] - fn test_lazy_contains_iter() { - let mut lazy = - LazyAncestors::new(SampleGraph, vec![11, 13], 0, false).unwrap(); // reminder: [8, 7, 4, 3, 2, 1, 0] - - assert_eq!(lazy.contains(2), Ok(true)); - assert_eq!(lazy.contains(6), Ok(false)); - - // after consumption of 2 by the inner iterator, results stay - // consistent - assert_eq!(lazy.contains(2), Ok(true)); - assert_eq!(lazy.contains(5), Ok(false)); - - // iter() still gives us a fresh iterator - let revs: Vec<Revision> = lazy.iter().map(|r| r.unwrap()).collect(); - assert_eq!(revs, vec![8, 7, 4, 3, 2, 1, 0]); - } - - #[test] /// Test constructor, add/get bases and heads fn test_missing_bases() -> Result<(), GraphError> { let mut missing_ancestors =
--- a/rust/hg-core/src/config/config.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/config/config.rs Tue Jan 18 10:27:13 2022 +0100 @@ -114,6 +114,7 @@ b"rhg", b"fallback-executable", ); + config.add_for_environment_variable("RHG_STATUS", b"rhg", b"status"); // HGRCPATH replaces user config if opt_rc_path.is_none() { @@ -361,6 +362,15 @@ Ok(self.get_option(section, item)?.unwrap_or(false)) } + /// Returns `true` if the extension is enabled, `false` otherwise + pub fn is_extension_enabled(&self, extension: &[u8]) -> bool { + let value = self.get(b"extensions", extension); + match value { + Some(c) => !c.starts_with(b"!"), + None => false, + } + } + /// If there is an `item` value in `section`, parse and return a list of /// byte strings. pub fn get_list( @@ -402,6 +412,66 @@ .collect() } + /// Returns whether any key is defined in the given section + pub fn has_non_empty_section(&self, section: &[u8]) -> bool { + self.layers + .iter() + .any(|layer| layer.has_non_empty_section(section)) + } + + /// Yields (key, value) pairs for everything in the given section + pub fn iter_section<'a>( + &'a self, + section: &'a [u8], + ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a { + // TODO: Use `Iterator`’s `.peekable()` when its `peek_mut` is + // available: + // https://doc.rust-lang.org/nightly/std/iter/struct.Peekable.html#method.peek_mut + struct Peekable<I: Iterator> { + iter: I, + /// Remember a peeked value, even if it was None. 
+ peeked: Option<Option<I::Item>>, + } + + impl<I: Iterator> Peekable<I> { + fn new(iter: I) -> Self { + Self { iter, peeked: None } + } + + fn next(&mut self) { + self.peeked = None + } + + fn peek_mut(&mut self) -> Option<&mut I::Item> { + let iter = &mut self.iter; + self.peeked.get_or_insert_with(|| iter.next()).as_mut() + } + } + + // Deduplicate keys redefined in multiple layers + let mut keys_already_seen = HashSet::new(); + let mut key_is_new = + move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool { + keys_already_seen.insert(key) + }; + // This is similar to `flat_map` + `filter_map`, except with a single + // closure that owns `key_is_new` (and therefore the + // `keys_already_seen` set): + let mut layer_iters = Peekable::new( + self.layers + .iter() + .rev() + .map(move |layer| layer.iter_section(section)), + ); + std::iter::from_fn(move || loop { + if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) { + return Some(pair); + } else { + layer_iters.next(); + } + }) + } + /// Get raw values bytes from all layers (even untrusted ones) in order /// of precedence. #[cfg(test)]
--- a/rust/hg-core/src/config/layer.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/config/layer.rs Tue Jan 18 10:27:13 2022 +0100 @@ -127,6 +127,24 @@ .flat_map(|section| section.keys().map(|vec| &**vec)) } + /// Returns the (key, value) pairs defined in the given section + pub fn iter_section<'layer>( + &'layer self, + section: &[u8], + ) -> impl Iterator<Item = (&'layer [u8], &'layer [u8])> { + self.sections + .get(section) + .into_iter() + .flat_map(|section| section.iter().map(|(k, v)| (&**k, &*v.bytes))) + } + + /// Returns whether any key is defined in the given section + pub fn has_non_empty_section(&self, section: &[u8]) -> bool { + self.sections + .get(section) + .map_or(false, |section| !section.is_empty()) + } + pub fn is_empty(&self) -> bool { self.sections.is_empty() }
--- a/rust/hg-core/src/dirstate/entry.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/dirstate/entry.rs Tue Jan 18 10:27:13 2022 +0100 @@ -43,6 +43,10 @@ truncated_seconds: u32, /// Always in the `0 .. 1_000_000_000` range. nanoseconds: u32, + /// TODO this should be in DirstateEntry, but the current code needs + /// refactoring to use DirstateEntry instead of TruncatedTimestamp for + /// comparison. + pub second_ambiguous: bool, } impl TruncatedTimestamp { @@ -50,11 +54,16 @@ /// and truncate the seconds components to its lower 31 bits. /// /// Panics if the nanoseconds components is not in the expected range. - pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self { + pub fn new_truncate( + seconds: i64, + nanoseconds: u32, + second_ambiguous: bool, + ) -> Self { assert!(nanoseconds < NSEC_PER_SEC); Self { truncated_seconds: seconds as u32 & RANGE_MASK_31BIT, nanoseconds, + second_ambiguous, } } @@ -63,6 +72,7 @@ pub fn from_already_truncated( truncated_seconds: u32, nanoseconds: u32, + second_ambiguous: bool, ) -> Result<Self, DirstateV2ParseError> { if truncated_seconds & !RANGE_MASK_31BIT == 0 && nanoseconds < NSEC_PER_SEC @@ -70,12 +80,17 @@ Ok(Self { truncated_seconds, nanoseconds, + second_ambiguous, }) } else { Err(DirstateV2ParseError) } } + /// Returns a `TruncatedTimestamp` for the modification time of `metadata`. + /// + /// Propagates errors from `std` on platforms where modification time + /// is not available at all. pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> { #[cfg(unix)] { @@ -83,7 +98,7 @@ let seconds = metadata.mtime(); // i64 -> u32 with value always in the `0 .. 
NSEC_PER_SEC` range let nanoseconds = metadata.mtime_nsec().try_into().unwrap(); - Ok(Self::new_truncate(seconds, nanoseconds)) + Ok(Self::new_truncate(seconds, nanoseconds, false)) } #[cfg(not(unix))] { @@ -91,6 +106,47 @@ } } + /// Like `for_mtime_of`, but may return `None` or a value with + /// `second_ambiguous` set if the mtime is not "reliable". + /// + /// A modification time is reliable if it is older than `boundary` (or + /// sufficiently in the future). + /// + /// Otherwise a concurrent modification might happens with the same mtime. + pub fn for_reliable_mtime_of( + metadata: &fs::Metadata, + boundary: &Self, + ) -> io::Result<Option<Self>> { + let mut mtime = Self::for_mtime_of(metadata)?; + // If the mtime of the ambiguous file is younger (or equal) to the + // starting point of the `status` walk, we cannot garantee that + // another, racy, write will not happen right after with the same mtime + // and we cannot cache the information. + // + // However if the mtime is far away in the future, this is likely some + // mismatch between the current clock and previous file system + // operation. So mtime more than one days in the future are considered + // fine. + let reliable = if mtime.truncated_seconds == boundary.truncated_seconds + { + mtime.second_ambiguous = true; + mtime.nanoseconds != 0 + && boundary.nanoseconds != 0 + && mtime.nanoseconds < boundary.nanoseconds + } else { + // `truncated_seconds` is less than 2**31, + // so this does not overflow `u32`: + let one_day_later = boundary.truncated_seconds + 24 * 3600; + mtime.truncated_seconds < boundary.truncated_seconds + || mtime.truncated_seconds > one_day_later + }; + if reliable { + Ok(Some(mtime)) + } else { + Ok(None) + } + } + /// The lower 31 bits of the number of seconds since the epoch. pub fn truncated_seconds(&self) -> u32 { self.truncated_seconds @@ -122,10 +178,17 @@ /// in that way, doing a simple comparison would cause many false /// negatives. 
pub fn likely_equal(self, other: Self) -> bool { - self.truncated_seconds == other.truncated_seconds - && (self.nanoseconds == other.nanoseconds - || self.nanoseconds == 0 - || other.nanoseconds == 0) + if self.truncated_seconds != other.truncated_seconds { + false + } else if self.nanoseconds == 0 || other.nanoseconds == 0 { + if self.second_ambiguous { + false + } else { + true + } + } else { + self.nanoseconds == other.nanoseconds + } } pub fn likely_equal_to_mtime_of( @@ -168,12 +231,12 @@ } } }; - Self::new_truncate(seconds, nanoseconds) + Self::new_truncate(seconds, nanoseconds, false) } } const NSEC_PER_SEC: u32 = 1_000_000_000; -const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF; +pub const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF; pub const MTIME_UNSET: i32 = -1; @@ -258,9 +321,10 @@ let mode = u32::try_from(mode).unwrap(); let size = u32::try_from(size).unwrap(); let mtime = u32::try_from(mtime).unwrap(); - let mtime = - TruncatedTimestamp::from_already_truncated(mtime, 0) - .unwrap(); + let mtime = TruncatedTimestamp::from_already_truncated( + mtime, 0, false, + ) + .unwrap(); Self { flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, mode_size: Some((mode, size)), @@ -438,7 +502,11 @@ } else if !self.flags.contains(Flags::P1_TRACKED) { MTIME_UNSET } else if let Some(mtime) = self.mtime { - i32::try_from(mtime.truncated_seconds()).unwrap() + if mtime.second_ambiguous { + MTIME_UNSET + } else { + i32::try_from(mtime.truncated_seconds()).unwrap() + } } else { MTIME_UNSET } @@ -580,10 +648,8 @@ &self, filesystem_metadata: &std::fs::Metadata, ) -> bool { - use std::os::unix::fs::MetadataExt; - const EXEC_BIT_MASK: u32 = 0o100; - let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK; - let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK; + let dirstate_exec_bit = (self.mode() as u32 & EXEC_BIT_MASK) != 0; + let fs_exec_bit = has_exec_bit(filesystem_metadata); dirstate_exec_bit != fs_exec_bit } @@ -592,16 +658,6 @@ pub fn debug_tuple(&self) -> (u8, i32, i32, 
i32) { (self.state().into(), self.mode(), self.size(), self.mtime()) } - - /// True if the stored mtime would be ambiguous with the current time - pub fn need_delay(&self, now: TruncatedTimestamp) -> bool { - if let Some(mtime) = self.mtime { - self.state() == EntryState::Normal - && mtime.truncated_seconds() == now.truncated_seconds() - } else { - false - } - } } impl EntryState { @@ -641,3 +697,11 @@ } } } + +const EXEC_BIT_MASK: u32 = 0o100; + +pub fn has_exec_bit(metadata: &std::fs::Metadata) -> bool { + // TODO: How to handle executable permissions on Windows? + use std::os::unix::fs::MetadataExt; + (metadata.mode() & EXEC_BIT_MASK) != 0 +}
--- a/rust/hg-core/src/dirstate/status.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/dirstate/status.rs Tue Jan 18 10:27:13 2022 +0100 @@ -9,10 +9,9 @@ //! It is currently missing a lot of functionality compared to the Python one //! and will only be triggered in narrow cases. +use crate::dirstate::entry::TruncatedTimestamp; use crate::dirstate_tree::on_disk::DirstateV2ParseError; - use crate::{ - dirstate::TruncatedTimestamp, utils::hg_path::{HgPath, HgPathError}, PatternError, }; @@ -62,46 +61,48 @@ #[derive(Debug, Copy, Clone)] pub struct StatusOptions { - /// Remember the most recent modification timeslot for status, to make - /// sure we won't miss future size-preserving file content modifications - /// that happen within the same timeslot. - pub last_normal_time: TruncatedTimestamp, /// Whether we are on a filesystem with UNIX-like exec flags pub check_exec: bool, pub list_clean: bool, pub list_unknown: bool, pub list_ignored: bool, + /// Whether to populate `StatusPath::copy_source` + pub list_copies: bool, /// Whether to collect traversed dirs for applying a callback later. /// Used by `hg purge` for example. pub collect_traversed_dirs: bool, } -#[derive(Debug, Default)] +#[derive(Default)] pub struct DirstateStatus<'a> { + /// The current time at the start of the `status()` algorithm, as measured + /// and possibly truncated by the filesystem. 
+ pub filesystem_time_at_status_start: Option<TruncatedTimestamp>, + /// Tracked files whose contents have changed since the parent revision - pub modified: Vec<HgPathCow<'a>>, + pub modified: Vec<StatusPath<'a>>, /// Newly-tracked files that were not present in the parent - pub added: Vec<HgPathCow<'a>>, + pub added: Vec<StatusPath<'a>>, /// Previously-tracked files that have been (re)moved with an hg command - pub removed: Vec<HgPathCow<'a>>, + pub removed: Vec<StatusPath<'a>>, /// (Still) tracked files that are missing, (re)moved with an non-hg /// command - pub deleted: Vec<HgPathCow<'a>>, + pub deleted: Vec<StatusPath<'a>>, /// Tracked files that are up to date with the parent. /// Only pupulated if `StatusOptions::list_clean` is true. - pub clean: Vec<HgPathCow<'a>>, + pub clean: Vec<StatusPath<'a>>, /// Files in the working directory that are ignored with `.hgignore`. /// Only pupulated if `StatusOptions::list_ignored` is true. - pub ignored: Vec<HgPathCow<'a>>, + pub ignored: Vec<StatusPath<'a>>, /// Files in the working directory that are neither tracked nor ignored. /// Only pupulated if `StatusOptions::list_unknown` is true. - pub unknown: Vec<HgPathCow<'a>>, + pub unknown: Vec<StatusPath<'a>>, /// Was explicitly matched but cannot be found/accessed pub bad: Vec<(HgPathCow<'a>, BadMatch)>, @@ -109,7 +110,7 @@ /// Either clean or modified, but we can’t tell from filesystem metadata /// alone. The file contents need to be read and compared with that in /// the parent. - pub unsure: Vec<HgPathCow<'a>>, + pub unsure: Vec<StatusPath<'a>>, /// Only filled if `collect_traversed_dirs` is `true` pub traversed: Vec<HgPathCow<'a>>, @@ -119,6 +120,12 @@ pub dirty: bool, } +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct StatusPath<'a> { + pub path: HgPathCow<'a>, + pub copy_source: Option<HgPathCow<'a>>, +} + #[derive(Debug, derive_more::From)] pub enum StatusError { /// Generic IO error
--- a/rust/hg-core/src/dirstate_tree/dirstate_map.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs Tue Jan 18 10:27:13 2022 +0100 @@ -309,6 +309,25 @@ NodeRef::OnDisk(node) => node.copy_source(on_disk), } } + /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk, + /// HgPath>` detached from `'tree` + pub(super) fn copy_source_borrowed( + &self, + on_disk: &'on_disk [u8], + ) -> Result<Option<BorrowedPath<'tree, 'on_disk>>, DirstateV2ParseError> + { + Ok(match self { + NodeRef::InMemory(_path, node) => { + node.copy_source.as_ref().map(|source| match source { + Cow::Borrowed(on_disk) => BorrowedPath::OnDisk(on_disk), + Cow::Owned(in_memory) => BorrowedPath::InMemory(in_memory), + }) + } + NodeRef::OnDisk(node) => node + .copy_source(on_disk)? + .map(|source| BorrowedPath::OnDisk(source)), + }) + } pub(super) fn entry( &self, @@ -677,25 +696,6 @@ }) } - fn clear_known_ambiguous_mtimes( - &mut self, - paths: &[impl AsRef<HgPath>], - ) -> Result<(), DirstateV2ParseError> { - for path in paths { - if let Some(node) = Self::get_node_mut( - self.on_disk, - &mut self.unreachable_bytes, - &mut self.root, - path.as_ref(), - )? { - if let NodeData::Entry(entry) = &mut node.data { - entry.set_possibly_dirty(); - } - } - } - Ok(()) - } - fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) { if let Cow::Borrowed(path) = path { *unreachable_bytes += path.len() as u32 @@ -928,31 +928,22 @@ #[timed] pub fn pack_v1( - &mut self, + &self, parents: DirstateParents, - now: TruncatedTimestamp, ) -> Result<Vec<u8>, DirstateError> { - let map = self.get_map_mut(); - let mut ambiguous_mtimes = Vec::new(); + let map = self.get_map(); // Optizimation (to be measured?): pre-compute size to avoid `Vec` // reallocations let mut size = parents.as_bytes().len(); for node in map.iter_nodes() { let node = node?; - if let Some(entry) = node.entry()? 
{ + if node.entry()?.is_some() { size += packed_entry_size( node.full_path(map.on_disk)?, node.copy_source(map.on_disk)?, ); - if entry.need_delay(now) { - ambiguous_mtimes.push( - node.full_path_borrowed(map.on_disk)? - .detach_from_tree(), - ) - } } } - map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?; let mut packed = Vec::with_capacity(size); packed.extend(parents.as_bytes()); @@ -977,27 +968,10 @@ /// (false). #[timed] pub fn pack_v2( - &mut self, - now: TruncatedTimestamp, + &self, can_append: bool, - ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> { - let map = self.get_map_mut(); - let mut paths = Vec::new(); - for node in map.iter_nodes() { - let node = node?; - if let Some(entry) = node.entry()? { - if entry.need_delay(now) { - paths.push( - node.full_path_borrowed(map.on_disk)? - .detach_from_tree(), - ) - } - } - } - // Borrow of `self` ends here since we collect cloned paths - - map.clear_known_ambiguous_mtimes(&paths)?; - + ) -> Result<(Vec<u8>, on_disk::TreeMetadata, bool), DirstateError> { + let map = self.get_map(); on_disk::write(map, can_append) }
--- a/rust/hg-core/src/dirstate_tree/on_disk.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/dirstate_tree/on_disk.rs Tue Jan 18 10:27:13 2022 +0100 @@ -14,8 +14,10 @@ use bytes_cast::unaligned::{U16Be, U32Be}; use bytes_cast::BytesCast; use format_bytes::format_bytes; +use rand::Rng; use std::borrow::Cow; use std::convert::{TryFrom, TryInto}; +use std::fmt::Write; /// Added at the start of `.hg/dirstate` when the "v2" format is used. /// This a redundant sanity check more than an actual "magic number" since @@ -61,14 +63,14 @@ pub struct Docket<'on_disk> { header: &'on_disk DocketHeader, - uuid: &'on_disk [u8], + pub uuid: &'on_disk [u8], } /// Fields are documented in the *Tree metadata in the docket file* /// section of `mercurial/helptext/internals/dirstate-v2.txt` #[derive(BytesCast)] #[repr(C)] -struct TreeMetadata { +pub struct TreeMetadata { root_nodes: ChildNodes, nodes_with_entry_count: Size, nodes_with_copy_source_count: Size, @@ -186,7 +188,51 @@ } } +impl TreeMetadata { + pub fn as_bytes(&self) -> &[u8] { + BytesCast::as_bytes(self) + } +} + impl<'on_disk> Docket<'on_disk> { + /// Generate the identifier for a new data file + /// + /// TODO: support the `HGTEST_UUIDFILE` environment variable. + /// See `mercurial/revlogutils/docket.py` + pub fn new_uid() -> String { + const ID_LENGTH: usize = 8; + let mut id = String::with_capacity(ID_LENGTH); + let mut rng = rand::thread_rng(); + for _ in 0..ID_LENGTH { + // One random hexadecimal digit. + // `unwrap` never panics because `impl Write for String` + // never returns an error. 
+ write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap(); + } + id + } + + pub fn serialize( + parents: DirstateParents, + tree_metadata: TreeMetadata, + data_size: u64, + uuid: &[u8], + ) -> Result<Vec<u8>, std::num::TryFromIntError> { + let header = DocketHeader { + marker: *V2_FORMAT_MARKER, + parent_1: parents.p1.pad_to_256_bits(), + parent_2: parents.p2.pad_to_256_bits(), + metadata: tree_metadata, + data_size: u32::try_from(data_size)?.into(), + uuid_size: uuid.len().try_into()?, + }; + let header = header.as_bytes(); + let mut docket = Vec::with_capacity(header.len() + uuid.len()); + docket.extend_from_slice(header); + docket.extend_from_slice(uuid); + Ok(docket) + } + pub fn parents(&self) -> DirstateParents { use crate::Node; let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES]) @@ -336,7 +382,7 @@ && self.flags().contains(Flags::HAS_MTIME) && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED) { - Ok(Some(self.mtime.try_into()?)) + Ok(Some(self.mtime()?)) } else { Ok(None) } @@ -356,6 +402,14 @@ (file_type | permisions).into() } + fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> { + let mut m: TruncatedTimestamp = self.mtime.try_into()?; + if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) { + m.second_ambiguous = true; + } + Ok(m) + } + fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> { // TODO: convert through raw bits instead? let wdir_tracked = self.flags().contains(Flags::WDIR_TRACKED); @@ -371,11 +425,8 @@ let mtime = if self.flags().contains(Flags::HAS_MTIME) && !self.flags().contains(Flags::DIRECTORY) && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED) - // The current code is not able to do the more subtle comparison that the - // MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime - && !self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) { - Some(self.mtime.try_into()?) + Some(self.mtime()?) 
} else { None }; @@ -465,6 +516,9 @@ }; let mtime = if let Some(m) = mtime_opt { flags.insert(Flags::HAS_MTIME); + if m.second_ambiguous { + flags.insert(Flags::MTIME_SECOND_AMBIGUOUS); + }; m.into() } else { PackedTruncatedTimestamp::null() @@ -549,9 +603,9 @@ /// `dirstate_map.on_disk` (true), instead of written to a new data file /// (false). pub(super) fn write( - dirstate_map: &mut DirstateMap, + dirstate_map: &DirstateMap, can_append: bool, -) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> { +) -> Result<(Vec<u8>, TreeMetadata, bool), DirstateError> { let append = can_append && dirstate_map.write_should_append(); // This ignores the space for paths, and for nodes without an entry. @@ -577,7 +631,7 @@ unused: [0; 4], ignore_patterns_hash: dirstate_map.ignore_patterns_hash, }; - Ok((writer.out, meta.as_bytes().to_vec(), append)) + Ok((writer.out, meta, append)) } struct Writer<'dmap, 'on_disk> { @@ -631,7 +685,7 @@ dirstate_map::NodeData::Entry(entry) => { Node::from_dirstate_entry(entry) } - dirstate_map::NodeData::CachedDirectory { mtime } => ( + dirstate_map::NodeData::CachedDirectory { mtime } => { // we currently never set a mtime if unknown file // are present. // So if we have a mtime for a directory, we know @@ -642,12 +696,14 @@ // We never set ALL_IGNORED_RECORDED since we // don't track that case // currently. - Flags::DIRECTORY + let mut flags = Flags::DIRECTORY | Flags::HAS_MTIME - | Flags::ALL_UNKNOWN_RECORDED, - 0.into(), - (*mtime).into(), - ), + | Flags::ALL_UNKNOWN_RECORDED; + if mtime.second_ambiguous { + flags.insert(Flags::MTIME_SECOND_AMBIGUOUS) + } + (flags, 0.into(), (*mtime).into()) + } dirstate_map::NodeData::None => ( Flags::DIRECTORY, 0.into(), @@ -773,6 +829,7 @@ Self::from_already_truncated( timestamp.truncated_seconds.get(), timestamp.nanoseconds.get(), + false, ) } }
--- a/rust/hg-core/src/dirstate_tree/owning.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/dirstate_tree/owning.rs Tue Jan 18 10:27:13 2022 +0100 @@ -21,7 +21,7 @@ /// language cannot represent a lifetime referencing a sibling field. /// This is not quite a self-referencial struct (moving this struct is not /// a problem as it doesn’t change the address of the bytes buffer owned - /// by `PyBytes`) but touches similar borrow-checker limitations. + /// by `on_disk`) but touches similar borrow-checker limitations. ptr: *mut (), } @@ -50,13 +50,13 @@ // SAFETY: We cast the type-erased pointer back to the same type it had // in `new`, except with a different lifetime parameter. This time we // connect the lifetime to that of `self`. This cast is valid because - // `self` owns the same `PyBytes` whose buffer `DirstateMap` - // references. That buffer has a stable memory address because the byte - // string value of a `PyBytes` is immutable. + // `self` owns the same `on_disk` whose buffer `DirstateMap` + // references. That buffer has a stable memory address because our + // `Self::new_empty` counstructor requires `StableDeref`. let ptr: *mut DirstateMap<'a> = self.ptr.cast(); // SAFETY: we dereference that pointer, connecting the lifetime of the - // new `&mut` to that of `self`. This is valid because the - // raw pointer is to a boxed value, and `self` owns that box. + // new `&mut` to that of `self`. This is valid because the + // raw pointer is to a boxed value, and `self` owns that box. (&self.on_disk, unsafe { &mut *ptr }) } @@ -65,7 +65,7 @@ } pub fn get_map<'a>(&'a self) -> &'a DirstateMap<'a> { - // SAFETY: same reasoning as in `get_mut` above. + // SAFETY: same reasoning as in `get_pair_mut` above. let ptr: *mut DirstateMap<'a> = self.ptr.cast(); unsafe { &*ptr } } @@ -79,13 +79,13 @@ fn drop(&mut self) { // Silence a "field is never read" warning, and demonstrate that this // value is still alive. 
- let _ = &self.on_disk; + let _: &Box<dyn Deref<Target = [u8]> + Send> = &self.on_disk; // SAFETY: this cast is the same as in `get_mut`, and is valid for the // same reason. `self.on_disk` still exists at this point, drop glue // will drop it implicitly after this `drop` method returns. let ptr: *mut DirstateMap<'_> = self.ptr.cast(); // SAFETY: `Box::from_raw` takes ownership of the box away from `self`. - // This is fine because drop glue does nothig for `*mut ()` and we’re + // This is fine because drop glue does nothing for `*mut ()` and we’re // in `drop`, so `get` and `get_mut` cannot be called again. unsafe { drop(Box::from_raw(ptr)) } }
--- a/rust/hg-core/src/dirstate_tree/status.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/dirstate_tree/status.rs Tue Jan 18 10:27:13 2022 +0100 @@ -1,5 +1,6 @@ use crate::dirstate::entry::TruncatedTimestamp; use crate::dirstate::status::IgnoreFnType; +use crate::dirstate::status::StatusPath; use crate::dirstate_tree::dirstate_map::BorrowedPath; use crate::dirstate_tree::dirstate_map::ChildNodesRef; use crate::dirstate_tree::dirstate_map::DirstateMap; @@ -15,6 +16,7 @@ use crate::DirstateStatus; use crate::EntryState; use crate::HgPathBuf; +use crate::HgPathCow; use crate::PatternFileWarning; use crate::StatusError; use crate::StatusOptions; @@ -61,16 +63,22 @@ (Box::new(|&_| true), vec![], None) }; + let filesystem_time_at_status_start = + filesystem_now(&root_dir).ok().map(TruncatedTimestamp::from); + let outcome = DirstateStatus { + filesystem_time_at_status_start, + ..Default::default() + }; let common = StatusCommon { dmap, options, matcher, ignore_fn, - outcome: Default::default(), + outcome: Mutex::new(outcome), ignore_patterns_have_changed: patterns_changed, new_cachable_directories: Default::default(), outated_cached_directories: Default::default(), - filesystem_time_at_status_start: filesystem_now(&root_dir).ok(), + filesystem_time_at_status_start, }; let is_at_repo_root = true; let hg_path = &BorrowedPath::OnDisk(HgPath::new("")); @@ -138,10 +146,68 @@ /// The current time at the start of the `status()` algorithm, as measured /// and possibly truncated by the filesystem. 
- filesystem_time_at_status_start: Option<SystemTime>, + filesystem_time_at_status_start: Option<TruncatedTimestamp>, +} + +enum Outcome { + Modified, + Added, + Removed, + Deleted, + Clean, + Ignored, + Unknown, + Unsure, } impl<'a, 'tree, 'on_disk> StatusCommon<'a, 'tree, 'on_disk> { + fn push_outcome( + &self, + which: Outcome, + dirstate_node: &NodeRef<'tree, 'on_disk>, + ) -> Result<(), DirstateV2ParseError> { + let path = dirstate_node + .full_path_borrowed(self.dmap.on_disk)? + .detach_from_tree(); + let copy_source = if self.options.list_copies { + dirstate_node + .copy_source_borrowed(self.dmap.on_disk)? + .map(|source| source.detach_from_tree()) + } else { + None + }; + self.push_outcome_common(which, path, copy_source); + Ok(()) + } + + fn push_outcome_without_copy_source( + &self, + which: Outcome, + path: &BorrowedPath<'_, 'on_disk>, + ) { + self.push_outcome_common(which, path.detach_from_tree(), None) + } + + fn push_outcome_common( + &self, + which: Outcome, + path: HgPathCow<'on_disk>, + copy_source: Option<HgPathCow<'on_disk>>, + ) { + let mut outcome = self.outcome.lock().unwrap(); + let vec = match which { + Outcome::Modified => &mut outcome.modified, + Outcome::Added => &mut outcome.added, + Outcome::Removed => &mut outcome.removed, + Outcome::Deleted => &mut outcome.deleted, + Outcome::Clean => &mut outcome.clean, + Outcome::Ignored => &mut outcome.ignored, + Outcome::Unknown => &mut outcome.unknown, + Outcome::Unsure => &mut outcome.unsure, + }; + vec.push(StatusPath { path, copy_source }); + } + fn read_dir( &self, hg_path: &HgPath, @@ -342,10 +408,7 @@ // If we previously had a file here, it was removed (with // `hg rm` or similar) or deleted before it could be // replaced by a directory or something else. 
- self.mark_removed_or_deleted_if_file( - &hg_path, - dirstate_node.state()?, - ); + self.mark_removed_or_deleted_if_file(&dirstate_node)?; } if file_type.is_dir() { if self.options.collect_traversed_dirs { @@ -376,24 +439,13 @@ if file_or_symlink && self.matcher.matches(hg_path) { if let Some(state) = dirstate_node.state()? { match state { - EntryState::Added => self - .outcome - .lock() - .unwrap() - .added - .push(hg_path.detach_from_tree()), + EntryState::Added => { + self.push_outcome(Outcome::Added, &dirstate_node)? + } EntryState::Removed => self - .outcome - .lock() - .unwrap() - .removed - .push(hg_path.detach_from_tree()), + .push_outcome(Outcome::Removed, &dirstate_node)?, EntryState::Merged => self - .outcome - .lock() - .unwrap() - .modified - .push(hg_path.detach_from_tree()), + .push_outcome(Outcome::Modified, &dirstate_node)?, EntryState::Normal => self .handle_normal_file(&dirstate_node, fs_metadata)?, } @@ -421,71 +473,86 @@ directory_metadata: &std::fs::Metadata, dirstate_node: NodeRef<'tree, 'on_disk>, ) -> Result<(), DirstateV2ParseError> { - if children_all_have_dirstate_node_or_are_ignored { - // All filesystem directory entries from `read_dir` have a - // corresponding node in the dirstate, so we can reconstitute the - // names of those entries without calling `read_dir` again. - if let (Some(status_start), Ok(directory_mtime)) = ( - &self.filesystem_time_at_status_start, - directory_metadata.modified(), + if !children_all_have_dirstate_node_or_are_ignored { + return Ok(()); + } + // All filesystem directory entries from `read_dir` have a + // corresponding node in the dirstate, so we can reconstitute the + // names of those entries without calling `read_dir` again. 
+ + // TODO: use let-else here and below when available: + // https://github.com/rust-lang/rust/issues/87335 + let status_start = if let Some(status_start) = + &self.filesystem_time_at_status_start + { + status_start + } else { + return Ok(()); + }; + + // Although the Rust standard library’s `SystemTime` type + // has nanosecond precision, the times reported for a + // directory’s (or file’s) modified time may have lower + // resolution based on the filesystem (for example ext3 + // only stores integer seconds), kernel (see + // https://stackoverflow.com/a/14393315/1162888), etc. + let directory_mtime = if let Ok(option) = + TruncatedTimestamp::for_reliable_mtime_of( + directory_metadata, + status_start, ) { - // Although the Rust standard library’s `SystemTime` type - // has nanosecond precision, the times reported for a - // directory’s (or file’s) modified time may have lower - // resolution based on the filesystem (for example ext3 - // only stores integer seconds), kernel (see - // https://stackoverflow.com/a/14393315/1162888), etc. - if &directory_mtime >= status_start { - // The directory was modified too recently, don’t cache its - // `read_dir` results. - // - // A timeline like this is possible: - // - // 1. A change to this directory (direct child was - // added or removed) cause its mtime to be set - // (possibly truncated) to `directory_mtime` - // 2. This `status` algorithm calls `read_dir` - // 3. An other change is made to the same directory is - // made so that calling `read_dir` agin would give - // different results, but soon enough after 1. that - // the mtime stays the same - // - // On a system where the time resolution poor, this - // scenario is not unlikely if all three steps are caused - // by the same script. - } else { - // We’ve observed (through `status_start`) that time has - // “progressed” since `directory_mtime`, so any further - // change to this directory is extremely likely to cause a - // different mtime. 
- // - // Having the same mtime again is not entirely impossible - // since the system clock is not monotonous. It could jump - // backward to some point before `directory_mtime`, then a - // directory change could potentially happen during exactly - // the wrong tick. - // - // We deem this scenario (unlike the previous one) to be - // unlikely enough in practice. - let truncated = TruncatedTimestamp::from(directory_mtime); - let is_up_to_date = if let Some(cached) = - dirstate_node.cached_directory_mtime()? - { - cached.likely_equal(truncated) - } else { - false - }; - if !is_up_to_date { - let hg_path = dirstate_node - .full_path_borrowed(self.dmap.on_disk)? - .detach_from_tree(); - self.new_cachable_directories - .lock() - .unwrap() - .push((hg_path, truncated)) - } - } + if let Some(directory_mtime) = option { + directory_mtime + } else { + // The directory was modified too recently, + // don’t cache its `read_dir` results. + // + // 1. A change to this directory (direct child was + // added or removed) cause its mtime to be set + // (possibly truncated) to `directory_mtime` + // 2. This `status` algorithm calls `read_dir` + // 3. An other change is made to the same directory is + // made so that calling `read_dir` agin would give + // different results, but soon enough after 1. that + // the mtime stays the same + // + // On a system where the time resolution poor, this + // scenario is not unlikely if all three steps are caused + // by the same script. + return Ok(()); } + } else { + // OS/libc does not support mtime? + return Ok(()); + }; + // We’ve observed (through `status_start`) that time has + // “progressed” since `directory_mtime`, so any further + // change to this directory is extremely likely to cause a + // different mtime. + // + // Having the same mtime again is not entirely impossible + // since the system clock is not monotonous. 
It could jump + // backward to some point before `directory_mtime`, then a + // directory change could potentially happen during exactly + // the wrong tick. + // + // We deem this scenario (unlike the previous one) to be + // unlikely enough in practice. + + let is_up_to_date = + if let Some(cached) = dirstate_node.cached_directory_mtime()? { + cached.likely_equal(directory_mtime) + } else { + false + }; + if !is_up_to_date { + let hg_path = dirstate_node + .full_path_borrowed(self.dmap.on_disk)? + .detach_from_tree(); + self.new_cachable_directories + .lock() + .unwrap() + .push((hg_path, directory_mtime)) } Ok(()) } @@ -505,7 +572,6 @@ let entry = dirstate_node .entry()? .expect("handle_normal_file called with entry-less node"); - let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?; let mode_changed = || self.options.check_exec && entry.mode_changed(fs_metadata); let size = entry.size(); @@ -513,43 +579,31 @@ if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() { // issue6456: Size returned may be longer due to encryption // on EXT-4 fscrypt. TODO maybe only do it on EXT4? - self.outcome - .lock() - .unwrap() - .unsure - .push(hg_path.detach_from_tree()) + self.push_outcome(Outcome::Unsure, dirstate_node)? } else if dirstate_node.has_copy_source() || entry.is_from_other_parent() || (size >= 0 && (size_changed || mode_changed())) { - self.outcome - .lock() - .unwrap() - .modified - .push(hg_path.detach_from_tree()) + self.push_outcome(Outcome::Modified, dirstate_node)? } else { let mtime_looks_clean; if let Some(dirstate_mtime) = entry.truncated_mtime() { let fs_mtime = TruncatedTimestamp::for_mtime_of(fs_metadata) .expect("OS/libc does not support mtime?"); + // There might be a change in the future if for example the + // internal clock become off while process run, but this is a + // case where the issues the user would face + // would be a lot worse and there is nothing we + // can really do. 
mtime_looks_clean = fs_mtime.likely_equal(dirstate_mtime) - && !fs_mtime.likely_equal(self.options.last_normal_time) } else { // No mtime in the dirstate entry mtime_looks_clean = false }; if !mtime_looks_clean { - self.outcome - .lock() - .unwrap() - .unsure - .push(hg_path.detach_from_tree()) + self.push_outcome(Outcome::Unsure, dirstate_node)? } else if self.options.list_clean { - self.outcome - .lock() - .unwrap() - .clean - .push(hg_path.detach_from_tree()) + self.push_outcome(Outcome::Clean, dirstate_node)? } } Ok(()) @@ -561,10 +615,7 @@ dirstate_node: NodeRef<'tree, 'on_disk>, ) -> Result<(), DirstateV2ParseError> { self.check_for_outdated_directory_cache(&dirstate_node)?; - self.mark_removed_or_deleted_if_file( - &dirstate_node.full_path_borrowed(self.dmap.on_disk)?, - dirstate_node.state()?, - ); + self.mark_removed_or_deleted_if_file(&dirstate_node)?; dirstate_node .children(self.dmap.on_disk)? .par_iter() @@ -578,26 +629,19 @@ /// Does nothing on a "directory" node fn mark_removed_or_deleted_if_file( &self, - hg_path: &BorrowedPath<'tree, 'on_disk>, - dirstate_node_state: Option<EntryState>, - ) { - if let Some(state) = dirstate_node_state { - if self.matcher.matches(hg_path) { + dirstate_node: &NodeRef<'tree, 'on_disk>, + ) -> Result<(), DirstateV2ParseError> { + if let Some(state) = dirstate_node.state()? { + let path = dirstate_node.full_path(self.dmap.on_disk)?; + if self.matcher.matches(path) { if let EntryState::Removed = state { - self.outcome - .lock() - .unwrap() - .removed - .push(hg_path.detach_from_tree()) + self.push_outcome(Outcome::Removed, dirstate_node)? } else { - self.outcome - .lock() - .unwrap() - .deleted - .push(hg_path.detach_from_tree()) + self.push_outcome(Outcome::Deleted, &dirstate_node)? 
} } } + Ok(()) } /// Something in the filesystem has no corresponding dirstate node @@ -675,19 +719,17 @@ let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path); if is_ignored { if self.options.list_ignored { - self.outcome - .lock() - .unwrap() - .ignored - .push(hg_path.detach_from_tree()) + self.push_outcome_without_copy_source( + Outcome::Ignored, + hg_path, + ) } } else { if self.options.list_unknown { - self.outcome - .lock() - .unwrap() - .unknown - .push(hg_path.detach_from_tree()) + self.push_outcome_without_copy_source( + Outcome::Unknown, + hg_path, + ) } } is_ignored
--- a/rust/hg-core/src/errors.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/errors.rs Tue Jan 18 10:27:13 2022 +0100 @@ -151,6 +151,8 @@ /// Converts a `Result` with `std::io::Error` into one with `HgError`. fn when_reading_file(self, path: &std::path::Path) -> Result<T, HgError>; + fn when_writing_file(self, path: &std::path::Path) -> Result<T, HgError>; + fn with_context( self, context: impl FnOnce() -> IoErrorContext, @@ -162,6 +164,10 @@ self.with_context(|| IoErrorContext::ReadingFile(path.to_owned())) } + fn when_writing_file(self, path: &std::path::Path) -> Result<T, HgError> { + self.with_context(|| IoErrorContext::WritingFile(path.to_owned())) + } + fn with_context( self, context: impl FnOnce() -> IoErrorContext,
--- a/rust/hg-core/src/lib.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/lib.rs Tue Jan 18 10:27:13 2022 +0100 @@ -7,7 +7,7 @@ mod ancestors; pub mod dagops; pub mod errors; -pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors}; +pub use ancestors::{AncestorsIterator, MissingAncestors}; pub mod dirstate; pub mod dirstate_tree; pub mod discovery; @@ -29,6 +29,7 @@ pub mod revlog; pub use revlog::*; pub mod config; +pub mod lock; pub mod logging; pub mod operations; pub mod revset;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/lock.rs Tue Jan 18 10:27:13 2022 +0100 @@ -0,0 +1,187 @@ +//! Filesystem-based locks for local repositories + +use crate::errors::HgError; +use crate::errors::HgResultExt; +use crate::utils::StrExt; +use crate::vfs::Vfs; +use std::io; +use std::io::ErrorKind; + +#[derive(derive_more::From)] +pub enum LockError { + AlreadyHeld, + #[from] + Other(HgError), +} + +/// Try to call `f` with the lock acquired, without waiting. +/// +/// If the lock is aready held, `f` is not called and `LockError::AlreadyHeld` +/// is returned. `LockError::Io` is returned for any unexpected I/O error +/// accessing the lock file, including for removing it after `f` was called. +/// The return value of `f` is dropped in that case. If all is successful, the +/// return value of `f` is forwarded. +pub fn try_with_lock_no_wait<R>( + hg_vfs: Vfs, + lock_filename: &str, + f: impl FnOnce() -> R, +) -> Result<R, LockError> { + let our_lock_data = &*OUR_LOCK_DATA; + for _retry in 0..5 { + match make_lock(hg_vfs, lock_filename, our_lock_data) { + Ok(()) => { + let result = f(); + unlock(hg_vfs, lock_filename)?; + return Ok(result); + } + Err(HgError::IoError { error, .. }) + if error.kind() == ErrorKind::AlreadyExists => + { + let lock_data = read_lock(hg_vfs, lock_filename)?; + if lock_data.is_none() { + // Lock was apparently just released, retry acquiring it + continue; + } + if !lock_should_be_broken(&lock_data) { + return Err(LockError::AlreadyHeld); + } + // The lock file is left over from a process not running + // anymore. Break it, but with another lock to + // avoid a race. 
+ break_lock(hg_vfs, lock_filename)?; + + // Retry acquiring + } + Err(error) => Err(error)?, + } + } + Err(LockError::AlreadyHeld) +} + +fn break_lock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), LockError> { + try_with_lock_no_wait(hg_vfs, &format!("{}.break", lock_filename), || { + // Check again in case some other process broke and + // acquired the lock in the meantime + let lock_data = read_lock(hg_vfs, lock_filename)?; + if !lock_should_be_broken(&lock_data) { + return Err(LockError::AlreadyHeld); + } + Ok(hg_vfs.remove_file(lock_filename)?) + })? +} + +#[cfg(unix)] +fn make_lock( + hg_vfs: Vfs, + lock_filename: &str, + data: &str, +) -> Result<(), HgError> { + // Use a symbolic link because creating it is atomic. + // The link’s "target" contains data not representing any path. + let fake_symlink_target = data; + hg_vfs.create_symlink(lock_filename, fake_symlink_target) +} + +fn read_lock( + hg_vfs: Vfs, + lock_filename: &str, +) -> Result<Option<String>, HgError> { + let link_target = + hg_vfs.read_link(lock_filename).io_not_found_as_none()?; + if let Some(target) = link_target { + let data = target + .into_os_string() + .into_string() + .map_err(|_| HgError::corrupted("non-UTF-8 lock data"))?; + Ok(Some(data)) + } else { + Ok(None) + } +} + +fn unlock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), HgError> { + hg_vfs.remove_file(lock_filename) +} + +/// Return whether the process that is/was holding the lock is known not to be +/// running anymore. 
+fn lock_should_be_broken(data: &Option<String>) -> bool { + (|| -> Option<bool> { + let (prefix, pid) = data.as_ref()?.split_2(':')?; + if prefix != &*LOCK_PREFIX { + return Some(false); + } + let process_is_running; + + #[cfg(unix)] + { + let pid: libc::pid_t = pid.parse().ok()?; + unsafe { + let signal = 0; // Test if we could send a signal, without sending + let result = libc::kill(pid, signal); + if result == 0 { + process_is_running = true + } else { + let errno = + io::Error::last_os_error().raw_os_error().unwrap(); + process_is_running = errno != libc::ESRCH + } + } + } + + Some(!process_is_running) + })() + .unwrap_or(false) +} + +lazy_static::lazy_static! { + /// A string which is used to differentiate pid namespaces + /// + /// It's useful to detect "dead" processes and remove stale locks with + /// confidence. Typically it's just hostname. On modern linux, we include an + /// extra Linux-specific pid namespace identifier. + static ref LOCK_PREFIX: String = { + // Note: this must match the behavior of `_getlockprefix` in `mercurial/lock.py` + + /// Same as https://github.com/python/cpython/blob/v3.10.0/Modules/socketmodule.c#L5414 + const BUFFER_SIZE: usize = 1024; + let mut buffer = [0_i8; BUFFER_SIZE]; + let hostname_bytes = unsafe { + let result = libc::gethostname(buffer.as_mut_ptr(), BUFFER_SIZE); + if result != 0 { + panic!("gethostname: {}", io::Error::last_os_error()) + } + std::ffi::CStr::from_ptr(buffer.as_mut_ptr()).to_bytes() + }; + let hostname = + std::str::from_utf8(hostname_bytes).expect("non-UTF-8 hostname"); + + #[cfg(target_os = "linux")] + { + use std::os::linux::fs::MetadataExt; + match std::fs::metadata("/proc/self/ns/pid") { + Ok(meta) => { + return format!("{}/{:x}", hostname, meta.st_ino()) + } + Err(error) => { + // TODO: match on `error.kind()` when `NotADirectory` + // is available on all supported Rust versions: + // https://github.com/rust-lang/rust/issues/86442 + use libc::{ + ENOENT, // ErrorKind::NotFound + ENOTDIR, // 
ErrorKind::NotADirectory + EACCES, // ErrorKind::PermissionDenied + }; + match error.raw_os_error() { + Some(ENOENT) | Some(ENOTDIR) | Some(EACCES) => {} + _ => panic!("stat /proc/self/ns/pid: {}", error), + } + } + } + } + + hostname.to_owned() + }; + + static ref OUR_LOCK_DATA: String = format!("{}:{}", &*LOCK_PREFIX, std::process::id()); +}
--- a/rust/hg-core/src/matchers.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/matchers.rs Tue Jan 18 10:27:13 2022 +0100 @@ -22,6 +22,7 @@ PatternSyntax, }; +use crate::dirstate::status::IgnoreFnType; use crate::filepatterns::normalize_path_bytes; use std::borrow::ToOwned; use std::collections::HashSet; @@ -246,7 +247,7 @@ /// ``` pub struct IncludeMatcher<'a> { patterns: Vec<u8>, - match_fn: Box<dyn for<'r> Fn(&'r HgPath) -> bool + 'a + Sync>, + match_fn: IgnoreFnType<'a>, /// Whether all the patterns match a prefix (i.e. recursively) prefix: bool, roots: HashSet<HgPathBuf>, @@ -341,9 +342,9 @@ /// Returns the regex pattern and a function that matches an `HgPath` against /// said regex formed by the given ignore patterns. -fn build_regex_match( - ignore_patterns: &[IgnorePattern], -) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + Sync>)> { +fn build_regex_match<'a, 'b>( + ignore_patterns: &'a [IgnorePattern], +) -> PatternResult<(Vec<u8>, IgnoreFnType<'b>)> { let mut regexps = vec![]; let mut exact_set = HashSet::new(); @@ -365,10 +366,10 @@ let func = move |filename: &HgPath| { exact_set.contains(filename) || matcher(filename) }; - Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync> + Box::new(func) as IgnoreFnType } else { let func = move |filename: &HgPath| exact_set.contains(filename); - Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync> + Box::new(func) as IgnoreFnType }; Ok((full_regex, func)) @@ -476,8 +477,8 @@ /// should be matched. 
fn build_match<'a, 'b>( ignore_patterns: Vec<IgnorePattern>, -) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + 'b + Sync>)> { - let mut match_funcs: Vec<Box<dyn Fn(&HgPath) -> bool + Sync>> = vec![]; +) -> PatternResult<(Vec<u8>, IgnoreFnType<'b>)> { + let mut match_funcs: Vec<IgnoreFnType<'b>> = vec![]; // For debugging and printing let mut patterns = vec![]; @@ -560,14 +561,11 @@ /// Parses all "ignore" files with their recursive includes and returns a /// function that checks whether a given file (in the general sense) should be /// ignored. -pub fn get_ignore_function<'a>( +pub fn get_ignore_matcher<'a>( mut all_pattern_files: Vec<PathBuf>, root_dir: &Path, inspect_pattern_bytes: &mut impl FnMut(&[u8]), -) -> PatternResult<( - Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>, - Vec<PatternFileWarning>, -)> { +) -> PatternResult<(IncludeMatcher<'a>, Vec<PatternFileWarning>)> { let mut all_patterns = vec![]; let mut all_warnings = vec![]; @@ -590,10 +588,25 @@ all_warnings.extend(warnings); } let matcher = IncludeMatcher::new(all_patterns)?; - Ok(( - Box::new(move |path: &HgPath| matcher.matches(path)), - all_warnings, - )) + Ok((matcher, all_warnings)) +} + +/// Parses all "ignore" files with their recursive includes and returns a +/// function that checks whether a given file (in the general sense) should be +/// ignored. 
+pub fn get_ignore_function<'a>( + all_pattern_files: Vec<PathBuf>, + root_dir: &Path, + inspect_pattern_bytes: &mut impl FnMut(&[u8]), +) -> PatternResult<(IgnoreFnType<'a>, Vec<PatternFileWarning>)> { + let res = + get_ignore_matcher(all_pattern_files, root_dir, inspect_pattern_bytes); + res.map(|(matcher, all_warnings)| { + let res: IgnoreFnType<'a> = + Box::new(move |path: &HgPath| matcher.matches(path)); + + (res, all_warnings) + }) } impl<'a> IncludeMatcher<'a> { @@ -628,6 +641,10 @@ .chain(self.parents.iter()); DirsChildrenMultiset::new(thing, Some(&self.parents)) } + + pub fn debug_get_patterns(&self) -> &[u8] { + self.patterns.as_ref() + } } impl<'a> Display for IncludeMatcher<'a> {
--- a/rust/hg-core/src/operations/cat.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/operations/cat.rs Tue Jan 18 10:27:13 2022 +0100 @@ -11,6 +11,9 @@ use crate::utils::hg_path::HgPath; +use crate::errors::HgError; +use crate::manifest::Manifest; +use crate::manifest::ManifestEntry; use itertools::put_back; use itertools::PutBack; use std::cmp::Ordering; @@ -28,46 +31,43 @@ } // Find an item in an iterator over a sorted collection. -fn find_item<'a, 'b, 'c, D, I: Iterator<Item = (&'a HgPath, D)>>( - i: &mut PutBack<I>, - needle: &'b HgPath, -) -> Option<D> { +fn find_item<'a>( + i: &mut PutBack<impl Iterator<Item = Result<ManifestEntry<'a>, HgError>>>, + needle: &HgPath, +) -> Result<Option<Node>, HgError> { loop { match i.next() { - None => return None, - Some(val) => match needle.as_bytes().cmp(val.0.as_bytes()) { - Ordering::Less => { - i.put_back(val); - return None; + None => return Ok(None), + Some(result) => { + let entry = result?; + match needle.as_bytes().cmp(entry.path.as_bytes()) { + Ordering::Less => { + i.put_back(Ok(entry)); + return Ok(None); + } + Ordering::Greater => continue, + Ordering::Equal => return Ok(Some(entry.node_id()?)), } - Ordering::Greater => continue, - Ordering::Equal => return Some(val.1), - }, + } } } } -fn find_files_in_manifest< - 'manifest, - 'query, - Data, - Manifest: Iterator<Item = (&'manifest HgPath, Data)>, - Query: Iterator<Item = &'query HgPath>, ->( - manifest: Manifest, - query: Query, -) -> (Vec<(&'query HgPath, Data)>, Vec<&'query HgPath>) { - let mut manifest = put_back(manifest); +fn find_files_in_manifest<'query>( + manifest: &Manifest, + query: impl Iterator<Item = &'query HgPath>, +) -> Result<(Vec<(&'query HgPath, Node)>, Vec<&'query HgPath>), HgError> { + let mut manifest = put_back(manifest.iter()); let mut res = vec![]; let mut missing = vec![]; for file in query { - match find_item(&mut manifest, file) { + match find_item(&mut manifest, file)? 
{ None => missing.push(file), Some(item) => res.push((file, item)), } } - return (res, missing); + return Ok((res, missing)); } /// Output the given revision of files @@ -92,17 +92,16 @@ files.sort_unstable(); let (found, missing) = find_files_in_manifest( - manifest.files_with_nodes(), + &manifest, files.into_iter().map(|f| f.as_ref()), - ); + )?; - for (file_path, node_bytes) in found { + for (file_path, file_node) in found { found_any = true; let file_log = repo.filelog(file_path)?; - let file_node = Node::from_hex_for_repo(node_bytes)?; results.push(( file_path, - file_log.data_for_node(file_node)?.into_data()?, + file_log.data_for_node(file_node)?.into_file_data()?, )); }
--- a/rust/hg-core/src/operations/debugdata.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/operations/debugdata.rs Tue Jan 18 10:27:13 2022 +0100 @@ -29,5 +29,5 @@ let rev = crate::revset::resolve_rev_number_or_hex_prefix(revset, &revlog)?; let data = revlog.get_rev_data(rev)?; - Ok(data) + Ok(data.into_owned()) }
--- a/rust/hg-core/src/operations/list_tracked_files.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/operations/list_tracked_files.rs Tue Jan 18 10:27:13 2022 +0100 @@ -76,7 +76,7 @@ pub struct FilesForRev(Manifest); impl FilesForRev { - pub fn iter(&self) -> impl Iterator<Item = &HgPath> { - self.0.files() + pub fn iter(&self) -> impl Iterator<Item = Result<&HgPath, HgError>> { + self.0.iter().map(|entry| Ok(entry?.path)) } }
--- a/rust/hg-core/src/repo.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/repo.rs Tue Jan 18 10:27:13 2022 +0100 @@ -2,10 +2,12 @@ use crate::config::{Config, ConfigError, ConfigParseError}; use crate::dirstate::DirstateParents; use crate::dirstate_tree::dirstate_map::DirstateMap; +use crate::dirstate_tree::on_disk::Docket as DirstateDocket; use crate::dirstate_tree::owning::OwningDirstateMap; -use crate::errors::HgError; use crate::errors::HgResultExt; +use crate::errors::{HgError, IoResultExt}; use crate::exit_codes; +use crate::lock::{try_with_lock_no_wait, LockError}; use crate::manifest::{Manifest, Manifestlog}; use crate::revlog::filelog::Filelog; use crate::revlog::revlog::RevlogError; @@ -15,8 +17,11 @@ use crate::vfs::{is_dir, is_file, Vfs}; use crate::{requirements, NodePrefix}; use crate::{DirstateError, Revision}; -use std::cell::{Cell, Ref, RefCell, RefMut}; +use std::cell::{Ref, RefCell, RefMut}; use std::collections::HashSet; +use std::io::Seek; +use std::io::SeekFrom; +use std::io::Write as IoWrite; use std::path::{Path, PathBuf}; /// A repository on disk @@ -26,8 +31,8 @@ store: PathBuf, requirements: HashSet<String>, config: Config, - // None means not known/initialized yet - dirstate_parents: Cell<Option<DirstateParents>>, + dirstate_parents: LazyCell<DirstateParents, HgError>, + dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>, HgError>, dirstate_map: LazyCell<OwningDirstateMap, DirstateError>, changelog: LazyCell<Changelog, HgError>, manifestlog: LazyCell<Manifestlog, HgError>, @@ -202,7 +207,10 @@ store: store_path, dot_hg, config: repo_config, - dirstate_parents: Cell::new(None), + dirstate_parents: LazyCell::new(Self::read_dirstate_parents), + dirstate_data_file_uuid: LazyCell::new( + Self::read_dirstate_data_file_uuid, + ), dirstate_map: LazyCell::new(Self::new_dirstate_map), changelog: LazyCell::new(Changelog::open), manifestlog: LazyCell::new(Manifestlog::open), @@ -243,11 +251,26 @@ } } + pub fn try_with_wlock_no_wait<R>( + 
&self, + f: impl FnOnce() -> R, + ) -> Result<R, LockError> { + try_with_lock_no_wait(self.hg_vfs(), "wlock", f) + } + pub fn has_dirstate_v2(&self) -> bool { self.requirements .contains(requirements::DIRSTATE_V2_REQUIREMENT) } + pub fn has_sparse(&self) -> bool { + self.requirements.contains(requirements::SPARSE_REQUIREMENT) + } + + pub fn has_narrow(&self) -> bool { + self.requirements.contains(requirements::NARROW_REQUIREMENT) + } + fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> { Ok(self .hg_vfs() @@ -257,32 +280,64 @@ } pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> { - if let Some(parents) = self.dirstate_parents.get() { - return Ok(parents); - } + Ok(*self.dirstate_parents.get_or_init(self)?) + } + + fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> { let dirstate = self.dirstate_file_contents()?; let parents = if dirstate.is_empty() { + if self.has_dirstate_v2() { + self.dirstate_data_file_uuid.set(None); + } DirstateParents::NULL } else if self.has_dirstate_v2() { - crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents() + let docket = + crate::dirstate_tree::on_disk::read_docket(&dirstate)?; + self.dirstate_data_file_uuid + .set(Some(docket.uuid.to_owned())); + docket.parents() } else { crate::dirstate::parsers::parse_dirstate_parents(&dirstate)? 
.clone() }; - self.dirstate_parents.set(Some(parents)); + self.dirstate_parents.set(parents); Ok(parents) } + fn read_dirstate_data_file_uuid( + &self, + ) -> Result<Option<Vec<u8>>, HgError> { + assert!( + self.has_dirstate_v2(), + "accessing dirstate data file ID without dirstate-v2" + ); + let dirstate = self.dirstate_file_contents()?; + if dirstate.is_empty() { + self.dirstate_parents.set(DirstateParents::NULL); + Ok(None) + } else { + let docket = + crate::dirstate_tree::on_disk::read_docket(&dirstate)?; + self.dirstate_parents.set(docket.parents()); + Ok(Some(docket.uuid.to_owned())) + } + } + fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> { let dirstate_file_contents = self.dirstate_file_contents()?; if dirstate_file_contents.is_empty() { - self.dirstate_parents.set(Some(DirstateParents::NULL)); + self.dirstate_parents.set(DirstateParents::NULL); + if self.has_dirstate_v2() { + self.dirstate_data_file_uuid.set(None); + } Ok(OwningDirstateMap::new_empty(Vec::new())) } else if self.has_dirstate_v2() { let docket = crate::dirstate_tree::on_disk::read_docket( &dirstate_file_contents, )?; - self.dirstate_parents.set(Some(docket.parents())); + self.dirstate_parents.set(docket.parents()); + self.dirstate_data_file_uuid + .set(Some(docket.uuid.to_owned())); let data_size = docket.data_size(); let metadata = docket.tree_metadata(); let mut map = if let Some(data_mmap) = self @@ -302,7 +357,7 @@ let (on_disk, placeholder) = map.get_pair_mut(); let (inner, parents) = DirstateMap::new_v1(on_disk)?; self.dirstate_parents - .set(Some(parents.unwrap_or(DirstateParents::NULL))); + .set(parents.unwrap_or(DirstateParents::NULL)); *placeholder = inner; Ok(map) } @@ -362,9 +417,81 @@ ) } + pub fn has_subrepos(&self) -> Result<bool, DirstateError> { + if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? 
{ + Ok(entry.state().is_tracked()) + } else { + Ok(false) + } + } + pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> { Filelog::open(self, path) } + + /// Write to disk any updates that were made through `dirstate_map_mut`. + /// + /// The "wlock" must be held while calling this. + /// See for example `try_with_wlock_no_wait`. + /// + /// TODO: have a `WritableRepo` type only accessible while holding the + /// lock? + pub fn write_dirstate(&self) -> Result<(), DirstateError> { + let map = self.dirstate_map()?; + // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if + // it’s unset + let parents = self.dirstate_parents()?; + let packed_dirstate = if self.has_dirstate_v2() { + let uuid = self.dirstate_data_file_uuid.get_or_init(self)?; + let mut uuid = uuid.as_ref(); + let can_append = uuid.is_some(); + let (data, tree_metadata, append) = map.pack_v2(can_append)?; + if !append { + uuid = None + } + let uuid = if let Some(uuid) = uuid { + std::str::from_utf8(uuid) + .map_err(|_| { + HgError::corrupted("non-UTF-8 dirstate data file ID") + })? + .to_owned() + } else { + DirstateDocket::new_uid() + }; + let data_filename = format!("dirstate.{}", uuid); + let data_filename = self.hg_vfs().join(data_filename); + let mut options = std::fs::OpenOptions::new(); + if append { + options.append(true); + } else { + options.write(true).create_new(true); + } + let data_size = (|| { + // TODO: loop and try another random ID if !append and this + // returns `ErrorKind::AlreadyExists`? 
Collision chance of two + // random IDs is one in 2**32 + let mut file = options.open(&data_filename)?; + file.write_all(&data)?; + file.flush()?; + // TODO: use https://doc.rust-lang.org/std/io/trait.Seek.html#method.stream_position when we require Rust 1.51+ + file.seek(SeekFrom::Current(0)) + })() + .when_writing_file(&data_filename)?; + DirstateDocket::serialize( + parents, + tree_metadata, + data_size, + uuid.as_bytes(), + ) + .map_err(|_: std::num::TryFromIntError| { + HgError::corrupted("overflow in dirstate docket serialization") + })? + } else { + map.pack_v1(parents)? + }; + self.hg_vfs().atomic_write("dirstate", &packed_dirstate)?; + Ok(()) + } } /// Lazily-initialized component of `Repo` with interior mutability @@ -386,6 +513,10 @@ } } + fn set(&self, value: T) { + *self.value.borrow_mut() = Some(value) + } + fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> { let mut borrowed = self.value.borrow(); if borrowed.is_none() { @@ -399,7 +530,7 @@ Ok(Ref::map(borrowed, |option| option.as_ref().unwrap())) } - pub fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> { + fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> { let mut borrowed = self.value.borrow_mut(); if borrowed.is_none() { *borrowed = Some((self.init)(repo)?);
--- a/rust/hg-core/src/requirements.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/requirements.rs Tue Jan 18 10:27:13 2022 +0100 @@ -88,6 +88,10 @@ // When it starts writing to the repository, it’ll need to either keep the // persistent nodemap up to date or remove this entry: NODEMAP_REQUIREMENT, + // Not all commands support `sparse` and `narrow`. The commands that do + // not should opt out by checking `has_sparse` and `has_narrow`. + SPARSE_REQUIREMENT, + NARROW_REQUIREMENT, ]; // Copied from mercurial/requirements.py:
--- a/rust/hg-core/src/revlog/changelog.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/revlog/changelog.rs Tue Jan 18 10:27:13 2022 +0100 @@ -22,7 +22,7 @@ pub fn data_for_node( &self, node: NodePrefix, - ) -> Result<ChangelogEntry, RevlogError> { + ) -> Result<ChangelogRevisionData, RevlogError> { let rev = self.revlog.rev_from_node(node)?; self.data_for_rev(rev) } @@ -31,9 +31,9 @@ pub fn data_for_rev( &self, rev: Revision, - ) -> Result<ChangelogEntry, RevlogError> { - let bytes = self.revlog.get_rev_data(rev)?; - Ok(ChangelogEntry { bytes }) + ) -> Result<ChangelogRevisionData, RevlogError> { + let bytes = self.revlog.get_rev_data(rev)?.into_owned(); + Ok(ChangelogRevisionData { bytes }) } pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> { @@ -43,12 +43,12 @@ /// `Changelog` entry which knows how to interpret the `changelog` data bytes. #[derive(Debug)] -pub struct ChangelogEntry { +pub struct ChangelogRevisionData { /// The data bytes of the `changelog` entry. bytes: Vec<u8>, } -impl ChangelogEntry { +impl ChangelogRevisionData { /// Return an iterator over the lines of the entry. pub fn lines(&self) -> impl Iterator<Item = &[u8]> { self.bytes
--- a/rust/hg-core/src/revlog/filelog.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/revlog/filelog.rs Tue Jan 18 10:27:13 2022 +0100 @@ -1,6 +1,7 @@ use crate::errors::HgError; use crate::repo::Repo; use crate::revlog::path_encode::path_encode; +use crate::revlog::revlog::RevlogEntry; use crate::revlog::revlog::{Revlog, RevlogError}; use crate::revlog::NodePrefix; use crate::revlog::Revision; @@ -23,24 +24,43 @@ Ok(Self { revlog }) } - /// The given node ID is that of the file as found in a manifest, not of a + /// The given node ID is that of the file as found in a filelog, not of a /// changeset. pub fn data_for_node( &self, file_node: impl Into<NodePrefix>, + ) -> Result<FilelogRevisionData, RevlogError> { + let file_rev = self.revlog.rev_from_node(file_node.into())?; + self.data_for_rev(file_rev) + } + + /// The given revision is that of the file as found in a filelog, not of a + /// changeset. + pub fn data_for_rev( + &self, + file_rev: Revision, + ) -> Result<FilelogRevisionData, RevlogError> { + let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned(); + Ok(FilelogRevisionData(data.into())) + } + + /// The given node ID is that of the file as found in a filelog, not of a + /// changeset. + pub fn entry_for_node( + &self, + file_node: impl Into<NodePrefix>, ) -> Result<FilelogEntry, RevlogError> { let file_rev = self.revlog.rev_from_node(file_node.into())?; - self.data_for_rev(file_rev) + self.entry_for_rev(file_rev) } - /// The given revision is that of the file as found in a manifest, not of a + /// The given revision is that of the file as found in a filelog, not of a /// changeset. 
- pub fn data_for_rev( + pub fn entry_for_rev( &self, file_rev: Revision, ) -> Result<FilelogEntry, RevlogError> { - let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?; - Ok(FilelogEntry(data.into())) + Ok(FilelogEntry(self.revlog.get_entry(file_rev)?)) } } @@ -50,9 +70,101 @@ get_path_from_bytes(&encoded_bytes).into() } -pub struct FilelogEntry(Vec<u8>); +pub struct FilelogEntry<'a>(RevlogEntry<'a>); + +impl FilelogEntry<'_> { + /// `self.data()` can be expensive, with decompression and delta + /// resolution. + /// + /// *Without* paying this cost, based on revlog index information + /// including `RevlogEntry::uncompressed_len`: + /// + /// * Returns `true` if the length that `self.data().file_data().len()` + /// would return is definitely **not equal** to `other_len`. + /// * Returns `false` if available information is inconclusive. + pub fn file_data_len_not_equal_to(&self, other_len: u64) -> bool { + // Relevant code that implement this behavior in Python code: + // basefilectx.cmp, filelog.size, storageutil.filerevisioncopied, + // revlog.size, revlog.rawsize + + // Let’s call `file_data_len` what would be returned by + // `self.data().file_data().len()`. + + if self.0.is_cencored() { + let file_data_len = 0; + return other_len != file_data_len; + } + + if self.0.has_length_affecting_flag_processor() { + // We can’t conclude anything about `file_data_len`. + return false; + } -impl FilelogEntry { + // Revlog revisions (usually) have metadata for the size of + // their data after decompression and delta resolution + // as would be returned by `Revlog::get_rev_data`. + // + // For filelogs this is the file’s contents preceded by an optional + // metadata block. + let uncompressed_len = if let Some(l) = self.0.uncompressed_len() { + l as u64 + } else { + // The field was set to -1, the actual uncompressed len is unknown. + // We need to decompress to say more. 
+ return false; + }; + // `uncompressed_len = file_data_len + optional_metadata_len`, + // so `file_data_len <= uncompressed_len`. + if uncompressed_len < other_len { + // Transitively, `file_data_len < other_len`. + // So `other_len != file_data_len` definitely. + return true; + } + + if uncompressed_len == other_len + 4 { + // It’s possible that `file_data_len == other_len` with an empty + // metadata block (2 start marker bytes + 2 end marker bytes). + // This happens when there wouldn’t otherwise be metadata, but + // the first 2 bytes of file data happen to match a start marker + // and would be ambiguous. + return false; + } + + if !self.0.has_p1() { + // There may or may not be copy metadata, so we can’t deduce more + // about `file_data_len` without computing file data. + return false; + } + + // Filelog ancestry is not meaningful in the way changelog ancestry is. + // It only provides hints to delta generation. + // p1 and p2 are set to null when making a copy or rename since + // contents are likely unrelatedto what might have previously existed + // at the destination path. + // + // Conversely, since here p1 is non-null, there is no copy metadata. + // Note that this reasoning may be invalidated in the presence of + // merges made by some previous versions of Mercurial that + // swapped p1 and p2. See <https://bz.mercurial-scm.org/show_bug.cgi?id=6528> + // and `tests/test-issue6528.t`. + // + // Since copy metadata is currently the only kind of metadata + // kept in revlog data of filelogs, + // this `FilelogEntry` does not have such metadata: + let file_data_len = uncompressed_len; + + return file_data_len != other_len; + } + + pub fn data(&self) -> Result<FilelogRevisionData, HgError> { + Ok(FilelogRevisionData(self.0.data()?.into_owned())) + } +} + +/// The data for one revision in a filelog, uncompressed and delta-resolved. 
+pub struct FilelogRevisionData(Vec<u8>); + +impl FilelogRevisionData { /// Split into metadata and data pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> { const DELIMITER: &[u8; 2] = &[b'\x01', b'\n']; @@ -71,14 +183,14 @@ } /// Returns the file contents at this revision, stripped of any metadata - pub fn data(&self) -> Result<&[u8], HgError> { + pub fn file_data(&self) -> Result<&[u8], HgError> { let (_metadata, data) = self.split()?; Ok(data) } /// Consume the entry, and convert it into data, discarding any metadata, /// if present. - pub fn into_data(self) -> Result<Vec<u8>, HgError> { + pub fn into_file_data(self) -> Result<Vec<u8>, HgError> { if let (Some(_metadata), data) = self.split()? { Ok(data.to_owned()) } else {
--- a/rust/hg-core/src/revlog/index.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/revlog/index.rs Tue Jan 18 10:27:13 2022 +0100 @@ -9,12 +9,82 @@ pub const INDEX_ENTRY_SIZE: usize = 64; +pub struct IndexHeader { + header_bytes: [u8; 4], +} + +#[derive(Copy, Clone)] +pub struct IndexHeaderFlags { + flags: u16, +} + +/// Corresponds to the high bits of `_format_flags` in python +impl IndexHeaderFlags { + /// Corresponds to FLAG_INLINE_DATA in python + pub fn is_inline(self) -> bool { + return self.flags & 1 != 0; + } + /// Corresponds to FLAG_GENERALDELTA in python + pub fn uses_generaldelta(self) -> bool { + return self.flags & 2 != 0; + } +} + +/// Corresponds to the INDEX_HEADER structure, +/// which is parsed as a `header` variable in `_loadindex` in `revlog.py` +impl IndexHeader { + fn format_flags(&self) -> IndexHeaderFlags { + // No "unknown flags" check here, unlike in python. Maybe there should + // be. + return IndexHeaderFlags { + flags: BigEndian::read_u16(&self.header_bytes[0..2]), + }; + } + + /// The only revlog version currently supported by rhg. + const REVLOGV1: u16 = 1; + + /// Corresponds to `_format_version` in Python. + fn format_version(&self) -> u16 { + return BigEndian::read_u16(&self.header_bytes[2..4]); + } + + const EMPTY_INDEX_HEADER: IndexHeader = IndexHeader { + // We treat an empty file as a valid index with no entries. + // Here we make an arbitrary choice of what we assume the format of the + // index to be (V1, using generaldelta). + // This doesn't matter too much, since we're only doing read-only + // access. 
but the value corresponds to the `new_header` variable in + // `revlog.py`, `_loadindex` + header_bytes: [0, 3, 0, 1], + }; + + fn parse(index_bytes: &[u8]) -> Result<IndexHeader, HgError> { + if index_bytes.len() == 0 { + return Ok(IndexHeader::EMPTY_INDEX_HEADER); + } + if index_bytes.len() < 4 { + return Err(HgError::corrupted( + "corrupted revlog: can't read the index format header", + )); + } + return Ok(IndexHeader { + header_bytes: { + let bytes: [u8; 4] = + index_bytes[0..4].try_into().expect("impossible"); + bytes + }, + }); + } +} + /// A Revlog index pub struct Index { bytes: Box<dyn Deref<Target = [u8]> + Send>, /// Offsets of starts of index blocks. /// Only needed when the index is interleaved with data. offsets: Option<Vec<usize>>, + uses_generaldelta: bool, } impl Index { @@ -23,7 +93,20 @@ pub fn new( bytes: Box<dyn Deref<Target = [u8]> + Send>, ) -> Result<Self, HgError> { - if is_inline(&bytes) { + let header = IndexHeader::parse(bytes.as_ref())?; + + if header.format_version() != IndexHeader::REVLOGV1 { + // A proper new version should have had a repo/store + // requirement. + return Err(HgError::corrupted("unsupported revlog version")); + } + + // This is only correct because we know version is REVLOGV1. + // In v2 we always use generaldelta, while in v0 we never use + // generaldelta. Similar for [is_inline] (it's only used in v1). 
+ let uses_generaldelta = header.format_flags().uses_generaldelta(); + + if header.format_flags().is_inline() { let mut offset: usize = 0; let mut offsets = Vec::new(); @@ -35,13 +118,14 @@ offset_override: None, }; - offset += INDEX_ENTRY_SIZE + entry.compressed_len(); + offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize; } if offset == bytes.len() { Ok(Self { bytes, offsets: Some(offsets), + uses_generaldelta, }) } else { Err(HgError::corrupted("unexpected inline revlog length") @@ -51,10 +135,15 @@ Ok(Self { bytes, offsets: None, + uses_generaldelta, }) } } + pub fn uses_generaldelta(&self) -> bool { + self.uses_generaldelta + } + /// Value of the inline flag. pub fn is_inline(&self) -> bool { self.offsets.is_some() @@ -171,18 +260,22 @@ } } + pub fn flags(&self) -> u16 { + BigEndian::read_u16(&self.bytes[6..=7]) + } + /// Return the compressed length of the data. - pub fn compressed_len(&self) -> usize { - BigEndian::read_u32(&self.bytes[8..=11]) as usize + pub fn compressed_len(&self) -> u32 { + BigEndian::read_u32(&self.bytes[8..=11]) } /// Return the uncompressed length of the data. - pub fn uncompressed_len(&self) -> usize { - BigEndian::read_u32(&self.bytes[12..=15]) as usize + pub fn uncompressed_len(&self) -> i32 { + BigEndian::read_i32(&self.bytes[12..=15]) } /// Return the revision upon which the data has been derived. - pub fn base_revision(&self) -> Revision { + pub fn base_revision_or_base_of_delta_chain(&self) -> Revision { // TODO Maybe return an Option when base_revision == rev? // Requires to add rev to IndexEntry @@ -206,17 +299,6 @@ } } -/// Value of the inline flag. 
-pub fn is_inline(index_bytes: &[u8]) -> bool { - if index_bytes.len() < 4 { - return true; - } - match &index_bytes[0..=1] { - [0, 0] | [0, 2] => false, - _ => true, - } -} - #[cfg(test)] mod tests { use super::*; @@ -231,7 +313,7 @@ offset: usize, compressed_len: usize, uncompressed_len: usize, - base_revision: Revision, + base_revision_or_base_of_delta_chain: Revision, } #[cfg(test)] @@ -245,7 +327,7 @@ offset: 0, compressed_len: 0, uncompressed_len: 0, - base_revision: 0, + base_revision_or_base_of_delta_chain: 0, } } @@ -284,8 +366,11 @@ self } - pub fn with_base_revision(&mut self, value: Revision) -> &mut Self { - self.base_revision = value; + pub fn with_base_revision_or_base_of_delta_chain( + &mut self, + value: Revision, + ) -> &mut Self { + self.base_revision_or_base_of_delta_chain = value; self } @@ -308,42 +393,67 @@ bytes.extend(&[0u8; 2]); // Revision flags. bytes.extend(&(self.compressed_len as u32).to_be_bytes()); bytes.extend(&(self.uncompressed_len as u32).to_be_bytes()); - bytes.extend(&self.base_revision.to_be_bytes()); + bytes.extend( + &self.base_revision_or_base_of_delta_chain.to_be_bytes(), + ); bytes } } + pub fn is_inline(index_bytes: &[u8]) -> bool { + IndexHeader::parse(index_bytes) + .expect("too short") + .format_flags() + .is_inline() + } + + pub fn uses_generaldelta(index_bytes: &[u8]) -> bool { + IndexHeader::parse(index_bytes) + .expect("too short") + .format_flags() + .uses_generaldelta() + } + + pub fn get_version(index_bytes: &[u8]) -> u16 { + IndexHeader::parse(index_bytes) + .expect("too short") + .format_version() + } + #[test] - fn is_not_inline_when_no_inline_flag_test() { + fn flags_when_no_inline_flag_test() { let bytes = IndexEntryBuilder::new() .is_first(true) .with_general_delta(false) .with_inline(false) .build(); - assert_eq!(is_inline(&bytes), false) + assert_eq!(is_inline(&bytes), false); + assert_eq!(uses_generaldelta(&bytes), false); } #[test] - fn is_inline_when_inline_flag_test() { + fn 
flags_when_inline_flag_test() { let bytes = IndexEntryBuilder::new() .is_first(true) .with_general_delta(false) .with_inline(true) .build(); - assert_eq!(is_inline(&bytes), true) + assert_eq!(is_inline(&bytes), true); + assert_eq!(uses_generaldelta(&bytes), false); } #[test] - fn is_inline_when_inline_and_generaldelta_flags_test() { + fn flags_when_inline_and_generaldelta_flags_test() { let bytes = IndexEntryBuilder::new() .is_first(true) .with_general_delta(true) .with_inline(true) .build(); - assert_eq!(is_inline(&bytes), true) + assert_eq!(is_inline(&bytes), true); + assert_eq!(uses_generaldelta(&bytes), true); } #[test] @@ -391,14 +501,26 @@ } #[test] - fn test_base_revision() { - let bytes = IndexEntryBuilder::new().with_base_revision(1).build(); + fn test_base_revision_or_base_of_delta_chain() { + let bytes = IndexEntryBuilder::new() + .with_base_revision_or_base_of_delta_chain(1) + .build(); let entry = IndexEntry { bytes: &bytes, offset_override: None, }; - assert_eq!(entry.base_revision(), 1) + assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1) + } + + #[test] + fn version_test() { + let bytes = IndexEntryBuilder::new() + .is_first(true) + .with_version(1) + .build(); + + assert_eq!(get_version(&bytes), 1) } }
--- a/rust/hg-core/src/revlog/manifest.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/revlog/manifest.rs Tue Jan 18 10:27:13 2022 +0100 @@ -4,6 +4,7 @@ use crate::revlog::Revision; use crate::revlog::{Node, NodePrefix}; use crate::utils::hg_path::HgPath; +use crate::utils::SliceExt; /// A specialized `Revlog` to work with `manifest` data format. pub struct Manifestlog { @@ -43,7 +44,7 @@ &self, rev: Revision, ) -> Result<Manifest, RevlogError> { - let bytes = self.revlog.get_rev_data(rev)?; + let bytes = self.revlog.get_rev_data(rev)?.into_owned(); Ok(Manifest { bytes }) } } @@ -51,51 +52,142 @@ /// `Manifestlog` entry which knows how to interpret the `manifest` data bytes. #[derive(Debug)] pub struct Manifest { + /// Format for a manifest: flat sequence of variable-size entries, + /// sorted by path, each as: + /// + /// ```text + /// <path> \0 <hex_node_id> <flags> \n + /// ``` + /// + /// The last entry is also terminated by a newline character. + /// Flags is one of `b""` (the empty string), `b"x"`, `b"l"`, or `b"t"`. bytes: Vec<u8>, } impl Manifest { - /// Return an iterator over the lines of the entry. - pub fn lines(&self) -> impl Iterator<Item = &[u8]> { + pub fn iter( + &self, + ) -> impl Iterator<Item = Result<ManifestEntry, HgError>> { self.bytes .split(|b| b == &b'\n') .filter(|line| !line.is_empty()) - } - - /// Return an iterator over the files of the entry. - pub fn files(&self) -> impl Iterator<Item = &HgPath> { - self.lines().filter(|line| !line.is_empty()).map(|line| { - let pos = line - .iter() - .position(|x| x == &b'\0') - .expect("manifest line should contain \\0"); - HgPath::new(&line[..pos]) - }) - } - - /// Return an iterator over the files of the entry. 
- pub fn files_with_nodes(&self) -> impl Iterator<Item = (&HgPath, &[u8])> { - self.lines().filter(|line| !line.is_empty()).map(|line| { - let pos = line - .iter() - .position(|x| x == &b'\0') - .expect("manifest line should contain \\0"); - let hash_start = pos + 1; - let hash_end = hash_start + 40; - (HgPath::new(&line[..pos]), &line[hash_start..hash_end]) - }) + .map(ManifestEntry::from_raw) } /// If the given path is in this manifest, return its filelog node ID - pub fn find_file(&self, path: &HgPath) -> Result<Option<Node>, HgError> { - // TODO: use binary search instead of linear scan. This may involve - // building (and caching) an index of the byte indicex of each manifest - // line. - for (manifest_path, node) in self.files_with_nodes() { - if manifest_path == path { - return Ok(Some(Node::from_hex_for_repo(node)?)); + pub fn find_by_path( + &self, + path: &HgPath, + ) -> Result<Option<ManifestEntry>, HgError> { + use std::cmp::Ordering::*; + let path = path.as_bytes(); + // Both boundaries of this `&[u8]` slice are always at the boundary of + // an entry + let mut bytes = &*self.bytes; + + // Binary search algorithm derived from `[T]::binary_search_by` + // <https://github.com/rust-lang/rust/blob/1.57.0/library/core/src/slice/mod.rs#L2221> + // except we don’t have a slice of entries. Instead we jump to the + // middle of the byte slice and look around for entry delimiters + // (newlines). + while let Some(entry_range) = Self::find_entry_near_middle_of(bytes)? { + let (entry_path, rest) = + ManifestEntry::split_path(&bytes[entry_range.clone()])?; + let cmp = entry_path.cmp(path); + if cmp == Less { + let after_newline = entry_range.end + 1; + bytes = &bytes[after_newline..]; + } else if cmp == Greater { + bytes = &bytes[..entry_range.start]; + } else { + return Ok(Some(ManifestEntry::from_path_and_rest( + entry_path, rest, + ))); } } Ok(None) } + + /// If there is at least one, return the byte range of an entry *excluding* + /// the final newline. 
+ fn find_entry_near_middle_of( + bytes: &[u8], + ) -> Result<Option<std::ops::Range<usize>>, HgError> { + let len = bytes.len(); + if len > 0 { + let middle = bytes.len() / 2; + // Integer division rounds down, so `middle < len`. + let (before, after) = bytes.split_at(middle); + let is_newline = |&byte: &u8| byte == b'\n'; + let entry_start = match before.iter().rposition(is_newline) { + Some(i) => i + 1, + None => 0, // We choose the first entry in `bytes` + }; + let entry_end = match after.iter().position(is_newline) { + Some(i) => { + // No `+ 1` here to exclude this newline from the range + middle + i + } + None => { + // In a well-formed manifest: + // + // * Since `len > 0`, `bytes` contains at least one entry + // * Every entry ends with a newline + // * Since `middle < len`, `after` contains at least the + // newline at the end of the last entry of `bytes`. + // + // We didn’t find a newline, so this manifest is not + // well-formed. + return Err(HgError::corrupted( + "manifest entry without \\n delimiter", + )); + } + }; + Ok(Some(entry_start..entry_end)) + } else { + // len == 0 + Ok(None) + } + } } + +/// `Manifestlog` entry which knows how to interpret the `manifest` data bytes. 
+#[derive(Debug)] +pub struct ManifestEntry<'manifest> { + pub path: &'manifest HgPath, + pub hex_node_id: &'manifest [u8], + + /// `Some` values are b'x', b'l', or 't' + pub flags: Option<u8>, +} + +impl<'a> ManifestEntry<'a> { + fn split_path(bytes: &[u8]) -> Result<(&[u8], &[u8]), HgError> { + bytes.split_2(b'\0').ok_or_else(|| { + HgError::corrupted("manifest entry without \\0 delimiter") + }) + } + + fn from_path_and_rest(path: &'a [u8], rest: &'a [u8]) -> Self { + let (hex_node_id, flags) = match rest.split_last() { + Some((&b'x', rest)) => (rest, Some(b'x')), + Some((&b'l', rest)) => (rest, Some(b'l')), + Some((&b't', rest)) => (rest, Some(b't')), + _ => (rest, None), + }; + Self { + path: HgPath::new(path), + hex_node_id, + flags, + } + } + + fn from_raw(bytes: &'a [u8]) -> Result<Self, HgError> { + let (path, rest) = Self::split_path(bytes)?; + Ok(Self::from_path_and_rest(path, rest)) + } + + pub fn node_id(&self) -> Result<Node, HgError> { + Node::from_hex_for_repo(self.hex_node_id) + } +}
--- a/rust/hg-core/src/revlog/node.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/revlog/node.rs Tue Jan 18 10:27:13 2022 +0100 @@ -174,6 +174,12 @@ data: self.data, } } + + pub fn pad_to_256_bits(&self) -> [u8; 32] { + let mut bits = [0; 32]; + bits[..NODE_BYTES_LENGTH].copy_from_slice(&self.data); + bits + } } /// The beginning of a binary revision SHA.
--- a/rust/hg-core/src/revlog/revlog.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/revlog/revlog.rs Tue Jan 18 10:27:13 2022 +0100 @@ -1,9 +1,9 @@ use std::borrow::Cow; +use std::convert::TryFrom; use std::io::Read; use std::ops::Deref; use std::path::Path; -use byteorder::{BigEndian, ByteOrder}; use flate2::read::ZlibDecoder; use micro_timer::timed; use sha1::{Digest, Sha1}; @@ -20,6 +20,18 @@ use crate::revlog::Revision; use crate::{Node, NULL_REVISION}; +const REVISION_FLAG_CENSORED: u16 = 1 << 15; +const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14; +const REVISION_FLAG_EXTSTORED: u16 = 1 << 13; +const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12; + +// Keep this in sync with REVIDX_KNOWN_FLAGS in +// mercurial/revlogutils/flagutil.py +const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED + | REVISION_FLAG_ELLIPSIS + | REVISION_FLAG_EXTSTORED + | REVISION_FLAG_HASCOPIESINFO; + #[derive(derive_more::From)] pub enum RevlogError { InvalidRevision, @@ -40,9 +52,13 @@ } } +fn corrupted() -> HgError { + HgError::corrupted("corrupted revlog") +} + impl RevlogError { fn corrupted() -> Self { - RevlogError::Other(HgError::corrupted("corrupted revlog")) + RevlogError::Other(corrupted()) } } @@ -74,13 +90,6 @@ match repo.store_vfs().mmap_open_opt(&index_path)? { None => Index::new(Box::new(vec![])), Some(index_mmap) => { - let version = get_version(&index_mmap)?; - if version != 1 { - // A proper new version should have had a repo/store - // requirement. - return Err(HgError::corrupted("corrupted revlog")); - } - let index = Index::new(Box::new(index_mmap))?; Ok(index) } @@ -192,42 +201,14 @@ /// retrieved as needed, and the deltas will be applied to the inital /// snapshot to rebuild the final data. 
#[timed] - pub fn get_rev_data(&self, rev: Revision) -> Result<Vec<u8>, RevlogError> { + pub fn get_rev_data( + &self, + rev: Revision, + ) -> Result<Cow<[u8]>, RevlogError> { if rev == NULL_REVISION { - return Ok(vec![]); + return Ok(Cow::Borrowed(&[])); }; - // Todo return -> Cow - let mut entry = self.get_entry(rev)?; - let mut delta_chain = vec![]; - while let Some(base_rev) = entry.base_rev { - delta_chain.push(entry); - entry = self - .get_entry(base_rev) - .map_err(|_| RevlogError::corrupted())?; - } - - // TODO do not look twice in the index - let index_entry = self - .index - .get_entry(rev) - .ok_or(RevlogError::InvalidRevision)?; - - let data: Vec<u8> = if delta_chain.is_empty() { - entry.data()?.into() - } else { - Revlog::build_data_from_deltas(entry, &delta_chain)? - }; - - if self.check_hash( - index_entry.p1(), - index_entry.p2(), - index_entry.hash().as_bytes(), - &data, - ) { - Ok(data) - } else { - Err(RevlogError::corrupted()) - } + Ok(self.get_entry(rev)?.data()?) } /// Check the hash of some given data against the recorded hash. @@ -258,13 +239,13 @@ fn build_data_from_deltas( snapshot: RevlogEntry, deltas: &[RevlogEntry], - ) -> Result<Vec<u8>, RevlogError> { - let snapshot = snapshot.data()?; + ) -> Result<Vec<u8>, HgError> { + let snapshot = snapshot.data_chunk()?; let deltas = deltas .iter() .rev() - .map(RevlogEntry::data) - .collect::<Result<Vec<Cow<'_, [u8]>>, RevlogError>>()?; + .map(RevlogEntry::data_chunk) + .collect::<Result<Vec<_>, _>>()?; let patches: Vec<_> = deltas.iter().map(|d| patch::PatchList::new(d)).collect(); let patch = patch::fold_patch_lists(&patches); @@ -282,42 +263,67 @@ } /// Get an entry of the revlog. 
- fn get_entry(&self, rev: Revision) -> Result<RevlogEntry, RevlogError> { + pub fn get_entry( + &self, + rev: Revision, + ) -> Result<RevlogEntry, RevlogError> { let index_entry = self .index .get_entry(rev) .ok_or(RevlogError::InvalidRevision)?; let start = index_entry.offset(); - let end = start + index_entry.compressed_len(); + let end = start + index_entry.compressed_len() as usize; let data = if self.index.is_inline() { self.index.data(start, end) } else { &self.data()[start..end] }; let entry = RevlogEntry { + revlog: self, rev, bytes: data, compressed_len: index_entry.compressed_len(), uncompressed_len: index_entry.uncompressed_len(), - base_rev: if index_entry.base_revision() == rev { + base_rev_or_base_of_delta_chain: if index_entry + .base_revision_or_base_of_delta_chain() + == rev + { None } else { - Some(index_entry.base_revision()) + Some(index_entry.base_revision_or_base_of_delta_chain()) }, + p1: index_entry.p1(), + p2: index_entry.p2(), + flags: index_entry.flags(), + hash: *index_entry.hash(), }; Ok(entry) } + + /// when resolving internal references within revlog, any errors + /// should be reported as corruption, instead of e.g. "invalid revision" + fn get_entry_internal( + &self, + rev: Revision, + ) -> Result<RevlogEntry, HgError> { + return self.get_entry(rev).map_err(|_| corrupted()); + } } /// The revlog entry's bytes and the necessary informations to extract /// the entry's data. 
-#[derive(Debug)] +#[derive(Clone)] pub struct RevlogEntry<'a> { + revlog: &'a Revlog, rev: Revision, bytes: &'a [u8], - compressed_len: usize, - uncompressed_len: usize, - base_rev: Option<Revision>, + compressed_len: u32, + uncompressed_len: i32, + base_rev_or_base_of_delta_chain: Option<Revision>, + p1: Revision, + p2: Revision, + flags: u16, + hash: Node, } impl<'a> RevlogEntry<'a> { @@ -325,8 +331,65 @@ self.rev } + pub fn uncompressed_len(&self) -> Option<u32> { + u32::try_from(self.uncompressed_len).ok() + } + + pub fn has_p1(&self) -> bool { + self.p1 != NULL_REVISION + } + + pub fn is_cencored(&self) -> bool { + (self.flags & REVISION_FLAG_CENSORED) != 0 + } + + pub fn has_length_affecting_flag_processor(&self) -> bool { + // Relevant Python code: revlog.size() + // note: ELLIPSIS is known to not change the content + (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0 + } + + /// The data for this entry, after resolving deltas if any. + pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> { + let mut entry = self.clone(); + let mut delta_chain = vec![]; + + // The meaning of `base_rev_or_base_of_delta_chain` depends on + // generaldelta. See the doc on `ENTRY_DELTA_BASE` in + // `mercurial/revlogutils/constants.py` and the code in + // [_chaininfo] and in [index_deltachain]. + let uses_generaldelta = self.revlog.index.uses_generaldelta(); + while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain { + let base_rev = if uses_generaldelta { + base_rev + } else { + entry.rev - 1 + }; + delta_chain.push(entry); + entry = self.revlog.get_entry_internal(base_rev)?; + } + + let data = if delta_chain.is_empty() { + entry.data_chunk()? + } else { + Revlog::build_data_from_deltas(entry, &delta_chain)?.into() + }; + + if self.revlog.check_hash( + self.p1, + self.p2, + self.hash.as_bytes(), + &data, + ) { + Ok(data) + } else { + Err(corrupted()) + } + } + /// Extract the data contained in the entry. 
- pub fn data(&self) -> Result<Cow<'_, [u8]>, RevlogError> { + /// This may be a delta. (See `is_delta`.) + fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> { if self.bytes.is_empty() { return Ok(Cow::Borrowed(&[])); } @@ -341,39 +404,37 @@ // zstd data. b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)), // A proper new format should have had a repo/store requirement. - _format_type => Err(RevlogError::corrupted()), + _format_type => Err(corrupted()), } } - fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, RevlogError> { + fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> { let mut decoder = ZlibDecoder::new(self.bytes); if self.is_delta() { - let mut buf = Vec::with_capacity(self.compressed_len); - decoder - .read_to_end(&mut buf) - .map_err(|_| RevlogError::corrupted())?; + let mut buf = Vec::with_capacity(self.compressed_len as usize); + decoder.read_to_end(&mut buf).map_err(|_| corrupted())?; Ok(buf) } else { - let mut buf = vec![0; self.uncompressed_len]; - decoder - .read_exact(&mut buf) - .map_err(|_| RevlogError::corrupted())?; + let cap = self.uncompressed_len.max(0) as usize; + let mut buf = vec![0; cap]; + decoder.read_exact(&mut buf).map_err(|_| corrupted())?; Ok(buf) } } - fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, RevlogError> { + fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> { if self.is_delta() { - let mut buf = Vec::with_capacity(self.compressed_len); + let mut buf = Vec::with_capacity(self.compressed_len as usize); zstd::stream::copy_decode(self.bytes, &mut buf) - .map_err(|_| RevlogError::corrupted())?; + .map_err(|_| corrupted())?; Ok(buf) } else { - let mut buf = vec![0; self.uncompressed_len]; + let cap = self.uncompressed_len.max(0) as usize; + let mut buf = vec![0; cap]; let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf) - .map_err(|_| RevlogError::corrupted())?; - if len != self.uncompressed_len { - Err(RevlogError::corrupted()) + .map_err(|_| corrupted())?; + if len != 
self.uncompressed_len as usize { + Err(corrupted()) } else { Ok(buf) } @@ -383,23 +444,10 @@ /// Tell if the entry is a snapshot or a delta /// (influences on decompression). fn is_delta(&self) -> bool { - self.base_rev.is_some() + self.base_rev_or_base_of_delta_chain.is_some() } } -/// Format version of the revlog. -pub fn get_version(index_bytes: &[u8]) -> Result<u16, HgError> { - if index_bytes.len() == 0 { - return Ok(1); - }; - if index_bytes.len() < 4 { - return Err(HgError::corrupted( - "corrupted revlog: can't read the index format header", - )); - }; - Ok(BigEndian::read_u16(&index_bytes[2..=3])) -} - /// Calculate the hash of a revision given its data and its parents. fn hash( data: &[u8], @@ -418,20 +466,3 @@ hasher.update(data); *hasher.finalize().as_ref() } - -#[cfg(test)] -mod tests { - use super::*; - - use super::super::index::IndexEntryBuilder; - - #[test] - fn version_test() { - let bytes = IndexEntryBuilder::new() - .is_first(true) - .with_version(1) - .build(); - - assert_eq!(get_version(&bytes).map_err(|_err| ()), Ok(1)) - } -}
--- a/rust/hg-core/src/utils.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/utils.rs Tue Jan 18 10:27:13 2022 +0100 @@ -145,6 +145,21 @@ } } +pub trait StrExt { + // TODO: Use https://doc.rust-lang.org/nightly/std/primitive.str.html#method.split_once + // once we require Rust 1.52+ + fn split_2(&self, separator: char) -> Option<(&str, &str)>; +} + +impl StrExt for str { + fn split_2(&self, separator: char) -> Option<(&str, &str)> { + let mut iter = self.splitn(2, separator); + let a = iter.next()?; + let b = iter.next()?; + Some((a, b)) + } +} + pub trait Escaped { /// Return bytes escaped for display to the user fn escaped_bytes(&self) -> Vec<u8>;
--- a/rust/hg-core/src/vfs.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/src/vfs.rs Tue Jan 18 10:27:13 2022 +0100 @@ -1,6 +1,6 @@ use crate::errors::{HgError, IoErrorContext, IoResultExt}; use memmap2::{Mmap, MmapOptions}; -use std::io::ErrorKind; +use std::io::{ErrorKind, Write}; use std::path::{Path, PathBuf}; /// Filesystem access abstraction for the contents of a given "base" diretory @@ -16,6 +16,22 @@ self.base.join(relative_path) } + pub fn symlink_metadata( + &self, + relative_path: impl AsRef<Path>, + ) -> Result<std::fs::Metadata, HgError> { + let path = self.join(relative_path); + std::fs::symlink_metadata(&path).when_reading_file(&path) + } + + pub fn read_link( + &self, + relative_path: impl AsRef<Path>, + ) -> Result<PathBuf, HgError> { + let path = self.join(relative_path); + std::fs::read_link(&path).when_reading_file(&path) + } + pub fn read( &self, relative_path: impl AsRef<Path>, @@ -71,6 +87,47 @@ std::fs::rename(&from, &to) .with_context(|| IoErrorContext::RenamingFile { from, to }) } + + pub fn remove_file( + &self, + relative_path: impl AsRef<Path>, + ) -> Result<(), HgError> { + let path = self.join(relative_path); + std::fs::remove_file(&path) + .with_context(|| IoErrorContext::RemovingFile(path)) + } + + #[cfg(unix)] + pub fn create_symlink( + &self, + relative_link_path: impl AsRef<Path>, + target_path: impl AsRef<Path>, + ) -> Result<(), HgError> { + let link_path = self.join(relative_link_path); + std::os::unix::fs::symlink(target_path, &link_path) + .when_writing_file(&link_path) + } + + /// Write `contents` into a temporary file, then rename to `relative_path`. + /// This makes writing to a file "atomic": a reader opening that path will + /// see either the previous contents of the file or the complete new + /// content, never a partial write. 
+ pub fn atomic_write( + &self, + relative_path: impl AsRef<Path>, + contents: &[u8], + ) -> Result<(), HgError> { + let mut tmp = tempfile::NamedTempFile::new_in(self.base) + .when_writing_file(self.base)?; + tmp.write_all(contents) + .and_then(|()| tmp.flush()) + .when_writing_file(tmp.path())?; + let path = self.join(relative_path); + tmp.persist(&path) + .map_err(|e| e.error) + .when_writing_file(&path)?; + Ok(()) + } } fn fs_metadata(
--- a/rust/hg-core/tests/test_missing_ancestors.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-core/tests/test_missing_ancestors.rs Tue Jan 18 10:27:13 2022 +0100 @@ -32,11 +32,11 @@ if i == 2 || rng.gen_bool(prevprob) { (i - 1) as Revision } else { - rng.gen_range(0, i - 1) as Revision + rng.gen_range(0..i - 1) as Revision } }; // p2 is a random revision lower than i and different from p1 - let mut p2 = rng.gen_range(0, i - 1) as Revision; + let mut p2 = rng.gen_range(0..i - 1) as Revision; if p2 >= p1 { p2 = p2 + 1; } @@ -44,7 +44,7 @@ } else if rng.gen_bool(prevprob) { vg.push([(i - 1) as Revision, NULL_REVISION]) } else { - vg.push([rng.gen_range(0, i - 1) as Revision, NULL_REVISION]) + vg.push([rng.gen_range(0..i - 1) as Revision, NULL_REVISION]) } } vg
--- a/rust/hg-cpython/Cargo.toml Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/Cargo.toml Tue Jan 18 10:27:13 2022 +0100 @@ -28,3 +28,5 @@ log = "0.4.8" env_logger = "0.7.1" stable_deref_trait = "1.2.0" +vcsgraph = "0.2.0" +
--- a/rust/hg-cpython/src/ancestors.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/src/ancestors.rs Tue Jan 18 10:27:13 2022 +0100 @@ -42,20 +42,21 @@ ObjectProtocol, PyClone, PyDict, PyList, PyModule, PyObject, PyResult, Python, PythonObject, ToPyObject, }; +use hg::MissingAncestors as CoreMissing; use hg::Revision; -use hg::{ - AncestorsIterator as CoreIterator, LazyAncestors as CoreLazy, - MissingAncestors as CoreMissing, -}; use std::cell::RefCell; use std::collections::HashSet; +use vcsgraph::lazy_ancestors::{ + AncestorsIterator as VCGAncestorsIterator, + LazyAncestors as VCGLazyAncestors, +}; py_class!(pub class AncestorsIterator |py| { - data inner: RefCell<Box<CoreIterator<Index>>>; + data inner: RefCell<Box<VCGAncestorsIterator<Index>>>; def __next__(&self) -> PyResult<Option<Revision>> { match self.inner(py).borrow_mut().next() { - Some(Err(e)) => Err(GraphError::pynew(py, e)), + Some(Err(e)) => Err(GraphError::pynew_from_vcsgraph(py, e)), None => Ok(None), Some(Ok(r)) => Ok(Some(r)), } @@ -63,7 +64,7 @@ def __contains__(&self, rev: Revision) -> PyResult<bool> { self.inner(py).borrow_mut().contains(rev) - .map_err(|e| GraphError::pynew(py, e)) + .map_err(|e| GraphError::pynew_from_vcsgraph(py, e)) } def __iter__(&self) -> PyResult<Self> { @@ -73,32 +74,35 @@ def __new__(_cls, index: PyObject, initrevs: PyObject, stoprev: Revision, inclusive: bool) -> PyResult<AncestorsIterator> { let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?; - let ait = CoreIterator::new( + let ait = VCGAncestorsIterator::new( pyindex_to_graph(py, index)?, initvec, stoprev, inclusive, ) - .map_err(|e| GraphError::pynew(py, e))?; + .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))?; AncestorsIterator::from_inner(py, ait) } }); impl AncestorsIterator { - pub fn from_inner(py: Python, ait: CoreIterator<Index>) -> PyResult<Self> { + pub fn from_inner( + py: Python, + ait: VCGAncestorsIterator<Index>, + ) -> PyResult<Self> { Self::create_instance(py, 
RefCell::new(Box::new(ait))) } } py_class!(pub class LazyAncestors |py| { - data inner: RefCell<Box<CoreLazy<Index>>>; + data inner: RefCell<Box<VCGLazyAncestors<Index>>>; def __contains__(&self, rev: Revision) -> PyResult<bool> { self.inner(py) .borrow_mut() .contains(rev) - .map_err(|e| GraphError::pynew(py, e)) + .map_err(|e| GraphError::pynew_from_vcsgraph(py, e)) } def __iter__(&self) -> PyResult<AncestorsIterator> { @@ -114,9 +118,9 @@ let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?; let lazy = - CoreLazy::new(pyindex_to_graph(py, index)?, + VCGLazyAncestors::new(pyindex_to_graph(py, index)?, initvec, stoprev, inclusive) - .map_err(|e| GraphError::pynew(py, e))?; + .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))?; Self::create_instance(py, RefCell::new(Box::new(lazy))) }
--- a/rust/hg-cpython/src/cindex.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/src/cindex.rs Tue Jan 18 10:27:13 2022 +0100 @@ -155,6 +155,24 @@ } } +impl vcsgraph::graph::Graph for Index { + fn parents( + &self, + rev: Revision, + ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError> + { + match Graph::parents(self, rev) { + Ok(parents) => Ok(vcsgraph::graph::Parents(parents)), + Err(GraphError::ParentOutOfRange(rev)) => { + Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev)) + } + Err(GraphError::WorkingDirectoryUnsupported) => Err( + vcsgraph::graph::GraphReadError::WorkingDirectoryUnsupported, + ), + } + } +} + impl RevlogIndex for Index { /// Note C return type is Py_ssize_t (hence signed), but we shall /// force it to unsigned, because it's a length
--- a/rust/hg-cpython/src/dirstate.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/src/dirstate.rs Tue Jan 18 10:27:13 2022 +0100 @@ -54,7 +54,6 @@ matcher: PyObject, ignorefiles: PyList, check_exec: bool, - last_normal_time: (u32, u32), list_clean: bool, list_ignored: bool, list_unknown: bool,
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Tue Jan 18 10:27:13 2022 +0100 @@ -18,7 +18,7 @@ use crate::{ dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator}, - dirstate::item::{timestamp, DirstateItem}, + dirstate::item::DirstateItem, pybytes_deref::PyBytesDeref, }; use hg::{ @@ -194,16 +194,13 @@ &self, p1: PyObject, p2: PyObject, - now: (u32, u32) ) -> PyResult<PyBytes> { - let now = timestamp(py, now)?; - - let mut inner = self.inner(py).borrow_mut(); + let inner = self.inner(py).borrow(); let parents = DirstateParents { p1: extract_node_id(py, &p1)?, p2: extract_node_id(py, &p2)?, }; - let result = inner.pack_v1(parents, now); + let result = inner.pack_v1(parents); match result { Ok(packed) => Ok(PyBytes::new(py, &packed)), Err(_) => Err(PyErr::new::<exc::OSError, _>( @@ -218,17 +215,14 @@ /// instead of written to a new data file (False). def write_v2( &self, - now: (u32, u32), can_append: bool, ) -> PyResult<PyObject> { - let now = timestamp(py, now)?; - - let mut inner = self.inner(py).borrow_mut(); - let result = inner.pack_v2(now, can_append); + let inner = self.inner(py).borrow(); + let result = inner.pack_v2(can_append); match result { Ok((packed, tree_metadata, append)) => { let packed = PyBytes::new(py, &packed); - let tree_metadata = PyBytes::new(py, &tree_metadata); + let tree_metadata = PyBytes::new(py, tree_metadata.as_bytes()); let tuple = (packed, tree_metadata, append); Ok(tuple.to_py_object(py).into_object()) },
--- a/rust/hg-cpython/src/dirstate/item.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/src/dirstate/item.rs Tue Jan 18 10:27:13 2022 +0100 @@ -23,7 +23,7 @@ p2_info: bool = false, has_meaningful_data: bool = true, has_meaningful_mtime: bool = true, - parentfiledata: Option<(u32, u32, (u32, u32))> = None, + parentfiledata: Option<(u32, u32, Option<(u32, u32, bool)>)> = None, fallback_exec: Option<bool> = None, fallback_symlink: Option<bool> = None, @@ -35,7 +35,9 @@ mode_size_opt = Some((mode, size)) } if has_meaningful_mtime { - mtime_opt = Some(timestamp(py, mtime)?) + if let Some(m) = mtime { + mtime_opt = Some(timestamp(py, m)?); + } } } let entry = DirstateEntry::from_v2_data( @@ -192,12 +194,8 @@ Ok(mtime) } - def need_delay(&self, now: (u32, u32)) -> PyResult<bool> { - let now = timestamp(py, now)?; - Ok(self.entry(py).get().need_delay(now)) - } - - def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> { + def mtime_likely_equal_to(&self, other: (u32, u32, bool)) + -> PyResult<bool> { if let Some(mtime) = self.entry(py).get().truncated_mtime() { Ok(mtime.likely_equal(timestamp(py, other)?)) } else { @@ -230,7 +228,7 @@ &self, mode: u32, size: u32, - mtime: (u32, u32), + mtime: (u32, u32, bool), ) -> PyResult<PyNone> { let mtime = timestamp(py, mtime)?; self.update(py, |entry| entry.set_clean(mode, size, mtime)); @@ -275,12 +273,13 @@ pub(crate) fn timestamp( py: Python<'_>, - (s, ns): (u32, u32), + (s, ns, second_ambiguous): (u32, u32, bool), ) -> PyResult<TruncatedTimestamp> { - TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| { - PyErr::new::<exc::ValueError, _>( - py, - "expected mtime truncated to 31 bits", - ) - }) + TruncatedTimestamp::from_already_truncated(s, ns, second_ambiguous) + .map_err(|_| { + PyErr::new::<exc::ValueError, _>( + py, + "expected mtime truncated to 31 bits", + ) + }) }
--- a/rust/hg-cpython/src/dirstate/status.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/src/dirstate/status.rs Tue Jan 18 10:27:13 2022 +0100 @@ -9,13 +9,13 @@ //! `hg-core` crate. From Python, this will be seen as //! `rustext.dirstate.status`. -use crate::dirstate::item::timestamp; use crate::{dirstate::DirstateMap, exceptions::FallbackError}; use cpython::exc::OSError; use cpython::{ exc::ValueError, ObjectProtocol, PyBytes, PyErr, PyList, PyObject, PyResult, PyTuple, Python, PythonObject, ToPyObject, }; +use hg::dirstate::status::StatusPath; use hg::{ matchers::{AlwaysMatcher, FileMatcher, IncludeMatcher}, parse_pattern_syntax, @@ -28,15 +28,19 @@ }; use std::borrow::Borrow; +fn collect_status_path_list(py: Python, paths: &[StatusPath<'_>]) -> PyList { + collect_pybytes_list(py, paths.iter().map(|item| &*item.path)) +} + /// This will be useless once trait impls for collection are added to `PyBytes` /// upstream. fn collect_pybytes_list( py: Python, - collection: &[impl AsRef<HgPath>], + iter: impl Iterator<Item = impl AsRef<HgPath>>, ) -> PyList { let list = PyList::new(py, &[]); - for path in collection.iter() { + for path in iter { list.append( py, PyBytes::new(py, path.as_ref().as_bytes()).into_object(), @@ -103,13 +107,11 @@ root_dir: PyObject, ignore_files: PyList, check_exec: bool, - last_normal_time: (u32, u32), list_clean: bool, list_ignored: bool, list_unknown: bool, collect_traversed_dirs: bool, ) -> PyResult<PyTuple> { - let last_normal_time = timestamp(py, last_normal_time)?; let bytes = root_dir.extract::<PyBytes>(py)?; let root_dir = get_path_from_bytes(bytes.data(py)); @@ -124,6 +126,8 @@ }) .collect(); let ignore_files = ignore_files?; + // The caller may call `copymap.items()` separately + let list_copies = false; match matcher.get_type(py).name(py).borrow() { "alwaysmatcher" => { @@ -135,10 +139,10 @@ ignore_files, StatusOptions { check_exec, - last_normal_time, list_clean, list_ignored, list_unknown, + list_copies, 
collect_traversed_dirs, }, ) @@ -172,10 +176,10 @@ ignore_files, StatusOptions { check_exec, - last_normal_time, list_clean, list_ignored, list_unknown, + list_copies, collect_traversed_dirs, }, ) @@ -224,10 +228,10 @@ ignore_files, StatusOptions { check_exec, - last_normal_time, list_clean, list_ignored, list_unknown, + list_copies, collect_traversed_dirs, }, ) @@ -247,16 +251,16 @@ status_res: DirstateStatus, warnings: Vec<PatternFileWarning>, ) -> PyResult<PyTuple> { - let modified = collect_pybytes_list(py, status_res.modified.as_ref()); - let added = collect_pybytes_list(py, status_res.added.as_ref()); - let removed = collect_pybytes_list(py, status_res.removed.as_ref()); - let deleted = collect_pybytes_list(py, status_res.deleted.as_ref()); - let clean = collect_pybytes_list(py, status_res.clean.as_ref()); - let ignored = collect_pybytes_list(py, status_res.ignored.as_ref()); - let unknown = collect_pybytes_list(py, status_res.unknown.as_ref()); - let unsure = collect_pybytes_list(py, status_res.unsure.as_ref()); - let bad = collect_bad_matches(py, status_res.bad.as_ref())?; - let traversed = collect_pybytes_list(py, status_res.traversed.as_ref()); + let modified = collect_status_path_list(py, &status_res.modified); + let added = collect_status_path_list(py, &status_res.added); + let removed = collect_status_path_list(py, &status_res.removed); + let deleted = collect_status_path_list(py, &status_res.deleted); + let clean = collect_status_path_list(py, &status_res.clean); + let ignored = collect_status_path_list(py, &status_res.ignored); + let unknown = collect_status_path_list(py, &status_res.unknown); + let unsure = collect_status_path_list(py, &status_res.unsure); + let bad = collect_bad_matches(py, &status_res.bad)?; + let traversed = collect_pybytes_list(py, status_res.traversed.iter()); let dirty = status_res.dirty.to_py_object(py); let py_warnings = PyList::new(py, &[]); for warning in warnings.iter() {
--- a/rust/hg-cpython/src/exceptions.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/hg-cpython/src/exceptions.rs Tue Jan 18 10:27:13 2022 +0100 @@ -37,6 +37,32 @@ } } } + + pub fn pynew_from_vcsgraph( + py: Python, + inner: vcsgraph::graph::GraphReadError, + ) -> PyErr { + match inner { + vcsgraph::graph::GraphReadError::InconsistentGraphData => { + GraphError::new(py, "InconsistentGraphData") + } + vcsgraph::graph::GraphReadError::InvalidKey => { + GraphError::new(py, "ParentOutOfRange") + } + vcsgraph::graph::GraphReadError::KeyedInvalidKey(r) => { + GraphError::new(py, ("ParentOutOfRange", r)) + } + vcsgraph::graph::GraphReadError::WorkingDirectoryUnsupported => { + match py + .import("mercurial.error") + .and_then(|m| m.get(py, "WdirUnsupported")) + { + Err(e) => e, + Ok(cls) => PyErr::from_instance(py, cls), + } + } + } + } } py_exception!(rustext, HgPathPyError, RuntimeError);
--- a/rust/rhg/Cargo.toml Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/rhg/Cargo.toml Tue Jan 18 10:27:13 2022 +0100 @@ -18,5 +18,5 @@ micro-timer = "0.3.1" regex = "1.3.9" env_logger = "0.7.1" -format-bytes = "0.2.1" +format-bytes = "0.3.0" users = "0.11.0"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/rhg/src/commands/debugignorerhg.rs Tue Jan 18 10:27:13 2022 +0100 @@ -0,0 +1,40 @@ +use crate::error::CommandError; +use clap::SubCommand; +use hg; +use hg::matchers::get_ignore_matcher; +use hg::StatusError; +use log::warn; + +pub const HELP_TEXT: &str = " +Show effective hgignore patterns used by rhg. + +This is a pure Rust version of `hg debugignore`. + +Some options might be missing, check the list below. +"; + +pub fn args() -> clap::App<'static, 'static> { + SubCommand::with_name("debugignorerhg").about(HELP_TEXT) +} + +pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> { + let repo = invocation.repo?; + + let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded + + let (ignore_matcher, warnings) = get_ignore_matcher( + vec![ignore_file], + &repo.working_directory_path().to_owned(), + &mut |_pattern_bytes| (), + ) + .map_err(|e| StatusError::from(e))?; + + if !warnings.is_empty() { + warn!("Pattern warnings: {:?}", &warnings); + } + + let patterns = ignore_matcher.debug_get_patterns(); + invocation.ui.write_stdout(patterns)?; + invocation.ui.write_stdout(b"\n")?; + Ok(()) +}
--- a/rust/rhg/src/commands/files.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/rhg/src/commands/files.rs Tue Jan 18 10:27:13 2022 +0100 @@ -1,13 +1,12 @@ use crate::error::CommandError; use crate::ui::Ui; -use crate::ui::UiError; -use crate::utils::path_utils::relativize_paths; +use crate::utils::path_utils::RelativizePaths; use clap::Arg; +use hg::errors::HgError; use hg::operations::list_rev_tracked_files; use hg::operations::Dirstate; use hg::repo::Repo; use hg::utils::hg_path::HgPath; -use std::borrow::Cow; pub const HELP_TEXT: &str = " List tracked files. @@ -39,29 +38,60 @@ let rev = invocation.subcommand_args.value_of("rev"); let repo = invocation.repo?; + + // It seems better if this check is removed: this would correspond to + // automatically enabling the extension if the repo requires it. + // However we need this check to be in sync with vanilla hg so hg tests + // pass. + if repo.has_sparse() + && invocation.config.get(b"extensions", b"sparse").is_none() + { + return Err(CommandError::unsupported( + "repo is using sparse, but sparse extension is not enabled", + )); + } + if let Some(rev) = rev { + if repo.has_narrow() { + return Err(CommandError::unsupported( + "rhg files -r <rev> is not supported in narrow clones", + )); + } let files = list_rev_tracked_files(repo, rev).map_err(|e| (e, rev))?; display_files(invocation.ui, repo, files.iter()) } else { + // The dirstate always reflects the sparse narrowspec, so if + // we only have sparse without narrow all is fine. + // If we have narrow, then [hg files] needs to check if + // the store narrowspec is in sync with the one of the dirstate, + // so we can't support that without explicit code. 
+ if repo.has_narrow() { + return Err(CommandError::unsupported( + "rhg files is not supported in narrow clones", + )); + } let distate = Dirstate::new(repo)?; let files = distate.tracked_files()?; - display_files(invocation.ui, repo, files) + display_files(invocation.ui, repo, files.into_iter().map(Ok)) } } fn display_files<'a>( ui: &Ui, repo: &Repo, - files: impl IntoIterator<Item = &'a HgPath>, + files: impl IntoIterator<Item = Result<&'a HgPath, HgError>>, ) -> Result<(), CommandError> { let mut stdout = ui.stdout_buffer(); let mut any = false; - relativize_paths(repo, files, |path: Cow<[u8]>| -> Result<(), UiError> { + let relativize = RelativizePaths::new(repo)?; + for result in files { + let path = result?; + stdout.write_all(&relativize.relativize(path))?; + stdout.write_all(b"\n")?; any = true; - stdout.write_all(path.as_ref())?; - stdout.write_all(b"\n") - })?; + } + stdout.flush()?; if any { Ok(())
--- a/rust/rhg/src/commands/status.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/rhg/src/commands/status.rs Tue Jan 18 10:27:13 2022 +0100 @@ -6,20 +6,29 @@ // GNU General Public License version 2 or any later version. use crate::error::CommandError; -use crate::ui::{Ui, UiError}; -use crate::utils::path_utils::relativize_paths; +use crate::ui::Ui; +use crate::utils::path_utils::RelativizePaths; use clap::{Arg, SubCommand}; +use format_bytes::format_bytes; use hg; use hg::config::Config; +use hg::dirstate::has_exec_bit; +use hg::dirstate::status::StatusPath; use hg::dirstate::TruncatedTimestamp; -use hg::errors::HgError; +use hg::dirstate::RANGE_MASK_31BIT; +use hg::errors::{HgError, IoResultExt}; +use hg::lock::LockError; use hg::manifest::Manifest; use hg::matchers::AlwaysMatcher; use hg::repo::Repo; -use hg::utils::hg_path::{hg_path_to_os_string, HgPath}; -use hg::{HgPathCow, StatusOptions}; -use log::{info, warn}; -use std::borrow::Cow; +use hg::utils::files::get_bytes_from_os_string; +use hg::utils::files::get_bytes_from_path; +use hg::utils::files::get_path_from_bytes; +use hg::utils::hg_path::{hg_path_to_path_buf, HgPath}; +use hg::StatusOptions; +use log::info; +use std::io; +use std::path::PathBuf; pub const HELP_TEXT: &str = " Show changed files in the working directory @@ -81,6 +90,18 @@ .short("-i") .long("--ignored"), ) + .arg( + Arg::with_name("copies") + .help("show source of copied files (DEFAULT: ui.statuscopies)") + .short("-C") + .long("--copies"), + ) + .arg( + Arg::with_name("no-status") + .help("hide status prefix") + .short("-n") + .long("--no-status"), + ) } /// Pure data type allowing the caller to specify file states to display @@ -138,21 +159,42 @@ } // TODO: lift these limitations - if invocation.config.get_bool(b"ui", b"tweakdefaults").ok() == Some(true) { + if invocation.config.get_bool(b"ui", b"tweakdefaults")? 
{ return Err(CommandError::unsupported( "ui.tweakdefaults is not yet supported with rhg status", )); } - if invocation.config.get_bool(b"ui", b"statuscopies").ok() == Some(true) { + if invocation.config.get_bool(b"ui", b"statuscopies")? { return Err(CommandError::unsupported( "ui.statuscopies is not yet supported with rhg status", )); } + if invocation + .config + .get(b"commands", b"status.terse") + .is_some() + { + return Err(CommandError::unsupported( + "status.terse is not yet supported with rhg status", + )); + } let ui = invocation.ui; let config = invocation.config; let args = invocation.subcommand_args; - let display_states = if args.is_present("all") { + + let verbose = !ui.plain() + && !args.is_present("print0") + && (config.get_bool(b"ui", b"verbose")? + || config.get_bool(b"commands", b"status.verbose")?); + if verbose { + return Err(CommandError::unsupported( + "verbose status is not supported yet", + )); + } + + let all = args.is_present("all"); + let display_states = if all { // TODO when implementing `--quiet`: it excludes clean files // from `--all` ALL_DISPLAY_STATES @@ -172,44 +214,84 @@ requested } }; + let no_status = args.is_present("no-status"); + let list_copies = all + || args.is_present("copies") + || config.get_bool(b"ui", b"statuscopies")?; let repo = invocation.repo?; + + if repo.has_sparse() || repo.has_narrow() { + return Err(CommandError::unsupported( + "rhg status is not supported for sparse checkouts or narrow clones yet" + )); + } + let mut dmap = repo.dirstate_map_mut()?; let options = StatusOptions { - // TODO should be provided by the dirstate parsing and - // hence be stored on dmap. Using a value that assumes we aren't - // below the time resolution granularity of the FS and the - // dirstate. 
- last_normal_time: TruncatedTimestamp::new_truncate(0, 0), // we're currently supporting file systems with exec flags only // anyway check_exec: true, list_clean: display_states.clean, list_unknown: display_states.unknown, list_ignored: display_states.ignored, + list_copies, collect_traversed_dirs: false, }; - let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded let (mut ds_status, pattern_warnings) = dmap.status( &AlwaysMatcher, repo.working_directory_path().to_owned(), - vec![ignore_file], + ignore_files(repo, config), options, )?; - if !pattern_warnings.is_empty() { - warn!("Pattern warnings: {:?}", &pattern_warnings); + for warning in pattern_warnings { + match warning { + hg::PatternFileWarning::InvalidSyntax(path, syntax) => ui + .write_stderr(&format_bytes!( + b"{}: ignoring invalid syntax '{}'\n", + get_bytes_from_path(path), + &*syntax + ))?, + hg::PatternFileWarning::NoSuchFile(path) => { + let path = if let Ok(relative) = + path.strip_prefix(repo.working_directory_path()) + { + relative + } else { + &*path + }; + ui.write_stderr(&format_bytes!( + b"skipping unreadable pattern file '{}': \ + No such file or directory\n", + get_bytes_from_path(path), + ))? + } + } } - if !ds_status.bad.is_empty() { - warn!("Bad matches {:?}", &(ds_status.bad)) + for (path, error) in ds_status.bad { + let error = match error { + hg::BadMatch::OsError(code) => { + std::io::Error::from_raw_os_error(code).to_string() + } + hg::BadMatch::BadType(ty) => { + format!("unsupported file type (type is {})", ty) + } + }; + ui.write_stderr(&format_bytes!( + b"{}: {}\n", + path.as_bytes(), + error.as_bytes() + ))? 
} if !ds_status.unsure.is_empty() { info!( "Files to be rechecked by retrieval from filelog: {:?}", - &ds_status.unsure + ds_status.unsure.iter().map(|s| &s.path).collect::<Vec<_>>() ); } + let mut fixup = Vec::new(); if !ds_status.unsure.is_empty() && (display_states.modified || display_states.clean) { @@ -218,99 +300,240 @@ CommandError::from((e, &*format!("{:x}", p1.short()))) })?; for to_check in ds_status.unsure { - if cat_file_is_modified(repo, &manifest, &to_check)? { + if unsure_is_modified(repo, &manifest, &to_check.path)? { if display_states.modified { ds_status.modified.push(to_check); } } else { if display_states.clean { - ds_status.clean.push(to_check); + ds_status.clean.push(to_check.clone()); } + fixup.push(to_check.path.into_owned()) } } } + let relative_paths = (!ui.plain()) + && config + .get_option(b"commands", b"status.relative")? + .unwrap_or(config.get_bool(b"ui", b"relative-paths")?); + let output = DisplayStatusPaths { + ui, + no_status, + relativize: if relative_paths { + Some(RelativizePaths::new(repo)?) 
+ } else { + None + }, + }; if display_states.modified { - display_status_paths(ui, repo, config, &mut ds_status.modified, b"M")?; + output.display(b"M", ds_status.modified)?; } if display_states.added { - display_status_paths(ui, repo, config, &mut ds_status.added, b"A")?; + output.display(b"A", ds_status.added)?; } if display_states.removed { - display_status_paths(ui, repo, config, &mut ds_status.removed, b"R")?; + output.display(b"R", ds_status.removed)?; } if display_states.deleted { - display_status_paths(ui, repo, config, &mut ds_status.deleted, b"!")?; + output.display(b"!", ds_status.deleted)?; } if display_states.unknown { - display_status_paths(ui, repo, config, &mut ds_status.unknown, b"?")?; + output.display(b"?", ds_status.unknown)?; } if display_states.ignored { - display_status_paths(ui, repo, config, &mut ds_status.ignored, b"I")?; + output.display(b"I", ds_status.ignored)?; } if display_states.clean { - display_status_paths(ui, repo, config, &mut ds_status.clean, b"C")?; + output.display(b"C", ds_status.clean)?; + } + + let mut dirstate_write_needed = ds_status.dirty; + let filesystem_time_at_status_start = + ds_status.filesystem_time_at_status_start; + + if (fixup.is_empty() || filesystem_time_at_status_start.is_none()) + && !dirstate_write_needed + { + // Nothing to update + return Ok(()); + } + + // Update the dirstate on disk if we can + let with_lock_result = + repo.try_with_wlock_no_wait(|| -> Result<(), CommandError> { + if let Some(mtime_boundary) = filesystem_time_at_status_start { + for hg_path in fixup { + use std::os::unix::fs::MetadataExt; + let fs_path = hg_path_to_path_buf(&hg_path) + .expect("HgPath conversion"); + // Specifically do not reuse `fs_metadata` from + // `unsure_is_clean` which was needed before reading + // contents. Here we access metadata again after reading + // content, in case it changed in the meantime. 
+ let fs_metadata = repo + .working_directory_vfs() + .symlink_metadata(&fs_path)?; + if let Some(mtime) = + TruncatedTimestamp::for_reliable_mtime_of( + &fs_metadata, + &mtime_boundary, + ) + .when_reading_file(&fs_path)? + { + let mode = fs_metadata.mode(); + let size = fs_metadata.len() as u32 & RANGE_MASK_31BIT; + let mut entry = dmap + .get(&hg_path)? + .expect("ambiguous file not in dirstate"); + entry.set_clean(mode, size, mtime); + dmap.add_file(&hg_path, entry)?; + dirstate_write_needed = true + } + } + } + drop(dmap); // Avoid "already mutably borrowed" RefCell panics + if dirstate_write_needed { + repo.write_dirstate()? + } + Ok(()) + }); + match with_lock_result { + Ok(closure_result) => closure_result?, + Err(LockError::AlreadyHeld) => { + // Not updating the dirstate is not ideal but not critical: + // don’t keep our caller waiting until some other Mercurial + // process releases the lock. + } + Err(LockError::Other(HgError::IoError { error, .. })) + if error.kind() == io::ErrorKind::PermissionDenied => + { + // `hg status` on a read-only repository is fine + } + Err(LockError::Other(error)) => { + // Report other I/O errors + Err(error)? 
+ } } Ok(()) } -// Probably more elegant to use a Deref or Borrow trait rather than -// harcode HgPathBuf, but probably not really useful at this point -fn display_status_paths( - ui: &Ui, - repo: &Repo, - config: &Config, - paths: &mut [HgPathCow], - status_prefix: &[u8], -) -> Result<(), CommandError> { - paths.sort_unstable(); - let mut relative: bool = - config.get_bool(b"ui", b"relative-paths").unwrap_or(false); - relative = config - .get_bool(b"commands", b"status.relative") - .unwrap_or(relative); - if relative && !ui.plain() { - relativize_paths( - repo, - paths, - |path: Cow<[u8]>| -> Result<(), UiError> { - ui.write_stdout( - &[status_prefix, b" ", path.as_ref(), b"\n"].concat(), - ) - }, - )?; - } else { - for path in paths { - // Same TODO as in commands::root - let bytes: &[u8] = path.as_bytes(); +fn ignore_files(repo: &Repo, config: &Config) -> Vec<PathBuf> { + let mut ignore_files = Vec::new(); + let repo_ignore = repo.working_directory_vfs().join(".hgignore"); + if repo_ignore.exists() { + ignore_files.push(repo_ignore) + } + for (key, value) in config.iter_section(b"ui") { + if key == b"ignore" || key.starts_with(b"ignore.") { + let path = get_path_from_bytes(value); + // TODO: expand "~/" and environment variable here, like Python + // does with `os.path.expanduser` and `os.path.expandvars` + + let joined = repo.working_directory_path().join(path); + ignore_files.push(joined); + } + } + ignore_files +} + +struct DisplayStatusPaths<'a> { + ui: &'a Ui, + no_status: bool, + relativize: Option<RelativizePaths>, +} + +impl DisplayStatusPaths<'_> { + // Probably more elegant to use a Deref or Borrow trait rather than + // harcode HgPathBuf, but probably not really useful at this point + fn display( + &self, + status_prefix: &[u8], + mut paths: Vec<StatusPath<'_>>, + ) -> Result<(), CommandError> { + paths.sort_unstable(); + for StatusPath { path, copy_source } in paths { + let relative; + let path = if let Some(relativize) = &self.relativize { + 
relative = relativize.relativize(&path); + &*relative + } else { + path.as_bytes() + }; // TODO optim, probably lots of unneeded copies here, especially // if out stream is buffered - ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?; + if self.no_status { + self.ui.write_stdout(&format_bytes!(b"{}\n", path))? + } else { + self.ui.write_stdout(&format_bytes!( + b"{} {}\n", + status_prefix, + path + ))? + } + if let Some(source) = copy_source { + self.ui.write_stdout(&format_bytes!( + b" {}\n", + source.as_bytes() + ))? + } } + Ok(()) } - Ok(()) } /// Check if a file is modified by comparing actual repo store and file system. /// /// This meant to be used for those that the dirstate cannot resolve, due /// to time resolution limits. -/// -/// TODO: detect permission bits and similar metadata modifications -fn cat_file_is_modified( +fn unsure_is_modified( repo: &Repo, manifest: &Manifest, hg_path: &HgPath, ) -> Result<bool, HgError> { - let file_node = manifest - .find_file(hg_path)? + let vfs = repo.working_directory_vfs(); + let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion"); + let fs_metadata = vfs.symlink_metadata(&fs_path)?; + let is_symlink = fs_metadata.file_type().is_symlink(); + // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the + // dirstate + let fs_flags = if is_symlink { + Some(b'l') + } else if has_exec_bit(&fs_metadata) { + Some(b'x') + } else { + None + }; + + let entry = manifest + .find_by_path(hg_path)? 
.expect("ambgious file not in p1"); + if entry.flags != fs_flags { + return Ok(true); + } let filelog = repo.filelog(hg_path)?; - let filelog_entry = filelog.data_for_node(file_node).map_err(|_| { - HgError::corrupted("filelog missing node from manifest") - })?; - let contents_in_p1 = filelog_entry.data()?; + let fs_len = fs_metadata.len(); + let filelog_entry = + filelog.entry_for_node(entry.node_id()?).map_err(|_| { + HgError::corrupted("filelog missing node from manifest") + })?; + if filelog_entry.file_data_len_not_equal_to(fs_len) { + // No need to read file contents: + // it cannot be equal if it has a different length. + return Ok(true); + } - let fs_path = hg_path_to_os_string(hg_path).expect("HgPath conversion"); - let fs_contents = repo.working_directory_vfs().read(fs_path)?; - return Ok(contents_in_p1 != &*fs_contents); + let p1_filelog_data = filelog_entry.data()?; + let p1_contents = p1_filelog_data.file_data()?; + if p1_contents.len() as u64 != fs_len { + // No need to read file contents: + // it cannot be equal if it has a different length. + return Ok(true); + } + + let fs_contents = if is_symlink { + get_bytes_from_os_string(vfs.read_link(fs_path)?.into_os_string()) + } else { + vfs.read(fs_path)? + }; + Ok(p1_contents != &*fs_contents) }
--- a/rust/rhg/src/main.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/rhg/src/main.rs Tue Jan 18 10:27:13 2022 +0100 @@ -1,4 +1,5 @@ extern crate log; +use crate::error::CommandError; use crate::ui::Ui; use clap::App; use clap::AppSettings; @@ -10,6 +11,7 @@ use hg::repo::{Repo, RepoError}; use hg::utils::files::{get_bytes_from_os_str, get_path_from_bytes}; use hg::utils::SliceExt; +use std::collections::HashSet; use std::ffi::OsString; use std::path::PathBuf; use std::process::Command; @@ -20,7 +22,6 @@ pub mod utils { pub mod path_utils; } -use error::CommandError; fn main_with_result( process_start_time: &blackbox::ProcessStartTime, @@ -28,7 +29,7 @@ repo: Result<&Repo, &NoRepoInCwdError>, config: &Config, ) -> Result<(), CommandError> { - check_extensions(config)?; + check_unsupported(config, repo, ui)?; let app = App::new("rhg") .global_setting(AppSettings::AllowInvalidUtf8) @@ -110,18 +111,23 @@ } } - let blackbox = blackbox::Blackbox::new(&invocation, process_start_time)?; - blackbox.log_command_start(); - let result = run(&invocation); - blackbox.log_command_end(exit_code( - &result, - // TODO: show a warning or combine with original error if `get_bool` - // returns an error - config - .get_bool(b"ui", b"detailed-exit-code") - .unwrap_or(false), - )); - result + if config.is_extension_enabled(b"blackbox") { + let blackbox = + blackbox::Blackbox::new(&invocation, process_start_time)?; + blackbox.log_command_start(); + let result = run(&invocation); + blackbox.log_command_end(exit_code( + &result, + // TODO: show a warning or combine with original error if + // `get_bool` returns an error + config + .get_bool(b"ui", b"detailed-exit-code") + .unwrap_or(false), + )); + result + } else { + run(&invocation) + } } fn main() { @@ -179,7 +185,7 @@ exit( &initial_current_dir, &ui, - OnUnsupported::from_config(&ui, &non_repo_config), + OnUnsupported::from_config(&non_repo_config), Err(error.into()), non_repo_config .get_bool(b"ui", b"detailed-exit-code") @@ -197,7 
+203,7 @@ exit( &initial_current_dir, &ui, - OnUnsupported::from_config(&ui, &non_repo_config), + OnUnsupported::from_config(&non_repo_config), Err(CommandError::UnsupportedFeature { message: format_bytes!( b"URL-like --repository {}", @@ -287,7 +293,7 @@ Err(error) => exit( &initial_current_dir, &ui, - OnUnsupported::from_config(&ui, &non_repo_config), + OnUnsupported::from_config(&non_repo_config), Err(error.into()), // TODO: show a warning or combine with original error if // `get_bool` returns an error @@ -302,7 +308,7 @@ } else { &non_repo_config }; - let on_unsupported = OnUnsupported::from_config(&ui, config); + let on_unsupported = OnUnsupported::from_config(config); let result = main_with_result( &process_start_time, @@ -362,6 +368,20 @@ ) = (&on_unsupported, &result) { let mut args = std::env::args_os(); + let executable = match executable { + None => { + exit_no_fallback( + ui, + OnUnsupported::Abort, + Err(CommandError::abort( + "abort: 'rhg.on-unsupported=fallback' without \ + 'rhg.fallback-executable' set.", + )), + false, + ); + } + Some(executable) => executable, + }; let executable_path = get_path_from_bytes(&executable); let this_executable = args.next().expect("exepcted argv[0] to exist"); if executable_path == &PathBuf::from(this_executable) { @@ -374,7 +394,8 @@ )); on_unsupported = OnUnsupported::Abort } else { - // `args` is now `argv[1..]` since we’ve already consumed `argv[0]` + // `args` is now `argv[1..]` since we’ve already consumed + // `argv[0]` let mut command = Command::new(executable_path); command.args(args); if let Some(initial) = initial_current_dir { @@ -465,6 +486,7 @@ cat debugdata debugrequirements + debugignorerhg files root config @@ -549,13 +571,13 @@ /// Silently exit with code 252. 
AbortSilent, /// Try running a Python implementation - Fallback { executable: Vec<u8> }, + Fallback { executable: Option<Vec<u8>> }, } impl OnUnsupported { const DEFAULT: Self = OnUnsupported::Abort; - fn from_config(ui: &Ui, config: &Config) -> Self { + fn from_config(config: &Config) -> Self { match config .get(b"rhg", b"on-unsupported") .map(|value| value.to_ascii_lowercase()) @@ -566,18 +588,7 @@ Some(b"fallback") => OnUnsupported::Fallback { executable: config .get(b"rhg", b"fallback-executable") - .unwrap_or_else(|| { - exit_no_fallback( - ui, - Self::Abort, - Err(CommandError::abort( - "abort: 'rhg.on-unsupported=fallback' without \ - 'rhg.fallback-executable' set." - )), - false, - ) - }) - .to_owned(), + .map(|x| x.to_owned()), }, None => Self::DEFAULT, Some(_) => { @@ -588,10 +599,23 @@ } } -const SUPPORTED_EXTENSIONS: &[&[u8]] = &[b"blackbox", b"share"]; +/// The `*` extension is an edge-case for config sub-options that apply to all +/// extensions. For now, only `:required` exists, but that may change in the +/// future. +const SUPPORTED_EXTENSIONS: &[&[u8]] = + &[b"blackbox", b"share", b"sparse", b"narrow", b"*"]; fn check_extensions(config: &Config) -> Result<(), CommandError> { - let enabled = config.get_section_keys(b"extensions"); + let enabled: HashSet<&[u8]> = config + .get_section_keys(b"extensions") + .into_iter() + .map(|extension| { + // Ignore extension suboptions. Only `required` exists for now. + // `rhg` either supports an extension or doesn't, so it doesn't + // make sense to consider the loading of an extension. 
+ extension.split_2(b':').unwrap_or((extension, b"")).0 + }) + .collect(); let mut unsupported = enabled; for supported in SUPPORTED_EXTENSIONS { @@ -616,3 +640,39 @@ }) } } + +fn check_unsupported( + config: &Config, + repo: Result<&Repo, &NoRepoInCwdError>, + ui: &ui::Ui, +) -> Result<(), CommandError> { + check_extensions(config)?; + + if std::env::var_os("HG_PENDING").is_some() { + // TODO: only if the value is `== repo.working_directory`? + // What about relative v.s. absolute paths? + Err(CommandError::unsupported("$HG_PENDING"))? + } + + if let Ok(repo) = repo { + if repo.has_subrepos()? { + Err(CommandError::unsupported("sub-repositories"))? + } + } + + if config.has_non_empty_section(b"encode") { + Err(CommandError::unsupported("[encode] config"))? + } + + if config.has_non_empty_section(b"decode") { + Err(CommandError::unsupported("[decode] config"))? + } + + if let Some(color) = config.get(b"ui", b"color") { + if (color == b"always" || color == b"debug") && !ui.plain() { + Err(CommandError::unsupported("colored output"))? + } + } + + Ok(()) +}
--- a/rust/rhg/src/ui.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/rhg/src/ui.rs Tue Jan 18 10:27:13 2022 +0100 @@ -51,7 +51,7 @@ stderr.flush().or_else(handle_stderr_error) } - /// is plain mode active + /// Return whether plain mode is active. /// /// Plain mode means that all configuration variables which affect /// the behavior and output of Mercurial should be
--- a/rust/rhg/src/utils/path_utils.rs Thu Dec 30 13:25:44 2021 +0100 +++ b/rust/rhg/src/utils/path_utils.rs Tue Jan 18 10:27:13 2022 +0100 @@ -3,8 +3,7 @@ // This software may be used and distributed according to the terms of the // GNU General Public License version 2 or any later version. -use crate::error::CommandError; -use crate::ui::UiError; +use hg::errors::HgError; use hg::repo::Repo; use hg::utils::current_dir; use hg::utils::files::{get_bytes_from_path, relativize_path}; @@ -12,37 +11,45 @@ use hg::utils::hg_path::HgPathBuf; use std::borrow::Cow; -pub fn relativize_paths( - repo: &Repo, - paths: impl IntoIterator<Item = impl AsRef<HgPath>>, - mut callback: impl FnMut(Cow<[u8]>) -> Result<(), UiError>, -) -> Result<(), CommandError> { - let cwd = current_dir()?; - let repo_root = repo.working_directory_path(); - let repo_root = cwd.join(repo_root); // Make it absolute - let repo_root_hgpath = - HgPathBuf::from(get_bytes_from_path(repo_root.to_owned())); - let outside_repo: bool; - let cwd_hgpath: HgPathBuf; +pub struct RelativizePaths { + repo_root: HgPathBuf, + cwd: HgPathBuf, + outside_repo: bool, +} + +impl RelativizePaths { + pub fn new(repo: &Repo) -> Result<Self, HgError> { + let cwd = current_dir()?; + let repo_root = repo.working_directory_path(); + let repo_root = cwd.join(repo_root); // Make it absolute + let repo_root_hgpath = + HgPathBuf::from(get_bytes_from_path(repo_root.to_owned())); - if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) { - // The current directory is inside the repo, so we can work with - // relative paths - outside_repo = false; - cwd_hgpath = - HgPathBuf::from(get_bytes_from_path(cwd_relative_to_repo)); - } else { - outside_repo = true; - cwd_hgpath = HgPathBuf::from(get_bytes_from_path(cwd)); + if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) { + // The current directory is inside the repo, so we can work with + // relative paths + Ok(Self { + repo_root: repo_root_hgpath, + cwd: 
HgPathBuf::from(get_bytes_from_path( + cwd_relative_to_repo, + )), + outside_repo: false, + }) + } else { + Ok(Self { + repo_root: repo_root_hgpath, + cwd: HgPathBuf::from(get_bytes_from_path(cwd)), + outside_repo: true, + }) + } } - for file in paths { - if outside_repo { - let file = repo_root_hgpath.join(file.as_ref()); - callback(relativize_path(&file, &cwd_hgpath))?; + pub fn relativize<'a>(&self, path: &'a HgPath) -> Cow<'a, [u8]> { + if self.outside_repo { + let joined = self.repo_root.join(path); + Cow::Owned(relativize_path(&joined, &self.cwd).into_owned()) } else { - callback(relativize_path(file.as_ref(), &cwd_hgpath))?; + relativize_path(path, &self.cwd) } } - Ok(()) }
--- a/setup.py Thu Dec 30 13:25:44 2021 +0100 +++ b/setup.py Tue Jan 18 10:27:13 2022 +0100 @@ -209,7 +209,7 @@ from distutils.sysconfig import get_python_inc, get_config_var from distutils.version import StrictVersion -# Explain to distutils.StrictVersion how our release candidates are versionned +# Explain to distutils.StrictVersion how our release candidates are versioned StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$') @@ -535,7 +535,7 @@ # (see mercurial/__modulepolicy__.py) if hgrustext != 'cpython' and hgrustext is not None: if hgrustext: - msg = 'unkown HGWITHRUSTEXT value: %s' % hgrustext + msg = 'unknown HGWITHRUSTEXT value: %s' % hgrustext printf(msg, file=sys.stderr) hgrustext = None self.rust = hgrustext is not None @@ -597,8 +597,8 @@ e for e in self.extensions if e.name != 'mercurial.zstd' ] - # Build Rust standalon extensions if it'll be used - # and its build is not explictely disabled (for external build + # Build Rust standalone extensions if it'll be used + # and its build is not explicitly disabled (for external build # as Linux distributions would do) if self.distribution.rust and self.rust: if not sys.platform.startswith('linux'): @@ -1502,7 +1502,7 @@ raise RustCompilationError("Cargo not found") elif exc.errno == errno.EACCES: raise RustCompilationError( - "Cargo found, but permisssion to execute it is denied" + "Cargo found, but permission to execute it is denied" ) else: raise
--- a/tests/failfilemerge.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/failfilemerge.py Tue Jan 18 10:27:13 2022 +0100 @@ -9,12 +9,9 @@ ) -def failfilemerge( - filemergefn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None -): +def failfilemerge(*args, **kwargs): raise error.Abort(b"^C") - return filemergefn(premerge, repo, mynode, orig, fcd, fco, fca, labels) def extsetup(ui): - extensions.wrapfunction(filemerge, '_filemerge', failfilemerge) + extensions.wrapfunction(filemerge, 'filemerge', failfilemerge)
--- a/tests/fakedirstatewritetime.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/fakedirstatewritetime.py Tue Jan 18 10:27:13 2022 +0100 @@ -9,7 +9,6 @@ from mercurial import ( context, - dirstate, dirstatemap as dirstatemapmod, extensions, policy, @@ -38,14 +37,8 @@ has_rust_dirstate = policy.importrust('dirstate') is not None -def pack_dirstate(fakenow, orig, dmap, copymap, pl, now): - # execute what original parsers.pack_dirstate should do actually - # for consistency - for f, e in dmap.items(): - if e.need_delay(now): - e.set_possibly_dirty() - - return orig(dmap, copymap, pl, fakenow) +def pack_dirstate(orig, dmap, copymap, pl): + return orig(dmap, copymap, pl) def fakewrite(ui, func): @@ -62,30 +55,30 @@ # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0] - fakenow = timestamp.timestamp((fakenow, 0)) + fakenow = timestamp.timestamp((fakenow, 0, False)) if has_rust_dirstate: # The Rust implementation does not use public parse/pack dirstate # to prevent conversion round-trips orig_dirstatemap_write = dirstatemapmod.dirstatemap.write - wrapper = lambda self, tr, st, now: orig_dirstatemap_write( - self, tr, st, fakenow - ) + wrapper = lambda self, tr, st: orig_dirstatemap_write(self, tr, st) dirstatemapmod.dirstatemap.write = wrapper - orig_dirstate_getfsnow = dirstate._getfsnow - wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args) + orig_get_fs_now = timestamp.get_fs_now + wrapper = lambda *args: pack_dirstate(orig_pack_dirstate, *args) orig_module = parsers orig_pack_dirstate = parsers.pack_dirstate orig_module.pack_dirstate = wrapper - dirstate._getfsnow = lambda *args: fakenow + timestamp.get_fs_now = ( + lambda *args: fakenow + ) # XXX useless for this purpose now try: return func() finally: orig_module.pack_dirstate = orig_pack_dirstate - dirstate._getfsnow = orig_dirstate_getfsnow + timestamp.get_fs_now 
= orig_get_fs_now if has_rust_dirstate: dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
--- a/tests/hghave.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/hghave.py Tue Jan 18 10:27:13 2022 +0100 @@ -663,6 +663,22 @@ return (major, minor) >= (2, 5) +@check("pygments211", "Pygments version >= 2.11") +def pygments211(): + try: + import pygments + + v = pygments.__version__ + except ImportError: + return False + + parts = v.split(".") + major = int(parts[0]) + minor = int(parts[1]) + + return (major, minor) >= (2, 11) + + @check("outer-repo", "outer repo") def has_outer_repo(): # failing for other reasons than 'no repo' imply that there is a repo
--- a/tests/run-tests.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/run-tests.py Tue Jan 18 10:27:13 2022 +0100 @@ -3228,6 +3228,7 @@ # output. osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback' osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = real_hg + osenvironb[b'RHG_STATUS'] = b'1' else: # drop flag for hghave osenvironb.pop(b'RHG_INSTALLED_AS_HG', None)
--- a/tests/test-annotate.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-annotate.t Tue Jan 18 10:27:13 2022 +0100 @@ -886,7 +886,7 @@ created new head $ hg merge --tool :merge-other 24 merging baz - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) $ hg ci -m 'merge forgetting about baz rewrite' $ cat > baz << EOF
--- a/tests/test-audit-path.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-audit-path.t Tue Jan 18 10:27:13 2022 +0100 @@ -8,7 +8,7 @@ $ hg add .hg/00changelog.i abort: path contains illegal component: .hg/00changelog.i - [255] + [10] #if symlink @@ -91,7 +91,7 @@ .hg/test $ hg update -Cr0 abort: path contains illegal component: .hg/test - [255] + [10] attack foo/.hg/test @@ -99,7 +99,7 @@ foo/.hg/test $ hg update -Cr1 abort: path 'foo/.hg/test' is inside nested repo 'foo' - [255] + [10] attack back/test where back symlinks to .. @@ -125,7 +125,7 @@ $ echo data > ../test/file $ hg update -Cr3 abort: path contains illegal component: ../test - [255] + [10] $ cat ../test/file data @@ -135,7 +135,7 @@ /tmp/test $ hg update -Cr4 abort: path contains illegal component: /tmp/test - [255] + [10] $ cd ..
--- a/tests/test-audit-subrepo.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-audit-subrepo.t Tue Jan 18 10:27:13 2022 +0100 @@ -10,7 +10,7 @@ $ echo 'sub/.hg = sub/.hg' >> .hgsub $ hg ci -qAm 'add subrepo "sub/.hg"' abort: path 'sub/.hg' is inside nested repo 'sub' - [255] + [10] prepare tampered repo (including the commit above): @@ -34,7 +34,7 @@ $ hg clone -q hgname hgname2 abort: path 'sub/.hg' is inside nested repo 'sub' - [255] + [10] Test absolute path ------------------ @@ -47,7 +47,7 @@ $ echo '/sub = sub' >> .hgsub $ hg ci -qAm 'add subrepo "/sub"' abort: path contains illegal component: /sub - [255] + [10] prepare tampered repo (including the commit above): @@ -71,7 +71,7 @@ $ hg clone -q absolutepath absolutepath2 abort: path contains illegal component: /sub - [255] + [10] Test root path -------------- @@ -84,7 +84,7 @@ $ echo '/ = sub' >> .hgsub $ hg ci -qAm 'add subrepo "/"' abort: path ends in directory separator: / - [255] + [10] prepare tampered repo (including the commit above): @@ -108,7 +108,7 @@ $ hg clone -q rootpath rootpath2 abort: path ends in directory separator: / - [255] + [10] Test empty path --------------- @@ -197,7 +197,7 @@ $ echo '../sub = ../sub' >> .hgsub $ hg ci -qAm 'add subrepo "../sub"' abort: path contains illegal component: ../sub - [255] + [10] prepare tampered repo (including the commit above): @@ -221,7 +221,7 @@ $ hg clone -q main main2 abort: path contains illegal component: ../sub - [255] + [10] $ cd .. Test variable expansion @@ -718,7 +718,7 @@ $ hg clone -q driveletter driveletter2 abort: path contains illegal component: X: - [255] + [10] #else
--- a/tests/test-bad-extension.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bad-extension.t Tue Jan 18 10:27:13 2022 +0100 @@ -52,16 +52,18 @@ > EOF $ hg -q help help 2>&1 |grep extension - *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow - *** failed to import extension badext2: No module named *badext2* (glob) + *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow + *** failed to import extension "badext2": No module named 'badext2' (py3 !) + *** failed to import extension "badext2": No module named badext2 (no-py3 !) show traceback $ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError|ModuleNotFound' - *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow + *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow Traceback (most recent call last): Exception: bit bucket overflow - *** failed to import extension badext2: No module named *badext2* (glob) + *** failed to import extension "badext2": No module named 'badext2' (py3 !) + *** failed to import extension "badext2": No module named badext2 (no-py3 !) Traceback (most recent call last): ImportError: No module named badext2 (no-py3 !) ImportError: No module named 'hgext.badext2' (py3 no-py36 !) @@ -101,7 +103,7 @@ YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: gpg YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob) YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext - *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow + *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow Traceback (most recent call last): Exception: bit bucket overflow YYYY/MM/DD HH:MM:SS (PID)> - loading extension: baddocext @@ -123,7 +125,8 @@ Traceback (most recent call last): (py3 !) ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !) 
ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !) - *** failed to import extension badext2: No module named *badext2* (glob) + *** failed to import extension "badext2": No module named 'badext2' (py3 !) + *** failed to import extension "badext2": No module named badext2 (no-py3 !) Traceback (most recent call last): ImportError: No module named 'hgext.badext2' (py3 no-py36 !) ModuleNotFoundError: No module named 'hgext.badext2' (py36 !) @@ -160,8 +163,9 @@ confirm that there's no crash when an extension's documentation is bad $ hg help --keyword baddocext - *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow - *** failed to import extension badext2: No module named *badext2* (glob) + *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow + *** failed to import extension "badext2": No module named 'badext2' (py3 !) + *** failed to import extension "badext2": No module named badext2 (no-py3 !) Topics: extensions Using Additional Features
--- a/tests/test-basic.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-basic.t Tue Jan 18 10:27:13 2022 +0100 @@ -40,7 +40,7 @@ A a $ hg status >/dev/full - abort: No space left on device + abort: No space left on device* (glob) [255] #endif
--- a/tests/test-bookmarks-current.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bookmarks-current.t Tue Jan 18 10:27:13 2022 +0100 @@ -245,4 +245,4 @@ $ hg bookmarks --inactive $ hg bookmarks -ql . abort: no active bookmark - [255] + [10]
--- a/tests/test-bookmarks-pushpull.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bookmarks-pushpull.t Tue Jan 18 10:27:13 2022 +0100 @@ -357,7 +357,7 @@ (leaving bookmark V) $ hg push -B . ../a abort: no active bookmark - [255] + [10] $ hg update -r V 0 files updated, 0 files merged, 1 files removed, 0 files unresolved (activating bookmark V) @@ -715,14 +715,15 @@ $ cat <<EOF > ../lookuphook.py > """small extensions adding a hook after wireprotocol lookup to test race""" > import functools - > from mercurial import wireprotov1server, wireprotov2server + > from mercurial import wireprotov1server > > def wrappedlookup(orig, repo, *args, **kwargs): > ret = orig(repo, *args, **kwargs) > repo.hook(b'lookup') > return ret - > for table in [wireprotov1server.commands, wireprotov2server.COMMANDS]: - > table[b'lookup'].func = functools.partial(wrappedlookup, table[b'lookup'].func) + > + > table = wireprotov1server.commands + > table[b'lookup'].func = functools.partial(wrappedlookup, table[b'lookup'].func) > EOF $ cat <<EOF > ../pull-race/.hg/hgrc > [extensions]
--- a/tests/test-bookmarks.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bookmarks.t Tue Jan 18 10:27:13 2022 +0100 @@ -278,7 +278,7 @@ $ hg book -i rename-me $ hg book -m . renamed abort: no active bookmark - [255] + [10] $ hg up -q Y $ hg book -d rename-me @@ -298,7 +298,7 @@ $ hg book -i delete-me $ hg book -d . abort: no active bookmark - [255] + [10] $ hg up -q Y $ hg book -d delete-me
--- a/tests/test-branch-option.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-branch-option.t Tue Jan 18 10:27:13 2022 +0100 @@ -58,12 +58,12 @@ $ hg in -qbz abort: unknown branch 'z' - [255] + [10] $ hg in -q ../branch#z 2:f25d57ab0566 $ hg out -qbz abort: unknown branch 'z' - [255] + [10] in rev c branch a
--- a/tests/test-bundle.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bundle.t Tue Jan 18 10:27:13 2022 +0100 @@ -716,7 +716,7 @@ $ hg incoming '../test#bundle.hg' comparing with ../test abort: unknown revision 'bundle.hg' - [255] + [10] note that percent encoding is not handled:
--- a/tests/test-bundle2-exchange.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bundle2-exchange.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,13 +1,3 @@ -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - Test exchange of common information using bundle2
--- a/tests/test-bundle2-pushback.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bundle2-pushback.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,13 +1,3 @@ -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - $ cat > bundle2.py << EOF > """A small extension to test bundle2 pushback parts. > Current bundle2 implementation doesn't provide a way to generate those
--- a/tests/test-bundle2-remote-changegroup.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-bundle2-remote-changegroup.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,13 +1,3 @@ -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - Create an extension to test bundle2 remote-changegroup parts $ cat > bundle2.py << EOF
--- a/tests/test-casecollision.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-casecollision.t Tue Jan 18 10:27:13 2022 +0100 @@ -12,7 +12,7 @@ ? A $ hg add --config ui.portablefilenames=abort A abort: possible case-folding collision for A - [255] + [20] $ hg st A a ? A
--- a/tests/test-check-interfaces.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-check-interfaces.py Tue Jan 18 10:27:13 2022 +0100 @@ -39,7 +39,6 @@ wireprotoserver, wireprototypes, wireprotov1peer, - wireprotov2server, ) testdir = os.path.dirname(__file__) @@ -129,9 +128,6 @@ ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer) checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None)) - ziverify.verifyClass(repository.ipeerv2, httppeer.httpv2peer) - checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None)) - ziverify.verifyClass(repository.ipeerbase, localrepo.localpeer) checkzobject(localrepo.localpeer(dummyrepo())) @@ -158,19 +154,6 @@ ) ) - ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer) - checkzobject( - sshpeer.sshv2peer( - ui, - b'ssh://localhost/foo', - b'', - dummypipe(), - dummypipe(), - None, - None, - ) - ) - ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer) checkzobject(bundlerepo.bundlepeer(dummyrepo())) @@ -193,26 +176,15 @@ wireprototypes.baseprotocolhandler, wireprotoserver.sshv1protocolhandler ) ziverify.verifyClass( - wireprototypes.baseprotocolhandler, wireprotoserver.sshv2protocolhandler - ) - ziverify.verifyClass( wireprototypes.baseprotocolhandler, wireprotoserver.httpv1protocolhandler, ) - ziverify.verifyClass( - wireprototypes.baseprotocolhandler, - wireprotov2server.httpv2protocolhandler, - ) sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None) checkzobject(sshv1) - sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None) - checkzobject(sshv2) httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None) checkzobject(httpv1) - httpv2 = wireprotov2server.httpv2protocolhandler(None, None) - checkzobject(httpv2) ziverify.verifyClass(repository.ifilestorage, filelog.filelog) ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
--- a/tests/test-check-module-imports.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-check-module-imports.t Tue Jan 18 10:27:13 2022 +0100 @@ -41,4 +41,5 @@ > -X tests/test-demandimport.py \ > -X tests/test-imports-checker.t \ > -X tests/test-verify-repo-operations.py \ + > -X tests/test-extension.t \ > | sed 's-\\-/-g' | "$PYTHON" "$import_checker" -
--- a/tests/test-check-pytype.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-check-pytype.t Tue Jan 18 10:27:13 2022 +0100 @@ -10,94 +10,63 @@ probably hiding real problems. mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo -mercurial/changegroup.py # mysterious incorrect type detection -mercurial/chgserver.py # [attribute-error] -mercurial/cmdutil.py # No attribute 'markcopied' on mercurial.context.filectx [attribute-error] mercurial/context.py # many [attribute-error] -mercurial/copies.py # No attribute 'items' on None [attribute-error] mercurial/crecord.py # tons of [attribute-error], [module-attr] mercurial/debugcommands.py # [wrong-arg-types] mercurial/dispatch.py # initstdio: No attribute ... on TextIO [attribute-error] mercurial/exchange.py # [attribute-error] mercurial/hgweb/hgweb_mod.py # [attribute-error], [name-error], [wrong-arg-types] mercurial/hgweb/server.py # [attribute-error], [name-error], [module-attr] -mercurial/hgweb/webcommands.py # [missing-parameter] mercurial/hgweb/wsgicgi.py # confused values in os.environ mercurial/httppeer.py # [attribute-error], [wrong-arg-types] mercurial/interfaces # No attribute 'capabilities' on peer [attribute-error] mercurial/keepalive.py # [attribute-error] mercurial/localrepo.py # [attribute-error] -mercurial/lsprof.py # unguarded import mercurial/manifest.py # [unsupported-operands], [wrong-arg-types] mercurial/minirst.py # [unsupported-operands], [attribute-error] -mercurial/patch.py # [wrong-arg-types] mercurial/pure/osutil.py # [invalid-typevar], [not-callable] mercurial/pure/parsers.py # [attribute-error] -mercurial/pycompat.py # bytes vs str issues mercurial/repoview.py # [attribute-error] -mercurial/sslutil.py # [attribute-error] -mercurial/statprof.py # bytes vs str on TextIO.write() [wrong-arg-types] mercurial/testing/storage.py # tons of [attribute-error] mercurial/ui.py # [attribute-error], [wrong-arg-types] mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error] 
-mercurial/upgrade.py # line 84, in upgraderepo: No attribute 'discard' on Dict[nothing, nothing] [attribute-error] -mercurial/util.py # [attribute-error], [wrong-arg-count] -mercurial/utils/procutil.py # [attribute-error], [module-attr], [bad-return-type] -mercurial/utils/stringutil.py # [module-attr], [wrong-arg-count] mercurial/utils/memorytop.py # not 3.6 compatible mercurial/win32.py # [not-callable] mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error] -mercurial/wireprotoserver.py # line 253, in _availableapis: No attribute '__iter__' on Callable[[Any, Any], Any] [attribute-error] mercurial/wireprotov1peer.py # [attribute-error] mercurial/wireprotov1server.py # BUG?: BundleValueError handler accesses subclass's attrs -mercurial/wireprotov2server.py # [unsupported-operands], [attribute-error] TODO: use --no-cache on test server? Caching the files locally helps during development, but may be a hinderance for CI testing. $ pytype -V 3.6 --keep-going --jobs auto mercurial \ > -x mercurial/bundlerepo.py \ - > -x mercurial/changegroup.py \ - > -x mercurial/chgserver.py \ - > -x mercurial/cmdutil.py \ > -x mercurial/context.py \ - > -x mercurial/copies.py \ > -x mercurial/crecord.py \ > -x mercurial/debugcommands.py \ > -x mercurial/dispatch.py \ > -x mercurial/exchange.py \ > -x mercurial/hgweb/hgweb_mod.py \ > -x mercurial/hgweb/server.py \ - > -x mercurial/hgweb/webcommands.py \ > -x mercurial/hgweb/wsgicgi.py \ > -x mercurial/httppeer.py \ > -x mercurial/interfaces \ > -x mercurial/keepalive.py \ > -x mercurial/localrepo.py \ - > -x mercurial/lsprof.py \ > -x mercurial/manifest.py \ > -x mercurial/minirst.py \ - > -x mercurial/patch.py \ > -x mercurial/pure/osutil.py \ > -x mercurial/pure/parsers.py \ - > -x mercurial/pycompat.py \ > -x mercurial/repoview.py \ - > -x mercurial/sslutil.py \ - > -x mercurial/statprof.py \ > -x mercurial/testing/storage.py \ > -x mercurial/thirdparty \ > -x mercurial/ui.py \ > -x 
mercurial/unionrepo.py \ - > -x mercurial/upgrade.py \ - > -x mercurial/utils/procutil.py \ - > -x mercurial/utils/stringutil.py \ > -x mercurial/utils/memorytop.py \ > -x mercurial/win32.py \ > -x mercurial/wireprotoframing.py \ - > -x mercurial/wireprotoserver.py \ > -x mercurial/wireprotov1peer.py \ > -x mercurial/wireprotov1server.py \ - > -x mercurial/wireprotov2server.py \ > > $TESTTMP/pytype-output.txt || cat $TESTTMP/pytype-output.txt Only show the results on a failure, because the output on success is also
--- a/tests/test-clone.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-clone.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,13 +1,3 @@ -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - Prepare repo a: $ hg init a @@ -1206,14 +1196,12 @@ #if windows $ hg clone "ssh://%26touch%20owned%20/" --debug running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio" - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg [255] $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio" - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg @@ -1221,14 +1209,12 @@ #else $ hg clone "ssh://%3btouch%20owned%20/" --debug running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio' - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg [255] $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio' - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg @@ -1237,7 +1223,6 @@ $ hg clone "ssh://v-alid.example.com/" --debug running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re) - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg
--- a/tests/test-commandserver.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-commandserver.t Tue Jan 18 10:27:13 2022 +0100 @@ -159,7 +159,7 @@ ... b'default']) *** runcommand log -b --config=alias.log=!echo pwned default abort: unknown revision '--config=alias.log=!echo pwned' - [255] + [10] check that "histedit --commands=-" can read rules from the input channel:
--- a/tests/test-commit-interactive.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-commit-interactive.t Tue Jan 18 10:27:13 2022 +0100 @@ -1494,7 +1494,7 @@ Hunk #1 FAILED at 0 1 out of 1 hunks FAILED -- saving rejects to file editedfile.rej abort: patch failed to apply - [10] + [20] $ cat editedfile This change will not be committed This is the second line
--- a/tests/test-commit.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-commit.t Tue Jan 18 10:27:13 2022 +0100 @@ -134,13 +134,13 @@ $ hg add quux $ hg commit -m "adding internal used extras" --extra amend_source=hash abort: key 'amend_source' is used internally, can't be set manually - [255] + [10] $ hg commit -m "special chars in extra" --extra id@phab=214 abort: keys can only contain ascii letters, digits, '_' and '-' - [255] + [10] $ hg commit -m "empty key" --extra =value abort: unable to parse '=value', keys can't be empty - [255] + [10] $ hg commit -m "adding extras" --extra sourcehash=foo --extra oldhash=bar $ hg log -r . -T '{extras % "{extra}\n"}' branch=default @@ -661,11 +661,11 @@ #if windows $ hg co --clean tip abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc) - [255] + [10] #else $ hg co --clean tip abort: path contains illegal component: .h\xe2\x80\x8cg/hgrc (esc) - [255] + [10] #endif $ hg rollback -f @@ -686,7 +686,7 @@ $ "$PYTHON" evil-commit.py $ hg co --clean tip abort: path contains illegal component: HG~1/hgrc - [255] + [10] $ hg rollback -f repository tip rolled back to revision 2 (undo commit) @@ -706,7 +706,7 @@ $ "$PYTHON" evil-commit.py $ hg co --clean tip abort: path contains illegal component: HG8B6C~2/hgrc - [255] + [10] $ cd ..
--- a/tests/test-conflict.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-conflict.t Tue Jan 18 10:27:13 2022 +0100 @@ -404,7 +404,7 @@ 1 other heads for branch "default" $ hg merge --tool :merge-local merging a - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) $ cat a Start of file
--- a/tests/test-copies-chain-merge.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-copies-chain-merge.t Tue Jan 18 10:27:13 2022 +0100 @@ -463,14 +463,14 @@ 3 files updated, 0 files merged, 2 files removed, 0 files unresolved $ hg merge 'desc("q-2")' --tool ':union' merging v - 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 0 files updated, 1 files merged, 1 files removed, 0 files unresolved (branch merge, don't forget to commit) $ hg ci -m "mPQm-0 $case_desc - one way" $ hg up 'desc("q-2")' 2 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg merge 'desc("p-2")' --tool ':union' merging v - 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 0 files updated, 1 files merged, 1 files removed, 0 files unresolved (branch merge, don't forget to commit) $ hg ci -m "mQPm-0 $case_desc - the other way" created new head @@ -626,14 +626,14 @@ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg merge 'desc("g-1")' --tool :union merging d - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) $ hg ci -m "mDGm-0 $case_desc - one way" $ hg up 'desc("g-1")' 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg merge 'desc("d-2")' --tool :union merging d - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) $ hg ci -m "mGDm-0 $case_desc - the other way" created new head @@ -1649,22 +1649,10 @@ > [format] > exp-use-copies-side-data-changeset = yes > EOF - $ hg debugformat -v - format-variant repo config default - fncache: yes yes yes - dirstate-v2: no no no - dotencode: yes yes yes - generaldelta: yes yes yes - share-safe: no no no - sparserevlog: yes yes yes - persistent-nodemap: no no no (no-rust !) 
- persistent-nodemap: yes yes no (rust !) + $ hg debugformat -v | egrep 'changelog-v2|revlog-v2|copies-sdc' copies-sdc: no yes no revlog-v2: no no no changelog-v2: no yes no - plain-cl-delta: yes yes yes - compression: * (glob) - compression-level: default default default $ hg debugupgraderepo --run --quiet upgrade will perform the following actions: @@ -1689,22 +1677,10 @@ > enabled=yes > numcpus=8 > EOF - $ hg debugformat -v - format-variant repo config default - fncache: yes yes yes - dirstate-v2: no no no - dotencode: yes yes yes - generaldelta: yes yes yes - share-safe: no no no - sparserevlog: yes yes yes - persistent-nodemap: no no no (no-rust !) - persistent-nodemap: yes yes no (rust !) + $ hg debugformat -v | egrep 'changelog-v2|revlog-v2|copies-sdc' copies-sdc: no yes no revlog-v2: no no no changelog-v2: no yes no - plain-cl-delta: yes yes yes - compression: * (glob) - compression-level: default default default $ hg debugupgraderepo --run --quiet upgrade will perform the following actions:
--- a/tests/test-copy-move-merge.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-copy-move-merge.t Tue Jan 18 10:27:13 2022 +0100 @@ -104,12 +104,12 @@ preserving a for resolve of b preserving a for resolve of c removing a - b: remote moved from a -> m (premerge) + b: remote moved from a -> m picked tool ':merge' for b (binary False symlink False changedelete False) merging a and b to b my b@add3f11052fa+ other b@17c05bb7fcb6 ancestor a@b8bf91eeebbc premerge successful - c: remote moved from a -> m (premerge) + c: remote moved from a -> m picked tool ':merge' for c (binary False symlink False changedelete False) merging a and c to c my c@add3f11052fa+ other c@17c05bb7fcb6 ancestor a@b8bf91eeebbc
--- a/tests/test-diff-unified.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-diff-unified.t Tue Jan 18 10:27:13 2022 +0100 @@ -46,7 +46,7 @@ $ hg diff --nodates -U foo abort: diff context lines count must be an integer, not 'foo' - [255] + [10] $ hg diff --nodates -U 2 @@ -87,7 +87,7 @@ $ hg --config diff.unified=foo diff --nodates abort: diff context lines count must be an integer, not 'foo' - [255] + [10] noprefix config and option
--- a/tests/test-dirstate-race.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-dirstate-race.t Tue Jan 18 10:27:13 2022 +0100 @@ -18,7 +18,7 @@ Do we ever miss a sub-second change?: $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do - > hg co -qC 0 + > hg update -qC 0 > echo b > a > hg st > done @@ -66,11 +66,11 @@ > ) > def extsetup(ui): > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup) - > def overridechecklookup(orig, self, files): + > def overridechecklookup(orig, self, *args, **kwargs): > # make an update that changes the dirstate from underneath > self._repo.ui.system(br"sh '$TESTTMP/dirstaterace.sh'", > cwd=self._repo.root) - > return orig(self, files) + > return orig(self, *args, **kwargs) > EOF $ hg debugrebuilddirstate @@ -89,6 +89,7 @@ > rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e > EOF + $ sleep 1 # ensure non-ambiguous mtime $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py M d M e @@ -147,6 +148,8 @@ > > hg update -q -C 0 > hg cat -r 1 b > b + > # make sure the timestamps is not ambiguous and a write will be issued + > touch -t 198606251012 b > EOF "hg status" below should excludes "e", of which exec flag is set, for
--- a/tests/test-dirstate-race2.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-dirstate-race2.t Tue Jan 18 10:27:13 2022 +0100 @@ -19,22 +19,34 @@ $ hg commit -qAm _ $ echo aa > a $ hg commit -m _ +# this sleep is there to ensure current time has -at-least- one second away +# from the current time. It ensure the mtime is not ambiguous. If the test +# "sleep" longer this will be fine. +# It is not used to synchronise parallele operation so it is "fine" to use it. + $ sleep 1 + $ hg status $ hg debugdirstate --no-dates n 644 3 (set |unset) a (re) $ cat >> $TESTTMP/dirstaterace.py << EOF + > import time > from mercurial import ( + > commit, > extensions, > merge, > ) > def extsetup(ui): - > extensions.wrapfunction(merge, 'applyupdates', wrap) - > def wrap(orig, *args, **kwargs): - > res = orig(*args, **kwargs) - > with open("a", "w"): - > pass # just truncate the file - > return res + > extensions.wrapfunction(merge, 'applyupdates', wrap(0)) + > extensions.wrapfunction(commit, 'commitctx', wrap(1)) + > def wrap(duration): + > def new(orig, *args, **kwargs): + > res = orig(*args, **kwargs) + > with open("a", "w"): + > pass # just truncate the file + > time.sleep(duration) + > return res + > return new > EOF Do an update where file 'a' is changed between hg writing it to disk @@ -46,3 +58,32 @@ $ hg debugdirstate --no-dates n 644 2 (set |unset) a (re) $ echo a > a; hg status; hg diff + +Do a commit where file 'a' is changed between hg committing its new +revision into the repository, and the writing of the dirstate. + +This used to results in a corrupted dirstate (size did not match committed size). + + $ echo aaa > a; hg commit -qm _ + $ hg merge -qr 1; hg resolve -m; rm a.orig + warning: conflicts while merging a! 
(edit, then use 'hg resolve --mark') + (no more unresolved files) + $ cat a + <<<<<<< working copy: be46f74ce38d - test: _ + aaa + ======= + aa + >>>>>>> merge rev: eb3fc6c17aa3 - test: _ + $ hg debugdirstate --no-dates + m 0 -2 (set |unset) a (re) + $ hg commit -m _ --config extensions.race=$TESTTMP/dirstaterace.py + $ hg debugdirstate --no-dates + n 0 -1 unset a + $ cat a | wc -c + *0 (re) + $ hg cat -r . a | wc -c + *105 (re) + $ hg status; hg diff --stat + M a + a | 5 ----- + 1 files changed, 0 insertions(+), 5 deletions(-)
--- a/tests/test-dispatch.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-dispatch.t Tue Jan 18 10:27:13 2022 +0100 @@ -84,7 +84,7 @@ > raise Exception('bad') > EOF $ hg log -b '--config=extensions.bad=bad.py' default - *** failed to import extension bad from bad.py: bad + *** failed to import extension "bad" from bad.py: bad abort: option --config may not be abbreviated [10] @@ -127,20 +127,20 @@ #if no-chg $ HGPLAIN=+strictflags hg log -b --config='hooks.pre-log=false' default abort: unknown revision '--config=hooks.pre-log=false' - [255] + [10] $ HGPLAIN=+strictflags hg log -b -R. default abort: unknown revision '-R.' - [255] + [10] $ HGPLAIN=+strictflags hg log -b --cwd=. default abort: unknown revision '--cwd=.' - [255] + [10] #endif $ HGPLAIN=+strictflags hg log -b --debugger default abort: unknown revision '--debugger' - [255] + [10] $ HGPLAIN=+strictflags hg log -b --config='alias.log=!echo pwned' default abort: unknown revision '--config=alias.log=!echo pwned' - [255] + [10] $ HGPLAIN=+strictflags hg log --config='hooks.pre-log=false' -b default abort: option --config may not be abbreviated
--- a/tests/test-double-merge.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-double-merge.t Tue Jan 18 10:27:13 2022 +0100 @@ -38,12 +38,12 @@ starting 4 threads for background file closing (?) preserving foo for resolve of bar preserving foo for resolve of foo - bar: remote copied from foo -> m (premerge) + bar: remote copied from foo -> m picked tool ':merge' for bar (binary False symlink False changedelete False) merging foo and bar to bar my bar@6a0df1dad128+ other bar@484bf6903104 ancestor foo@e6dc8efe11cc premerge successful - foo: versions differ -> m (premerge) + foo: versions differ -> m picked tool ':merge' for foo (binary False symlink False changedelete False) merging foo my foo@6a0df1dad128+ other foo@484bf6903104 ancestor foo@e6dc8efe11cc
--- a/tests/test-extension.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-extension.t Tue Jan 18 10:27:13 2022 +0100 @@ -649,7 +649,7 @@ module stub. Our custom lazy importer for Python 2 always returns a stub. $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}; hg --config extensions.checkrelativity=$TESTTMP/checkrelativity.py checkrelativity) || true - *** failed to import extension checkrelativity from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist' (py3 !) + *** failed to import extension "checkrelativity" from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist' (py3 !) hg: unknown command 'checkrelativity' (py3 !) (use 'hg help' for a list of commands) (py3 !) @@ -1882,7 +1882,7 @@ > EOF $ hg deprecatedcmd > /dev/null - *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo + *** failed to import extension "deprecatedcmd" from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo *** (use @command decorator to register 'deprecatedcmd') hg: unknown command 'deprecatedcmd' (use 'hg help' for a list of commands) @@ -1891,7 +1891,7 @@ the extension shouldn't be loaded at all so the mq works: $ hg qseries --config extensions.mq= > /dev/null - *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo + *** failed to import extension "deprecatedcmd" from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo *** (use @command decorator to register 'deprecatedcmd') $ cd .. 
@@ -1939,8 +1939,117 @@ > test_unicode_default_value = $TESTTMP/test_unicode_default_value.py > EOF $ hg -R $TESTTMP/opt-unicode-default dummy - *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: unicode *'value' found in cmdtable.dummy (glob) + *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode 'value' found in cmdtable.dummy (py3 !) + *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode u'value' found in cmdtable.dummy (no-py3 !) *** (use b'' to make it byte string) hg: unknown command 'dummy' (did you mean summary?) [10] + +Check the mandatory extension feature +------------------------------------- + + $ hg init mandatory-extensions + $ cat > $TESTTMP/mandatory-extensions/.hg/good.py << EOF + > pass + > EOF + $ cat > $TESTTMP/mandatory-extensions/.hg/bad.py << EOF + > raise RuntimeError("babar") + > EOF + $ cat > $TESTTMP/mandatory-extensions/.hg/syntax.py << EOF + > def ( + > EOF + +Check that the good one load : + + $ cat > $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > [extensions] + > good = $TESTTMP/mandatory-extensions/.hg/good.py + > EOF + + $ hg -R mandatory-extensions id + 000000000000 tip + +Make it mandatory to load + + $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > good:required = yes + > EOF + + $ hg -R mandatory-extensions id + 000000000000 tip + +Check that the bad one does not load + + $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > bad = $TESTTMP/mandatory-extensions/.hg/bad.py + > EOF + + $ hg -R mandatory-extensions id + *** failed to import extension "bad" from $TESTTMP/mandatory-extensions/.hg/bad.py: babar + 000000000000 tip + +Make it mandatory to load + + $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > bad:required = yes + > EOF + + $ hg -R mandatory-extensions id + abort: failed to import extension "bad" from 
$TESTTMP/mandatory-extensions/.hg/bad.py: babar + (loading of this extension was required, see `hg help config.extensions` for details) + [255] + +Make it not mandatory to load + + $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > bad:required = no + > EOF + + $ hg -R mandatory-extensions id + *** failed to import extension "bad" from $TESTTMP/mandatory-extensions/.hg/bad.py: babar + 000000000000 tip + +Same check with the syntax error one + + $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > bad = ! + > syntax = $TESTTMP/mandatory-extensions/.hg/syntax.py + > syntax:required = yes + > EOF + + $ hg -R mandatory-extensions id + abort: failed to import extension "syntax" from $TESTTMP/mandatory-extensions/.hg/syntax.py: invalid syntax (*syntax.py, line 1) (glob) + (loading of this extension was required, see `hg help config.extensions` for details) + [255] + +Same check with a missing one + + $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > syntax = ! + > syntax:required = + > missing = foo/bar/baz/I/do/not/exist/ + > missing:required = yes + > EOF + + $ hg -R mandatory-extensions id + abort: failed to import extension "missing" from foo/bar/baz/I/do/not/exist/: [Errno 2] $ENOENT$: 'foo/bar/baz/I/do/not/exist' + (loading of this extension was required, see `hg help config.extensions` for details) + [255] + +Have a "default" setting for the suboption: + + $ cat > $TESTTMP/mandatory-extensions/.hg/hgrc << EOF + > [extensions] + > bad = $TESTTMP/mandatory-extensions/.hg/bad.py + > bad:required = no + > good = $TESTTMP/mandatory-extensions/.hg/good.py + > syntax = $TESTTMP/mandatory-extensions/.hg/syntax.py + > *:required = yes + > EOF + + $ hg -R mandatory-extensions id + *** failed to import extension "bad" from $TESTTMP/mandatory-extensions/.hg/bad.py: babar + abort: failed to import extension "syntax" from $TESTTMP/mandatory-extensions/.hg/syntax.py: invalid syntax (*syntax.py, line 1) (glob) + (loading of this extension was required, see 
`hg help config.extensions` for details) + [255]
--- a/tests/test-graft.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-graft.t Tue Jan 18 10:27:13 2022 +0100 @@ -212,7 +212,7 @@ ancestor: 68795b066622, local: ef0ef43d49e7+, remote: 5d205f8b35b6 starting 4 threads for background file closing (?) preserving b for resolve of b - b: local copied/moved from a -> m (premerge) + b: local copied/moved from a -> m picked tool ':merge' for b (binary False symlink False changedelete False) merging b and a to b my b@ef0ef43d49e7+ other a@5d205f8b35b6 ancestor a@68795b066622 @@ -242,13 +242,10 @@ d: remote is newer -> g getting d preserving e for resolve of e - e: versions differ -> m (premerge) + e: versions differ -> m picked tool ':merge' for e (binary False symlink False changedelete False) merging e my e@1905859650ec+ other e@9c233e8e184d ancestor e@4c60f11aa304 - e: versions differ -> m (merge) - picked tool ':merge' for e (binary False symlink False changedelete False) - my e@1905859650ec+ other e@9c233e8e184d ancestor e@4c60f11aa304 warning: conflicts while merging e! (edit, then use 'hg resolve --mark') abort: unresolved conflicts, can't continue (use 'hg resolve' and 'hg graft --continue') @@ -855,8 +852,8 @@ $ hg graft -r 6 --base 5 grafting 6:25a2b029d3ae "6" merging d + warning: conflicts while merging d! (edit, then use 'hg resolve --mark') merging e - warning: conflicts while merging d! (edit, then use 'hg resolve --mark') abort: unresolved conflicts, can't continue (use 'hg resolve' and 'hg graft --continue') [1]
--- a/tests/test-grep.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-grep.t Tue Jan 18 10:27:13 2022 +0100 @@ -1200,11 +1200,11 @@ $ hg log -f add0-cp4 abort: cannot follow nonexistent file: "add0-cp4" - [255] + [20] $ hg grep --diff -f data add0-cp4 abort: cannot follow nonexistent file: "add0-cp4" - [255] + [20] BROKEN: maybe better to abort $ hg grep -f data add0-cp4 @@ -1214,11 +1214,11 @@ $ hg log -f add0-cp1-mod1-rm3 abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3" - [255] + [20] $ hg grep --diff -f data add0-cp1-mod1-rm3 abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3" - [255] + [20] BROKEN: maybe better to abort $ hg grep -f data add0-cp1-mod1-rm3 @@ -1229,11 +1229,11 @@ $ hg log -fr. add0-cp1-mod1-rm3 abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3" - [255] + [20] $ hg grep --diff -fr. data add0-cp1-mod1-rm3 abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3" - [255] + [20] BROKEN: should abort $ hg grep -fr. data add0-cp1-mod1-rm3 @@ -1244,11 +1244,11 @@ $ hg log -f add0-rm4 abort: cannot follow file not in parent revision: "add0-rm4" - [255] + [20] $ hg grep --diff -f data add0-rm4 abort: cannot follow file not in parent revision: "add0-rm4" - [255] + [20] BROKEN: should abort $ hg grep -f data add0-rm4 @@ -1340,11 +1340,11 @@ $ hg log -fr2 add0-rm2 abort: cannot follow file not in any of the specified revisions: "add0-rm2" - [255] + [20] $ hg grep --diff -fr2 data add0-rm2 abort: cannot follow file not in any of the specified revisions: "add0-rm2" - [255] + [20] BROKEN: should abort $ hg grep -fr2 data add0-rm2
--- a/tests/test-help.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-help.t Tue Jan 18 10:27:13 2022 +0100 @@ -1519,26 +1519,38 @@ "commands.update.check" Determines what level of checking 'hg update' will perform before moving to a destination revision. Valid values are "abort", "none", - "linear", and "noconflict". "abort" always fails if the working - directory has uncommitted changes. "none" performs no checking, and - may result in a merge with uncommitted changes. "linear" allows any - update as long as it follows a straight line in the revision history, - and may trigger a merge with uncommitted changes. "noconflict" will - allow any update which would not trigger a merge with uncommitted - changes, if any are present. (default: "linear") + "linear", and "noconflict". + + - "abort" always fails if the working directory has uncommitted + changes. + - "none" performs no checking, and may result in a merge with + uncommitted changes. + - "linear" allows any update as long as it follows a straight line in + the revision history, and may trigger a merge with uncommitted + changes. + - "noconflict" will allow any update which would not trigger a merge + with uncommitted changes, if any are present. + + (default: "linear") $ hg help config.commands.update.check "commands.update.check" Determines what level of checking 'hg update' will perform before moving to a destination revision. Valid values are "abort", "none", - "linear", and "noconflict". "abort" always fails if the working - directory has uncommitted changes. "none" performs no checking, and - may result in a merge with uncommitted changes. "linear" allows any - update as long as it follows a straight line in the revision history, - and may trigger a merge with uncommitted changes. "noconflict" will - allow any update which would not trigger a merge with uncommitted - changes, if any are present. (default: "linear") + "linear", and "noconflict". 
+ + - "abort" always fails if the working directory has uncommitted + changes. + - "none" performs no checking, and may result in a merge with + uncommitted changes. + - "linear" allows any update as long as it follows a straight line in + the revision history, and may trigger a merge with uncommitted + changes. + - "noconflict" will allow any update which would not trigger a merge + with uncommitted changes, if any are present. + + (default: "linear") $ hg help config.ommands.update.check
--- a/tests/test-hgignore.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-hgignore.t Tue Jan 18 10:27:13 2022 +0100 @@ -59,9 +59,19 @@ ? syntax $ echo "*.o" > .hgignore +#if no-rhg $ hg status abort: $TESTTMP/ignorerepo/.hgignore: invalid pattern (relre): *.o (glob) [255] +#endif +#if rhg + $ hg status + Unsupported syntax regex parse error: + ^(?:*.o) + ^ + error: repetition operator missing expression + [255] +#endif Ensure given files are relative to cwd
--- a/tests/test-http-api-httpv2.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,743 +0,0 @@ -#require no-chg - - $ . $TESTDIR/wireprotohelpers.sh - $ enabledummycommands - - $ hg init server - $ cat > server/.hg/hgrc << EOF - > [experimental] - > web.apiserver = true - > EOF - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid - $ cat hg.pid > $DAEMON_PIDS - -HTTP v2 protocol not enabled by default - - $ sendhttpraw << EOF - > httprequest GET api/$HTTPV2 - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /api/exp-http-v2-0003 HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 404 Not Found\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 33\r\n - s> \r\n - s> API exp-http-v2-0003 not enabled\n - -Restart server with support for HTTP v2 API - - $ killdaemons.py - $ enablehttpv2 server - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid - $ cat hg.pid > $DAEMON_PIDS - -Request to unknown command yields 404 - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/badcommand - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/badcommand HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 404 Not Found\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 42\r\n - s> \r\n - s> unknown wire protocol command: badcommand\n - -GET to read-only command yields a 405 - - $ sendhttpraw << EOF - > httprequest GET api/$HTTPV2/ro/customreadonly - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 405 Method Not Allowed\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Allow: POST\r\n - s> Content-Length: 30\r\n - s> \r\n - s> commands require POST requests - -Missing Accept header results in 406 - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/customreadonly - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 406 Not Acceptable\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 85\r\n - s> \r\n - s> client MUST specify Accept header with value: application/mercurial-exp-framing-0006\n - -Bad Accept header results in 406 - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/customreadonly - > accept: invalid - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: invalid\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 406 Not Acceptable\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 85\r\n - s> \r\n - s> client MUST specify Accept header with value: application/mercurial-exp-framing-0006\n - -Bad Content-Type header results in 415 - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/customreadonly - > accept: $MEDIATYPE - > user-agent: test - > content-type: badmedia - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: badmedia\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 415 Unsupported Media Type\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 88\r\n - s> \r\n - s> client MUST send Content-Type header with value: application/mercurial-exp-framing-0006\n - -Request to read-only command works out of the box - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/customreadonly - > accept: $MEDIATYPE - > content-type: $MEDIATYPE - > user-agent: test - > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'} - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> *\r\n (glob) - s> content-type: application/mercurial-exp-framing-0006\r\n - s> user-agent: test\r\n - s> content-length: 29\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok - s> \r\n - s> 27\r\n - s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - - $ sendhttpv2peerverbose << EOF - > command customreadonly - > EOF - creating http peer for wire protocol version 2 - sending customreadonly command - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 65\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x15\x00\x00\x01\x00\x01\x00\x11\xa1DnameNcustomreadonly - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 27\r\n - s> \x1f\x00\x00\x01\x00\x02\x041 - s> X\x1dcustomreadonly bytes response - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - response: gen[ - b'customreadonly bytes response' - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - -Request to read-write command fails because server is read-only by default - -GET to read-write request yields 405 - - $ sendhttpraw << EOF - > httprequest GET api/$HTTPV2/rw/customreadonly - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 405 Method Not Allowed\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Allow: POST\r\n - s> Content-Length: 30\r\n - s> \r\n - s> commands require POST requests - -Even for unknown commands - - $ sendhttpraw << EOF - > httprequest GET api/$HTTPV2/rw/badcommand - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /api/exp-http-v2-0003/rw/badcommand HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 405 Method Not Allowed\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Allow: POST\r\n - s> Content-Length: 30\r\n - s> \r\n - s> commands require POST requests - -SSL required by default - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/rw/customreadonly - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 403 ssl required\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Length: 17\r\n - s> \r\n - s> permission denied - -Restart server to allow non-ssl read-write operations - - $ killdaemons.py - $ cat > server/.hg/hgrc << EOF - > [experimental] - > web.apiserver = true - > web.api.http-v2 = true - > [web] - > push_ssl = false - > allow-push = * - > EOF - - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Authorized request for valid read-write command works - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/rw/customreadonly - > user-agent: test - > accept: $MEDIATYPE - > content-type: $MEDIATYPE - > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'} - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> user-agent: test\r\n - s> content-length: 29\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok - s> \r\n - s> 27\r\n - s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - -Authorized request for unknown command is rejected - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/rw/badcommand - > user-agent: test - > accept: $MEDIATYPE - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/rw/badcommand HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 404 Not Found\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 42\r\n - s> \r\n - s> unknown wire protocol command: badcommand\n - -debugreflect isn't enabled by default - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/debugreflect - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/debugreflect HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 404 Not Found\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 34\r\n - s> \r\n - s> debugreflect service not available - -Restart server to get debugreflect endpoint - - $ killdaemons.py - $ cat > server/.hg/hgrc << EOF - > [experimental] - > web.apiserver = true - > web.api.debugreflect = true - > web.api.http-v2 = true - > [web] - > push_ssl = false - > allow-push = * - > EOF - - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Command frames can be reflected via debugreflect - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/debugreflect - > accept: $MEDIATYPE - > content-type: $MEDIATYPE - > user-agent: test - > frame 1 1 stream-begin command-request new cbor:{b'name': b'command1', b'args': {b'foo': b'val1', b'bar1': b'val'}} - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/debugreflect HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> user-agent: test\r\n - s> content-length: 47\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> \'\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Dbar1CvalCfooDval1DnameHcommand1 - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 223\r\n - s> \r\n - s> received: 1 1 1 \xa2Dargs\xa2Dbar1CvalCfooDval1DnameHcommand1\n - s> ["runcommand", {"args": {"bar1": "val", "foo": "val1"}, "command": "command1", "data": null, "redirect": null, "requestid": 1}]\n - s> received: <no frame>\n - s> {"action": "noop"} - -Multiple requests to regular command URL are not allowed - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/customreadonly - > accept: $MEDIATYPE - > content-type: $MEDIATYPE - > user-agent: test - > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'} - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> user-agent: test\r\n - s> content-length: 29\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok - s> \r\n - s> 27\r\n - s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - -Multiple requests to "multirequest" URL are allowed - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/multirequest - > accept: $MEDIATYPE - > content-type: $MEDIATYPE - > user-agent: test - > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'} - > frame 3 1 0 command-request new cbor:{b'name': b'customreadonly'} - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> *\r\n (glob) - s> *\r\n (glob) - s> user-agent: test\r\n - s> content-length: 58\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly\x15\x00\x00\x03\x00\x01\x00\x11\xa1DnameNcustomreadonly - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok - s> \r\n - s> 27\r\n - s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x03\x00\x02\x041\xa1FstatusBok - s> \r\n - s> 27\r\n - s> \x1f\x00\x00\x03\x00\x02\x041X\x1dcustomreadonly bytes response - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x03\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - -Interleaved requests to "multirequest" are processed - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/multirequest - > accept: $MEDIATYPE - > content-type: $MEDIATYPE - > user-agent: test - > frame 1 1 stream-begin command-request new|more \xa2Dargs\xa1Inamespace - > frame 3 1 0 command-request new|more \xa2Dargs\xa1Inamespace - > frame 3 1 0 command-request continuation JnamespacesDnameHlistkeys - > frame 1 1 0 command-request continuation IbookmarksDnameHlistkeys - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> user-agent: test\r\n - s> content-length: 115\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> \x11\x00\x00\x01\x00\x01\x01\x15\xa2Dargs\xa1Inamespace\x11\x00\x00\x03\x00\x01\x00\x15\xa2Dargs\xa1Inamespace\x19\x00\x00\x03\x00\x01\x00\x12JnamespacesDnameHlistkeys\x18\x00\x00\x01\x00\x01\x00\x12IbookmarksDnameHlistkeys - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x03\x00\x02\x01\x92Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x03\x00\x02\x041\xa1FstatusBok - s> \r\n - s> 28\r\n - s> \x00\x00\x03\x00\x02\x041\xa3Ibookmarks@Jnamespaces@Fphases@ - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x03\x00\x02\x002 - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok - s> \r\n - s> 9\r\n - s> \x01\x00\x00\x01\x00\x02\x041\xa0 - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - -Restart server to disable read-write access - - $ killdaemons.py - $ cat > server/.hg/hgrc << EOF - > [experimental] - > web.apiserver = true - > web.api.debugreflect = true - > web.api.http-v2 = true - > [web] - > push_ssl = false - > EOF - - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Attempting to run a read-write command via multirequest on read-only URL is not allowed - - $ sendhttpraw << EOF - > httprequest POST api/$HTTPV2/ro/multirequest - > accept: $MEDIATYPE - > content-type: $MEDIATYPE - > user-agent: test - > frame 1 1 stream-begin command-request new cbor:{b'name': b'pushkey'} - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> user-agent: test\r\n - s> content-length: 22\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> \x0e\x00\x00\x01\x00\x01\x01\x11\xa1DnameGpushkey - s> makefile('rb', None) - s> HTTP/1.1 403 Forbidden\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 52\r\n - s> \r\n - s> insufficient permissions to execute command: pushkey - -Defining an invalid content encoding results in warning - - $ hg --config experimental.httppeer.v2-encoder-order=identity,badencoder --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ << EOF - > command heads - > EOF - creating http peer for wire protocol version 2 - sending heads command - wire protocol version 2 encoder referenced in config (badencoder) is not known; ignoring - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 56\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 1e\r\n - s> \x16\x00\x00\x01\x00\x02\x041 - s> \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - response: [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - -#if zstd - - $ hg --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ << EOF - > command heads - > EOF - creating http peer for wire protocol version 2 - sending heads command - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 70\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> *\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x83Hzstd-8mbDzlibHidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hzstd-8mb - s> \r\n - s> 25\r\n - s> \x1d\x00\x00\x01\x00\x02\x042 - s> (\xb5/\xfd\x00X\xa4\x00\x00p\xa1FstatusBok\x81T\x00\x01\x00\tP\x02 - s> \r\n - s> 0\r\n - s> \r\n - response: [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - -#endif - - $ cat error.log
--- a/tests/test-http-api.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,299 +0,0 @@ -#require no-chg - - $ send() { - > hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT/ - > } - - $ hg init server - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid - $ cat hg.pid > $DAEMON_PIDS - -Request to /api fails unless web.apiserver is enabled - - $ get-with-headers.py $LOCALIP:$HGPORT api - 400 no such method: api - - <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> - <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US"> - <head> - <link rel="icon" href="/static/hgicon.png" type="image/png" /> - <meta name="robots" content="index, nofollow" /> - <link rel="stylesheet" href="/static/style-paper.css" type="text/css" /> - <script type="text/javascript" src="/static/mercurial.js"></script> - - <title>$TESTTMP/server: error</title> - </head> - <body> - - <div class="container"> - <div class="menu"> - <div class="logo"> - <a href="https://mercurial-scm.org/"> - <img src="/static/hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a> - </div> - <ul> - <li><a href="/shortlog">log</a></li> - <li><a href="/graph">graph</a></li> - <li><a href="/tags">tags</a></li> - <li><a href="/bookmarks">bookmarks</a></li> - <li><a href="/branches">branches</a></li> - </ul> - <ul> - <li><a href="/help">help</a></li> - </ul> - </div> - - <div class="main"> - - <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2> - <h3>error</h3> - - - <form class="search" action="/log"> - - <p><input name="rev" id="search1" type="text" size="30" value="" /></p> - <div id="hint">Find changesets by keywords (author, files, the commit message), revision - number or hash, or <a href="/help/revsets">revset expression</a>.</div> - </form> - - <div class="description"> - <p> - An error occurred while processing your request: - </p> - <p> - no such method: api - </p> - </div> - </div> - </div> - - - - 
</body> - </html> - - [1] - - $ get-with-headers.py $LOCALIP:$HGPORT api/ - 400 no such method: api - - <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> - <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US"> - <head> - <link rel="icon" href="/static/hgicon.png" type="image/png" /> - <meta name="robots" content="index, nofollow" /> - <link rel="stylesheet" href="/static/style-paper.css" type="text/css" /> - <script type="text/javascript" src="/static/mercurial.js"></script> - - <title>$TESTTMP/server: error</title> - </head> - <body> - - <div class="container"> - <div class="menu"> - <div class="logo"> - <a href="https://mercurial-scm.org/"> - <img src="/static/hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a> - </div> - <ul> - <li><a href="/shortlog">log</a></li> - <li><a href="/graph">graph</a></li> - <li><a href="/tags">tags</a></li> - <li><a href="/bookmarks">bookmarks</a></li> - <li><a href="/branches">branches</a></li> - </ul> - <ul> - <li><a href="/help">help</a></li> - </ul> - </div> - - <div class="main"> - - <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2> - <h3>error</h3> - - - <form class="search" action="/log"> - - <p><input name="rev" id="search1" type="text" size="30" value="" /></p> - <div id="hint">Find changesets by keywords (author, files, the commit message), revision - number or hash, or <a href="/help/revsets">revset expression</a>.</div> - </form> - - <div class="description"> - <p> - An error occurred while processing your request: - </p> - <p> - no such method: api - </p> - </div> - </div> - </div> - - - - </body> - </html> - - [1] - -Restart server with support for API server - - $ killdaemons.py - $ cat > server/.hg/hgrc << EOF - > [experimental] - > web.apiserver = true - > EOF - - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid - $ cat hg.pid > $DAEMON_PIDS - -/api lists available APIs (empty since none are available by default) - - $ send << EOF - > 
httprequest GET api - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /api HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 100\r\n - s> \r\n - s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n - s> \n - s> (no available APIs)\n - - $ send << EOF - > httprequest GET api/ - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /api/ HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 100\r\n - s> \r\n - s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n - s> \n - s> (no available APIs)\n - -Accessing an unknown API yields a 404 - - $ send << EOF - > httprequest GET api/unknown - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /api/unknown HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 404 Not Found\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 33\r\n - s> \r\n - s> Unknown API: unknown\n - s> Known APIs: - -Accessing a known but not enabled API yields a different error - - $ send << EOF - > httprequest GET api/exp-http-v2-0003 - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /api/exp-http-v2-0003 HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 404 Not Found\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 33\r\n - s> \r\n - s> API exp-http-v2-0003 not enabled\n - -Restart server with support for HTTP v2 API - - $ killdaemons.py - $ cat > server/.hg/hgrc << EOF - > [experimental] - > web.apiserver = true - > web.api.http-v2 = true - > EOF - - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid - $ cat hg.pid > $DAEMON_PIDS - -/api lists the HTTP v2 protocol as available - - $ send << EOF - > httprequest GET api - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /api HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 96\r\n - s> \r\n - s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n - s> \n - s> exp-http-v2-0003 - - $ send << EOF - > httprequest GET api/ - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /api/ HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 96\r\n - s> \r\n - s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n - s> \n - s> exp-http-v2-0003
--- a/tests/test-http-protocol.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-http-protocol.t Tue Jan 18 10:27:13 2022 +0100 @@ -252,121 +252,6 @@ s> bookmarks\t\n s> namespaces\t\n s> phases\t - -Client with HTTPv2 enabled advertises that and gets old capabilities response from old server - - $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF - > command heads - > EOF - s> setsockopt(6, 1, 1) -> None (?) - s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1,X-HgUpgrade-1\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 Script output follows\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-0.1\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - sending heads command - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=heads HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1\r\n - s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 Script output follows\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-0.1\r\n - s> Content-Length: 41\r\n - s> \r\n - s> 0000000000000000000000000000000000000000\n - response: [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ killdaemons.py - $ enablehttpv2 empty - $ hg --config server.compressionengines=zlib -R empty serve -p $HGPORT -d --pid-file hg.pid - $ cat hg.pid > $DAEMON_PIDS - -Client with HTTPv2 enabled automatically upgrades if the server supports it - - $ hg --config experimental.httppeer.advertise-v2=true --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF - > command heads - > EOF - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1,X-HgUpgrade-1\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4Dty
peDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - sending heads command - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 56\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 1e\r\n - s> \x16\x00\x00\x01\x00\x02\x041 - s> \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> 
\r\n - response: [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - $ killdaemons.py HTTP client follows HTTP redirect on handshake to new repo
--- a/tests/test-import-bypass.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-import-bypass.t Tue Jan 18 10:27:13 2022 +0100 @@ -43,7 +43,7 @@ unable to find 'a' for patching (use '--prefix' to apply patch relative to the current directory) abort: patch failed to apply - [255] + [20] $ hg st $ shortlog o 1:4e322f7ce8e3 test 0 0 - foo - changea @@ -234,7 +234,7 @@ patching file a Hunk #1 FAILED at 0 abort: patch failed to apply - [255] + [20] $ hg --config patch.eol=auto import -d '0 0' -m 'test patch.eol' --bypass ../test.diff applying ../test.diff $ shortlog
--- a/tests/test-import-git.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-import-git.t Tue Jan 18 10:27:13 2022 +0100 @@ -519,7 +519,8 @@ > EOF applying patch from stdin abort: could not decode "binary2" binary patch: bad base85 character at position 6 - [255] + (check that whitespace in the patch has not been mangled) + [10] $ hg revert -aq $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF" @@ -534,7 +535,8 @@ > EOF applying patch from stdin abort: "binary2" length is 5 bytes, should be 6 - [255] + (check that whitespace in the patch has not been mangled) + [10] $ hg revert -aq $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF" @@ -548,7 +550,8 @@ > EOF applying patch from stdin abort: could not extract "binary2" binary data - [255] + (check that whitespace in the patch has not been mangled) + [10] Simulate a copy/paste turning LF into CRLF (issue2870) @@ -748,7 +751,7 @@ > EOF applying patch from stdin abort: cannot create b: destination already exists - [255] + [20] $ cat b b @@ -768,7 +771,7 @@ cannot create b: destination already exists 1 out of 1 hunks FAILED -- saving rejects to file b.rej abort: patch failed to apply - [255] + [20] $ cat b b @@ -791,7 +794,7 @@ Hunk #1 FAILED at 0 1 out of 1 hunks FAILED -- saving rejects to file linkb.rej abort: patch failed to apply - [255] + [20] $ hg st ? b.rej ? linkb.rej
--- a/tests/test-import-unknown.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-import-unknown.t Tue Jan 18 10:27:13 2022 +0100 @@ -29,7 +29,7 @@ file added already exists 1 out of 1 hunks FAILED -- saving rejects to file added.rej abort: patch failed to apply - [255] + [20] Test modifying an unknown file @@ -41,7 +41,7 @@ $ hg import --no-commit ../unknown.diff applying ../unknown.diff abort: cannot patch changed: file is not tracked - [255] + [20] Test removing an unknown file @@ -54,7 +54,7 @@ $ hg import --no-commit ../unknown.diff applying ../unknown.diff abort: cannot patch removed: file is not tracked - [255] + [20] Test copying onto an unknown file @@ -64,6 +64,6 @@ $ hg import --no-commit ../unknown.diff applying ../unknown.diff abort: cannot create copied: destination already exists - [255] + [20] $ cd ..
--- a/tests/test-import.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-import.t Tue Jan 18 10:27:13 2022 +0100 @@ -234,7 +234,8 @@ $ hg --cwd b import -mpatch ../broken.patch applying ../broken.patch abort: bad hunk #1 - [255] + (check that whitespace in the patch has not been mangled) + [10] $ rm -r b hg -R repo import @@ -834,7 +835,7 @@ Hunk #1 FAILED at 0 1 out of 1 hunks FAILED -- saving rejects to file a.rej abort: patch failed to apply - [255] + [20] $ hg import --no-commit -v fuzzy-tip.patch applying fuzzy-tip.patch patching file a @@ -853,7 +854,7 @@ Hunk #1 FAILED at 0 1 out of 1 hunks FAILED -- saving rejects to file a.rej abort: patch failed to apply - [255] + [20] $ hg up -qC $ hg import --config patch.fuzz=2 --exact fuzzy-reparent.patch applying fuzzy-reparent.patch @@ -1084,7 +1085,7 @@ > EOF applying patch from stdin abort: path contains illegal component: ../outside/foo - [255] + [10] $ cd .. @@ -2054,7 +2055,7 @@ (use '--prefix' to apply patch relative to the current directory) 1 out of 1 hunks FAILED -- saving rejects to file file1.rej abort: patch failed to apply - [255] + [20] test import crash (issue5375) $ cd .. @@ -2064,7 +2065,7 @@ applying patch from stdin a not tracked! abort: source file 'a' does not exist - [255] + [20] test immature end of hunk @@ -2076,7 +2077,8 @@ > EOF applying patch from stdin abort: bad hunk #1: incomplete hunk - [255] + (check that whitespace in the patch has not been mangled) + [10] $ hg import - <<'EOF' > diff --git a/foo b/foo @@ -2087,4 +2089,5 @@ > EOF applying patch from stdin abort: bad hunk #1: incomplete hunk - [255] + (check that whitespace in the patch has not been mangled) + [10]
--- a/tests/test-infinitepush-ci.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-infinitepush-ci.t Tue Jan 18 10:27:13 2022 +0100 @@ -204,7 +204,7 @@ $ hg pull -r b4e4bce660512ad3e71189e14588a70ac8e31fef pulling from $TESTTMP/repo abort: unknown revision 'b4e4bce660512ad3e71189e14588a70ac8e31fef' - [255] + [10] $ hg glog o 1:6cb0989601f1 added a | public
--- a/tests/test-init.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-init.t Tue Jan 18 10:27:13 2022 +0100 @@ -9,7 +9,7 @@ > if [ -f "$name"/.hg/00changelog.i ]; then > echo 00changelog.i created > fi - > cat "$name"/.hg/requires + > hg debugrequires -R "$name" > } creating 'local'
--- a/tests/test-issue6528.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-issue6528.t Tue Jan 18 10:27:13 2022 +0100 @@ -187,6 +187,11 @@ #endif Check that the issue is present +(It is currently not present with rhg but will be when optimizations are added +to resolve ambiguous files at the end of status without reading their content +if the size differs, and reading the expected size without resolving filelog +deltas where possible.) + $ hg st M D.txt M b.txt
--- a/tests/test-issue672.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-issue672.t Tue Jan 18 10:27:13 2022 +0100 @@ -65,7 +65,7 @@ ancestor: c64f439569a9, local: f4a9cff3cd0b+, remote: 746e9549ea96 starting 4 threads for background file closing (?) preserving 1a for resolve of 1a - 1a: local copied/moved from 1 -> m (premerge) + 1a: local copied/moved from 1 -> m picked tool ':merge' for 1a (binary False symlink False changedelete False) merging 1a and 1 to 1a my 1a@f4a9cff3cd0b+ other 1@746e9549ea96 ancestor 1@c64f439569a9 @@ -89,7 +89,7 @@ starting 4 threads for background file closing (?) preserving 1 for resolve of 1a removing 1 - 1a: remote moved from 1 -> m (premerge) + 1a: remote moved from 1 -> m picked tool ':merge' for 1a (binary False symlink False changedelete False) merging 1 and 1a to 1a my 1a@746e9549ea96+ other 1a@f4a9cff3cd0b ancestor 1@c64f439569a9
--- a/tests/test-largefiles-misc.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-largefiles-misc.t Tue Jan 18 10:27:13 2022 +0100 @@ -41,7 +41,7 @@ > EOF $ hg config extensions - \*\*\* failed to import extension largefiles from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob) + \*\*\* failed to import extension "largefiles" from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob) abort: repository requires features unknown to this Mercurial: largefiles (see https://mercurial-scm.org/wiki/MissingRequirement for more information) [255]
--- a/tests/test-largefiles-update.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-largefiles-update.t Tue Jan 18 10:27:13 2022 +0100 @@ -68,20 +68,39 @@ A linear merge will update standins before performing the actual merge. It will do a lfdirstate status walk and find 'unset'/'unsure' files, hash them, and update the corresponding standins. + Verify that it actually marks the clean files as clean in lfdirstate so we don't have to hash them again next time we update. +# note: +# We do this less agressively now, to avoid race condition, however the +# cache +# is properly set after the next status +# +# The "changed" output is marked as missing-correct-output/known-bad-output +# for clarify + $ hg up 0 files updated, 0 files merged, 0 files removed, 0 files unresolved updated to "f74e50bd9e55: #2" 1 other heads for branch "default" $ hg debugdirstate --large --nodate + n 644 7 set large1 (missing-correct-output !) + n 644 13 set large2 (missing-correct-output !) + n 0 -1 unset large1 (known-bad-output !) + n 0 -1 unset large2 (known-bad-output !) + $ sleep 1 # so that mtime are not ambiguous + $ hg status + $ hg debugdirstate --large --nodate n 644 7 set large1 n 644 13 set large2 Test that lfdirstate keeps track of last modification of largefiles and prevents unnecessary hashing of content - also after linear/noop update +(XXX Since there is a possible race during update, we only do this after the next +status call, this is slower, but more correct) + $ sleep 1 $ hg st $ hg debugdirstate --large --nodate @@ -92,6 +111,13 @@ updated to "f74e50bd9e55: #2" 1 other heads for branch "default" $ hg debugdirstate --large --nodate + n 644 7 set large1 (missing-correct-output !) + n 644 13 set large2 (missing-correct-output !) + n 0 -1 unset large1 (known-bad-output !) + n 0 -1 unset large2 (known-bad-output !) + $ sleep 1 # so that mtime are not ambiguous + $ hg status + $ hg debugdirstate --large --nodate n 644 7 set large1 n 644 13 set large2
--- a/tests/test-largefiles-wireproto.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-largefiles-wireproto.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,13 +1,3 @@ -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - This file contains testcases that tend to be related to the wire protocol part of largefiles.
--- a/tests/test-lfconvert.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-lfconvert.t Tue Jan 18 10:27:13 2022 +0100 @@ -94,7 +94,7 @@ 1276481102f218c981e0324180bafd9f sub/maybelarge.dat "lfconvert" adds 'largefiles' to .hg/requires. - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache
--- a/tests/test-lfs-largefiles.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-lfs-largefiles.t Tue Jan 18 10:27:13 2022 +0100 @@ -288,7 +288,7 @@ The requirement is added to the destination repo. - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache @@ -345,7 +345,7 @@ breaks you can get 1048576 lines of +y in the output, which takes a looooooong time to print. $ hg diff -r 2:3 | head -n 20 - $ hg diff -r 2:6 + $ hg diff -r 2:6 | head -n 20 diff -r e989d0fa3764 -r 752e3a0d8488 large.bin --- a/large.bin Thu Jan 01 00:00:00 1970 +0000 +++ b/large.bin Thu Jan 01 00:00:00 1970 +0000
--- a/tests/test-lfs.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-lfs.t Tue Jan 18 10:27:13 2022 +0100 @@ -40,7 +40,7 @@ > EOF $ hg config extensions - \*\*\* failed to import extension lfs from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob) + \*\*\* failed to import extension "lfs" from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob) abort: repository requires features unknown to this Mercurial: lfs (see https://mercurial-scm.org/wiki/MissingRequirement for more information) [255]
--- a/tests/test-log-bookmark.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-log-bookmark.t Tue Jan 18 10:27:13 2022 +0100 @@ -189,10 +189,10 @@ $ hg log -B unknown abort: bookmark 'unknown' does not exist - [255] + [10] Shouldn't accept string-matcher syntax: $ hg log -B 're:.*' abort: bookmark 're:.*' does not exist - [255] + [10]
--- a/tests/test-log-linerange.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-log-linerange.t Tue Jan 18 10:27:13 2022 +0100 @@ -1150,4 +1150,4 @@ $ hg ci -m 'remove baz' --quiet $ hg log -f -L dir/baz,5:7 -p abort: cannot follow file not in parent revision: "dir/baz" - [255] + [20]
--- a/tests/test-log.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-log.t Tue Jan 18 10:27:13 2022 +0100 @@ -122,13 +122,13 @@ $ hg log -qfl1 '' inexistent abort: cannot follow file not in parent revision: "inexistent" - [255] + [20] $ hg log -qfl1 . inexistent abort: cannot follow file not in parent revision: "inexistent" - [255] + [20] $ hg log -qfl1 "`pwd`" inexistent abort: cannot follow file not in parent revision: "inexistent" - [255] + [20] $ hg log -qfl1 '' e 4:7e4639b4691b @@ -145,7 +145,7 @@ $ hg log -f dir abort: cannot follow file not in parent revision: "dir" - [255] + [20] -f, directory @@ -552,7 +552,7 @@ $ hg log -T '{rev}\n' -fr4 e x abort: cannot follow file not in any of the specified revisions: "x" - [255] + [20] follow files from the specified revisions with directory patterns (BROKEN: should follow copies from dir/b@2) @@ -1417,7 +1417,7 @@ $ hg log -b 're:.*' abort: unknown revision 're:.*' - [255] + [10] $ hg log -k 're:.*' $ hg log -u 're:.*' @@ -1544,7 +1544,7 @@ $ hg log -b dummy abort: unknown revision 'dummy' - [255] + [10] log -b . @@ -2422,7 +2422,7 @@ $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat notfound abort: cannot follow file not in any of the specified revisions: "notfound" - [255] + [20] follow files from wdir and non-wdir revision: @@ -2435,15 +2435,15 @@ $ hg log -T '{rev}\n' -f d1/f2 abort: cannot follow nonexistent file: "d1/f2" - [255] + [20] $ hg log -T '{rev}\n' -f f1-copy abort: cannot follow nonexistent file: "f1-copy" - [255] + [20] $ hg log -T '{rev}\n' -f .d6/f1 abort: cannot follow file not in parent revision: ".d6/f1" - [255] + [20] $ hg revert -aqC
--- a/tests/test-merge-commit.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-commit.t Tue Jan 18 10:27:13 2022 +0100 @@ -72,7 +72,7 @@ ancestor: 0f2ff26688b9, local: 2263c1be0967+, remote: 0555950ead28 starting 4 threads for background file closing (?) preserving bar for resolve of bar - bar: versions differ -> m (premerge) + bar: versions differ -> m picked tool ':merge' for bar (binary False symlink False changedelete False) merging bar my bar@2263c1be0967+ other bar@0555950ead28 ancestor bar@0f2ff26688b9 @@ -159,7 +159,7 @@ ancestor: 0f2ff26688b9, local: 2263c1be0967+, remote: 3ffa6b9e35f0 starting 4 threads for background file closing (?) preserving bar for resolve of bar - bar: versions differ -> m (premerge) + bar: versions differ -> m picked tool ':merge' for bar (binary False symlink False changedelete False) merging bar my bar@2263c1be0967+ other bar@3ffa6b9e35f0 ancestor bar@0f2ff26688b9
--- a/tests/test-merge-criss-cross.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-criss-cross.t Tue Jan 18 10:27:13 2022 +0100 @@ -93,13 +93,10 @@ f1: remote is newer -> g getting f1 preserving f2 for resolve of f2 - f2: versions differ -> m (premerge) + f2: versions differ -> m picked tool ':dump' for f2 (binary False symlink False changedelete False) merging f2 my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@0f6b37dbe527 - f2: versions differ -> m (merge) - picked tool ':dump' for f2 (binary False symlink False changedelete False) - my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@0f6b37dbe527 1 files updated, 0 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1]
--- a/tests/test-merge-exec.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-exec.t Tue Jan 18 10:27:13 2022 +0100 @@ -4,7 +4,6 @@ #require execbit - Initial setup ==============
--- a/tests/test-merge-force.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-force.t Tue Jan 18 10:27:13 2022 +0100 @@ -218,27 +218,27 @@ You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u merging content1_content2_content1_content4-tracked + warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark') merging content1_content2_content2_content1-tracked merging content1_content2_content2_content4-tracked + warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') merging content1_content2_content3_content1-tracked merging content1_content2_content3_content3-tracked + warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') merging content1_content2_content3_content4-tracked + warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') merging content1_content2_missing_content1-tracked merging content1_content2_missing_content4-tracked + warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_content2_content4-tracked + warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_content3_content3-tracked + warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_content3_content4-tracked + warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_missing_content4-tracked + warning: conflicts while merging missing_content2_missing_content4-tracked! 
(edit, then use 'hg resolve --mark') merging missing_content2_missing_content4-untracked - warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark') warning: conflicts while merging missing_content2_missing_content4-untracked! (edit, then use 'hg resolve --mark') 18 files updated, 3 files merged, 8 files removed, 35 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon @@ -735,6 +735,7 @@ You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u merging content1_content2_content1_content4-tracked + warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark') file 'content1_content2_content1_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev]. You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? 
u @@ -752,6 +753,7 @@ You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u merging content1_content2_content2_content4-tracked + warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') file 'content1_content2_content2_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev]. You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u @@ -769,10 +771,12 @@ You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u merging content1_content2_content3_content3-tracked + warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') file 'content1_content2_content3_content3-untracked' was deleted in local [working copy] but was modified in other [merge rev]. You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u merging content1_content2_content3_content4-tracked + warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') file 'content1_content2_content3_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev]. You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u @@ -790,6 +794,7 @@ You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? u merging content1_content2_missing_content4-tracked + warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark') file 'content1_content2_missing_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev]. You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. What do you want to do? 
u @@ -812,19 +817,14 @@ You can use (c)hanged version, (d)elete, or leave (u)nresolved. What do you want to do? u merging missing_content2_content2_content4-tracked + warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_content3_content3-tracked + warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_content3_content4-tracked + warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_missing_content4-tracked + warning: conflicts while merging missing_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark') merging missing_content2_missing_content4-untracked - warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark') - warning: conflicts while merging missing_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark') warning: conflicts while merging missing_content2_missing_content4-untracked! 
(edit, then use 'hg resolve --mark') [1] $ checkstatus > $TESTTMP/status2 2>&1
--- a/tests/test-merge-halt.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-halt.t Tue Jan 18 10:27:13 2022 +0100 @@ -24,8 +24,8 @@ $ hg rebase -s 1 -d 2 --tool false rebasing 1:1f28a51c3c9b "c" merging a + merging a failed! merging b - merging a failed! merging b failed! unresolved conflicts (see 'hg resolve', then 'hg rebase --continue') [240] @@ -42,7 +42,6 @@ $ hg rebase -s 1 -d 2 --tool false rebasing 1:1f28a51c3c9b "c" merging a - merging b merging a failed! unresolved conflicts (see 'hg resolve', then 'hg rebase --continue') [240] @@ -67,9 +66,9 @@ > EOS rebasing 1:1f28a51c3c9b "c" merging a - merging b merging a failed! continue merge operation (yn)? y + merging b merging b failed! continue merge operation (yn)? n unresolved conflicts (see 'hg resolve', then 'hg rebase --continue') @@ -94,9 +93,9 @@ > EOS rebasing 1:1f28a51c3c9b "c" merging a - merging b output file a appears unchanged was merge successful (yn)? y + merging b output file b appears unchanged was merge successful (yn)? n merging b failed! @@ -122,7 +121,6 @@ $ hg rebase -s 1 -d 2 --tool true rebasing 1:1f28a51c3c9b "c" merging a - merging b merging a failed! unresolved conflicts (see 'hg resolve', then 'hg rebase --continue') [240] @@ -141,8 +139,8 @@ > EOS rebasing 1:1f28a51c3c9b "c" merging a + was merge of 'a' successful (yn)? y merging b - was merge of 'a' successful (yn)? y was merge of 'b' successful (yn)? n merging b failed! unresolved conflicts (see 'hg resolve', then 'hg rebase --continue') @@ -159,8 +157,8 @@ $ hg rebase -s 1 -d 2 --tool echo --keep --config merge-tools.echo.premerge=keep rebasing 1:1f28a51c3c9b "c" merging a + $TESTTMP/repo/a *a~base* *a~other* (glob) merging b - $TESTTMP/repo/a *a~base* *a~other* (glob) $TESTTMP/repo/b *b~base* *b~other* (glob) Check that unshelve isn't broken by halting the merge @@ -187,7 +185,6 @@ unshelving change 'default' rebasing shelved changes merging shelve_file1 - merging shelve_file2 merging shelve_file1 failed! 
unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue') [240] @@ -195,7 +192,6 @@ M shelve_file1 M shelve_file2 ? shelve_file1.orig - ? shelve_file2.orig # The repository is in an unfinished *unshelve* state. # Unresolved merge conflicts: @@ -210,7 +206,6 @@ $ hg resolve --tool false --all --re-merge merging shelve_file1 - merging shelve_file2 merging shelve_file1 failed! merge halted after failed merge (see hg resolve) [240]
--- a/tests/test-merge-internal-tools-pattern.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-internal-tools-pattern.t Tue Jan 18 10:27:13 2022 +0100 @@ -130,7 +130,7 @@ $ hg merge 3 merging f - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) $ cat f
--- a/tests/test-merge-tools.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-tools.t Tue Jan 18 10:27:13 2022 +0100 @@ -578,7 +578,6 @@ $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=nonexistentmergetool couldn't find merge tool true (for pattern f) merging f - couldn't find merge tool true (for pattern f) merging f failed! 0 files updated, 0 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon @@ -604,7 +603,6 @@ $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=/nonexistent/mergetool couldn't find merge tool true (for pattern f) merging f - couldn't find merge tool true (for pattern f) merging f failed! 0 files updated, 0 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon @@ -1837,7 +1835,6 @@ $ hg merge -y -r 2 --config ui.merge=missingbinary couldn't find merge tool missingbinary (for pattern f) merging f - couldn't find merge tool missingbinary (for pattern f) revision 1 space revision 0
--- a/tests/test-merge-types.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge-types.t Tue Jan 18 10:27:13 2022 +0100 @@ -34,7 +34,7 @@ branchmerge: True, force: False, partial: False ancestor: c334dc3be0da, local: 521a1e40188f+, remote: 3574f3e69b1c preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m tool internal:merge (for pattern a) can't handle symlinks couldn't find merge tool hgmerge no tool found to merge a @@ -68,7 +68,7 @@ branchmerge: True, force: False, partial: False ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m picked tool ':union' for a (binary False symlink True changedelete False) merging a my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da @@ -90,7 +90,7 @@ branchmerge: True, force: False, partial: False ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m picked tool ':merge3' for a (binary False symlink True changedelete False) merging a my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da @@ -112,7 +112,7 @@ branchmerge: True, force: False, partial: False ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m picked tool ':merge-local' for a (binary False symlink True changedelete False) merging a my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da @@ -133,7 +133,7 @@ branchmerge: True, force: False, partial: False ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m picked tool ':merge-other' for a (binary False symlink True changedelete False) merging a my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da @@ -166,7 +166,7 @@ 
branchmerge: False, force: False, partial: False ancestor: c334dc3be0da, local: c334dc3be0da+, remote: 521a1e40188f preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m (couldn't find merge tool hgmerge|tool hgmerge can't handle symlinks) (re) no tool found to merge a picked tool ':prompt' for a (binary False symlink True changedelete False) @@ -343,9 +343,12 @@ $ hg merge merging a + warning: conflicts while merging a! (edit, then use 'hg resolve --mark') warning: cannot merge flags for b without common ancestor - keeping local flags merging b + warning: conflicts while merging b! (edit, then use 'hg resolve --mark') merging bx + warning: conflicts while merging bx! (edit, then use 'hg resolve --mark') warning: cannot merge flags for c without common ancestor - keeping local flags tool internal:merge (for pattern d) can't handle symlinks no tool found to merge d @@ -362,9 +365,6 @@ file 'h' needs to be resolved. You can keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved. What do you want to do? u - warning: conflicts while merging a! (edit, then use 'hg resolve --mark') - warning: conflicts while merging b! (edit, then use 'hg resolve --mark') - warning: conflicts while merging bx! (edit, then use 'hg resolve --mark') 3 files updated, 0 files merged, 0 files removed, 6 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] @@ -411,9 +411,12 @@ $ hg up -Cqr1 $ hg merge merging a + warning: conflicts while merging a! (edit, then use 'hg resolve --mark') warning: cannot merge flags for b without common ancestor - keeping local flags merging b + warning: conflicts while merging b! (edit, then use 'hg resolve --mark') merging bx + warning: conflicts while merging bx! 
(edit, then use 'hg resolve --mark') warning: cannot merge flags for c without common ancestor - keeping local flags tool internal:merge (for pattern d) can't handle symlinks no tool found to merge d @@ -430,9 +433,6 @@ file 'h' needs to be resolved. You can keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved. What do you want to do? u - warning: conflicts while merging a! (edit, then use 'hg resolve --mark') - warning: conflicts while merging b! (edit, then use 'hg resolve --mark') - warning: conflicts while merging bx! (edit, then use 'hg resolve --mark') 3 files updated, 0 files merged, 0 files removed, 6 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1]
--- a/tests/test-merge1.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge1.t Tue Jan 18 10:27:13 2022 +0100 @@ -349,6 +349,10 @@ aren't changed), even if none of mode, size and timestamp of them isn't changed on the filesystem (see also issue4583). +This test is now "best effort" as the mechanism to prevent such race are +getting better, it get more complicated to test a specific scenario that would +trigger it. If you see flakyness here, there is a race. + $ cat > $TESTTMP/abort.py <<EOF > from __future__ import absolute_import > # emulate aborting before "recordupdates()". in this case, files @@ -365,13 +369,6 @@ > extensions.wrapfunction(merge, "applyupdates", applyupdates) > EOF - $ cat >> .hg/hgrc <<EOF - > [fakedirstatewritetime] - > # emulate invoking dirstate.write() via repo.status() - > # at 2000-01-01 00:00 - > fakenow = 200001010000 - > EOF - (file gotten from other revision) $ hg update -q -C 2 @@ -381,12 +378,8 @@ $ hg update -q -C 3 $ cat b This is file b1 - $ touch -t 200001010000 b - $ hg debugrebuildstate - $ cat >> .hg/hgrc <<EOF > [extensions] - > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py > abort = $TESTTMP/abort.py > EOF $ hg merge 5 @@ -394,13 +387,11 @@ [255] $ cat >> .hg/hgrc <<EOF > [extensions] - > fakedirstatewritetime = ! > abort = ! > EOF $ cat b THIS IS FILE B5 - $ touch -t 200001010000 b $ hg status -A b M b @@ -413,12 +404,10 @@ $ cat b this is file b6 - $ touch -t 200001010000 b - $ hg debugrebuildstate + $ hg status $ cat >> .hg/hgrc <<EOF > [extensions] - > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py > abort = $TESTTMP/abort.py > EOF $ hg merge --tool internal:other 5 @@ -426,13 +415,11 @@ [255] $ cat >> .hg/hgrc <<EOF > [extensions] - > fakedirstatewritetime = ! > abort = ! > EOF $ cat b THIS IS FILE B5 - $ touch -t 200001010000 b $ hg status -A b M b
--- a/tests/test-merge7.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge7.t Tue Jan 18 10:27:13 2022 +0100 @@ -86,13 +86,10 @@ ancestor: 96b70246a118, local: 50c3a7e29886+, remote: 40d11a4173a8 starting 4 threads for background file closing (?) preserving test.txt for resolve of test.txt - test.txt: versions differ -> m (premerge) + test.txt: versions differ -> m picked tool ':merge' for test.txt (binary False symlink False changedelete False) merging test.txt my test.txt@50c3a7e29886+ other test.txt@40d11a4173a8 ancestor test.txt@96b70246a118 - test.txt: versions differ -> m (merge) - picked tool ':merge' for test.txt (binary False symlink False changedelete False) - my test.txt@50c3a7e29886+ other test.txt@40d11a4173a8 ancestor test.txt@96b70246a118 warning: conflicts while merging test.txt! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
--- a/tests/test-merge9.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-merge9.t Tue Jan 18 10:27:13 2022 +0100 @@ -27,8 +27,8 @@ test with the rename on the remote side $ HGMERGE=false hg merge merging bar + merging bar failed! merging foo and baz to baz - merging bar failed! 1 files updated, 1 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] @@ -41,8 +41,8 @@ 3 files updated, 0 files merged, 1 files removed, 0 files unresolved $ HGMERGE=false hg merge merging bar + merging bar failed! merging baz and foo to baz - merging bar failed! 1 files updated, 1 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1]
--- a/tests/test-narrow-acl.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-acl.t Tue Jan 18 10:27:13 2022 +0100 @@ -34,7 +34,7 @@ f2 Requirements should contain narrowhg - $ cat narrowclone1/.hg/requires | grep narrowhg + $ hg debugrequires -R narrowclone1 | grep narrowhg narrowhg-experimental NarrowHG should track f1 and f2
--- a/tests/test-narrow-clone-no-ellipsis.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-clone-no-ellipsis.t Tue Jan 18 10:27:13 2022 +0100 @@ -22,7 +22,7 @@ added 40 changesets with 1 changes to 1 files new changesets *:* (glob) $ cd narrow - $ cat .hg/requires | grep -v generaldelta + $ hg debugrequires | grep -v generaldelta dotencode dirstate-v2 (dirstate-v2 !) fncache
--- a/tests/test-narrow-clone-stream.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-clone-stream.t Tue Jan 18 10:27:13 2022 +0100 @@ -61,7 +61,7 @@ Making sure we have the correct set of requirements - $ cat .hg/requires + $ hg debugrequires dotencode (tree !) dotencode (flat-fncache !) dirstate-v2 (dirstate-v2 !)
--- a/tests/test-narrow-clone.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-clone.t Tue Jan 18 10:27:13 2022 +0100 @@ -38,7 +38,7 @@ added 3 changesets with 1 changes to 1 files new changesets *:* (glob) $ cd narrow - $ cat .hg/requires | grep -v generaldelta + $ hg debugrequires | grep -v generaldelta dotencode dirstate-v2 (dirstate-v2 !) fncache
--- a/tests/test-narrow-expanddirstate.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-expanddirstate.t Tue Jan 18 10:27:13 2022 +0100 @@ -142,7 +142,7 @@ Hunk #1 FAILED at 0 1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej abort: patch failed to apply - [255] + [20] $ hg tracked | grep patchdir [1] $ hg files | grep patchdir > /dev/null
--- a/tests/test-narrow-merge.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-merge.t Tue Jan 18 10:27:13 2022 +0100 @@ -101,4 +101,4 @@ $ hg merge 'desc("conflicting outside/f1")' abort: conflict in file 'outside/f1' is outside narrow clone (flat !) abort: conflict in file 'outside/' is outside narrow clone (tree !) - [255] + [20]
--- a/tests/test-narrow-rebase.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-rebase.t Tue Jan 18 10:27:13 2022 +0100 @@ -96,4 +96,4 @@ $ hg rebase -d 'desc("modify outside/f1")' rebasing 4:707c035aadb6 "conflicting outside/f1" abort: conflict in file 'outside/f1' is outside narrow clone - [255] + [20]
--- a/tests/test-narrow-sparse.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-narrow-sparse.t Tue Jan 18 10:27:13 2022 +0100 @@ -56,7 +56,7 @@ $ test -f .hg/sparse [1] - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache
--- a/tests/test-obsolete-distributed.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-obsolete-distributed.t Tue Jan 18 10:27:13 2022 +0100 @@ -570,7 +570,7 @@ added 2 changesets with 0 changes to 2 files (+1 heads) (2 other changesets obsolete on arrival) abort: cannot update to target: filtered revision '6' - [255] + [10] $ cd ..
--- a/tests/test-parseindex2.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-parseindex2.py Tue Jan 18 10:27:13 2022 +0100 @@ -57,6 +57,7 @@ 0, constants.COMP_MODE_INLINE, constants.COMP_MODE_INLINE, + constants.RANK_UNKNOWN, ) nodemap[e[7]] = n append(e) @@ -72,6 +73,7 @@ 0, constants.COMP_MODE_INLINE, constants.COMP_MODE_INLINE, + constants.RANK_UNKNOWN, ) nodemap[e[7]] = n append(e) @@ -268,6 +270,7 @@ 0, constants.COMP_MODE_INLINE, constants.COMP_MODE_INLINE, + constants.RANK_UNKNOWN, ) index, junk = parsers.parse_index2(data_inlined, True) got = index[-1] @@ -303,6 +306,7 @@ 0, constants.COMP_MODE_INLINE, constants.COMP_MODE_INLINE, + constants.RANK_UNKNOWN, ) index.append(e)
--- a/tests/test-permissions.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-permissions.t Tue Jan 18 10:27:13 2022 +0100 @@ -78,7 +78,7 @@ (fsmonitor makes "hg status" avoid accessing to "dir") $ hg status - dir: Permission denied + dir: Permission denied* (glob) M a #endif
--- a/tests/test-phases.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-phases.t Tue Jan 18 10:27:13 2022 +0100 @@ -882,16 +882,8 @@ $ hg init no-internal-phase --config format.internal-phase=no $ cd no-internal-phase - $ cat .hg/requires - dotencode - dirstate-v2 (dirstate-v2 !) - fncache - generaldelta - persistent-nodemap (rust !) - revlog-compression-zstd (zstd !) - revlogv1 - sparserevlog - store + $ hg debugrequires | grep internal-phase + [1] $ echo X > X $ hg add X $ hg status @@ -911,17 +903,8 @@ $ hg init internal-phase --config format.internal-phase=yes $ cd internal-phase - $ cat .hg/requires - dotencode - dirstate-v2 (dirstate-v2 !) - fncache - generaldelta + $ hg debugrequires | grep internal-phase internal-phase - persistent-nodemap (rust !) - revlog-compression-zstd (zstd !) - revlogv1 - sparserevlog - store $ mkcommit A test-debug-phase: new rev 0: x -> 1 test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256: -> draft
--- a/tests/test-pull-network.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-pull-network.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,15 +1,5 @@ #require serve -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - $ hg init test $ cd test
--- a/tests/test-pull-r.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-pull-r.t Tue Jan 18 10:27:13 2022 +0100 @@ -112,7 +112,7 @@ $ hg pull -qr missing ../repo abort: unknown revision 'missing' - [255] + [10] Pull multiple revisions with update:
--- a/tests/test-purge.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-purge.t Tue Jan 18 10:27:13 2022 +0100 @@ -29,7 +29,7 @@ $ hg st $ touch foo $ hg purge - permanently delete 1 unkown files? (yN) n + permanently delete 1 unknown files? (yN) n abort: removal cancelled [250] $ hg st @@ -93,7 +93,7 @@ untracked_file untracked_file_readonly $ hg purge --confirm - permanently delete 2 unkown files? (yN) n + permanently delete 2 unknown files? (yN) n abort: removal cancelled [250] $ hg purge -v @@ -156,7 +156,7 @@ $ hg purge -p ../untracked_directory untracked_directory/nested_directory $ hg purge --confirm - permanently delete 1 unkown files? (yN) n + permanently delete 1 unknown files? (yN) n abort: removal cancelled [250] $ hg purge -v ../untracked_directory @@ -203,7 +203,7 @@ ignored untracked_file $ hg purge --confirm --all - permanently delete 1 unkown and 1 ignored files? (yN) n + permanently delete 1 unknown and 1 ignored files? (yN) n abort: removal cancelled [250] $ hg purge -v --all
--- a/tests/test-qrecord.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-qrecord.t Tue Jan 18 10:27:13 2022 +0100 @@ -117,7 +117,7 @@ $ echo "mq=nonexistent" >> $HGRCPATH $ hg help qrecord - *** failed to import extension mq from nonexistent: [Errno *] * (glob) + *** failed to import extension "mq" from nonexistent: [Errno *] * (glob) hg qrecord [OPTION]... PATCH [FILE]... interactively record a new patch
--- a/tests/test-rebuildstate.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-rebuildstate.t Tue Jan 18 10:27:13 2022 +0100 @@ -79,6 +79,7 @@ $ touch foo bar qux $ hg add qux $ hg remove bar + $ sleep 1 # remove potential ambiguity in mtime $ hg status -A A qux R bar @@ -106,6 +107,7 @@ $ hg manifest bar foo + $ sleep 1 # remove potential ambiguity in mtime $ hg status -A A qux R bar
--- a/tests/test-remotefilelog-clone-tree.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-remotefilelog-clone-tree.t Tue Jan 18 10:27:13 2022 +0100 @@ -25,7 +25,7 @@ searching for changes no changes found $ cd shallow - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) exp-remotefilelog-repo-req-1 @@ -69,7 +69,7 @@ searching for changes no changes found $ cd shallow2 - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) exp-remotefilelog-repo-req-1 @@ -113,7 +113,7 @@ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ ls shallow3/.hg/store/data - $ cat shallow3/.hg/requires + $ hg debugrequires -R shallow3/ dotencode dirstate-v2 (dirstate-v2 !) exp-remotefilelog-repo-req-1
--- a/tests/test-remotefilelog-clone.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-remotefilelog-clone.t Tue Jan 18 10:27:13 2022 +0100 @@ -22,7 +22,7 @@ searching for changes no changes found $ cd shallow - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) exp-remotefilelog-repo-req-1 @@ -59,7 +59,7 @@ searching for changes no changes found $ cd shallow2 - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) exp-remotefilelog-repo-req-1 @@ -111,7 +111,7 @@ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ ls shallow3/.hg/store/data - $ cat shallow3/.hg/requires + $ hg debugrequires -R shallow3/ dotencode dirstate-v2 (dirstate-v2 !) exp-remotefilelog-repo-req-1
--- a/tests/test-remotefilelog-log.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-remotefilelog-log.t Tue Jan 18 10:27:13 2022 +0100 @@ -25,7 +25,7 @@ searching for changes no changes found $ cd shallow - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) exp-remotefilelog-repo-req-1
--- a/tests/test-remotefilelog-repack.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-remotefilelog-repack.t Tue Jan 18 10:27:13 2022 +0100 @@ -307,7 +307,7 @@ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) $ hg prefetch -r 38 abort: unknown revision '38' - [255] + [10] $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
--- a/tests/test-rename-merge1.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-rename-merge1.t Tue Jan 18 10:27:13 2022 +0100 @@ -44,7 +44,7 @@ getting b2 preserving a for resolve of b removing a - b: remote moved from a -> m (premerge) + b: remote moved from a -> m picked tool ':merge' for b (binary False symlink False changedelete False) merging a and b to b my b@044f8520aeeb+ other b@85c198ef2f6c ancestor a@af1939970a1c @@ -218,7 +218,7 @@ ancestor: 5151c134577e, local: 07fcbc9a74ed+, remote: f21419739508 starting 4 threads for background file closing (?) preserving z for resolve of z - z: both renamed from y -> m (premerge) + z: both renamed from y -> m picked tool ':merge3' for z (binary False symlink False changedelete False) merging z my z@07fcbc9a74ed+ other z@f21419739508 ancestor y@5151c134577e
--- a/tests/test-rename-merge2.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-rename-merge2.t Tue Jan 18 10:27:13 2022 +0100 @@ -88,18 +88,15 @@ starting 4 threads for background file closing (?) preserving a for resolve of b preserving rev for resolve of rev - b: remote copied from a -> m (premerge) + b: remote copied from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging a and b to b my b@e300d1c794ec+ other b@4ce40f5aca24 ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@e300d1c794ec+ other rev@4ce40f5aca24 ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@e300d1c794ec+ other rev@4ce40f5aca24 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -128,18 +125,15 @@ getting a preserving b for resolve of b preserving rev for resolve of rev - b: local copied/moved from a -> m (premerge) + b: local copied/moved from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b and a to b my b@86a2aa42fc76+ other a@f4db7e329e71 ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@86a2aa42fc76+ other rev@f4db7e329e71 ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@86a2aa42fc76+ other rev@f4db7e329e71 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool 
returned: 0 1 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -168,18 +162,15 @@ preserving a for resolve of b preserving rev for resolve of rev removing a - b: remote moved from a -> m (premerge) + b: remote moved from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging a and b to b my b@e300d1c794ec+ other b@bdb19105162a ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@e300d1c794ec+ other rev@bdb19105162a ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@e300d1c794ec+ other rev@bdb19105162a ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -206,18 +197,15 @@ starting 4 threads for background file closing (?) 
preserving b for resolve of b preserving rev for resolve of rev - b: local copied/moved from a -> m (premerge) + b: local copied/moved from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b and a to b my b@02963e448370+ other a@f4db7e329e71 ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@02963e448370+ other rev@f4db7e329e71 ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@02963e448370+ other rev@f4db7e329e71 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -244,13 +232,10 @@ b: remote created -> g getting b preserving rev for resolve of rev - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@94b33a1b7f2d+ other rev@4ce40f5aca24 ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@94b33a1b7f2d+ other rev@4ce40f5aca24 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 1 files updated, 1 files merged, 0 files removed, 0 files unresolved @@ -276,13 +261,10 @@ ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 97c705ade336 starting 4 threads for background file closing (?) 
preserving rev for resolve of rev - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@86a2aa42fc76+ other rev@97c705ade336 ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@86a2aa42fc76+ other rev@97c705ade336 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 1 files merged, 0 files removed, 0 files unresolved @@ -311,13 +293,10 @@ b: remote created -> g getting b preserving rev for resolve of rev - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@94b33a1b7f2d+ other rev@bdb19105162a ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@94b33a1b7f2d+ other rev@bdb19105162a ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 1 files updated, 1 files merged, 1 files removed, 0 files unresolved @@ -342,13 +321,10 @@ ancestor: 924404dff337, local: 02963e448370+, remote: 97c705ade336 starting 4 threads for background file closing (?) 
preserving rev for resolve of rev - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@02963e448370+ other rev@97c705ade336 ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@02963e448370+ other rev@97c705ade336 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 1 files merged, 0 files removed, 0 files unresolved @@ -374,22 +350,16 @@ starting 4 threads for background file closing (?) preserving b for resolve of b preserving rev for resolve of rev - b: both renamed from a -> m (premerge) + b: both renamed from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@62e7bf090eba+ other b@49b6d8032493 ancestor a@924404dff337 - rev: versions differ -> m (premerge) + launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) + merge tool returned: 0 + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@62e7bf090eba+ other rev@49b6d8032493 ancestor rev@924404dff337 - b: both renamed from a -> m (merge) - picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) - my b@62e7bf090eba+ other b@49b6d8032493 ancestor a@924404dff337 - launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) - merge tool returned: 0 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@62e7bf090eba+ other rev@49b6d8032493 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -425,13 +395,10 @@ c: remote created -> g 
getting c preserving rev for resolve of rev - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@02963e448370+ other rev@fe905ef2c33e ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@02963e448370+ other rev@fe905ef2c33e ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 1 files updated, 1 files merged, 0 files removed, 0 files unresolved @@ -456,22 +423,16 @@ starting 4 threads for background file closing (?) preserving b for resolve of b preserving rev for resolve of rev - b: both created -> m (premerge) + b: both created -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@86a2aa42fc76+ other b@af30c7647fc7 ancestor b@000000000000 - rev: versions differ -> m (premerge) + launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) + merge tool returned: 0 + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@86a2aa42fc76+ other rev@af30c7647fc7 ancestor rev@924404dff337 - b: both created -> m (merge) - picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) - my b@86a2aa42fc76+ other b@af30c7647fc7 ancestor b@000000000000 - launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) - merge tool returned: 0 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@86a2aa42fc76+ other rev@af30c7647fc7 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -498,22 +459,16 @@ starting 4 threads for background file 
closing (?) preserving b for resolve of b preserving rev for resolve of rev - b: both created -> m (premerge) + b: both created -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000 - rev: versions differ -> m (premerge) + launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) + merge tool returned: 0 + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337 - b: both created -> m (merge) - picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) - my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000 - launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) - merge tool returned: 0 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 1 files removed, 0 files unresolved @@ -538,18 +493,15 @@ getting a preserving b for resolve of b preserving rev for resolve of rev - b: both renamed from a -> m (premerge) + b: both renamed from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@86a2aa42fc76+ other rev@8dbce441892a 
ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 1 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -576,22 +528,16 @@ starting 4 threads for background file closing (?) preserving b for resolve of b preserving rev for resolve of rev - b: both created -> m (premerge) + b: both created -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000 - rev: versions differ -> m (premerge) + launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) + merge tool returned: 0 + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337 - b: both created -> m (merge) - picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) - my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000 - launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) - merge tool returned: 0 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 1 files removed, 0 files unresolved @@ -616,18 +562,15 @@ getting a preserving b for resolve of b preserving rev for resolve of rev - b: both renamed from a -> m (premerge) + b: both renamed from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev 
my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 1 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -652,18 +595,15 @@ starting 4 threads for background file closing (?) preserving b for resolve of b preserving rev for resolve of rev - b: both renamed from a -> m (premerge) + b: both renamed from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -688,18 +628,15 @@ starting 4 threads for background file closing (?) 
preserving b for resolve of b preserving rev for resolve of rev - b: both renamed from a -> m (premerge) + b: both renamed from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@02963e448370+ other b@8dbce441892a ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -723,18 +660,15 @@ starting 4 threads for background file closing (?) preserving b for resolve of b preserving rev for resolve of rev - b: both renamed from a -> m (premerge) + b: both renamed from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@0b76e65c8289+ other b@bdb19105162a ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -762,22 +696,16 @@ preserving a for resolve of b preserving rev for resolve of rev removing a - b: remote moved 
from a -> m (premerge) + b: remote moved from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging a and b to b my b@e300d1c794ec+ other b@49b6d8032493 ancestor a@924404dff337 - rev: versions differ -> m (premerge) + launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) + merge tool returned: 0 + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@e300d1c794ec+ other rev@49b6d8032493 ancestor rev@924404dff337 - b: remote moved from a -> m (merge) - picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) - my b@e300d1c794ec+ other b@49b6d8032493 ancestor a@924404dff337 - launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) - merge tool returned: 0 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@e300d1c794ec+ other rev@49b6d8032493 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -804,22 +732,16 @@ starting 4 threads for background file closing (?) 
preserving b for resolve of b preserving rev for resolve of rev - b: local copied/moved from a -> m (premerge) + b: local copied/moved from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b and a to b my b@62e7bf090eba+ other a@f4db7e329e71 ancestor a@924404dff337 - rev: versions differ -> m (premerge) + launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) + merge tool returned: 0 + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@62e7bf090eba+ other rev@f4db7e329e71 ancestor rev@924404dff337 - b: local copied/moved from a -> m (merge) - picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) - my b@62e7bf090eba+ other a@f4db7e329e71 ancestor a@924404dff337 - launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob) - merge tool returned: 0 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) - my rev@62e7bf090eba+ other rev@f4db7e329e71 ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 0 files updated, 2 files merged, 0 files removed, 0 files unresolved @@ -852,18 +774,15 @@ getting c preserving b for resolve of b preserving rev for resolve of rev - b: local copied/moved from a -> m (premerge) + b: local copied/moved from a -> m picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b and a to b my b@02963e448370+ other a@2b958612230f ancestor a@924404dff337 premerge successful - rev: versions differ -> m (premerge) + rev: versions differ -> m picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev my rev@02963e448370+ other rev@2b958612230f ancestor rev@924404dff337 - rev: versions differ -> m (merge) - picked tool '* ../merge' for rev (binary False symlink False 
changedelete False) (glob) - my rev@02963e448370+ other rev@2b958612230f ancestor rev@924404dff337 launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob) merge tool returned: 0 1 files updated, 2 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-rename.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-rename.t Tue Jan 18 10:27:13 2022 +0100 @@ -610,7 +610,7 @@ $ hg rename d1/d11/a1 .hg/foo abort: path contains illegal component: .hg/foo - [255] + [10] $ hg status -C $ hg rename d1/d11/a1 ../foo abort: ../foo not under root '$TESTTMP' @@ -620,7 +620,7 @@ $ mv d1/d11/a1 .hg/foo $ hg rename --after d1/d11/a1 .hg/foo abort: path contains illegal component: .hg/foo - [255] + [10] $ hg status -C ! d1/d11/a1 $ hg update -C @@ -629,11 +629,11 @@ $ hg rename d1/d11/a1 .hg abort: path contains illegal component: .hg/a1 - [255] + [10] $ hg --config extensions.largefiles= rename d1/d11/a1 .hg The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !) abort: path contains illegal component: .hg/a1 - [255] + [10] $ hg status -C $ hg rename d1/d11/a1 .. abort: ../a1 not under root '$TESTTMP' @@ -647,7 +647,7 @@ $ mv d1/d11/a1 .hg $ hg rename --after d1/d11/a1 .hg abort: path contains illegal component: .hg/a1 - [255] + [10] $ hg status -C ! d1/d11/a1 $ hg update -C @@ -656,7 +656,7 @@ $ (cd d1/d11; hg rename ../../d2/b ../../.hg/foo) abort: path contains illegal component: .hg/foo - [255] + [10] $ hg status -C $ (cd d1/d11; hg rename ../../d2/b ../../../foo) abort: ../../../foo not under root '$TESTTMP'
--- a/tests/test-repo-compengines.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-repo-compengines.t Tue Jan 18 10:27:13 2022 +0100 @@ -9,7 +9,7 @@ $ hg init default $ cd default - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache @@ -59,7 +59,7 @@ $ touch bar $ hg --config format.revlog-compression=none -q commit -A -m 'add bar with a lot of repeated repeated repeated text' - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache @@ -79,7 +79,7 @@ $ hg --config format.revlog-compression=zstd init zstd $ cd zstd - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache @@ -183,7 +183,7 @@ summary: some-commit - $ cat none-compression/.hg/requires + $ hg debugrequires -R none-compression/ dotencode exp-compression-none dirstate-v2 (dirstate-v2 !)
--- a/tests/test-resolve.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-resolve.t Tue Jan 18 10:27:13 2022 +0100 @@ -196,8 +196,8 @@ resolve --all should re-merge all unresolved files $ hg resolve --all merging file1 + warning: conflicts while merging file1! (edit, then use 'hg resolve --mark') merging file2 - warning: conflicts while merging file1! (edit, then use 'hg resolve --mark') warning: conflicts while merging file2! (edit, then use 'hg resolve --mark') [1] $ cat file1.orig @@ -211,8 +211,8 @@ $ hg resolve --all --verbose --config 'ui.origbackuppath=.hg/origbackups' merging file1 creating directory: $TESTTMP/repo/.hg/origbackups + warning: conflicts while merging file1! (edit, then use 'hg resolve --mark') merging file2 - warning: conflicts while merging file1! (edit, then use 'hg resolve --mark') warning: conflicts while merging file2! (edit, then use 'hg resolve --mark') [1] $ ls .hg/origbackups @@ -478,10 +478,10 @@ $ hg rebase -s 1 -d 2 rebasing 1:f30f98a8181f "added emp1 emp2 emp3" merging emp1 + warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') merging emp2 + warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') merging emp3 - warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') - warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark') unresolved conflicts (see 'hg resolve', then 'hg rebase --continue') [240] @@ -490,10 +490,10 @@ =========================================================== $ hg resolve --all merging emp1 + warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') merging emp2 + warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') merging emp3 - warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') - warning: conflicts while merging emp2! 
(edit, then use 'hg resolve --mark') warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark') [1] @@ -522,10 +522,10 @@ > EOF re-merge all unresolved files (yn)? y merging emp1 + warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') merging emp2 + warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') merging emp3 - warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') - warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark') [1]
--- a/tests/test-revert.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-revert.t Tue Jan 18 10:27:13 2022 +0100 @@ -320,7 +320,7 @@ $ hg mv --force a b/b $ hg revert b/b - $ hg status a b/b + $ hg status a b/b --copies $ cd ..
--- a/tests/test-revlog-raw.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-revlog-raw.py Tue Jan 18 10:27:13 2022 +0100 @@ -325,7 +325,7 @@ rawtext = text if rlog.rawsize(rev) != len(rawtext): abort('rev %d: wrong rawsize' % rev) - if rlog.revision(rev, raw=False) != text: + if rlog.revision(rev) != text: abort('rev %d: wrong text' % rev) if rlog.rawdata(rev) != rawtext: abort('rev %d: wrong rawtext' % rev)
--- a/tests/test-revlog-v2.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-revlog-v2.t Tue Jan 18 10:27:13 2022 +0100 @@ -20,7 +20,7 @@ $ hg init new-repo $ cd new-repo - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) exp-revlogv2.2
--- a/tests/test-revset.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-revset.t Tue Jan 18 10:27:13 2022 +0100 @@ -306,7 +306,7 @@ (negate (symbol 'a'))) abort: unknown revision '-a' - [255] + [10] $ try é (symbol '\xc3\xa9') * set:
--- a/tests/test-revset2.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-revset2.t Tue Jan 18 10:27:13 2022 +0100 @@ -870,7 +870,7 @@ $ try m (symbol 'm') abort: unknown revision 'm' - [255] + [10] $ HGPLAINEXCEPT=revsetalias $ export HGPLAINEXCEPT @@ -1061,7 +1061,7 @@ (symbol 'max') (string '$1'))) abort: unknown revision '$1' - [255] + [10] test scope of alias expansion: 'universe' is expanded prior to 'shadowall(0)', but 'all()' should never be substituted to '0()'. @@ -1601,7 +1601,7 @@ > EOF $ hg debugrevspec "custom1()" - *** failed to import extension custompredicate from $TESTTMP/custompredicate.py: intentional failure of loading extension + *** failed to import extension "custompredicate" from $TESTTMP/custompredicate.py: intentional failure of loading extension hg: parse error: unknown identifier: custom1 [10]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-rhg-no-generaldelta.t Tue Jan 18 10:27:13 2022 +0100 @@ -0,0 +1,46 @@ + $ NO_FALLBACK="env RHG_ON_UNSUPPORTED=abort" + + $ cat << EOF >> $HGRCPATH + > [format] + > sparse-revlog = no + > EOF + + $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no + $ cd repo + $ (echo header; seq.py 20) > f + $ hg commit -q -Am initial + $ (echo header; seq.py 20; echo footer) > f + $ hg commit -q -Am x + $ hg update ".^" + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ (seq.py 20; echo footer) > f + $ hg commit -q -Am y + $ hg debugdeltachain f --template '{rev} {prevrev} {deltatype}\n' + 0 -1 base + 1 0 prev + 2 1 prev + +rhg works on non-generaldelta revlogs: + + $ $NO_FALLBACK hg cat f -r . + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 + footer
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-rhg-sparse-narrow.t Tue Jan 18 10:27:13 2022 +0100 @@ -0,0 +1,120 @@ +#require rhg + + $ NO_FALLBACK="env RHG_ON_UNSUPPORTED=abort" + +Rhg works well when sparse working copy is enabled. + + $ cd "$TESTTMP" + $ hg init repo-sparse + $ cd repo-sparse + $ cat > .hg/hgrc <<EOF + > [extensions] + > sparse= + > EOF + + $ echo a > show + $ echo x > hide + $ mkdir dir1 dir2 + $ echo x > dir1/x + $ echo y > dir1/y + $ echo z > dir2/z + + $ hg ci -Aqm 'initial' + $ hg debugsparse --include 'show' + $ ls -A + .hg + show + + $ tip=$(hg log -r . --template '{node}') + $ $NO_FALLBACK rhg files -r "$tip" + dir1/x + dir1/y + dir2/z + hide + show + $ $NO_FALLBACK rhg files + show + + $ $NO_FALLBACK rhg cat -r "$tip" hide + x + + $ cd .. + +We support most things when narrow is enabled, too, with a couple of caveats. + + $ . "$TESTDIR/narrow-library.sh" + $ real_hg=$RHG_FALLBACK_EXECUTABLE + + $ cat >> $HGRCPATH <<EOF + > [extensions] + > narrow= + > EOF + + $ hg clone --narrow ./repo-sparse repo-narrow --include dir1 + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 2 changes to 2 files + new changesets 6d714a4a2998 + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ cd repo-narrow + + $ $NO_FALLBACK rhg cat -r "$tip" dir1/x + x + $ "$real_hg" cat -r "$tip" dir1/x + x + +TODO: bad error message + + $ $NO_FALLBACK rhg cat -r "$tip" hide + abort: invalid revision identifier: 6d714a4a2998cbfd0620db44da58b749f6565d63 + [255] + $ "$real_hg" cat -r "$tip" hide + [1] + +A naive implementation of [rhg files] leaks the paths that are supposed to be +hidden by narrow, so we just fall back to hg. 
+ + $ $NO_FALLBACK rhg files -r "$tip" + unsupported feature: rhg files -r <rev> is not supported in narrow clones + [252] + $ "$real_hg" files -r "$tip" + dir1/x + dir1/y + +Hg status needs to do some filtering based on narrow spec, so we don't +support it in rhg for narrow clones yet. + + $ mkdir dir2 + $ touch dir2/q + $ "$real_hg" status + $ $NO_FALLBACK rhg --config rhg.status=true status + unsupported feature: rhg status is not supported for sparse checkouts or narrow clones yet + [252] + +Adding "orphaned" index files: + + $ (cd ..; cp repo-sparse/.hg/store/data/hide.i repo-narrow/.hg/store/data/hide.i) + $ (cd ..; mkdir repo-narrow/.hg/store/data/dir2; cp repo-sparse/.hg/store/data/dir2/z.i repo-narrow/.hg/store/data/dir2/z.i) + $ "$real_hg" verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 1 changesets with 2 changes to 2 files + + $ "$real_hg" files -r "$tip" + dir1/x + dir1/y + +# TODO: even though [hg files] hides the orphaned dir2/z, [hg cat] still shows it. +# rhg has the same issue, but at least it's not specific to rhg. +# This is despite [hg verify] succeeding above. + + $ $NO_FALLBACK rhg cat -r "$tip" dir2/z + z + $ "$real_hg" cat -r "$tip" dir2/z + z
--- a/tests/test-rhg.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-rhg.t Tue Jan 18 10:27:13 2022 +0100 @@ -168,13 +168,12 @@ $ rhg cat original --exclude="*.rs" original content - $ FALLBACK_EXE="$RHG_FALLBACK_EXECUTABLE" - $ unset RHG_FALLBACK_EXECUTABLE - $ rhg cat original --exclude="*.rs" + $ (unset RHG_FALLBACK_EXECUTABLE; rhg cat original --exclude="*.rs") abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set. [255] - $ RHG_FALLBACK_EXECUTABLE="$FALLBACK_EXE" - $ export RHG_FALLBACK_EXECUTABLE + + $ (unset RHG_FALLBACK_EXECUTABLE; rhg cat original) + original content $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=false [1] @@ -381,3 +380,13 @@ $ rhg files a $ rm .hgsub + +The `:required` extension suboptions are correctly ignored + + $ echo "[extensions]" >> $HGRCPATH + $ echo "blackbox:required = yes" >> $HGRCPATH + $ rhg files + a + $ echo "*:required = yes" >> $HGRCPATH + $ rhg files + a
--- a/tests/test-run-tests.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-run-tests.t Tue Jan 18 10:27:13 2022 +0100 @@ -176,14 +176,19 @@ running 1 tests using 1 parallel processes \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc) - \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc) + \x1b[38;5;28m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc) (pygments211 !) + \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc) (no-pygments211 !) \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc) - $ echo "bar-baz"; echo "bar-bad"; echo foo - \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc) - bar*bad (glob) + \x1b[38;5;250m \x1b[39m $ echo "bar-baz"; echo "bar-bad"; echo foo (esc) (pygments211 !) + $ echo "bar-baz"; echo "bar-bad"; echo foo (no-pygments211 !) + \x1b[38;5;28m+ bar*baz (glob)\x1b[39m (esc) (pygments211 !) + \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc) (no-pygments211 !) + \x1b[38;5;250m \x1b[39m bar*bad (glob) (esc) (pygments211 !) + bar*bad (glob) (no-pygments211 !) \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc) \x1b[38;5;124m- | fo (re)\x1b[39m (esc) - \x1b[38;5;34m+ foo\x1b[39m (esc) + \x1b[38;5;28m+ foo\x1b[39m (esc) (pygments211 !) + \x1b[38;5;34m+ foo\x1b[39m (esc) (no-pygments211 !) \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc) !
--- a/tests/test-share-safe.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-share-safe.t Tue Jan 18 10:27:13 2022 +0100 @@ -363,10 +363,7 @@ preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !) added: share-safe - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process $ hg debugupgraderepo --run upgrade will perform the following actions: @@ -379,10 +376,7 @@ share-safe Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs. - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process beginning upgrade... repository locked and read-only @@ -457,10 +451,7 @@ preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !) removed: share-safe - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process $ hg debugupgraderepo --run upgrade will perform the following actions: @@ -470,10 +461,7 @@ preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !) removed: share-safe - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process beginning upgrade... repository locked and read-only @@ -556,10 +544,7 @@ preserved: dotencode, exp-rc-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !) added: share-safe - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode. $ hg debugrequirements
--- a/tests/test-share.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-share.t Tue Jan 18 10:27:13 2022 +0100 @@ -161,7 +161,7 @@ $ cd .. $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2 - $ cat ./cloned-via-bundle2/.hg/requires | grep "shared" + $ hg -R cloned-via-bundle2 debugrequires | grep "shared" [1] $ hg id --cwd cloned-via-bundle2 -r tip c2e0ac586386 tip @@ -284,3 +284,25 @@ $ hg share nostore sharednostore abort: cannot create shared repository as source was created with 'format.usestore' config disabled [255] + +Check that (safe) share can control wc-specific format variant at creation time +------------------------------------------------------------------------------- + +#if no-rust + + $ cat << EOF >> $HGRCPATH + > [storage] + > dirstate-v2.slow-path = allow + > EOF + +#endif + + $ hg init repo-safe-d1 --config format.use-share-safe=yes --config format.exp-rc-dirstate-v2=no + $ hg debugformat -R repo-safe-d1 | grep dirstate-v2 + dirstate-v2: no + + $ hg share repo-safe-d1 share-safe-d2 --config format.use-share-safe=yes --config format.exp-rc-dirstate-v2=yes + updating working directory + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg debugformat -R share-safe-d2 | grep dirstate-v2 + dirstate-v2: yes
--- a/tests/test-shelve.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-shelve.t Tue Jan 18 10:27:13 2022 +0100 @@ -1385,8 +1385,8 @@ unshelving change 'default-01' rebasing shelved changes merging bar1 + warning: conflicts while merging bar1! (edit, then use 'hg resolve --mark') merging bar2 - warning: conflicts while merging bar1! (edit, then use 'hg resolve --mark') warning: conflicts while merging bar2! (edit, then use 'hg resolve --mark') unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue') [240]
--- a/tests/test-simplemerge.py Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-simplemerge.py Tue Jan 18 10:27:13 2022 +0100 @@ -48,9 +48,6 @@ ) -CantReprocessAndShowBase = simplemerge.CantReprocessAndShowBase - - def split_lines(t): return util.stringio(t).readlines() @@ -179,7 +176,9 @@ self.assertEqual(list(m3.merge_regions()), [(b'a', 0, 2)]) - self.assertEqual(list(m3.merge_lines()), [b'aaa', b'bbb']) + self.assertEqual( + simplemerge.render_minimized(m3), ([b'aaa', b'bbb'], False) + ) def test_no_conflicts(self): """No conflicts because only one side changed""" @@ -204,7 +203,9 @@ [b'aaa\n', b'bbb\n'], ) - self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n') + self.assertEqual( + b''.join(simplemerge.render_minimized(m3)[0]), b'aaa\nbbb\n222\n' + ) def test_append_b(self): m3 = Merge3( @@ -213,7 +214,9 @@ [b'aaa\n', b'bbb\n', b'222\n'], ) - self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n') + self.assertEqual( + b''.join(simplemerge.render_minimized(m3)[0]), b'aaa\nbbb\n222\n' + ) def test_append_agreement(self): m3 = Merge3( @@ -222,7 +225,9 @@ [b'aaa\n', b'bbb\n', b'222\n'], ) - self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n') + self.assertEqual( + b''.join(simplemerge.render_minimized(m3)[0]), b'aaa\nbbb\n222\n' + ) def test_append_clash(self): m3 = Merge3( @@ -231,7 +236,8 @@ [b'aaa\n', b'bbb\n', b'333\n'], ) - ml = m3.merge_lines( + ml, conflicts = simplemerge.render_minimized( + m3, name_a=b'a', name_b=b'b', start_marker=b'<<', @@ -250,7 +256,8 @@ [b'aaa\n', b'222\n', b'bbb\n'], ) - ml = m3.merge_lines( + ml, conflicts = simplemerge.render_minimized( + m3, name_a=b'a', name_b=b'b', start_marker=b'<<', @@ -285,12 +292,13 @@ list(m3.merge_groups()), [ (b'unchanged', [b'aaa\n']), - (b'conflict', [], [b'111\n'], [b'222\n']), + (b'conflict', ([], [b'111\n'], [b'222\n'])), (b'unchanged', [b'bbb\n']), ], ) - ml = m3.merge_lines( + ml, conflicts = simplemerge.render_minimized( + m3, name_a=b'a', name_b=b'b', 
start_marker=b'<<', @@ -338,7 +346,7 @@ def test_merge_poem(self): """Test case from diff3 manual""" m3 = Merge3(TZU, LAO, TAO) - ml = list(m3.merge_lines(b'LAO', b'TAO')) + ml, conflicts = simplemerge.render_minimized(m3, b'LAO', b'TAO') self.log(b'merge result:') self.log(b''.join(ml)) self.assertEqual(ml, MERGED_RESULT) @@ -356,11 +364,11 @@ other_text.splitlines(True), this_text.splitlines(True), ) - m_lines = m3.merge_lines(b'OTHER', b'THIS') + m_lines, conflicts = simplemerge.render_minimized(m3, b'OTHER', b'THIS') self.assertEqual( b'<<<<<<< OTHER\r\nc\r\n=======\r\nb\r\n' b'>>>>>>> THIS\r\n'.splitlines(True), - list(m_lines), + m_lines, ) def test_mac_text(self): @@ -372,11 +380,11 @@ other_text.splitlines(True), this_text.splitlines(True), ) - m_lines = m3.merge_lines(b'OTHER', b'THIS') + m_lines, conflicts = simplemerge.render_minimized(m3, b'OTHER', b'THIS') self.assertEqual( b'<<<<<<< OTHER\rc\r=======\rb\r' b'>>>>>>> THIS\r'.splitlines(True), - list(m_lines), + m_lines, )
--- a/tests/test-sparse-profiles.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-sparse-profiles.t Tue Jan 18 10:27:13 2022 +0100 @@ -128,8 +128,8 @@ $ hg merge 1 temporarily included 2 file(s) in the sparse checkout for merging merging backend.sparse + warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark') merging data.py - warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark') warning: conflicts while merging data.py! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 2 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon @@ -197,8 +197,8 @@ rebasing 1:a2b1de640a62 "edit profile" temporarily included 2 file(s) in the sparse checkout for merging merging backend.sparse + warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark') merging data.py - warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark') warning: conflicts while merging data.py! (edit, then use 'hg resolve --mark') unresolved conflicts (see 'hg resolve', then 'hg rebase --continue') [240]
--- a/tests/test-sparse-requirement.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-sparse-requirement.t Tue Jan 18 10:27:13 2022 +0100 @@ -16,7 +16,7 @@ Enable sparse profile - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache @@ -36,7 +36,7 @@ Requirement for sparse added when sparse is enabled - $ cat .hg/requires + $ hg debugrequires --config extensions.sparse= dotencode dirstate-v2 (dirstate-v2 !) exp-sparse @@ -59,7 +59,7 @@ $ hg debugsparse --reset --config extensions.sparse= - $ cat .hg/requires + $ hg debugrequires dotencode dirstate-v2 (dirstate-v2 !) fncache
--- a/tests/test-sqlitestore.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-sqlitestore.t Tue Jan 18 10:27:13 2022 +0100 @@ -13,7 +13,7 @@ New repo should not use SQLite by default $ hg init empty-no-sqlite - $ cat empty-no-sqlite/.hg/requires + $ hg debugrequires -R empty-no-sqlite dotencode dirstate-v2 (dirstate-v2 !) fncache @@ -27,7 +27,7 @@ storage.new-repo-backend=sqlite is recognized $ hg --config storage.new-repo-backend=sqlite init empty-sqlite - $ cat empty-sqlite/.hg/requires + $ hg debugrequires -R empty-sqlite dotencode dirstate-v2 (dirstate-v2 !) exp-sqlite-001 @@ -49,7 +49,7 @@ Can force compression to zlib $ hg --config storage.sqlite.compression=zlib init empty-zlib - $ cat empty-zlib/.hg/requires + $ hg debugrequires -R empty-zlib dotencode dirstate-v2 (dirstate-v2 !) exp-sqlite-001 @@ -65,7 +65,7 @@ Can force compression to none $ hg --config storage.sqlite.compression=none init empty-none - $ cat empty-none/.hg/requires + $ hg debugrequires -R empty-none dotencode dirstate-v2 (dirstate-v2 !) exp-sqlite-001
--- a/tests/test-ssh-bundle1.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-ssh-bundle1.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,16 +1,6 @@ This test is a duplicate of 'test-http.t' feel free to factor out parts that are not bundle1/bundle2 specific. -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - $ cat << EOF >> $HGRCPATH > [devel] > # This test is dedicated to interaction through old bundle @@ -483,15 +473,13 @@ $ hg pull --debug ssh://user@dummy/remote pulling from ssh://user@dummy/remote running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re) - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) sending hello command sending between command - remote: 444 (sshv1 no-rust !) - remote: 463 (sshv1 rust !) - protocol upgraded to exp-ssh-v2-0003 (sshv2 !) + remote: 444 (no-rust !) + remote: 463 (rust !) remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !) remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !) - remote: 1 (sshv1 !) + remote: 1 sending protocaps command preparing listkeys for "bookmarks" sending listkeys command
--- a/tests/test-ssh-clone-r.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-ssh-clone-r.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,15 +1,5 @@ This test tries to exercise the ssh functionality with a dummy script -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - creating 'remote' repo $ hg init remote
--- a/tests/test-ssh-proto-unbundle.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-ssh-proto-unbundle.t Tue Jan 18 10:27:13 2022 +0100 @@ -5,13 +5,6 @@ > use-persistent-nodemap = no > EOF - $ cat > hgrc-sshv2 << EOF - > %include $HGRCPATH - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF - $ debugwireproto() { > commands=`cat -` > echo 'testing ssh1' @@ -20,12 +13,6 @@ > if [ -n "$1" ]; then > hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}" > fi - > echo "" - > echo 'testing ssh2' - > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh --noreadstderr - > if [ -n "$1" ]; then - > hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}" - > fi > } Generate some bundle files @@ -103,56 +90,6 @@ e> read(-1) -> 115: e> abort: incompatible Mercurial client; bundle2 required\n e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 115: - e> abort: incompatible Mercurial client; bundle2 required\n - e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n $ cd .. 
@@ -287,61 +224,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 151: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> ui.write 1 line\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n And a variation that writes multiple lines using ui.write @@ -412,62 +294,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 173: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> ui.write 2 lines 1\n - e> ui.write 2 lines 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n And a variation that does a ui.flush() after writing output @@ -537,61 +363,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - 
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 
2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 157: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> ui.write 1 line flush\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n Multiple writes + flush @@ -662,62 +433,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> 
initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 161: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> ui.write 1st\n - e> ui.write 2nd\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n ui.write() + ui.write_err() output is captured @@ -790,64 +505,7 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 
1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 187: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> ui.write 1\n - e> ui.write_err 1\n - e> ui.write 2\n - e> ui.write_err 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n + print() output is captured @@ -917,61 +575,6 @@ e> transaction 
abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 148: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> printed line\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n Mixed print() and ui.write() are both captured @@ -1044,64 +647,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 173: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> print 1\n - e> ui.write 1\n - e> print 2\n - e> ui.write 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n print() to stdout and stderr both get captured @@ -1174,64 +719,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n 
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() 
-> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 171: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> stdout 1\n - e> stderr 1\n - e> stdout 2\n - e> stderr 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook failed\n Shell hook writing to stdout has output captured @@ -1308,63 +795,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook exited with status 1\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> 
initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 167: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> stdout 1\n - e> stdout 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook exited with status 1\n - Shell hook writing to stderr has output captured $ cat > $TESTTMP/hook.sh << EOF @@ -1435,63 +865,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook exited with status 1\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog 
unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 167: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> stderr 1\n - e> stderr 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook exited with status 1\n - Shell hook writing to stdout and stderr 
has output captured $ cat > $TESTTMP/hook.sh << EOF @@ -1566,65 +939,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.fail hook exited with status 1\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 185: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> stdout 1\n - e> stderr 1\n - e> stdout 2\n - e> stderr 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.fail hook exited with status 1\n - Shell and Python hooks writing to stdout and stderr have output captured $ cat > $TESTTMP/hook.sh << EOF @@ -1709,69 +1023,6 @@ e> transaction abort!\n e> rollback completed\n e> abort: pretxnchangegroup.b hook failed\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 0 - result: 0 - remote output: - e> read(-1) -> 228: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> shell stdout 1\n - e> shell stderr 1\n - e> shell stdout 2\n - e> shell stderr 2\n - e> stdout 1\n - e> stderr 1\n - e> stdout 2\n - e> stderr 2\n - e> transaction abort!\n - e> rollback completed\n - e> abort: pretxnchangegroup.b hook failed\n - $ cd .. 
Pushing a bundle1 with no output @@ -1837,59 +1088,6 @@ e> adding manifests\n e> adding file changes\n e> added 1 changesets with 1 changes to 1 files\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 1 - result: 1 - remote output: - e> read(-1) -> 100: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> added 1 changesets with 1 changes to 1 files\n - $ cd .. Pushing a bundle1 with ui.write() and ui.write_err() @@ -1971,59 +1169,3 @@ e> ui.write 2\n e> ui.write_err 2\n e> added 1 changesets with 1 changes to 1 files\n - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending unbundle command - i> write(9) -> 9: - i> unbundle\n - i> write(9) -> 9: - i> heads 10\n - i> write(10) -> 10: 666f726365 - i> flush() -> None - o> readline() -> 2: - o> 0\n - i> write(4) -> 4: - i> 426\n - i> write(426) -> 426: - i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n - i> test\n - i> 0 0\n - i> foo\n - i> \n - i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n - i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n - i> \x00\x00\x00\x00\x00\x00\x00\x00 - i> write(2) -> 2: - i> 0\n - i> flush() -> None - o> readline() -> 2: - o> 0\n - o> readline() -> 2: - o> 1\n - o> read(1) -> 1: 1 - result: 1 - remote output: - e> read(-1) -> 152: - e> adding changesets\n - e> adding manifests\n - e> adding file changes\n - e> ui.write 1\n - e> ui.write_err 1\n - e> ui.write 2\n - e> ui.write_err 2\n - e> added 1 changesets with 1 changes to 1 files\n
--- a/tests/test-ssh-proto.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-ssh-proto.t Tue Jan 18 10:27:13 2022 +0100 @@ -7,13 +7,6 @@ > use-persistent-nodemap = no > EOF - $ cat > hgrc-sshv2 << EOF - > %include $HGRCPATH - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF - Helper function to run protocol tests against multiple protocol versions. This is easier than using #testcases because managing differences between protocols with inline conditional output is hard to read. @@ -22,9 +15,6 @@ > commands=`cat -` > echo 'testing ssh1' > echo "${commands}" | hg --verbose debugwireproto --localssh - > echo "" - > echo 'testing ssh2' - > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh > } $ cat >> $HGRCPATH << EOF @@ -54,9 +44,6 @@ $ hg debugwireproto --localssh --peer ssh1 << EOF > EOF creating ssh peer for wire protocol version 1 - $ hg debugwireproto --localssh --peer ssh2 << EOF - > EOF - creating ssh peer for wire protocol version 2 Test a normal behaving server, for sanity @@ -916,410 +903,6 @@ o> readline() -> 4: o> 444\n -Send an upgrade request to a server that doesn't support that command - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n - > readline - > raw - > hello\n - > between\n - > pairs 81\n - > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - > readline - > readline - > readline - > readline - > EOF - using raw connection to peer - i> write(77) -> 77: - i> upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n - o> readline() -> 2: - o> 0\n - i> write(104) -> 104: - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - o> readline() -> 4: - o> 444\n - o> readline() -> 444: - o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ 
changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n - o> readline() -> 2: - o> 1\n - o> readline() -> 1: - o> \n - - $ cd .. - - $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server - running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) - devel-peer-request: hello+between - devel-peer-request: pairs: 81 bytes - sending hello command - sending between command - remote: 0 - remote: 444 - remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - remote: 1 - devel-peer-request: protocaps - devel-peer-request: caps: * bytes (glob) - sending protocaps command - url: ssh://user@dummy/server - local: no - pushable: yes - -Enable version 2 support on server. We need to do this in hgrc because we can't -use --config with `hg serve --stdio`. 
- - $ cat >> server/.hg/hgrc << EOF - > [experimental] - > sshserver.support-v2 = true - > EOF - -Send an upgrade request to a server that supports upgrade - - $ cd server - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade this-is-some-token proto=exp-ssh-v2-0003\n - > hello\n - > between\n - > pairs 81\n - > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - > readline - > readline - > readline - > EOF - using raw connection to peer - i> write(153) -> 153: - i> upgrade this-is-some-token proto=exp-ssh-v2-0003\n - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - o> readline() -> 44: - o> upgraded this-is-some-token exp-ssh-v2-0003\n - o> readline() -> 4: - o> 443\n - o> readline() -> 444: - o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n - - $ cd .. 
- - $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server - running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) - devel-peer-request: hello+between - devel-peer-request: pairs: 81 bytes - sending hello command - sending between command - protocol upgraded to exp-ssh-v2-0003 - remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - devel-peer-request: protocaps - devel-peer-request: caps: * bytes (glob) - sending protocaps command - url: ssh://user@dummy/server - local: no - pushable: yes - -Verify the peer has capabilities - - $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server - running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) - devel-peer-request: hello+between - devel-peer-request: pairs: 81 bytes - sending hello command - sending between command - protocol upgraded to exp-ssh-v2-0003 - remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - devel-peer-request: protocaps - devel-peer-request: caps: * bytes (glob) - sending protocaps command - Main capabilities: - batch - branchmap - $USUAL_BUNDLE2_CAPS$ - changegroupsubset - getbundle - known - lookup - protocaps - pushkey - streamreqs=generaldelta,revlogv1,sparserevlog - unbundle=HG10GZ,HG10BZ,HG10UN - unbundlehash - Bundle2 capabilities: - HG20 - bookmarks - changegroup - 01 - 02 - checkheads - related - digests - md5 - sha1 - sha512 - error - abort - unsupportedcontent - pushraced - pushkey - hgtagsfnodes - listkeys - phases - 
heads - pushkey - remote-changegroup - http - https - stream - v2 - -Command after upgrade to version 2 is processed - - $ cd server - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade this-is-some-token proto=exp-ssh-v2-0003\n - > hello\n - > between\n - > pairs 81\n - > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - > readline - > readline - > readline - > raw - > hello\n - > readline - > readline - > EOF - using raw connection to peer - i> write(153) -> 153: - i> upgrade this-is-some-token proto=exp-ssh-v2-0003\n - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - o> readline() -> 44: - o> upgraded this-is-some-token exp-ssh-v2-0003\n - o> readline() -> 4: - o> 443\n - o> readline() -> 444: - o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n - i> write(6) -> 6: - i> hello\n - o> readline() -> 4: - o> 428\n - o> readline() -> 428: - o> capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n - -Multiple upgrades is not allowed - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade this-is-some-token proto=exp-ssh-v2-0003\n - > hello\n - > between\n - > pairs 81\n - > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - > readline - > readline - > readline - > raw - > upgrade another-token proto=irrelevant\n - > hello\n - > readline - > readavailable - > EOF - using raw connection to peer - i> write(153) -> 153: - i> upgrade this-is-some-token proto=exp-ssh-v2-0003\n - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - o> 
readline() -> 44: - o> upgraded this-is-some-token exp-ssh-v2-0003\n - o> readline() -> 4: - o> 443\n - o> readline() -> 444: - o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n - i> write(45) -> 45: - i> upgrade another-token proto=irrelevant\n - i> hello\n - o> readline() -> 1: - o> \n - e> read(-1) -> 42: - e> cannot upgrade protocols multiple times\n - e> -\n - -Malformed upgrade request line (not exactly 3 space delimited tokens) - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade\n - > readline - > EOF - using raw connection to peer - i> write(8) -> 8: - i> upgrade\n - o> readline() -> 2: - o> 0\n - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade token\n - > readline - > EOF - using raw connection to peer - i> write(14) -> 14: - i> upgrade token\n - o> readline() -> 2: - o> 0\n - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade token foo=bar extra-token\n - > readline - > EOF - using raw connection to peer - i> write(34) -> 34: - i> upgrade token foo=bar extra-token\n - o> readline() -> 2: - o> 0\n - -Upgrade request to unsupported protocol is ignored - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade this-is-some-token proto=unknown1,unknown2\n - > readline - > raw - > hello\n - > readline - > readline - > raw - > between\n - > pairs 81\n - > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - > readline - > readline - > EOF - using raw connection to peer - i> write(51) -> 51: - i> upgrade this-is-some-token proto=unknown1,unknown2\n - o> readline() -> 2: - o> 0\n - i> write(6) -> 6: - i> hello\n - o> readline() -> 4: - o> 444\n - o> readline() -> 444: - o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey 
streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n - i> write(98) -> 98: - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - o> readline() -> 2: - o> 1\n - o> readline() -> 1: - o> \n - -Upgrade request must be followed by hello + between - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade token proto=exp-ssh-v2-0003\n - > invalid\n - > readline - > readavailable - > EOF - using raw connection to peer - i> write(44) -> 44: - i> upgrade token proto=exp-ssh-v2-0003\n - i> invalid\n - o> readline() -> 1: - o> \n - e> read(-1) -> 46: - e> malformed handshake protocol: missing hello\n - e> -\n - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade token proto=exp-ssh-v2-0003\n - > hello\n - > invalid\n - > readline - > readavailable - > EOF - using raw connection to peer - i> write(50) -> 50: - i> upgrade token proto=exp-ssh-v2-0003\n - i> hello\n - i> invalid\n - o> readline() -> 1: - o> \n - e> read(-1) -> 48: - e> malformed handshake protocol: missing between\n - e> -\n - - $ hg debugwireproto --localssh --peer raw << EOF - > raw - > upgrade token proto=exp-ssh-v2-0003\n - > hello\n - > between\n - > invalid\n - > readline - > readavailable - > EOF - using raw connection to peer - i> write(58) -> 58: - i> upgrade token proto=exp-ssh-v2-0003\n - i> hello\n - i> between\n - i> invalid\n - o> readline() -> 1: - o> \n - e> read(-1) -> 49: - e> malformed handshake protocol: missing pairs 81\n - e> -\n - -Legacy commands are not exposed to version 2 of protocol - -TODO re-enable these once we're back to actually using v2 commands - -$ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF -> command branches -> nodes 0000000000000000000000000000000000000000 -> EOF -creating ssh peer from handshake results -sending branches command -response: - -$ hg --config experimental.sshpeer.advertise-v2=true 
debugwireproto --localssh << EOF -> command changegroup -> roots 0000000000000000000000000000000000000000 -> EOF -creating ssh peer from handshake results -sending changegroup command -response: - -$ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF -> command changegroupsubset -> bases 0000000000000000000000000000000000000000 -> heads 0000000000000000000000000000000000000000 -> EOF -creating ssh peer from handshake results -sending changegroupsubset command -response: - $ cd .. Test listkeys for listing namespaces @@ -1364,41 +947,6 @@ b'namespaces': b'', b'phases': b'' } - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(13) -> 13: - i> namespace 10\n - i> write(10) -> 10: namespaces - i> flush() -> None - o> bufferedreadline() -> 3: - o> 30\n - o> bufferedread(30) -> 30: - o> bookmarks\t\n - o> namespaces\t\n - o> phases\t - response: { - b'bookmarks': b'', - b'namespaces': b'', - b'phases': b'' - } $ cd .. 
@@ -1444,33 +992,6 @@ o> bufferedreadline() -> 2: o> 0\n response: {} - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(12) -> 12: - i> namespace 9\n - i> write(9) -> 9: bookmarks - i> flush() -> None - o> bufferedreadline() -> 2: - o> 0\n - response: {} With a single bookmark set @@ -1508,36 +1029,6 @@ response: { b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f' } - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(12) -> 12: - i> namespace 9\n - i> write(9) -> 9: bookmarks - i> flush() -> None - o> bufferedreadline() -> 3: - o> 46\n - o> bufferedread(46) -> 46: bookA\t68986213bd4485ea51533535e3fc9e78007a711f - response: { - b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f' - } 
With multiple bookmarks set @@ -1578,39 +1069,6 @@ b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f', b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d' } - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(12) -> 12: - i> namespace 9\n - i> write(9) -> 9: bookmarks - i> flush() -> None - o> bufferedreadline() -> 3: - o> 93\n - o> bufferedread(93) -> 93: - o> bookA\t68986213bd4485ea51533535e3fc9e78007a711f\n - o> bookB\t1880f3755e2e52e3199e0ee5638128b08642f34d - response: { - b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f', - b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d' - } Test pushkey for bookmarks @@ -1657,43 +1115,6 @@ o> bufferedread(2) -> 2: o> 1\n response: True - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending pushkey 
command - i> write(8) -> 8: - i> pushkey\n - i> write(6) -> 6: - i> key 6\n - i> write(6) -> 6: remote - i> write(12) -> 12: - i> namespace 9\n - i> write(9) -> 9: bookmarks - i> write(7) -> 7: - i> new 40\n - i> write(40) -> 40: 68986213bd4485ea51533535e3fc9e78007a711f - i> write(6) -> 6: - i> old 0\n - i> flush() -> None - o> bufferedreadline() -> 2: - o> 2\n - o> bufferedread(2) -> 2: - o> 1\n - response: True $ hg bookmarks bookA 0:68986213bd44 @@ -1742,36 +1163,6 @@ response: { b'publishing': b'True' } - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(12) -> 12: - i> namespace 6\n - i> write(6) -> 6: phases - i> flush() -> None - o> bufferedreadline() -> 3: - o> 15\n - o> bufferedread(15) -> 15: publishing\tTrue - response: { - b'publishing': b'True' - } Create some commits @@ -1830,41 +1221,6 @@ b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True' } - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ 
changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(12) -> 12: - i> namespace 6\n - i> write(6) -> 6: phases - i> flush() -> None - o> bufferedreadline() -> 4: - o> 101\n - o> bufferedread(101) -> 101: - o> 20b8a89289d80036e6c4e87c2083e3bea1586637\t1\n - o> c4750011d906c18ea2f0527419cbc1a544435150\t1\n - o> publishing\tTrue - response: { - b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1', - b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', - b'publishing': b'True' - } Single draft head @@ -1905,39 +1261,6 @@ b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True' } - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(12) -> 12: - i> namespace 6\n - i> write(6) -> 6: phases - i> flush() -> None - o> bufferedreadline() -> 3: - o> 58\n - o> bufferedread(58) -> 58: - o> c4750011d906c18ea2f0527419cbc1a544435150\t1\n - o> publishing\tTrue - response: { - b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', - b'publishing': b'True' - } All public heads @@ -1975,36 +1298,6 @@ response: { b'publishing': b'True' } - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * 
proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending listkeys command - i> write(9) -> 9: - i> listkeys\n - i> write(12) -> 12: - i> namespace 6\n - i> write(6) -> 6: phases - i> flush() -> None - o> bufferedreadline() -> 3: - o> 15\n - o> bufferedread(15) -> 15: publishing\tTrue - response: { - b'publishing': b'True' - } Setting public phase via pushkey @@ -2054,44 +1347,6 @@ o> bufferedread(2) -> 2: o> 1\n response: True - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending pushkey command - i> write(8) -> 8: - i> pushkey\n - i> write(7) -> 7: - i> key 40\n - i> write(40) -> 40: 7127240a084fd9dc86fe8d1f98e26229161ec82b - i> write(12) -> 12: - i> namespace 6\n - i> write(6) -> 6: phases - i> write(6) -> 6: - i> new 1\n - i> write(1) -> 1: 0 - i> write(6) -> 6: - i> old 1\n - i> write(1) -> 1: 1 - i> flush() -> None - o> bufferedreadline() -> 2: - o> 2\n - o> bufferedread(2) -> 2: - o> 1\n - response: True $ hg 
phase . 4: public @@ -2160,40 +1415,3 @@ response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n response #1: bookA\t4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB\tbfebe6bd38eebc6f8202e419c1171268987ea6a6 response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\t1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6\t1\npublishing\tTrue - - testing ssh2 - creating ssh peer from handshake results - i> write(171) -> 171: - i> upgrade * proto=exp-ssh-v2-0003\n (glob) - i> hello\n - i> between\n - i> pairs 81\n - i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 - i> flush() -> None - o> readline() -> 62: - o> upgraded * exp-ssh-v2-0003\n (glob) - o> readline() -> 4: - o> 443\n - o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - o> read(1) -> 1: - o> \n - sending batch with 3 sub-commands - i> write(6) -> 6: - i> batch\n - i> write(4) -> 4: - i> * 0\n - i> write(8) -> 8: - i> cmds 61\n - i> write(61) -> 61: heads ;listkeys namespace=bookmarks;listkeys namespace=phases - i> flush() -> None - o> bufferedreadline() -> 4: - o> 278\n - o> bufferedread(278) -> 278: - o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n - o> ;bookA\t4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n - o> bookB\tbfebe6bd38eebc6f8202e419c1171268987ea6a6;4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\t1\n - o> bfebe6bd38eebc6f8202e419c1171268987ea6a6\t1\n - o> publishing\tTrue - response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n - response #1: bookA\t4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB\tbfebe6bd38eebc6f8202e419c1171268987ea6a6 - response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\t1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6\t1\npublishing\tTrue
--- a/tests/test-ssh.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-ssh.t Tue Jan 18 10:27:13 2022 +0100 @@ -1,13 +1,3 @@ -#testcases sshv1 sshv2 - -#if sshv2 - $ cat >> $HGRCPATH << EOF - > [experimental] - > sshpeer.advertise-v2 = true - > sshserver.support-v2 = true - > EOF -#endif - This test tries to exercise the ssh functionality with a dummy script creating 'remote' repo @@ -537,17 +527,15 @@ $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes pulling from ssh://user@dummy/remote running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re) - sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes sending hello command sending between command - remote: 444 (sshv1 no-rust !) - remote: 463 (sshv1 rust !) - protocol upgraded to exp-ssh-v2-0003 (sshv2 !) + remote: 444 (no-rust !) + remote: 463 (rust !) remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !) remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !) - remote: 1 (sshv1 !) + remote: 1 devel-peer-request: protocaps devel-peer-request: caps: * bytes (glob) sending protocaps command
--- a/tests/test-static-http.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-static-http.t Tue Jan 18 10:27:13 2022 +0100 @@ -95,7 +95,7 @@ $ cd .. $ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0 abort: unknown revision 'doesnotexist' - [255] + [10] $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0 adding changesets adding manifests
--- a/tests/test-status-color.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-status-color.t Tue Jan 18 10:27:13 2022 +0100 @@ -375,8 +375,8 @@ created new head $ hg merge merging a + warning: conflicts while merging a! (edit, then use 'hg resolve --mark') merging b - warning: conflicts while merging a! (edit, then use 'hg resolve --mark') warning: conflicts while merging b! (edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 2 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
--- a/tests/test-status.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-status.t Tue Jan 18 10:27:13 2022 +0100 @@ -218,6 +218,13 @@ ! deleted ? unknown +hg status -n: + $ env RHG_ON_UNSUPPORTED=abort hg status -n + added + removed + deleted + unknown + hg status modified added removed deleted unknown never-existed ignored: $ hg status modified added removed deleted unknown never-existed ignored @@ -934,6 +941,7 @@ Now the directory is eligible for caching, so its mtime is save in the dirstate $ rm subdir/unknown + $ sleep 0.1 # ensure the kernel’s internal clock for mtimes has ticked $ hg status $ hg debugdirstate --all --no-dates | grep '^ ' 0 -1 set subdir
--- a/tests/test-subrepo.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-subrepo.t Tue Jan 18 10:27:13 2022 +0100 @@ -278,7 +278,7 @@ branchmerge: True, force: False, partial: False ancestor: 1f14a2e2d3ec, local: f0d2028bf86d+, remote: 1831e14459c4 starting 4 threads for background file closing (?) - .hgsubstate: versions differ -> m (premerge) + .hgsubstate: versions differ -> m subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg getting subrepo t @@ -304,7 +304,7 @@ branchmerge: True, force: False, partial: False ancestor: 1831e14459c4, local: e45c8b14af55+, remote: f94576341bcf starting 4 threads for background file closing (?) - .hgsubstate: versions differ -> m (premerge) + .hgsubstate: versions differ -> m subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4 subrepo t: both sides changed subrepository t diverged (local revision: 20a0db6fbf6c, remote revision: 7af322bc1198) @@ -317,13 +317,10 @@ ancestor: 6747d179aa9a, local: 20a0db6fbf6c+, remote: 7af322bc1198 starting 4 threads for background file closing (?) preserving t for resolve of t - t: versions differ -> m (premerge) + t: versions differ -> m picked tool ':merge' for t (binary False symlink False changedelete False) merging t my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a - t: versions differ -> m (merge) - picked tool ':merge' for t (binary False symlink False changedelete False) - my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a warning: conflicts while merging t! 
(edit, then use 'hg resolve --mark') 0 files updated, 0 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon @@ -1021,37 +1018,21 @@ test if untracked file is not overwritten -(this also tests that updated .hgsubstate is treated as "modified", -when 'merge.update()' is aborted before 'merge.recordupdates()', even -if none of mode, size and timestamp of it isn't changed on the -filesystem (see also issue4583)) +(this tests also has a change to update .hgsubstate and merge it within the +same second. It should mark is are modified , even if none of mode, size and +timestamp of it isn't changed on the filesystem (see also issue4583)) $ echo issue3276_ok > repo/s/b $ hg -R repo2 push -f -q - $ touch -t 200001010000 repo/.hgsubstate - $ cat >> repo/.hg/hgrc <<EOF - > [fakedirstatewritetime] - > # emulate invoking dirstate.write() via repo.status() - > # at 2000-01-01 00:00 - > fakenow = 200001010000 - > - > [extensions] - > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py - > EOF $ hg -R repo update b: untracked file differs abort: untracked files in working directory differ from files in requested revision (in subrepository "s") [255] - $ cat >> repo/.hg/hgrc <<EOF - > [extensions] - > fakedirstatewritetime = ! - > EOF $ cat repo/s/b issue3276_ok $ rm repo/s/b - $ touch -t 200001010000 repo/.hgsubstate $ hg -R repo revert --all reverting repo/.hgsubstate reverting subrepo s
--- a/tests/test-template-functions.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-template-functions.t Tue Jan 18 10:27:13 2022 +0100 @@ -1295,10 +1295,10 @@ -1 $ hg log -T '{revset("%d", rev + 1)}\n' -r'tip' abort: unknown revision '3' - [255] + [10] $ hg log -T '{revset("%d", rev - 1)}\n' -r'null' abort: unknown revision '-2' - [255] + [10] Invalid arguments passed to revset()
--- a/tests/test-transplant.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-transplant.t Tue Jan 18 10:27:13 2022 +0100 @@ -1063,7 +1063,7 @@ $ cat r1 Y1 $ hg debugstate | grep ' r1$' - n 644 3 unset r1 + n 0 -1 unset r1 $ hg status -A r1 M r1
--- a/tests/test-treemanifest.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-treemanifest.t Tue Jan 18 10:27:13 2022 +0100 @@ -5,7 +5,7 @@ Requirements get set on init - $ grep treemanifest .hg/requires + $ hg debugrequires | grep treemanifest treemanifest Without directories, looks like any other repo @@ -229,7 +229,7 @@ $ cd repo-mixed $ test -d .hg/store/meta [1] - $ grep treemanifest .hg/requires + $ hg debugrequires | grep treemanifest treemanifest Should be possible to push updates from flat to tree manifest repo @@ -373,7 +373,7 @@ > [experimental] > changegroup3=yes > EOF - $ grep treemanifest empty-repo/.hg/requires + $ hg debugrequires -R empty-repo | grep treemanifest [1] $ hg push -R repo -r 0 empty-repo pushing to empty-repo @@ -382,13 +382,13 @@ adding manifests adding file changes added 1 changesets with 2 changes to 2 files - $ grep treemanifest empty-repo/.hg/requires + $ hg debugrequires -R empty-repo | grep treemanifest treemanifest Pushing to an empty repo works $ hg --config experimental.treemanifest=1 init clone - $ grep treemanifest clone/.hg/requires + $ hg debugrequires -R clone | grep treemanifest treemanifest $ hg push -R repo clone pushing to clone @@ -397,7 +397,7 @@ adding manifests adding file changes added 11 changesets with 15 changes to 10 files (+3 heads) - $ grep treemanifest clone/.hg/requires + $ hg debugrequires -R clone | grep treemanifest treemanifest $ hg -R clone verify checking changesets @@ -682,7 +682,7 @@ No server errors. $ cat deeprepo/errors.log requires got updated to include treemanifest - $ cat deepclone/.hg/requires | grep treemanifest + $ hg debugrequires -R deepclone | grep treemanifest treemanifest Tree manifest revlogs exist. 
$ find deepclone/.hg/store/meta | sort @@ -730,7 +730,7 @@ updating to branch default 8 files updated, 0 files merged, 0 files removed, 0 files unresolved $ cd deeprepo-basicstore - $ grep store .hg/requires + $ hg debugrequires | grep store [1] $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log $ cat hg.pid >> $DAEMON_PIDS @@ -747,7 +747,7 @@ updating to branch default 8 files updated, 0 files merged, 0 files removed, 0 files unresolved $ cd deeprepo-encodedstore - $ grep fncache .hg/requires + $ hg debugrequires | grep fncache [1] $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log $ cat hg.pid >> $DAEMON_PIDS
--- a/tests/test-up-local-change.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-up-local-change.t Tue Jan 18 10:27:13 2022 +0100 @@ -46,13 +46,10 @@ b: remote created -> g getting b preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m picked tool 'true' for a (binary False symlink False changedelete False) merging a my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a - a: versions differ -> m (merge) - picked tool 'true' for a (binary False symlink False changedelete False) - my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a launching merge tool: true *$TESTTMP/r2/a* * * (glob) merge tool returned: 0 1 files updated, 1 files merged, 0 files removed, 0 files unresolved @@ -72,13 +69,10 @@ removing b starting 4 threads for background file closing (?) preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m picked tool 'true' for a (binary False symlink False changedelete False) merging a my a@1e71731e6fbb+ other a@c19d34741b0a ancestor a@1e71731e6fbb - a: versions differ -> m (merge) - picked tool 'true' for a (binary False symlink False changedelete False) - my a@1e71731e6fbb+ other a@c19d34741b0a ancestor a@1e71731e6fbb launching merge tool: true *$TESTTMP/r2/a* * * (glob) merge tool returned: 0 0 files updated, 1 files merged, 1 files removed, 0 files unresolved @@ -95,13 +89,10 @@ b: remote created -> g getting b preserving a for resolve of a - a: versions differ -> m (premerge) + a: versions differ -> m picked tool 'true' for a (binary False symlink False changedelete False) merging a my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a - a: versions differ -> m (merge) - picked tool 'true' for a (binary False symlink False changedelete False) - my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a launching merge tool: true *$TESTTMP/r2/a* * * (glob) merge tool returned: 0 1 files updated, 1 files merged, 0 files removed, 0 files 
unresolved
--- a/tests/test-update-branches.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-update-branches.t Tue Jan 18 10:27:13 2022 +0100 @@ -158,47 +158,47 @@ parent=3 M sub/suba - $ revtest '-C dirty linear' dirty 1 2 -C + $ revtest '--clean dirty linear' dirty 1 2 --clean 2 files updated, 0 files merged, 0 files removed, 0 files unresolved parent=2 - $ revtest '-c dirty linear' dirty 1 2 -c + $ revtest '--check dirty linear' dirty 1 2 --check abort: uncommitted changes parent=1 M foo - $ revtest '-m dirty linear' dirty 1 2 -m + $ revtest '--merge dirty linear' dirty 1 2 --merge 1 files updated, 0 files merged, 0 files removed, 0 files unresolved parent=2 M foo - $ revtest '-m dirty cross' dirty 3 4 -m + $ revtest '--merge dirty cross' dirty 3 4 --merge 1 files updated, 0 files merged, 0 files removed, 0 files unresolved parent=4 M foo - $ revtest '-c dirtysub linear' dirtysub 1 2 -c + $ revtest '--check dirtysub linear' dirtysub 1 2 --check abort: uncommitted changes in subrepository "sub" parent=1 M sub/suba - $ norevtest '-c clean same' clean 2 -c + $ norevtest '--check clean same' clean 2 -c 0 files updated, 0 files merged, 0 files removed, 0 files unresolved updated to "bd10386d478c: 2" 1 other heads for branch "default" parent=2 - $ revtest '-cC dirty linear' dirty 1 2 -cC + $ revtest '--check --clean dirty linear' dirty 1 2 "--check --clean" abort: cannot specify both --clean and --check parent=1 M foo - $ revtest '-mc dirty linear' dirty 1 2 -mc + $ revtest '--merge -checkc dirty linear' dirty 1 2 "--merge --check" abort: cannot specify both --check and --merge parent=1 M foo - $ revtest '-mC dirty linear' dirty 1 2 -mC + $ revtest '--merge -clean dirty linear' dirty 1 2 "--merge --clean" abort: cannot specify both --clean and --merge parent=1 M foo @@ -211,12 +211,27 @@ parent=1 M foo - $ revtest 'none dirty linear' dirty 1 2 -c + $ revtest 'none dirty linear' dirty 1 2 --check + abort: uncommitted changes + parent=1 + M foo + + $ revtest '--merge none dirty 
linear' dirty 1 2 --check abort: uncommitted changes parent=1 M foo - $ revtest 'none dirty linear' dirty 1 2 -C + $ revtest '--merge none dirty linear' dirty 1 2 --merge + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + M foo + + $ revtest '--merge none dirty linear' dirty 1 2 --no-check + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + M foo + + $ revtest 'none dirty linear' dirty 1 2 --clean 2 files updated, 0 files merged, 0 files removed, 0 files unresolved parent=2 @@ -232,12 +247,17 @@ parent=2 M foo - $ revtest 'none dirty linear' dirty 1 2 -c + $ revtest 'none dirty linear' dirty 1 2 --check abort: uncommitted changes parent=1 M foo - $ revtest 'none dirty linear' dirty 1 2 -C + $ revtest 'none dirty linear' dirty 1 2 --no-merge + abort: uncommitted changes + parent=1 + M foo + + $ revtest 'none dirty linear' dirty 1 2 --clean 2 files updated, 0 files merged, 0 files removed, 0 files unresolved parent=2
--- a/tests/test-upgrade-repo.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-upgrade-repo.t Tue Jan 18 10:27:13 2022 +0100 @@ -213,10 +213,7 @@ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !) preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !) - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process additional optimizations are available by specifying "--optimize <name>": @@ -238,10 +235,7 @@ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !) preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !) - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process --optimize can be used to add optimizations @@ -401,6 +395,10 @@ [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !) 
[formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default] $ hg debugupgraderepo + note: selecting all-filelogs for processing to change: dotencode + note: selecting all-manifestlogs for processing to change: dotencode + note: selecting changelog for processing to change: dotencode + repository lacks features recommended by current config options: fncache @@ -473,6 +471,10 @@ $ hg --config format.dotencode=false debugupgraderepo + note: selecting all-filelogs for processing to change: fncache + note: selecting all-manifestlogs for processing to change: fncache + note: selecting changelog for processing to change: fncache + repository lacks features recommended by current config options: fncache @@ -567,6 +569,10 @@ .hg/store/data/f2.i $ hg debugupgraderepo --run --config format.sparse-revlog=false + note: selecting all-filelogs for processing to change: generaldelta + note: selecting all-manifestlogs for processing to change: generaldelta + note: selecting changelog for processing to change: generaldelta + upgrade will perform the following actions: requirements @@ -618,7 +624,7 @@ generaldelta added to original requirements files - $ cat .hg/requires + $ hg debugrequires dotencode fncache generaldelta @@ -671,6 +677,10 @@ $ rm -rf .hg/upgradebackup.*/ $ hg debugupgraderepo --run --no-backup + note: selecting all-filelogs for processing to change: sparserevlog + note: selecting all-manifestlogs for processing to change: sparserevlog + note: selecting changelog for processing to change: sparserevlog + upgrade will perform the following actions: requirements @@ -944,8 +954,25 @@ $ echo "[format]" > .hg/hgrc $ echo "sparse-revlog=no" >> .hg/hgrc + $ hg debugupgrade --optimize re-delta-parent --no-manifest --no-backup --quiet + warning: ignoring --no-manifest, as upgrade is changing: sparserevlog + + requirements + preserved: dotencode, fncache, generaldelta, revlogv1, 
store (no-rust !) + preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !) + removed: sparserevlog + + optimisations: re-delta-parent + + processed revlogs: + - all-filelogs + - changelog + - manifest + $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback - ignoring revlogs selection flags, format requirements change: sparserevlog + note: selecting all-filelogs for processing to change: sparserevlog + note: selecting changelog for processing to change: sparserevlog + upgrade will perform the following actions: requirements @@ -1000,7 +1027,9 @@ $ echo "sparse-revlog=yes" >> .hg/hgrc $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback - ignoring revlogs selection flags, format requirements change: sparserevlog + note: selecting all-filelogs for processing to change: sparserevlog + note: selecting changelog for processing to change: sparserevlog + upgrade will perform the following actions: requirements @@ -1657,10 +1686,7 @@ dirstate-v2 "hg status" will be faster - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process beginning upgrade... repository locked and read-only @@ -1686,10 +1712,7 @@ preserved: * (glob) removed: dirstate-v2 - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process beginning upgrade... repository locked and read-only @@ -1724,10 +1747,7 @@ dirstate-v2 "hg status" will be faster - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process beginning upgrade... repository locked and read-only @@ -1748,10 +1768,7 @@ preserved: * (glob) removed: dirstate-v2 - processed revlogs: - - all-filelogs - - changelog - - manifest + no revlogs to process beginning upgrade... repository locked and read-only
--- a/tests/test-walk.t Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/test-walk.t Tue Jan 18 10:27:13 2022 +0100 @@ -299,10 +299,10 @@ f mammals/skunk skunk $ hg debugwalk -v .hg abort: path 'mammals/.hg' is inside nested repo 'mammals' - [255] + [10] $ hg debugwalk -v ../.hg abort: path contains illegal component: .hg - [255] + [10] $ cd .. $ hg debugwalk -v -Ibeans @@ -410,16 +410,16 @@ [255] $ hg debugwalk -v .hg abort: path contains illegal component: .hg - [255] + [10] $ hg debugwalk -v beans/../.hg abort: path contains illegal component: .hg - [255] + [10] $ hg debugwalk -v beans/../.hg/data abort: path contains illegal component: .hg/data - [255] + [10] $ hg debugwalk -v beans/.hg abort: path 'beans/.hg' is inside nested repo 'beans' - [255] + [10] Test explicit paths and excludes:
--- a/tests/test-wireproto-caching.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,468 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - -persistent-nodemap is not enabled by default. It is not relevant for this test so disable it. - - $ cat >> $HGRCPATH << EOF - > [format] - > use-persistent-nodemap = no - > [extensions] - > blackbox = - > [blackbox] - > track = simplecache - > EOF - $ hg init server - $ enablehttpv2 server - $ cd server - $ cat >> .hg/hgrc << EOF - > [extensions] - > simplecache = $TESTDIR/wireprotosimplecache.py - > EOF - - $ echo a0 > a - $ echo b0 > b - $ hg -q commit -A -m 'commit 0' - $ echo a1 > a - $ hg commit -m 'commit 1' - $ echo b1 > b - $ hg commit -m 'commit 2' - $ echo a2 > a - $ echo b2 > b - $ hg commit -m 'commit 3' - - $ hg log -G -T '{rev}:{node} {desc}' - @ 3:50590a86f3ff5d1e9a1624a7a6957884565cc8e8 commit 3 - | - o 2:4d01eda50c6ac5f7e89cbe1880143a32f559c302 commit 2 - | - o 1:4432d83626e8a98655f062ec1f2a43b07f7fbbb0 commit 1 - | - o 0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0 - - - $ hg --debug debugindex -m - rev linkrev nodeid p1 p2 - 0 0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 - 1 1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 - 2 2 a8853dafacfca6fc807055a660d8b835141a3bb4 a988fb43583e871d1ed5750ee074c6d840bbbfc8 0000000000000000000000000000000000000000 - 3 3 3fe11dfbb13645782b0addafbe75a87c210ffddc a8853dafacfca6fc807055a660d8b835141a3bb4 0000000000000000000000000000000000000000 - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Performing the same request should result in same result, with 2nd response -coming from cache. 
- - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - -Sending different request doesn't yield cache hit. 
- - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41', b'\xa9\x88\xfb\x43\x58\x3e\x87\x1d\x1e\xd5\x75\x0e\xe0\x74\xc6\xd8\x40\xbb\xbf\xc8'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'node': b'\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8', - b'parents': [ - b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - - $ cat .hg/blackbox.log - *> cacher constructed for manifestdata (glob) - *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> cacher constructed for manifestdata (glob) - *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> cacher constructed for manifestdata (glob) - *> cache miss for 37326a83e9843f15161fce9d1e92d06b795d5e8e (glob) - *> storing cache entry for 37326a83e9843f15161fce9d1e92d06b795d5e8e (glob) - - $ cat error.log - - $ killdaemons.py - $ rm .hg/blackbox.log - -Try with object caching mode - - $ cat >> .hg/hgrc << EOF - > [simplecache] - > cacheobjects = true - > EOF - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending 
manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - - $ cat .hg/blackbox.log - *> cacher constructed for manifestdata (glob) - *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> cacher constructed for manifestdata (glob) - *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - - $ cat error.log - - $ killdaemons.py - $ rm .hg/blackbox.log - -A non-cacheable command does not instantiate cacher - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - $ sendhttpv2peer << EOF - > command capabilities - > EOF - creating http peer for wire protocol version 2 - sending capabilities command - response: gen[ - { - b'commands': { - b'branchmap': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'capabilities': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'changesetdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'bookmarks', - b'parents', - b'phase', - b'revision' - ]) - 
}, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filedata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'path': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filesdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'firstchangeset', - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'dict' - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 50000 - }, - b'heads': { - b'args': { - b'publiconly': { - b'default': False, - b'required': False, - b'type': b'bool' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'known': { - b'args': { - b'nodes': { - b'default': [], - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'listkeys': { - b'args': { - b'namespace': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'lookup': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'manifestdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'tree': { - b'required': True, - b'type': 
b'bytes' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 100000 - }, - b'pushkey': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - }, - b'namespace': { - b'required': True, - b'type': b'bytes' - }, - b'new': { - b'required': True, - b'type': b'bytes' - }, - b'old': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'push' - ] - }, - b'rawstorefiledata': { - b'args': { - b'files': { - b'required': True, - b'type': b'list' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - } - }, - b'framingmediatypes': [ - b'application/mercurial-exp-framing-0006' - ], - b'pathfilterprefixes': set([ - b'path:', - b'rootfilesin:' - ]), - b'rawrepoformats': [ - b'generaldelta', - b'revlogv1', - b'sparserevlog' - ] - } - ] - - $ test -f .hg/blackbox.log - [1] - -An error is not cached - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - abort: unknown node: \xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa (esc) - [255] - - $ cat .hg/blackbox.log - *> cacher constructed for manifestdata (glob) - *> cache miss for 2cba2a7d0d1575fea2fe68f597e97a7c2ac2f705 (glob) - *> cacher exiting due to error (glob) - - $ killdaemons.py - $ rm .hg/blackbox.log
--- a/tests/test-wireproto-command-branchmap.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,59 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ hg debugdrawdag << EOF - > C D - > |/ - > B - > | - > A - > EOF - - $ hg up B - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg branch branch1 - marked working directory as branch branch1 - (branches are permanent and global, did you want a bookmark?) - $ echo b1 > foo - $ hg -q commit -A -m 'branch 1' - $ hg up B - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - $ hg branch branch2 - marked working directory as branch branch2 - $ echo b2 > foo - $ hg -q commit -A -m 'branch 2' - - $ hg log -T '{rev}:{node} {branch} {desc}\n' - 5:224161c7589aa48fa83a48feff5e95b56ae327fc branch2 branch 2 - 4:b5faacdfd2633768cb3152336cc0953381266688 branch1 branch 1 - 3:be0ef73c17ade3fc89dc41701eb9fc3a91b58282 default D - 2:26805aba1e600a82e93661149f2313866a221a7b default C - 1:112478962961147124edd43549aedd1a335e44bf default B - 0:426bada5c67598ca65036d57d9e4b64b0c1ce7a0 default A - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -No arguments returns something reasonable - - $ sendhttpv2peer << EOF - > command branchmap - > EOF - creating http peer for wire protocol version 2 - sending branchmap command - response: { - b'branch1': [ - b'\xb5\xfa\xac\xdf\xd2c7h\xcb1R3l\xc0\x953\x81&f\x88' - ], - b'branch2': [ - b'"Aa\xc7X\x9a\xa4\x8f\xa8:H\xfe\xff^\x95\xb5j\xe3\'\xfc' - ], - b'default': [ - b'&\x80Z\xba\x1e`\n\x82\xe96a\x14\x9f#\x13\x86j"\x1a{', - b'\xbe\x0e\xf7<\x17\xad\xe3\xfc\x89\xdcAp\x1e\xb9\xfc:\x91\xb5\x82\x82' - ] - } - - $ cat error.log
--- a/tests/test-wireproto-command-capabilities.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,762 +0,0 @@ -#require no-chg - - $ . $TESTDIR/wireprotohelpers.sh - -persistent-nodemap is not enabled by default. It is not relevant for this test so disable it. - - $ cat >> $HGRCPATH << EOF - > [format] - > use-persistent-nodemap = no - > EOF - - $ hg init server - -zstd isn't present in plain builds. Make tests easier by removing -zstd from the equation. - - $ cat >> server/.hg/hgrc << EOF - > [server] - > compressionengines = zlib - > EOF - - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -A normal capabilities request is serviced for version 1 - - $ sendhttpraw << EOF - > httprequest GET ?cmd=capabilities - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 Script output follows\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-0.1\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - -A proper request without the API server enabled returns the legacy response - - $ sendhttpraw << EOF - > httprequest GET ?cmd=capabilities - > user-agent: test - > x-hgupgrade-1: foo - > x-hgproto-1: cbor - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: foo\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 Script output follows\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-0.1\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - -Restart with just API server enabled. This enables serving the new format. - - $ killdaemons.py - $ cat error.log - - $ cat >> server/.hg/hgrc << EOF - > [experimental] - > web.apiserver = true - > EOF - - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -X-HgUpgrade-<N> without CBOR advertisement uses legacy response - - $ sendhttpraw << EOF - > httprequest GET ?cmd=capabilities - > user-agent: test - > x-hgupgrade-1: foo bar - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> x-hgupgrade-1: foo bar\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 Script output follows\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-0.1\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - -X-HgUpgrade-<N> without known serialization in X-HgProto-<N> uses legacy response - - $ sendhttpraw << EOF - > httprequest GET ?cmd=capabilities - > user-agent: test - > x-hgupgrade-1: foo bar - > x-hgproto-1: some value - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) - s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> x-hgproto-1: some value\r\n - s> x-hgupgrade-1: foo bar\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 Script output follows\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-0.1\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - -X-HgUpgrade-<N> + X-HgProto-<N> headers trigger new response format - - $ sendhttpraw << EOF - > httprequest GET ?cmd=capabilities - > user-agent: test - > x-hgupgrade-1: foo bar - > x-hgproto-1: cbor - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: foo bar\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - cbor> [ - { - b'apibase': b'api/', - b'apis': {}, - b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash' - } - ] - -Restart server to enable HTTPv2 - - $ killdaemons.py - $ enablehttpv2 server - $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Only requested API services are returned - - $ sendhttpraw << EOF - > httprequest GET ?cmd=capabilities - > user-agent: test - > x-hgupgrade-1: foo bar - > x-hgproto-1: cbor - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: foo bar\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - cbor> [ - { - b'apibase': b'api/', - b'apis': {}, - b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash' - } - ] - -Request for HTTPv2 service returns information about it - - $ sendhttpraw << EOF - > httprequest GET ?cmd=capabilities - > user-agent: test - > x-hgupgrade-1: exp-http-v2-0003 foo bar - > x-hgproto-1: cbor - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003 foo bar\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTr
ecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - cbor> [ - { - b'apibase': b'api/', - b'apis': { - b'exp-http-v2-0003': { - b'commands': { - b'branchmap': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'capabilities': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'changesetdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'bookmarks', - b'parents', - b'phase', - b'revision' - ]) - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filedata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'path': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filesdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'firstchangeset', - b'linknode', - b'parents', - b'revision' - ]) - }, - 
b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'dict' - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 50000 - }, - b'heads': { - b'args': { - b'publiconly': { - b'default': False, - b'required': False, - b'type': b'bool' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'known': { - b'args': { - b'nodes': { - b'default': [], - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'listkeys': { - b'args': { - b'namespace': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'lookup': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'manifestdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'tree': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 100000 - }, - b'pushkey': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - }, - b'namespace': { - b'required': True, - b'type': b'bytes' - }, - b'new': { - b'required': True, - b'type': b'bytes' - }, - b'old': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'push' - ] - }, - b'rawstorefiledata': { - b'args': { - b'files': { - b'required': True, - b'type': b'list' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - } - }, - b'framingmediatypes': [ - b'application/mercurial-exp-framing-0006' - ], - b'pathfilterprefixes': set([ - b'path:', - 
b'rootfilesin:' - ]), - b'rawrepoformats': [ - b'generaldelta', - b'revlogv1', - b'sparserevlog' - ] - } - }, - b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash' - } - ] - -capabilities command returns expected info - - $ sendhttpv2peerhandshake << EOF - > command capabilities - > EOF - creating http peer for wire protocol version 2 - s> setsockopt(6, 1, 1) -> None (?) - s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1,X-HgUpgrade-1\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: *\r\n (glob) - s> \r\n - s> 
\xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x
02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - sending capabilities command - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 63\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x13\x00\x00\x01\x00\x01\x00\x11\xa1DnameLcapabilities - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 65e\r\n - s> V\x06\x00\x01\x00\x02\x041 - s> 
\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x8
3LgeneraldeltaHrevlogv1Lsparserevlog - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - response: gen[ - { - b'commands': { - b'branchmap': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'capabilities': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'changesetdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'bookmarks', - b'parents', - b'phase', - b'revision' - ]) - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filedata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'path': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filesdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'firstchangeset', - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'dict' - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 50000 - }, - b'heads': { - b'args': { - b'publiconly': { - b'default': False, - b'required': False, - b'type': b'bool' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'known': { - b'args': { - b'nodes': { - b'default': [], - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'listkeys': { - b'args': { - b'namespace': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - 
b'pull' - ] - }, - b'lookup': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'manifestdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'tree': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 100000 - }, - b'pushkey': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - }, - b'namespace': { - b'required': True, - b'type': b'bytes' - }, - b'new': { - b'required': True, - b'type': b'bytes' - }, - b'old': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'push' - ] - }, - b'rawstorefiledata': { - b'args': { - b'files': { - b'required': True, - b'type': b'list' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - } - }, - b'framingmediatypes': [ - b'application/mercurial-exp-framing-0006' - ], - b'pathfilterprefixes': set([ - b'path:', - b'rootfilesin:' - ]), - b'rawrepoformats': [ - b'generaldelta', - b'revlogv1', - b'sparserevlog' - ] - } - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat error.log
--- a/tests/test-wireproto-command-changesetdata.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,613 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ cat >> .hg/hgrc << EOF - > [phases] - > publish = false - > EOF - $ echo a0 > a - $ echo b0 > b - - $ hg -q commit -A -m 'commit 0' - - $ echo a1 > a - $ echo b1 > b - $ hg commit -m 'commit 1' - $ echo b2 > b - $ hg commit -m 'commit 2' - $ hg phase --public -r . - - $ hg -q up -r 0 - $ echo a2 > a - $ hg commit -m 'commit 3' - created new head - - $ hg log -G -T '{rev}:{node} {desc}\n' - @ 3:eae5f82c2e622368d27daecb76b7e393d0f24211 commit 3 - | - | o 2:0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd commit 2 - | | - | o 1:7592917e1c3e82677cb0a4bc715ca25dd12d28c1 commit 1 - |/ - o 0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0 - - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -No arguments is an invalid request - - $ sendhttpv2peer << EOF - > command changesetdata - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: missing required arguments: revisions - [255] - -Missing nodes for changesetexplicit results in error - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetexplicit'}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: nodes key not present in changesetexplicit revision specifier - [255] - -changesetexplicitdepth requires nodes and depth keys - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetexplicitdepth'}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: nodes key not present in changesetexplicitdepth revision specifier - [255] - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetexplicitdepth', b'nodes': 
[]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: depth key not present in changesetexplicitdepth revision specifier - [255] - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetexplicitdepth', b'depth': 42}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: nodes key not present in changesetexplicitdepth revision specifier - [255] - -changesetdagrange requires roots and heads keys - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetdagrange'}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: roots key not present in changesetdagrange revision specifier - [255] - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetdagrange', b'roots': []}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: heads key not present in changesetdagrange revision specifier - [255] - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetdagrange', b'heads': [b'dummy']}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: roots key not present in changesetdagrange revision specifier - [255] - -Empty changesetdagrange heads results in an error - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{b'type': b'changesetdagrange', b'heads': [], b'roots': []}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - abort: heads key in changesetdagrange cannot be empty - [255] - -Sending just dagrange heads sends all revisions - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{ - > b'type': b'changesetdagrange', - > b'roots': [], - > b'heads': [ - > 
b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', - > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 4 - }, - { - b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:' - }, - { - b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1' - }, - { - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd' - }, - { - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - } - ] - -Sending root nodes limits what data is sent - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{ - > b'type': b'changesetdagrange', - > b'roots': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'], - > b'heads': [ - > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1' - }, - { - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd' - } - ] - -Requesting data on a single node by node works - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a']}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:' - } - ] - -Specifying a noderange and nodes takes union - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[ - > { - > b'type': b'changesetexplicit', - > b'nodes': 
[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'], - > }, - > { - > b'type': b'changesetdagrange', - > b'roots': [b'\x75\x92\x91\x7e\x1c\x3e\x82\x67\x7c\xb0\xa4\xbc\x71\x5c\xa2\x5d\xd1\x2d\x28\xc1'], - > b'heads': [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd'], - > }] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - }, - { - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd' - } - ] - -nodesdepth of 1 limits to exactly requested nodes - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{ - > b'type': b'changesetexplicitdepth', - > b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'], - > b'depth': 1}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - } - ] - -nodesdepth of 2 limits to first ancestor - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{ - > b'type': b'changesetexplicitdepth', - > b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'], - > b'depth': 2}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:' - }, - { - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - } - ] - -nodesdepth with multiple nodes - - $ sendhttpv2peer << EOF - > command changesetdata - > revisions eval:[{ - > b'type': b'changesetexplicitdepth', - > b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', 
b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd'], - > b'depth': 2}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 4 - }, - { - b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:' - }, - { - b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1' - }, - { - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd' - }, - { - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - } - ] - -Parents data is transferred upon request - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'parents'] - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11', - b'parents': [ - b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - -Phase data is transferred upon request - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'phase'] - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd', - b'phase': b'public' - } - ] - -Revision data is transferred upon request - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'revision'] - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > 
b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 61 - ] - ], - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - }, - b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3' - ] - -Bookmarks key isn't present if no bookmarks data - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'bookmarks'] - > revisions eval:[{ - > b'type': b'changesetdagrange', - > b'roots': [], - > b'heads': [ - > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', - > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 4 - }, - { - b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:' - }, - { - b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1' - }, - { - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd' - }, - { - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - } - ] - -Bookmarks are sent when requested - - $ hg -R ../server bookmark -r 0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd book-1 - $ hg -R ../server bookmark -r eae5f82c2e622368d27daecb76b7e393d0f24211 book-2 - $ hg -R ../server bookmark -r eae5f82c2e622368d27daecb76b7e393d0f24211 book-3 - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'bookmarks'] - > revisions eval:[{ - > b'type': b'changesetdagrange', - > b'roots': [], - > b'heads': [ - > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', - > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire 
protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 4 - }, - { - b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:' - }, - { - b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1' - }, - { - b'bookmarks': [ - b'book-1' - ], - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd' - }, - { - b'bookmarks': [ - b'book-2', - b'book-3' - ], - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - } - ] - -Bookmarks are sent when we make a no-new-revisions request - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'bookmarks', b'revision'] - > revisions eval:[{ - > b'type': b'changesetdagrange', - > b'roots': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'], - > b'heads': [ - > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', - > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 63 - ] - ], - b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1' - }, - b'7f144aea0ba742713887b564d57e9d12f12ff382\ntest\n0 0\na\nb\n\ncommit 1', - { - b'bookmarks': [ - b'book-1' - ], - b'fieldsfollowing': [ - [ - b'revision', - 61 - ] - ], - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd' - }, - b'37f0a2d1c28ffe4b879109a7d1bbf8f07b3c763b\ntest\n0 0\nb\n\ncommit 2', - { - b'bookmarks': [ - b'book-2', - b'book-3' - ], - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11' - } - ] - -Multiple fields can be transferred - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'parents', b'revision'] - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > 
b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 61 - ] - ], - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11', - b'parents': [ - b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3' - ] - -Base nodes have just their metadata (e.g. phase) transferred -TODO this doesn't work - - $ sendhttpv2peer << EOF - > command changesetdata - > fields eval:[b'phase', b'parents', b'revision'] - > revisions eval:[{ - > b'type': b'changesetdagrange', - > b'roots': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'], - > b'heads': [ - > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', - > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending changesetdata command - response: gen[ - { - b'totalitems': 3 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 63 - ] - ], - b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1', - b'parents': [ - b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ], - b'phase': b'public' - }, - b'7f144aea0ba742713887b564d57e9d12f12ff382\ntest\n0 0\na\nb\n\ncommit 1', - { - b'fieldsfollowing': [ - [ - b'revision', - 61 - ] - ], - b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd', - b'parents': [ - b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1', - 
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ], - b'phase': b'public' - }, - b'37f0a2d1c28ffe4b879109a7d1bbf8f07b3c763b\ntest\n0 0\nb\n\ncommit 2', - { - b'fieldsfollowing': [ - [ - b'revision', - 61 - ] - ], - b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11', - b'parents': [ - b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ], - b'phase': b'draft' - }, - b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3' - ] - - $ cat error.log
--- a/tests/test-wireproto-command-filedata.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,364 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ cat > a << EOF - > a0 - > 00000000000000000000000000000000000000 - > 11111111111111111111111111111111111111 - > EOF - $ echo b0 > b - $ mkdir -p dir0/child0 dir0/child1 dir1 - $ echo c0 > dir0/c - $ echo d0 > dir0/d - $ echo e0 > dir0/child0/e - $ echo f0 > dir0/child1/f - $ hg -q commit -A -m 'commit 0' - - $ echo a1 >> a - $ echo d1 > dir0/d - $ hg commit -m 'commit 1' - $ echo f1 > dir0/child1/f - $ hg commit -m 'commit 2' - - $ hg -q up -r 0 - $ echo a2 >> a - $ hg commit -m 'commit 3' - created new head - -Create multiple heads introducing the same changeset - - $ hg -q up -r 0 - $ echo foo > dupe-file - $ hg commit -Am 'dupe 1' - adding dupe-file - created new head - $ hg -q up -r 0 - $ echo foo > dupe-file - $ hg commit -Am 'dupe 2' - adding dupe-file - created new head - - $ hg log -G -T '{rev}:{node} {desc}\n' - @ 5:732c3dd7bee94242de656000e5f458e7ccfe2828 dupe 2 - | - | o 4:4334f10897d13c3e8beb4b636f7272b4ec2d0322 dupe 1 - |/ - | o 3:5ce944d7fece1252dae06c34422b573c191b9489 commit 3 - |/ - | o 2:b3c27db01410dae01e5485d425b1440078df540c commit 2 - | | - | o 1:3ef5e551f219ba505481d34d6b0316b017fa3f00 commit 1 - |/ - o 0:91b232a2253ce0638496f67bdfd7a4933fb51b25 commit 0 - - - $ hg --debug debugindex a - rev linkrev nodeid p1 p2 - 0 0 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 - 1 1 0a86321f1379d1a9ecd0579a22977af7a5acaf11 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000 - 2 3 7e5801b6d5f03a5a54f3c47b583f7567aad43e5b 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000 - - $ hg --debug debugindex dir0/child0/e - rev linkrev nodeid p1 p2 - 0 0 bbba6c06b30f443d34ff841bc985c4d0827c6be4 
0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 - - $ hg --debug debugindex dupe-file - rev linkrev nodeid p1 p2 - 0 4 2ed2a3912a0b24502043eae84ee4b279c18b90dd 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Missing arguments is an error - - $ sendhttpv2peer << EOF - > command filedata - > EOF - creating http peer for wire protocol version 2 - sending filedata command - abort: missing required arguments: nodes, path - [255] - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[] - > EOF - creating http peer for wire protocol version 2 - sending filedata command - abort: missing required arguments: path - [255] - -Unknown node is an error - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa'] - > path eval:b'a' - > EOF - creating http peer for wire protocol version 2 - sending filedata command - abort: unknown file node: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - [255] - -Fetching a single revision returns just metadata by default - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11'] - > path eval:b'a' - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - } - ] - -Requesting parents works - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11'] - > path eval:b'a' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': 
b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11', - b'parents': [ - b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - -Requesting revision data works -(haveparents defaults to False, so fulltext is emitted) - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11'] - > path eval:b'a' - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 84 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n' - ] - -haveparents=False should be same as above - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11'] - > path eval:b'a' - > fields eval:[b'revision'] - > haveparents eval:False - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 84 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n' - ] - -haveparents=True should emit a delta - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11'] - > path eval:b'a' - > fields eval:[b'revision'] - > haveparents eval:True - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'deltabasenode': 
b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'fieldsfollowing': [ - [ - b'delta', - 15 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n' - ] - -Requesting multiple revisions works -(first revision is a fulltext since haveparents=False by default) - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x64\x9d\x14\x9d\xf4\x3d\x83\x88\x25\x23\xb7\xfb\x1e\x6a\x3a\xf6\xf1\x90\x7b\x39', b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11'] - > path eval:b'a' - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 81 - ] - ], - b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9' - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n', - { - b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'fieldsfollowing': [ - [ - b'delta', - 15 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n' - ] - -Revisions are sorted by DAG order, parents first - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11', b'\x64\x9d\x14\x9d\xf4\x3d\x83\x88\x25\x23\xb7\xfb\x1e\x6a\x3a\xf6\xf1\x90\x7b\x39'] - > path eval:b'a' - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 81 - ] - ], - b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9' - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n', - { - b'deltabasenode': 
b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'fieldsfollowing': [ - [ - b'delta', - 15 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n' - ] - -Requesting parents and revision data works - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x7e\x58\x01\xb6\xd5\xf0\x3a\x5a\x54\xf3\xc4\x7b\x58\x3f\x75\x67\xaa\xd4\x3e\x5b'] - > path eval:b'a' - > fields eval:[b'parents', b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 84 - ] - ], - b'node': b'~X\x01\xb6\xd5\xf0:ZT\xf3\xc4{X?ug\xaa\xd4>[', - b'parents': [ - b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na2\n' - ] - -Linknode for duplicate revision is the initial revision - - $ sendhttpv2peer << EOF - > command filedata - > nodes eval:[b'\x2e\xd2\xa3\x91\x2a\x0b\x24\x50\x20\x43\xea\xe8\x4e\xe4\xb2\x79\xc1\x8b\x90\xdd'] - > path eval:b'dupe-file' - > fields eval:[b'linknode', b'parents', b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending filedata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 4 - ] - ], - b'linknode': b'C4\xf1\x08\x97\xd1<>\x8b\xebKcorr\xb4\xec-\x03"', - b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - b'foo\n' - ] - - $ cat error.log
--- a/tests/test-wireproto-command-filesdata.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1298 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ cat > a << EOF - > a0 - > 00000000000000000000000000000000000000 - > 11111111111111111111111111111111111111 - > EOF - $ cat > b << EOF - > b0 - > aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - > bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb - > EOF - $ mkdir -p dir0/child0 dir0/child1 dir1 - $ echo c0 > dir0/c - $ echo d0 > dir0/d - $ echo e0 > dir0/child0/e - $ echo f0 > dir0/child1/f - $ hg -q commit -A -m 'commit 0' - - $ echo a1 >> a - $ echo d1 > dir0/d - $ echo g0 > g - $ echo h0 > h - $ hg -q commit -A -m 'commit 1' - $ echo f1 > dir0/child1/f - $ echo i0 > dir0/i - $ hg -q commit -A -m 'commit 2' - - $ hg -q up -r 0 - $ echo a2 >> a - $ hg commit -m 'commit 3' - created new head - -Create multiple heads introducing the same file nodefile node - - $ hg -q up -r 0 - $ echo foo > dupe-file - $ hg commit -Am 'dupe 1' - adding dupe-file - created new head - $ hg -q up -r 0 - $ echo foo > dupe-file - $ hg commit -Am 'dupe 2' - adding dupe-file - created new head - - $ hg log -G -T '{rev}:{node} {desc}\n' - @ 5:47fc30580911232cb264675b402819deddf6c6f0 dupe 2 - | - | o 4:b16cce2967c1749ef4f4e3086a806cfbad8a3af7 dupe 1 - |/ - | o 3:476fbf122cd82f6726f0191ff146f67140946abc commit 3 - |/ - | o 2:b91c03cbba3519ab149b6cd0a0afbdb5cf1b5c8a commit 2 - | | - | o 1:5b0b1a23577e205ea240e39c9704e28d7697cbd8 commit 1 - |/ - o 0:6e875ff18c227659ad6143bb3580c65700734884 commit 0 - - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Missing arguments is an error - - $ sendhttpv2peer << EOF - > command filesdata - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - abort: missing required arguments: revisions - [255] - -Bad pattern to pathfilter is rejected - - $ sendhttpv2peer << EOF - 
> command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > pathfilter eval:{b'include': [b'bad:foo']} - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - abort: include pattern must begin with `path:` or `rootfilesin:`; got bad:foo - [255] - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > pathfilter eval:{b'exclude': [b'glob:foo']} - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - abort: exclude pattern must begin with `path:` or `rootfilesin:`; got glob:foo - [255] - -Fetching a single changeset without parents fetches all files - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 8, - b'totalpaths': 8 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - { - b'path': b'b', - b'totalitems': 1 - }, - { - b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2' - }, - { - b'path': b'dir0/c', - b'totalitems': 1 - }, - { - b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01' - }, - { - b'path': b'dir0/child0/e', - b'totalitems': 1 - }, - { - b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4' - }, - { - b'path': b'dir0/child1/f', - b'totalitems': 1 - }, - { - b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4' - }, - { - b'path': b'dir0/d', - 
b'totalitems': 1 - }, - { - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - } - ] - -Fetching a single changeset saying parents data is available fetches just new files - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > haveparents eval:True - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 4, - b'totalpaths': 4 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - } - ] - -A path filter for a sub-directory is honored - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > haveparents eval:True - > pathfilter eval:{b'include': [b'path:dir0']} - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 1, - b'totalpaths': 1 - }, - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - } - ] - - $ sendhttpv2peer << EOF - 
> command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > haveparents eval:True - > pathfilter eval:{b'exclude': [b'path:a', b'path:g']} - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 2, - b'totalpaths': 2 - }, - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - } - ] - -Requesting multiple changeset nodes without haveparents sends all data for both - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a', - > ]}] - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 10, - b'totalpaths': 9 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - { - b'path': b'b', - b'totalitems': 1 - }, - { - b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2' - }, - { - b'path': b'dir0/c', - b'totalitems': 1 - }, - { - b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01' - }, - { - b'path': b'dir0/child0/e', - b'totalitems': 1 - }, - { - b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4' - }, - { - b'path': b'dir0/child1/f', - b'totalitems': 2 - }, - { - b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4' - }, - { - b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e' - }, - { - b'path': 
b'dir0/d', - b'totalitems': 1 - }, - { - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - { - b'path': b'dir0/i', - b'totalitems': 1 - }, - { - b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7' - }, - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - } - ] - -Requesting multiple changeset nodes with haveparents sends incremental data for both - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a', - > ]}] - > haveparents eval:True - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 6, - b'totalpaths': 6 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - { - b'path': b'dir0/child1/f', - b'totalitems': 1 - }, - { - b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e' - }, - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - { - b'path': b'dir0/i', - b'totalitems': 1 - }, - { - b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7' - }, - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - } - ] - -Requesting parents works - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - 
> b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 8, - b'totalpaths': 8 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11', - b'parents': [ - b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'path': b'b', - b'totalitems': 1 - }, - { - b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'path': b'dir0/c', - b'totalitems': 1 - }, - { - b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'path': b'dir0/child0/e', - b'totalitems': 1 - }, - { - b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'path': b'dir0/child1/f', - b'totalitems': 1 - }, - { - b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'node': 
b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G', - b'parents': [ - b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - -Requesting revision data works -(haveparents defaults to False, so fulltext is emitted) - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 8, - b'totalpaths': 8 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 84 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n', - { - b'path': b'b', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 81 - ] - ], - b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2' - }, - b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n', - { - b'path': b'dir0/c', - b'totalitems': 1 - }, - { - 
b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01' - }, - b'c0\n', - { - b'path': b'dir0/child0/e', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4' - }, - b'e0\n', - { - b'path': b'dir0/child1/f', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4' - }, - b'f0\n', - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - b'd1\n', - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - b'g0\n', - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - }, - b'h0\n' - ] - -haveparents=False should be same as above - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > fields eval:[b'revision'] - > haveparents eval:False - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 8, - b'totalpaths': 8 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 84 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n', - { - b'path': b'b', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 81 - ] 
- ], - b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2' - }, - b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n', - { - b'path': b'dir0/c', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01' - }, - b'c0\n', - { - b'path': b'dir0/child0/e', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4' - }, - b'e0\n', - { - b'path': b'dir0/child1/f', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4' - }, - b'f0\n', - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - b'd1\n', - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - b'g0\n', - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - }, - b'h0\n' - ] - -haveparents=True should emit a delta - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > ]}] - > fields eval:[b'revision'] - > haveparents eval:True - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 4, - b'totalpaths': 4 - }, - { - b'path': b'a', - b'totalitems': 1 - }, - { - b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'fieldsfollowing': [ - 
[ - b'delta', - 15 - ] - ], - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n', - { - b'path': b'dir0/d', - b'totalitems': 1 - }, - { - b'deltabasenode': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&', - b'fieldsfollowing': [ - [ - b'delta', - 15 - ] - ], - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03d1\n', - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - b'g0\n', - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - }, - b'h0\n' - ] - -Requesting multiple revisions works -(first revision is a fulltext since haveparents=False by default) - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x6e\x87\x5f\xf1\x8c\x22\x76\x59\xad\x61\x43\xbb\x35\x80\xc6\x57\x00\x73\x48\x84', - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a', - > ]}] - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 12, - b'totalpaths': 9 - }, - { - b'path': b'a', - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 81 - ] - ], - b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9' - }, - b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n', - { - b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9', - b'fieldsfollowing': [ - [ - b'delta', - 15 - ] - ], - b'node': 
b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n', - { - b'path': b'b', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 81 - ] - ], - b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2' - }, - b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n', - { - b'path': b'dir0/c', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01' - }, - b'c0\n', - { - b'path': b'dir0/child0/e', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4' - }, - b'e0\n', - { - b'path': b'dir0/child1/f', - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4' - }, - b'f0\n', - { - b'deltabasenode': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4', - b'fieldsfollowing': [ - [ - b'delta', - 15 - ] - ], - b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e' - }, - b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03f1\n', - { - b'path': b'dir0/d', - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&' - }, - b'd0\n', - { - b'deltabasenode': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&', - b'fieldsfollowing': [ - [ - b'delta', - 15 - ] - ], - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03d1\n', - { - b'path': b'dir0/i', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7' - }, - b'i0\n', - { - b'path': 
b'g', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - b'g0\n', - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 3 - ] - ], - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - }, - b'h0\n' - ] - -Requesting linknode field works - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x6e\x87\x5f\xf1\x8c\x22\x76\x59\xad\x61\x43\xbb\x35\x80\xc6\x57\x00\x73\x48\x84', - > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8', - > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a', - > ]}] - > fields eval:[b'linknode'] - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 12, - b'totalpaths': 9 - }, - { - b'path': b'a', - b'totalitems': 2 - }, - { - b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84', - b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9' - }, - { - b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8', - b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11' - }, - { - b'path': b'b', - b'totalitems': 1 - }, - { - b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84', - b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2' - }, - { - b'path': b'dir0/c', - b'totalitems': 1 - }, - { - b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84', - b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01' - }, - { - b'path': b'dir0/child0/e', - b'totalitems': 1 - }, - { - b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84', - b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4' - }, - { - b'path': b'dir0/child1/f', - b'totalitems': 2 - 
}, - { - b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84', - b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4' - }, - { - b'linknode': b'\xb9\x1c\x03\xcb\xba5\x19\xab\x14\x9bl\xd0\xa0\xaf\xbd\xb5\xcf\x1b\\\x8a', - b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e' - }, - { - b'path': b'dir0/d', - b'totalitems': 2 - }, - { - b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84', - b'node': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&' - }, - { - b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8', - b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G' - }, - { - b'path': b'dir0/i', - b'totalitems': 1 - }, - { - b'linknode': b'\xb9\x1c\x03\xcb\xba5\x19\xab\x14\x9bl\xd0\xa0\xaf\xbd\xb5\xcf\x1b\\\x8a', - b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7' - }, - { - b'path': b'g', - b'totalitems': 1 - }, - { - b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8', - b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c' - }, - { - b'path': b'h', - b'totalitems': 1 - }, - { - b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8', - b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K' - } - ] - -Test behavior where a file node is introduced in 2 DAG heads - -Request for changeset introducing filenode returns linknode as self - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\xb1\x6c\xce\x29\x67\xc1\x74\x9e\xf4\xf4\xe3\x08\x6a\x80\x6c\xfb\xad\x8a\x3a\xf7', - > ]}] - > fields eval:[b'linknode'] - > pathfilter eval:{b'include': [b'path:dupe-file']} - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 1, - b'totalpaths': 1 - }, - { - b'path': b'dupe-file', - b'totalitems': 1 - }, - { - b'linknode': 
b'\xb1l\xce)g\xc1t\x9e\xf4\xf4\xe3\x08j\x80l\xfb\xad\x8a:\xf7', - b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd' - } - ] - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\xb1\x6c\xce\x29\x67\xc1\x74\x9e\xf4\xf4\xe3\x08\x6a\x80\x6c\xfb\xad\x8a\x3a\xf7', - > ]}] - > fields eval:[b'linknode'] - > haveparents eval:True - > pathfilter eval:{b'include': [b'path:dupe-file']} - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 1, - b'totalpaths': 1 - }, - { - b'path': b'dupe-file', - b'totalitems': 1 - }, - { - b'linknode': b'\xb1l\xce)g\xc1t\x9e\xf4\xf4\xe3\x08j\x80l\xfb\xad\x8a:\xf7', - b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd' - } - ] - -Request for changeset where recorded linknode isn't in DAG ancestry will get -rewritten accordingly - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x47\xfc\x30\x58\x09\x11\x23\x2c\xb2\x64\x67\x5b\x40\x28\x19\xde\xdd\xf6\xc6\xf0', - > ]}] - > fields eval:[b'linknode'] - > pathfilter eval:{b'include': [b'path:dupe-file']} - > EOF - creating http peer for wire protocol version 2 - sending filesdata command - response: gen[ - { - b'totalitems': 1, - b'totalpaths': 1 - }, - { - b'path': b'dupe-file', - b'totalitems': 1 - }, - { - b'linknode': b'G\xfc0X\t\x11#,\xb2dg[@(\x19\xde\xdd\xf6\xc6\xf0', - b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd' - } - ] - - $ sendhttpv2peer << EOF - > command filesdata - > revisions eval:[{ - > b'type': b'changesetexplicit', - > b'nodes': [ - > b'\x47\xfc\x30\x58\x09\x11\x23\x2c\xb2\x64\x67\x5b\x40\x28\x19\xde\xdd\xf6\xc6\xf0', - > ]}] - > fields eval:[b'linknode'] - > haveparents eval:True - > pathfilter eval:{b'include': [b'path:dupe-file']} - > EOF - creating http peer for wire protocol version 2 - sending 
filesdata command - response: gen[ - { - b'totalitems': 1, - b'totalpaths': 1 - }, - { - b'path': b'dupe-file', - b'totalitems': 1 - }, - { - b'linknode': b'G\xfc0X\t\x11#,\xb2dg[@(\x19\xde\xdd\xf6\xc6\xf0', - b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd' - } - ] - - $ cat error.log
--- a/tests/test-wireproto-command-heads.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,56 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ hg debugdrawdag << EOF - > H I J - > | | | - > E F G - > | |/ - > C D - > |/ - > B - > | - > A - > EOF - - $ hg phase --force --secret J - $ hg phase --public E - - $ hg log -r 'E + H + I + G + J' -T '{rev}:{node} {desc} {phase}\n' - 4:78d2dca436b2f5b188ac267e29b81e07266d38fc E public - 7:ae492e36b0c8339ffaf328d00b85b4525de1165e H draft - 8:1d6f6b91d44aaba6d5e580bc30a9948530dbe00b I draft - 6:29446d2dc5419c5f97447a8bc062e4cc328bf241 G draft - 9:dec04b246d7cbb670c6689806c05ad17c835284e J secret - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -All non-secret heads returned by default - - $ sendhttpv2peer << EOF - > command heads - > EOF - creating http peer for wire protocol version 2 - sending heads command - response: [ - b'\x1dok\x91\xd4J\xab\xa6\xd5\xe5\x80\xbc0\xa9\x94\x850\xdb\xe0\x0b', - b'\xaeI.6\xb0\xc83\x9f\xfa\xf3(\xd0\x0b\x85\xb4R]\xe1\x16^', - b')Dm-\xc5A\x9c_\x97Dz\x8b\xc0b\xe4\xcc2\x8b\xf2A' - ] - -Requesting just the public heads works - - $ sendhttpv2peer << EOF - > command heads - > publiconly 1 - > EOF - creating http peer for wire protocol version 2 - sending heads command - response: [ - b'x\xd2\xdc\xa46\xb2\xf5\xb1\x88\xac&~)\xb8\x1e\x07&m8\xfc' - ] - - $ cat error.log
--- a/tests/test-wireproto-command-known.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,58 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ hg debugdrawdag << EOF - > C D - > |/ - > B - > | - > A - > EOF - - $ hg log -T '{rev}:{node} {desc}\n' - 3:be0ef73c17ade3fc89dc41701eb9fc3a91b58282 D - 2:26805aba1e600a82e93661149f2313866a221a7b C - 1:112478962961147124edd43549aedd1a335e44bf B - 0:426bada5c67598ca65036d57d9e4b64b0c1ce7a0 A - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -No arguments returns something reasonable - - $ sendhttpv2peer << EOF - > command known - > EOF - creating http peer for wire protocol version 2 - sending known command - response: [] - -Single known node works - - $ sendhttpv2peer << EOF - > command known - > nodes eval:[b'\x42\x6b\xad\xa5\xc6\x75\x98\xca\x65\x03\x6d\x57\xd9\xe4\xb6\x4b\x0c\x1c\xe7\xa0'] - > EOF - creating http peer for wire protocol version 2 - sending known command - response: [ - True - ] - -Multiple nodes works - - $ sendhttpv2peer << EOF - > command known - > nodes eval:[b'\x42\x6b\xad\xa5\xc6\x75\x98\xca\x65\x03\x6d\x57\xd9\xe4\xb6\x4b\x0c\x1c\xe7\xa0', b'00000000000000000000', b'\x11\x24\x78\x96\x29\x61\x14\x71\x24\xed\xd4\x35\x49\xae\xdd\x1a\x33\x5e\x44\xbf'] - > EOF - creating http peer for wire protocol version 2 - sending known command - response: [ - True, - False, - True - ] - - $ cat error.log
--- a/tests/test-wireproto-command-listkeys.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,65 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ hg debugdrawdag << EOF - > C D - > |/ - > B - > | - > A - > EOF - - $ hg phase --public -r C - $ hg book -r C @ - - $ hg log -T '{rev}:{node} {desc}\n' - 3:be0ef73c17ade3fc89dc41701eb9fc3a91b58282 D - 2:26805aba1e600a82e93661149f2313866a221a7b C - 1:112478962961147124edd43549aedd1a335e44bf B - 0:426bada5c67598ca65036d57d9e4b64b0c1ce7a0 A - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Request for namespaces works - - $ sendhttpv2peer << EOF - > command listkeys - > namespace namespaces - > EOF - creating http peer for wire protocol version 2 - sending listkeys command - response: { - b'bookmarks': b'', - b'namespaces': b'', - b'phases': b'' - } - -Request for phases works - - $ sendhttpv2peer << EOF - > command listkeys - > namespace phases - > EOF - creating http peer for wire protocol version 2 - sending listkeys command - response: { - b'be0ef73c17ade3fc89dc41701eb9fc3a91b58282': b'1', - b'publishing': b'True' - } - -Request for bookmarks works - - $ sendhttpv2peer << EOF - > command listkeys - > namespace bookmarks - > EOF - creating http peer for wire protocol version 2 - sending listkeys command - response: { - b'@': b'26805aba1e600a82e93661149f2313866a221a7b' - } - - $ cat error.log
--- a/tests/test-wireproto-command-lookup.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,33 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ cat >> .hg/hgrc << EOF - > [web] - > push_ssl = false - > allow-push = * - > EOF - $ hg debugdrawdag << EOF - > C D - > |/ - > B - > | - > A - > EOF - $ root_node=$(hg log -r A -T '{node}') - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -lookup for known node works - - $ sendhttpv2peer << EOF - > command lookup - > key $root_node - > EOF - creating http peer for wire protocol version 2 - sending lookup command - response: b'Bk\xad\xa5\xc6u\x98\xcae\x03mW\xd9\xe4\xb6K\x0c\x1c\xe7\xa0' - - $ cat error.log
--- a/tests/test-wireproto-command-manifestdata.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,358 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ echo a0 > a - $ echo b0 > b - $ mkdir -p dir0/child0 dir0/child1 dir1 - $ echo c0 > dir0/c - $ echo d0 > dir0/d - $ echo e0 > dir0/child0/e - $ echo f0 > dir0/child1/f - $ hg -q commit -A -m 'commit 0' - - $ echo a1 > a - $ echo d1 > dir0/d - $ hg commit -m 'commit 1' - $ echo f0 > dir0/child1/f - $ hg commit -m 'commit 2' - nothing changed - [1] - - $ hg -q up -r 0 - $ echo a2 > a - $ hg commit -m 'commit 3' - created new head - - $ hg log -G -T '{rev}:{node} {desc}\n' - @ 2:c8757a2ffe552850d1e0dfe60d295ebf64c196d9 commit 3 - | - | o 1:650165e803375748a94df471e5b58d85763e0b29 commit 1 - |/ - o 0:6d85ca1270b377d320098556ba5bfad34a9ee12d commit 0 - - - $ hg --debug debugindex -m - rev linkrev nodeid p1 p2 - 0 0 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 - 1 1 91e0bdbfb0dde0023fa063edc1445f207a22eac7 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000 - 2 2 46a6721b5edaf0ea04b79a5cb3218854a4d2aba0 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000 - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Missing arguments is an error - - $ sendhttpv2peer << EOF - > command manifestdata - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - abort: missing required arguments: nodes, tree - [255] - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - abort: missing required arguments: tree - [255] - -Unknown node is an error - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes 
eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa'] - > tree eval:b'' - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - abort: unknown node: \xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa (esc) - [255] - -Fetching a single revision returns just metadata by default - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0' - } - ] - -Requesting parents works - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0', - b'parents': [ - b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - -Requesting revision data works -(haveparents defaults to false, so fulltext is emitted) - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 292 - ] - ], - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0' - }, - 
b'a\x000879345e39377229634b420c639454156726c6b6\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n' - ] - -haveparents=False yields same output - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > fields eval:[b'revision'] - > haveparents eval:False - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 292 - ] - ], - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0' - }, - b'a\x000879345e39377229634b420c639454156726c6b6\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n' - ] - -haveparents=True will emit delta - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > fields eval:[b'revision'] - > haveparents eval:True - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'fieldsfollowing': [ - [ - b'delta', - 55 - ] - ], - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0' - }, - b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n' - ] - -Requesting multiple revisions works -(haveparents defaults to false, so 
fulltext is emitted unless a parent -has been emitted) - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 292 - ] - ], - b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4' - }, - b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n', - { - b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'fieldsfollowing': [ - [ - b'delta', - 55 - ] - ], - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0' - }, - b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n' - ] - -With haveparents=True, first revision is a delta instead of fulltext - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > fields eval:[b'revision'] - > haveparents eval:True - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 292 - ] - ], - b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4' - }, - 
b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n', - { - b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'fieldsfollowing': [ - [ - b'delta', - 55 - ] - ], - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0' - }, - b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n' - ] - -Revisions are sorted by DAG order, parents first - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0', b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4'] - > tree eval:b'' - > fields eval:[b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 292 - ] - ], - b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4' - }, - b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n', - { - b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'fieldsfollowing': [ - [ - b'delta', - 55 - ] - ], - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0' - }, - b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n' - ] - -Requesting parents and revision data works - - $ sendhttpv2peer << EOF - > command 
manifestdata - > nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0'] - > tree eval:b'' - > fields eval:[b'parents', b'revision'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 2 - }, - { - b'fieldsfollowing': [ - [ - b'revision', - 292 - ] - ], - b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n', - { - b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'fieldsfollowing': [ - [ - b'delta', - 55 - ] - ], - b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0', - b'parents': [ - b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - }, - b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n' - ] - - $ cat error.log
--- a/tests/test-wireproto-command-pushkey.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,45 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - - $ hg init server - $ enablehttpv2 server - $ cd server - $ cat >> .hg/hgrc << EOF - > [web] - > push_ssl = false - > allow-push = * - > EOF - $ hg debugdrawdag << EOF - > C D - > |/ - > B - > | - > A - > EOF - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -pushkey for a bookmark works - - $ sendhttpv2peer << EOF - > command pushkey - > namespace bookmarks - > key @ - > old - > new 426bada5c67598ca65036d57d9e4b64b0c1ce7a0 - > EOF - creating http peer for wire protocol version 2 - sending pushkey command - response: True - - $ sendhttpv2peer << EOF - > command listkeys - > namespace bookmarks - > EOF - creating http peer for wire protocol version 2 - sending listkeys command - response: { - b'@': b'426bada5c67598ca65036d57d9e4b64b0c1ce7a0' - } - - $ cat error.log
--- a/tests/test-wireproto-command-rawstorefiledata.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,132 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - $ hg init server - $ enablehttpv2 server - $ cd server - $ echo a0 > a - $ echo b0 > b - $ hg -q commit -A -m 'commit 0' - $ echo a1 > a - $ hg commit -m 'commit 1' - $ mkdir dir0 - $ mkdir dir1 - $ echo c0 > dir0/c - $ echo d0 > dir0/d - $ echo e0 > dir1/e - $ echo f0 > dir1/f - $ hg commit -A -m 'commit 2' - adding dir0/c - adding dir0/d - adding dir1/e - adding dir1/f - $ echo f1 > dir1/f - $ hg commit -m 'commit 3' - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - -Missing requirement argument results in error - - $ sendhttpv2peer << EOF - > command rawstorefiledata - > EOF - creating http peer for wire protocol version 2 - sending rawstorefiledata command - abort: missing required arguments: files - [255] - -Unknown files value results in error - - $ sendhttpv2peer << EOF - > command rawstorefiledata - > files eval:[b'unknown'] - > EOF - creating http peer for wire protocol version 2 - sending rawstorefiledata command - abort: unknown file type: unknown - [255] - -Requesting just changelog works - - $ sendhttpv2peer << EOF - > command rawstorefiledata - > files eval:[b'changelog'] - > EOF - creating http peer for wire protocol version 2 - sending rawstorefiledata command - response: gen[ - { - b'filecount': 1, - b'totalsize': 527 (no-zstd !) - b'totalsize': 530 (zstd !) - }, - { - b'location': b'store', - b'path': b'00changelog.i', - b'size': 527 (no-zstd !) - b'size': 530 (zstd !) 
- }, - b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (no-zstd !) 
- b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00Q\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd WE\x02\x00r\x04\x0f\x14\x90\x01\x0e#\xf7h$;NQC%\xf8f\xd7\xb1\x81\x8d+\x01\x16+)5\xa8\x19\xdaA\xae\xe3\x00\xe9v\xe2l\x05v\x19\x11\xd4\xc1onK\xa2\x17c\xb4\xf3\xe7 z\x13\x8f\x1c\xf3j4\x03\x03\x00`\x06\x84\x8b\x1a\n\x14\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (zstd !) - b'' - ] - -Requesting just manifestlog works (as impractical as that operation may be). - - $ sendhttpv2peer << EOF - > command rawstorefiledata - > files eval:[b'manifestlog'] - > EOF - creating http peer for wire protocol version 2 - sending rawstorefiledata command - response: gen[ - { - b'filecount': 1, - b'totalsize': 584 (no-zstd !) - b'totalsize': 588 (zstd !) - }, - { - b'location': b'store', - b'path': b'00manifest.i', - b'size': 584 (no-zstd !) - b'size': 588 (zstd !) 
- }, - b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (no-zstd !) 
- b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd V\xfd\x01\x00b\xc5\x0e\x0f\xc0\xd1\x00\xfb\x0c\xb9\xca\xdf\xb2R\xba!\xf2\xf6\x1d\x80\xd5\x95Yc\xef9DaT\xcefcM\xf1\x12\t\x84\xf3\x1a\x04\x04N\\\'S\xf2\'\x8cz5\xc5\x9f\xfa\x18\xf3\x82W\x1a\x83Y\xe8\xf0\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x91\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd \xccE\x04\x00bK\x1e\x17\xb0A0\xff\xff\x9b\xb5V\x99\x99\xfa\xb6\xae\xf5n),"\xf1\n\x02\xb5\x07\x82++\xd1]T\x1b3\xaa\x8e\x10+)R\xa6\\\x9a\x10\xab+\xb4\x8bB\x9f\x13U\xd4\x98\xbd\xde \x9a\xf4\xd1}[\xfb{,q\x14Kf\x06\x1e\x10\xd6\x17\xbbl\x90\x16\xb9\xb3\xd8\x07\xee\xfc\xa8\x8eI\x10]\x9c\x1ava\x054W\xad\xdf\xb3\x18\xee\xbdd\x15\xdf$\x85St\n\xde\xee?\x91\xa0\x83\x11\x08\xd8\x01\x80\x10B\x04\x00\x04S\x04B\xc7Tw\x9f\xb9,\x00\x00\x00\x00\x01\x10\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (zstd !) - b'' - ] - -Requesting both changelog and manifestlog works. 
- - $ sendhttpv2peer << EOF - > command rawstorefiledata - > files eval:[b'changelog', b'manifestlog'] - > EOF - creating http peer for wire protocol version 2 - sending rawstorefiledata command - response: gen[ - { - b'filecount': 2, - b'totalsize': 1111 (no-zstd !) - b'totalsize': 1118 (zstd !) - }, - { - b'location': b'store', - b'path': b'00manifest.i', - b'size': 584 (no-zstd !) - b'size': 588 (zstd !) - }, - b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ 
u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (no-zstd !) - b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd V\xfd\x01\x00b\xc5\x0e\x0f\xc0\xd1\x00\xfb\x0c\xb9\xca\xdf\xb2R\xba!\xf2\xf6\x1d\x80\xd5\x95Yc\xef9DaT\xcefcM\xf1\x12\t\x84\xf3\x1a\x04\x04N\\\'S\xf2\'\x8cz5\xc5\x9f\xfa\x18\xf3\x82W\x1a\x83Y\xe8\xf0\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x91\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd \xccE\x04\x00bK\x1e\x17\xb0A0\xff\xff\x9b\xb5V\x99\x99\xfa\xb6\xae\xf5n),"\xf1\n\x02\xb5\x07\x82++\xd1]T\x1b3\xaa\x8e\x10+)R\xa6\\\x9a\x10\xab+\xb4\x8bB\x9f\x13U\xd4\x98\xbd\xde 
\x9a\xf4\xd1}[\xfb{,q\x14Kf\x06\x1e\x10\xd6\x17\xbbl\x90\x16\xb9\xb3\xd8\x07\xee\xfc\xa8\x8eI\x10]\x9c\x1ava\x054W\xad\xdf\xb3\x18\xee\xbdd\x15\xdf$\x85St\n\xde\xee?\x91\xa0\x83\x11\x08\xd8\x01\x80\x10B\x04\x00\x04S\x04B\xc7Tw\x9f\xb9,\x00\x00\x00\x00\x01\x10\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (zstd !) - b'', - { - b'location': b'store', - b'path': b'00changelog.i', - b'size': 527 (no-zstd !) - b'size': 530 (zstd !) - }, - b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 
\x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (no-zstd !) - b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00Q\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd WE\x02\x00r\x04\x0f\x14\x90\x01\x0e#\xf7h$;NQC%\xf8f\xd7\xb1\x81\x8d+\x01\x16+)5\xa8\x19\xdaA\xae\xe3\x00\xe9v\xe2l\x05v\x19\x11\xd4\xc1onK\xa2\x17c\xb4\xf3\xe7 z\x13\x8f\x1c\xf3j4\x03\x03\x00`\x06\x84\x8b\x1a\n\x14\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (zstd !) - b'' - ] - - $ cat error.log
--- a/tests/test-wireproto-content-redirects.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1475 +0,0 @@ - $ . $TESTDIR/wireprotohelpers.sh - -persistent-nodemap is not enabled by default. It is not relevant for this test so disable it. - - $ cat >> $HGRCPATH << EOF - > [format] - > use-persistent-nodemap = no - > [extensions] - > blackbox = - > [blackbox] - > track = simplecache - > EOF - - $ hg init server - $ enablehttpv2 server - $ cd server - $ cat >> .hg/hgrc << EOF - > [server] - > compressionengines = zlib - > [extensions] - > simplecache = $TESTDIR/wireprotosimplecache.py - > [simplecache] - > cacheapi = true - > EOF - - $ echo a0 > a - $ echo b0 > b - $ hg -q commit -A -m 'commit 0' - $ echo a1 > a - $ hg commit -m 'commit 1' - - $ hg --debug debugindex -m - rev linkrev nodeid p1 p2 - 0 0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 - 1 1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 - - $ hg --config simplecache.redirectsfile=redirects.py serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - - $ cat > redirects.py << EOF - > [ - > { - > b'name': b'target-a', - > b'protocol': b'http', - > b'snirequired': False, - > b'tlsversions': [b'1.2', b'1.3'], - > b'uris': [b'http://example.com/'], - > }, - > ] - > EOF - -Redirect targets advertised when configured - - $ sendhttpv2peerhandshake << EOF - > command capabilities - > EOF - creating http peer for wire protocol version 2 - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1,X-HgUpgrade-1\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: 2289\r\n - s> \r\n - s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDb
oolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - (remote redirect target target-a is compatible) (tls1.2 !) - (remote redirect target target-a requires unsupported TLS versions: 1.2, 1.3) (no-tls1.2 !) - sending capabilities command - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 111\r\n (tls1.2 !) - s> content-length: 102\r\n (no-tls1.2 !) - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a (tls1.2 !) 
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80 (no-tls1.2 !) - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 6de\r\n - s> \xd6\x06\x00\x01\x00\x02\x041 - s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\
xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/ - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - response: gen[ - { - b'commands': { - b'branchmap': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'capabilities': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'changesetdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'bookmarks', - b'parents', - b'phase', - b'revision' - ]) - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filedata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'path': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filesdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'firstchangeset', - b'linknode', - b'parents', - 
b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'dict' - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 50000 - }, - b'heads': { - b'args': { - b'publiconly': { - b'default': False, - b'required': False, - b'type': b'bool' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'known': { - b'args': { - b'nodes': { - b'default': [], - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'listkeys': { - b'args': { - b'namespace': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'lookup': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'manifestdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'tree': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 100000 - }, - b'pushkey': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - }, - b'namespace': { - b'required': True, - b'type': b'bytes' - }, - b'new': { - b'required': True, - b'type': b'bytes' - }, - b'old': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'push' - ] - }, - b'rawstorefiledata': { - b'args': { - b'files': { - b'required': True, - b'type': b'list' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - } - }, - b'framingmediatypes': [ - b'application/mercurial-exp-framing-0006' - ], - 
b'pathfilterprefixes': set([ - b'path:', - b'rootfilesin:' - ]), - b'rawrepoformats': [ - b'generaldelta', - b'revlogv1', - b'sparserevlog' - ], - b'redirect': { - b'hashes': [ - b'sha256', - b'sha1' - ], - b'targets': [ - { - b'name': b'target-a', - b'protocol': b'http', - b'snirequired': False, - b'tlsversions': [ - b'1.2', - b'1.3' - ], - b'uris': [ - b'http://example.com/' - ] - } - ] - } - } - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - -Unknown protocol is filtered from compatible targets - - $ cat > redirects.py << EOF - > [ - > { - > b'name': b'target-a', - > b'protocol': b'http', - > b'uris': [b'http://example.com/'], - > }, - > { - > b'name': b'target-b', - > b'protocol': b'unknown', - > b'uris': [b'unknown://example.com/'], - > }, - > ] - > EOF - - $ sendhttpv2peerhandshake << EOF - > command capabilities - > EOF - creating http peer for wire protocol version 2 - s> setsockopt(6, 1, 1) -> None (?) - s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1,X-HgUpgrade-1\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: 2316\r\n - s> \r\n - s> 
\xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x
02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - (remote redirect target target-a is compatible) - (remote redirect target target-b uses unsupported protocol: unknown) - sending capabilities command - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 111\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 6f9\r\n - s> \xf1\x06\x00\x01\x00\x02\x041 - s> 
\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x8
3LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/ - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - response: gen[ - { - b'commands': { - b'branchmap': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'capabilities': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'changesetdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'bookmarks', - b'parents', - b'phase', - b'revision' - ]) - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filedata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'path': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filesdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'firstchangeset', - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'dict' - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 50000 - }, - b'heads': { - b'args': { - b'publiconly': { - b'default': False, - b'required': False, - b'type': b'bool' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'known': { - b'args': { - b'nodes': { - b'default': [], - b'required': False, - b'type': 
b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'listkeys': { - b'args': { - b'namespace': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'lookup': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'manifestdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'tree': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 100000 - }, - b'pushkey': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - }, - b'namespace': { - b'required': True, - b'type': b'bytes' - }, - b'new': { - b'required': True, - b'type': b'bytes' - }, - b'old': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'push' - ] - }, - b'rawstorefiledata': { - b'args': { - b'files': { - b'required': True, - b'type': b'list' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - } - }, - b'framingmediatypes': [ - b'application/mercurial-exp-framing-0006' - ], - b'pathfilterprefixes': set([ - b'path:', - b'rootfilesin:' - ]), - b'rawrepoformats': [ - b'generaldelta', - b'revlogv1', - b'sparserevlog' - ], - b'redirect': { - b'hashes': [ - b'sha256', - b'sha1' - ], - b'targets': [ - { - b'name': b'target-a', - b'protocol': b'http', - b'uris': [ - b'http://example.com/' - ] - }, - { - b'name': b'target-b', - b'protocol': b'unknown', - b'uris': [ - b'unknown://example.com/' - ] - } - ] - } - } - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - -Missing SNI support filters targets that require SNI - - $ cat > nosni.py << EOF - > 
from mercurial import sslutil - > sslutil.hassni = False - > EOF - $ cat >> $HGRCPATH << EOF - > [extensions] - > nosni=`pwd`/nosni.py - > EOF - - $ cat > redirects.py << EOF - > [ - > { - > b'name': b'target-bad-tls', - > b'protocol': b'https', - > b'uris': [b'https://example.com/'], - > b'snirequired': True, - > }, - > ] - > EOF - - $ sendhttpv2peerhandshake << EOF - > command capabilities - > EOF - creating http peer for wire protocol version 2 - s> setsockopt(6, 1, 1) -> None (?) - s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1,X-HgUpgrade-1\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: 2276\r\n - s> \r\n - s> 
\xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x
02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - (redirect target target-bad-tls requires SNI, which is unsupported) - sending capabilities command - s> setsockopt(6, 1, 1) -> None (?) - s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 102\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80 - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 6d1\r\n - s> \xc9\x06\x00\x01\x00\x02\x041 - s> 
\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x8
3LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/ - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - response: gen[ - { - b'commands': { - b'branchmap': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'capabilities': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'changesetdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'bookmarks', - b'parents', - b'phase', - b'revision' - ]) - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filedata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'path': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filesdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'firstchangeset', - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'dict' - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 50000 - }, - b'heads': { - b'args': { - b'publiconly': { - b'default': False, - b'required': False, - b'type': b'bool' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'known': { - b'args': { - b'nodes': { - b'default': [], - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - 
b'pull' - ] - }, - b'listkeys': { - b'args': { - b'namespace': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'lookup': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'manifestdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'tree': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 100000 - }, - b'pushkey': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - }, - b'namespace': { - b'required': True, - b'type': b'bytes' - }, - b'new': { - b'required': True, - b'type': b'bytes' - }, - b'old': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'push' - ] - }, - b'rawstorefiledata': { - b'args': { - b'files': { - b'required': True, - b'type': b'list' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - } - }, - b'framingmediatypes': [ - b'application/mercurial-exp-framing-0006' - ], - b'pathfilterprefixes': set([ - b'path:', - b'rootfilesin:' - ]), - b'rawrepoformats': [ - b'generaldelta', - b'revlogv1', - b'sparserevlog' - ], - b'redirect': { - b'hashes': [ - b'sha256', - b'sha1' - ], - b'targets': [ - { - b'name': b'target-bad-tls', - b'protocol': b'https', - b'snirequired': True, - b'uris': [ - b'https://example.com/' - ] - } - ] - } - } - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat >> $HGRCPATH << EOF - > [extensions] - > nosni=! 
- > EOF - -Unknown tls value is filtered from compatible targets - - $ cat > redirects.py << EOF - > [ - > { - > b'name': b'target-bad-tls', - > b'protocol': b'https', - > b'uris': [b'https://example.com/'], - > b'tlsversions': [b'42', b'39'], - > }, - > ] - > EOF - - $ sendhttpv2peerhandshake << EOF - > command capabilities - > EOF - creating http peer for wire protocol version 2 - s> setsockopt(6, 1, 1) -> None (?) - s> GET /?cmd=capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> vary: X-HgProto-1,X-HgUpgrade-1\r\n - s> x-hgproto-1: cbor\r\n - s> x-hgupgrade-1: exp-http-v2-0003\r\n - s> accept: application/mercurial-0.1\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: 2282\r\n - s> \r\n - s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x8
1DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash - (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42) - sending capabilities command - s> setsockopt(6, 1, 1) -> None (?) 
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> accept: application/mercurial-exp-framing-0006\r\n - s> content-type: application/mercurial-exp-framing-0006\r\n - s> content-length: 102\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> user-agent: Mercurial debugwireproto\r\n - s> \r\n - s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80 - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-exp-framing-0006\r\n - s> Transfer-Encoding: chunked\r\n - s> \r\n - s> 11\r\n - s> \t\x00\x00\x01\x00\x02\x01\x92 - s> Hidentity - s> \r\n - s> 13\r\n - s> \x0b\x00\x00\x01\x00\x02\x041 - s> \xa1FstatusBok - s> \r\n - s> 6d7\r\n - s> \xcf\x06\x00\x01\x00\x02\x041 - s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4Dt
ypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/ - s> \r\n - s> 8\r\n - s> \x00\x00\x00\x01\x00\x02\x002 - s> \r\n - s> 0\r\n - s> \r\n - response: gen[ - { - b'commands': { - b'branchmap': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'capabilities': { - b'args': {}, - b'permissions': [ - b'pull' - ] - }, - b'changesetdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'bookmarks', - b'parents', - b'phase', - b'revision' - ]) - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filedata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, 
- b'type': b'list' - }, - b'path': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'filesdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'firstchangeset', - b'linknode', - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'dict' - }, - b'revisions': { - b'required': True, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 50000 - }, - b'heads': { - b'args': { - b'publiconly': { - b'default': False, - b'required': False, - b'type': b'bool' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'known': { - b'args': { - b'nodes': { - b'default': [], - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'listkeys': { - b'args': { - b'namespace': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'lookup': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ] - }, - b'manifestdata': { - b'args': { - b'fields': { - b'default': set([]), - b'required': False, - b'type': b'set', - b'validvalues': set([ - b'parents', - b'revision' - ]) - }, - b'haveparents': { - b'default': False, - b'required': False, - b'type': b'bool' - }, - b'nodes': { - b'required': True, - b'type': b'list' - }, - b'tree': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'pull' - ], - b'recommendedbatchsize': 100000 - }, - b'pushkey': { - b'args': { - b'key': { - b'required': True, - b'type': b'bytes' - }, - b'namespace': { - b'required': True, - b'type': b'bytes' - }, - b'new': { - b'required': True, - b'type': b'bytes' - }, - b'old': { - b'required': True, - b'type': b'bytes' - } - }, - b'permissions': [ - b'push' - ] - }, - 
b'rawstorefiledata': { - b'args': { - b'files': { - b'required': True, - b'type': b'list' - }, - b'pathfilter': { - b'default': None, - b'required': False, - b'type': b'list' - } - }, - b'permissions': [ - b'pull' - ] - } - }, - b'framingmediatypes': [ - b'application/mercurial-exp-framing-0006' - ], - b'pathfilterprefixes': set([ - b'path:', - b'rootfilesin:' - ]), - b'rawrepoformats': [ - b'generaldelta', - b'revlogv1', - b'sparserevlog' - ], - b'redirect': { - b'hashes': [ - b'sha256', - b'sha1' - ], - b'targets': [ - { - b'name': b'target-bad-tls', - b'protocol': b'https', - b'tlsversions': [ - b'42', - b'39' - ], - b'uris': [ - b'https://example.com/' - ] - } - ] - } - } - ] - (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob) - -Set up the server to issue content redirects to its built-in API server. - - $ cat > redirects.py << EOF - > [ - > { - > b'name': b'local', - > b'protocol': b'http', - > b'uris': [b'http://example.com/'], - > }, - > ] - > EOF - -Request to eventual cache URL should return 404 (validating the cache server works) - - $ sendhttpraw << EOF - > httprequest GET api/simplecache/missingkey - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /api/simplecache/missingkey HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 404 Not Found\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: text/plain\r\n - s> Content-Length: 22\r\n - s> \r\n - s> key not found in cache - -Send a cacheable request - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - -Cached entry should be available on server - - $ sendhttpraw << EOF - > httprequest GET api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c - > user-agent: test - > EOF - using raw connection to peer - s> setsockopt(6, 1, 1) -> None (?) 
- s> GET /api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c HTTP/1.1\r\n - s> Accept-Encoding: identity\r\n - s> user-agent: test\r\n - s> host: $LOCALIP:$HGPORT\r\n (glob) - s> \r\n - s> makefile('rb', None) - s> HTTP/1.1 200 OK\r\n - s> Server: testing stub value\r\n - s> Date: $HTTP_DATE$\r\n - s> Content-Type: application/mercurial-cbor\r\n - s> Content-Length: 91\r\n - s> \r\n - s> \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 - cbor> [ - { - b'totalitems': 1 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - -2nd request should result in content redirect response - - $ sendhttpv2peer << EOF - > command manifestdata - > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41'] - > tree eval:b'' - > fields eval:[b'parents'] - > EOF - creating http peer for wire protocol version 2 - sending manifestdata command - response: gen[ - { - b'totalitems': 1 - }, - { - b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - b'parents': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - ] - } - ] - - $ cat error.log - $ killdaemons.py - - $ cat .hg/blackbox.log - *> cacher constructed for manifestdata (glob) - *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> cacher constructed for manifestdata (glob) - *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob) - *> sending content 
redirect for 47abb8efa5f01b8964d74917793ad2464db0fa2c to http://*:$HGPORT/api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
--- a/tests/test-wireproto-exchangev2-shallow.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,597 +0,0 @@ -#require sqlite - -Tests for wire protocol version 2 exchange. -Tests in this file should be folded into existing tests once protocol -v2 has enough features that it can be enabled via #testcase in existing -tests. - - $ . $TESTDIR/wireprotohelpers.sh - $ enablehttpv2client - $ cat >> $HGRCPATH << EOF - > [extensions] - > sqlitestore = - > pullext = $TESTDIR/pullext.py - > [storage] - > new-repo-backend=sqlite - > EOF - -Configure a server - - $ hg init server-basic - $ enablehttpv2 server-basic - $ cd server-basic - $ mkdir dir0 dir1 - $ echo a0 > a - $ echo b0 > b - $ hg -q commit -A -m 'commit 0' - $ echo c0 > dir0/c - $ echo d0 > dir0/d - $ hg -q commit -A -m 'commit 1' - $ echo e0 > dir1/e - $ echo f0 > dir1/f - $ hg -q commit -A -m 'commit 2' - $ echo c1 > dir0/c - $ echo e1 > dir1/e - $ hg commit -m 'commit 3' - $ echo c2 > dir0/c - $ echo e2 > dir1/e - $ echo f1 > dir1/f - $ hg commit -m 'commit 4' - $ echo a1 > a - $ echo b1 > b - $ hg commit -m 'commit 5' - - $ hg log -G -T '{node} {desc}' - @ 93a8bd067ed2840d9aa810ad598168383a3a2c3a commit 5 - | - o dc666cf9ecf3d94e6b830f30e5f1272e2a9164d9 commit 4 - | - o 97765fc3cd624fd1fa0176932c21ffd16adf432e commit 3 - | - o 47fe012ab237a8c7fc0c78f9f26d5866eef3f825 commit 2 - | - o b709380892b193c1091d3a817f706052e346821b commit 1 - | - o 3390ef850073fbc2f0dfff2244342c8e9229013a commit 0 - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - - $ cd .. 
- -Shallow clone pulls down latest revision of every file - - $ hg --debug clone --depth 1 http://localhost:$HGPORT client-shallow-1 - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1170; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 3390ef850073 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) 
- add changeset b709380892b1 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 47fe012ab237 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 97765fc3cd62 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset dc666cf9ecf3 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 93a8bd067ed2 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8', - '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb', - '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3', - 'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby', - '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83' - ], - 'tree': '' - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1515; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'linknode', - 'parents', - 'revision' - ]), - 'haveparents': False, - 'revisions': [ - { - 'nodes': [ - '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:' - ], - 'type': 'changesetexplicit' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received 
frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - updating the branch cache - new changesets 3390ef850073:93a8bd067ed2 - updating to branch default - resolving manifests - branchmerge: False, force: False, partial: False - ancestor: 000000000000, local: 000000000000+, remote: 93a8bd067ed2 - a: remote created -> g - getting a - b: remote created -> g - getting b - dir0/c: remote created -> g - getting dir0/c - dir0/d: remote created -> g - getting dir0/d - dir1/e: remote created -> g - getting dir1/e - dir1/f: remote created -> g - getting dir1/f - 6 files updated, 0 files merged, 0 files removed, 0 files unresolved - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - -#if chg - $ hg --kill-chg-daemon - $ sleep 2 -#endif - $ sqlite3 -line client-shallow-1/.hg/store/db.sqlite << EOF - > SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC; - > EOF - id = 1 - path = a - revnum = 0 - node = \x9a8\x12)\x97\xb3\xac\x97\xbe*\x9a\xa2\xe5V\x83\x83A\xfd\xf2\xcc (esc) - p1rev = -1 - p2rev = -1 - linkrev = 5 - flags = 2 - - id = 2 - path = b - revnum = 0 - node = \xb1zk\xd3g=\x9a\xb8\xce\xd5\x81\xa2 \xf6/=\xa5\xccEx (esc) - p1rev = -1 - p2rev = -1 - linkrev = 5 - flags = 2 - - id = 3 - path = dir0/c - revnum = 0 - node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc) - p1rev = -1 - p2rev = -1 - linkrev = 5 - flags = 2 - - id = 4 - path = dir0/d - revnum = 0 - node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc) - p1rev = -1 - p2rev = -1 - linkrev = 5 - flags = 0 - - id = 5 - path = dir1/e - revnum = 0 - node = ]\xf3\xac\xd8\xd0\xc7\xfaP\x98\xd0'\x9a\x044\xc3\x02\x9e+x\xe1 (esc) - p1rev = -1 - 
p2rev = -1 - linkrev = 5 - flags = 2 - - id = 6 - path = dir1/f - revnum = 0 - node = (\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e (esc) - p1rev = -1 - p2rev = -1 - linkrev = 5 - flags = 2 - -Test a shallow clone with only some files - - $ hg --debug clone --depth 1 --include dir0/ http://localhost:$HGPORT client-shallow-narrow-1 - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1170; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) 
- add changeset 3390ef850073 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset b709380892b1 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 47fe012ab237 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 97765fc3cd62 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset dc666cf9ecf3 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 93a8bd067ed2 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8', - '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb', - '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3', - 'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby', - '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83' - ], - 'tree': '' - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1515; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'linknode', - 'parents', - 'revision' - ]), - 'haveparents': False, - 'pathfilter': { - 'include': [ - 'path:dir0' - ] - }, - 'revisions': [ - { - 'nodes': [ - 
'\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:' - ], - 'type': 'changesetexplicit' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=355; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - updating the branch cache - new changesets 3390ef850073:93a8bd067ed2 - updating to branch default - resolving manifests - branchmerge: False, force: False, partial: False - ancestor: 000000000000, local: 000000000000+, remote: 93a8bd067ed2 - dir0/c: remote created -> g - getting dir0/c - dir0/d: remote created -> g - getting dir0/d - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - -#if chg - $ hg --kill-chg-daemon - $ sleep 2 -#endif - $ sqlite3 -line client-shallow-narrow-1/.hg/store/db.sqlite << EOF - > SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC; - > EOF - id = 1 - path = dir0/c - revnum = 0 - node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc) - p1rev = -1 - p2rev = -1 - linkrev = 5 - flags = 2 - - id = 2 - path = dir0/d - revnum = 0 - node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc) - p1rev = -1 - p2rev = -1 - linkrev = 5 - flags = 0 - -Cloning an old revision with depth=1 works - - $ hg --debug clone --depth 1 -r 97765fc3cd624fd1fa0176932c21ffd16adf432e http://localhost:$HGPORT client-shallow-2 - using http://localhost:$HGPORT/ - sending capabilities command - sending 1 commands - sending command lookup: { - 'key': '97765fc3cd624fd1fa0176932c21ffd16adf432e' - } - received frame(size=9; request=1; stream=2; 
streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=21; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) 
- add changeset 3390ef850073 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset b709380892b1 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 47fe012ab237 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 97765fc3cd62 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8', - '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb', - '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3' - ], - 'tree': '' - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'linknode', - 'parents', - 'revision' - ]), - 'haveparents': False, - 'revisions': [ - { - 'nodes': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'type': 'changesetexplicit' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - updating the branch cache - new changesets 3390ef850073:97765fc3cd62 - updating to branch default - resolving manifests - branchmerge: False, force: False, partial: False - ancestor: 000000000000, local: 000000000000+, remote: 97765fc3cd62 - a: remote created -> g - getting a - b: remote created -> g - getting b - dir0/c: remote created -> g - getting dir0/c - dir0/d: remote created -> g - getting dir0/d - dir1/e: remote created -> g - getting dir1/e - dir1/f: remote created -> g - getting dir1/f - 6 files updated, 0 files merged, 0 files removed, 0 files unresolved - updating the branch cache - (sent 6 HTTP requests and * bytes; received * bytes in responses) (glob) - -Incremental pull of shallow clone fetches new changesets - - $ hg --cwd client-shallow-2 --debug pull http://localhost:$HGPORT - pulling from http://localhost:$HGPORT/ - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - searching for changes - all local changesets known remotely - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:' - ], - 'roots': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'type': 'changesetdagrange' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=400; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset dc666cf9ecf3 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) - add changeset 93a8bd067ed2 - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?) 
- checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - 'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby', - '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83' - ], - 'tree': '' - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=561; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'linknode', - 'parents', - 'revision' - ]), - 'haveparents': False, - 'revisions': [ - { - 'nodes': [ - '\xdcfl\xf9\xec\xf3\xd9Nk\x83\x0f0\xe5\xf1\'.*\x91d\xd9', - '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:' - ], - 'type': 'changesetexplicit' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1373; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - updating the branch cache - new changesets dc666cf9ecf3:93a8bd067ed2 - (run 'hg update' to get a working copy) - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ hg --cwd client-shallow-2 up tip - merging dir0/c - merging dir1/e - 3 files updated, 2 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-wireproto-exchangev2.t Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1392 +0,0 @@ -Tests for wire protocol version 2 exchange. -Tests in this file should be folded into existing tests once protocol -v2 has enough features that it can be enabled via #testcase in existing -tests. - - $ . $TESTDIR/wireprotohelpers.sh - $ enablehttpv2client - - $ hg init server-simple - $ enablehttpv2 server-simple - $ cd server-simple - $ cat >> .hg/hgrc << EOF - > [phases] - > publish = false - > EOF - $ echo a0 > a - $ echo b0 > b - $ hg -q commit -A -m 'commit 0' - - $ echo a1 > a - $ hg commit -m 'commit 1' - $ hg phase --public -r . - $ echo a2 > a - $ hg commit -m 'commit 2' - - $ hg -q up -r 0 - $ echo b1 > b - $ hg -q commit -m 'head 2 commit 1' - $ echo b2 > b - $ hg -q commit -m 'head 2 commit 2' - - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - - $ cd .. - -Test basic clone - -Output is flaky, save it in a file and check part independently - $ hg --debug clone -U http://localhost:$HGPORT client-simple > clone-output - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - add changeset 3390ef850073 - add changeset 4432d83626e8 - add changeset cd2534766bec - add changeset e96ae20f4188 - add changeset caa2a465451d - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': 
True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8', - '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5', - '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f', - '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f', - '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5', - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1' - ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - new changesets 3390ef850073:caa2a465451d (3 drafts) - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; 
streamflags=encoded; type=command-response; flags=continuation) - received frame(size=941; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=992; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=901; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - -All changesets should have been transferred - - $ hg -R client-simple debugindex -c - rev linkrev nodeid p1 p2 - 0 0 3390ef850073 000000000000 000000000000 - 1 1 4432d83626e8 3390ef850073 000000000000 - 2 2 cd2534766bec 4432d83626e8 000000000000 - 3 3 e96ae20f4188 3390ef850073 000000000000 - 4 4 caa2a465451d e96ae20f4188 000000000000 - - $ hg -R client-simple log -G -T '{rev} {node} {phase}\n' - o 4 caa2a465451dd1facda0f5b12312c355584188a1 draft - | - o 3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft - | - | o 2 cd2534766bece138c7c1afdc6825302f0f62d81f draft - | | - | o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public - |/ - o 0 3390ef850073fbc2f0dfff2244342c8e9229013a public - - -All manifests should have been transferred - - $ hg -R client-simple debugindex -m - rev linkrev nodeid p1 p2 - 0 0 992f4779029a 000000000000 000000000000 - 1 1 
a988fb43583e 992f4779029a 000000000000 - 2 2 ec804e488c20 a988fb43583e 000000000000 - 3 3 045c7f3927da 992f4779029a 000000000000 - 4 4 379cb0c2e664 045c7f3927da 000000000000 - -Cloning only a specific revision works - -Output is flaky, save it in a file and check part independently - $ hg --debug clone -U -r 4432d83626e8 http://localhost:$HGPORT client-singlehead > clone-output - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - sending 1 commands - sending command lookup: { - 'key': '4432d83626e8' - } - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - add changeset 3390ef850073 - add changeset 4432d83626e8 - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0' - ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - new changesets 3390ef850073:4432d83626e8 - updating the branch cache - (sent 6 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) 
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=21; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=381; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=404; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; 
flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=439; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - - $ cd client-singlehead - - $ hg log -G -T '{rev} {node} {phase}\n' - o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public - | - o 0 3390ef850073fbc2f0dfff2244342c8e9229013a public - - - $ hg debugindex -m - rev linkrev nodeid p1 p2 - 0 0 992f4779029a 000000000000 000000000000 - 1 1 a988fb43583e 992f4779029a 000000000000 - -Incremental pull works - -Output is flaky, save it in a file and check part independently - $ hg --debug pull > pull-output - - $ cat pull-output | grep -v "received frame" - pulling from http://localhost:$HGPORT/ - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [ - 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0' - ] - } - searching for changes - all local changesets known remotely - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f' - ], - 'roots': [ - 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0' - ], - 'type': 'changesetdagrange' - } - ] - } - add changeset cd2534766bec - add changeset e96ae20f4188 - add changeset caa2a465451d - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5', - '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f', 
- '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'revisions': [ - { - 'nodes': [ - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f', - '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5', - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1' - ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - new changesets cd2534766bec:caa2a465451d (3 drafts) - (run 'hg update' to get a working copy) - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat pull-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=573; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received 
frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=601; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=527; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm pull-output - - $ hg log -G -T '{rev} {node} {phase}\n' - o 4 caa2a465451dd1facda0f5b12312c355584188a1 draft - | - o 3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft - | - | o 2 cd2534766bece138c7c1afdc6825302f0f62d81f draft - | | - | o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public - |/ - o 0 3390ef850073fbc2f0dfff2244342c8e9229013a public - - - $ hg debugindex -m - rev linkrev nodeid p1 p2 - 0 0 992f4779029a 000000000000 000000000000 - 1 1 a988fb43583e 992f4779029a 000000000000 - 2 2 ec804e488c20 a988fb43583e 000000000000 - 3 3 045c7f3927da 992f4779029a 000000000000 - 4 4 379cb0c2e664 045c7f3927da 000000000000 - -Phase-only update works -TODO this doesn't work - - $ hg -R ../server-simple phase --public -r caa2a465451dd - $ hg --debug pull - pulling from http://localhost:$HGPORT/ - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [ - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f', - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1' - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; 
streamflags=encoded; type=command-response; flags=continuation) - received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=3; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - searching for changes - all remote heads known locally - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f' - ], - 'roots': [ - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f' - ], - 'type': 'changesetdagrange' - } - ] - } - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - checking for updated bookmarks - (run 'hg update' to get a working copy) - (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ hg log -G -T '{rev} {node} {phase}\n' - o 4 caa2a465451dd1facda0f5b12312c355584188a1 draft - | - o 3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft - | - | o 2 cd2534766bece138c7c1afdc6825302f0f62d81f draft - | | - | o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public - |/ - o 0 
3390ef850073fbc2f0dfff2244342c8e9229013a public - - - $ cd .. - -Bookmarks are transferred on clone - - $ hg -R server-simple bookmark -r 3390ef850073fbc2f0dfff2244342c8e9229013a book-1 - $ hg -R server-simple bookmark -r cd2534766bece138c7c1afdc6825302f0f62d81f book-2 - -Output is flaky, save it in a file and check part independently - $ hg --debug clone -U http://localhost:$HGPORT/ client-bookmarks > clone-output - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - add changeset 3390ef850073 - add changeset 4432d83626e8 - add changeset cd2534766bec - add changeset e96ae20f4188 - add changeset caa2a465451d - checking for updated bookmarks - adding remote bookmark book-1 - adding remote bookmark book-2 - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8', - '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5', - '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f', - '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0', - 
'\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f', - '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5', - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1' - ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - new changesets 3390ef850073:caa2a465451d (1 drafts) - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=979; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=992; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; 
streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=901; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - - $ hg -R client-bookmarks bookmarks - book-1 0:3390ef850073 - book-2 2:cd2534766bec - -Server-side bookmark moves are reflected during `hg pull` - - $ hg -R server-simple bookmark -r cd2534766bece138c7c1afdc6825302f0f62d81f book-1 - moving bookmark 'book-1' forward from 3390ef850073 - -Output is flaky, save it in a file and check part independently - $ hg -R client-bookmarks --debug pull > pull-output - - $ cat pull-output | grep -v "received frame" - pulling from http://localhost:$HGPORT/ - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [ - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f', - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1' - ] - } - searching for changes - all remote heads known locally - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f' - ], - 'roots': [ - '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1', - '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f' - ], - 'type': 'changesetdagrange' - } - ] - } - checking for updated bookmarks - updating bookmark book-1 - (run 'hg update' to get a working copy) - (sent 3 HTTP requests and * bytes; received * bytes in responses) 
(glob) - - $ cat pull-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=3; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=65; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm pull-output - - $ hg -R client-bookmarks bookmarks - book-1 2:cd2534766bec - book-2 2:cd2534766bec - - $ killdaemons.py - -Let's set up a slightly more complicated server - - $ hg init server-2 - $ enablehttpv2 server-2 - $ cd server-2 - $ mkdir dir0 dir1 - $ echo a0 > a - $ echo b0 > b - $ hg -q commit -A -m 'commit 0' - $ echo c0 > dir0/c - $ echo d0 > dir0/d - $ hg -q commit -A -m 'commit 1' - $ echo e0 > dir1/e - $ echo f0 > dir1/f - $ hg -q commit -A -m 'commit 2' - $ echo c1 > dir0/c - $ echo e1 > dir1/e - $ hg commit -m 'commit 3' - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - - $ cd .. 
- -Narrow clone only fetches some files - -Output is flaky, save it in a file and check part independently - $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --include dir0/ http://localhost:$HGPORT/ client-narrow-0 > clone-output - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - add changeset 3390ef850073 - add changeset b709380892b1 - add changeset 47fe012ab237 - add changeset 97765fc3cd62 - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8', - '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb', - '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'pathfilter': { - 'include': [ - 'path:dir0' - ] - }, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b', - 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%', - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - new changesets 3390ef850073:97765fc3cd62 - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; 
streamflags=encoded; type=command-response; flags=continuation) - received frame(size=449; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - -#if reporevlogstore - $ find client-narrow-0/.hg/store -type f -name '*.i' | sort - client-narrow-0/.hg/store/00changelog.i - client-narrow-0/.hg/store/00manifest.i - client-narrow-0/.hg/store/data/dir0/c.i - client-narrow-0/.hg/store/data/dir0/d.i -#endif - ---exclude by itself works - -Output is flaky, save it in a file and check part independently - $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --exclude dir0/ http://localhost:$HGPORT/ client-narrow-1 > clone-output - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - add changeset 3390ef850073 - add changeset b709380892b1 - add changeset 47fe012ab237 - add changeset 97765fc3cd62 - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8', - '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb', - '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'pathfilter': { - 'exclude': [ - 'path:dir0' - ], - 'include': [ - 'path:.' - ] - }, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b', - 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%', - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - new changesets 3390ef850073:97765fc3cd62 - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; 
streamflags=encoded; type=command-response; flags=continuation) - received frame(size=709; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - -#if reporevlogstore - $ find client-narrow-1/.hg/store -type f -name '*.i' | sort - client-narrow-1/.hg/store/00changelog.i - client-narrow-1/.hg/store/00manifest.i - client-narrow-1/.hg/store/data/a.i - client-narrow-1/.hg/store/data/b.i - client-narrow-1/.hg/store/data/dir1/e.i - client-narrow-1/.hg/store/data/dir1/f.i -#endif - -Mixing --include and --exclude works - -Output is flaky, save it in a file and check part independently - $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --include dir0/ --exclude dir0/c http://localhost:$HGPORT/ client-narrow-2 > clone-output - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - add changeset 3390ef850073 - add changeset b709380892b1 - add changeset 47fe012ab237 - add changeset 97765fc3cd62 - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8', - '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb', - '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'pathfilter': { - 'exclude': [ - 'path:dir0/c' - ], - 'include': [ - 'path:dir0' - ] - }, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b', - 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%', - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - new changesets 3390ef850073:97765fc3cd62 - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; 
streamflags=encoded; type=command-response; flags=continuation) - received frame(size=160; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - -#if reporevlogstore - $ find client-narrow-2/.hg/store -type f -name '*.i' | sort - client-narrow-2/.hg/store/00changelog.i - client-narrow-2/.hg/store/00manifest.i - client-narrow-2/.hg/store/data/dir0/d.i -#endif - ---stream will use rawfiledata to transfer changelog and manifestlog, then -fall through to get files data - -Output is flaky, save it in a file and check part independently - $ hg --debug clone --stream -U http://localhost:$HGPORT client-stream-0 > clone-output - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - sending 1 commands - sending command rawstorefiledata: { - 'files': [ - 'changelog', - 'manifestlog' - ] - } - updating the branch cache - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ] - } - searching for changes - all remote heads known locally - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'roots': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'type': 'changesetdagrange' - } - ] - } - checking for updated bookmarks - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b', - 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%', - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (no-zstd !) - received frame(size=1283; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (zstd !) 
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1133; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - ---stream + --include/--exclude will only obtain some files - -Output is flaky, save it in a file and check part independently - $ hg --debug --config extensions.pullext=$TESTDIR/pullext.py clone --stream --include dir0/ -U http://localhost:$HGPORT client-stream-2 > clone-output - - $ cat 
clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - sending 1 commands - sending command rawstorefiledata: { - 'files': [ - 'changelog', - 'manifestlog' - ] - } - updating the branch cache - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ] - } - searching for changes - all remote heads known locally - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'roots': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'type': 'changesetdagrange' - } - ] - } - checking for updated bookmarks - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'pathfilter': { - 'include': [ - 'path:dir0' - ] - }, - 'revisions': [ - { - 'nodes': [ - '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:', - '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b', - 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%', - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'type': 'changesetexplicit' - } - ] - } - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (no-zstd !) - received frame(size=1283; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (zstd !) 
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=449; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - -#if reporevlogstore - $ find client-stream-2/.hg/store -type f -name '*.i' | sort - client-stream-2/.hg/store/00changelog.i - client-stream-2/.hg/store/00manifest.i - client-stream-2/.hg/store/data/dir0/c.i - client-stream-2/.hg/store/data/dir0/d.i -#endif - -Shallow clone doesn't work with revlogs - 
-Output is flaky, save it in a file and check part independently - $ hg --debug --config extensions.pullext=$TESTDIR/pullext.py clone --depth 1 -U http://localhost:$HGPORT client-shallow-revlogs > clone-output - transaction abort! - rollback completed - abort: revlog storage does not support missing parents write mode - [255] - - $ cat clone-output | grep -v "received frame" - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending 2 commands - sending command heads: {} - sending command known: { - 'nodes': [] - } - sending 1 commands - sending command changesetdata: { - 'fields': set([ - 'bookmarks', - 'parents', - 'phase', - 'revision' - ]), - 'revisions': [ - { - 'heads': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' - ], - 'roots': [], - 'type': 'changesetdagrange' - } - ] - } - add changeset 3390ef850073 - add changeset b709380892b1 - add changeset 47fe012ab237 - add changeset 97765fc3cd62 - checking for updated bookmarks - sending 1 commands - sending command manifestdata: { - 'fields': set([ - 'parents', - 'revision' - ]), - 'haveparents': True, - 'nodes': [ - '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A', - '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8', - '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb', - '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3' - ], - 'tree': '' - } - sending 1 commands - sending command filesdata: { - 'fields': set([ - 'linknode', - 'parents', - 'revision' - ]), - 'haveparents': False, - 'revisions': [ - { - 'nodes': [ - '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.' 
- ], - 'type': 'changesetexplicit' - } - ] - } - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) - - $ cat clone-output | grep "received frame" - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos) - received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=1005; 
request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) - received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) - - $ rm clone-output - - $ killdaemons.py - -Repo with 2 DAG branches introducing same filenode, to test linknode adjustment - - $ hg init server-linknode - $ enablehttpv2 server-linknode - $ cd server-linknode - $ touch foo - $ hg -q commit -Am initial - $ echo foo > dupe-file - $ hg commit -Am 'dupe 1' - adding dupe-file - $ hg -q up -r 0 - $ echo foo > dupe-file - $ hg commit -Am 'dupe 2' - adding dupe-file - created new head - $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log - $ cat hg.pid > $DAEMON_PIDS - $ cd .. - -Perform an incremental pull of both heads and ensure linkrev is written out properly - - $ hg clone -r 96ee1d7354c4 http://localhost:$HGPORT client-linknode-1 - new changesets 96ee1d7354c4 - updating to branch default - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ cd client-linknode-1 - $ touch extra - $ hg commit -Am extra - adding extra - $ cd .. - - $ hg clone -r 96ee1d7354c4 http://localhost:$HGPORT client-linknode-2 - new changesets 96ee1d7354c4 - updating to branch default - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ cd client-linknode-2 - $ touch extra - $ hg commit -Am extra - adding extra - $ cd .. 
- - $ hg -R client-linknode-1 pull -r 1681c33f9f80 - pulling from http://localhost:$HGPORT/ - searching for changes - new changesets 1681c33f9f80 - (run 'hg update' to get a working copy) - -#if reporevlogstore - $ hg -R client-linknode-1 debugrevlogindex dupe-file - rev linkrev nodeid p1 p2 - 0 2 2ed2a3912a0b 000000000000 000000000000 -#endif - - $ hg -R client-linknode-2 pull -r 639c8990d6a5 - pulling from http://localhost:$HGPORT/ - searching for changes - new changesets 639c8990d6a5 - (run 'hg update' to get a working copy) - -#if reporevlogstore - $ hg -R client-linknode-2 debugrevlogindex dupe-file - rev linkrev nodeid p1 p2 - 0 2 2ed2a3912a0b 000000000000 000000000000 -#endif
--- a/tests/wireprotohelpers.sh Thu Dec 30 13:25:44 2021 +0100 +++ b/tests/wireprotohelpers.sh Tue Jan 18 10:27:13 2022 +0100 @@ -1,44 +1,23 @@ -HTTPV2=exp-http-v2-0003 MEDIATYPE=application/mercurial-exp-framing-0006 sendhttpraw() { hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT/ } -sendhttpv2peer() { - hg --config experimental.httppeer.v2-encoder-order=identity debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ -} - -sendhttpv2peerverbose() { - hg --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ -} - -sendhttpv2peerhandshake() { - hg --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto --peer http2 http://$LOCALIP:$HGPORT/ -} - cat > dummycommands.py << EOF from mercurial import ( wireprototypes, wireprotov1server, - wireprotov2server, ) @wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull') def customreadonlyv1(repo, proto): return wireprototypes.bytesresponse(b'customreadonly bytes response') -@wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull') -def customreadonlyv2(repo, proto): - yield b'customreadonly bytes response' - @wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push') def customreadwrite(repo, proto): return wireprototypes.bytesresponse(b'customreadwrite bytes response') -@wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push') -def customreadwritev2(repo, proto): - yield b'customreadwrite bytes response' EOF cat >> $HGRCPATH << EOF @@ -53,20 +32,3 @@ EOF } -enablehttpv2() { - cat >> $1/.hg/hgrc << EOF -[experimental] -web.apiserver = true -web.api.http-v2 = true -EOF -} - -enablehttpv2client() { - cat >> $HGRCPATH << EOF -[experimental] -httppeer.advertise-v2 = true -# So tests are in plain text. Also, zstd isn't available in all installs, -# which would make tests non-deterministic. 
-httppeer.v2-encoder-order = identity -EOF -}
--- a/tests/wireprotosimplecache.py Thu Dec 30 13:25:44 2021 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,220 +0,0 @@ -# wireprotosimplecache.py - Extension providing in-memory wire protocol cache -# -# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -from mercurial import ( - extensions, - registrar, - util, - wireprotoserver, - wireprototypes, - wireprotov2server, -) -from mercurial.interfaces import ( - repository, - util as interfaceutil, -) -from mercurial.utils import stringutil - -CACHE = None - -configtable = {} -configitem = registrar.configitem(configtable) - -configitem(b'simplecache', b'cacheapi', default=False) -configitem(b'simplecache', b'cacheobjects', default=False) -configitem(b'simplecache', b'redirectsfile', default=None) - -# API handler that makes cached keys available. 
-def handlecacherequest(rctx, req, res, checkperm, urlparts): - if rctx.repo.ui.configbool(b'simplecache', b'cacheobjects'): - res.status = b'500 Internal Server Error' - res.setbodybytes(b'cacheobjects not supported for api server') - return - - if not urlparts: - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(b'simple cache server') - return - - key = b'/'.join(urlparts) - - if key not in CACHE: - res.status = b'404 Not Found' - res.headers[b'Content-Type'] = b'text/plain' - res.setbodybytes(b'key not found in cache') - return - - res.status = b'200 OK' - res.headers[b'Content-Type'] = b'application/mercurial-cbor' - res.setbodybytes(CACHE[key]) - - -def cachedescriptor(req, repo): - return {} - - -wireprotoserver.API_HANDLERS[b'simplecache'] = { - b'config': (b'simplecache', b'cacheapi'), - b'handler': handlecacherequest, - b'apidescriptor': cachedescriptor, -} - - -@interfaceutil.implementer(repository.iwireprotocolcommandcacher) -class memorycacher(object): - def __init__( - self, ui, command, encodefn, redirecttargets, redirecthashes, req - ): - self.ui = ui - self.encodefn = encodefn - self.redirecttargets = redirecttargets - self.redirecthashes = redirecthashes - self.req = req - self.key = None - self.cacheobjects = ui.configbool(b'simplecache', b'cacheobjects') - self.cacheapi = ui.configbool(b'simplecache', b'cacheapi') - self.buffered = [] - - ui.log(b'simplecache', b'cacher constructed for %s\n', command) - - def __enter__(self): - return self - - def __exit__(self, exctype, excvalue, exctb): - if exctype: - self.ui.log(b'simplecache', b'cacher exiting due to error\n') - - def adjustcachekeystate(self, state): - # Needed in order to make tests deterministic. Don't copy this - # pattern for production caches! 
- del state[b'repo'] - - def setcachekey(self, key): - self.key = key - return True - - def lookup(self): - if self.key not in CACHE: - self.ui.log(b'simplecache', b'cache miss for %s\n', self.key) - return None - - entry = CACHE[self.key] - self.ui.log(b'simplecache', b'cache hit for %s\n', self.key) - - redirectable = True - - if not self.cacheapi: - redirectable = False - elif not self.redirecttargets: - redirectable = False - else: - clienttargets = set(self.redirecttargets) - ourtargets = {t[b'name'] for t in loadredirecttargets(self.ui)} - - # We only ever redirect to a single target (for now). So we don't - # need to store which target matched. - if not clienttargets & ourtargets: - redirectable = False - - if redirectable: - paths = self.req.dispatchparts[:-3] - paths.append(b'simplecache') - paths.append(self.key) - - url = b'%s/%s' % (self.req.baseurl, b'/'.join(paths)) - - # url = b'http://example.com/%s' % self.key - self.ui.log( - b'simplecache', - b'sending content redirect for %s to ' b'%s\n', - self.key, - url, - ) - response = wireprototypes.alternatelocationresponse( - url=url, mediatype=b'application/mercurial-cbor' - ) - - return {b'objs': [response]} - - if self.cacheobjects: - return { - b'objs': entry, - } - else: - return { - b'objs': [wireprototypes.encodedresponse(entry)], - } - - def onobject(self, obj): - if self.cacheobjects: - self.buffered.append(obj) - else: - self.buffered.extend(self.encodefn(obj)) - - yield obj - - def onfinished(self): - self.ui.log(b'simplecache', b'storing cache entry for %s\n', self.key) - if self.cacheobjects: - CACHE[self.key] = self.buffered - else: - CACHE[self.key] = b''.join(self.buffered) - - return [] - - -def makeresponsecacher( - orig, - repo, - proto, - command, - args, - objencoderfn, - redirecttargets, - redirecthashes, -): - return memorycacher( - repo.ui, - command, - objencoderfn, - redirecttargets, - redirecthashes, - proto._req, - ) - - -def loadredirecttargets(ui): - path = 
ui.config(b'simplecache', b'redirectsfile') - if not path: - return [] - - with open(path, 'rb') as fh: - s = fh.read() - - return stringutil.evalpythonliteral(s) - - -def getadvertisedredirecttargets(orig, repo, proto): - return loadredirecttargets(repo.ui) - - -def extsetup(ui): - global CACHE - - CACHE = util.lrucachedict(10000) - - extensions.wrapfunction( - wireprotov2server, b'makeresponsecacher', makeresponsecacher - ) - extensions.wrapfunction( - wireprotov2server, - b'getadvertisedredirecttargets', - getadvertisedredirecttargets, - )